net_processing.cpp
  1. // Copyright (c) 2009-2010 Satoshi Nakamoto
  2. // Copyright (c) 2009-2016 The Starwels developers
  3. // Distributed under the MIT software license, see the accompanying
  4. // file COPYING or http://www.opensource.org/licenses/mit-license.php.
  5. #include "net_processing.h"
  6. #include "addrman.h"
  7. #include "arith_uint256.h"
  8. #include "blockencodings.h"
  9. #include "chainparams.h"
  10. #include "consensus/validation.h"
  11. #include "hash.h"
  12. #include "init.h"
  13. #include "validation.h"
  14. #include "merkleblock.h"
  15. #include "net.h"
  16. #include "netmessagemaker.h"
  17. #include "netbase.h"
  18. #include "policy/fees.h"
  19. #include "policy/policy.h"
  20. #include "primitives/block.h"
  21. #include "primitives/transaction.h"
  22. #include "random.h"
  23. #include "reverse_iterator.h"
  24. #include "scheduler.h"
  25. #include "tinyformat.h"
  26. #include "txmempool.h"
  27. #include "ui_interface.h"
  28. #include "util.h"
  29. #include "utilmoneystr.h"
  30. #include "utilstrencodings.h"
  31. #include "validationinterface.h"
  32. #if defined(NDEBUG)
  33. # error "Starwels cannot be compiled without assertions."
  34. #endif
  35. std::atomic<int64_t> nTimeBestReceived(0); // Used only to inform the wallet of when we last received a block
  36. struct IteratorComparator
  37. {
  38. template<typename I>
  39. bool operator()(const I& a, const I& b)
  40. {
  41. return &(*a) < &(*b);
  42. }
  43. };
  44. struct COrphanTx {
  45. // When modifying, adapt the copy of this definition in tests/DoS_tests.
  46. CTransactionRef tx;
  47. NodeId fromPeer;
  48. int64_t nTimeExpire;
  49. };
  50. std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
  51. std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
  52. void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
  53. static size_t vExtraTxnForCompactIt = 0;
  54. static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(cs_main);
  55. static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // SHA256("main address relay")[0:8]
  56. // Internal stuff
  57. namespace {
  58. /** Number of nodes with fSyncStarted. */
  59. int nSyncStarted = 0;
  60. /**
  61. * Sources of received blocks, saved to be able to send them reject
  62. * messages or ban them when processing happens afterwards. Protected by
  63. * cs_main.
  64. * Set mapBlockSource[hash].second to false if the node should not be
  65. * punished if the block is invalid.
  66. */
  67. std::map<uint256, std::pair<NodeId, bool>> mapBlockSource;
  68. /**
  69. * Filter for transactions that were recently rejected by
  70. * AcceptToMemoryPool. These are not rerequested until the chain tip
  71. * changes, at which point the entire filter is reset. Protected by
  72. * cs_main.
  73. *
  74. * Without this filter we'd be re-requesting txs from each of our peers,
  75. * increasing bandwidth consumption considerably. For instance, with 100
  76. * peers, half of which relay a tx we don't accept, that might be a 50x
  77. * bandwidth increase. A flooding attacker attempting to roll-over the
  78. * filter using minimum-sized, 60-byte transactions might manage to send
  79. * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
  80. * two minute window to send invs to us.
  81. *
  82. * Decreasing the false positive rate is fairly cheap, so we pick one in a
  83. * million to make it highly unlikely for users to have issues with this
  84. * filter.
  85. *
  86. * Memory used: 1.3 MB
  87. */
  88. std::unique_ptr<CRollingBloomFilter> recentRejects;
  89. uint256 hashRecentRejectsChainTip;
  90. /** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
  91. struct QueuedBlock {
  92. uint256 hash;
  93. const CBlockIndex* pindex; //!< Optional.
  94. bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request.
  95. std::unique_ptr<PartiallyDownloadedBlock> partialBlock; //!< Optional, used for CMPCTBLOCK downloads
  96. };
  97. std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight;
  98. /** Stack of nodes which we have set to announce using compact blocks */
  99. std::list<NodeId> lNodesAnnouncingHeaderAndIDs;
  100. /** Number of preferable block download peers. */
  101. int nPreferredDownload = 0;
  102. /** Number of peers from which we're downloading blocks. */
  103. int nPeersWithValidatedDownloads = 0;
  104. /** Number of outbound peers with m_chain_sync.m_protect. */
  105. int g_outbound_peers_with_protect_from_disconnect = 0;
  106. /** When our tip was last updated. */
  107. int64_t g_last_tip_update = 0;
  108. /** Relay map, protected by cs_main. */
  109. typedef std::map<uint256, CTransactionRef> MapRelay;
  110. MapRelay mapRelay;
  111. /** Expiration-time ordered list of (expire time, relay map entry) pairs, protected by cs_main. */
  112. std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration;
  113. } // namespace
  114. namespace {
  115. struct CBlockReject {
  116. unsigned char chRejectCode;
  117. std::string strRejectReason;
  118. uint256 hashBlock;
  119. };
  120. /**
  121. * Maintain validation-specific state about nodes, protected by cs_main, instead
  122. * of by CNode's own locks. This simplifies asynchronous operation, where
  123. * processing of incoming data is done after the ProcessMessage call returns,
  124. * and we're no longer holding the node's locks.
  125. */
  126. struct CNodeState {
  127. //! The peer's address
  128. const CService address;
  129. //! Whether we have a fully established connection.
  130. bool fCurrentlyConnected;
  131. //! Accumulated misbehaviour score for this peer.
  132. int nMisbehavior;
  133. //! Whether this peer should be disconnected and banned (unless whitelisted).
  134. bool fShouldBan;
  135. //! String name of this peer (debugging/logging purposes).
  136. const std::string name;
  137. //! List of asynchronously-determined block rejections to notify this peer about.
  138. std::vector<CBlockReject> rejects;
  139. //! The best known block we know this peer has announced.
  140. const CBlockIndex *pindexBestKnownBlock;
  141. //! The hash of the last unknown block this peer has announced.
  142. uint256 hashLastUnknownBlock;
  143. //! The last full block we both have.
  144. const CBlockIndex *pindexLastCommonBlock;
  145. //! The best header we have sent our peer.
  146. const CBlockIndex *pindexBestHeaderSent;
  147. //! Length of the current streak of unconnecting headers announcements
  148. int nUnconnectingHeaders;
  149. //! Whether we've started headers synchronization with this peer.
  150. bool fSyncStarted;
  151. //! When to potentially disconnect peer for stalling headers download
  152. int64_t nHeadersSyncTimeout;
  153. //! Since when we're stalling block download progress (in microseconds), or 0.
  154. int64_t nStallingSince;
  155. std::list<QueuedBlock> vBlocksInFlight;
  156. //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
  157. int64_t nDownloadingSince;
  158. int nBlocksInFlight;
  159. int nBlocksInFlightValidHeaders;
  160. //! Whether we consider this a preferred download peer.
  161. bool fPreferredDownload;
  162. //! Whether this peer wants invs or headers (when possible) for block announcements.
  163. bool fPreferHeaders;
  164. //! Whether this peer wants invs or cmpctblocks (when possible) for block announcements.
  165. bool fPreferHeaderAndIDs;
  166. /**
  167. * Whether this peer will send us cmpctblocks if we request them.
  168. * This is not used to gate request logic, as we really only care about fSupportsDesiredCmpctVersion,
  169. * but is used as a flag to "lock in" the version of compact blocks (fWantsCmpctWitness) we send.
  170. */
  171. bool fProvidesHeaderAndIDs;
  172. //! Whether this peer can give us witnesses
  173. bool fHaveWitness;
  174. //! Whether this peer wants witnesses in cmpctblocks/blocktxns
  175. bool fWantsCmpctWitness;
  176. /**
  177. * If we've announced NODE_WITNESS to this peer: whether the peer sends witnesses in cmpctblocks/blocktxns,
  178. * otherwise: whether this peer sends non-witnesses in cmpctblocks/blocktxns.
  179. */
  180. bool fSupportsDesiredCmpctVersion;
  181. /** State used to enforce CHAIN_SYNC_TIMEOUT
  182. * Only in effect for outbound, non-manual connections, with
  183. * m_protect == false
  184. * Algorithm: if a peer's best known block has less work than our tip,
  185. * set a timeout CHAIN_SYNC_TIMEOUT seconds in the future:
  186. * - If at timeout their best known block now has more work than our tip
  187. * when the timeout was set, then either reset the timeout or clear it
  188. * (after comparing against our current tip's work)
  189. * - If at timeout their best known block still has less work than our
  190. * tip did when the timeout was set, then send a getheaders message,
  191. * and set a shorter timeout, HEADERS_RESPONSE_TIME seconds in future.
  192. * If their best known block is still behind when that new timeout is
  193. * reached, disconnect.
  194. */
  195. struct ChainSyncTimeoutState {
  196. //! A timeout used for checking whether our peer has sufficiently synced
  197. int64_t m_timeout;
  198. //! A header with the work we require on our peer's chain
  199. const CBlockIndex * m_work_header;
  200. //! After timeout is reached, set to true after sending getheaders
  201. bool m_sent_getheaders;
  202. //! Whether this peer is protected from disconnection due to a bad/slow chain
  203. bool m_protect;
  204. };
  205. ChainSyncTimeoutState m_chain_sync;
  206. //! Time of last new block announcement
  207. int64_t m_last_block_announcement;
  208. CNodeState(CAddress addrIn, std::string addrNameIn) : address(addrIn), name(addrNameIn) {
  209. fCurrentlyConnected = false;
  210. nMisbehavior = 0;
  211. fShouldBan = false;
  212. pindexBestKnownBlock = nullptr;
  213. hashLastUnknownBlock.SetNull();
  214. pindexLastCommonBlock = nullptr;
  215. pindexBestHeaderSent = nullptr;
  216. nUnconnectingHeaders = 0;
  217. fSyncStarted = false;
  218. nHeadersSyncTimeout = 0;
  219. nStallingSince = 0;
  220. nDownloadingSince = 0;
  221. nBlocksInFlight = 0;
  222. nBlocksInFlightValidHeaders = 0;
  223. fPreferredDownload = false;
  224. fPreferHeaders = false;
  225. fPreferHeaderAndIDs = false;
  226. fProvidesHeaderAndIDs = false;
  227. fHaveWitness = false;
  228. fWantsCmpctWitness = false;
  229. fSupportsDesiredCmpctVersion = false;
  230. m_chain_sync = { 0, nullptr, false, false };
  231. m_last_block_announcement = 0;
  232. }
  233. };
  234. /** Map maintaining per-node state. Requires cs_main. */
  235. std::map<NodeId, CNodeState> mapNodeState;
  236. // Requires cs_main.
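// Return the CNodeState for the given peer, or nullptr if we have no state for it.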
  237. CNodeState *State(NodeId pnode) {
  238. std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
  239. if (it == mapNodeState.end())
  240. return nullptr;
  241. return &it->second;
  242. }
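// Recompute whether this node counts as a preferred download peer and keep the global nPreferredDownload counter in sync.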
  243. void UpdatePreferredDownload(CNode* node, CNodeState* state)
  244. {
  245. nPreferredDownload -= state->fPreferredDownload;
  246. // Whether this node should be marked as a preferred download node.
  247. state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient;
  248. nPreferredDownload += state->fPreferredDownload;
  249. }
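// Send our VERSION message to the peer, advertising our services, starting height and transaction relay preference.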
  250. void PushNodeVersion(CNode *pnode, CConnman* connman, int64_t nTime)
  251. {
  252. ServiceFlags nLocalNodeServices = pnode->GetLocalServices();
  253. uint64_t nonce = pnode->GetLocalNonce();
  254. int nNodeStartingHeight = pnode->GetMyStartingHeight();
  255. NodeId nodeid = pnode->GetId();
  256. CAddress addr = pnode->addr;
  257. CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices));
  258. CAddress addrMe = CAddress(CService(), nLocalNodeServices);
  259. connman->PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
  260. nonce, strSubVersion, nNodeStartingHeight, ::fRelayTxes));
  261. if (fLogIPs) {
  262. LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
  263. } else {
  264. LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
  265. }
  266. }
  267. // Requires cs_main.
  268. // Returns a bool indicating whether we requested this block.
  269. // Also used if a block was /not/ received and timed out or started with another peer
  270. bool MarkBlockAsReceived(const uint256& hash) {
  271. std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
  272. if (itInFlight != mapBlocksInFlight.end()) {
  273. CNodeState *state = State(itInFlight->second.first);
  274. state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
  275. if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) {
  276. // Last validated block on the queue was received.
  277. nPeersWithValidatedDownloads--;
  278. }
  279. if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
  280. // First block on the queue was received, update the start download time for the next one
  281. state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros());
  282. }
  283. state->vBlocksInFlight.erase(itInFlight->second.second);
  284. state->nBlocksInFlight--;
  285. state->nStallingSince = 0;
  286. mapBlocksInFlight.erase(itInFlight);
  287. return true;
  288. }
  289. return false;
  290. }
  291. // Requires cs_main.
  292. // returns false, still setting pit, if the block was already in flight from the same peer
  293. // pit will only be valid as long as the same cs_main lock is being held
  294. bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) {
  295. CNodeState *state = State(nodeid);
  296. assert(state != nullptr);
  297. // Short-circuit most stuff in case it's from the same node
  298. std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
  299. if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
  300. if (pit) {
  301. *pit = &itInFlight->second.second;
  302. }
  303. return false;
  304. }
  305. // Make sure it's not listed somewhere already.
  306. MarkBlockAsReceived(hash);
  307. std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
  308. {hash, pindex, pindex != nullptr, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : nullptr)});
  309. state->nBlocksInFlight++;
  310. state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
  311. if (state->nBlocksInFlight == 1) {
  312. // We're starting a block download (batch) from this peer.
  313. state->nDownloadingSince = GetTimeMicros();
  314. }
  315. if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
  316. nPeersWithValidatedDownloads++;
  317. }
  318. itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
  319. if (pit)
  320. *pit = &itInFlight->second.second;
  321. return true;
  322. }
  323. /** Check whether the last unknown block a peer advertised is not yet known. */
  324. void ProcessBlockAvailability(NodeId nodeid) {
  325. CNodeState *state = State(nodeid);
  326. assert(state != nullptr);
  327. if (!state->hashLastUnknownBlock.IsNull()) {
  328. BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
  329. if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
  330. if (state->pindexBestKnownBlock == nullptr || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
  331. state->pindexBestKnownBlock = itOld->second;
  332. state->hashLastUnknownBlock.SetNull();
  333. }
  334. }
  335. }
  336. /** Update tracking information about which blocks a peer is assumed to have. */
  337. void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
  338. CNodeState *state = State(nodeid);
  339. assert(state != nullptr);
  340. ProcessBlockAvailability(nodeid);
  341. BlockMap::iterator it = mapBlockIndex.find(hash);
  342. if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
  343. // An actually better block was announced.
  344. if (state->pindexBestKnownBlock == nullptr || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
  345. state->pindexBestKnownBlock = it->second;
  346. } else {
  347. // An unknown block was announced; just assume that the latest one is the best one.
  348. state->hashLastUnknownBlock = hash;
  349. }
  350. }
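// Ask this peer to announce new blocks via CMPCTBLOCK (BIP152 high-bandwidth mode). We keep at most
// three such peers; the oldest is switched back to low-bandwidth mode when a new one is added.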
  351. void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman* connman) {
  352. AssertLockHeld(cs_main);
  353. CNodeState* nodestate = State(nodeid);
  354. if (!nodestate || !nodestate->fSupportsDesiredCmpctVersion) {
  355. // Never ask from peers who can't provide witnesses.
  356. return;
  357. }
  358. if (nodestate->fProvidesHeaderAndIDs) {
  359. for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
  360. if (*it == nodeid) {
  361. lNodesAnnouncingHeaderAndIDs.erase(it);
  362. lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
  363. return;
  364. }
  365. }
  366. connman->ForNode(nodeid, [connman](CNode* pfrom){
  367. bool fAnnounceUsingCMPCTBLOCK = false;
  368. uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
  369. if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
  370. // As per BIP152, we only get 3 of our peers to announce
  371. // blocks using compact encodings.
  372. connman->ForNode(lNodesAnnouncingHeaderAndIDs.front(), [connman, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode* pnodeStop){
  373. connman->PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion()).Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
  374. return true;
  375. });
  376. lNodesAnnouncingHeaderAndIDs.pop_front();
  377. }
  378. fAnnounceUsingCMPCTBLOCK = true;
  379. connman->PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()).Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
  380. lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
  381. return true;
  382. });
  383. }
  384. }
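// Return true if we have not received a new tip for longer than three times the target block spacing
// and have no blocks in flight, i.e. our tip may be stale.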
  385. bool TipMayBeStale(const Consensus::Params &consensusParams)
  386. {
  387. AssertLockHeld(cs_main);
  388. if (g_last_tip_update == 0) {
  389. g_last_tip_update = GetTime();
  390. }
  391. return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty();
  392. }
  393. // Requires cs_main
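// Return true if our tip is recent enough (within ~20 block intervals of the current time) that directly fetching announced blocks is reasonable.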
  394. bool CanDirectFetch(const Consensus::Params &consensusParams)
  395. {
  396. return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
  397. }
  398. // Requires cs_main
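// Return true if we believe the peer already has this header, either as an ancestor of its best known block or of the best header we have sent it.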
  399. bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
  400. {
  401. if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
  402. return true;
  403. if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
  404. return true;
  405. return false;
  406. }
  407. /** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
  408. * at most count entries. */
  409. void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) {
  410. if (count == 0)
  411. return;
  412. vBlocks.reserve(vBlocks.size() + count);
  413. CNodeState *state = State(nodeid);
  414. assert(state != nullptr);
  415. // Make sure pindexBestKnownBlock is up to date, we'll need it.
  416. ProcessBlockAvailability(nodeid);
  417. if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
  418. // This peer has nothing interesting.
  419. return;
  420. }
  421. if (state->pindexLastCommonBlock == nullptr) {
  422. // Bootstrap quickly by guessing a parent of our best tip is the forking point.
  423. // Guessing wrong in either direction is not a problem.
  424. state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
  425. }
  426. // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
  427. // of its current tip anymore. Go back enough to fix that.
  428. state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
  429. if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
  430. return;
  431. std::vector<const CBlockIndex*> vToFetch;
  432. const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
  433. // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
  434. // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
  435. // download that next block if the window were 1 larger.
  436. int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
  437. int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
  438. NodeId waitingfor = -1;
  439. while (pindexWalk->nHeight < nMaxHeight) {
  440. // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
  441. // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
  442. // as iterating over ~100 CBlockIndex* entries anyway.
  443. int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
  444. vToFetch.resize(nToFetch);
  445. pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
  446. vToFetch[nToFetch - 1] = pindexWalk;
  447. for (unsigned int i = nToFetch - 1; i > 0; i--) {
  448. vToFetch[i - 1] = vToFetch[i]->pprev;
  449. }
  450. // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
  451. // are not yet downloaded and not in flight to vBlocks. In the meantime, update
  452. // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
  453. // already part of our chain (and therefore we don't need it even if pruned).
  454. for (const CBlockIndex* pindex : vToFetch) {
  455. if (!pindex->IsValid(BLOCK_VALID_TREE)) {
  456. // We consider the chain that this peer is on invalid.
  457. return;
  458. }
  459. if (!State(nodeid)->fHaveWitness && IsWitnessEnabled(pindex->pprev, consensusParams)) {
  460. // We wouldn't download this block or its descendants from this peer.
  461. return;
  462. }
  463. if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
  464. if (pindex->nChainTx)
  465. state->pindexLastCommonBlock = pindex;
  466. } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
  467. // The block is not already downloaded, and not yet in flight.
  468. if (pindex->nHeight > nWindowEnd) {
  469. // We reached the end of the window.
  470. if (vBlocks.size() == 0 && waitingfor != nodeid) {
  471. // We aren't able to fetch anything, but we would be if the download window was one larger.
  472. nodeStaller = waitingfor;
  473. }
  474. return;
  475. }
  476. vBlocks.push_back(pindex);
  477. if (vBlocks.size() == count) {
  478. return;
  479. }
  480. } else if (waitingfor == -1) {
  481. // This is the first already-in-flight block.
  482. waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
  483. }
  484. }
  485. }
  486. }
  487. } // namespace
  488. // This function is used for testing the stale tip eviction logic, see
  489. // DoS_tests.cpp
  490. void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
  491. {
  492. LOCK(cs_main);
  493. CNodeState *state = State(node);
  494. if (state) state->m_last_block_announcement = time_in_seconds;
  495. }
  496. // Returns true for outbound peers, excluding manual connections, feelers, and
  497. // one-shots
  498. bool IsOutboundDisconnectionCandidate(const CNode *node)
  499. {
  500. return !(node->fInbound || node->m_manual_connection || node->fFeeler || node->fOneShot);
  501. }
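// Create the CNodeState entry for a newly connected peer and, for outbound connections, send our VERSION message immediately.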
  502. void PeerLogicValidation::InitializeNode(CNode *pnode) {
  503. CAddress addr = pnode->addr;
  504. std::string addrName = pnode->GetAddrName();
  505. NodeId nodeid = pnode->GetId();
  506. {
  507. LOCK(cs_main);
  508. mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName)));
  509. }
  510. if(!pnode->fInbound)
  511. PushNodeVersion(pnode, connman, GetTime());
  512. }
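// Remove all per-peer state when a peer disconnects, and sanity-check the global counters once the last peer is gone.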
  513. void PeerLogicValidation::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
  514. fUpdateConnectionTime = false;
  515. LOCK(cs_main);
  516. CNodeState *state = State(nodeid);
  517. assert(state != nullptr);
  518. if (state->fSyncStarted)
  519. nSyncStarted--;
  520. if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
  521. fUpdateConnectionTime = true;
  522. }
  523. for (const QueuedBlock& entry : state->vBlocksInFlight) {
  524. mapBlocksInFlight.erase(entry.hash);
  525. }
  526. EraseOrphansFor(nodeid);
  527. nPreferredDownload -= state->fPreferredDownload;
  528. nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
  529. assert(nPeersWithValidatedDownloads >= 0);
  530. g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
  531. assert(g_outbound_peers_with_protect_from_disconnect >= 0);
  532. mapNodeState.erase(nodeid);
  533. if (mapNodeState.empty()) {
  534. // Do a consistency check after the last peer is removed.
  535. assert(mapBlocksInFlight.empty());
  536. assert(nPreferredDownload == 0);
  537. assert(nPeersWithValidatedDownloads == 0);
  538. assert(g_outbound_peers_with_protect_from_disconnect == 0);
  539. }
  540. LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
  541. }
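// Copy a snapshot of the peer's sync statistics (misbehaviour score, best known height, common height,
// heights in flight) into stats; returns false if the peer is unknown.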
  542. bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
  543. LOCK(cs_main);
  544. CNodeState *state = State(nodeid);
  545. if (state == nullptr)
  546. return false;
  547. stats.nMisbehavior = state->nMisbehavior;
  548. stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
  549. stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
  550. for (const QueuedBlock& queue : state->vBlocksInFlight) {
  551. if (queue.pindex)
  552. stats.vHeightInFlight.push_back(queue.pindex->nHeight);
  553. }
  554. return true;
  555. }
  556. //////////////////////////////////////////////////////////////////////////////
  557. //
  558. // mapOrphanTransactions
  559. //
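// Keep a bounded ring buffer (-blockreconstructionextratxn entries) of recently seen transactions that can be used to reconstruct relayed compact blocks.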
  560. void AddToCompactExtraTransactions(const CTransactionRef& tx)
  561. {
  562. size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
  563. if (max_extra_txn <= 0)
  564. return;
  565. if (!vExtraTxnForCompact.size())
  566. vExtraTxnForCompact.resize(max_extra_txn);
  567. vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetWitnessHash(), tx);
  568. vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
  569. }
  570. bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
  571. {
  572. const uint256& hash = tx->GetHash();
  573. if (mapOrphanTransactions.count(hash))
  574. return false;
  575. // Ignore big transactions, to avoid a
  576. // send-big-orphans memory exhaustion attack. If a peer has a legitimate
  577. // large transaction with a missing parent then we assume
  578. // it will rebroadcast it later, after the parent transaction(s)
  579. // have been mined or received.
  580. // 100 orphans, each of which is at most 99,999 bytes big, is
  581. // at most 10 megabytes of orphans and somewhat more for the by-prev index (in the worst case):
  582. unsigned int sz = GetTransactionWeight(*tx);
  583. if (sz >= MAX_STANDARD_TX_WEIGHT)
  584. {
  585. LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
  586. return false;
  587. }
  588. auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME});
  589. assert(ret.second);
  590. for (const CTxIn& txin : tx->vin) {
  591. mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
  592. }
  593. AddToCompactExtraTransactions(tx);
  594. LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
  595. mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
  596. return true;
  597. }
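// Erase a single orphan transaction and its entries in mapOrphanTransactionsByPrev; returns the number of orphans erased (0 or 1).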
  598. int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
  599. {
  600. std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
  601. if (it == mapOrphanTransactions.end())
  602. return 0;
  603. for (const CTxIn& txin : it->second.tx->vin)
  604. {
  605. auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
  606. if (itPrev == mapOrphanTransactionsByPrev.end())
  607. continue;
  608. itPrev->second.erase(it);
  609. if (itPrev->second.empty())
  610. mapOrphanTransactionsByPrev.erase(itPrev);
  611. }
  612. mapOrphanTransactions.erase(it);
  613. return 1;
  614. }
  615. void EraseOrphansFor(NodeId peer)
  616. {
  617. int nErased = 0;
  618. std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
  619. while (iter != mapOrphanTransactions.end())
  620. {
  621. std::map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
  622. if (maybeErase->second.fromPeer == peer)
  623. {
  624. nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
  625. }
  626. }
  627. if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer);
  628. }
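// Expire old orphan pool entries, then evict random ones until at most nMaxOrphans remain; returns the number evicted.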
  629. unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
  630. {
  631. unsigned int nEvicted = 0;
  632. static int64_t nNextSweep;
  633. int64_t nNow = GetTime();
  634. if (nNextSweep <= nNow) {
  635. // Sweep out expired orphan pool entries:
  636. int nErased = 0;
  637. int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
  638. std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
  639. while (iter != mapOrphanTransactions.end())
  640. {
  641. std::map<uint256, COrphanTx>::iterator maybeErase = iter++;
  642. if (maybeErase->second.nTimeExpire <= nNow) {
  643. nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
  644. } else {
  645. nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
  646. }
  647. }
  648. // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
  649. nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
  650. if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased);
  651. }
  652. while (mapOrphanTransactions.size() > nMaxOrphans)
  653. {
  654. // Evict a random orphan:
  655. uint256 randomhash = GetRandHash();
  656. std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
  657. if (it == mapOrphanTransactions.end())
  658. it = mapOrphanTransactions.begin();
  659. EraseOrphanTx(it->first);
  660. ++nEvicted;
  661. }
  662. return nEvicted;
  663. }
  664. // Requires cs_main.
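// Increase a peer's misbehaviour score; once it crosses the -banscore threshold the peer is marked to be banned (unless whitelisted).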
  665. void Misbehaving(NodeId pnode, int howmuch)
  666. {
  667. if (howmuch == 0)
  668. return;
  669. CNodeState *state = State(pnode);
  670. if (state == nullptr)
  671. return;
  672. state->nMisbehavior += howmuch;
  673. int banscore = gArgs.GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
  674. if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
  675. {
  676. LogPrintf("%s: %s peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior);
  677. state->fShouldBan = true;
  678. } else
  679. LogPrintf("%s: %s peer=%d (%d -> %d)\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior);
  680. }
  681. //////////////////////////////////////////////////////////////////////////////
  682. //
  683. // blockchain -> download logic notification
  684. //
  685. PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, CScheduler &scheduler) : connman(connmanIn), m_stale_tip_check_time(0) {
  686. // Initialize global variables that cannot be constructed at startup.
  687. recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
  688. const Consensus::Params& consensusParams = Params().GetConsensus();
  689. // Stale tip checking and peer eviction are on two different timers, but we
  690. // don't want them to get out of sync due to drift in the scheduler, so we
  691. // combine them in one function and schedule at the quicker (peer-eviction)
  692. // timer.
  693. static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
  694. scheduler.scheduleEvery(std::bind(&PeerLogicValidation::CheckForStaleTipAndEvictPeers, this, consensusParams), EXTRA_PEER_CHECK_INTERVAL * 1000);
  695. }
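// When a block is connected, evict orphan transactions that were included in it or that conflict with it, and record the tip update time.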
  696. void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted) {
  697. LOCK(cs_main);
  698. std::vector<uint256> vOrphanErase;
  699. for (const CTransactionRef& ptx : pblock->vtx) {
  700. const CTransaction& tx = *ptx;
  701. // Which orphan pool entries must we evict?
  702. for (const auto& txin : tx.vin) {
  703. auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
  704. if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
  705. for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
  706. const CTransaction& orphanTx = *(*mi)->second.tx;
  707. const uint256& orphanHash = orphanTx.GetHash();
  708. vOrphanErase.push_back(orphanHash);
  709. }
  710. }
  711. }
  712. // Erase orphan transactions included or precluded by this block
  713. if (vOrphanErase.size()) {
  714. int nErased = 0;
  715. for (uint256 &orphanHash : vOrphanErase) {
  716. nErased += EraseOrphanTx(orphanHash);
  717. }
  718. LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
  719. }
  720. g_last_tip_update = GetTime();
  721. }
  722. // All of the following cache a recent block, and are protected by cs_most_recent_block
  723. static CCriticalSection cs_most_recent_block;
  724. static std::shared_ptr<const CBlock> most_recent_block;
  725. static std::shared_ptr<const CBlockHeaderAndShortTxIDs> most_recent_compact_block;
  726. static uint256 most_recent_block_hash;
  727. static bool fWitnessesPresentInMostRecentCompactBlock;
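// Fast-announce a new PoW-valid block: cache it and immediately push a CMPCTBLOCK to peers that
// prefer compact block announcements and already have its parent.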
  728. void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) {
  729. std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs> (*pblock, true);
  730. const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
  731. LOCK(cs_main);
  732. static int nHighestFastAnnounce = 0;
  733. if (pindex->nHeight <= nHighestFastAnnounce)
  734. return;
  735. nHighestFastAnnounce = pindex->nHeight;
  736. bool fWitnessEnabled = IsWitnessEnabled(pindex->pprev, Params().GetConsensus());
  737. uint256 hashBlock(pblock->GetHash());
  738. {
  739. LOCK(cs_most_recent_block);
  740. most_recent_block_hash = hashBlock;
  741. most_recent_block = pblock;
  742. most_recent_compact_block = pcmpctblock;
  743. fWitnessesPresentInMostRecentCompactBlock = fWitnessEnabled;
  744. }
  745. connman->ForEachNode([this, &pcmpctblock, pindex, &msgMaker, fWitnessEnabled, &hashBlock](CNode* pnode) {
  746. // TODO: Avoid the repeated-serialization here
  747. if (pnode->nVersion < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect)
  748. return;
  749. ProcessBlockAvailability(pnode->GetId());
  750. CNodeState &state = *State(pnode->GetId());
  751. // If the peer already has the previous block, or we have announced it to them,
  752. // but we don't think they have this one, go ahead and announce it
  753. if (state.fPreferHeaderAndIDs && (!fWitnessEnabled || state.fWantsCmpctWitness) &&
  754. !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {
  755. LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerLogicValidation::NewPoWValidBlock",
  756. hashBlock.ToString(), pnode->GetId());
  757. connman->PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
  758. state.pindexBestHeaderSent = pindex;
  759. }
  760. });
  761. }
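// On a tip update, relay the hashes of the newly connected blocks to peers (skipped during initial block download).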
  762. void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
  763. const int nNewHeight = pindexNew->nHeight;
  764. connman->SetBestHeight(nNewHeight);
  765. if (!fInitialDownload) {
  766. // Find the hashes of all blocks that weren't previously in the best chain.
  767. std::vector<uint256> vHashes;
  768. const CBlockIndex *pindexToAnnounce = pindexNew;
  769. while (pindexToAnnounce != pindexFork) {
  770. vHashes.push_back(pindexToAnnounce->GetBlockHash());
  771. pindexToAnnounce = pindexToAnnounce->pprev;
  772. if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
  773. // Limit announcements in case of a huge reorganization.
  774. // Rely on the peer's synchronization mechanism in that case.
  775. break;
  776. }
  777. }
  778. // Relay inventory, but don't relay old inventory during initial block download.
  779. connman->ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
  780. if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) {
  781. for (const uint256& hash : reverse_iterate(vHashes)) {
  782. pnode->PushBlockHash(hash);
  783. }
  784. }
  785. });
  786. connman->WakeMessageHandler();
  787. }
  788. nTimeBestReceived = GetTime();
  789. }
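// Handle a block validation result: queue a reject message and possibly a misbehaviour penalty for the
// peer that sent an invalid block, or consider that peer for compact block announcements if the block was valid.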
  790. void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationState& state) {
  791. LOCK(cs_main);
  792. const uint256 hash(block.GetHash());
  793. std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);
  794. int nDoS = 0;
  795. if (state.IsInvalid(nDoS)) {
  796. // Don't send reject message with code 0 or an internal reject code.
  797. if (it != mapBlockSource.end() && State(it->second.first) && state.GetRejectCode() > 0 && state.GetRejectCode() < REJECT_INTERNAL) {
  798. CBlockReject reject = {(unsigned char)state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), hash};
  799. State(it->second.first)->rejects.push_back(reject);
  800. if (nDoS > 0 && it->second.second)
  801. Misbehaving(it->second.first, nDoS);
  802. }
  803. }
  804. // Check that:
  805. // 1. The block is valid
  806. // 2. We're not in initial block download
  807. // 3. This is currently the best block we're aware of. We haven't updated
  808. // the tip yet so we have no way to check this directly here. Instead we
  809. // just check that there are currently no other blocks in flight.
  810. else if (state.IsValid() &&
  811. !IsInitialBlockDownload() &&
  812. mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
  813. if (it != mapBlockSource.end()) {
  814. MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, connman);
  815. }
  816. }
  817. if (it != mapBlockSource.end())
  818. mapBlockSource.erase(it);
  819. }
  820. //////////////////////////////////////////////////////////////////////////////
  821. //
  822. // Messages
  823. //
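// Return true if we already have (or have recently rejected) the object referenced by this inv, so it does not need to be requested again.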
  824. bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
  825. {
  826. switch (inv.type)
  827. {
  828. case MSG_TX:
  829. case MSG_WITNESS_TX:
  830. {
  831. assert(recentRejects);
  832. if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
  833. {
  834. // If the chain tip has changed, previously rejected transactions
  835. // might now be valid, e.g. due to a nLockTime'd tx becoming valid,
  836. // or a double-spend. Reset the rejects filter and give those
  837. // txs a second chance.
  838. hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
  839. recentRejects->reset();
  840. }
  841. return recentRejects->contains(inv.hash) ||
  842. mempool.exists(inv.hash) ||
  843. mapOrphanTransactions.count(inv.hash) ||
  844. pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 0)) || // Best effort: only try output 0 and 1
  845. pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 1));
  846. }
  847. case MSG_BLOCK:
  848. case MSG_WITNESS_BLOCK:
  849. return mapBlockIndex.count(inv.hash);
  850. }
  851. // Don't know what it is, just say we already got one
  852. return true;
  853. }
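// Queue a transaction inv to every connected peer for relay.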
  854. static void RelayTransaction(const CTransaction& tx, CConnman* connman)
  855. {
  856. CInv inv(MSG_TX, tx.GetHash());
  857. connman->ForEachNode([&inv](CNode* pnode)
  858. {
  859. pnode->PushInventory(inv);
  860. });
  861. }
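// Relay an address to a small, deterministically chosen subset of peers (two if the address is reachable
// by us, otherwise one), keyed on the address and the current 24-hour period.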
  862. static void RelayAddress(const CAddress& addr, bool fReachable, CConnman* connman)
  863. {
  864. unsigned int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
  865. // Relay to a limited number of other nodes
  866. // Use deterministic randomness to send to the same nodes for 24 hours
  867. // at a time so the addrKnowns of the chosen nodes prevent repeats
  868. uint64_t hashAddr = addr.GetHash();
  869. const CSipHasher hasher = connman->GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60));
  870. FastRandomContext insecure_rand;
  871. std::array<std::pair<uint64_t, CNode*>,2> best{{{0, nullptr}, {0, nullptr}}};
  872. assert(nRelayNodes <= best.size());
  873. auto sortfunc = [&best, &hasher, nRelayNodes](CNode* pnode) {
  874. if (pnode->nVersion >= CADDR_TIME_VERSION) {
  875. uint64_t hashKey = CSipHasher(hasher).Write(pnode->GetId()).Finalize();
  876. for (unsigned int i = 0; i < nRelayNodes; i++) {
  877. if (hashKey > best[i].first) {
  878. std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
  879. best[i] = std::make_pair(hashKey, pnode);
  880. break;
  881. }
  882. }
  883. }
  884. };
  885. auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
  886. for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
  887. best[i].second->PushAddress(addr, insecure_rand);
  888. }
  889. };
  890. connman->ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
  891. }
  892. void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
  893. {
  894. std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
  895. std::vector<CInv> vNotFound;
  896. const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
  897. LOCK(cs_main);
  898. while (it != pfrom->vRecvGetData.end()) {
  899. // Don't bother if send buffer is too full to respond anyway
  900. if (pfrom->fPauseSend)
  901. break;
  902. const CInv &inv = *it;
  903. {
  904. if (interruptMsgProc)
  905. return;
  906. it++;
  907. if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
  908. {
  909. bool send = false;
  910. BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
  911. std::shared_ptr<const CBlock> a_recent_block;
  912. std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
  913. bool fWitnessesPresentInARecentCompactBlock;
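// Snapshot the most recently announced block / compact block under cs_most_recent_block,
// so the shared_ptr copies can be used below without holding that lock.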
  914. {
  915. LOCK(cs_most_recent_block);
  916. a_recent_block = most_recent_block;
  917. a_recent_compact_block = most_recent_compact_block;
  918. fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock;
  919. }
  920. if (mi != mapBlockIndex.end())
  921. {
  922. if (mi->second->nChainTx && !mi->second->IsValid(BLOCK_VALID_SCRIPTS) &&
  923. mi->second->IsValid(BLOCK_VALID_TREE)) {
  924. // If we have the block and all of its parents, but have not yet validated it,
  925. // we might be in the middle of connecting it (ie in the unlock of cs_main
  926. // before ActivateBestChain but after AcceptBlock).
  927. // In this case, we need to run ActivateBestChain prior to checking the relay
  928. // conditions below.
  929. CValidationState dummy;
  930. ActivateBestChain(dummy, Params(), a_recent_block);
  931. }
  932. if (chainActive.Contains(mi->second)) {
  933. send = true;
  934. } else {
  935. static const int nOneMonth = 30 * 24 * 60 * 60;
  936. // To prevent fingerprinting attacks, only send blocks outside of the active
  937. // chain if they are valid, and no more than a month older (both in time, and in
  938. // best equivalent proof of work) than the best header chain we know about.
  939. send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) &&
  940. (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
  941. (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, consensusParams) < nOneMonth);
  942. if (!send) {
  943. LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
  944. }
  945. }
  946. }
  947. // disconnect node in case we have reached the outbound limit for serving historical blocks
  948. // never disconnect whitelisted nodes
  949. static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical
  950. if (send && connman->OutboundTargetReached(true) && ( ((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
  951. {
  952. LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
  953. //disconnect node
  954. pfrom->fDisconnect = true;
  955. send = false;
  956. }
  957. // Pruned nodes may have deleted the block, so check whether
  958. // it's available before trying to send.
  959. if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
  960. {
  961. std::shared_ptr<const CBlock> pblock;
  962. if (a_recent_block && a_recent_block->GetHash() == (*mi).second->GetBlockHash()) {
  963. pblock = a_recent_block;
  964. } else {
  965. // Send block from disk
  966. std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
  967. if (!ReadBlockFromDisk(*pblockRead, (*mi).second, consensusParams))
  968. assert(!"cannot load block from disk");
  969. pblock = pblockRead;
  970. }
  971. if (inv.type == MSG_BLOCK)
  972. connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
  973. else if (inv.type == MSG_WITNESS_BLOCK)
  974. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
  975. else if (inv.type == MSG_FILTERED_BLOCK)
  976. {
  977. bool sendMerkleBlock = false;
  978. CMerkleBlock merkleBlock;
  979. {
  980. LOCK(pfrom->cs_filter);
  981. if (pfrom->pfilter) {
  982. sendMerkleBlock = true;
  983. merkleBlock = CMerkleBlock(*pblock, *pfrom->pfilter);
  984. }
  985. }
  986. if (sendMerkleBlock) {
  987. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
  988. // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
  989. // This avoids hurting performance by pointlessly requiring a round-trip
990. // Note that there is currently no way for a node to request any single transaction we didn't send here -
991. // it must either disconnect and retry or request the full block.
992. // Thus, the protocol spec, as written, allows us to provide duplicate txn here;
993. // however, we MUST always provide at least what the remote peer needs
  994. typedef std::pair<unsigned int, uint256> PairType;
  995. for (PairType& pair : merkleBlock.vMatchedTxn)
  996. connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
  997. }
  998. // else
  999. // no response
  1000. }
  1001. else if (inv.type == MSG_CMPCT_BLOCK)
  1002. {
  1003. // If a peer is asking for old blocks, we're almost guaranteed
  1004. // they won't have a useful mempool to match against a compact block,
  1005. // and we don't feel like constructing the object for them, so
  1006. // instead we respond with the full, non-compact block.
  1007. bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
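// Peers that negotiated version 1 compact blocks must not receive witness data, so strip
// it from anything we serialize for them.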
  1008. int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
  1009. if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
  1010. if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == mi->second->GetBlockHash()) {
  1011. connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
  1012. } else {
  1013. CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
  1014. connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
  1015. }
  1016. } else {
  1017. connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
  1018. }
  1019. }
  1020. // Trigger the peer node to send a getblocks request for the next batch of inventory
  1021. if (inv.hash == pfrom->hashContinue)
  1022. {
  1023. // Bypass PushInventory, this must send even if redundant,
  1024. // and we want it right after the last block so they don't
  1025. // wait for other stuff first.
  1026. std::vector<CInv> vInv;
  1027. vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
  1028. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
  1029. pfrom->hashContinue.SetNull();
  1030. }
  1031. }
  1032. }
  1033. else if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX)
  1034. {
  1035. // Send stream from relay memory
  1036. bool push = false;
  1037. auto mi = mapRelay.find(inv.hash);
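// A plain MSG_TX request gets the transaction without witness data; MSG_WITNESS_TX gets
// the full serialization.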
  1038. int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
  1039. if (mi != mapRelay.end()) {
  1040. connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
  1041. push = true;
  1042. } else if (pfrom->timeLastMempoolReq) {
  1043. auto txinfo = mempool.info(inv.hash);
  1044. // To protect privacy, do not answer getdata using the mempool when
  1045. // that TX couldn't have been INVed in reply to a MEMPOOL request.
  1046. if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
  1047. connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
  1048. push = true;
  1049. }
  1050. }
  1051. if (!push) {
  1052. vNotFound.push_back(inv);
  1053. }
  1054. }
  1055. // Track requests for our stuff.
  1056. GetMainSignals().Inventory(inv.hash);
  1057. if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
  1058. break;
  1059. }
  1060. }
  1061. pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);
  1062. if (!vNotFound.empty()) {
  1063. // Let the peer know that we didn't find what it asked for, so it doesn't
  1064. // have to wait around forever. Currently only SPV clients actually care
  1065. // about this message: it's needed when they are recursively walking the
  1066. // dependencies of relevant unconfirmed transactions. SPV clients want to
  1067. // do that because they want to know about (and store and rebroadcast and
  1068. // risk analyze) the dependencies of transactions relevant to them, without
  1069. // having to download the entire memory pool.
  1070. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
  1071. }
  1072. }
  1073. uint32_t GetFetchFlags(CNode* pfrom) {
  1074. uint32_t nFetchFlags = 0;
  1075. if ((pfrom->GetLocalServices() & NODE_WITNESS) && State(pfrom->GetId())->fHaveWitness) {
  1076. nFetchFlags |= MSG_WITNESS_FLAG;
  1077. }
  1078. return nFetchFlags;
  1079. }
  1080. inline void static SendBlockTransactions(const CBlock& block, const BlockTransactionsRequest& req, CNode* pfrom, CConnman* connman) {
  1081. BlockTransactions resp(req);
  1082. for (size_t i = 0; i < req.indexes.size(); i++) {
  1083. if (req.indexes[i] >= block.vtx.size()) {
  1084. LOCK(cs_main);
  1085. Misbehaving(pfrom->GetId(), 100);
1086. LogPrintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices\n", pfrom->GetId());
  1087. return;
  1088. }
  1089. resp.txn[i] = block.vtx[req.indexes[i]];
  1090. }
  1091. LOCK(cs_main);
  1092. const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
  1093. int nSendFlags = State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
  1094. connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
  1095. }
  1096. bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::vector<CBlockHeader>& headers, const CChainParams& chainparams, bool punish_duplicate_invalid)
  1097. {
  1098. const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
  1099. size_t nCount = headers.size();
  1100. if (nCount == 0) {
1101. // Nothing interesting. Stop asking this peer for more headers.
  1102. return true;
  1103. }
  1104. bool received_new_header = false;
  1105. const CBlockIndex *pindexLast = nullptr;
  1106. {
  1107. LOCK(cs_main);
  1108. CNodeState *nodestate = State(pfrom->GetId());
  1109. // If this looks like it could be a block announcement (nCount <
  1110. // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
  1111. // don't connect:
  1112. // - Send a getheaders message in response to try to connect the chain.
  1113. // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
  1114. // don't connect before giving DoS points
  1115. // - Once a headers message is received that is valid and does connect,
  1116. // nUnconnectingHeaders gets reset back to 0.
  1117. if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
  1118. nodestate->nUnconnectingHeaders++;
  1119. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
  1120. LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
  1121. headers[0].GetHash().ToString(),
  1122. headers[0].hashPrevBlock.ToString(),
  1123. pindexBestHeader->nHeight,
  1124. pfrom->GetId(), nodestate->nUnconnectingHeaders);
  1125. // Set hashLastUnknownBlock for this peer, so that if we
  1126. // eventually get the headers - even from a different peer -
  1127. // we can use this peer to download.
  1128. UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());
  1129. if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
  1130. Misbehaving(pfrom->GetId(), 20);
  1131. }
  1132. return true;
  1133. }
  1134. uint256 hashLastBlock;
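// Sanity-check the headers message itself: every header must build on the one before it.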
  1135. for (const CBlockHeader& header : headers) {
  1136. if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
  1137. Misbehaving(pfrom->GetId(), 20);
  1138. return error("non-continuous headers sequence");
  1139. }
  1140. hashLastBlock = header.GetHash();
  1141. }
  1142. // If we don't have the last header, then they'll have given us
  1143. // something new (if these headers are valid).
  1144. if (mapBlockIndex.find(hashLastBlock) == mapBlockIndex.end()) {
  1145. received_new_header = true;
  1146. }
  1147. }
  1148. CValidationState state;
  1149. CBlockHeader first_invalid_header;
  1150. if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast, &first_invalid_header)) {
  1151. int nDoS;
  1152. if (state.IsInvalid(nDoS)) {
  1153. LOCK(cs_main);
  1154. if (nDoS > 0) {
  1155. Misbehaving(pfrom->GetId(), nDoS);
  1156. }
  1157. if (punish_duplicate_invalid && mapBlockIndex.find(first_invalid_header.GetHash()) != mapBlockIndex.end()) {
  1158. // Goal: don't allow outbound peers to use up our outbound
  1159. // connection slots if they are on incompatible chains.
  1160. //
1161. // We ask the caller to set punish_duplicate_invalid appropriately based
  1162. // on the peer and the method of header delivery (compact
  1163. // blocks are allowed to be invalid in some circumstances,
  1164. // under BIP 152).
  1165. // Here, we try to detect the narrow situation that we have a
  1166. // valid block header (ie it was valid at the time the header
  1167. // was received, and hence stored in mapBlockIndex) but know the
  1168. // block is invalid, and that a peer has announced that same
  1169. // block as being on its active chain.
  1170. // Disconnect the peer in such a situation.
  1171. //
  1172. // Note: if the header that is invalid was not accepted to our
  1173. // mapBlockIndex at all, that may also be grounds for
  1174. // disconnecting the peer, as the chain they are on is likely
  1175. // to be incompatible. However, there is a circumstance where
  1176. // that does not hold: if the header's timestamp is more than
  1177. // 2 hours ahead of our current time. In that case, the header
  1178. // may become valid in the future, and we don't want to
  1179. // disconnect a peer merely for serving us one too-far-ahead
  1180. // block header, to prevent an attacker from splitting the
  1181. // network by mining a block right at the 2 hour boundary.
  1182. //
  1183. // TODO: update the DoS logic (or, rather, rewrite the
  1184. // DoS-interface between validation and net_processing) so that
  1185. // the interface is cleaner, and so that we disconnect on all the
  1186. // reasons that a peer's headers chain is incompatible
  1187. // with ours (eg block->nVersion softforks, MTP violations,
  1188. // etc), and not just the duplicate-invalid case.
  1189. pfrom->fDisconnect = true;
  1190. }
  1191. return error("invalid header received");
  1192. }
  1193. }
  1194. {
  1195. LOCK(cs_main);
  1196. CNodeState *nodestate = State(pfrom->GetId());
  1197. if (nodestate->nUnconnectingHeaders > 0) {
  1198. LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->GetId(), nodestate->nUnconnectingHeaders);
  1199. }
  1200. nodestate->nUnconnectingHeaders = 0;
  1201. assert(pindexLast);
  1202. UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
  1203. // From here, pindexBestKnownBlock should be guaranteed to be non-null,
  1204. // because it is set in UpdateBlockAvailability. Some nullptr checks
  1205. // are still present, however, as belt-and-suspenders.
  1206. if (received_new_header && pindexLast->nChainWork > chainActive.Tip()->nChainWork) {
  1207. nodestate->m_last_block_announcement = GetTime();
  1208. }
  1209. if (nCount == MAX_HEADERS_RESULTS) {
  1210. // Headers message had its maximum size; the peer may have more headers.
  1211. // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
  1212. // from there instead.
  1213. LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight);
  1214. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
  1215. }
  1216. bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
  1217. // If this set of headers is valid and ends in a block with at least as
  1218. // much work as our tip, download as much as possible.
  1219. if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
  1220. std::vector<const CBlockIndex*> vToFetch;
  1221. const CBlockIndex *pindexWalk = pindexLast;
  1222. // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
  1223. while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
  1224. if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
  1225. !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
  1226. (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
  1227. // We don't have this block, and it's not yet in flight.
  1228. vToFetch.push_back(pindexWalk);
  1229. }
  1230. pindexWalk = pindexWalk->pprev;
  1231. }
  1232. // If pindexWalk still isn't on our main chain, we're looking at a
  1233. // very large reorg at a time we think we're close to caught up to
  1234. // the main chain -- this shouldn't really happen. Bail out on the
  1235. // direct fetch and rely on parallel download instead.
  1236. if (!chainActive.Contains(pindexWalk)) {
  1237. LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
  1238. pindexLast->GetBlockHash().ToString(),
  1239. pindexLast->nHeight);
  1240. } else {
  1241. std::vector<CInv> vGetData;
  1242. // Download as much as possible, from earliest to latest.
  1243. for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
  1244. if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
  1245. // Can't download any more from this peer
  1246. break;
  1247. }
  1248. uint32_t nFetchFlags = GetFetchFlags(pfrom);
  1249. vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
  1250. MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex);
  1251. LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
  1252. pindex->GetBlockHash().ToString(), pfrom->GetId());
  1253. }
  1254. if (vGetData.size() > 1) {
  1255. LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
  1256. pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
  1257. }
  1258. if (vGetData.size() > 0) {
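// If this is the only block being fetched anywhere, the peer supports the compact block
// version we want, and the parent of the last announced header has passed BLOCK_VALID_CHAIN
// checks, upgrade the request to a compact block for the low-latency relay path.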
  1259. if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
  1260. // In any case, we want to download using a compact block, not a regular one
  1261. vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
  1262. }
  1263. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
  1264. }
  1265. }
  1266. }
  1267. // If we're in IBD, we want outbound peers that will serve us a useful
  1268. // chain. Disconnect peers that are on chains with insufficient work.
  1269. if (IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
  1270. // When nCount < MAX_HEADERS_RESULTS, we know we have no more
  1271. // headers to fetch from this peer.
  1272. if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
  1273. // This peer has too little work on their headers chain to help
  1274. // us sync -- disconnect if using an outbound slot (unless
  1275. // whitelisted or addnode).
  1276. // Note: We compare their tip to nMinimumChainWork (rather than
  1277. // chainActive.Tip()) because we won't start block download
  1278. // until we have a headers chain that has at least
  1279. // nMinimumChainWork, even if a peer has a chain past our tip,
  1280. // as an anti-DoS measure.
  1281. if (IsOutboundDisconnectionCandidate(pfrom)) {
  1282. LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom->GetId());
  1283. pfrom->fDisconnect = true;
  1284. }
  1285. }
  1286. }
  1287. if (!pfrom->fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr) {
  1288. // If this is an outbound peer, check to see if we should protect
  1289. // it from the bad/lagging chain logic.
  1290. if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
  1291. LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom->GetId());
  1292. nodestate->m_chain_sync.m_protect = true;
  1293. ++g_outbound_peers_with_protect_from_disconnect;
  1294. }
  1295. }
  1296. }
  1297. return true;
  1298. }
  1299. bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
  1300. {
  1301. LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->GetId());
  1302. if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0)
  1303. {
  1304. LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
  1305. return true;
  1306. }
  1307. if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
  1308. (strCommand == NetMsgType::FILTERLOAD ||
  1309. strCommand == NetMsgType::FILTERADD))
  1310. {
  1311. if (pfrom->nVersion >= NO_BLOOM_VERSION) {
  1312. LOCK(cs_main);
  1313. Misbehaving(pfrom->GetId(), 100);
  1314. return false;
  1315. } else {
  1316. pfrom->fDisconnect = true;
  1317. return false;
  1318. }
  1319. }
  1320. if (strCommand == NetMsgType::REJECT)
  1321. {
  1322. if (LogAcceptCategory(BCLog::NET)) {
  1323. try {
  1324. std::string strMsg; unsigned char ccode; std::string strReason;
  1325. vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);
  1326. std::ostringstream ss;
  1327. ss << strMsg << " code " << itostr(ccode) << ": " << strReason;
  1328. if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX)
  1329. {
  1330. uint256 hash;
  1331. vRecv >> hash;
  1332. ss << ": hash " << hash.ToString();
  1333. }
  1334. LogPrint(BCLog::NET, "Reject %s\n", SanitizeString(ss.str()));
  1335. } catch (const std::ios_base::failure&) {
  1336. // Avoid feedback loops by preventing reject messages from triggering a new reject message.
  1337. LogPrint(BCLog::NET, "Unparseable reject message received\n");
  1338. }
  1339. }
  1340. }
  1341. else if (strCommand == NetMsgType::VERSION)
  1342. {
  1343. // Each connection can only send one version message
  1344. if (pfrom->nVersion != 0)
  1345. {
  1346. connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, std::string("Duplicate version message")));
  1347. LOCK(cs_main);
  1348. Misbehaving(pfrom->GetId(), 1);
  1349. return false;
  1350. }
  1351. int64_t nTime;
  1352. CAddress addrMe;
  1353. CAddress addrFrom;
  1354. uint64_t nNonce = 1;
  1355. uint64_t nServiceInt;
  1356. ServiceFlags nServices;
  1357. int nVersion;
  1358. int nSendVersion;
  1359. std::string strSubVer;
  1360. std::string cleanSubVer;
  1361. int nStartingHeight = -1;
  1362. bool fRelay = true;
  1363. vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
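// Negotiate the version used for messages we send: the lower of the peer's version and ours.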
  1364. nSendVersion = std::min(nVersion, PROTOCOL_VERSION);
  1365. nServices = ServiceFlags(nServiceInt);
  1366. if (!pfrom->fInbound)
  1367. {
  1368. connman->SetServices(pfrom->addr, nServices);
  1369. }
  1370. if (pfrom->nServicesExpected & ~nServices)
  1371. {
  1372. LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->GetId(), nServices, pfrom->nServicesExpected);
  1373. connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
  1374. strprintf("Expected to offer services %08x", pfrom->nServicesExpected)));
  1375. pfrom->fDisconnect = true;
  1376. return false;
  1377. }
  1378. if (nServices & ((1 << 7) | (1 << 5))) {
  1379. if (GetTime() < 1533096000) {
  1380. // Immediately disconnect peers that use service bits 6 or 8 until August 1st, 2018
  1381. // These bits have been used as a flag to indicate that a node is running incompatible
  1382. // consensus rules instead of changing the network magic, so we're stuck disconnecting
  1383. // based on these service bits, at least for a while.
  1384. pfrom->fDisconnect = true;
  1385. return false;
  1386. }
  1387. }
  1388. if (nVersion < MIN_PEER_PROTO_VERSION)
  1389. {
  1390. // disconnect from peers older than this proto version
  1391. LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->GetId(), nVersion);
  1392. connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
  1393. strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION)));
  1394. pfrom->fDisconnect = true;
  1395. return false;
  1396. }
  1397. if (nVersion == 10300)
  1398. nVersion = 300;
  1399. if (!vRecv.empty())
  1400. vRecv >> addrFrom >> nNonce;
  1401. if (!vRecv.empty()) {
  1402. vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
  1403. cleanSubVer = SanitizeString(strSubVer);
  1404. }
  1405. if (!vRecv.empty()) {
  1406. vRecv >> nStartingHeight;
  1407. }
  1408. if (!vRecv.empty())
  1409. vRecv >> fRelay;
  1410. // Disconnect if we connected to ourself
  1411. if (pfrom->fInbound && !connman->CheckIncomingNonce(nNonce))
  1412. {
  1413. LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString());
  1414. pfrom->fDisconnect = true;
  1415. return true;
  1416. }
  1417. if (pfrom->fInbound && addrMe.IsRoutable())
  1418. {
  1419. SeenLocal(addrMe);
  1420. }
  1421. // Be shy and don't send version until we hear
  1422. if (pfrom->fInbound)
  1423. PushNodeVersion(pfrom, connman, GetAdjustedTime());
  1424. connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
  1425. pfrom->nServices = nServices;
  1426. pfrom->SetAddrLocal(addrMe);
  1427. {
  1428. LOCK(pfrom->cs_SubVer);
  1429. pfrom->strSubVer = strSubVer;
  1430. pfrom->cleanSubVer = cleanSubVer;
  1431. }
  1432. pfrom->nStartingHeight = nStartingHeight;
  1433. pfrom->fClient = !(nServices & NODE_NETWORK);
  1434. {
  1435. LOCK(pfrom->cs_filter);
  1436. pfrom->fRelayTxes = fRelay; // set to true after we get the first filter* message
  1437. }
  1438. // Change version
  1439. pfrom->SetSendVersion(nSendVersion);
  1440. pfrom->nVersion = nVersion;
  1441. if((nServices & NODE_WITNESS))
  1442. {
  1443. LOCK(cs_main);
  1444. State(pfrom->GetId())->fHaveWitness = true;
  1445. }
  1446. // Potentially mark this peer as a preferred download peer.
  1447. {
  1448. LOCK(cs_main);
  1449. UpdatePreferredDownload(pfrom, State(pfrom->GetId()));
  1450. }
  1451. if (!pfrom->fInbound)
  1452. {
  1453. // Advertise our address
  1454. if (fListen && !IsInitialBlockDownload())
  1455. {
  1456. CAddress addr = GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices());
  1457. FastRandomContext insecure_rand;
  1458. if (addr.IsRoutable())
  1459. {
  1460. LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
  1461. pfrom->PushAddress(addr, insecure_rand);
  1462. } else if (IsPeerAddrLocalGood(pfrom)) {
  1463. addr.SetIP(addrMe);
  1464. LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
  1465. pfrom->PushAddress(addr, insecure_rand);
  1466. }
  1467. }
  1468. // Get recent addresses
  1469. if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman->GetAddressCount() < 1000)
  1470. {
  1471. connman->PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR));
  1472. pfrom->fGetAddr = true;
  1473. }
  1474. connman->MarkAddressGood(pfrom->addr);
  1475. }
  1476. std::string remoteAddr;
  1477. if (fLogIPs)
  1478. remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
  1479. LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
  1480. cleanSubVer, pfrom->nVersion,
  1481. pfrom->nStartingHeight, addrMe.ToString(), pfrom->GetId(),
  1482. remoteAddr);
  1483. int64_t nTimeOffset = nTime - GetTime();
  1484. pfrom->nTimeOffset = nTimeOffset;
  1485. AddTimeData(pfrom->addr, nTimeOffset);
  1486. // If the peer is old enough to have the old alert system, send it the final alert.
  1487. if (pfrom->nVersion <= 70012) {
  1488. CDataStream finalAlert(ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"), SER_NETWORK, PROTOCOL_VERSION);
  1489. connman->PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make("alert", finalAlert));
  1490. }
  1491. // Feeler connections exist only to verify if address is online.
  1492. if (pfrom->fFeeler) {
  1493. assert(pfrom->fInbound == false);
  1494. pfrom->fDisconnect = true;
  1495. }
  1496. return true;
  1497. }
  1498. else if (pfrom->nVersion == 0)
  1499. {
  1500. // Must have a version message before anything else
  1501. LOCK(cs_main);
  1502. Misbehaving(pfrom->GetId(), 1);
  1503. return false;
  1504. }
  1505. // At this point, the outgoing message serialization version can't change.
  1506. const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
  1507. if (strCommand == NetMsgType::VERACK)
  1508. {
  1509. pfrom->SetRecvVersion(std::min(pfrom->nVersion.load(), PROTOCOL_VERSION));
  1510. if (!pfrom->fInbound) {
  1511. // Mark this node as currently connected, so we update its timestamp later.
  1512. LOCK(cs_main);
  1513. State(pfrom->GetId())->fCurrentlyConnected = true;
  1514. }
  1515. if (pfrom->nVersion >= SENDHEADERS_VERSION) {
  1516. // Tell our peer we prefer to receive headers rather than inv's
  1517. // We send this to non-NODE NETWORK peers as well, because even
  1518. // non-NODE NETWORK peers can announce blocks (such as pruning
  1519. // nodes)
  1520. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
  1521. }
  1522. if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
  1523. // Tell our peer we are willing to provide version 1 or 2 cmpctblocks
  1524. // However, we do not request new block announcements using
  1525. // cmpctblock messages.
  1526. // We send this to non-NODE NETWORK peers as well, because
  1527. // they may wish to request compact blocks from us
  1528. bool fAnnounceUsingCMPCTBLOCK = false;
  1529. uint64_t nCMPCTBLOCKVersion = 2;
  1530. if (pfrom->GetLocalServices() & NODE_WITNESS)
  1531. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
  1532. nCMPCTBLOCKVersion = 1;
  1533. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
  1534. }
  1535. pfrom->fSuccessfullyConnected = true;
  1536. }
  1537. else if (!pfrom->fSuccessfullyConnected)
  1538. {
  1539. // Must have a verack message before anything else
  1540. LOCK(cs_main);
  1541. Misbehaving(pfrom->GetId(), 1);
  1542. return false;
  1543. }
  1544. else if (strCommand == NetMsgType::ADDR)
  1545. {
  1546. std::vector<CAddress> vAddr;
  1547. vRecv >> vAddr;
  1548. // Don't want addr from older versions unless seeding
  1549. if (pfrom->nVersion < CADDR_TIME_VERSION && connman->GetAddressCount() > 1000)
  1550. return true;
  1551. if (vAddr.size() > 1000)
  1552. {
  1553. LOCK(cs_main);
  1554. Misbehaving(pfrom->GetId(), 20);
  1555. return error("message addr size() = %u", vAddr.size());
  1556. }
  1557. // Store the new addresses
  1558. std::vector<CAddress> vAddrOk;
  1559. int64_t nNow = GetAdjustedTime();
  1560. int64_t nSince = nNow - 10 * 60;
  1561. for (CAddress& addr : vAddr)
  1562. {
  1563. if (interruptMsgProc)
  1564. return true;
  1565. if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES)
  1566. continue;
  1567. if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
  1568. addr.nTime = nNow - 5 * 24 * 60 * 60;
  1569. pfrom->AddAddressKnown(addr);
  1570. bool fReachable = IsReachable(addr);
  1571. if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
  1572. {
  1573. // Relay to a limited number of other nodes
  1574. RelayAddress(addr, fReachable, connman);
  1575. }
  1576. // Do not store addresses outside our network
  1577. if (fReachable)
  1578. vAddrOk.push_back(addr);
  1579. }
  1580. connman->AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
  1581. if (vAddr.size() < 1000)
  1582. pfrom->fGetAddr = false;
  1583. if (pfrom->fOneShot)
  1584. pfrom->fDisconnect = true;
  1585. }
  1586. else if (strCommand == NetMsgType::SENDHEADERS)
  1587. {
  1588. LOCK(cs_main);
  1589. State(pfrom->GetId())->fPreferHeaders = true;
  1590. }
  1591. else if (strCommand == NetMsgType::SENDCMPCT)
  1592. {
  1593. bool fAnnounceUsingCMPCTBLOCK = false;
  1594. uint64_t nCMPCTBLOCKVersion = 0;
  1595. vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
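// sendcmpct carries (announce-with-cmpctblock flag, version): version 1 is compact blocks
// without witnesses, version 2 (segwit) includes them. Per BIP 152, the first valid
// announcement from the peer locks in which version we will send to it.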
  1596. if (nCMPCTBLOCKVersion == 1 || ((pfrom->GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
  1597. LOCK(cs_main);
  1598. // fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness)
  1599. if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) {
  1600. State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
  1601. State(pfrom->GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
  1602. }
  1603. if (State(pfrom->GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces
  1604. State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
  1605. if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) {
  1606. if (pfrom->GetLocalServices() & NODE_WITNESS)
  1607. State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
  1608. else
  1609. State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
  1610. }
  1611. }
  1612. }
  1613. else if (strCommand == NetMsgType::INV)
  1614. {
  1615. std::vector<CInv> vInv;
  1616. vRecv >> vInv;
  1617. if (vInv.size() > MAX_INV_SZ)
  1618. {
  1619. LOCK(cs_main);
  1620. Misbehaving(pfrom->GetId(), 20);
  1621. return error("message inv size() = %u", vInv.size());
  1622. }
  1623. bool fBlocksOnly = !fRelayTxes;
  1624. // Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistrelay is true
  1625. if (pfrom->fWhitelisted && gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))
  1626. fBlocksOnly = false;
  1627. LOCK(cs_main);
  1628. uint32_t nFetchFlags = GetFetchFlags(pfrom);
  1629. for (CInv &inv : vInv)
  1630. {
  1631. if (interruptMsgProc)
  1632. return true;
  1633. bool fAlreadyHave = AlreadyHave(inv);
  1634. LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->GetId());
  1635. if (inv.type == MSG_TX) {
  1636. inv.type |= nFetchFlags;
  1637. }
  1638. if (inv.type == MSG_BLOCK) {
  1639. UpdateBlockAvailability(pfrom->GetId(), inv.hash);
  1640. if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
  1641. // We used to request the full block here, but since headers-announcements are now the
  1642. // primary method of announcement on the network, and since, in the case that a node
  1643. // fell back to inv we probably have a reorg which we should get the headers for first,
  1644. // we now only provide a getheaders response here. When we receive the headers, we will
  1645. // then ask for the blocks we need.
  1646. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash));
  1647. LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->GetId());
  1648. }
  1649. }
  1650. else
  1651. {
  1652. pfrom->AddInventoryKnown(inv);
  1653. if (fBlocksOnly) {
  1654. LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->GetId());
  1655. } else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload()) {
  1656. pfrom->AskFor(inv);
  1657. }
  1658. }
  1659. // Track requests for our stuff
  1660. GetMainSignals().Inventory(inv.hash);
  1661. }
  1662. }
  1663. else if (strCommand == NetMsgType::GETDATA)
  1664. {
  1665. std::vector<CInv> vInv;
  1666. vRecv >> vInv;
  1667. if (vInv.size() > MAX_INV_SZ)
  1668. {
  1669. LOCK(cs_main);
  1670. Misbehaving(pfrom->GetId(), 20);
  1671. return error("message getdata size() = %u", vInv.size());
  1672. }
  1673. LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->GetId());
  1674. if (vInv.size() > 0) {
  1675. LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->GetId());
  1676. }
  1677. pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
  1678. ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
  1679. }
  1680. else if (strCommand == NetMsgType::GETBLOCKS)
  1681. {
  1682. CBlockLocator locator;
  1683. uint256 hashStop;
  1684. vRecv >> locator >> hashStop;
  1685. // We might have announced the currently-being-connected tip using a
  1686. // compact block, which resulted in the peer sending a getblocks
  1687. // request, which we would otherwise respond to without the new block.
  1688. // To avoid this situation we simply verify that we are on our best
  1689. // known chain now. This is super overkill, but we handle it better
  1690. // for getheaders requests, and there are no known nodes which support
  1691. // compact blocks but still use getblocks to request blocks.
  1692. {
  1693. std::shared_ptr<const CBlock> a_recent_block;
  1694. {
  1695. LOCK(cs_most_recent_block);
  1696. a_recent_block = most_recent_block;
  1697. }
  1698. CValidationState dummy;
  1699. ActivateBestChain(dummy, Params(), a_recent_block);
  1700. }
  1701. LOCK(cs_main);
  1702. // Find the last block the caller has in the main chain
  1703. const CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);
  1704. // Send the rest of the chain
  1705. if (pindex)
  1706. pindex = chainActive.Next(pindex);
  1707. int nLimit = 500;
  1708. LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->GetId());
  1709. for (; pindex; pindex = chainActive.Next(pindex))
  1710. {
  1711. if (pindex->GetBlockHash() == hashStop)
  1712. {
  1713. LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
  1714. break;
  1715. }
1716. // If pruning, don't inv blocks unless we have them on disk and are likely to still have
1717. // them for some reasonable time window (1 hour) that block relay might require.
  1718. const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
  1719. if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave))
  1720. {
  1721. LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
  1722. break;
  1723. }
  1724. pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
  1725. if (--nLimit <= 0)
  1726. {
  1727. // When this block is requested, we'll send an inv that'll
  1728. // trigger the peer to getblocks the next batch of inventory.
  1729. LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
  1730. pfrom->hashContinue = pindex->GetBlockHash();
  1731. break;
  1732. }
  1733. }
  1734. }
  1735. else if (strCommand == NetMsgType::GETBLOCKTXN)
  1736. {
  1737. BlockTransactionsRequest req;
  1738. vRecv >> req;
  1739. std::shared_ptr<const CBlock> recent_block;
  1740. {
  1741. LOCK(cs_most_recent_block);
  1742. if (most_recent_block_hash == req.blockhash)
  1743. recent_block = most_recent_block;
  1744. // Unlock cs_most_recent_block to avoid cs_main lock inversion
  1745. }
  1746. if (recent_block) {
  1747. SendBlockTransactions(*recent_block, req, pfrom, connman);
  1748. return true;
  1749. }
  1750. LOCK(cs_main);
  1751. BlockMap::iterator it = mapBlockIndex.find(req.blockhash);
  1752. if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) {
1753. LogPrintf("Peer %d sent us a getblocktxn for a block we don't have\n", pfrom->GetId());
  1754. return true;
  1755. }
  1756. if (it->second->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) {
  1757. // If an older block is requested (should never happen in practice,
  1758. // but can happen in tests) send a block response instead of a
  1759. // blocktxn response. Sending a full block response instead of a
  1760. // small blocktxn response is preferable in the case where a peer
  1761. // might maliciously send lots of getblocktxn requests to trigger
  1762. // expensive disk reads, because it will require the peer to
  1763. // actually receive all the data read from disk over the network.
1764. LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom->GetId(), MAX_BLOCKTXN_DEPTH);
  1765. CInv inv;
  1766. inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
  1767. inv.hash = req.blockhash;
  1768. pfrom->vRecvGetData.push_back(inv);
  1769. ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
  1770. return true;
  1771. }
  1772. CBlock block;
  1773. bool ret = ReadBlockFromDisk(block, it->second, chainparams.GetConsensus());
  1774. assert(ret);
  1775. SendBlockTransactions(block, req, pfrom, connman);
  1776. }
  1777. else if (strCommand == NetMsgType::GETHEADERS)
  1778. {
  1779. CBlockLocator locator;
  1780. uint256 hashStop;
  1781. vRecv >> locator >> hashStop;
  1782. LOCK(cs_main);
  1783. if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
  1784. LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->GetId());
  1785. return true;
  1786. }
  1787. CNodeState *nodestate = State(pfrom->GetId());
  1788. const CBlockIndex* pindex = nullptr;
  1789. if (locator.IsNull())
  1790. {
  1791. // If locator is null, return the hashStop block
  1792. BlockMap::iterator mi = mapBlockIndex.find(hashStop);
  1793. if (mi == mapBlockIndex.end())
  1794. return true;
  1795. pindex = (*mi).second;
  1796. }
  1797. else
  1798. {
  1799. // Find the last block the caller has in the main chain
  1800. pindex = FindForkInGlobalIndex(chainActive, locator);
  1801. if (pindex)
  1802. pindex = chainActive.Next(pindex);
  1803. }
  1804. // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
  1805. std::vector<CBlock> vHeaders;
  1806. int nLimit = MAX_HEADERS_RESULTS;
  1807. LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->GetId());
  1808. for (; pindex; pindex = chainActive.Next(pindex))
  1809. {
  1810. vHeaders.push_back(pindex->GetBlockHeader());
  1811. if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
  1812. break;
  1813. }
  1814. // pindex can be nullptr either if we sent chainActive.Tip() OR
  1815. // if our peer has chainActive.Tip() (and thus we are sending an empty
  1816. // headers message). In both cases it's safe to update
  1817. // pindexBestHeaderSent to be our tip.
  1818. //
  1819. // It is important that we simply reset the BestHeaderSent value here,
  1820. // and not max(BestHeaderSent, newHeaderSent). We might have announced
  1821. // the currently-being-connected tip using a compact block, which
  1822. // resulted in the peer sending a headers request, which we respond to
  1823. // without the new block. By resetting the BestHeaderSent, we ensure we
  1824. // will re-announce the new block via headers (or compact blocks again)
  1825. // in the SendMessages logic.
  1826. nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
  1827. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
  1828. }
  1829. else if (strCommand == NetMsgType::TX)
  1830. {
  1831. // Stop processing the transaction early if
  1832. // We are in blocks only mode and peer is either not whitelisted or whitelistrelay is off
  1833. if (!fRelayTxes && (!pfrom->fWhitelisted || !gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)))
  1834. {
  1835. LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom->GetId());
  1836. return true;
  1837. }
  1838. std::deque<COutPoint> vWorkQueue;
  1839. std::vector<uint256> vEraseQueue;
  1840. CTransactionRef ptx;
  1841. vRecv >> ptx;
  1842. const CTransaction& tx = *ptx;
  1843. CInv inv(MSG_TX, tx.GetHash());
  1844. pfrom->AddInventoryKnown(inv);
  1845. LOCK(cs_main);
  1846. bool fMissingInputs = false;
  1847. CValidationState state;
  1848. pfrom->setAskFor.erase(inv.hash);
  1849. mapAlreadyAskedFor.erase(inv.hash);
  1850. std::list<CTransactionRef> lRemovedTxn;
  1851. if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, ptx, true, &fMissingInputs, &lRemovedTxn)) {
  1852. mempool.check(pcoinsTip);
  1853. RelayTransaction(tx, connman);
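// Queue every output of the newly accepted transaction so that orphans spending it can be
// reconsidered below.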
  1854. for (unsigned int i = 0; i < tx.vout.size(); i++) {
  1855. vWorkQueue.emplace_back(inv.hash, i);
  1856. }
  1857. pfrom->nLastTXTime = GetTime();
  1858. LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
  1859. pfrom->GetId(),
  1860. tx.GetHash().ToString(),
  1861. mempool.size(), mempool.DynamicMemoryUsage() / 1000);
  1862. // Recursively process any orphan transactions that depended on this one
  1863. std::set<NodeId> setMisbehaving;
  1864. while (!vWorkQueue.empty()) {
  1865. auto itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue.front());
  1866. vWorkQueue.pop_front();
  1867. if (itByPrev == mapOrphanTransactionsByPrev.end())
  1868. continue;
  1869. for (auto mi = itByPrev->second.begin();
  1870. mi != itByPrev->second.end();
  1871. ++mi)
  1872. {
  1873. const CTransactionRef& porphanTx = (*mi)->second.tx;
  1874. const CTransaction& orphanTx = *porphanTx;
  1875. const uint256& orphanHash = orphanTx.GetHash();
  1876. NodeId fromPeer = (*mi)->second.fromPeer;
  1877. bool fMissingInputs2 = false;
1878. // Use a dummy CValidationState so someone can't set up nodes to counter-DoS based on orphan
  1879. // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
  1880. // anyone relaying LegitTxX banned)
  1881. CValidationState stateDummy;
  1882. if (setMisbehaving.count(fromPeer))
  1883. continue;
  1884. if (AcceptToMemoryPool(mempool, stateDummy, porphanTx, true, &fMissingInputs2, &lRemovedTxn)) {
  1885. LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString());
  1886. RelayTransaction(orphanTx, connman);
  1887. for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
  1888. vWorkQueue.emplace_back(orphanHash, i);
  1889. }
  1890. vEraseQueue.push_back(orphanHash);
  1891. }
  1892. else if (!fMissingInputs2)
  1893. {
  1894. int nDos = 0;
  1895. if (stateDummy.IsInvalid(nDos) && nDos > 0)
  1896. {
  1897. // Punish peer that gave us an invalid orphan tx
  1898. Misbehaving(fromPeer, nDos);
  1899. setMisbehaving.insert(fromPeer);
  1900. LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s\n", orphanHash.ToString());
  1901. }
  1902. // Has inputs but not accepted to mempool
  1903. // Probably non-standard or insufficient fee
  1904. LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString());
  1905. vEraseQueue.push_back(orphanHash);
  1906. if (!orphanTx.HasWitness() && !stateDummy.CorruptionPossible()) {
  1907. // Do not use rejection cache for witness transactions or
  1908. // witness-stripped transactions, as they can have been malleated.
  1909. // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
  1910. assert(recentRejects);
  1911. recentRejects->insert(orphanHash);
  1912. }
  1913. }
  1914. mempool.check(pcoinsTip);
  1915. }
  1916. }
  1917. for (uint256 hash : vEraseQueue)
  1918. EraseOrphanTx(hash);
  1919. }
  1920. else if (fMissingInputs)
  1921. {
1922. bool fRejectedParents = false; // It may be the case that the orphan's parents have all been rejected
  1923. for (const CTxIn& txin : tx.vin) {
  1924. if (recentRejects->contains(txin.prevout.hash)) {
  1925. fRejectedParents = true;
  1926. break;
  1927. }
  1928. }
  1929. if (!fRejectedParents) {
  1930. uint32_t nFetchFlags = GetFetchFlags(pfrom);
  1931. for (const CTxIn& txin : tx.vin) {
  1932. CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash);
  1933. pfrom->AddInventoryKnown(_inv);
  1934. if (!AlreadyHave(_inv)) pfrom->AskFor(_inv);
  1935. }
  1936. AddOrphanTx(ptx, pfrom->GetId());
  1937. // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
  1938. unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, gArgs.GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
  1939. unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
  1940. if (nEvicted > 0) {
  1941. LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted);
  1942. }
  1943. } else {
  1944. LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
  1945. // We will continue to reject this tx since it has rejected
  1946. // parents so avoid re-requesting it from other peers.
  1947. recentRejects->insert(tx.GetHash());
  1948. }
  1949. } else {
  1950. if (!tx.HasWitness() && !state.CorruptionPossible()) {
  1951. // Do not use rejection cache for witness transactions or
  1952. // witness-stripped transactions, as they can have been malleated.
  1953. // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
  1954. assert(recentRejects);
  1955. recentRejects->insert(tx.GetHash());
  1956. if (RecursiveDynamicUsage(*ptx) < 100000) {
  1957. AddToCompactExtraTransactions(ptx);
  1958. }
  1959. } else if (tx.HasWitness() && RecursiveDynamicUsage(*ptx) < 100000) {
  1960. AddToCompactExtraTransactions(ptx);
  1961. }
  1962. if (pfrom->fWhitelisted && gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
  1963. // Always relay transactions received from whitelisted peers, even
  1964. // if they were already in the mempool or rejected from it due
  1965. // to policy, allowing the node to function as a gateway for
  1966. // nodes hidden behind it.
  1967. //
  1968. // Never relay transactions that we would assign a non-zero DoS
  1969. // score for, as we expect peers to do the same with us in that
  1970. // case.
  1971. int nDoS = 0;
  1972. if (!state.IsInvalid(nDoS) || nDoS == 0) {
  1973. LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->GetId());
  1974. RelayTransaction(tx, connman);
  1975. } else {
  1976. LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->GetId(), FormatStateMessage(state));
  1977. }
  1978. }
  1979. }
  1980. for (const CTransactionRef& removedTx : lRemovedTxn)
  1981. AddToCompactExtraTransactions(removedTx);
  1982. int nDoS = 0;
  1983. if (state.IsInvalid(nDoS))
  1984. {
  1985. LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
  1986. pfrom->GetId(),
  1987. FormatStateMessage(state));
  1988. if (state.GetRejectCode() > 0 && state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
  1989. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
  1990. state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash));
  1991. if (nDoS > 0) {
  1992. Misbehaving(pfrom->GetId(), nDoS);
  1993. }
  1994. }
  1995. }
  1996. else if (strCommand == NetMsgType::CMPCTBLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
  1997. {
  1998. CBlockHeaderAndShortTxIDs cmpctblock;
  1999. vRecv >> cmpctblock;
  2000. bool received_new_header = false;
  2001. {
  2002. LOCK(cs_main);
  2003. if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) == mapBlockIndex.end()) {
  2004. // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
  2005. if (!IsInitialBlockDownload())
  2006. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
  2007. return true;
  2008. }
  2009. if (mapBlockIndex.find(cmpctblock.header.GetHash()) == mapBlockIndex.end()) {
  2010. received_new_header = true;
  2011. }
  2012. }
  2013. const CBlockIndex *pindex = nullptr;
  2014. CValidationState state;
  2015. if (!ProcessNewBlockHeaders({cmpctblock.header}, state, chainparams, &pindex)) {
  2016. int nDoS;
  2017. if (state.IsInvalid(nDoS)) {
  2018. if (nDoS > 0) {
  2019. LOCK(cs_main);
  2020. Misbehaving(pfrom->GetId(), nDoS);
  2021. }
  2022. LogPrintf("Peer %d sent us invalid header via cmpctblock\n", pfrom->GetId());
  2023. return true;
  2024. }
  2025. }
  2026. // When we succeed in decoding a block's txids from a cmpctblock
  2027. // message we typically jump to the BLOCKTXN handling code, with a
  2028. // dummy (empty) BLOCKTXN message, to re-use the logic there in
  2029. // completing processing of the putative block (without cs_main).
  2030. bool fProcessBLOCKTXN = false;
  2031. CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
  2032. // If we end up treating this as a plain headers message, call that as well
  2033. // without cs_main.
  2034. bool fRevertToHeaderProcessing = false;
  2035. // Keep a CBlock for "optimistic" compactblock reconstructions (see
  2036. // below)
  2037. std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
  2038. bool fBlockReconstructed = false;
  2039. {
  2040. LOCK(cs_main);
  2041. // If AcceptBlockHeader returned true, it set pindex
  2042. assert(pindex);
  2043. UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());
  2044. CNodeState *nodestate = State(pfrom->GetId());
  2045. // If this was a new header with more work than our tip, update the
  2046. // peer's last block announcement time
  2047. if (received_new_header && pindex->nChainWork > chainActive.Tip()->nChainWork) {
  2048. nodestate->m_last_block_announcement = GetTime();
  2049. }
  2050. std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
  2051. bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
  2052. if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
  2053. return true;
  2054. if (pindex->nChainWork <= chainActive.Tip()->nChainWork || // We know something better
  2055. pindex->nTx != 0) { // We had this block at some point, but pruned it
  2056. if (fAlreadyInFlight) {
  2057. // We requested this block for some reason, but our mempool will probably be useless
  2058. // so we just grab the block via normal getdata
  2059. std::vector<CInv> vInv(1);
  2060. vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
  2061. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
  2062. }
  2063. return true;
  2064. }
  2065. // If we're not close to tip yet, give up and let parallel block fetch work its magic
  2066. if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus()))
  2067. return true;
  2068. if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
  2069. // Don't bother trying to process compact blocks from v1 peers
  2070. // after segwit activates.
  2071. return true;
  2072. }
  2073. // We want to be a bit conservative just to be extra careful about DoS
  2074. // possibilities in compact block processing...
  2075. if (pindex->nHeight <= chainActive.Height() + 2) {
  2076. if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
  2077. (fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) {
  2078. std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
  2079. if (!MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) {
  2080. if (!(*queuedBlockIt)->partialBlock)
  2081. (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool));
  2082. else {
  2083. // The block was already in flight using compact blocks from the same peer
  2084. LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
  2085. return true;
  2086. }
  2087. }
  2088. PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
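// InitData computes the short transaction IDs and pre-fills the partial
// block from our mempool and from vExtraTxnForCompact (recently seen
// orphan/conflicted transactions kept around for exactly this purpose).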
  2089. ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
  2090. if (status == READ_STATUS_INVALID) {
  2091. MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case of whitelist
  2092. Misbehaving(pfrom->GetId(), 100);
  2093. LogPrintf("Peer %d sent us invalid compact block\n", pfrom->GetId());
  2094. return true;
  2095. } else if (status == READ_STATUS_FAILED) {
  2096. // Duplicate txindexes, the block is now in-flight, so just request it
  2097. std::vector<CInv> vInv(1);
  2098. vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
  2099. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
  2100. return true;
  2101. }
  2102. BlockTransactionsRequest req;
  2103. for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
  2104. if (!partialBlock.IsTxAvailable(i))
  2105. req.indexes.push_back(i);
  2106. }
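// If the mempool and extra pool supplied every transaction, the block is
// already complete and no GETBLOCKTXN round trip is needed.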
  2107. if (req.indexes.empty()) {
  2108. // Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions)
  2109. BlockTransactions txn;
  2110. txn.blockhash = cmpctblock.header.GetHash();
  2111. blockTxnMsg << txn;
  2112. fProcessBLOCKTXN = true;
  2113. } else {
  2114. req.blockhash = pindex->GetBlockHash();
  2115. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
  2116. }
  2117. } else {
  2118. // This block is either already in flight from a different
2119. // peer, or this peer already has too many blocks outstanding for us
2120. // to download more from it.
  2121. // Optimistically try to reconstruct anyway since we might be
  2122. // able to without any round trips.
  2123. PartiallyDownloadedBlock tempBlock(&mempool);
  2124. ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
  2125. if (status != READ_STATUS_OK) {
  2126. // TODO: don't ignore failures
  2127. return true;
  2128. }
  2129. std::vector<CTransactionRef> dummy;
  2130. status = tempBlock.FillBlock(*pblock, dummy);
  2131. if (status == READ_STATUS_OK) {
  2132. fBlockReconstructed = true;
  2133. }
  2134. }
  2135. } else {
  2136. if (fAlreadyInFlight) {
2137. // We requested this block, but it's far into the future, so our
  2138. // mempool will probably be useless - request the block normally
  2139. std::vector<CInv> vInv(1);
  2140. vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
  2141. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
  2142. return true;
  2143. } else {
  2144. // If this was an announce-cmpctblock, we want the same treatment as a header message
  2145. fRevertToHeaderProcessing = true;
  2146. }
  2147. }
  2148. } // cs_main
  2149. if (fProcessBLOCKTXN)
  2150. return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman, interruptMsgProc);
  2151. if (fRevertToHeaderProcessing) {
  2152. // Headers received from HB compact block peers are permitted to be
  2153. // relayed before full validation (see BIP 152), so we don't want to disconnect
  2154. // the peer if the header turns out to be for an invalid block.
  2155. // Note that if a peer tries to build on an invalid chain, that
  2156. // will be detected and the peer will be banned.
  2157. return ProcessHeadersMessage(pfrom, connman, {cmpctblock.header}, chainparams, /*punish_duplicate_invalid=*/false);
  2158. }
  2159. if (fBlockReconstructed) {
  2160. // If we got here, we were able to optimistically reconstruct a
  2161. // block that is in flight from some other peer.
  2162. {
  2163. LOCK(cs_main);
  2164. mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom->GetId(), false));
  2165. }
  2166. bool fNewBlock = false;
  2167. // Setting fForceProcessing to true means that we bypass some of
  2168. // our anti-DoS protections in AcceptBlock, which filters
  2169. // unrequested blocks that might be trying to waste our resources
  2170. // (eg disk space). Because we only try to reconstruct blocks when
  2171. // we're close to caught up (via the CanDirectFetch() requirement
  2172. // above, combined with the behavior of not requesting blocks until
  2173. // we have a chain with at least nMinimumChainWork), and we ignore
  2174. // compact blocks with less work than our tip, it is safe to treat
  2175. // reconstructed compact blocks as having been requested.
  2176. ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
  2177. if (fNewBlock) {
  2178. pfrom->nLastBlockTime = GetTime();
  2179. } else {
  2180. LOCK(cs_main);
  2181. mapBlockSource.erase(pblock->GetHash());
  2182. }
  2183. LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
  2184. if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
  2185. // Clear download state for this block, which is in
  2186. // process from some other peer. We do this after calling
  2187. // ProcessNewBlock so that a malleated cmpctblock announcement
  2188. // can't be used to interfere with block relay.
  2189. MarkBlockAsReceived(pblock->GetHash());
  2190. }
  2191. }
  2192. }
  2193. else if (strCommand == NetMsgType::BLOCKTXN && !fImporting && !fReindex) // Ignore blocks received while importing
  2194. {
  2195. BlockTransactions resp;
  2196. vRecv >> resp;
  2197. std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
  2198. bool fBlockRead = false;
  2199. {
  2200. LOCK(cs_main);
  2201. std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
  2202. if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
  2203. it->second.first != pfrom->GetId()) {
  2204. LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->GetId());
  2205. return true;
  2206. }
  2207. PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
  2208. ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
  2209. if (status == READ_STATUS_INVALID) {
  2210. MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case of whitelist
  2211. Misbehaving(pfrom->GetId(), 100);
  2212. LogPrintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom->GetId());
  2213. return true;
  2214. } else if (status == READ_STATUS_FAILED) {
  2215. // Might have collided, fall back to getdata now :(
  2216. std::vector<CInv> invs;
  2217. invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom), resp.blockhash));
  2218. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
  2219. } else {
  2220. // Block is either okay, or possibly we received
  2221. // READ_STATUS_CHECKBLOCK_FAILED.
  2222. // Note that CheckBlock can only fail for one of a few reasons:
  2223. // 1. bad-proof-of-work (impossible here, because we've already
  2224. // accepted the header)
  2225. // 2. merkleroot doesn't match the transactions given (already
  2226. // caught in FillBlock with READ_STATUS_FAILED, so
  2227. // impossible here)
  2228. // 3. the block is otherwise invalid (eg invalid coinbase,
  2229. // block is too big, too many legacy sigops, etc).
  2230. // So if CheckBlock failed, #3 is the only possibility.
  2231. // Under BIP 152, we don't DoS-ban unless proof of work is
  2232. // invalid (we don't require all the stateless checks to have
  2233. // been run). This is handled below, so just treat this as
  2234. // though the block was successfully read, and rely on the
  2235. // handling in ProcessNewBlock to ensure the block index is
  2236. // updated, reject messages go out, etc.
  2237. MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer
  2238. fBlockRead = true;
  2239. // mapBlockSource is only used for sending reject messages and DoS scores,
  2240. // so the race between here and cs_main in ProcessNewBlock is fine.
  2241. // BIP 152 permits peers to relay compact blocks after validating
  2242. // the header only; we should not punish peers if the block turns
  2243. // out to be invalid.
  2244. mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom->GetId(), false));
  2245. }
  2246. } // Don't hold cs_main when we call into ProcessNewBlock
  2247. if (fBlockRead) {
  2248. bool fNewBlock = false;
  2249. // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
  2250. // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
  2251. // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
  2252. // disk-space attacks), but this should be safe due to the
  2253. // protections in the compact block handler -- see related comment
  2254. // in compact block optimistic reconstruction handling.
  2255. ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
  2256. if (fNewBlock) {
  2257. pfrom->nLastBlockTime = GetTime();
  2258. } else {
  2259. LOCK(cs_main);
  2260. mapBlockSource.erase(pblock->GetHash());
  2261. }
  2262. }
  2263. }
  2264. else if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) // Ignore headers received while importing
  2265. {
  2266. std::vector<CBlockHeader> headers;
  2267. // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
  2268. unsigned int nCount = ReadCompactSize(vRecv);
  2269. if (nCount > MAX_HEADERS_RESULTS) {
  2270. LOCK(cs_main);
  2271. Misbehaving(pfrom->GetId(), 20);
  2272. return error("headers message size = %u", nCount);
  2273. }
  2274. headers.resize(nCount);
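// Each header is serialized on the wire like a block with an empty
// transaction vector, so a compact-size tx count follows every header.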
  2275. for (unsigned int n = 0; n < nCount; n++) {
  2276. vRecv >> headers[n];
  2277. ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
  2278. }
  2279. // Headers received via a HEADERS message should be valid, and reflect
  2280. // the chain the peer is on. If we receive a known-invalid header,
  2281. // disconnect the peer if it is using one of our outbound connection
  2282. // slots.
  2283. bool should_punish = !pfrom->fInbound && !pfrom->m_manual_connection;
  2284. return ProcessHeadersMessage(pfrom, connman, headers, chainparams, should_punish);
  2285. }
  2286. else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
  2287. {
  2288. std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
  2289. vRecv >> *pblock;
  2290. LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->GetId());
  2291. bool forceProcessing = false;
  2292. const uint256 hash(pblock->GetHash());
  2293. {
  2294. LOCK(cs_main);
  2295. // Also always process if we requested the block explicitly, as we may
  2296. // need it even though it is not a candidate for a new best tip.
  2297. forceProcessing |= MarkBlockAsReceived(hash);
  2298. // mapBlockSource is only used for sending reject messages and DoS scores,
  2299. // so the race between here and cs_main in ProcessNewBlock is fine.
  2300. mapBlockSource.emplace(hash, std::make_pair(pfrom->GetId(), true));
  2301. }
  2302. bool fNewBlock = false;
  2303. ProcessNewBlock(chainparams, pblock, forceProcessing, &fNewBlock);
  2304. if (fNewBlock) {
  2305. pfrom->nLastBlockTime = GetTime();
  2306. } else {
  2307. LOCK(cs_main);
  2308. mapBlockSource.erase(pblock->GetHash());
  2309. }
  2310. }
  2311. else if (strCommand == NetMsgType::GETADDR)
  2312. {
  2313. // This asymmetric behavior for inbound and outbound connections was introduced
  2314. // to prevent a fingerprinting attack: an attacker can send specific fake addresses
  2315. // to users' AddrMan and later request them by sending getaddr messages.
  2316. // Making nodes which are behind NAT and can only make outgoing connections ignore
  2317. // the getaddr message mitigates the attack.
  2318. if (!pfrom->fInbound) {
  2319. LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->GetId());
  2320. return true;
  2321. }
  2322. // Only send one GetAddr response per connection to reduce resource waste
  2323. // and discourage addr stamping of INV announcements.
  2324. if (pfrom->fSentAddr) {
  2325. LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->GetId());
  2326. return true;
  2327. }
  2328. pfrom->fSentAddr = true;
  2329. pfrom->vAddrToSend.clear();
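// GetAddresses() returns a randomized subset of addrman; PushAddress
// queues each entry for the trickled addr broadcast in SendMessages.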
  2330. std::vector<CAddress> vAddr = connman->GetAddresses();
  2331. FastRandomContext insecure_rand;
  2332. for (const CAddress &addr : vAddr)
  2333. pfrom->PushAddress(addr, insecure_rand);
  2334. }
  2335. else if (strCommand == NetMsgType::MEMPOOL)
  2336. {
  2337. if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->fWhitelisted)
  2338. {
  2339. LogPrint(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom->GetId());
  2340. pfrom->fDisconnect = true;
  2341. return true;
  2342. }
  2343. if (connman->OutboundTargetReached(false) && !pfrom->fWhitelisted)
  2344. {
  2345. LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
  2346. pfrom->fDisconnect = true;
  2347. return true;
  2348. }
  2349. LOCK(pfrom->cs_inventory);
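// Just record the request here; the actual inventory reply is assembled
// in SendMessages on the next trickle, where the peer's fee filter and
// bloom filter are applied.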
  2350. pfrom->fSendMempool = true;
  2351. }
  2352. else if (strCommand == NetMsgType::PING)
  2353. {
  2354. if (pfrom->nVersion > BIP0031_VERSION)
  2355. {
  2356. uint64_t nonce = 0;
  2357. vRecv >> nonce;
  2358. // Echo the message back with the nonce. This allows for two useful features:
  2359. //
  2360. // 1) A remote node can quickly check if the connection is operational
  2361. // 2) Remote nodes can measure the latency of the network thread. If this node
  2362. // is overloaded it won't respond to pings quickly and the remote node can
  2363. // avoid sending us more work, like chain download requests.
  2364. //
2365. // The nonce stops the remote node from getting confused between different pings: without
  2366. // it, if the remote node sends a ping once per second and this node takes 5
  2367. // seconds to respond to each, the 5th ping the remote sends would appear to
  2368. // return very quickly.
  2369. connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
  2370. }
  2371. }
  2372. else if (strCommand == NetMsgType::PONG)
  2373. {
  2374. int64_t pingUsecEnd = nTimeReceived;
  2375. uint64_t nonce = 0;
  2376. size_t nAvail = vRecv.in_avail();
  2377. bool bPingFinished = false;
  2378. std::string sProblem;
  2379. if (nAvail >= sizeof(nonce)) {
  2380. vRecv >> nonce;
  2381. // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
  2382. if (pfrom->nPingNonceSent != 0) {
  2383. if (nonce == pfrom->nPingNonceSent) {
  2384. // Matching pong received, this ping is no longer outstanding
  2385. bPingFinished = true;
  2386. int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
  2387. if (pingUsecTime > 0) {
  2388. // Successful ping time measurement, replace previous
  2389. pfrom->nPingUsecTime = pingUsecTime;
  2390. pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime.load(), pingUsecTime);
  2391. } else {
  2392. // This should never happen
  2393. sProblem = "Timing mishap";
  2394. }
  2395. } else {
  2396. // Nonce mismatches are normal when pings are overlapping
  2397. sProblem = "Nonce mismatch";
  2398. if (nonce == 0) {
  2399. // This is most likely a bug in another implementation somewhere; cancel this ping
  2400. bPingFinished = true;
  2401. sProblem = "Nonce zero";
  2402. }
  2403. }
  2404. } else {
  2405. sProblem = "Unsolicited pong without ping";
  2406. }
  2407. } else {
  2408. // This is most likely a bug in another implementation somewhere; cancel this ping
  2409. bPingFinished = true;
  2410. sProblem = "Short payload";
  2411. }
  2412. if (!(sProblem.empty())) {
  2413. LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
  2414. pfrom->GetId(),
  2415. sProblem,
  2416. pfrom->nPingNonceSent,
  2417. nonce,
  2418. nAvail);
  2419. }
  2420. if (bPingFinished) {
  2421. pfrom->nPingNonceSent = 0;
  2422. }
  2423. }
  2424. else if (strCommand == NetMsgType::FILTERLOAD)
  2425. {
  2426. CBloomFilter filter;
  2427. vRecv >> filter;
  2428. if (!filter.IsWithinSizeConstraints())
  2429. {
  2430. // There is no excuse for sending a too-large filter
  2431. LOCK(cs_main);
  2432. Misbehaving(pfrom->GetId(), 100);
  2433. }
  2434. else
  2435. {
  2436. LOCK(pfrom->cs_filter);
  2437. delete pfrom->pfilter;
  2438. pfrom->pfilter = new CBloomFilter(filter);
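// Precompute whether the loaded filter matches everything or nothing so
// later lookups can short-circuit.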
  2439. pfrom->pfilter->UpdateEmptyFull();
  2440. pfrom->fRelayTxes = true;
  2441. }
  2442. }
  2443. else if (strCommand == NetMsgType::FILTERADD)
  2444. {
  2445. std::vector<unsigned char> vData;
  2446. vRecv >> vData;
  2447. // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
  2448. // and thus, the maximum size any matched object can have) in a filteradd message
  2449. bool bad = false;
  2450. if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
  2451. bad = true;
  2452. } else {
  2453. LOCK(pfrom->cs_filter);
  2454. if (pfrom->pfilter) {
  2455. pfrom->pfilter->insert(vData);
  2456. } else {
  2457. bad = true;
  2458. }
  2459. }
  2460. if (bad) {
  2461. LOCK(cs_main);
  2462. Misbehaving(pfrom->GetId(), 100);
  2463. }
  2464. }
  2465. else if (strCommand == NetMsgType::FILTERCLEAR)
  2466. {
  2467. LOCK(pfrom->cs_filter);
  2468. if (pfrom->GetLocalServices() & NODE_BLOOM) {
  2469. delete pfrom->pfilter;
  2470. pfrom->pfilter = new CBloomFilter();
  2471. }
  2472. pfrom->fRelayTxes = true;
  2473. }
  2474. else if (strCommand == NetMsgType::FEEFILTER) {
  2475. CAmount newFeeFilter = 0;
  2476. vRecv >> newFeeFilter;
  2477. if (MoneyRange(newFeeFilter)) {
  2478. {
  2479. LOCK(pfrom->cs_feeFilter);
  2480. pfrom->minFeeFilter = newFeeFilter;
  2481. }
  2482. LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->GetId());
  2483. }
  2484. }
  2485. else if (strCommand == NetMsgType::NOTFOUND) {
  2486. // We do not care about the NOTFOUND message, but logging an Unknown Command
  2487. // message would be undesirable as we transmit it ourselves.
  2488. }
  2489. else {
  2490. // Ignore unknown commands for extensibility
  2491. LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->GetId());
  2492. }
  2493. return true;
  2494. }
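// Flush any queued block reject messages for this peer and, if it has been
// flagged for banning, disconnect and ban it unless it is whitelisted,
// manually added, or a local address. Returns true if the ban flag was set.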
  2495. static bool SendRejectsAndCheckIfBanned(CNode* pnode, CConnman* connman)
  2496. {
  2497. AssertLockHeld(cs_main);
  2498. CNodeState &state = *State(pnode->GetId());
  2499. for (const CBlockReject& reject : state.rejects) {
  2500. connman->PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, (std::string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock));
  2501. }
  2502. state.rejects.clear();
  2503. if (state.fShouldBan) {
  2504. state.fShouldBan = false;
  2505. if (pnode->fWhitelisted)
  2506. LogPrintf("Warning: not punishing whitelisted peer %s!\n", pnode->addr.ToString());
  2507. else if (pnode->m_manual_connection)
  2508. LogPrintf("Warning: not punishing addnoded peer %s!\n", pnode->addr.ToString());
  2509. else {
  2510. pnode->fDisconnect = true;
  2511. if (pnode->addr.IsLocal())
  2512. LogPrintf("Warning: not banning local peer %s!\n", pnode->addr.ToString());
  2513. else
  2514. {
  2515. connman->Ban(pnode->addr, BanReasonNodeMisbehaving);
  2516. }
  2517. }
  2518. return true;
  2519. }
  2520. return false;
  2521. }
  2522. bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
  2523. {
  2524. const CChainParams& chainparams = Params();
  2525. //
  2526. // Message format
  2527. // (4) message start
  2528. // (12) command
  2529. // (4) size
  2530. // (4) checksum
  2531. // (x) data
  2532. //
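// For illustration, a mainnet "ping" would be framed roughly as:
//   f9 be b4 d9                            message start (mainnet magic)
//   70 69 6e 67 00 00 00 00 00 00 00 00    command "ping", NUL-padded to 12 bytes
//   08 00 00 00                            payload size (little-endian)
//   xx xx xx xx                            checksum: first 4 bytes of SHA256d(payload)
//   <8-byte nonce>                         payload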
  2533. bool fMoreWork = false;
  2534. if (!pfrom->vRecvGetData.empty())
  2535. ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
  2536. if (pfrom->fDisconnect)
  2537. return false;
  2538. // this maintains the order of responses
  2539. if (!pfrom->vRecvGetData.empty()) return true;
  2540. // Don't bother if send buffer is too full to respond anyway
  2541. if (pfrom->fPauseSend)
  2542. return false;
  2543. std::list<CNetMessage> msgs;
  2544. {
  2545. LOCK(pfrom->cs_vProcessMsg);
  2546. if (pfrom->vProcessMsg.empty())
  2547. return false;
  2548. // Just take one message
  2549. msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin());
  2550. pfrom->nProcessQueueSize -= msgs.front().vRecv.size() + CMessageHeader::HEADER_SIZE;
  2551. pfrom->fPauseRecv = pfrom->nProcessQueueSize > connman->GetReceiveFloodSize();
  2552. fMoreWork = !pfrom->vProcessMsg.empty();
  2553. }
  2554. CNetMessage& msg(msgs.front());
  2555. msg.SetVersion(pfrom->GetRecvVersion());
  2556. // Scan for message start
  2557. if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) {
  2558. LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->GetId());
  2559. pfrom->fDisconnect = true;
  2560. return false;
  2561. }
  2562. // Read header
  2563. CMessageHeader& hdr = msg.hdr;
  2564. if (!hdr.IsValid(chainparams.MessageStart()))
  2565. {
  2566. LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->GetId());
  2567. return fMoreWork;
  2568. }
  2569. std::string strCommand = hdr.GetCommand();
  2570. // Message size
  2571. unsigned int nMessageSize = hdr.nMessageSize;
  2572. // Checksum
  2573. CDataStream& vRecv = msg.vRecv;
  2574. const uint256& hash = msg.GetMessageHash();
  2575. if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0)
  2576. {
  2577. LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__,
  2578. SanitizeString(strCommand), nMessageSize,
  2579. HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE),
  2580. HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE));
  2581. return fMoreWork;
  2582. }
  2583. // Process message
  2584. bool fRet = false;
  2585. try
  2586. {
  2587. fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime, chainparams, connman, interruptMsgProc);
  2588. if (interruptMsgProc)
  2589. return false;
  2590. if (!pfrom->vRecvGetData.empty())
  2591. fMoreWork = true;
  2592. }
  2593. catch (const std::ios_base::failure& e)
  2594. {
  2595. connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_MALFORMED, std::string("error parsing message")));
  2596. if (strstr(e.what(), "end of data"))
  2597. {
  2598. // Allow exceptions from under-length message on vRecv
  2599. LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
  2600. }
  2601. else if (strstr(e.what(), "size too large"))
  2602. {
  2603. // Allow exceptions from over-long size
  2604. LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
  2605. }
  2606. else if (strstr(e.what(), "non-canonical ReadCompactSize()"))
  2607. {
  2608. // Allow exceptions from non-canonical encoding
  2609. LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
  2610. }
  2611. else
  2612. {
  2613. PrintExceptionContinue(&e, "ProcessMessages()");
  2614. }
  2615. }
  2616. catch (const std::exception& e) {
  2617. PrintExceptionContinue(&e, "ProcessMessages()");
  2618. } catch (...) {
  2619. PrintExceptionContinue(nullptr, "ProcessMessages()");
  2620. }
  2621. if (!fRet) {
  2622. LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->GetId());
  2623. }
  2624. LOCK(cs_main);
  2625. SendRejectsAndCheckIfBanned(pfrom, connman);
  2626. return fMoreWork;
  2627. }
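// If an outbound peer's best known block never catches up to (what was) our
// tip within the timeout, send one getheaders nudge and, failing that,
// disconnect it. Peers marked m_chain_sync.m_protect are exempt.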
  2628. void PeerLogicValidation::ConsiderEviction(CNode *pto, int64_t time_in_seconds)
  2629. {
  2630. AssertLockHeld(cs_main);
  2631. CNodeState &state = *State(pto->GetId());
  2632. const CNetMsgMaker msgMaker(pto->GetSendVersion());
  2633. if (!state.m_chain_sync.m_protect && IsOutboundDisconnectionCandidate(pto) && state.fSyncStarted) {
  2634. // This is an outbound peer subject to disconnection if they don't
  2635. // announce a block with as much work as the current tip within
  2636. // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
  2637. // their chain has more work than ours, we should sync to it,
  2638. // unless it's invalid, in which case we should find that out and
  2639. // disconnect from them elsewhere).
  2640. if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork) {
  2641. if (state.m_chain_sync.m_timeout != 0) {
  2642. state.m_chain_sync.m_timeout = 0;
  2643. state.m_chain_sync.m_work_header = nullptr;
  2644. state.m_chain_sync.m_sent_getheaders = false;
  2645. }
  2646. } else if (state.m_chain_sync.m_timeout == 0 || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
  2647. // Our best block known by this peer is behind our tip, and we're either noticing
  2648. // that for the first time, OR this peer was able to catch up to some earlier point
  2649. // where we checked against our tip.
  2650. // Either way, set a new timeout based on current tip.
  2651. state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
  2652. state.m_chain_sync.m_work_header = chainActive.Tip();
  2653. state.m_chain_sync.m_sent_getheaders = false;
  2654. } else if (state.m_chain_sync.m_timeout > 0 && time_in_seconds > state.m_chain_sync.m_timeout) {
  2655. // No evidence yet that our peer has synced to a chain with work equal to that
  2656. // of our tip, when we first detected it was behind. Send a single getheaders
  2657. // message to give the peer a chance to update us.
  2658. if (state.m_chain_sync.m_sent_getheaders) {
  2659. // They've run out of time to catch up!
  2660. LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto->GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>");
  2661. pto->fDisconnect = true;
  2662. } else {
  2663. LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto->GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
  2664. connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
  2665. state.m_chain_sync.m_sent_getheaders = true;
  2666. constexpr int64_t HEADERS_RESPONSE_TIME = 120; // 2 minutes
  2667. // Bump the timeout to allow a response, which could clear the timeout
  2668. // (if the response shows the peer has synced), reset the timeout (if
  2669. // the peer syncs to the required work but not to our tip), or result
  2670. // in disconnect (if we advance to the timeout and pindexBestKnownBlock
  2671. // has not sufficiently progressed)
  2672. state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
  2673. }
  2674. }
  2675. }
  2676. }
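// If we are holding more full-relay outbound connections than our target,
// pick the one that least recently announced a new block to us (ties broken
// toward the newer connection) and disconnect it, provided it has been
// connected long enough and has no blocks in flight.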
  2677. void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds)
  2678. {
  2679. // Check whether we have too many outbound peers
  2680. int extra_peers = connman->GetExtraOutboundCount();
  2681. if (extra_peers > 0) {
  2682. // If we have more outbound peers than we target, disconnect one.
  2683. // Pick the outbound peer that least recently announced
  2684. // us a new block, with ties broken by choosing the more recent
  2685. // connection (higher node id)
  2686. NodeId worst_peer = -1;
  2687. int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
  2688. LOCK(cs_main);
  2689. connman->ForEachNode([&](CNode* pnode) {
  2690. // Ignore non-outbound peers, or nodes marked for disconnect already
  2691. if (!IsOutboundDisconnectionCandidate(pnode) || pnode->fDisconnect) return;
  2692. CNodeState *state = State(pnode->GetId());
  2693. if (state == nullptr) return; // shouldn't be possible, but just in case
  2694. // Don't evict our protected peers
  2695. if (state->m_chain_sync.m_protect) return;
  2696. if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
  2697. worst_peer = pnode->GetId();
  2698. oldest_block_announcement = state->m_last_block_announcement;
  2699. }
  2700. });
  2701. if (worst_peer != -1) {
  2702. bool disconnected = connman->ForNode(worst_peer, [&](CNode *pnode) {
  2703. // Only disconnect a peer that has been connected to us for
  2704. // some reasonable fraction of our check-frequency, to give
  2705. // it time for new information to have arrived.
  2706. // Also don't disconnect any peer we're trying to download a
  2707. // block from.
  2708. CNodeState &state = *State(pnode->GetId());
  2709. if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME && state.nBlocksInFlight == 0) {
  2710. LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
  2711. pnode->fDisconnect = true;
  2712. return true;
  2713. } else {
  2714. LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", pnode->GetId(), pnode->nTimeConnected, state.nBlocksInFlight);
  2715. return false;
  2716. }
  2717. });
  2718. if (disconnected) {
  2719. // If we disconnected an extra peer, that means we successfully
  2720. // connected to at least one peer after the last time we
  2721. // detected a stale tip. Don't try any more extra peers until
  2722. // we next detect a stale tip, to limit the load we put on the
  2723. // network from these extra connections.
  2724. connman->SetTryNewOutboundPeer(false);
  2725. }
  2726. }
  2727. }
  2728. }
  2729. void PeerLogicValidation::CheckForStaleTipAndEvictPeers(const Consensus::Params &consensusParams)
  2730. {
  2731. if (connman == nullptr) return;
  2732. int64_t time_in_seconds = GetTime();
  2733. EvictExtraOutboundPeers(time_in_seconds);
  2734. if (time_in_seconds > m_stale_tip_check_time) {
  2735. LOCK(cs_main);
  2736. // Check whether our tip is stale, and if so, allow using an extra
  2737. // outbound peer
  2738. if (TipMayBeStale(consensusParams)) {
  2739. LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update);
  2740. connman->SetTryNewOutboundPeer(true);
  2741. } else if (connman->GetTryNewOutboundPeer()) {
  2742. connman->SetTryNewOutboundPeer(false);
  2743. }
  2744. m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
  2745. }
  2746. }
  2747. class CompareInvMempoolOrder
  2748. {
  2749. CTxMemPool *mp;
  2750. public:
  2751. CompareInvMempoolOrder(CTxMemPool *_mempool)
  2752. {
  2753. mp = _mempool;
  2754. }
  2755. bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
  2756. {
  2757. /* As std::make_heap produces a max-heap, we want the entries with the
  2758. * fewest ancestors/highest fee to sort later. */
  2759. return mp->CompareDepthAndScore(*b, *a);
  2760. }
  2761. };
  2762. bool PeerLogicValidation::SendMessages(CNode* pto, std::atomic<bool>& interruptMsgProc)
  2763. {
  2764. const Consensus::Params& consensusParams = Params().GetConsensus();
  2765. {
  2766. // Don't send anything until the version handshake is complete
  2767. if (!pto->fSuccessfullyConnected || pto->fDisconnect)
  2768. return true;
  2769. // If we get here, the outgoing message serialization version is set and can't change.
  2770. const CNetMsgMaker msgMaker(pto->GetSendVersion());
  2771. //
  2772. // Message: ping
  2773. //
  2774. bool pingSend = false;
  2775. if (pto->fPingQueued) {
  2776. // RPC ping request by user
  2777. pingSend = true;
  2778. }
  2779. if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
  2780. // Ping automatically sent as a latency probe & keepalive.
  2781. pingSend = true;
  2782. }
  2783. if (pingSend) {
  2784. uint64_t nonce = 0;
  2785. while (nonce == 0) {
  2786. GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
  2787. }
  2788. pto->fPingQueued = false;
  2789. pto->nPingUsecStart = GetTimeMicros();
  2790. if (pto->nVersion > BIP0031_VERSION) {
  2791. pto->nPingNonceSent = nonce;
  2792. connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
  2793. } else {
  2794. // Peer is too old to support ping command with nonce, pong will never arrive.
  2795. pto->nPingNonceSent = 0;
  2796. connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING));
  2797. }
  2798. }
  2799. TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
  2800. if (!lockMain)
  2801. return true;
  2802. if (SendRejectsAndCheckIfBanned(pto, connman))
  2803. return true;
  2804. CNodeState &state = *State(pto->GetId());
  2805. // Address refresh broadcast
  2806. int64_t nNow = GetTimeMicros();
  2807. if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
  2808. AdvertiseLocal(pto);
  2809. pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
  2810. }
  2811. //
  2812. // Message: addr
  2813. //
  2814. if (pto->nNextAddrSend < nNow) {
  2815. pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
  2816. std::vector<CAddress> vAddr;
  2817. vAddr.reserve(pto->vAddrToSend.size());
  2818. for (const CAddress& addr : pto->vAddrToSend)
  2819. {
  2820. if (!pto->addrKnown.contains(addr.GetKey()))
  2821. {
  2822. pto->addrKnown.insert(addr.GetKey());
  2823. vAddr.push_back(addr);
2824. // receiver rejects addr messages larger than 1000 entries
  2825. if (vAddr.size() >= 1000)
  2826. {
  2827. connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
  2828. vAddr.clear();
  2829. }
  2830. }
  2831. }
  2832. pto->vAddrToSend.clear();
  2833. if (!vAddr.empty())
  2834. connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
  2835. // we only send the big addr message once
  2836. if (pto->vAddrToSend.capacity() > 40)
  2837. pto->vAddrToSend.shrink_to_fit();
  2838. }
  2839. // Start block sync
  2840. if (pindexBestHeader == nullptr)
  2841. pindexBestHeader = chainActive.Tip();
  2842. bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
  2843. if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
  2844. // Only actively request headers from a single peer, unless we're close to today.
  2845. if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
  2846. state.fSyncStarted = true;
  2847. state.nHeadersSyncTimeout = GetTimeMicros() + HEADERS_DOWNLOAD_TIMEOUT_BASE + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (GetAdjustedTime() - pindexBestHeader->GetBlockTime())/(consensusParams.nPowTargetSpacing);
  2848. nSyncStarted++;
  2849. const CBlockIndex *pindexStart = pindexBestHeader;
  2850. /* If possible, start at the block preceding the currently
  2851. best known header. This ensures that we always get a
  2852. non-empty list of headers back as long as the peer
  2853. is up-to-date. With a non-empty response, we can initialise
  2854. the peer's known best block. This wouldn't be possible
  2855. if we requested starting at pindexBestHeader and
  2856. got back an empty response. */
  2857. if (pindexStart->pprev)
  2858. pindexStart = pindexStart->pprev;
  2859. LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
  2860. connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256()));
  2861. }
  2862. }
  2863. // Resend wallet transactions that haven't gotten in a block yet
  2864. // Except during reindex, importing and IBD, when old wallet
2865. // transactions become unconfirmed and spam other nodes.
  2866. if (!fReindex && !fImporting && !IsInitialBlockDownload())
  2867. {
  2868. GetMainSignals().Broadcast(nTimeBestReceived, connman);
  2869. }
  2870. //
  2871. // Try sending block announcements via headers
  2872. //
  2873. {
  2874. // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
  2875. // list of block hashes we're relaying, and our peer wants
  2876. // headers announcements, then find the first header
  2877. // not yet known to our peer but would connect, and send.
  2878. // If no header would connect, or if we have too many
  2879. // blocks, or if the peer doesn't want headers, just
  2880. // add all to the inv queue.
  2881. LOCK(pto->cs_inventory);
  2882. std::vector<CBlock> vHeaders;
  2883. bool fRevertToInv = ((!state.fPreferHeaders &&
  2884. (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
  2885. pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
  2886. const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
  2887. ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date
  2888. if (!fRevertToInv) {
  2889. bool fFoundStartingHeader = false;
  2890. // Try to find first header that our peer doesn't have, and
  2891. // then send all headers past that one. If we come across any
  2892. // headers that aren't on chainActive, give up.
  2893. for (const uint256 &hash : pto->vBlockHashesToAnnounce) {
  2894. BlockMap::iterator mi = mapBlockIndex.find(hash);
  2895. assert(mi != mapBlockIndex.end());
  2896. const CBlockIndex *pindex = mi->second;
  2897. if (chainActive[pindex->nHeight] != pindex) {
  2898. // Bail out if we reorged away from this block
  2899. fRevertToInv = true;
  2900. break;
  2901. }
  2902. if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
2903. // This means that the list of blocks to announce doesn't
  2904. // connect to each other.
  2905. // This shouldn't really be possible to hit during
  2906. // regular operation (because reorgs should take us to
  2907. // a chain that has some block not on the prior chain,
  2908. // which should be caught by the prior check), but one
  2909. // way this could happen is by using invalidateblock /
  2910. // reconsiderblock repeatedly on the tip, causing it to
  2911. // be added multiple times to vBlockHashesToAnnounce.
  2912. // Robustly deal with this rare situation by reverting
  2913. // to an inv.
  2914. fRevertToInv = true;
  2915. break;
  2916. }
  2917. pBestIndex = pindex;
  2918. if (fFoundStartingHeader) {
  2919. // add this to the headers message
  2920. vHeaders.push_back(pindex->GetBlockHeader());
  2921. } else if (PeerHasHeader(&state, pindex)) {
  2922. continue; // keep looking for the first new block
  2923. } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
  2924. // Peer doesn't have this header but they do have the prior one.
  2925. // Start sending headers.
  2926. fFoundStartingHeader = true;
  2927. vHeaders.push_back(pindex->GetBlockHeader());
  2928. } else {
  2929. // Peer doesn't have this header or the prior one -- nothing will
  2930. // connect, so bail out.
  2931. fRevertToInv = true;
  2932. break;
  2933. }
  2934. }
  2935. }
  2936. if (!fRevertToInv && !vHeaders.empty()) {
  2937. if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
  2938. // We only send up to 1 block as header-and-ids, as otherwise
2939. // it probably means we're doing an initial-ish sync or they're slow
  2940. LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
  2941. vHeaders.front().GetHash().ToString(), pto->GetId());
  2942. int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
  2943. bool fGotBlockFromCache = false;
  2944. {
  2945. LOCK(cs_most_recent_block);
  2946. if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
  2947. if (state.fWantsCmpctWitness || !fWitnessesPresentInMostRecentCompactBlock)
  2948. connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *most_recent_compact_block));
  2949. else {
  2950. CBlockHeaderAndShortTxIDs cmpctblock(*most_recent_block, state.fWantsCmpctWitness);
  2951. connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
  2952. }
  2953. fGotBlockFromCache = true;
  2954. }
  2955. }
  2956. if (!fGotBlockFromCache) {
  2957. CBlock block;
  2958. bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams);
  2959. assert(ret);
  2960. CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
  2961. connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
  2962. }
  2963. state.pindexBestHeaderSent = pBestIndex;
  2964. } else if (state.fPreferHeaders) {
  2965. if (vHeaders.size() > 1) {
  2966. LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
  2967. vHeaders.size(),
  2968. vHeaders.front().GetHash().ToString(),
  2969. vHeaders.back().GetHash().ToString(), pto->GetId());
  2970. } else {
  2971. LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
  2972. vHeaders.front().GetHash().ToString(), pto->GetId());
  2973. }
  2974. connman->PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
  2975. state.pindexBestHeaderSent = pBestIndex;
  2976. } else
  2977. fRevertToInv = true;
  2978. }
  2979. if (fRevertToInv) {
  2980. // If falling back to using an inv, just try to inv the tip.
  2981. // The last entry in vBlockHashesToAnnounce was our tip at some point
  2982. // in the past.
  2983. if (!pto->vBlockHashesToAnnounce.empty()) {
  2984. const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
  2985. BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
  2986. assert(mi != mapBlockIndex.end());
  2987. const CBlockIndex *pindex = mi->second;
  2988. // Warn if we're announcing a block that is not on the main chain.
  2989. // This should be very rare and could be optimized out.
  2990. // Just log for now.
  2991. if (chainActive[pindex->nHeight] != pindex) {
  2992. LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
  2993. hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString());
  2994. }
  2995. // If the peer's chain has this block, don't inv it back.
  2996. if (!PeerHasHeader(&state, pindex)) {
  2997. pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
  2998. LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
  2999. pto->GetId(), hashToAnnounce.ToString());
  3000. }
  3001. }
  3002. }
  3003. pto->vBlockHashesToAnnounce.clear();
  3004. }
  3005. //
  3006. // Message: inventory
  3007. //
  3008. std::vector<CInv> vInv;
  3009. {
  3010. LOCK(pto->cs_inventory);
  3011. vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX));
  3012. // Add blocks
  3013. for (const uint256& hash : pto->vInventoryBlockToSend) {
  3014. vInv.push_back(CInv(MSG_BLOCK, hash));
  3015. if (vInv.size() == MAX_INV_SZ) {
  3016. connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
  3017. vInv.clear();
  3018. }
  3019. }
  3020. pto->vInventoryBlockToSend.clear();
  3021. // Check whether periodic sends should happen
  3022. bool fSendTrickle = pto->fWhitelisted;
  3023. if (pto->nNextInvSend < nNow) {
  3024. fSendTrickle = true;
  3025. // Use half the delay for outbound peers, as there is less privacy concern for them.
  3026. pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
  3027. }
3028. // It's time to send, but the peer has asked us not to relay transactions, so drop anything queued.
  3029. if (fSendTrickle) {
  3030. LOCK(pto->cs_filter);
  3031. if (!pto->fRelayTxes) pto->setInventoryTxToSend.clear();
  3032. }
  3033. // Respond to BIP35 mempool requests
  3034. if (fSendTrickle && pto->fSendMempool) {
  3035. auto vtxinfo = mempool.infoAll();
  3036. pto->fSendMempool = false;
  3037. CAmount filterrate = 0;
  3038. {
  3039. LOCK(pto->cs_feeFilter);
  3040. filterrate = pto->minFeeFilter;
  3041. }
  3042. LOCK(pto->cs_filter);
  3043. for (const auto& txinfo : vtxinfo) {
  3044. const uint256& hash = txinfo.tx->GetHash();
  3045. CInv inv(MSG_TX, hash);
  3046. pto->setInventoryTxToSend.erase(hash);
  3047. if (filterrate) {
  3048. if (txinfo.feeRate.GetFeePerK() < filterrate)
  3049. continue;
  3050. }
  3051. if (pto->pfilter) {
  3052. if (!pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
  3053. }
  3054. pto->filterInventoryKnown.insert(hash);
  3055. vInv.push_back(inv);
  3056. if (vInv.size() == MAX_INV_SZ) {
  3057. connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
  3058. vInv.clear();
  3059. }
  3060. }
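// Record when we served this mempool request; getdata for transactions that
// were already in the mempool at that time is answered based on this
// timestamp.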
  3061. pto->timeLastMempoolReq = GetTime();
  3062. }
  3063. // Determine transactions to relay
  3064. if (fSendTrickle) {
  3065. // Produce a vector with all candidates for sending
  3066. std::vector<std::set<uint256>::iterator> vInvTx;
  3067. vInvTx.reserve(pto->setInventoryTxToSend.size());
  3068. for (std::set<uint256>::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) {
  3069. vInvTx.push_back(it);
  3070. }
  3071. CAmount filterrate = 0;
  3072. {
  3073. LOCK(pto->cs_feeFilter);
  3074. filterrate = pto->minFeeFilter;
  3075. }
  3076. // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
  3077. // A heap is used so that not all items need sorting if only a few are being sent.
  3078. CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
  3079. std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
  3080. // No reason to drain out at many times the network's capacity,
  3081. // especially since we have many peers and some will draw much shorter delays.
  3082. unsigned int nRelayedTransactions = 0;
  3083. LOCK(pto->cs_filter);
  3084. while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
  3085. // Fetch the top element from the heap
  3086. std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
  3087. std::set<uint256>::iterator it = vInvTx.back();
  3088. vInvTx.pop_back();
  3089. uint256 hash = *it;
  3090. // Remove it from the to-be-sent set
  3091. pto->setInventoryTxToSend.erase(it);
  3092. // Check if not in the filter already
  3093. if (pto->filterInventoryKnown.contains(hash)) {
  3094. continue;
  3095. }
  3096. // Not in the mempool anymore? don't bother sending it.
  3097. auto txinfo = mempool.info(hash);
  3098. if (!txinfo.tx) {
  3099. continue;
  3100. }
  3101. if (filterrate && txinfo.feeRate.GetFeePerK() < filterrate) {
  3102. continue;
  3103. }
  3104. if (pto->pfilter && !pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
  3105. // Send
  3106. vInv.push_back(CInv(MSG_TX, hash));
  3107. nRelayedTransactions++;
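// Keep the announced transaction in mapRelay for 15 minutes so a getdata
// for it can be served even if it is evicted from the mempool meanwhile.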
  3108. {
  3109. // Expire old relay messages
  3110. while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow)
  3111. {
  3112. mapRelay.erase(vRelayExpiration.front().second);
  3113. vRelayExpiration.pop_front();
  3114. }
  3115. auto ret = mapRelay.insert(std::make_pair(hash, std::move(txinfo.tx)));
  3116. if (ret.second) {
  3117. vRelayExpiration.push_back(std::make_pair(nNow + 15 * 60 * 1000000, ret.first));
  3118. }
  3119. }
  3120. if (vInv.size() == MAX_INV_SZ) {
  3121. connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
  3122. vInv.clear();
  3123. }
  3124. pto->filterInventoryKnown.insert(hash);
  3125. }
  3126. }
  3127. }
  3128. if (!vInv.empty())
  3129. connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
  3130. // Detect whether we're stalling
  3131. nNow = GetTimeMicros();
  3132. if (state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
  3133. // Stalling only triggers when the block download window cannot move. During normal steady state,
  3134. // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
  3135. // should only happen during initial block download.
  3136. LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->GetId());
  3137. pto->fDisconnect = true;
  3138. return true;
  3139. }
  3140. // In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
  3141. // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
  3142. // We compensate for other peers to prevent killing off peers due to our own downstream link
  3143. // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
  3144. // to unreasonably increase our timeout.
  3145. if (state.vBlocksInFlight.size() > 0) {
  3146. QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
  3147. int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
  3148. if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
  3149. LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->GetId());
  3150. pto->fDisconnect = true;
  3151. return true;
  3152. }
  3153. }
  3154. // Check for headers sync timeouts
  3155. if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
  3156. // Detect whether this is a stalling initial-headers-sync peer
  3157. if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24*60*60) {
  3158. if (nNow > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
  3159. // Disconnect a (non-whitelisted) peer if it is our only sync peer,
  3160. // and we have others we could be using instead.
  3161. // Note: If all our peers are inbound, then we won't
  3162. // disconnect our sync peer for stalling; we have bigger
  3163. // problems if we can't get any outbound peers.
  3164. if (!pto->fWhitelisted) {
  3165. LogPrintf("Timeout downloading headers from peer=%d, disconnecting\n", pto->GetId());
  3166. pto->fDisconnect = true;
  3167. return true;
  3168. } else {
  3169. LogPrintf("Timeout downloading headers from whitelisted peer=%d, not disconnecting\n", pto->GetId());
  3170. // Reset the headers sync state so that we have a
  3171. // chance to try downloading from a different peer.
  3172. // Note: this will also result in at least one more
  3173. // getheaders message to be sent to
  3174. // this peer (eventually).
  3175. state.fSyncStarted = false;
  3176. nSyncStarted--;
  3177. state.nHeadersSyncTimeout = 0;
  3178. }
  3179. }
  3180. } else {
  3181. // After we've caught up once, reset the timeout so we can't trigger
  3182. // disconnect later.
  3183. state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
  3184. }
  3185. }
  3186. // Check that outbound peers have reasonable chains
  3187. // GetTime() is used by this anti-DoS logic so we can test this using mocktime
  3188. ConsiderEviction(pto, GetTime());
  3189. //
  3190. // Message: getdata (blocks)
  3191. //
  3192. std::vector<CInv> vGetData;
  3193. if (!pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
  3194. std::vector<const CBlockIndex*> vToDownload;
  3195. NodeId staller = -1;
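// FindNextBlocksToDownload walks the peer's best known chain and returns up
// to the requested number of blocks we still need, staying within the
// moving block download window; `staller` is set if another peer is
// preventing that window from advancing.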
  3196. FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
  3197. for (const CBlockIndex *pindex : vToDownload) {
  3198. uint32_t nFetchFlags = GetFetchFlags(pto);
  3199. vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
  3200. MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), pindex);
  3201. LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
  3202. pindex->nHeight, pto->GetId());
  3203. }
  3204. if (state.nBlocksInFlight == 0 && staller != -1) {
  3205. if (State(staller)->nStallingSince == 0) {
  3206. State(staller)->nStallingSince = nNow;
  3207. LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
  3208. }
  3209. }
  3210. }
  3211. //
  3212. // Message: getdata (non-blocks)
  3213. //
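// mapAskFor is keyed by the earliest time we are willing to request each
// inventory item; AskFor spaces out duplicate announcements so that, if the
// first peer does not deliver, we retry from another peer after a delay.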
  3214. while (!pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
  3215. {
  3216. const CInv& inv = (*pto->mapAskFor.begin()).second;
  3217. if (!AlreadyHave(inv))
  3218. {
  3219. LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->GetId());
  3220. vGetData.push_back(inv);
  3221. if (vGetData.size() >= 1000)
  3222. {
  3223. connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
  3224. vGetData.clear();
  3225. }
  3226. } else {
  3227. //If we're not going to ask, don't expect a response.
  3228. pto->setAskFor.erase(inv.hash);
  3229. }
  3230. pto->mapAskFor.erase(pto->mapAskFor.begin());
  3231. }
  3232. if (!vGetData.empty())
  3233. connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
  3234. //
  3235. // Message: feefilter
  3236. //
3237. // We don't want whitelisted peers to filter txs to us if we have -whitelistforcerelay
  3238. if (pto->nVersion >= FEEFILTER_VERSION && gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
  3239. !(pto->fWhitelisted && gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY))) {
  3240. CAmount currentFilter = mempool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
  3241. int64_t timeNow = GetTimeMicros();
  3242. if (timeNow > pto->nextSendTimeFeeFilter) {
  3243. static CFeeRate default_feerate(DEFAULT_MIN_RELAY_TX_FEE);
  3244. static FeeFilterRounder filterRounder(default_feerate);
  3245. CAmount filterToSend = filterRounder.round(currentFilter);
  3246. // We always have a fee filter of at least minRelayTxFee
  3247. filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
  3248. if (filterToSend != pto->lastSentFeeFilter) {
  3249. connman->PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
  3250. pto->lastSentFeeFilter = filterToSend;
  3251. }
  3252. pto->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
  3253. }
  3254. // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
  3255. // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
  3256. else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->nextSendTimeFeeFilter &&
  3257. (currentFilter < 3 * pto->lastSentFeeFilter / 4 || currentFilter > 4 * pto->lastSentFeeFilter / 3)) {
  3258. pto->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
  3259. }
  3260. }
  3261. }
  3262. return true;
  3263. }
  3264. class CNetProcessingCleanup
  3265. {
  3266. public:
  3267. CNetProcessingCleanup() {}
  3268. ~CNetProcessingCleanup() {
  3269. // orphan transactions
  3270. mapOrphanTransactions.clear();
  3271. mapOrphanTransactionsByPrev.clear();
  3272. }
  3273. } instance_of_cnetprocessingcleanup;