You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test pruning code
# ********
# WARNING:
# This test uses 4GB of disk space and takes in excess of 30 mins to run
# ********
  11. from test_framework import BitcoinTestFramework
  12. from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
  13. from util import *
  14. import os.path
  15. def calc_usage(blockdir):
  16. return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f))/(1024*1024)
class PruneTest(BitcoinTestFramework):
    """Exercise the -prune option on a 3-node regtest network.

    Nodes 0 and 1 mine; node 2 runs with -prune=550 and is checked for:
    pruning not starting too early, the 550MB disk target being respected
    even across high stale-block rates and a deep (288-block) reorg, and
    the ability to re-download previously pruned blocks when a reorg
    requires them.
    """

    def __init__(self):
        # NOTE(review): BitcoinTestFramework.__init__ is not invoked here --
        # presumably the framework tolerates that; confirm against test_framework.
        self.utxo = []            # cached listunspent() results, consumed by mine_full_block
        self.address = ["",""]    # receive address for node 0 and node 1

        # Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
        # So we have big transactions and full blocks to fill up our block files

        # create one script_pubkey
        script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
        for i in xrange (512):
            script_pubkey = script_pubkey + "01"
        # concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
        self.txouts = "81"        # 0x81 = CompactSize count of 129 outputs (128 big + 1 change)
        for k in xrange(128):
            # add txout value (0 satoshis, 8 bytes little-endian)
            self.txouts = self.txouts + "0000000000000000"
            # add length of script_pubkey (0xfd0402 = CompactSize 516)
            self.txouts = self.txouts + "fd0402"
            # add script_pubkey
            self.txouts = self.txouts + script_pubkey

    def setup_chain(self):
        """Create a clean 3-node data directory tree for this test."""
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 3)

    def setup_network(self):
        """Start the three nodes and connect them in a ring."""
        self.nodes = []
        self.is_network_split = False

        # Create nodes 0 and 1 to mine
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=300))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=300))

        # Create node 2 to test pruning
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=300))
        self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"

        self.address[0] = self.nodes[0].getnewaddress()
        self.address[1] = self.nodes[1].getnewaddress()

        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[2], 0)
        sync_blocks(self.nodes[0:3])

    def create_big_chain(self):
        """Mine 350 plain blocks for spendable coinbases, then 645 full blocks
        so node 2's block files exceed the 550MB prune target."""
        # Start by creating some coinbases we can spend later
        self.nodes[1].generate(200)
        sync_blocks(self.nodes[0:2])
        self.nodes[0].generate(150)
        # Then mine enough full blocks to create more than 550MB of data
        for i in xrange(645):
            self.mine_full_block(self.nodes[0], self.address[0])

        sync_blocks(self.nodes[0:3])

    def test_height_min(self):
        """Check pruning has not started below PruneAfterHeight, then mine
        past the cutoff and wait (up to 10s) for blk00000.dat to be pruned."""
        if not os.path.isfile(self.prunedir+"blk00000.dat"):
            raise AssertionError("blk00000.dat is missing, pruning too early")
        print "Success"
        print "Though we're already using more than 550MB, current usage:", calc_usage(self.prunedir)
        print "Mining 25 more blocks should cause the first block file to be pruned"
        # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
        for i in xrange(25):
            self.mine_full_block(self.nodes[0],self.address[0])

        waitstart = time.time()
        while os.path.isfile(self.prunedir+"blk00000.dat"):
            time.sleep(0.1)
            if time.time() - waitstart > 10:
                raise AssertionError("blk00000.dat not pruned when it should be")

        print "Success"
        usage = calc_usage(self.prunedir)
        print "Usage should be below target:", usage
        if (usage > 550):
            raise AssertionError("Pruning target not being met")

    def create_chain_with_staleblocks(self):
        """Create 12 rounds of 24 stale blocks (node 1) each reorged away by
        25 main-chain blocks (node 0), driving disk usage over target."""
        # Create stale blocks in manageable sized chunks
        print "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds"

        for j in xrange(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
            # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
            stop_node(self.nodes[0],0)
            self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=300)
            # Mine 24 blocks in node 1
            self.utxo = self.nodes[1].listunspent()
            for i in xrange(24):
                if j == 0:
                    self.mine_full_block(self.nodes[1],self.address[1])
                else:
                    self.nodes[1].generate(1) #tx's already in mempool from previous disconnects

            # Reorg back with 25 block chain from node 0
            self.utxo = self.nodes[0].listunspent()
            for i in xrange(25):
                self.mine_full_block(self.nodes[0],self.address[0])

            # Create connections in the order so both nodes can see the reorg at the same time
            connect_nodes(self.nodes[1], 0)
            connect_nodes(self.nodes[2], 0)
            sync_blocks(self.nodes[0:3])

        print "Usage can be over target because of high stale rate:", calc_usage(self.prunedir)

    def reorg_test(self):
        """Invalidate back 288 blocks on node 1, mine a longer replacement
        chain, reconnect, and verify pruning still meets its target.

        Returns (invalidheight, badhash): the height and hash of the first
        invalidated main-chain block (the fork point + 1).
        """
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
        # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
        stop_node(self.nodes[1],1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=300)

        height = self.nodes[1].getblockcount()
        print "Current block height:", height

        invalidheight = height-287
        badhash = self.nodes[1].getblockhash(invalidheight)
        print "Invalidating block at height:",invalidheight,badhash
        self.nodes[1].invalidateblock(badhash)

        # We've now switched to our previously mined-24 block fork on node 1, but thats not what we want
        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
        curhash = self.nodes[1].getblockhash(invalidheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(invalidheight - 1)

        assert(self.nodes[1].getblockcount() == invalidheight - 1)
        print "New best height", self.nodes[1].getblockcount()

        # Reboot node1 to clear those giant tx's from mempool
        stop_node(self.nodes[1],1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=300)

        print "Generating new longer chain of 300 more blocks"
        self.nodes[1].generate(300)

        print "Reconnect nodes"
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[2], 1)
        sync_blocks(self.nodes[0:3])

        print "Verify height on node 2:",self.nodes[2].getblockcount()
        print "Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir)

        print "Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)"
        self.nodes[0].generate(220) #node 0 has many large tx's in its mempool from the disconnects
        sync_blocks(self.nodes[0:3])

        usage = calc_usage(self.prunedir)
        print "Usage should be below target:", usage
        if (usage > 550):
            raise AssertionError("Pruning target not being met")

        return invalidheight,badhash

    def reorg_back(self):
        """Verify node 2 pruned the old fork block, then force a reorg back to
        the original chain so node 2 must re-download the pruned blocks."""
        # Verify that a block on the old main chain fork has been pruned away
        try:
            self.nodes[2].getblock(self.forkhash)
            raise AssertionError("Old block wasn't pruned so can't test redownload")
        except JSONRPCException as e:
            print "Will need to redownload block",self.forkheight

        # Verify that we have enough history to reorg back to the fork point
        # Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large block are in the block files after it,
        # its expected to still be retained
        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))

        first_reorg_height = self.nodes[2].getblockcount()
        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
        self.nodes[2].invalidateblock(curchainhash)
        goalbestheight = self.mainchainheight
        goalbesthash = self.mainchainhash2

        # As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_stale_blocks because it doesn't know of any peer thats on that chain from which to
        # redownload its missing blocks.
        # Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
        # because it has all the block data.
        # However it must mine enough blocks to have a more work chain than the reorg_test chain in order
        # to trigger node 2's block download logic.
        # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
        if self.nodes[2].getblockcount() < self.mainchainheight:
            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
            print "Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine
            self.nodes[0].invalidateblock(curchainhash)
            assert(self.nodes[0].getblockcount() == self.mainchainheight)
            assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
            goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
            goalbestheight = first_reorg_height + 1

        print "Verify node 2 reorged back to the main chain, some blocks of which it had to redownload"
        # Poll (up to 300s) for node 2 to catch up to the goal height.
        waitstart = time.time()
        while self.nodes[2].getblockcount() < goalbestheight:
            time.sleep(0.1)
            if time.time() - waitstart > 300:
                raise AssertionError("Node 2 didn't reorg to proper height")
        assert(self.nodes[2].getbestblockhash() == goalbesthash)
        # Verify we can now have the data for a block previously pruned
        assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)

    def mine_full_block(self, node, address):
        """Fill node's mempool with 14 ~66k OP_RETURN-stuffed transactions
        spending self.utxo, then mine one (close to 1MB) block on node."""
        # Want to create a full block
        # We'll generate a 66k transaction below, and 14 of them is close to the 1MB block limit
        for j in xrange(14):
            if len(self.utxo) < 14:
                self.utxo = node.listunspent()
            inputs=[]
            outputs = {}
            t = self.utxo.pop()
            inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
            remchange = t["amount"] - Decimal("0.001000")  # leave 0.001 as fee
            outputs[address]=remchange

            # Create a basic transaction that will send change back to ourself after account for a fee
            # And then insert the 128 generated transaction outs in the middle rawtx[92] is where the #
            # of txouts is stored and is the only thing we overwrite from the original transaction
            rawtx = node.createrawtransaction(inputs, outputs)
            newtx = rawtx[0:92]
            newtx = newtx + self.txouts
            newtx = newtx + rawtx[94:]

            # Appears to be ever so slightly faster to sign with SIGHASH_NONE
            signresult = node.signrawtransaction(newtx,None,None,"NONE")
            txid = node.sendrawtransaction(signresult["hex"], True)

        # Mine a full sized block which will be these transactions we just created
        node.generate(1)

    def run_test(self):
        """Drive the whole pruning scenario; see the chain diagrams below for
        the expected topology at each stage."""
        print "Warning! This test requires 4GB of disk space and takes over 30 mins"
        print "Mining a big blockchain of 995 blocks"
        self.create_big_chain()
        # Chain diagram key:
        # *   blocks on main chain
        # +,&,$,@ blocks on other forks
        # X   invalidated block
        # N1  Node 1
        #
        # Start by mining a simple chain that all nodes have
        # N0=N1=N2 **...*(995)
        print "Check that we haven't started pruning yet because we're below PruneAfterHeight"
        self.test_height_min()

        # Extend this chain past the PruneAfterHeight
        # N0=N1=N2 **...*(1020)
        print "Check that we'll exceed disk space target if we have a very high stale block rate"
        self.create_chain_with_staleblocks()
        # Disconnect N0
        # And mine a 24 block chain on N1 and a separate 25 block chain on N0
        # N1=N2 **...*+...+(1044)
        # N0    **...**...**(1045)
        #
        # reconnect nodes causing reorg on N1 and N2
        # N1=N2 **...*(1020) *...**(1045)
        #                   \
        #                    +...+(1044)
        #
        # repeat this process until you have 12 stale forks hanging off the
        # main chain on N1 and N2
        # N0    *************************...***************************(1320)
        #
        # N1=N2 **...*(1020) *...**(1045) *..         ..**(1295) *...**(1320)
        #                   \            \                      \
        #                    +...+(1044)  &..                    $...$(1319)

        # Save some current chain state for later use
        self.mainchainheight = self.nodes[2].getblockcount()   #1320
        self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)

        print "Check that we can survive a 288 block reorg still"
        (self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
        # Now create a 288 block reorg by mining a longer chain on N1
        # First disconnect N1
        # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
        # N1   **...*(1020) **...**(1032)X..
        #                  \
        #                   ++...+(1031)X..
        #
        # Now mine 300 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@(1332)
        #                 \               \
        #                  \               X...
        #                   \               \
        #                    ++...+(1031)X..  ..
        #
        # Reconnect nodes and mine 220 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@@@(1552)
        #                 \               \
        #                  \               X...
        #                   \               \
        #                    ++...+(1031)X..  ..
        #
        # N2    **...*(1020) **...**(1032) @@...@@@(1552)
        #                 \               \
        #                  \               *...**(1320)
        #                   \               \
        #                    ++...++(1044)   ..
        #
        # N0    ********************(1032) @@...@@@(1552)
        #                                 \
        #                                  *...**(1320)

        print "Test that we can rerequest a block we previously pruned if needed for a reorg"
        self.reorg_back()
        # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
        # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
        # original main chain (*), but will require redownload of some blocks
        # In order to have a peer we think we can download from, must also perform this invalidation
        # on N0 and mine a new longest chain to trigger.
        # Final result:
        # N0    ********************(1032) **...****(1553)
        #                                 \
        #                                  X@...@@@(1552)
        #
        # N2    **...*(1020) **...**(1032) **...****(1553)
        #                 \               \
        #                  \               X@...@@@(1552)
        #                   \
        #                    +..
        #
        # N1 doesn't change because 1033 on main chain (*) is invalid

        print "Done"
# Script entry point: run the pruning test when executed directly.
if __name__ == '__main__':
    PruneTest().main()