 
 
 def calc_usage(blockdir):
-    return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
+    return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
 
 class PruneTest(BitcoinTestFramework):
     def set_test_params(self):
@@ -55,7 +55,7 @@ def setup_network(self):
 
         connect_nodes(self.nodes[0], 1)
         connect_nodes(self.nodes[1], 2)
-        connect_nodes(self.nodes[2], 0)
+        connect_nodes(self.nodes[0], 2)
         connect_nodes(self.nodes[0], 3)
         connect_nodes(self.nodes[0], 4)
         sync_blocks(self.nodes[0:5])
@@ -71,15 +71,15 @@ def create_big_chain(self):
         self.nodes[1].generate(200)
         sync_blocks(self.nodes[0:2])
         self.nodes[0].generate(150)
+
         # Then mine enough full blocks to create more than 550MiB of data
         for i in range(645):
             mine_large_block(self.nodes[0], self.utxo_cache_0)
 
         sync_blocks(self.nodes[0:5])
 
     def test_height_min(self):
-        if not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")):
-            raise AssertionError("blk00000.dat is missing, pruning too early")
+        assert os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
         self.log.info("Success")
         self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
         self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
@@ -93,8 +93,7 @@ def test_height_min(self):
         self.log.info("Success")
         usage = calc_usage(self.prunedir)
         self.log.info("Usage should be below target: %d" % usage)
-        if (usage > 550):
-            raise AssertionError("Pruning target not being met")
+        assert_greater_than(550, usage)
 
     def create_chain_with_staleblocks(self):
         # Create stale blocks in manageable sized chunks
@@ -121,8 +120,8 @@ def create_chain_with_staleblocks(self):
                 mine_large_block(self.nodes[0], self.utxo_cache_0)
 
             # Create connections in the order so both nodes can see the reorg at the same time
-            connect_nodes(self.nodes[1], 0)
-            connect_nodes(self.nodes[2], 0)
+            connect_nodes(self.nodes[0], 1)
+            connect_nodes(self.nodes[0], 2)
             sync_blocks(self.nodes[0:3])
 
         self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
@@ -138,20 +137,20 @@ def reorg_test(self):
         height = self.nodes[1].getblockcount()
         self.log.info("Current block height: %d" % height)
 
-        invalidheight = height - 287
-        badhash = self.nodes[1].getblockhash(invalidheight)
-        self.log.info("Invalidating block %s at height %d" % (badhash, invalidheight))
-        self.nodes[1].invalidateblock(badhash)
+        self.forkheight = height - 287
+        self.forkhash = self.nodes[1].getblockhash(self.forkheight)
+        self.log.info("Invalidating block %s at height %d" % (self.forkhash, self.forkheight))
+        self.nodes[1].invalidateblock(self.forkhash)
 
         # We've now switched to our previously mined-24 block fork on node 1, but that's not what we want
         # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
-        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
-        curhash = self.nodes[1].getblockhash(invalidheight - 1)
+        mainchainhash = self.nodes[0].getblockhash(self.forkheight - 1)
+        curhash = self.nodes[1].getblockhash(self.forkheight - 1)
         while curhash != mainchainhash:
             self.nodes[1].invalidateblock(curhash)
-            curhash = self.nodes[1].getblockhash(invalidheight - 1)
+            curhash = self.nodes[1].getblockhash(self.forkheight - 1)
 
-        assert self.nodes[1].getblockcount() == invalidheight - 1
+        assert self.nodes[1].getblockcount() == self.forkheight - 1
         self.log.info("New best height: %d" % self.nodes[1].getblockcount())
 
         # Reboot node1 to clear those giant tx's from mempool
@@ -163,13 +162,12 @@ def reorg_test(self):
 
         self.log.info("Reconnect nodes")
         connect_nodes(self.nodes[0], 1)
-        connect_nodes(self.nodes[2], 1)
+        connect_nodes(self.nodes[1], 2)
         sync_blocks(self.nodes[0:3], timeout=120)
 
         self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
-        self.log.info("Usage possibly still high bc of stale blocks in block files: %d" % calc_usage(self.prunedir))
-
-        self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
+        self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))
+        self.log.info("Mine 220 more large blocks so we have requisite history")
 
         # Get node0's wallet transactions back in its mempool, to avoid the
         # mined blocks from being too small.
@@ -183,10 +181,7 @@ def reorg_test(self):
 
         usage = calc_usage(self.prunedir)
         self.log.info("Usage should be below target: %d" % usage)
-        if (usage > 550):
-            raise AssertionError("Pruning target not being met")
-
-        return invalidheight, badhash
+        assert_greater_than(550, usage)
 
     def reorg_back(self):
         # Verify that a block on the old main chain fork has been pruned away
@@ -219,17 +214,17 @@ def reorg_back(self):
         blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
         self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
         self.nodes[0].invalidateblock(curchainhash)
-        assert self.nodes[0].getblockcount() == self.mainchainheight
-        assert self.nodes[0].getbestblockhash() == self.mainchainhash2
+        assert_equal(self.nodes[0].getblockcount(), self.mainchainheight)
+        assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2)
         goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
         goalbestheight = first_reorg_height + 1
 
         self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
         # Wait for Node 2 to reorg to proper height
         wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
-        assert self.nodes[2].getbestblockhash() == goalbesthash
+        assert_equal(self.nodes[2].getbestblockhash(), goalbesthash)
         # Verify we can now have the data for a block previously pruned
-        assert self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight
+        assert_equal(self.nodes[2].getblock(self.forkhash)["height"], self.forkheight)
 
     def manual_test(self, node_number, use_timestamp):
         # at this point, node has 995 blocks and has not yet run in prune mode
@@ -287,38 +282,30 @@ def has_block(index):
287282
288283 # height=100 too low to prune first block file so this is a no-op
289284 prune (100 )
290- if not has_block (0 ):
291- raise AssertionError ("blk00000.dat is missing when should still be there" )
285+ assert has_block (0 ), "blk00000.dat is missing when should still be there"
292286
293287 # Does nothing
294288 node .pruneblockchain (height (0 ))
295- if not has_block (0 ):
296- raise AssertionError ("blk00000.dat is missing when should still be there" )
289+ assert has_block (0 ), "blk00000.dat is missing when should still be there"
297290
298291 # height=500 should prune first file
299292 prune (500 )
300- if has_block (0 ):
301- raise AssertionError ("blk00000.dat is still there, should be pruned by now" )
302- if not has_block (1 ):
303- raise AssertionError ("blk00001.dat is missing when should still be there" )
293+ assert not has_block (0 ), "blk00000.dat is still there, should be pruned by now"
294+ assert has_block (1 ), "blk00001.dat is missing when should still be there"
304295
305296 # height=650 should prune second file
306297 prune (650 )
307- if has_block (1 ):
308- raise AssertionError ("blk00001.dat is still there, should be pruned by now" )
298+ assert not has_block (1 ), "blk00001.dat is still there, should be pruned by now"
309299
310300 # height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
311301 prune (1000 , 1001 - MIN_BLOCKS_TO_KEEP )
312- if not has_block (2 ):
313- raise AssertionError ("blk00002.dat is still there, should be pruned by now" )
302+ assert has_block (2 ), "blk00002.dat is still there, should be pruned by now"
314303
315304 # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
316305 node .generate (288 )
317306 prune (1000 )
318- if has_block (2 ):
319- raise AssertionError ("blk00002.dat is still there, should be pruned by now" )
320- if has_block (3 ):
321- raise AssertionError ("blk00003.dat is still there, should be pruned by now" )
307+ assert not has_block (2 ), "blk00002.dat is still there, should be pruned by now"
308+ assert not has_block (3 ), "blk00003.dat is still there, should be pruned by now"
322309
323310 # stop node, start back up with auto-prune at 550 MiB, make sure still runs
324311 self .stop_node (node_number )
@@ -339,7 +326,7 @@ def wallet_test(self):
         connect_nodes(self.nodes[0], 5)
         nds = [self.nodes[0], self.nodes[5]]
         sync_blocks(nds, wait=5, timeout=300)
-        self.stop_node(5) # stop and start to trigger rescan
+        self.stop_node(5)  # stop and start to trigger rescan
         self.start_node(5, extra_args=["-prune=550"])
         self.log.info("Success")
 
@@ -394,11 +381,11 @@ def run_test(self):
         # +...+(1044) &.. $...$(1319)
 
         # Save some current chain state for later use
-        self.mainchainheight = self.nodes[2].getblockcount() # 1320
+        self.mainchainheight = self.nodes[2].getblockcount()  # 1320
         self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
 
         self.log.info("Check that we can survive a 288 block reorg still")
-        (self.forkheight, self.forkhash) = self.reorg_test() # (1033, )
+        self.reorg_test()  # (1033, )
         # Now create a 288 block reorg by mining a longer chain on N1
         # First disconnect N1
         # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain