Skip to content

Commit 57fa99f

Browse files
committed
[Tests] Introduce tiertwo_reorg_mempool test
1 parent 9a1bf27 commit 57fa99f

File tree

3 files changed

+251
-5
lines changed

3 files changed

+251
-5
lines changed

test/functional/test_framework/test_framework.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1312,6 +1312,7 @@ def register_new_dmn(self, idx, miner_idx, controller_idx, strType,
13121312
return dmn
13131313

13141314
def check_mn_list_on_node(self, idx, mns):
1315+
self.nodes[idx].syncwithvalidationinterfacequeue()
13151316
mnlist = self.nodes[idx].listmasternodes()
13161317
if len(mnlist) != len(mns):
13171318
raise Exception("Invalid mn list on node %d:\n%s\nExpected:%s" % (idx, str(mnlist), str(mns)))

test/functional/test_runner.py

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -141,11 +141,12 @@
141141

142142
# Functional test scripts exercising tier-two (masternode) features.
# NOTE: order is load-bearing — the parallel test runner starts scripts in
# list order, so the longest-running tests go first to maximize overlap.
# The "~ N sec" annotations record observed runtimes used for that ordering.
TIERTWO_SCRIPTS = [
    # Longest test should go first, to favor running tests in parallel
    'tiertwo_governance_sync_basic.py', # ~ 445 sec
    'tiertwo_mn_compatibility.py', # ~ 413 sec
    'tiertwo_deterministicmns.py', # ~ 366 sec
    'tiertwo_masternode_activation.py', # ~ 352 sec
    'tiertwo_masternode_ping.py', # ~ 293 sec
    'tiertwo_reorg_mempool.py', # ~ 107 sec
]
150151

151152
SAPLING_SCRIPTS = [
Lines changed: 244 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,244 @@
1+
#!/usr/bin/env python3
2+
# Copyright (c) 2021 The PIVX Core developers
3+
# Distributed under the MIT software license, see the accompanying
4+
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
5+
6+
"""
7+
Test deterministic masternodes conflicts and reorgs.
8+
- Check that in-mempool reuse of mn unique-properties is invalid
9+
- Check mempool eviction after conflict with newly connected block / reorg
10+
- Check deterministic list consensus after reorg
11+
"""
12+
13+
import random
14+
import time
15+
16+
from test_framework.test_framework import PivxTestFramework
17+
18+
from test_framework.util import (
19+
assert_equal,
20+
assert_raises_rpc_error,
21+
create_new_dmn,
22+
connect_nodes,
23+
disconnect_nodes,
24+
)
25+
26+
class TiertwoReorgMempoolTest(PivxTestFramework):
    """Test deterministic-masternode (DMN) conflicts across a chain split/reorg.

    - in-mempool reuse of a ProReg's unique properties (owner/operator/IP) is rejected
    - mempool ProReg txes conflicting with a newly connected block/reorg are evicted
    - ProReg txes from disconnected blocks are resurrected into the mempool
    - both nodes converge on the same deterministic MN list after the reorg
    """

    def set_test_params(self):
        # Two nodes mining on separate chains.
        self.num_nodes = 2
        self.setup_clean_chain = True
        # Build one independent args list per node. The original used
        # [[...]] * self.num_nodes, which aliases a single inner list, so the
        # append below would (unintentionally) hand the spork key to BOTH nodes.
        self.extra_args = [["-nuparams=v5_shield:1", "-nuparams=v6_evo:160"]
                           for _ in range(self.num_nodes)]
        # Only node 0 signs sporks.
        self.extra_args[0].append("-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi")

    def setup_network(self):
        # Start connected; run_test splits the network later via disconnect_all().
        self.setup_nodes()
        self.connect_all()

    def connect_all(self):
        # Connect in both directions so each node registers the other as a peer.
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 0)

    def disconnect_all(self):
        self.log.info("Disconnecting nodes...")
        disconnect_nodes(self.nodes[0], 1)
        disconnect_nodes(self.nodes[1], 0)
        self.log.info("Nodes disconnected")

    def register_masternode(self, from_node, dmn, collateral_addr):
        """Fund and broadcast a ProReg tx for dmn from from_node.

        Side effect: stores the resulting txid on dmn.proTx (a re-registration
        therefore changes the dmn's proTx hash).
        """
        dmn.proTx = from_node.protx_register_fund(collateral_addr, dmn.ipport, dmn.owner,
                                                  dmn.operator, dmn.voting, dmn.payee)

    def run_test(self):
        self.disable_mocktime()
        nodeA = self.nodes[0]
        nodeB = self.nodes[1]
        free_idx = 1  # unique id for masternodes. first available.

        # Enforce mn payments and reject legacy mns at block 202
        self.activate_spork(0, "SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT")
        assert_equal("success", self.set_spork(0, "SPORK_21_LEGACY_MNS_MAX_HEIGHT", 201))
        time.sleep(1)  # give the spork message a moment to relay to node 1
        assert_equal([201] * self.num_nodes, [self.get_spork(x, "SPORK_21_LEGACY_MNS_MAX_HEIGHT")
                                              for x in range(self.num_nodes)])

        # Mine 201 blocks, alternating miners so both wallets hold spendable coins
        self.log.info("Mining...")
        nodeA.generate(25)
        self.sync_blocks()
        nodeB.generate(25)
        self.sync_blocks()
        nodeA.generate(50)
        self.sync_blocks()
        nodeB.generate(101)
        self.sync_blocks()
        self.assert_equal_for_all(201, "getblockcount")

        # Register one masternode before the split, so it exists on both chains
        collateral_addr = nodeA.getnewaddress()  # for both collateral and payouts
        pre_split_mn = create_new_dmn(100, nodeA, nodeA.getnewaddress(), None)
        self.register_masternode(nodeA, pre_split_mn, collateral_addr)
        nodeA.generate(1)
        self.sync_blocks()
        mnsA = [pre_split_mn]  # expected DMN list on chain A
        mnsB = [pre_split_mn]  # expected DMN list on chain B
        self.check_mn_list_on_node(0, mnsA)
        self.check_mn_list_on_node(1, mnsB)
        self.log.info("Pre-split masternode registered.")

        # Disconnect the nodes
        self.disconnect_all()  # network splits at block 203

        #
        # -- CHAIN A --
        #

        # Register 5 masternodes, then mine 5 blocks
        self.log.info("Registering masternodes on chain A...")
        for _ in range(5):
            dmn = create_new_dmn(free_idx, nodeA, collateral_addr, None)
            free_idx += 1
            self.register_masternode(nodeA, dmn, collateral_addr)
            mnsA.append(dmn)
        nodeA.generate(5)
        self.check_mn_list_on_node(0, mnsA)
        self.log.info("Masternodes registered on chain A.")

        # Lock any utxo with less than 101 confs (e.g. change), so we can resurrect everything
        for x in nodeA.listunspent(0, 101):
            nodeA.lockunspent(False, [{"txid": x["txid"], "vout": x["vout"]}])

        # Now send a valid proReg tx to the mempool, without mining it
        mempool_dmn1 = create_new_dmn(free_idx, nodeA, collateral_addr, None)
        free_idx += 1
        self.register_masternode(nodeA, mempool_dmn1, collateral_addr)
        assert mempool_dmn1.proTx in nodeA.getrawmempool()

        # Try sending a proReg tx with same owner: must be rejected by the mempool
        self.log.info("Testing in-mempool duplicate-owner rejection...")
        dmn_A1 = create_new_dmn(free_idx, nodeA, collateral_addr, None)
        free_idx += 1
        dmn_A1.owner = mempool_dmn1.owner
        assert_raises_rpc_error(-26, "protx-dup",
                                self.register_masternode, nodeA, dmn_A1, collateral_addr)
        assert dmn_A1.proTx not in nodeA.getrawmempool()

        # Try sending a proReg tx with same operator
        self.log.info("Testing in-mempool duplicate-operator rejection...")
        dmn_A2 = create_new_dmn(free_idx, nodeA, collateral_addr, None)
        free_idx += 1
        dmn_A2.operator = mempool_dmn1.operator
        assert_raises_rpc_error(-26, "protx-dup",
                                self.register_masternode, nodeA, dmn_A2, collateral_addr)
        assert dmn_A2.proTx not in nodeA.getrawmempool()

        # Try sending a proReg tx with same IP
        self.log.info("Testing proReg in-mempool duplicate-IP rejection...")
        dmn_A3 = create_new_dmn(free_idx, nodeA, collateral_addr, None)
        free_idx += 1
        dmn_A3.ipport = mempool_dmn1.ipport
        assert_raises_rpc_error(-26, "protx-dup",
                                self.register_masternode, nodeA, dmn_A3, collateral_addr)
        assert dmn_A3.proTx not in nodeA.getrawmempool()

        # Now send other 2 valid proReg tx to the mempool, without mining it
        mempool_dmn2 = create_new_dmn(free_idx, nodeA, collateral_addr, None)
        free_idx += 1
        mempool_dmn3 = create_new_dmn(free_idx, nodeA, collateral_addr, None)
        free_idx += 1
        self.register_masternode(nodeA, mempool_dmn2, collateral_addr)
        self.register_masternode(nodeA, mempool_dmn3, collateral_addr)

        # Now nodeA has 3 proReg txes in its mempool
        mempoolA = nodeA.getrawmempool()
        assert mempool_dmn1.proTx in mempoolA
        assert mempool_dmn2.proTx in mempoolA
        assert mempool_dmn3.proTx in mempoolA

        assert_equal(nodeA.getblockcount(), 207)  # 202 + 5 blocks mined on A

        #
        # -- CHAIN B --
        #
        collateral_addr = nodeB.getnewaddress()
        self.log.info("Registering masternodes on chain B...")

        # Register first the 3 nodes that conflict with the mempool of nodes[0]
        # mine one block after each registration
        for dmn in [dmn_A1, dmn_A2, dmn_A3]:
            self.register_masternode(nodeB, dmn, collateral_addr)
            mnsB.append(dmn)
            nodeB.generate(1)
        self.check_mn_list_on_node(1, mnsB)

        # Pick the proReg for the first MN registered on chain A, and replay it on chain B
        self.log.info("Replaying a masternode on a different chain...")
        mnsA.remove(pre_split_mn)  # pre-split mn exists on both chains already
        replay_mn = mnsA.pop(0)
        mnsB.append(replay_mn)  # same proTx hash
        nodeB.sendrawtransaction(nodeA.getrawtransaction(replay_mn.proTx, False))
        nodeB.generate(1)
        self.check_mn_list_on_node(1, mnsB)

        # Now pick a proReg for another MN registered on chain A, and re-register it on chain B
        self.log.info("Re-registering a masternode on a different chain...")
        rereg_mn = random.choice(mnsA)
        mnsA.remove(rereg_mn)
        self.register_masternode(nodeB, rereg_mn, collateral_addr)
        mnsB.append(rereg_mn)  # changed proTx hash
        nodeB.generate(1)
        self.check_mn_list_on_node(1, mnsB)

        # Register 5 more masternodes. One per block.
        for _ in range(5):
            dmn = create_new_dmn(free_idx, nodeB, collateral_addr, None)
            free_idx += 1
            self.register_masternode(nodeB, dmn, collateral_addr)
            mnsB.append(dmn)
            nodeB.generate(1)

        # Then mine 10 more blocks on chain B, making it the longer chain
        nodeB.generate(10)
        self.check_mn_list_on_node(1, mnsB)
        self.log.info("Masternodes registered on chain B.")

        assert_equal(nodeB.getblockcount(), 222)  # 202 + 3 + 1 + 1 + 5 + 10

        #
        # -- RECONNECT --
        #

        # Reconnect and sync (give it some more time): nodeA reorgs to chain B
        self.log.info("Reconnecting nodes...")
        self.connect_all()
        # !TODO: FIXME - failing because we check budget/mn payment in CheckBlock
        # during a reorg, the previous block hasn't been connected yet, so the dmn list is empty.
        self.sync_blocks(wait=3, timeout=180)

        # Both nodes have the same list (mnB)
        self.log.info("Checking masternode list...")
        self.check_mn_list_on_node(0, mnsB)
        self.check_mn_list_on_node(1, mnsB)
        self.log.info("Both nodes have %d registered masternodes." % len(mnsB))

        # The first mempool proReg tx has been removed from nodeA's mempool due to
        # conflicts with the masternodes of chain B, now connected.
        self.log.info("Checking mempool...")
        mempoolA = nodeA.getrawmempool()
        assert mempool_dmn1.proTx not in mempoolA
        assert mempool_dmn2.proTx in mempoolA
        assert mempool_dmn3.proTx in mempoolA
        # The mempool contains also all the ProReg from the disconnected blocks,
        # except the ones re-registered and replayed on chain B.
        for mn in mnsA:
            assert mn.proTx in mempoolA
        assert rereg_mn.proTx not in mempoolA
        assert replay_mn.proTx not in mempoolA
        assert pre_split_mn.proTx not in mempoolA

        self.log.info("All good.")
242+
243+
# Standard functional-test entry point: build the test object and run it.
if __name__ == '__main__':
    TiertwoReorgMempoolTest().main()

0 commit comments

Comments
 (0)