Skip to content

Commit b8fb8a4

Browse files
authored
Snap sync maint update (#3969)
* Disentangle `helpers.nim` import details Move string functions to the module where the data type is defined * Add `totalRatio()` function for unprocessed interval logging why Shows a factor of what is to be done, e.g. `0.0` for all done, `1.0` for nothing done. * Provide `fetchSubRange()` function for unprocessed ranges why This makes it possible for requesting different account intervals for different state roots in a synchronised way. * Extend support for simple `ItemKey` range set why A simple `ItemKeyRangeSet` range set can be used to synchronise the more complex `UnprocItemKeys` range set which has sort of a locking branch (called `borrowed`.) Synchronisation of state root range sets. `ItemKeyRangeSet` is used to cover account ranges regardless of state roots. When building the MPT it is expected that parts of account sub-tries can be used partially for other state roots. * Use `ItemKey` type rather than `Hash32` in `validate()` function why The `ItemKey` is the `UInt256` representation of a `Hash32` which is used in the account range administration functions. So it needs no extra conversion when used here. * Fix RLP storage in `encodeRawAccPkg()` why A sub-type of `UInt256` must be RLP encoded as `UInt256` and not as the base type produced with `distinctBase()`. * Update error handling for account range fetcher why The fetcher returns the error type. So the caller can use this information, e.g. for deciding whether to re-try with a different state root, or abandon this peer. * Cosmetics, better names for types/functions
1 parent 204c69a commit b8fb8a4

File tree

12 files changed

+251
-113
lines changed

12 files changed

+251
-113
lines changed

execution_chain/sync/snap/worker/account.nim

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ template updateTarget(
3434

3535
# Single target block hash
3636
if ctx.pool.target.value.blockHash != BlockHash(zeroHash32):
37-
let rc = buddy.headerStateSet(ctx.pool.target.value.blockHash)
37+
let rc = buddy.headerStateRegister(ctx.pool.target.value.blockHash)
3838
if rc.isErr and rc.error: # real error
3939
trace info & ": failed fetching pivot hash", peer,
4040
hash=ctx.pool.target.value.blockHash.toStr
@@ -118,7 +118,7 @@ template accountRangeImport*(buddy: SnapPeerRef; info: static[string]) =
118118
if not ethPeer.isNil:
119119
trace info & ": processing best/latest pivotHash", peer,
120120
hash=ethPeer.only.pivotHash.short
121-
buddy.headerStateSet(BlockHash(ethPeer.only.pivotHash)).isErrOr:
121+
buddy.headerStateRegister(BlockHash(ethPeer.only.pivotHash)).isErrOr:
122122
buddy.only.pivotRoot = Opt.some(value)
123123

124124
# Check for manual target settings

execution_chain/sync/snap/worker/account/account_fetch.nim

Lines changed: 18 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,10 @@ import
1616
../[helpers, state_db, worker_desc],
1717
./account_helpers
1818

19+
type
20+
FetchAccountsResult* = Result[AccountRangePacket,ErrorType]
21+
## Shortcut
22+
1923
# ------------------------------------------------------------------------------
2024
# Private helpers
2125
# ------------------------------------------------------------------------------
@@ -77,12 +81,12 @@ template fetchAccounts*(
7781
buddy: SnapPeerRef;
7882
stateRoot: StateRoot; # DB state
7983
ivReq: ItemKeyRange; # Range to be fetched
80-
): Opt[AccountRangePacket] =
84+
): FetchAccountsResult =
8185
## Async/template
8286
##
8387
## Fetch accounts from the network.
8488
##
85-
var bodyRc = Opt[AccountRangePacket].err()
89+
var bodyRc = FetchAccountsResult.err(EGeneric)
8690
block body:
8791
const
8892
sendInfo = trSnapSendSendingGetAccountRange
@@ -107,20 +111,24 @@ template fetchAccounts*(
107111
elapsed = rc.value.elapsed
108112
else:
109113
elapsed = rc.error.elapsed
114+
bodyRc = FetchAccountsResult.err(rc.error.excp)
110115
block evalError:
111116
case rc.error.excp:
112-
of EGeneric, ESyncerTermination:
117+
of EGeneric:
113118
break evalError
114-
of EPeerDisconnected, ECancelledError:
115-
buddy.nErrors.fetch.acc.inc
116-
buddy.ctrl.zombie = true
117-
of ECatchableError, EMissingEthContext:
118-
buddy.accFetchRegisterError()
119119
of EAlreadyTriedAndFailed:
120120
trace recvInfo & " error", peer, root, reqAcc, nReqAcc,
121121
ela=elapsed.toStr, state=($buddy.syncState), error=rc.errStr,
122122
nErrors=buddy.nErrors.fetch.acc
123123
break body # return err()
124+
of EPeerDisconnected, ECancelledError:
125+
buddy.nErrors.fetch.acc.inc
126+
buddy.ctrl.zombie = true
127+
of ECatchableError:
128+
buddy.accFetchRegisterError()
129+
of ENoDataAvailable, EMissingEthContext:
130+
# Not allowed here -- internal error
131+
raiseAssert "Unexpected error " & $rc.error.excp
124132

125133
# Debug message for other errors
126134
debug recvInfo & " error", peer, root, reqAcc, nReqAcc,
@@ -193,6 +201,7 @@ template fetchAccounts*(
193201
buddy.registerPeerError(stateRoot)
194202
trace recvInfo & " not available", peer, root, reqAcc, nReqAcc,
195203
ela, state, nErrors=buddy.nErrors.fetch.acc
204+
bodyRc = FetchAccountsResult.err(ENoDataAvailable)
196205
break body # return err()
197206

198207
else:
@@ -206,7 +215,7 @@ template fetchAccounts*(
206215
buddy.nErrors.fetch.acc = 0 # reset error count
207216
buddy.ctx.pool.lastSlowPeer = Opt.none(Hash) # not last one/error
208217

209-
bodyRc = Opt[AccountRangePacket].ok(rc.value.packet)
218+
bodyRc = FetchAccountsResult.ok(rc.value.packet)
210219

211220
bodyRc
212221

execution_chain/sync/snap/worker/header/header_state.nim

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ logScope:
2323
# Private function
2424
# ------------------------------------------------------------------------------
2525

26-
template headerStateSet(
26+
template headerStateRegister(
2727
buddy: SnapPeerRef;
2828
blockNumber: BlockNumber;
2929
): Result[StateRoot,bool] =
@@ -52,7 +52,7 @@ template headerStateSet(
5252
let header = buddy.headerFetch(blockNumber).valueOr:
5353
break body # error
5454

55-
ctx.pool.stateDB.update(header)
55+
ctx.pool.stateDB.register(header)
5656
bodyRc = Result[StateRoot,bool].ok(StateRoot(header.stateRoot))
5757

5858
bodyRc # return
@@ -92,7 +92,7 @@ proc readData(
9292
# Public function(s)
9393
# ------------------------------------------------------------------------------
9494

95-
template headerStateSet*(
95+
template headerStateRegister*(
9696
buddy: SnapPeerRef;
9797
blockHash: BlockHash;
9898
): Result[StateRoot,bool] =
@@ -123,7 +123,7 @@ template headerStateSet*(
123123
if header.number == 0:
124124
break body # error
125125

126-
ctx.pool.stateDB.update(header, blockHash)
126+
ctx.pool.stateDB.register(header, blockHash)
127127
bodyRc = Result[StateRoot,bool].ok(StateRoot(header.stateRoot))
128128

129129
bodyRc # return
@@ -166,7 +166,7 @@ template headerStateLoad*(
166166
try:
167167
let blkHash = BlockHash(Hash32.fromHex(data))
168168
if blkHash != BlockHash(zeroHash32):
169-
bodyRc = buddy.headerStateSet(blkHash)
169+
bodyRc = buddy.headerStateRegister(blkHash)
170170
if bodyRc.isErr() and bodyRc.error():
171171
trace info & ": state update failed", peer, fileName,
172172
blockHash=blkHash.toStr
@@ -179,7 +179,7 @@ template headerStateLoad*(
179179
try:
180180
let number = data.parseUInt.uint64
181181
if 0 < number:
182-
bodyRc = buddy.headerStateSet(number)
182+
bodyRc = buddy.headerStateRegister(number)
183183
if bodyRc.isErr() and bodyRc.error():
184184
trace info & ": state update failed", peer, fileName,
185185
blockNumber=number

execution_chain/sync/snap/worker/helpers.nim

Lines changed: 56 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -13,47 +13,78 @@
1313
## Extracted helpers from `worker_desc` (avoids circular import)
1414

1515
import
16-
std/strformat,
16+
std/[fenv, math, strformat],
1717
pkg/[chronos, stew/interval_set],
1818
../../../core/chain,
1919
../../../networking/p2p,
2020
../../../utils/[prettify, utils],
2121
../../sync_desc,
22-
./state_db/item_key,
2322
./worker_const
2423

2524
export
2625
prettify, short, `$`
2726

2827

2928
func toStr*(h: Hash32): string =
30-
if h == emptyRoot: "n/a"
31-
elif h == zeroHash32: "n/a"
29+
if h == emptyRoot: "empty"
30+
elif h == zeroHash32: "zero"
3231
else: h.short
3332

34-
35-
func toStr*(w: ItemKey): string =
36-
if w == high(ItemKey): "n/a" else: $(w.to(UInt256))
37-
38-
func toStr*(w: (ItemKey,ItemKey)): string =
39-
func xStr(w: ItemKey): string =
40-
if w == high(ItemKey): "high(ItemKey)" else: $(w.to(UInt256))
41-
if w[0] < w[1]: $(w[0].to(UInt256)) & ".." & w[1].xStr
42-
elif w[0] == w[1]: w[0].xStr
43-
else: "n/a"
44-
45-
func toStr*(w: ItemKeyRange): string =
46-
(w.minPt,w.maxPt).toStr
47-
48-
49-
func toStr*(w: float): string =
50-
&"{w:.7g}" # => 1.234567e+x
51-
52-
func toStr*(w: (float,float)): string =
53-
if w[0] < w[1]: w[0].toStr & ".." & w[1].toStr
54-
elif w[0] == w[1]: w[0].toStr
33+
# --------------
34+
35+
func per256*(w: UInt256): float =
36+
## Represents the quotient `w / 2^256` as `float` value. Note that the
37+
## result is non-negative and always smaller than `1f`.
38+
##
39+
when sizeof(float) != sizeof(uint):
40+
{.error: "Expected float having the same size as uint".}
41+
if w == 0:
42+
return 0f
43+
let mantissa = 256 - w.leadingZeros
44+
if mantissa <= mantissaDigits(float): # `<= 53` on a 64 bit system
45+
return w.truncate(uint).float / 2f.pow(256.float)
46+
# Calculate `total / 2^exp / 2^(256-exp)` = `total / 2^256`
47+
let exp = mantissa - mantissaDigits(float) # is positive
48+
(w shr exp).truncate(uint).float / 2f.pow((256 - exp).float)
49+
50+
func per256*(w: Opt[UInt256]): float =
51+
## Variant of `per256()` where the argument `w` covers the full scalar
52+
## range with `Opt.none()` representing `0` and `Opt.some(0)` representing
53+
## `2^255` (where the latter is not in the scalar range for `UInt256`,
54+
## anymore.)
55+
##
56+
if w.isNone: 0f
57+
elif w.value == 0: 1f
58+
else: w.value.per256()
59+
60+
61+
func toStr*(w: float, precision: static[int] = 7): string =
62+
if w == 0f:
63+
"0.0"
64+
elif w == 1f:
65+
"1.0"
66+
else:
67+
when precision == 2:
68+
&"{w:.2e}"
69+
elif precision == 3:
70+
&"{w:.3e}"
71+
elif precision == 4:
72+
&"{w:.4e}"
73+
elif precision == 7:
74+
&"{w:.7e}"
75+
elif precision == 11:
76+
&"{w:.11e}"
77+
elif precision == 15:
78+
&"{w:.15e}"
79+
else:
80+
{.error: "Unsupported precision".}
81+
82+
func toStr*(w: (float,float), precision: static[int] = 4): string =
83+
if w[0] < w[1]: w[0].toStr(precision) & ".." & w[1].toStr(precision)
84+
elif w[0] == w[1]: w[0].toStr(precision)
5585
else: "n/a"
5686

87+
# --------------
5788

5889
func toStr*(a: chronos.Duration): string =
5990
if twoHundredYears <= a:
@@ -64,9 +95,6 @@ func toStr*(a: chronos.Duration): string =
6495

6596
# -----------
6697

67-
func `$`*(w: ItemKey|ItemKeyRange): string =
68-
w.toStr
69-
7098
func `$`*(w: (SyncState,bool)): string =
7199
$w[0] & (if w[1]: "+" & "poolMode" else: "")
72100

execution_chain/sync/snap/worker/mpt/mpt_assembly.nim

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -18,11 +18,11 @@
1818
##
1919
## This module will always pull in the `RocksDB` library. There is no
2020
## in-memory part (which avoids the `RocksDB` library) as provided by the
21-
## `CorrDb` via different `memory` and `persistent` sub-modules.
21+
## `CoreDb` via different `memory` and `persistent` sub-modules.
2222
##
23-
## For the moment, no column families will be used.
23+
## No column families are used, here.
2424
##
25-
## Storage formats by column type:
25+
## Key/value storage formats by column type:
2626
##
2727
## * RawAccPkg:
2828
## + key65: <col, root, start>
@@ -101,13 +101,13 @@ type
101101
func decodeRawAccPkg(data: seq[byte]): Result[DecodedRawAccPkg,string] =
102102
when sizeof(Hash) != sizeof(uint):
103103
{.error: "Hash type must have size of uint".}
104-
const info = "decodeRawAccPkg: "
104+
const info = "decodeRawAccPkg"
105105
var
106106
rd = data.rlpFromBytes
107107
res: DecodedRawAccPkg
108108
try:
109109
rd.tryEnterList()
110-
res.limit = rd.read(UInt256).ItemKey
110+
res.limit = ItemKey(rd.read(UInt256))
111111
res.packet = rd.read(AccountRangePacket)
112112
res.peerID = Hash(cast[int](rd.read uint))
113113
except RlpError as e:
@@ -122,7 +122,7 @@ template encodeRawAccPkg(
122122
when sizeof(Hash) != sizeof(uint):
123123
{.error: "Hash type must have size of uint".}
124124
var wrt = initRlpList 3
125-
wrt.append limit.distinctBase
125+
wrt.append limit.to(UInt256)
126126
wrt.append packet
127127
wrt.append cast[uint](peerID)
128128
wrt.finish()
@@ -310,7 +310,7 @@ proc init*(T: type MptAsmRef, baseDir: string, info: static[string]): Opt[T] =
310310
error info & ": Cannot backup old assembly folder", asmDir, bakDir, excpt
311311
return err()
312312

313-
when true: # FIXME: debugging
313+
when extraTraceMessages: # FIXME: debugging -- will go away
314314
let adb = bakDir.distinctBase.openRocksDb().valueOr:
315315
error info & ": Can't create assembly DB", bakDir, `error`=error
316316
return err()
@@ -426,7 +426,7 @@ proc putRawAccPkg*(
426426
packet: AccountRangePacket;
427427
peerID: Hash;
428428
): Result[void,string] =
429-
db.put65(root, start, limit.encodeRawAccPkg(packet, peerID), RawAccPkg)
429+
db.put65(root, start, encodeRawAccPkg(limit, packet, peerID), RawAccPkg)
430430

431431
proc delRawAccPkg*(
432432
db: MptAsmRef;

execution_chain/sync/snap/worker/mpt/mpt_build.nim

Lines changed: 6 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -30,15 +30,6 @@ proc append(w: var RlpWriter, val: AccBody) =
3030
w.append val.storageRoot.Hash32
3131
w.append val.codeHash.Hash32
3232

33-
# ------------------------------------------------------------------------------
34-
# Private helpers
35-
# ------------------------------------------------------------------------------
36-
37-
func high(T: type Hash32): T = high(UInt256).to(Bytes32).T
38-
39-
func `<`(a, b: Hash32): bool = a.distinctBase < b.distinctBase
40-
func `<=`(a, b: Hash32): bool = not (b < a)
41-
4233
# ------------------------------------------------------------------------------
4334
# Private functions: constructor helpers
4435
# ------------------------------------------------------------------------------
@@ -141,7 +132,7 @@ proc nodeStash*(
141132
proc updateProofTree(
142133
node: NodeRef; # Current node, start node
143134
path: NibblesBuf; # Current path, recursively updated
144-
last: var Hash32; # Path of last leaf, visited
135+
last: var ItemKey; # Path of last leaf, visited
145136
) =
146137
## Recursively label path prefixes, resolve extensions, and return the
147138
## right boundary leaf path (if any).
@@ -167,7 +158,7 @@ proc updateProofTree(
167158
down.updateProofTree(path & NibblesBuf.nibble(byte n), last)
168159

169160
of Leaf:
170-
last = getBytes(path & LeafNodeRef(node).lfPfx).to(Hash32)
161+
last = getBytes(path & LeafNodeRef(node).lfPfx).to(ItemKey)
171162

172163
of Stop:
173164
StopNodeRef(node).path = path
@@ -413,7 +404,7 @@ proc init*(
413404
proc init*(
414405
T: type NodeTrieRef;
415406
stateRoot: StateRoot;
416-
start: Hash32;
407+
start: ItemKey;
417408
nodes: openArray[ProofNode];
418409
): T =
419410
## Create a partial MPT from a list of rlp encoded nodes. Some conditions
@@ -454,12 +445,12 @@ proc init*(
454445
tmpLinks.del stopKey
455446

456447
# Label path prefixes and join Extensions
457-
var limit = high(Hash32)
448+
var limit = high(ItemKey)
458449
db.root.updateProofTree(NibblesBuf(), limit)
459450

460451
# Select sub-roots, links within min/max bounds
461452
for (key,stopNode) in tmpLinks.pairs:
462-
let path = stopNode.path.getBytes.to(Hash32)
453+
let path = stopNode.path.getBytes.to(ItemKey)
463454
if start <= path and path < limit:
464455
db.stops[key] = stopNode
465456
else:
@@ -538,7 +529,7 @@ proc isComplete*(db: NodeTrieRef): bool =
538529

539530
proc validate*(
540531
root: StateRoot;
541-
start: Hash32;
532+
start: ItemKey;
542533
pck: AccountRangePacket;
543534
): Opt[NodeTrieRef] =
544535
## Validate snap account data package.

0 commit comments

Comments
 (0)