Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

introduce ForkedBlobSidecar for EIP-7688 Electra period before PeerDAS #6451

Draft
wants to merge 19 commits into
base: unstable
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion AllTests-mainnet.md
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
+ sanity check Deneb states [Preset: mainnet] OK
+ sanity check Deneb states, reusing buffers [Preset: mainnet] OK
+ sanity check Electra blocks [Preset: mainnet] OK
+ sanity check blobs [Preset: mainnet] OK
+ sanity check blobs (Deneb) [Preset: mainnet] OK
+ sanity check genesis roundtrip [Preset: mainnet] OK
+ sanity check phase 0 blocks [Preset: mainnet] OK
+ sanity check phase 0 getState rollback [Preset: mainnet] OK
Expand Down
33 changes: 33 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,36 @@
2024-08-29 v24.8.0
==================

Nimbus `v24.8.0` is a `low-urgency` release with beacon API improvements and fixes.

### Improvements

* Increase speed of processing blocks with deposits by 25%:
https://github.com/status-im/nimbus-eth2/pull/6469

* Avoid running light client sync in background when node is synced:
https://github.com/status-im/nimbus-eth2/pull/6505

* Add additional Sepolia bootnode:
https://github.com/status-im/nimbus-eth2/pull/6490

### Fixes

* Add timeouts to failed execution layer requests:
https://github.com/status-im/nimbus-eth2/pull/6441

* Use correct fork digest when broadcasting blob sidecars, sync committee, and sync contribution messages:
https://github.com/status-im/nimbus-eth2/pull/6440

* Fix Holesky genesis state being downloaded multiple times:
https://github.com/status-im/nimbus-eth2/pull/6452

* Check blob versioned hashes when optimistic syncing:
https://github.com/status-im/nimbus-eth2/pull/6501

* Increase trusted node sync state downloading timeout to 120 seconds:
https://github.com/status-im/nimbus-eth2/pull/6487

2024-07-29 v24.7.0
==================

Expand Down
42 changes: 26 additions & 16 deletions beacon_chain/beacon_chain_db.nim
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ type
keyValues: KvStoreRef # Random stuff using DbKeyKind - suitable for small values mainly!
blocks: array[ConsensusFork, KvStoreRef] # BlockRoot -> TrustedSignedBeaconBlock

blobs: KvStoreRef # (BlockRoot -> BlobSidecar)
blobs: array[BlobFork, KvStoreRef] # (BlockRoot -> BlobSidecar)

stateRoots: KvStoreRef # (Slot, BlockRoot) -> StateRoot

Expand Down Expand Up @@ -559,9 +559,10 @@ proc new*(T: type BeaconChainDB,
sealedPeriods: "lc_sealed_periods")).expectDb()
static: doAssert LightClientDataFork.high == LightClientDataFork.Electra

var blobs : KvStoreRef
var blobs: array[BlobFork, KvStoreRef]
if cfg.DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH:
blobs = kvStore db.openKvStore("deneb_blobs").expectDb()
blobs[BlobFork.Deneb] = kvStore db.openKvStore("deneb_blobs").expectDb()
static: doAssert BlobFork.high == BlobFork.Deneb

# Versions prior to 1.4.0 (altair) stored validators in `immutable_validators`
# which stores validator keys in compressed format - this is
Expand Down Expand Up @@ -765,8 +766,9 @@ proc close*(db: BeaconChainDB) =
if db.db == nil: return

# Close things roughly in reverse order
if not isNil(db.blobs):
discard db.blobs.close()
for blobFork in BlobFork:
if not isNil(db.blobs[blobFork]):
discard db.blobs[blobFork].close()
db.lcData.close()
db.finalizedBlocks.close()
discard db.summaries.close()
Expand Down Expand Up @@ -812,16 +814,20 @@ proc putBlock*(
db.blocks[type(value).kind].putSZSSZ(value.root.data, value)
db.putBeaconBlockSummary(value.root, value.message.toBeaconBlockSummary())

proc putBlobSidecar*[T: ForkyBlobSidecar](
    db: BeaconChainDB, value: T) =
  ## Store a blob sidecar in the key-value store belonging to its fork,
  ## keyed by (block root, blob index). The block root is derived from the
  ## sidecar's signed block header.
  # NOTE(review): assumes `db.blobs[T.kind]` was opened (fork enabled in
  # config) — unlike the getters, there is no nil check here; confirm that
  # callers only write sidecars for active forks.
  let block_root = hash_tree_root(value.signed_block_header.message)
  db.blobs[T.kind].putSZSSZ(blobkey(block_root, value.index), value)

proc delBlobSidecar*(
    db: BeaconChainDB,
    root: Eth2Digest, index: BlobIndex): bool =
  ## Delete the sidecar with the given (block root, index) key from every
  ## opened blob-fork store; returns true if at least one store deleted it.
  var res = false
  for blobFork in BlobFork:
    # Stores for forks not enabled in the config are never opened.
    if db.blobs[blobFork] == nil: continue
    if db.blobs[blobFork].del(blobkey(root, index)).expectDb():
      res = true
  res

proc updateImmutableValidators*(
db: BeaconChainDB, validators: openArray[Validator]) =
Expand Down Expand Up @@ -1070,16 +1076,20 @@ proc getBlockSSZ*(
withConsensusFork(fork):
getBlockSSZ(db, key, data, consensusFork.TrustedSignedBeaconBlock)

proc getBlobSidecarSZ*[T: ForkyBlobSidecar](
    db: BeaconChainDB, root: Eth2Digest, index: BlobIndex,
    data: var seq[byte]): bool =
  ## Fetch the raw stored bytes of the sidecar for `T`'s fork into `data`
  ## (stored via `putSZSSZ`, i.e. compressed SSZ). Returns false when that
  ## fork's store is not open or the key is absent.
  if db.blobs[T.kind] == nil: return false
  let dataPtr = addr data # Short-lived
  func decode(data: openArray[byte]) =
    assign(dataPtr[], data)
  db.blobs[T.kind].get(blobkey(root, index), decode).expectDb()

proc getBlobSidecar*[T: ForkyBlobSidecar](
    db: BeaconChainDB, root: Eth2Digest, index: BlobIndex,
    value: var T): bool =
  ## Decode the stored sidecar for `T`'s fork into `value`. Returns false
  ## when that fork's store is not open or the key is absent.
  if db.blobs[T.kind] == nil: return false
  db.blobs[T.kind].getSZSSZ(blobkey(root, index), value) == GetResult.found

proc getBlockSZ*(
db: BeaconChainDB, key: Eth2Digest, data: var seq[byte],
Expand Down
60 changes: 39 additions & 21 deletions beacon_chain/consensus_object_pools/blob_quarantine.nim
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,8 @@ const

type
BlobQuarantine* = object
blobs*:
OrderedTable[(Eth2Digest, BlobIndex, KzgCommitment), ref BlobSidecar]
blobs*: OrderedTable[
(Eth2Digest, BlobIndex, KzgCommitment), ForkedBlobSidecar]
onBlobSidecarCallback*: OnBlobSidecarCallback

BlobFetchRecord* = object
Expand All @@ -38,7 +38,7 @@ func shortLog*(x: seq[BlobIndex]): string =
func shortLog*(x: seq[BlobFetchRecord]): string =
  ## Compact bracketed rendering of fetch records (block root + indices)
  ## for log output.
  "[" & x.mapIt(shortLog(it.block_root) & shortLog(it.indices)).join(", ") & "]"

func put*(quarantine: var BlobQuarantine, blobSidecar: ref BlobSidecar) =
func put*(quarantine: var BlobQuarantine, blobSidecar: ForkedBlobSidecar) =
if quarantine.blobs.lenu64 >= MaxBlobs:
# FIFO if full. For example, sync manager and request manager can race to
# put blobs in at the same time, so one gets blob insert -> block resolve
Expand All @@ -53,43 +53,61 @@ func put*(quarantine: var BlobQuarantine, blobSidecar: ref BlobSidecar) =
oldest_blob_key = k
break
quarantine.blobs.del oldest_blob_key
let block_root = hash_tree_root(blobSidecar.signed_block_header.message)
discard quarantine.blobs.hasKeyOrPut(
(block_root, blobSidecar.index, blobSidecar.kzg_commitment), blobSidecar)
withForkyBlob(blobSidecar):
let block_root = hash_tree_root(forkyBlob[].signed_block_header.message)
discard quarantine.blobs.hasKeyOrPut(
(block_root, forkyBlob[].index, forkyBlob[].kzg_commitment), blobSidecar)

func put*(quarantine: var BlobQuarantine, blobSidecar: ref ForkyBlobSidecar) =
  ## Convenience overload: wrap a fork-specific sidecar ref into a
  ## `ForkedBlobSidecar` and delegate to the forked `put`.
  quarantine.put(ForkedBlobSidecar.init(blobSidecar))

func hasBlob*(
    quarantine: BlobQuarantine,
    slot: Slot,
    proposer_index: uint64,
    index: BlobIndex): bool =
  ## Linear scan over the quarantine: true iff some stored sidecar (of any
  ## blob fork) matches the given slot, proposer index and blob index.
  for blobSidecar in quarantine.blobs.values:
    withForkyBlob(blobSidecar):
      template block_header: untyped = forkyBlob[].signed_block_header.message
      if block_header.slot == slot and
          block_header.proposer_index == proposer_index and
          forkyBlob[].index == index:
        return true
  false

func popBlobs*(
    quarantine: var BlobQuarantine, digest: Eth2Digest,
    blck:
      deneb.SignedBeaconBlock |
      electra.SignedBeaconBlock): auto =
  ## Remove and return all quarantined sidecars belonging to `blck`
  ## (looked up by (digest, index, commitment) for each commitment in the
  ## block body). The result type is the `BlobSidecars` of the blob fork
  ## corresponding to the block's consensus fork.
  const blobFork = blobForkAtConsensusFork(typeof(blck).kind).expect("Blobs OK")
  type ResultType = blobFork.BlobSidecars
  var r: ResultType = @[]
  for idx, kzg_commitment in blck.message.body.blob_kzg_commitments:
    var b: ForkedBlobSidecar
    if quarantine.blobs.pop((digest, BlobIndex idx, kzg_commitment), b):
      # It was already verified that the blob is linked to `blck`.
      # Therefore, we can assume that `BlobFork` is correct.
      doAssert b.kind == blobFork,
        "Must verify blob inclusion proof before `BlobQuarantine.put`"
      r.add(b.forky(blobFork))
  r

func hasBlobs*(
    quarantine: BlobQuarantine,
    blck:
      deneb.SignedBeaconBlock |
      electra.SignedBeaconBlock): bool =
  ## True iff every KZG commitment in `blck`'s body has a matching
  ## quarantined sidecar keyed by (block root, index, commitment).
  for idx, kzg_commitment in blck.message.body.blob_kzg_commitments:
    if (blck.root, BlobIndex idx, kzg_commitment) notin quarantine.blobs:
      return false
  true

func blobFetchRecord*(quarantine: BlobQuarantine,
blck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock): BlobFetchRecord =
func blobFetchRecord*(
quarantine: BlobQuarantine,
blck:
deneb.SignedBeaconBlock |
electra.SignedBeaconBlock): BlobFetchRecord =
var indices: seq[BlobIndex]
for i in 0..<len(blck.message.body.blob_kzg_commitments):
let idx = BlobIndex(i)
Expand Down
Loading
Loading