diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index e6b75ea7b17..974b3a60718 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -119,6 +119,20 @@ jobs: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run operation_pool tests for all known forks run: make test-op-pool + network-minimal-tests: + name: network-minimal-tests + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v3 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - name: Run network tests for all known forks using the minimal spec + run: make test-network-minimal slasher-tests: name: slasher-tests runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index b8486344a20..98a2bfe8cb4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2158,6 +2158,15 @@ dependencies = [ "types", ] +[[package]] +name = "erased-serde" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f2b0c2380453a92ea8b6c8e5f64ecaafccddde8ceab55ff7a8ac1029f894569" +dependencies = [ + "serde", +] + [[package]] name = "errno" version = "0.3.1" @@ -7521,6 +7530,9 @@ name = "slog" version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8347046d4ebd943127157b94d63abb990fcf729dc4e9978927fdf4ac3c998d06" +dependencies = [ + "erased-serde", +] [[package]] name = "slog-async" diff --git a/Makefile b/Makefile index 4d1292d233b..7fb026f6317 100644 --- a/Makefile +++ b/Makefile @@ -106,7 +106,7 @@ build-release-tarballs: # Runs the full workspace tests in **release**, without downloading any additional # test vectors. test-release: - cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher + cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher # Runs the full workspace tests in **debug**, without downloading any additional test # vectors. @@ -143,6 +143,13 @@ test-op-pool-%: --features 'beacon_chain/fork_from_env'\ -p operation_pool +test-network-minimal: $(patsubst %,test-network-minimal-%,$(FORKS)) + +test-network-minimal-%: + env FORK_NAME=$* cargo test --release \ + --features 'fork_from_env,spec-minimal'\ + -p network + # Run the tests in the `slasher` crate for all supported database backends. 
test-slasher: cargo test --release -p slasher --features lmdb diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 992ca830d7c..d410e860db0 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -117,7 +117,7 @@ use tokio_stream::Stream; use tree_hash::TreeHash; use types::beacon_block_body::KzgCommitments; use types::beacon_state::CloneConfig; -use types::blob_sidecar::{BlobIdentifier, BlobSidecarList, Blobs}; +use types::blob_sidecar::{BlobSidecarList, Blobs}; use types::consts::deneb::MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS; use types::*; @@ -185,12 +185,10 @@ pub enum WhenSlotSkipped { #[derive(Debug, PartialEq)] pub enum AvailabilityProcessingStatus { - PendingBlobs(Vec), - PendingBlock(Hash256), + MissingComponents(Slot, Hash256), Imported(Hash256), } -//TODO(sean) using this in tests for now impl TryInto for AvailabilityProcessingStatus { type Error = (); @@ -468,7 +466,7 @@ pub struct BeaconChain { /// The slot at which blocks are downloaded back to. pub genesis_backfill_slot: Slot, pub proposal_blob_cache: BlobCache, - pub data_availability_checker: DataAvailabilityChecker, + pub data_availability_checker: Arc>, pub kzg: Option>, } @@ -1985,8 +1983,7 @@ impl BeaconChain { self: &Arc, blob_sidecar: SignedBlobSidecar, subnet_id: u64, - ) -> Result, BlobError> // TODO(pawan): make a GossipVerifedBlob type - { + ) -> Result, BlobError> { blob_verification::validate_blob_sidecar_for_gossip(blob_sidecar, subnet_id, self) } @@ -2674,7 +2671,24 @@ impl BeaconChain { ) .await { - Ok(_) => imported_blocks += 1, + Ok(status) => { + match status { + AvailabilityProcessingStatus::Imported(_) => { + // The block was imported successfully. + imported_blocks += 1; + } + AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { + warn!(self.log, "Blobs missing in response to range request"; + "block_root" => ?block_root, "slot" => slot); + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::AvailabilityCheck( + AvailabilityCheckError::MissingBlobs, + ), + }; + } + } + } Err(error) => { return ChainSegmentResult::Failed { imported_blocks, @@ -2748,6 +2762,7 @@ impl BeaconChain { count_unrealized: CountUnrealized, ) -> Result> { self.check_availability_and_maybe_import( + blob.slot(), |chain| chain.data_availability_checker.put_gossip_blob(blob), count_unrealized, ) @@ -2804,6 +2819,7 @@ impl BeaconChain { } ExecutedBlock::AvailabilityPending(block) => { self.check_availability_and_maybe_import( + block.block.slot(), |chain| { chain .data_availability_checker @@ -2907,6 +2923,7 @@ impl BeaconChain { /// (i.e., this function is not atomic). 
pub async fn check_availability_and_maybe_import( self: &Arc, + slot: Slot, cache_fn: impl FnOnce(Arc) -> Result, AvailabilityCheckError>, count_unrealized: CountUnrealized, ) -> Result> { @@ -2915,12 +2932,9 @@ impl BeaconChain { Availability::Available(block) => { self.import_available_block(block, count_unrealized).await } - Availability::PendingBlock(block_root) => { - Ok(AvailabilityProcessingStatus::PendingBlock(block_root)) - } - Availability::PendingBlobs(blob_ids) => { - Ok(AvailabilityProcessingStatus::PendingBlobs(blob_ids)) - } + Availability::MissingComponents(block_root) => Ok( + AvailabilityProcessingStatus::MissingComponents(slot, block_root), + ), } } diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 6f4868f14a2..d9abd3343df 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -16,15 +16,17 @@ use eth2::types::BlockContentsTuple; use kzg::Kzg; use slog::{debug, warn}; use ssz_derive::{Decode, Encode}; +use ssz_types::FixedVector; use std::borrow::Cow; +use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, BlobSidecar, BlobSidecarList, ChainSpec, - CloneConfig, Epoch, EthSpec, FullPayload, Hash256, KzgCommitment, RelativeEpoch, - SignedBeaconBlock, SignedBeaconBlockHeader, SignedBlobSidecar, Slot, + BeaconBlockRef, BeaconState, BeaconStateError, BlobSidecar, ChainSpec, CloneConfig, Epoch, + EthSpec, FullPayload, Hash256, KzgCommitment, RelativeEpoch, SignedBeaconBlock, + SignedBeaconBlockHeader, SignedBlobSidecar, Slot, }; #[derive(Debug)] -pub enum BlobError { +pub enum BlobError { /// The blob sidecar is from a slot that is later than the current slot (with respect to the /// gossip clock disparity). /// @@ -96,10 +98,7 @@ pub enum BlobError { /// ## Peer scoring /// /// We cannot process the blob without validating its parent, the peer isn't necessarily faulty. - BlobParentUnknown { - blob_root: Hash256, - blob_parent_root: Hash256, - }, + BlobParentUnknown(Arc>), /// A blob has already been seen for the given `(sidecar.block_root, sidecar.index)` tuple /// over gossip or no gossip sources. @@ -114,13 +113,13 @@ pub enum BlobError { }, } -impl From for BlobError { +impl From for BlobError { fn from(e: BeaconChainError) -> Self { BlobError::BeaconChainError(e) } } -impl From for BlobError { +impl From for BlobError { fn from(e: BeaconStateError) -> Self { BlobError::BeaconChainError(BeaconChainError::BeaconStateError(e)) } @@ -128,27 +127,36 @@ impl From for BlobError { /// A wrapper around a `BlobSidecar` that indicates it has been approved for re-gossiping on /// the p2p network. 
-#[derive(Debug)] +#[derive(Debug, Clone)] pub struct GossipVerifiedBlob { blob: Arc>, } impl GossipVerifiedBlob { + pub fn id(&self) -> BlobIdentifier { + self.blob.id() + } pub fn block_root(&self) -> Hash256 { self.blob.block_root } + pub fn to_blob(self) -> Arc> { + self.blob + } + pub fn slot(&self) -> Slot { + self.blob.slot + } } pub fn validate_blob_sidecar_for_gossip( signed_blob_sidecar: SignedBlobSidecar, subnet: u64, chain: &BeaconChain, -) -> Result, BlobError> { +) -> Result, BlobError> { let blob_slot = signed_blob_sidecar.message.slot; let blob_index = signed_blob_sidecar.message.index; - let block_root = signed_blob_sidecar.message.block_root; let block_parent_root = signed_blob_sidecar.message.block_parent_root; let blob_proposer_index = signed_blob_sidecar.message.proposer_index; + let block_root = signed_blob_sidecar.message.block_root; // Verify that the blob_sidecar was received on the correct subnet. if blob_index != subnet { @@ -211,10 +219,7 @@ pub fn validate_blob_sidecar_for_gossip( }); } } else { - return Err(BlobError::BlobParentUnknown { - blob_root: block_root, - blob_parent_root: block_parent_root, - }); + return Err(BlobError::BlobParentUnknown(signed_blob_sidecar.message)); } // Note: We check that the proposer_index matches against the shuffling first to avoid @@ -366,7 +371,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>( state_root_opt: Option, blob_slot: Slot, spec: &ChainSpec, -) -> Result>, BlobError> { +) -> Result>, BlobError> { let block_epoch = blob_slot.epoch(E::slots_per_epoch()); if state.current_epoch() == block_epoch { @@ -443,19 +448,14 @@ impl KzgVerifiedBlob { /// /// Returns an error if the kzg verification check fails. pub fn verify_kzg_for_blob( - blob: GossipVerifiedBlob, + blob: Arc>, kzg: &Kzg, ) -> Result, AvailabilityCheckError> { //TODO(sean) remove clone - if validate_blob::( - kzg, - blob.blob.blob.clone(), - blob.blob.kzg_commitment, - blob.blob.kzg_proof, - ) - .map_err(AvailabilityCheckError::Kzg)? + if validate_blob::(kzg, blob.blob.clone(), blob.kzg_commitment, blob.kzg_proof) + .map_err(AvailabilityCheckError::Kzg)? { - Ok(KzgVerifiedBlob { blob: blob.blob }) + Ok(KzgVerifiedBlob { blob }) } else { Err(AvailabilityCheckError::KzgVerificationFailed) } @@ -467,7 +467,7 @@ pub fn verify_kzg_for_blob( /// Note: This function should be preferred over calling `verify_kzg_for_blob` /// in a loop since this function kzg verifies a list of blobs more efficiently. 
pub fn verify_kzg_for_blob_list( - blob_list: BlobSidecarList, + blob_list: Vec>>, kzg: &Kzg, ) -> Result, AvailabilityCheckError> { let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list @@ -608,7 +608,16 @@ impl AsBlock for &MaybeAvailableBlock { #[derivative(Hash(bound = "E: EthSpec"))] pub enum BlockWrapper { Block(Arc>), - BlockAndBlobs(Arc>, Vec>>), + BlockAndBlobs(Arc>, FixedBlobSidecarList), +} + +impl BlockWrapper { + pub fn deconstruct(self) -> (Arc>, Option>) { + match self { + BlockWrapper::Block(block) => (block, None), + BlockWrapper::BlockAndBlobs(block, blobs) => (block, Some(blobs)), + } + } } impl AsBlock for BlockWrapper { @@ -675,13 +684,15 @@ impl From> for BlockWrapper { impl From>> for BlockWrapper { fn from(value: BlockContentsTuple>) -> Self { match value.1 { - Some(variable_list) => Self::BlockAndBlobs( - Arc::new(value.0), - Vec::from(variable_list) - .into_iter() - .map(|signed_blob| signed_blob.message) - .collect::>(), - ), + Some(variable_list) => { + let mut blobs = Vec::with_capacity(E::max_blobs_per_block()); + for blob in variable_list { + if blob.message.index < E::max_blobs_per_block() as u64 { + blobs.insert(blob.message.index as usize, Some(blob.message)); + } + } + Self::BlockAndBlobs(Arc::new(value.0), FixedVector::from(blobs)) + } None => Self::Block(Arc::new(value.0)), } } diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index cae52f29de1..a69c6add64d 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -70,7 +70,7 @@ use crate::{ use derivative::Derivative; use eth2::types::EventKind; use execution_layer::PayloadStatus; -use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; +pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; use safe_arith::ArithError; @@ -150,10 +150,7 @@ pub enum BlockError { /// its parent. ParentUnknown(BlockWrapper), /// The block skips too many slots and is a DoS risk. - TooManySkippedSlots { - parent_slot: Slot, - block_slot: Slot, - }, + TooManySkippedSlots { parent_slot: Slot, block_slot: Slot }, /// The block slot is greater than the present slot. /// /// ## Peer scoring @@ -168,10 +165,7 @@ pub enum BlockError { /// ## Peer scoring /// /// The peer has incompatible state transition logic and is faulty. - StateRootMismatch { - block: Hash256, - local: Hash256, - }, + StateRootMismatch { block: Hash256, local: Hash256 }, /// The block was a genesis block, these blocks cannot be re-imported. GenesisBlock, /// The slot is finalized, no need to import. @@ -190,9 +184,7 @@ pub enum BlockError { /// /// It's unclear if this block is valid, but it conflicts with finality and shouldn't be /// imported. - NotFinalizedDescendant { - block_parent_root: Hash256, - }, + NotFinalizedDescendant { block_parent_root: Hash256 }, /// Block is already known, no need to re-import. /// /// ## Peer scoring @@ -205,10 +197,7 @@ pub enum BlockError { /// /// The `proposer` has already proposed a block at this slot. The existing block may or may not /// be equal to the given block. - RepeatProposal { - proposer: u64, - slot: Slot, - }, + RepeatProposal { proposer: u64, slot: Slot }, /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER. /// /// ## Peer scoring @@ -223,10 +212,7 @@ pub enum BlockError { /// ## Peer scoring /// /// The block is invalid and the peer is faulty. 
-    IncorrectBlockProposer {
-        block: u64,
-        local_shuffling: u64,
-    },
+    IncorrectBlockProposer { block: u64, local_shuffling: u64 },
     /// The proposal signature is invalid.
     ///
     /// ## Peer scoring
     ///
     /// The block is invalid and the peer is faulty.
@@ -250,10 +236,7 @@ pub enum BlockError<T: EthSpec> {
     /// ## Peer scoring
     ///
     /// The block is invalid and the peer is faulty.
-    BlockIsNotLaterThanParent {
-        block_slot: Slot,
-        parent_slot: Slot,
-    },
+    BlockIsNotLaterThanParent { block_slot: Slot, parent_slot: Slot },
     /// At least one block in the chain segment did not have its parent root set to the root of
     /// the prior block.
     ///
@@ -309,15 +292,15 @@ pub enum BlockError<T: EthSpec> {
     /// If it's actually our fault (e.g. our execution node database is corrupt) we have bigger
     /// problems to worry about than losing peers, and we're doing the network a favour by
     /// disconnecting.
-    ParentExecutionPayloadInvalid {
-        parent_root: Hash256,
-    },
-    BlobValidation(BlobError),
+    ParentExecutionPayloadInvalid { parent_root: Hash256 },
+    /// A blob alone failed validation.
+    BlobValidation(BlobError<T>),
+    /// The block and blob together failed validation.
     AvailabilityCheck(AvailabilityCheckError),
 }
 
-impl<T: EthSpec> From<BlobError> for BlockError<T> {
-    fn from(e: BlobError) -> Self {
+impl<T: EthSpec> From<BlobError<T>> for BlockError<T> {
+    fn from(e: BlobError<T>) -> Self {
         Self::BlobValidation(e)
     }
 }
@@ -785,21 +768,17 @@ impl<E: EthSpec> AvailabilityPendingExecutedBlock<E> {
     }
 
     pub fn get_all_blob_ids(&self) -> Vec<BlobIdentifier> {
-        self.get_filtered_blob_ids(|_| true)
+        let block_root = self.import_data.block_root;
+        self.block
+            .get_filtered_blob_ids(Some(block_root), |_, _| true)
     }
 
-    pub fn get_filtered_blob_ids(&self, filter: impl Fn(u64) -> bool) -> Vec<BlobIdentifier> {
-        let num_blobs_expected = self.num_blobs_expected();
-        let mut blob_ids = Vec::with_capacity(num_blobs_expected);
-        for i in 0..num_blobs_expected as u64 {
-            if filter(i) {
-                blob_ids.push(BlobIdentifier {
-                    block_root: self.import_data.block_root,
-                    index: i,
-                });
-            }
-        }
-        blob_ids
+    pub fn get_filtered_blob_ids(
+        &self,
+        filter: impl Fn(usize, Hash256) -> bool,
+    ) -> Vec<BlobIdentifier> {
+        self.block
+            .get_filtered_blob_ids(Some(self.import_data.block_root), filter)
     }
 }
diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs
index 9fd3514fd6e..a35e7d615d8 100644
--- a/beacon_node/beacon_chain/src/builder.rs
+++ b/beacon_node/beacon_chain/src/builder.rs
@@ -419,23 +419,14 @@ where
         let weak_subj_block_root = weak_subj_block.canonical_root();
         let weak_subj_state_root = weak_subj_block.state_root();
 
-        // Check that the given block lies on an epoch boundary. Due to the database only storing
+        // Check that the given state lies on an epoch boundary. Due to the database only storing
         // full states on epoch boundaries and at restore points it would be difficult to support
         // starting from a mid-epoch state.
         if weak_subj_slot % TEthSpec::slots_per_epoch() != 0 {
             return Err(format!(
-                "Checkpoint block at slot {} is not aligned to epoch start. \
-                 Please supply an aligned checkpoint with block.slot % 32 == 0",
-                weak_subj_block.slot(),
-            ));
-        }
-
-        // Check that the block and state have consistent slots and state roots.
-        if weak_subj_state.slot() != weak_subj_block.slot() {
-            return Err(format!(
-                "Slot of snapshot block ({}) does not match snapshot state ({})",
-                weak_subj_block.slot(),
-                weak_subj_state.slot(),
+                "Checkpoint state at slot {} is not aligned to epoch start. \
+                 Please supply an aligned checkpoint with state.slot % 32 == 0",
+                weak_subj_slot,
             ));
         }
 
@@ -444,16 +435,21 @@ where
         weak_subj_state
             .build_all_caches(&self.spec)
             .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?;
-
-        let computed_state_root = weak_subj_state
+        weak_subj_state
             .update_tree_hash_cache()
             .map_err(|e| format!("Error computing checkpoint state root: {:?}", e))?;
 
-        if weak_subj_state_root != computed_state_root {
-            return Err(format!(
-                "Snapshot state root does not match block, expected: {:?}, got: {:?}",
-                weak_subj_state_root, computed_state_root
-            ));
+        let latest_block_slot = weak_subj_state.latest_block_header().slot;
+
+        // We can only validate the block root if it exists in the state. We can't calculate it
+        // from the `latest_block_header` because the state root might be set to the zero hash.
+        if let Ok(state_slot_block_root) = weak_subj_state.get_block_root(latest_block_slot) {
+            if weak_subj_block_root != *state_slot_block_root {
+                return Err(format!(
+                    "Snapshot state's most recent block root does not match block, expected: {:?}, got: {:?}",
+                    weak_subj_block_root, state_slot_block_root
+                ));
+            }
         }
 
         // Check that the checkpoint state is for the same network as the genesis state.
@@ -508,13 +504,12 @@ where
         let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot)
             .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?;
 
-        let current_slot = Some(snapshot.beacon_block.slot());
         let fork_choice = ForkChoice::from_anchor(
             fc_store,
             snapshot.beacon_block_root,
             &snapshot.beacon_block,
             &snapshot.beacon_state,
-            current_slot,
+            Some(weak_subj_slot),
             &self.spec,
         )
         .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?;
@@ -891,13 +886,10 @@ where
             validator_monitor: RwLock::new(validator_monitor),
             genesis_backfill_slot,
             //TODO(sean) should we move kzg solely to the da checker?
-            data_availability_checker: DataAvailabilityChecker::new(
-                slot_clock,
-                kzg.clone(),
-                store,
-                self.spec,
-            )
-            .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?,
+            data_availability_checker: Arc::new(
+                DataAvailabilityChecker::new(slot_clock, kzg.clone(), store, self.spec)
+                    .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?,
+            ),
             proposal_blob_cache: BlobCache::default(),
             kzg,
         };
diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs
index e32c7809120..cd2c468a273 100644
--- a/beacon_node/beacon_chain/src/data_availability_checker.rs
+++ b/beacon_node/beacon_chain/src/data_availability_checker.rs
@@ -10,12 +10,14 @@ use kzg::Error as KzgError;
 use kzg::Kzg;
 use slog::{debug, error};
 use slot_clock::SlotClock;
-use ssz_types::{Error, VariableList};
+use ssz_types::{Error, FixedVector, VariableList};
 use state_processing::per_block_processing::deneb::deneb::verify_kzg_commitments_against_transactions;
+use std::collections::HashSet;
 use std::sync::Arc;
+use strum::IntoStaticStr;
 use task_executor::TaskExecutor;
 use types::beacon_block_body::KzgCommitments;
-use types::blob_sidecar::{BlobIdentifier, BlobSidecar};
+use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList};
 use types::consts::deneb::MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS;
 use types::ssz_tagged_signed_beacon_block;
 use types::{
@@ -27,27 +29,29 @@ mod overflow_lru_cache;
 
 pub const OVERFLOW_LRU_CAPACITY: usize = 1024;
 
-#[derive(Debug)]
+#[derive(Debug, IntoStaticStr)]
 pub enum AvailabilityCheckError {
-    DuplicateBlob(Hash256),
     Kzg(KzgError),
-    KzgVerificationFailed,
     KzgNotInitialized,
+    KzgVerificationFailed,
     SszTypes(ssz_types::Error),
-    MissingBlobs,
     NumBlobsMismatch {
         num_kzg_commitments: usize,
         num_blobs: usize,
     },
-    TxKzgCommitmentMismatch,
+    MissingBlobs,
+    TxKzgCommitmentMismatch(String),
     KzgCommitmentMismatch {
         blob_index: u64,
     },
-    Pending,
     IncorrectFork,
     BlobIndexInvalid(u64),
     StoreError(store::Error),
     DecodeError(ssz::DecodeError),
+    BlockBlobRootMismatch {
+        block_root: Hash256,
+        blob_block_root: Hash256,
+    },
 }
 
 impl From<ssz_types::Error> for AvailabilityCheckError {
@@ -86,8 +90,7 @@ pub struct DataAvailabilityChecker<T: BeaconChainTypes> {
 /// to "complete" the requirements for an `AvailableBlock`.
 #[derive(Debug, PartialEq)]
 pub enum Availability<T: EthSpec> {
-    PendingBlobs(Vec<BlobIdentifier>),
-    PendingBlock(Hash256),
+    MissingComponents(Hash256),
     Available(Box<AvailableExecutedBlock<T>>),
 }
 
@@ -119,6 +122,52 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
         })
     }
 
+    pub fn has_block(&self, block_root: &Hash256) -> bool {
+        self.availability_cache.has_block(block_root)
+    }
+
+    pub fn get_missing_blob_ids_checking_cache(
+        &self,
+        block_root: Hash256,
+    ) -> Option<Vec<BlobIdentifier>> {
+        let (block, blob_indices) = self.availability_cache.get_missing_blob_info(block_root);
+        self.get_missing_blob_ids(block_root, block.as_ref(), Some(blob_indices))
+    }
+
+    /// A `None` indicates blobs are not required.
+    ///
+    /// If there's no block, all possible ids will be returned that don't exist in the given blobs.
+    /// If there are no blobs, all possible ids will be returned.
+ pub fn get_missing_blob_ids( + &self, + block_root: Hash256, + block_opt: Option<&Arc>>, + blobs_opt: Option>, + ) -> Option> { + let epoch = self.slot_clock.now()?.epoch(T::EthSpec::slots_per_epoch()); + + self.da_check_required(epoch).then(|| { + block_opt + .map(|block| { + block.get_filtered_blob_ids(Some(block_root), |i, _| { + blobs_opt.as_ref().map_or(true, |blobs| !blobs.contains(&i)) + }) + }) + .unwrap_or_else(|| { + let mut blob_ids = Vec::with_capacity(T::EthSpec::max_blobs_per_block()); + for i in 0..T::EthSpec::max_blobs_per_block() { + if blobs_opt.as_ref().map_or(true, |blobs| !blobs.contains(&i)) { + blob_ids.push(BlobIdentifier { + block_root, + index: i as u64, + }); + } + } + blob_ids + }) + }) + } + /// Get a blob from the availability cache. pub fn get_blob( &self, @@ -127,6 +176,23 @@ impl DataAvailabilityChecker { self.availability_cache.peek_blob(blob_id) } + pub fn put_rpc_blobs( + &self, + block_root: Hash256, + blobs: FixedBlobSidecarList, + ) -> Result, AvailabilityCheckError> { + let mut verified_blobs = vec![]; + if let Some(kzg) = self.kzg.as_ref() { + for blob in blobs.iter().flatten() { + verified_blobs.push(verify_kzg_for_blob(blob.clone(), kzg)?) + } + } else { + return Err(AvailabilityCheckError::KzgNotInitialized); + }; + self.availability_cache + .put_kzg_verified_blobs(block_root, &verified_blobs) + } + /// This first validates the KZG commitments included in the blob sidecar. /// Check if we've cached other blobs for this block. If it completes a set and we also /// have a block cached, return the `Availability` variant triggering block import. @@ -139,13 +205,13 @@ impl DataAvailabilityChecker { ) -> Result, AvailabilityCheckError> { // Verify the KZG commitments. let kzg_verified_blob = if let Some(kzg) = self.kzg.as_ref() { - verify_kzg_for_blob(gossip_blob, kzg)? + verify_kzg_for_blob(gossip_blob.to_blob(), kzg)? } else { return Err(AvailabilityCheckError::KzgNotInitialized); }; self.availability_cache - .put_kzg_verified_blob(kzg_verified_blob) + .put_kzg_verified_blobs(kzg_verified_blob.block_root(), &[kzg_verified_blob]) } /// Check if we have all the blobs for a block. If we do, return the Availability variant that @@ -171,7 +237,8 @@ impl DataAvailabilityChecker { .kzg .as_ref() .ok_or(AvailabilityCheckError::KzgNotInitialized)?; - let verified_blobs = verify_kzg_for_blob_list(VariableList::new(blob_list)?, kzg)?; + let filtered_blobs = blob_list.iter().flatten().cloned().collect(); + let verified_blobs = verify_kzg_for_blob_list(filtered_blobs, kzg)?; Ok(MaybeAvailableBlock::Available( self.check_availability_with_blobs(block, verified_blobs)?, @@ -180,27 +247,6 @@ impl DataAvailabilityChecker { } } - /// Checks if a block is available, returning an error if the block is not immediately available. - /// Does not access the gossip cache. - pub fn try_check_availability( - &self, - block: BlockWrapper, - ) -> Result, AvailabilityCheckError> { - match block { - BlockWrapper::Block(block) => { - let blob_requirements = self.get_blob_requirements(&block)?; - let blobs = match blob_requirements { - BlobRequirements::EmptyBlobs => VerifiedBlobs::EmptyBlobs, - BlobRequirements::NotRequired => VerifiedBlobs::NotRequired, - BlobRequirements::PreDeneb => VerifiedBlobs::PreDeneb, - BlobRequirements::Required => return Err(AvailabilityCheckError::MissingBlobs), - }; - Ok(AvailableBlock { block, blobs }) - } - BlockWrapper::BlockAndBlobs(_, _) => Err(AvailabilityCheckError::Pending), - } - } - /// Verifies a block against a set of KZG verified blobs. 
Returns an AvailableBlock if block's /// commitments are consistent with the provided verified blob commitments. pub fn check_availability_with_blobs( @@ -254,9 +300,11 @@ impl DataAvailabilityChecker { transactions, block_kzg_commitments, ) - .map_err(|_| AvailabilityCheckError::TxKzgCommitmentMismatch)?; + .map_err(|e| AvailabilityCheckError::TxKzgCommitmentMismatch(format!("{e:?}")))?; if !verified { - return Err(AvailabilityCheckError::TxKzgCommitmentMismatch); + return Err(AvailabilityCheckError::TxKzgCommitmentMismatch( + "a commitment and version didn't match".to_string(), + )); } } @@ -410,6 +458,27 @@ pub struct AvailabilityPendingBlock { block: Arc>, } +impl AvailabilityPendingBlock { + pub fn slot(&self) -> Slot { + self.block.slot() + } + pub fn num_blobs_expected(&self) -> usize { + self.block.num_expected_blobs() + } + + pub fn get_all_blob_ids(&self, block_root: Option) -> Vec { + self.block.get_expected_blob_ids(block_root) + } + + pub fn get_filtered_blob_ids( + &self, + block_root: Option, + filter: impl Fn(usize, Hash256) -> bool, + ) -> Vec { + self.block.get_filtered_blob_ids(block_root, filter) + } +} + impl AvailabilityPendingBlock { pub fn to_block(self) -> Arc> { self.block @@ -429,7 +498,7 @@ impl AvailabilityPendingBlock { } /// Verifies an AvailabilityPendingBlock against a set of KZG verified blobs. - /// This does not check whether a block *should* have blobs, these checks should must have been + /// This does not check whether a block *should* have blobs, these checks should have been /// completed when producing the `AvailabilityPendingBlock`. pub fn make_available( self, @@ -485,6 +554,13 @@ impl AvailableBlock { &self.block } + pub fn da_check_required(&self) -> bool { + match self.blobs { + VerifiedBlobs::PreDeneb | VerifiedBlobs::NotRequired => false, + VerifiedBlobs::EmptyBlobs | VerifiedBlobs::Available(_) => true, + } + } + pub fn deconstruct(self) -> (Arc>, Option>) { match self.blobs { VerifiedBlobs::EmptyBlobs | VerifiedBlobs::NotRequired | VerifiedBlobs::PreDeneb => { @@ -542,7 +618,8 @@ impl AsBlock for AvailableBlock { fn into_block_wrapper(self) -> BlockWrapper { let (block, blobs_opt) = self.deconstruct(); if let Some(blobs) = blobs_opt { - BlockWrapper::BlockAndBlobs(block, blobs.to_vec()) + let blobs_vec = blobs.iter().cloned().map(Option::Some).collect::>(); + BlockWrapper::BlockAndBlobs(block, FixedVector::from(blobs_vec)) } else { BlockWrapper::Block(block) } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 4ad5f57eb6d..c9326008553 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -11,7 +11,9 @@ use ssz_derive::{Decode, Encode}; use ssz_types::FixedVector; use std::{collections::HashSet, sync::Arc}; use types::blob_sidecar::BlobIdentifier; -use types::{BlobSidecar, Epoch, EthSpec, Hash256}; +use types::{BlobSidecar, Epoch, EthSpec, Hash256, SignedBeaconBlock}; + +type MissingBlobInfo = (Option>>, HashSet); /// Caches partially available blobs and execution verified blocks corresponding /// to a given `block_hash` that are received over gossip. 
@@ -25,10 +27,12 @@ pub struct PendingComponents { } impl PendingComponents { - pub fn new_from_blob(blob: KzgVerifiedBlob) -> Self { + pub fn new_from_blobs(blobs: &[KzgVerifiedBlob]) -> Self { let mut verified_blobs = FixedVector::<_, _>::default(); - if let Some(mut_maybe_blob) = verified_blobs.get_mut(blob.blob_index() as usize) { - *mut_maybe_blob = Some(blob); + for blob in blobs { + if let Some(mut_maybe_blob) = verified_blobs.get_mut(blob.blob_index() as usize) { + *mut_maybe_blob = Some(blob.clone()); + } } Self { @@ -82,6 +86,20 @@ impl PendingComponents { None }) } + + pub fn get_missing_blob_info(&self) -> MissingBlobInfo { + let block_opt = self + .executed_block + .as_ref() + .map(|block| block.block.block.clone()); + let blobs = self + .verified_blobs + .iter() + .enumerate() + .filter_map(|(i, maybe_blob)| maybe_blob.as_ref().map(|_| i)) + .collect::>(); + (block_opt, blobs) + } } #[derive(Debug, PartialEq)] @@ -193,11 +211,27 @@ impl OverflowStore { Ok(disk_keys) } + pub fn load_block( + &self, + block_root: &Hash256, + ) -> Result>, AvailabilityCheckError> { + let key = OverflowKey::from_block_root(*block_root); + + self.0 + .hot_db + .get_bytes(DBColumn::OverflowLRUCache.as_str(), &key.as_ssz_bytes())? + .map(|block_bytes| { + AvailabilityPendingExecutedBlock::from_ssz_bytes(block_bytes.as_slice()) + }) + .transpose() + .map_err(|e| e.into()) + } + pub fn load_blob( &self, blob_id: &BlobIdentifier, ) -> Result>>, AvailabilityCheckError> { - let key = OverflowKey::from_blob_id::(blob_id.clone())?; + let key = OverflowKey::from_blob_id::(*blob_id)?; self.0 .hot_db @@ -320,6 +354,41 @@ impl OverflowLRUCache { }) } + pub fn has_block(&self, block_root: &Hash256) -> bool { + let read_lock = self.critical.read(); + if read_lock + .in_memory + .peek(block_root) + .map_or(false, |cache| cache.executed_block.is_some()) + { + true + } else if read_lock.store_keys.contains(block_root) { + drop(read_lock); + // If there's some kind of error reading from the store, we should just return false + self.overflow_store + .load_block(block_root) + .map_or(false, |maybe_block| maybe_block.is_some()) + } else { + false + } + } + + pub fn get_missing_blob_info(&self, block_root: Hash256) -> MissingBlobInfo { + let read_lock = self.critical.read(); + if let Some(cache) = read_lock.in_memory.peek(&block_root) { + cache.get_missing_blob_info() + } else if read_lock.store_keys.contains(&block_root) { + drop(read_lock); + // return default if there's an error reading from the store + match self.overflow_store.get_pending_components(block_root) { + Ok(Some(pending_components)) => pending_components.get_missing_blob_info(), + _ => Default::default(), + } + } else { + Default::default() + } + } + pub fn peek_blob( &self, blob_id: &BlobIdentifier, @@ -335,27 +404,39 @@ impl OverflowLRUCache { } } - pub fn put_kzg_verified_blob( + pub fn put_kzg_verified_blobs( &self, - kzg_verified_blob: KzgVerifiedBlob, + block_root: Hash256, + kzg_verified_blobs: &[KzgVerifiedBlob], ) -> Result, AvailabilityCheckError> { + for blob in kzg_verified_blobs { + let blob_block_root = blob.block_root(); + if blob_block_root != block_root { + return Err(AvailabilityCheckError::BlockBlobRootMismatch { + block_root, + blob_block_root, + }); + } + } let mut write_lock = self.critical.write(); - let block_root = kzg_verified_blob.block_root(); let availability = if let Some(mut pending_components) = write_lock.pop_pending_components(block_root, &self.overflow_store)? 
         {
-            let blob_index = kzg_verified_blob.blob_index();
-            *pending_components
-                .verified_blobs
-                .get_mut(blob_index as usize)
-                .ok_or(AvailabilityCheckError::BlobIndexInvalid(blob_index))? =
-                Some(kzg_verified_blob);
+            for kzg_verified_blob in kzg_verified_blobs {
+                let blob_index = kzg_verified_blob.blob_index() as usize;
+                if let Some(maybe_verified_blob) =
+                    pending_components.verified_blobs.get_mut(blob_index)
+                {
+                    *maybe_verified_blob = Some(kzg_verified_blob.clone())
+                } else {
+                    return Err(AvailabilityCheckError::BlobIndexInvalid(blob_index as u64));
+                }
+            }
 
             if let Some(executed_block) = pending_components.executed_block.take() {
                 self.check_block_availability_maybe_cache(
                     write_lock,
-                    block_root,
                     pending_components,
                     executed_block,
                 )?
@@ -365,17 +446,17 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
                     pending_components,
                     &self.overflow_store,
                 )?;
-                Availability::PendingBlock(block_root)
+                Availability::MissingComponents(block_root)
             }
         } else {
             // not in memory or store -> put new in memory
-            let new_pending_components = PendingComponents::new_from_blob(kzg_verified_blob);
+            let new_pending_components = PendingComponents::new_from_blobs(kzg_verified_blobs);
             write_lock.put_pending_components(
                 block_root,
                 new_pending_components,
                 &self.overflow_store,
             )?;
-            Availability::PendingBlock(block_root)
+            Availability::MissingComponents(block_root)
         };
 
         Ok(availability)
@@ -394,7 +475,6 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
         match write_lock.pop_pending_components(block_root, &self.overflow_store)? {
             Some(pending_components) => self.check_block_availability_maybe_cache(
                 write_lock,
-                block_root,
                 pending_components,
                 executed_block,
             )?,
@@ -422,7 +502,7 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
                     new_pending_components,
                     &self.overflow_store,
                 )?;
-                Availability::PendingBlobs(all_blob_ids)
+                Availability::MissingComponents(block_root)
             }
         };
 
@@ -435,11 +515,10 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
     /// Returns an error if there was an error when matching the block commitments against blob commitments.
     ///
     /// Returns `Ok(Availability::Available(_))` if all blobs for the block are present in cache.
-    /// Returns `Ok(Availability::PendingBlobs(_))` if all corresponding blobs have not been received in the cache.
+    /// Returns `Ok(Availability::MissingComponents(_))` if not all corresponding blobs have been received in the cache.
fn check_block_availability_maybe_cache( &self, mut write_lock: RwLockWriteGuard>, - block_root: Hash256, mut pending_components: PendingComponents, executed_block: AvailabilityPendingExecutedBlock, ) -> Result, AvailabilityCheckError> { @@ -451,11 +530,12 @@ impl OverflowLRUCache { payload_verification_outcome, } = executed_block; - let verified_blobs = Vec::from(pending_components.verified_blobs) + let Some(verified_blobs) = Vec::from(pending_components.verified_blobs) .into_iter() .take(num_blobs_expected) - .map(|maybe_blob| maybe_blob.ok_or(AvailabilityCheckError::MissingBlobs)) - .collect::, _>>()?; + .collect::>>() else { + return Ok(Availability::MissingComponents(import_data.block_root)) + }; let available_block = block.make_available(verified_blobs)?; Ok(Availability::Available(Box::new( @@ -466,14 +546,7 @@ impl OverflowLRUCache { ), ))) } else { - let missing_blob_ids = executed_block.get_filtered_blob_ids(|index| { - pending_components - .verified_blobs - .get(index as usize) - .map(|maybe_blob| maybe_blob.is_none()) - .unwrap_or(true) - }); - + let block_root = executed_block.import_data.block_root; let _ = pending_components.executed_block.insert(executed_block); write_lock.put_pending_components( block_root, @@ -481,7 +554,7 @@ impl OverflowLRUCache { &self.overflow_store, )?; - Ok(Availability::PendingBlobs(missing_blob_ids)) + Ok(Availability::MissingComponents(block_root)) } } @@ -1080,7 +1153,7 @@ mod test { ); } else { assert!( - matches!(availability, Availability::PendingBlobs(_)), + matches!(availability, Availability::MissingComponents(_)), "should be pending blobs" ); assert_eq!( @@ -1100,16 +1173,18 @@ mod test { .as_ref() .cloned() .expect("kzg should exist"); + let mut kzg_verified_blobs = Vec::new(); for (blob_index, gossip_blob) in blobs.into_iter().enumerate() { - let kzg_verified_blob = - verify_kzg_for_blob(gossip_blob, kzg.as_ref()).expect("kzg should verify"); + let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref()) + .expect("kzg should verify"); + kzg_verified_blobs.push(kzg_verified_blob); let availability = cache - .put_kzg_verified_blob(kzg_verified_blob) + .put_kzg_verified_blobs(root, kzg_verified_blobs.as_slice()) .expect("should put blob"); if blob_index == blobs_expected - 1 { assert!(matches!(availability, Availability::Available(_))); } else { - assert!(matches!(availability, Availability::PendingBlobs(_))); + assert!(matches!(availability, Availability::MissingComponents(_))); assert_eq!(cache.critical.read().in_memory.len(), 1); } } @@ -1126,15 +1201,17 @@ mod test { "should have expected number of blobs" ); let root = pending_block.import_data.block_root; + let mut kzg_verified_blobs = vec![]; for gossip_blob in blobs { - let kzg_verified_blob = - verify_kzg_for_blob(gossip_blob, kzg.as_ref()).expect("kzg should verify"); + let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref()) + .expect("kzg should verify"); + kzg_verified_blobs.push(kzg_verified_blob); let availability = cache - .put_kzg_verified_blob(kzg_verified_blob) + .put_kzg_verified_blobs(root, kzg_verified_blobs.as_slice()) .expect("should put blob"); assert_eq!( availability, - Availability::PendingBlock(root), + Availability::MissingComponents(root), "should be pending block" ); assert_eq!(cache.critical.read().in_memory.len(), 1); @@ -1270,11 +1347,13 @@ mod test { let blobs_0 = pending_blobs.pop_front().expect("should have blobs"); let expected_blobs = blobs_0.len(); + let mut kzg_verified_blobs = vec![]; for (blob_index, 
gossip_blob) in blobs_0.into_iter().enumerate() {
-            let kzg_verified_blob =
-                verify_kzg_for_blob(gossip_blob, kzg.as_ref()).expect("kzg should verify");
+            let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref())
+                .expect("kzg should verify");
+            kzg_verified_blobs.push(kzg_verified_blob);
             let availability = cache
-                .put_kzg_verified_blob(kzg_verified_blob)
+                .put_kzg_verified_blobs(roots[0], kzg_verified_blobs.as_slice())
                 .expect("should put blob");
             if blob_index == expected_blobs - 1 {
                 assert!(matches!(availability, Availability::Available(_)));
             } else {
@@ -1284,7 +1363,7 @@ mod test {
                     cache.critical.read().in_memory.peek(&roots[0]).is_some(),
                     "first block should be in memory"
                 );
-                assert!(matches!(availability, Availability::PendingBlobs(_)));
+                assert!(matches!(availability, Availability::MissingComponents(_)));
             }
         }
         assert_eq!(
@@ -1360,13 +1439,17 @@ mod test {
         for _ in 0..(n_epochs * capacity) {
             let pending_block = pending_blocks.pop_front().expect("should have block");
+            let mut pending_block_blobs = pending_blobs.pop_front().expect("should have blobs");
+            let block_root = pending_block.block.as_block().canonical_root();
             let expected_blobs = pending_block.num_blobs_expected();
             if expected_blobs > 1 {
                 // might as well add a blob too
-                let mut pending_blobs = pending_blobs.pop_front().expect("should have blobs");
-                let one_blob = pending_blobs.pop().expect("should have at least one blob");
-                let kzg_verified_blob =
-                    verify_kzg_for_blob(one_blob, kzg.as_ref()).expect("kzg should verify");
+                let one_blob = pending_block_blobs
+                    .pop()
+                    .expect("should have at least one blob");
+                let kzg_verified_blob = verify_kzg_for_blob(one_blob.to_blob(), kzg.as_ref())
+                    .expect("kzg should verify");
+                let kzg_verified_blobs = vec![kzg_verified_blob];
                 // generate random boolean
                 let block_first = (rand::random::<usize>() % 2) == 0;
                 if block_first {
                     let availability = cache
                         .put_pending_executed_block(pending_block)
                         .expect("should put block");
                     assert!(
-                        matches!(availability, Availability::PendingBlobs(_)),
+                        matches!(availability, Availability::MissingComponents(_)),
                         "should have pending blobs"
                     );
                     let availability = cache
-                        .put_kzg_verified_blob(kzg_verified_blob)
+                        .put_kzg_verified_blobs(block_root, kzg_verified_blobs.as_slice())
                         .expect("should put blob");
                     assert!(
-                        matches!(availability, Availability::PendingBlobs(_)),
+                        matches!(availability, Availability::MissingComponents(_)),
                         "availability should be pending blobs: {:?}",
                         availability
                     );
                 } else {
                     let availability = cache
-                        .put_kzg_verified_blob(kzg_verified_blob)
+                        .put_kzg_verified_blobs(block_root, kzg_verified_blobs.as_slice())
                         .expect("should put blob");
                     let root = pending_block.block.as_block().canonical_root();
                     assert_eq!(
                         availability,
-                        Availability::PendingBlock(root),
+                        Availability::MissingComponents(root),
                         "should be pending block"
                     );
                     let availability = cache
                         .put_pending_executed_block(pending_block)
                         .expect("should put block");
                     assert!(
-                        matches!(availability, Availability::PendingBlobs(_)),
+                        matches!(availability, Availability::MissingComponents(_)),
                         "should have pending blobs"
                     );
                 }
             } else {
-                // still need to pop front so the blob count is correct
-                pending_blobs.pop_front().expect("should have blobs");
                 let availability = cache
                     .put_pending_executed_block(pending_block)
                     .expect("should put block");
                 assert!(
-                    matches!(availability, Availability::PendingBlobs(_)),
+                    matches!(availability, Availability::MissingComponents(_)),
                     "should be pending blobs"
                 );
             }
@@ -1511,63 +1592,63 @@ mod test {
        let mut remaining_blobs = HashMap::new();
         for _ in 0..(n_epochs * capacity) {
             let pending_block = pending_blocks.pop_front().expect("should have block");
+            let mut pending_block_blobs = pending_blobs.pop_front().expect("should have blobs");
             let block_root = pending_block.block.as_block().canonical_root();
             let expected_blobs = pending_block.num_blobs_expected();
             if expected_blobs > 1 {
                 // might as well add a blob too
-                let mut pending_blobs = pending_blobs.pop_front().expect("should have blobs");
-                let one_blob = pending_blobs.pop().expect("should have at least one blob");
-                let kzg_verified_blob =
-                    verify_kzg_for_blob(one_blob, kzg.as_ref()).expect("kzg should verify");
+                let one_blob = pending_block_blobs
+                    .pop()
+                    .expect("should have at least one blob");
+                let kzg_verified_blob = verify_kzg_for_blob(one_blob.to_blob(), kzg.as_ref())
+                    .expect("kzg should verify");
+                let kzg_verified_blobs = vec![kzg_verified_blob];
                 // generate random boolean
                 let block_first = (rand::random::<usize>() % 2) == 0;
-                remaining_blobs.insert(block_root, pending_blobs);
                 if block_first {
                     let availability = cache
                         .put_pending_executed_block(pending_block)
                         .expect("should put block");
                     assert!(
-                        matches!(availability, Availability::PendingBlobs(_)),
+                        matches!(availability, Availability::MissingComponents(_)),
                         "should have pending blobs"
                     );
                     let availability = cache
-                        .put_kzg_verified_blob(kzg_verified_blob)
+                        .put_kzg_verified_blobs(block_root, kzg_verified_blobs.as_slice())
                         .expect("should put blob");
                     assert!(
-                        matches!(availability, Availability::PendingBlobs(_)),
+                        matches!(availability, Availability::MissingComponents(_)),
                         "availability should be pending blobs: {:?}",
                         availability
                     );
                 } else {
                     let availability = cache
-                        .put_kzg_verified_blob(kzg_verified_blob)
+                        .put_kzg_verified_blobs(block_root, kzg_verified_blobs.as_slice())
                         .expect("should put blob");
                     let root = pending_block.block.as_block().canonical_root();
                     assert_eq!(
                         availability,
-                        Availability::PendingBlock(root),
+                        Availability::MissingComponents(root),
                         "should be pending block"
                     );
                     let availability = cache
                         .put_pending_executed_block(pending_block)
                         .expect("should put block");
                     assert!(
-                        matches!(availability, Availability::PendingBlobs(_)),
+                        matches!(availability, Availability::MissingComponents(_)),
                         "should have pending blobs"
                     );
                 }
             } else {
-                // still need to pop front so the blob count is correct
-                let pending_blobs = pending_blobs.pop_front().expect("should have blobs");
-                remaining_blobs.insert(block_root, pending_blobs);
                 let availability = cache
                     .put_pending_executed_block(pending_block)
                     .expect("should put block");
                 assert!(
-                    matches!(availability, Availability::PendingBlobs(_)),
+                    matches!(availability, Availability::MissingComponents(_)),
                     "should be pending blobs"
                 );
             }
+            remaining_blobs.insert(block_root, pending_block_blobs);
         }
 
         // now we should have a full cache spanning multiple epochs
@@ -1626,18 +1707,20 @@ mod test {
         );
 
         // now lets insert the remaining blobs until the cache is empty
-        for (_, blobs) in remaining_blobs {
+        for (root, blobs) in remaining_blobs {
             let additional_blobs = blobs.len();
+            let mut kzg_verified_blobs = vec![];
             for (i, gossip_blob) in blobs.into_iter().enumerate() {
-                let kzg_verified_blob =
-                    verify_kzg_for_blob(gossip_blob, kzg.as_ref()).expect("kzg should verify");
+                let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref())
+                    .expect("kzg should verify");
+                kzg_verified_blobs.push(kzg_verified_blob);
                 let availability = recovered_cache
-                    .put_kzg_verified_blob(kzg_verified_blob)
+
.put_kzg_verified_blobs(root, kzg_verified_blobs.as_slice()) .expect("should put blob"); if i == additional_blobs - 1 { assert!(matches!(availability, Availability::Available(_))) } else { - assert!(matches!(availability, Availability::PendingBlobs(_))); + assert!(matches!(availability, Availability::MissingComponents(_))); } } } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index fa3287a4efe..74c99f18d92 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -69,7 +69,9 @@ pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; pub use block_verification::{ - get_block_root, BlockError, ExecutedBlock, ExecutionPayloadError, GossipVerifiedBlock, + get_block_root, AvailabilityPendingExecutedBlock, BlockError, ExecutedBlock, + ExecutionPayloadError, GossipVerifiedBlock, IntoExecutionPendingBlock, + PayloadVerificationOutcome, PayloadVerificationStatus, }; pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; diff --git a/beacon_node/beacon_chain/src/observed_blob_sidecars.rs b/beacon_node/beacon_chain/src/observed_blob_sidecars.rs index 3ff1789049e..6c2f07ff593 100644 --- a/beacon_node/beacon_chain/src/observed_blob_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_blob_sidecars.rs @@ -379,7 +379,7 @@ mod tests { // Try adding an out of bounds index let invalid_index = E::max_blobs_per_block() as u64; - let sidecar_d = get_blob_sidecar(0, block_root_a, 4); + let sidecar_d = get_blob_sidecar(0, block_root_a, invalid_index); assert_eq!( cache.observe_sidecar(&sidecar_d), Err(Error::InvalidBlobIndex(invalid_index)), diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 1d28f1c0a10..dbf88def398 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -63,7 +63,7 @@ use types::{typenum::U4294967296, *}; // 4th September 2019 pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690; // Environment variable to read if `fork_from_env` feature is enabled. -const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; +pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; // Default target aggregators to set during testing, this ensures an aggregator at each slot. 
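Editorial note: `FORK_NAME_ENV_VAR` is made `pub` above so crates other than `beacon_chain` (such as the `network` tests wired up in the Makefile) can read the fork selected by `make test-network-minimal-%`. A minimal std-only sketch of that pattern; the helper name is invented for illustration:

use std::env;

// Reads the fork exported by `env FORK_NAME=$* cargo test ...` in the Makefile.
// Panics early so a mis-invoked `fork_from_env` test run fails loudly.
fn fork_name_from_env() -> String {
    env::var("FORK_NAME").expect("FORK_NAME must be set when the fork_from_env feature is enabled")
}

fn main() {
    println!("testing against fork: {}", fork_name_from_env());
}
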
// diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index feea5c7218b..0af59dedfea 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -133,10 +133,13 @@ async fn produces_attestations() { assert_eq!(data.target.root, target_root, "bad target root"); let block_wrapper: BlockWrapper = Arc::new(block.clone()).into(); - let available_block = chain + let beacon_chain::blob_verification::MaybeAvailableBlock::Available(available_block) = chain .data_availability_checker - .try_check_availability(block_wrapper) - .unwrap(); + .check_availability(block_wrapper) + .unwrap() + else { + panic!("block should be available") + }; let early_attestation = { let proto_block = chain @@ -200,11 +203,13 @@ async fn early_attester_cache_old_request() { .unwrap(); let block_wrapper: BlockWrapper = head.beacon_block.clone().into(); - let available_block = harness - .chain + let beacon_chain::blob_verification::MaybeAvailableBlock::Available(available_block) = harness.chain .data_availability_checker - .try_check_availability(block_wrapper) - .unwrap(); + .check_availability(block_wrapper) + .unwrap() + else { + panic!("block should be available") + }; harness .chain diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 64c79ea668b..20f9c29308e 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -6,11 +6,11 @@ edition = "2021" [dev-dependencies] serde_yaml = "0.8.13" -state_processing = { path = "../../consensus/state_processing" } operation_pool = { path = "../operation_pool" } tokio = "1.14.0" [dependencies] +state_processing = { path = "../../consensus/state_processing" } beacon_chain = { path = "../beacon_chain" } store = { path = "../store" } network = { path = "../network" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index e47600ae492..bbd208f95e2 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -28,6 +28,7 @@ use network::{NetworkConfig, NetworkSenders, NetworkService}; use slasher::Slasher; use slasher_service::SlasherService; use slog::{debug, info, warn, Logger}; +use state_processing::per_slot_processing; use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -346,10 +347,23 @@ where None }; - debug!(context.log(), "Downloading finalized block"); - // Find a suitable finalized block on an epoch boundary. - let mut block = remote - .get_beacon_blocks_ssz::(BlockId::Finalized, &spec) + debug!( + context.log(), + "Downloading finalized state"; + ); + let mut state = remote + .get_debug_beacon_states_ssz::(StateId::Finalized, &spec) + .await + .map_err(|e| format!("Error loading checkpoint state from remote: {:?}", e))? 
+ .ok_or_else(|| "Checkpoint state missing from remote".to_string())?; + + debug!(context.log(), "Downloaded finalized state"; "slot" => ?state.slot()); + + let finalized_block_slot = state.latest_block_header().slot; + + debug!(context.log(), "Downloading finalized block"; "block_slot" => ?finalized_block_slot); + let block = remote + .get_beacon_blocks_ssz::(BlockId::Slot(finalized_block_slot), &spec) .await .map_err(|e| match e { ApiError::InvalidSsz(e) => format!( @@ -363,55 +377,15 @@ where debug!(context.log(), "Downloaded finalized block"); - let mut block_slot = block.slot(); - - while block.slot() % slots_per_epoch != 0 { - block_slot = (block_slot / slots_per_epoch - 1) * slots_per_epoch; - - debug!( - context.log(), - "Searching for aligned checkpoint block"; - "block_slot" => block_slot - ); - - if let Some(found_block) = remote - .get_beacon_blocks_ssz::(BlockId::Slot(block_slot), &spec) - .await - .map_err(|e| { - format!("Error fetching block at slot {}: {:?}", block_slot, e) - })? - { - block = found_block; - } + let epoch_boundary_slot = state.slot() % slots_per_epoch; + if epoch_boundary_slot != 0 { + debug!(context.log(), "Advancing state to epoch boundary"; "state_slot" => state.slot(), "epoch_boundary_slot" => epoch_boundary_slot); } - debug!( - context.log(), - "Downloaded aligned finalized block"; - "block_root" => ?block.canonical_root(), - "block_slot" => block.slot(), - ); - - let state_root = block.state_root(); - debug!( - context.log(), - "Downloading finalized state"; - "state_root" => ?state_root - ); - let state = remote - .get_debug_beacon_states_ssz::(StateId::Root(state_root), &spec) - .await - .map_err(|e| { - format!( - "Error loading checkpoint state from remote {:?}: {:?}", - state_root, e - ) - })? - .ok_or_else(|| { - format!("Checkpoint state missing from remote: {:?}", state_root) - })?; - - debug!(context.log(), "Downloaded finalized state"); + while state.slot() % slots_per_epoch != 0 { + per_slot_processing(&mut state, None, &spec) + .map_err(|e| format!("Error advancing state: {:?}", e))?; + } let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; @@ -419,9 +393,9 @@ where info!( context.log(), "Loaded checkpoint block and state"; - "slot" => block.slot(), + "block_slot" => block.slot(), + "state_slot" => state.slot(), "block_root" => ?block.canonical_root(), - "state_root" => ?state_root, ); let service = diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 5e5508b6f8e..96514a15ff8 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -586,7 +586,8 @@ impl ExecutionBlockGenerator { ForkName::Deneb => { // get random number between 0 and Max Blobs let num_blobs = rand::random::() % T::max_blobs_per_block(); - let (bundle, transactions) = self.generate_random_blobs(num_blobs)?; + let kzg = self.kzg.as_ref().ok_or("kzg not initialized")?; + let (bundle, transactions) = generate_random_blobs(num_blobs, kzg)?; for tx in Vec::from(transactions) { execution_payload .transactions_mut() @@ -626,88 +627,82 @@ impl ExecutionBlockGenerator { payload_id: id.map(Into::into), }) } +} - fn generate_random_blobs( - &self, - n_blobs: usize, - ) -> Result<(BlobsBundleV1, Transactions), String> { - let mut bundle = BlobsBundleV1::::default(); - let mut 
transactions = vec![]; - for blob_index in 0..n_blobs { - // fill a vector with random bytes - let mut blob_bytes = [0u8; BYTES_PER_BLOB]; - rand::thread_rng().fill_bytes(&mut blob_bytes); - // Ensure that the blob is canonical by ensuring that - // each field element contained in the blob is < BLS_MODULUS - for i in 0..FIELD_ELEMENTS_PER_BLOB { - blob_bytes[i * BYTES_PER_FIELD_ELEMENT + BYTES_PER_FIELD_ELEMENT - 1] = 0; - } - - let blob = Blob::::new(Vec::from(blob_bytes)) - .map_err(|e| format!("error constructing random blob: {:?}", e))?; - - let commitment = self - .kzg - .as_ref() - .ok_or("kzg not initialized")? - .blob_to_kzg_commitment(blob_bytes.into()) - .map_err(|e| format!("error computing kzg commitment: {:?}", e))?; - - let proof = self - .kzg - .as_ref() - .ok_or("kzg not initialized")? - .compute_blob_kzg_proof(blob_bytes.into(), commitment) - .map_err(|e| format!("error computing kzg proof: {:?}", e))?; - - let versioned_hash = commitment.calculate_versioned_hash(); - - let blob_transaction = BlobTransaction { - chain_id: Default::default(), - nonce: 0, - max_priority_fee_per_gas: Default::default(), - max_fee_per_gas: Default::default(), - gas: 100000, - to: None, - value: Default::default(), - data: Default::default(), - access_list: Default::default(), - max_fee_per_data_gas: Default::default(), - versioned_hashes: vec![versioned_hash].into(), - }; - let bad_signature = EcdsaSignature { - y_parity: false, - r: Uint256::from(0), - s: Uint256::from(0), - }; - let signed_blob_transaction = SignedBlobTransaction { - message: blob_transaction, - signature: bad_signature, - }; - // calculate transaction bytes - let tx_bytes = [BLOB_TX_TYPE] - .into_iter() - .chain(signed_blob_transaction.as_ssz_bytes().into_iter()) - .collect::>(); - let tx = Transaction::::from(tx_bytes); - - transactions.push(tx); - bundle - .blobs - .push(blob) - .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; - bundle - .commitments - .push(commitment) - .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; - bundle - .proofs - .push(proof) - .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; +pub fn generate_random_blobs( + n_blobs: usize, + kzg: &Kzg, +) -> Result<(BlobsBundleV1, Transactions), String> { + let mut bundle = BlobsBundleV1::::default(); + let mut transactions = vec![]; + for blob_index in 0..n_blobs { + // fill a vector with random bytes + let mut blob_bytes = [0u8; BYTES_PER_BLOB]; + rand::thread_rng().fill_bytes(&mut blob_bytes); + // Ensure that the blob is canonical by ensuring that + // each field element contained in the blob is < BLS_MODULUS + for i in 0..FIELD_ELEMENTS_PER_BLOB { + blob_bytes[i * BYTES_PER_FIELD_ELEMENT + BYTES_PER_FIELD_ELEMENT - 1] = 0; } - Ok((bundle, transactions.into())) - } + let blob = Blob::::new(Vec::from(blob_bytes)) + .map_err(|e| format!("error constructing random blob: {:?}", e))?; + + let commitment = kzg + .blob_to_kzg_commitment(blob_bytes.into()) + .map_err(|e| format!("error computing kzg commitment: {:?}", e))?; + + let proof = kzg + .compute_blob_kzg_proof(blob_bytes.into(), commitment) + .map_err(|e| format!("error computing kzg proof: {:?}", e))?; + + let versioned_hash = commitment.calculate_versioned_hash(); + + let blob_transaction = BlobTransaction { + chain_id: Default::default(), + nonce: 0, + max_priority_fee_per_gas: Default::default(), + max_fee_per_gas: Default::default(), + gas: 100000, + to: None, + value: Default::default(), + data: Default::default(), + access_list: 
Default::default(), + max_fee_per_data_gas: Default::default(), + versioned_hashes: vec![versioned_hash].into(), + }; + let bad_signature = EcdsaSignature { + y_parity: false, + r: Uint256::from(0), + s: Uint256::from(0), + }; + let signed_blob_transaction = SignedBlobTransaction { + message: blob_transaction, + signature: bad_signature, + }; + // calculate transaction bytes + let tx_bytes = [BLOB_TX_TYPE] + .into_iter() + .chain(signed_blob_transaction.as_ssz_bytes().into_iter()) + .collect::>(); + let tx = Transaction::::from(tx_bytes); + + transactions.push(tx); + bundle + .blobs + .push(blob) + .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; + bundle + .commitments + .push(commitment) + .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; + bundle + .proofs + .push(proof) + .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; + } + + Ok((bundle, transactions.into())) } fn payload_id_from_u64(n: u64) -> PayloadId { diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 72cd0e81e4e..e30e2a35911 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -24,7 +24,9 @@ use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; use crate::EngineCapabilities; -pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; +pub use execution_block_generator::{ + generate_pow_block, generate_random_blobs, Block, ExecutionBlockGenerator, +}; pub use hook::Hook; pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder}; pub use mock_execution_layer::MockExecutionLayer; diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index d722cf6c9b7..dcbe8097249 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -12,6 +12,7 @@ use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::sync::Arc; use std::time::Duration; +use store::FixedVector; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ @@ -77,8 +78,11 @@ pub async fn publish_block( PubsubMessage::BlobSidecar(Box::new((blob_index as u64, blob))), )?; } - let blobs = signed_blobs.into_iter().map(|blob| blob.message).collect(); - BlockWrapper::BlockAndBlobs(block, blobs) + let blobs = signed_blobs + .into_iter() + .map(|blob| Some(blob.message)) + .collect::>(); + BlockWrapper::BlockAndBlobs(block, FixedVector::from(blobs)) } else { block.into() } @@ -136,17 +140,8 @@ pub async fn publish_block( Ok(()) } - Ok(AvailabilityProcessingStatus::PendingBlock(block_root)) => { - let msg = format!("Missing block with root {:?}", block_root); - error!( - log, - "Invalid block provided to HTTP API"; - "reason" => &msg - ); - Err(warp_utils::reject::broadcast_without_import(msg)) - } - Ok(AvailabilityProcessingStatus::PendingBlobs(blob_ids)) => { - let msg = format!("Missing blobs {:?}", blob_ids); + Ok(AvailabilityProcessingStatus::MissingComponents(_, block_root)) => { + let msg = format!("Missing parts of block with root {:?}", block_root); error!( log, "Invalid block provided to HTTP API"; diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index c9917289944..3673a8a094a 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -19,7 +19,7 @@ store = { path = 
"../store" } lighthouse_network = { path = "../lighthouse_network" } types = { path = "../../consensus/types" } slot_clock = { path = "../../common/slot_clock" } -slog = { version = "2.5.2", features = ["max_level_trace"] } +slog = { version = "2.5.2", features = ["max_level_trace", "nested-values"] } hex = "0.4.2" ethereum_ssz = "0.5.0" ssz_types = "0.5.0" @@ -46,4 +46,8 @@ derivative = "2.2.0" delay_map = "0.3.0" ethereum-types = { version = "0.14.1", optional = true } operation_pool = { path = "../operation_pool" } -execution_layer = { path = "../execution_layer" } \ No newline at end of file +execution_layer = { path = "../execution_layer" } + +[features] +spec-minimal = ["beacon_chain/spec-minimal"] +fork_from_env = ["beacon_chain/fork_from_env"] diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 1f66dc7ad0e..d2473e55f67 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -65,6 +65,7 @@ use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; use tokio::sync::mpsc; use tokio::sync::mpsc::error::TrySendError; +use types::blob_sidecar::FixedBlobSidecarList; use types::{ Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlobSidecar, @@ -121,9 +122,9 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024; /// before we start dropping them. const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024; -/// The maximum number of queued `SignedBeaconBlockAndBlobsSidecar` objects received on gossip that +/// The maximum number of queued `SignedBlobSidecar` objects received on gossip that /// will be stored before we start dropping them. -const MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN: usize = 1_024; +const MAX_GOSSIP_BLOB_QUEUE_LEN: usize = 1_024; /// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but /// within acceptable clock disparity) that will be queued before we start dropping them. @@ -164,6 +165,7 @@ const MAX_SYNC_CONTRIBUTION_QUEUE_LEN: usize = 1024; /// The maximum number of queued `SignedBeaconBlock` objects received from the network RPC that /// will be stored before we start dropping them. const MAX_RPC_BLOCK_QUEUE_LEN: usize = 1_024; +const MAX_RPC_BLOB_QUEUE_LEN: usize = 1_024 * 4; /// The maximum number of queued `Vec` objects received during syncing that will /// be stored before we start dropping them. @@ -233,6 +235,7 @@ pub const GOSSIP_SYNC_CONTRIBUTION: &str = "gossip_sync_contribution"; pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update"; pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; pub const RPC_BLOCK: &str = "rpc_block"; +pub const RPC_BLOB: &str = "rpc_blob"; pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill"; pub const STATUS_PROCESSING: &str = "status_processing"; @@ -628,6 +631,23 @@ impl WorkEvent { } } + pub fn rpc_blobs( + block_root: Hash256, + blobs: FixedBlobSidecarList, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> Self { + Self { + drop_during_sync: false, + work: Work::RpcBlobs { + block_root, + blobs, + seen_timestamp, + process_type, + }, + } + } + /// Create a new work event to import `blocks` as a beacon chain segment. 
pub fn chain_segment( process_id: ChainSegmentProcessId, @@ -927,6 +947,12 @@ pub enum Work { process_type: BlockProcessType, should_process: bool, }, + RpcBlobs { + block_root: Hash256, + blobs: FixedBlobSidecarList, + seen_timestamp: Duration, + process_type: BlockProcessType, + }, ChainSegment { process_id: ChainSegmentProcessId, blocks: Vec>, @@ -986,6 +1012,7 @@ impl Work { Work::GossipLightClientFinalityUpdate { .. } => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE, Work::GossipLightClientOptimisticUpdate { .. } => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, Work::RpcBlock { .. } => RPC_BLOCK, + Work::RpcBlobs { .. } => RPC_BLOB, Work::ChainSegment { process_id: ChainSegmentProcessId::BackSyncBatchId { .. }, .. @@ -1148,11 +1175,11 @@ impl BeaconProcessor { // Using a FIFO queue since blocks need to be imported sequentially. let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN); + let mut rpc_blob_queue = FifoQueue::new(MAX_RPC_BLOB_QUEUE_LEN); let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); - let mut gossip_block_and_blobs_sidecar_queue = - FifoQueue::new(MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN); + let mut gossip_blob_queue = FifoQueue::new(MAX_GOSSIP_BLOB_QUEUE_LEN); let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN); let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); @@ -1302,6 +1329,8 @@ impl BeaconProcessor { // evolves. } else if let Some(item) = rpc_block_queue.pop() { self.spawn_worker(item, toolbox); + } else if let Some(item) = rpc_blob_queue.pop() { + self.spawn_worker(item, toolbox); // Check delayed blocks before gossip blocks, the gossip blocks might rely // on the delayed ones. } else if let Some(item) = delayed_block_queue.pop() { @@ -1310,7 +1339,7 @@ impl BeaconProcessor { // required to verify some attestations. } else if let Some(item) = gossip_block_queue.pop() { self.spawn_worker(item, toolbox); - } else if let Some(item) = gossip_block_and_blobs_sidecar_queue.pop() { + } else if let Some(item) = gossip_blob_queue.pop() { self.spawn_worker(item, toolbox); // Check the aggregates, *then* the unaggregates since we assume that // aggregates are more valuable to local validators and effectively give us @@ -1526,7 +1555,7 @@ impl BeaconProcessor { gossip_block_queue.push(work, work_id, &self.log) } Work::GossipSignedBlobSidecar { .. } => { - gossip_block_and_blobs_sidecar_queue.push(work, work_id, &self.log) + gossip_blob_queue.push(work, work_id, &self.log) } Work::DelayedImportBlock { .. } => { delayed_block_queue.push(work, work_id, &self.log) @@ -1551,6 +1580,7 @@ impl BeaconProcessor { optimistic_update_queue.push(work, work_id, &self.log) } Work::RpcBlock { .. } => rpc_block_queue.push(work, work_id, &self.log), + Work::RpcBlobs { .. } => rpc_blob_queue.push(work, work_id, &self.log), Work::ChainSegment { ref process_id, .. } => match process_id { ChainSegmentProcessId::RangeBatchId { .. } | ChainSegmentProcessId::ParentLookup { .. 
} => { @@ -1620,6 +1650,10 @@ impl BeaconProcessor { &metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL, rpc_block_queue.len() as i64, ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL, + rpc_blob_queue.len() as i64, + ); metrics::set_gauge( &metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL, chain_segment_queue.len() as i64, @@ -1977,6 +2011,17 @@ impl BeaconProcessor { duplicate_cache, should_process, )), + Work::RpcBlobs { + block_root, + blobs, + seen_timestamp, + process_type, + } => task_spawner.spawn_async(worker.process_rpc_blobs( + block_root, + blobs, + seen_timestamp, + process_type, + )), /* * Verification for a chain segment (multiple blocks). */ diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index dfeb7420cff..478ee9e67ec 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -14,8 +14,7 @@ use super::MAX_SCHEDULED_WORK_QUEUE_LEN; use crate::beacon_processor::{ChainSegmentProcessId, Work, WorkEvent}; use crate::metrics; use crate::sync::manager::BlockProcessType; -use beacon_chain::blob_verification::AsBlock; -use beacon_chain::blob_verification::BlockWrapper; +use beacon_chain::blob_verification::{AsBlock, BlockWrapper}; use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use fnv::FnvHashMap; use futures::task::Poll; diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 6d8cba105a0..b000a36eb0f 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -682,19 +682,15 @@ impl Worker { } Err(err) => { match err { - BlobError::BlobParentUnknown { - blob_root, - blob_parent_root, - } => { + BlobError::BlobParentUnknown(blob) => { debug!( self.log, "Unknown parent hash for blob"; "action" => "requesting parent", - "blob_root" => %blob_root, - "parent_root" => %blob_parent_root + "blob_root" => %blob.block_root, + "parent_root" => %blob.block_parent_root ); - // TODO: send blob to reprocessing queue and queue a sync request for the blob. - todo!(); + self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob)); } BlobError::ProposerSignatureInvalid | BlobError::UnknownValidator(_) @@ -757,28 +753,42 @@ impl Worker { // This value is not used presently, but it might come in handy for debugging. 
_seen_duration: Duration, ) { - // TODO + let blob_root = verified_blob.block_root(); + let blob_slot = verified_blob.slot(); + let blob_clone = verified_blob.clone().to_blob(); match self .chain .process_blob(verified_blob, CountUnrealized::True) .await { Ok(AvailabilityProcessingStatus::Imported(_hash)) => { - todo!() - // add to metrics - // logging + //TODO(sean) add metrics and logging + self.chain.recompute_head_at_current_slot().await; } - Ok(AvailabilityProcessingStatus::PendingBlobs(pending_blobs)) => self - .send_sync_message(SyncMessage::UnknownBlobHash { - peer_id, - pending_blobs, - }), - Ok(AvailabilityProcessingStatus::PendingBlock(block_hash)) => { - self.send_sync_message(SyncMessage::UnknownBlockHash(peer_id, block_hash)); + Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_hash)) => { + self.send_sync_message(SyncMessage::MissingGossipBlockComponents( + slot, peer_id, block_hash, + )); } - Err(_err) => { - // handle errors - todo!() + Err(err) => { + debug!( + self.log, + "Invalid gossip blob"; + "outcome" => ?err, + "block root" => ?blob_root, + "block slot" => blob_slot, + "blob index" => blob_clone.index, + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "bad_gossip_blob_ssz", + ); + trace!( + self.log, + "Invalid gossip blob ssz"; + "ssz" => format_args!("0x{}", hex::encode(blob_clone.as_ssz_bytes())), + ); } } } @@ -918,16 +928,13 @@ impl Worker { verified_block } - Err(BlockError::AvailabilityCheck(_err)) => { - todo!() - } Err(BlockError::ParentUnknown(block)) => { debug!( self.log, "Unknown parent for gossip block"; "root" => ?block_root ); - self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block, block_root)); + self.send_sync_message(SyncMessage::UnknownParentBlock(peer_id, block, block_root)); return None; } Err(e @ BlockError::BeaconChainError(_)) => { @@ -987,8 +994,8 @@ impl Worker { ); return None; } - Err(e @ BlockError::BlobValidation(_)) => { - warn!(self.log, "Could not verify blob for gossip. Rejecting the block and blob"; + Err(e @ BlockError::BlobValidation(_)) | Err(e @ BlockError::AvailabilityCheck(_)) => { + warn!(self.log, "Could not verify block against known blobs in gossip. Rejecting the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); self.gossip_penalize_peer( @@ -1132,23 +1139,13 @@ impl Worker { self.chain.recompute_head_at_current_slot().await; } - Ok(AvailabilityProcessingStatus::PendingBlock(block_root)) => { - // This error variant doesn't make any sense in this context - crit!( - self.log, - "Internal error. 
Cannot get AvailabilityProcessingStatus::PendingBlock on processing block"; - "block_root" => %block_root - ); - } - Ok(AvailabilityProcessingStatus::PendingBlobs(pending_blobs)) => { + Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => { // make rpc request for blob - self.send_sync_message(SyncMessage::UnknownBlobHash { + self.send_sync_message(SyncMessage::MissingGossipBlockComponents( + *slot, peer_id, - pending_blobs: pending_blobs.to_vec(), - }); - } - Err(BlockError::AvailabilityCheck(_)) => { - todo!() + *block_root, + )); } Err(BlockError::ParentUnknown(block)) => { // Inform the sync manager to find parents for this block @@ -1158,7 +1155,7 @@ impl Worker { "Block with unknown parent attempted to be processed"; "peer_id" => %peer_id ); - self.send_sync_message(SyncMessage::UnknownBlock( + self.send_sync_message(SyncMessage::UnknownParentBlock( peer_id, block.clone(), block_root, @@ -1997,7 +1994,10 @@ impl Worker { // We don't know the block, get the sync manager to handle the block lookup, and // send the attestation to be scheduled for re-processing. self.sync_tx - .send(SyncMessage::UnknownBlockHash(peer_id, *beacon_block_root)) + .send(SyncMessage::UnknownBlockHashFromAttestation( + peer_id, + *beacon_block_root, + )) .unwrap_or_else(|_| { warn!( self.log, diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 5a33650b6ee..206b1edcab7 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -5,7 +5,7 @@ use crate::beacon_processor::work_reprocessing_queue::QueuedRpcBlock; use crate::beacon_processor::worker::FUTURE_SLOT_TOLERANCE; use crate::beacon_processor::DuplicateCache; use crate::metrics; -use crate::sync::manager::{BlockProcessType, SyncMessage}; +use crate::sync::manager::{BlockProcessType, ResponseType, SyncMessage}; use crate::sync::{BatchProcessResult, ChainId}; use beacon_chain::blob_verification::BlockWrapper; use beacon_chain::blob_verification::{AsBlock, MaybeAvailableBlock}; @@ -21,6 +21,7 @@ use slog::{debug, error, info, warn}; use slot_clock::SlotClock; use std::time::{SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; +use types::blob_sidecar::FixedBlobSidecarList; use types::{Epoch, Hash256}; /// Id associated to a batch processing request, either a sync batch or a parent lookup. @@ -57,9 +58,10 @@ impl Worker { ) { if !should_process { // Sync handles these results - self.send_sync_message(SyncMessage::BlockProcessed { + self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type, - result: crate::sync::manager::BlockProcessResult::Ignored, + result: crate::sync::manager::BlockProcessingResult::Ignored, + response_type: crate::sync::manager::ResponseType::Block, }); return; } @@ -180,7 +182,8 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); // RPC block imported, regardless of process type - //TODO(sean) handle pending availability variants + //TODO(sean) do we need to do anything here for missing blobs? or is passing the result + // along to sync enough? 
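// A hedged sketch (assumed handler shape, not code from this PR) of how the sync side can
// react once this result is forwarded below as `BlockComponentProcessed` with
// `ResponseType::Block`; `request_missing_blobs` and `handle_block_processing_result` are
// hypothetical helpers:
//
//     match result {
//         BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents(
//             slot,
//             block_root,
//         )) => {
//             // The block checked out but its blobs are outstanding: fetch them by root.
//             self.request_missing_blobs(slot, block_root);
//         }
//         other => self.handle_block_processing_result(other),
//     }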
if let &Ok(AvailabilityProcessingStatus::Imported(hash)) = &result { info!(self.log, "New RPC block received"; "slot" => slot, "hash" => %hash); @@ -205,15 +208,50 @@ impl Worker { } } // Sync handles these results - self.send_sync_message(SyncMessage::BlockProcessed { + self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type, result: result.into(), + response_type: ResponseType::Block, }); // Drop the handle to remove the entry from the cache drop(handle); } + pub async fn process_rpc_blobs( + self, + block_root: Hash256, + blobs: FixedBlobSidecarList, + _seen_timestamp: Duration, + process_type: BlockProcessType, + ) { + let Some(slot) = blobs.iter().find_map(|blob|{ + blob.as_ref().map(|blob| blob.slot) + }) else { + return; + }; + + let result = self + .chain + .check_availability_and_maybe_import( + slot, + |chain| { + chain + .data_availability_checker + .put_rpc_blobs(block_root, blobs) + }, + CountUnrealized::True, + ) + .await; + + // Sync handles these results + self.send_sync_message(SyncMessage::BlockComponentProcessed { + process_type, + result: result.into(), + response_type: ResponseType::Blob, + }); + } + /// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync /// thread if more blocks are needed to process it. pub async fn process_chain_segment( diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 08708777712..e9a4d1e3cbe 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -453,8 +453,8 @@ impl Router { } id @ (SyncId::BackFillBlocks { .. } | SyncId::RangeBlocks { .. } - | SyncId::BackFillBlobs { .. } - | SyncId::RangeBlobs { .. }) => id, + | SyncId::BackFillBlockAndBlobs { .. } + | SyncId::RangeBlockAndBlobs { .. }) => id, }, RequestId::Router => unreachable!("All BBRange requests belong to sync"), }; @@ -512,8 +512,8 @@ impl Router { id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id, SyncId::BackFillBlocks { .. } | SyncId::RangeBlocks { .. } - | SyncId::RangeBlobs { .. } - | SyncId::BackFillBlobs { .. } => { + | SyncId::RangeBlockAndBlobs { .. } + | SyncId::BackFillBlockAndBlobs { .. } => { unreachable!("Batch syncing do not request BBRoot requests") } }, @@ -545,8 +545,8 @@ impl Router { id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id, SyncId::BackFillBlocks { .. } | SyncId::RangeBlocks { .. } - | SyncId::RangeBlobs { .. } - | SyncId::BackFillBlobs { .. } => { + | SyncId::RangeBlockAndBlobs { .. } + | SyncId::BackFillBlockAndBlobs { .. } => { unreachable!("Batch syncing does not request BBRoot requests") } }, diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index b4f52df39d3..02313efbf97 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -151,7 +151,7 @@ impl AttestationService { } /// Return count of all currently subscribed subnets (long-lived **and** short-lived). - #[cfg(test)] + #[cfg(all(test, feature = "spec-mainnet"))] pub fn subscription_count(&self) -> usize { if self.subscribe_all_subnets { self.beacon_chain.spec.attestation_subnet_count as usize @@ -167,7 +167,7 @@ impl AttestationService { } /// Returns whether we are subscribed to a subnet for testing purposes. 
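// Context for the cfg changes below, inferred from this diff's Makefile and
// network/Cargo.toml hunks: the network crate's tests can now be compiled against the
// minimal preset (`spec-minimal`), so test helpers that bake in mainnet subnet constants
// are gated to mainnet-spec test builds only.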
- #[cfg(test)] + #[cfg(all(test, feature = "spec-mainnet"))] pub(crate) fn is_subscribed( &self, subnet_id: &SubnetId, @@ -179,7 +179,7 @@ impl AttestationService { } } - #[cfg(test)] + #[cfg(all(test, feature = "spec-mainnet"))] pub(crate) fn long_lived_subscriptions(&self) -> &HashSet { &self.long_lived_subscriptions } diff --git a/beacon_node/network/src/subnet_service/sync_subnets.rs b/beacon_node/network/src/subnet_service/sync_subnets.rs index eda7ce8efbd..982962b6bab 100644 --- a/beacon_node/network/src/subnet_service/sync_subnets.rs +++ b/beacon_node/network/src/subnet_service/sync_subnets.rs @@ -91,7 +91,7 @@ impl SyncCommitteeService { } /// Return count of all currently subscribed subnets. - #[cfg(test)] + #[cfg(all(test, feature = "spec-mainnet"))] pub fn subscription_count(&self) -> usize { use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; if self.subscribe_all_subnets { diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 3b8c89a442e..58a882b4d8a 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -1,3 +1,4 @@ +#![cfg(feature = "spec-mainnet")] use super::*; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, diff --git a/beacon_node/network/src/sync/block_lookups/delayed_lookup.rs b/beacon_node/network/src/sync/block_lookups/delayed_lookup.rs new file mode 100644 index 00000000000..a2a44ecdd5d --- /dev/null +++ b/beacon_node/network/src/sync/block_lookups/delayed_lookup.rs @@ -0,0 +1,84 @@ +use crate::sync::SyncMessage; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use slog::{crit, warn}; +use slot_clock::SlotClock; +use std::sync::Arc; +use tokio::sync::mpsc; +use tokio::time::interval_at; +use tokio::time::Instant; +use types::Hash256; + +#[derive(Debug)] +pub enum DelayedLookupMessage { + /// A lookup for all components of a block or blob seen over gossip. + MissingComponents(Hash256), +} + +/// This service is responsible for collecting lookup messages and sending them back to sync +/// for processing after a short delay. +/// +/// We want to delay lookups triggered from gossip for the following reasons: +/// +/// - We only want to make one request for components we are unlikely to see on gossip. This means +/// we don't have to repeatedly update our RPC request's state as we receive gossip components. +/// +/// - We are likely to receive blocks/blobs over gossip more quickly than we could via an RPC request. +/// +/// - Delaying a lookup means we are less likely to simultaneously download the same blocks/blobs +/// over gossip and RPC. +/// +/// - We would prefer to request peers based on whether we've seen them attest, because this gives +/// us an idea about whether they *should* have the block/blobs we're missing. This is because a +/// node should not attest to a block unless it has all the blobs for that block. This gives us a +/// stronger basis for peer scoring. 
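// A minimal usage sketch, assuming the bounded sender half (here called
// `delayed_lookups_send`) that pairs with the `delayed_lookups_recv` parameter below; a
// gossip handler that notices missing components would enqueue:
//
//     if delayed_lookups_send
//         .try_send(DelayedLookupMessage::MissingComponents(block_root))
//         .is_err()
//     {
//         warn!(log, "Delayed lookup dropped for block"; "block_root" => ?block_root);
//     }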
+pub fn spawn_delayed_lookup_service( + executor: &task_executor::TaskExecutor, + beacon_chain: Arc>, + mut delayed_lookups_recv: mpsc::Receiver, + sync_send: mpsc::UnboundedSender>, + log: slog::Logger, +) { + executor.spawn( + async move { + let slot_duration = beacon_chain.slot_clock.slot_duration(); + let delay = beacon_chain.slot_clock.single_lookup_delay(); + let interval_start = match ( + beacon_chain.slot_clock.duration_to_next_slot(), + beacon_chain.slot_clock.seconds_from_current_slot_start(), + ) { + (Some(duration_to_next_slot), Some(seconds_from_current_slot_start)) => { + let duration_until_start = if seconds_from_current_slot_start > delay { + duration_to_next_slot + delay + } else { + delay - seconds_from_current_slot_start + }; + tokio::time::Instant::now() + duration_until_start + } + _ => { + crit!(log, + "Failed to read slot clock, delayed lookup service timing will be inaccurate.\ + This may degrade performance" + ); + Instant::now() + } + }; + + let mut interval = interval_at(interval_start, slot_duration); + loop { + interval.tick().await; + while let Ok(msg) = delayed_lookups_recv.try_recv() { + match msg { + DelayedLookupMessage::MissingComponents(block_root) => { + if let Err(e) = sync_send + .send(SyncMessage::MissingGossipBlockComponentsDelayed(block_root)) + { + warn!(log, "Failed to send delayed lookup message"; "error" => ?e); + } + } + } + } + } + }, + "delayed_lookups", + ); +} diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 77e659a2682..f352f882a1f 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -1,73 +1,139 @@ -use std::collections::hash_map::Entry; -use std::collections::HashMap; -use std::time::Duration; - -use beacon_chain::blob_verification::AsBlock; -use beacon_chain::blob_verification::BlockWrapper; -use beacon_chain::{BeaconChainTypes, BlockError}; -use fnv::FnvHashMap; -use lighthouse_network::rpc::{RPCError, RPCResponseErrorCode}; +use beacon_chain::blob_verification::{AsBlock, BlockWrapper}; +use beacon_chain::data_availability_checker::{AvailabilityCheckError, DataAvailabilityChecker}; +use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError}; +use lighthouse_network::rpc::RPCError; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; use slog::{debug, error, trace, warn, Logger}; use smallvec::SmallVec; +use std::collections::HashMap; +use std::fmt::Debug; +use std::sync::Arc; +use std::time::Duration; use store::Hash256; - -use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent}; -use crate::metrics; +use strum::Display; +use types::blob_sidecar::FixedBlobSidecarList; +use types::{BlobSidecar, SignedBeaconBlock, Slot}; use self::parent_lookup::PARENT_FAIL_TOLERANCE; -use self::{ - parent_lookup::{ParentLookup, VerifyError}, - single_block_lookup::SingleBlockRequest, -}; - -use super::manager::BlockProcessResult; +use self::parent_lookup::{ParentLookup, ParentVerifyError}; +use self::single_block_lookup::{LookupVerifyError, SingleBlockLookup}; +use super::manager::BlockProcessingResult; use super::BatchProcessResult; use super::{ manager::{BlockProcessType, Id}, network_context::SyncNetworkContext, }; +use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent}; +use crate::metrics; +use crate::sync::block_lookups::single_block_lookup::LookupId; +pub use single_block_lookup::UnknownParentComponents; +pub(crate) mod delayed_lookup; mod 
parent_lookup; mod single_block_lookup; #[cfg(test)] mod tests; -pub type RootBlockTuple = (Hash256, BlockWrapper); +pub type DownloadedBlocks = (Hash256, BlockWrapper); +pub type RootBlockTuple = (Hash256, Arc>); +pub type RootBlobsTuple = (Hash256, FixedBlobSidecarList); const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; -/// This is used to resolve the scenario where we request a parent from before the data availability -/// boundary and need to retry with a request for only the block. -pub enum ForceBlockRequest { - True, - False, -} - pub(crate) struct BlockLookups { /// Parent chain lookups being downloaded. parent_lookups: SmallVec<[ParentLookup; 3]>, processing_parent_lookups: - HashMap, SingleBlockRequest)>, + HashMap, SingleBlockLookup)>, /// A cache of failed chain lookups to prevent duplicate searches. failed_chains: LRUTimeCache, - /// A collection of block hashes being searched for and a flag indicating if a result has been - /// received or not. - /// - /// The flag allows us to determine if the peer returned data or sent us nothing. - single_block_lookups: FnvHashMap>, + single_block_lookups: Vec>, + + da_checker: Arc>, /// The logger for the import manager. log: Logger, } +pub type BlockRequestId = Id; +pub type BlobRequestId = Id; + +#[derive(Debug, PartialEq)] +enum StreamTerminator { + True, + False, +} + +impl From for StreamTerminator { + fn from(value: bool) -> Self { + if value { + StreamTerminator::True + } else { + StreamTerminator::False + } + } +} + +/// Used to track block or blob responses in places we want to reduce code duplication in +/// response handling. +// NOTE: a better solution may be to wrap request `Id` in an enum. +#[derive(Debug, Copy, Clone)] +pub enum ResponseType { + Block, + Blob, +} + +/// This enum is used to track what a peer *should* be able to respond with, based on +/// other messages we've seen from this peer on the network. This is useful for peer scoring. +/// We expect a peer tracked by the `BlockAndBlobs` variant to be able to respond to all +/// components of a block. This peer has either sent an attestation for the requested block +/// or has forwarded a block or blob that is a descendant of the requested block. An honest node +/// should not attest unless it has all components of a block, and it should not forward +/// messages if it does not have all components of the parent block. A peer tracked by the +/// `Neither` variant has likely just sent us a block or blob over gossip, in which case we +/// can't know whether the peer has all components of the block, and could be acting honestly +/// by forwarding a message without any other block components. +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy, Display)] +pub enum PeerShouldHave { + BlockAndBlobs(PeerId), + Neither(PeerId), +} + +impl PeerShouldHave { + fn as_peer_id(&self) -> &PeerId { + match self { + PeerShouldHave::BlockAndBlobs(id) => id, + PeerShouldHave::Neither(id) => id, + } + } + fn to_peer_id(self) -> PeerId { + match self { + PeerShouldHave::BlockAndBlobs(id) => id, + PeerShouldHave::Neither(id) => id, + } + } + fn should_have_block(&self) -> bool { + match self { + PeerShouldHave::BlockAndBlobs(_) => true, + PeerShouldHave::Neither(_) => false, + } + } +} + +/// Tracks the conditions under which we want to drop a parent or single block lookup.
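// A sketch of the scoring split that the `PeerShouldHave` variants above and the
// `ShouldRemoveLookup` flag below are meant to drive (the real penalty calls live in the
// error paths further down; the action level and message here are illustrative only):
//
//     if peer.should_have_block() {
//         // An empty response from a peer that should have the data is punishable.
//         cx.report_peer(peer.to_peer_id(), PeerAction::LowToleranceError, "no_response");
//     } else {
//         // A `Neither` peer may honestly lack the data; just stop asking it.
//         lookup.remove_peer_if_useless(peer.as_peer_id(), response_type);
//     }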
+#[derive(Debug, Copy, Clone)] +pub enum ShouldRemoveLookup { + True, + False, +} + impl BlockLookups { - pub fn new(log: Logger) -> Self { + pub fn new(da_checker: Arc>, log: Logger) -> Self { Self { parent_lookups: Default::default(), processing_parent_lookups: Default::default(), @@ -75,88 +141,199 @@ impl BlockLookups { FAILED_CHAINS_CACHE_EXPIRY_SECONDS, )), single_block_lookups: Default::default(), + da_checker, log, } } /* Lookup requests */ + /// Creates a lookup for the block with the given `block_root` and immediately triggers it. + pub fn search_block( + &mut self, + block_root: Hash256, + peer_source: PeerShouldHave, + cx: &mut SyncNetworkContext, + ) { + let lookup = self.search_block_with(block_root, None, &[peer_source]); + if let Some(lookup) = lookup { + self.trigger_single_lookup(lookup, cx); + } + } + /// Creates a lookup for the block with the given `block_root`. + /// + /// The request is not immediately triggered, and should be triggered by a call to + /// `trigger_lookup_by_root`. + pub fn search_block_delayed(&mut self, block_root: Hash256, peer_source: PeerShouldHave) { + let lookup = self.search_block_with(block_root, None, &[peer_source]); + if let Some(lookup) = lookup { + self.add_single_lookup(lookup) + } + } + + /// Creates a lookup for the block with the given `block_root`, while caching other block + /// components we've already received. The block components are cached here because we haven't + /// imported its parent and therefore can't fully validate it and store it in the data + /// availability cache. + /// + /// The request is immediately triggered. + pub fn search_child_block( + &mut self, + block_root: Hash256, + parent_components: Option>, + peer_source: &[PeerShouldHave], + cx: &mut SyncNetworkContext, + ) { + let lookup = self.search_block_with(block_root, parent_components, peer_source); + if let Some(lookup) = lookup { + self.trigger_single_lookup(lookup, cx); + } + } + + /// Creates a lookup for the block with the given `block_root`, while caching other block + /// components we've already received. The block components are cached here because we haven't + /// imported its parent and therefore can't fully validate it and store it in the data + /// availability cache. + /// + /// The request is not immediately triggered, and should be triggered by a call to + /// `trigger_lookup_by_root`. + pub fn search_child_delayed( + &mut self, + block_root: Hash256, + parent_components: Option>, + peer_source: &[PeerShouldHave], + ) { + let lookup = self.search_block_with(block_root, parent_components, peer_source); + if let Some(lookup) = lookup { + self.add_single_lookup(lookup) + } + } + + /// Attempts to trigger the request matching the given `block_root`.
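// A brief usage sketch of the search family above (the real call sites live in the sync
// manager, assumed here): gossip-triggered lookups take the delayed path so the remaining
// components still have a chance to arrive over gossip, while attestation-triggered
// lookups fire immediately:
//
//     // A blob arrived over gossip but its block is missing:
//     lookups.search_block_delayed(block_root, PeerShouldHave::Neither(peer_id));
//     // A peer attested to this root, so it should have every component:
//     lookups.search_block(block_root, PeerShouldHave::BlockAndBlobs(peer_id), &mut cx);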
+ pub fn trigger_single_lookup( + &mut self, + mut single_block_lookup: SingleBlockLookup, + cx: &mut SyncNetworkContext, + ) { + if !single_block_lookup.triggered && single_block_lookup.request_block_and_blobs(cx).is_ok() + { + single_block_lookup.triggered = true; + self.add_single_lookup(single_block_lookup) + } + } + + pub fn add_single_lookup( + &mut self, + single_block_lookup: SingleBlockLookup, + ) { + self.single_block_lookups.push(single_block_lookup); + + metrics::set_gauge( + &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, + self.single_block_lookups.len() as i64, + ); + } + + pub fn trigger_lookup_by_root( + &mut self, + block_root: Hash256, + cx: &mut SyncNetworkContext, + ) -> Result<(), ()> { + for lookup in self.single_block_lookups.iter_mut() { + if lookup.block_request_state.requested_block_root == block_root && !lookup.triggered { + lookup.request_block_and_blobs(cx)?; + lookup.triggered = true; + } + } + Ok(()) + } + + pub fn remove_lookup_by_root(&mut self, block_root: Hash256) { + self.single_block_lookups + .retain(|lookup| lookup.block_request_state.requested_block_root != block_root); + } + /// Searches for a single block hash. If the blocks parent is unknown, a chain of blocks is /// constructed. - pub fn search_block(&mut self, hash: Hash256, peer_id: PeerId, cx: &mut SyncNetworkContext) { + pub fn search_block_with( + &mut self, + block_root: Hash256, + parent_components: Option>, + peers: &[PeerShouldHave], + ) -> Option> { // Do not re-request a block that is already being requested - if self + if let Some(lookup) = self .single_block_lookups - .values_mut() - .any(|single_block_request| single_block_request.add_peer(&hash, &peer_id)) + .iter_mut() + .find(|lookup| lookup.is_for_block(block_root)) { - return; + lookup.add_peers(peers); + if let Some(components) = parent_components { + lookup.add_unknown_parent_components(components); + } + return None; } - if self.parent_lookups.iter_mut().any(|parent_req| { - parent_req.add_peer(&hash, &peer_id) || parent_req.contains_block(&hash) + if let Some(parent_lookup) = self.parent_lookups.iter_mut().find(|parent_req| { + parent_req.is_for_block(block_root) || parent_req.contains_block(&block_root) }) { + parent_lookup.add_peers(peers); + // If the block was already downloaded, or is being downloaded in this moment, do not // request it. - return; + return None; } if self .processing_parent_lookups .values() - .any(|(hashes, _last_parent_request)| hashes.contains(&hash)) + .any(|(hashes, _last_parent_request)| hashes.contains(&block_root)) { // we are already processing this block, ignore it. - return; + return None; } debug!( self.log, "Searching for block"; - "peer_id" => %peer_id, - "block" => %hash + "peer_id" => ?peers, + "block" => ?block_root ); - let mut single_block_request = SingleBlockRequest::new(hash, peer_id); - - let (peer_id, request) = single_block_request - .request_block() - .expect("none of the possible failure cases apply for a newly created block lookup"); - if let Ok(request_id) = cx.single_block_lookup_request(peer_id, request) { - self.single_block_lookups - .insert(request_id, single_block_request); - - metrics::set_gauge( - &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, - self.single_block_lookups.len() as i64, - ); - } + Some(SingleBlockLookup::new( + block_root, + parent_components, + peers, + self.da_checker.clone(), + )) } /// If a block is attempted to be processed but we do not know its parent, this function is /// called in order to find the block's parent. 
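// The resulting recursion, sketched: a failed import with `BlockError::ParentUnknown`
// lands here; if the downloaded parent's parent is also unknown, the same `ParentLookup`
// chain is extended via `request_parent_block_and_blobs` (download failures bounded by
// `PARENT_FAIL_TOLERANCE`) until an ancestor connects or the chain root is recorded in
// `failed_chains`.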
pub fn search_parent( &mut self, + slot: Slot, block_root: Hash256, - block: BlockWrapper, + parent_root: Hash256, peer_id: PeerId, cx: &mut SyncNetworkContext, ) { - let parent_root = block.parent_root(); + // Gossip blocks or blobs shouldn't be propagated if parents are unavailable. + let peer_source = PeerShouldHave::BlockAndBlobs(peer_id); + // If this block or its parent is part of a known failed chain, ignore it. if self.failed_chains.contains(&parent_root) || self.failed_chains.contains(&block_root) { debug!(self.log, "Block is from a past failed chain. Dropping"; - "block_root" => ?block_root, "block_slot" => block.slot()); + "block_root" => ?block_root, "block_slot" => slot); return; } // Make sure this block is not already downloaded, and that neither it or its parent is // being searched for. - if self.parent_lookups.iter_mut().any(|parent_req| { - parent_req.contains_block(&block_root) - || parent_req.add_peer(&block_root, &peer_id) - || parent_req.add_peer(&parent_root, &peer_id) + if let Some(parent_lookup) = self.parent_lookups.iter_mut().find(|parent_req| { + parent_req.contains_block(&block_root) || parent_req.is_for_block(block_root) }) { + parent_lookup.add_peers(&[peer_source]); // we are already searching for this block, ignore it return; } @@ -170,8 +347,13 @@ impl BlockLookups { return; } - let parent_lookup = ParentLookup::new(block_root, block, peer_id); - self.request_parent(parent_lookup, cx, ForceBlockRequest::False); + let parent_lookup = ParentLookup::new( + block_root, + parent_root, + peer_source, + self.da_checker.clone(), + ); + self.request_parent_block_and_blobs(parent_lookup, cx); } /* Lookup responses */ @@ -180,58 +362,133 @@ &mut self, id: Id, peer_id: PeerId, - block: Option>, + block: Option>>, seen_timestamp: Duration, cx: &mut SyncNetworkContext, ) { - let mut request = match self.single_block_lookups.entry(id) { - Entry::Occupied(req) => req, - Entry::Vacant(_) => { - if block.is_some() { - debug!( - self.log, - "Block returned for single block lookup not present" - ); - } - return; - } + let stream_terminator = block.is_none().into(); + let log = self.log.clone(); + + let Some((has_pending_parent_request, request_ref)) = self.find_single_lookup_request(id, stream_terminator, ResponseType::Block) else { + return; }; - match request.get_mut().verify_block(block) { + let should_remove = match request_ref.verify_block(block) { Ok(Some((block_root, block))) => { - // This is the correct block, send it for processing - if self - .send_block_for_processing( + if let Some(parent_components) = request_ref.unknown_parent_components.as_mut() { + parent_components.add_unknown_parent_block(block.clone()); + }; + + if !has_pending_parent_request { + let block_wrapper = request_ref + .get_downloaded_block() + .unwrap_or(BlockWrapper::Block(block)); + // This is the correct block, send it for processing + match self.send_block_for_processing( block_root, - block, + block_wrapper, seen_timestamp, BlockProcessType::SingleBlock { id }, cx, - ) - .is_err() - { - // Remove to avoid inconsistencies - self.single_block_lookups.remove(&id); + ) { + Ok(()) => ShouldRemoveLookup::False, + Err(()) => ShouldRemoveLookup::True, + } + } else { + ShouldRemoveLookup::False } } - Ok(None) => { - // request finished correctly, it will be removed after the block is processed.
- } - Err(error) => { - let msg: &str = error.into(); - cx.report_peer(peer_id, PeerAction::LowToleranceError, msg); - // Remove the request, if it can be retried it will be added with a new id. - let mut req = request.remove(); - - debug!(self.log, "Single block lookup failed"; - "peer_id" => %peer_id, "error" => msg, "block_root" => %req.hash); - // try the request again if possible - if let Ok((peer_id, request)) = req.request_block() { - if let Ok(id) = cx.single_block_lookup_request(peer_id, request) { - self.single_block_lookups.insert(id, req); + Ok(None) => ShouldRemoveLookup::False, + Err(e) => handle_block_lookup_verify_error( + request_ref, + ResponseType::Block, + peer_id, + e, + cx, + &log, + ), + }; + + if matches!(should_remove, ShouldRemoveLookup::True) { + self.single_block_lookups + .retain(|req| req.id.block_request_id != Some(id)); + } + + metrics::set_gauge( + &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, + self.single_block_lookups.len() as i64, + ); + } + + pub fn single_blob_lookup_response( + &mut self, + id: Id, + peer_id: PeerId, + blob: Option>>, + seen_timestamp: Duration, + cx: &mut SyncNetworkContext, + ) { + let stream_terminator = blob.is_none().into(); + + let log = self.log.clone(); + + let Some((has_pending_parent_requests, request_ref)) = + self.find_single_lookup_request(id, stream_terminator, ResponseType::Blob) else { + return; + }; + + let should_remove = match request_ref.verify_blob(blob) { + Ok(Some((block_root, blobs))) => { + if let Some(parent_components) = request_ref.unknown_parent_components.as_mut() { + parent_components.add_unknown_parent_blobs(blobs); + + if !has_pending_parent_requests { + request_ref + .get_downloaded_block() + .map(|block| { + match self.send_block_for_processing( + block_root, + block, + seen_timestamp, + BlockProcessType::SingleBlock { id }, + cx, + ) { + Ok(()) => ShouldRemoveLookup::False, + Err(()) => ShouldRemoveLookup::True, + } + }) + .unwrap_or(ShouldRemoveLookup::False) + } else { + ShouldRemoveLookup::False + } + } else { + // These are the correct blobs, send them for processing + match self.send_blobs_for_processing( + block_root, + blobs, + seen_timestamp, + BlockProcessType::SingleBlock { id }, + cx, + ) { + Ok(()) => ShouldRemoveLookup::False, + Err(()) => ShouldRemoveLookup::True, } } } + Ok(None) => ShouldRemoveLookup::False, + Err(e) => handle_block_lookup_verify_error( + request_ref, + ResponseType::Blob, + peer_id, + e, + cx, + &log, + ), + }; + + if matches!(should_remove, ShouldRemoveLookup::True) { + self.single_block_lookups + .retain(|req| req.id.blob_request_id != Some(id)); } metrics::set_gauge( @@ -240,19 +497,59 @@ impl BlockLookups { ); } + /// Returns the lookup along with a `bool` representing whether the lookup has an outstanding + /// parent lookup that has yet to be resolved. This determines whether we send the + /// block or blob for processing because we would fail block processing and trigger a new lookup + /// via `UnknownParentBlock` or `UnknownParentBlob` until we process the parent. 
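// Note on the `stream_terminator` arguments: a `None` block or blob response is the RPC
// stream termination rather than data, so failing to match a lookup for a terminator is
// expected (the request may simply have completed) and only non-terminator orphans are
// logged below.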
+ fn find_single_lookup_request( + &mut self, + target_id: Id, + stream_terminator: StreamTerminator, + response_type: ResponseType, + ) -> Option<( + bool, + &mut SingleBlockLookup, + )> { + let lookup = self.single_block_lookups.iter_mut().find_map(|req| { + let id_opt = match response_type { + ResponseType::Block => req.id.block_request_id, + ResponseType::Blob => req.id.blob_request_id, + }; + if let Some(lookup_id) = id_opt { + if lookup_id == target_id { + let has_pending_parent_request = self.parent_lookups.iter().any(|lookup| { + lookup.chain_hash() == req.block_request_state.requested_block_root + }); + + return Some((has_pending_parent_request, req)); + } + } + None + }); + + if lookup.is_none() && matches!(stream_terminator, StreamTerminator::False) { + warn!( + self.log, + "Block returned for single block lookup not present"; + "response_type" => ?response_type, + ); + } + lookup + } + /// Process a response received from a parent lookup request. pub fn parent_lookup_response( &mut self, id: Id, peer_id: PeerId, - block: Option>, + block: Option>>, seen_timestamp: Duration, cx: &mut SyncNetworkContext, ) { let mut parent_lookup = if let Some(pos) = self .parent_lookups .iter() - .position(|request| request.pending_response(id)) + .position(|request| request.pending_block_response(id)) { self.parent_lookups.remove(pos) } else { @@ -264,19 +561,45 @@ impl BlockLookups { match parent_lookup.verify_block(block, &mut self.failed_chains) { Ok(Some((block_root, block))) => { - // Block is correct, send to the beacon processor. - let chain_hash = parent_lookup.chain_hash(); - if self - .send_block_for_processing( - block_root, - block, - seen_timestamp, - BlockProcessType::ParentLookup { chain_hash }, - cx, - ) - .is_ok() + parent_lookup.add_current_request_block(block); + if let Some(block_wrapper) = + parent_lookup.current_parent_request.get_downloaded_block() { - self.parent_lookups.push(parent_lookup) + let chain_hash = parent_lookup.chain_hash(); + if self + .send_block_for_processing( + block_root, + block_wrapper, + seen_timestamp, + BlockProcessType::ParentLookup { chain_hash }, + cx, + ) + .is_ok() + { + self.parent_lookups.push(parent_lookup) + } + } else { + let outstanding_blobs_req = parent_lookup + .current_parent_request + .id + .blob_request_id + .is_some(); + if !outstanding_blobs_req { + if let Ok(peer_id) = parent_lookup + .current_parent_request + .downloading_peer(ResponseType::Blob) + { + cx.report_peer( + peer_id.to_peer_id(), + PeerAction::MidToleranceError, + "bbroot_failed_chains", + ); + } + + self.request_parent_blobs(parent_lookup, cx); + } else { + self.parent_lookups.push(parent_lookup) + } } } Ok(None) => { @@ -284,37 +607,68 @@ impl BlockLookups { // processing result arrives. self.parent_lookups.push(parent_lookup); } - Err(e) => match e { - VerifyError::RootMismatch - | VerifyError::NoBlockReturned - | VerifyError::ExtraBlocksReturned => { - let e = e.into(); - warn!(self.log, "Peer sent invalid response to parent request."; - "peer_id" => %peer_id, "reason" => %e); + Err(e) => { + self.handle_parent_verify_error(peer_id, parent_lookup, ResponseType::Block, e, cx) + } + }; - // We do not tolerate these kinds of errors. We will accept a few but these are signs - // of a faulty peer. - cx.report_peer(peer_id, PeerAction::LowToleranceError, e); + metrics::set_gauge( + &metrics::SYNC_PARENT_BLOCK_LOOKUPS, + self.parent_lookups.len() as i64, + ); + } - // We try again if possible. 
- self.request_parent(parent_lookup, cx, ForceBlockRequest::False); - } - VerifyError::PreviousFailure { parent_root } => { - debug!( - self.log, - "Parent chain ignored due to past failure"; - "block" => %parent_root, - ); - // Add the root block to failed chains - self.failed_chains.insert(parent_lookup.chain_hash()); + pub fn parent_lookup_blob_response( + &mut self, + id: Id, + peer_id: PeerId, + blob: Option>>, + seen_timestamp: Duration, + cx: &mut SyncNetworkContext, + ) { + let mut parent_lookup = if let Some(pos) = self + .parent_lookups + .iter() + .position(|request| request.pending_blob_response(id)) + { + self.parent_lookups.remove(pos) + } else { + if blob.is_some() { + debug!(self.log, "Response for a parent lookup blob request that was not found"; "peer_id" => %peer_id); + } + return; + }; - cx.report_peer( - peer_id, - PeerAction::MidToleranceError, - "bbroot_failed_chains", - ); + match parent_lookup.verify_blob(blob, &mut self.failed_chains) { + Ok(Some((block_root, blobs))) => { + parent_lookup.add_current_request_blobs(blobs); + let chain_hash = parent_lookup.chain_hash(); + if let Some(block_wrapper) = + parent_lookup.current_parent_request.get_downloaded_block() + { + if self + .send_block_for_processing( + block_root, + block_wrapper, + seen_timestamp, + BlockProcessType::ParentLookup { chain_hash }, + cx, + ) + .is_ok() + { + self.parent_lookups.push(parent_lookup) + } + } else { + self.parent_lookups.push(parent_lookup) } - }, + } + Ok(None) => { + // Waiting for more blobs to arrive + self.parent_lookups.push(parent_lookup); + } + Err(e) => { + self.handle_parent_verify_error(peer_id, parent_lookup, ResponseType::Blob, e, cx) + } }; metrics::set_gauge( @@ -323,57 +677,88 @@ impl BlockLookups { ); } - /* Error responses */ + fn handle_parent_verify_error( + &mut self, + peer_id: PeerId, + mut parent_lookup: ParentLookup, + response_type: ResponseType, + e: ParentVerifyError, + cx: &mut SyncNetworkContext, + ) { + match e { + ParentVerifyError::RootMismatch + | ParentVerifyError::NoBlockReturned + | ParentVerifyError::NotEnoughBlobsReturned + | ParentVerifyError::ExtraBlocksReturned + | ParentVerifyError::UnrequestedBlobId + | ParentVerifyError::ExtraBlobsReturned + | ParentVerifyError::InvalidIndex(_) => { + let e = e.into(); + warn!(self.log, "Peer sent invalid response to parent request."; + "peer_id" => %peer_id, "reason" => %e); - #[allow(clippy::needless_collect)] // false positive - pub fn peer_disconnected(&mut self, peer_id: &PeerId, cx: &mut SyncNetworkContext) { - /* Check disconnection for single block lookups */ - // better written after https://github.com/rust-lang/rust/issues/59618 - let remove_retry_ids: Vec = self - .single_block_lookups - .iter_mut() - .filter_map(|(id, req)| { - if req.check_peer_disconnected(peer_id).is_err() { - Some(*id) - } else { - None - } - }) - .collect(); + // We do not tolerate these kinds of errors. We will accept a few but these are signs + // of a faulty peer. 
+ cx.report_peer(peer_id, PeerAction::LowToleranceError, e); - for mut req in remove_retry_ids - .into_iter() - .map(|id| self.single_block_lookups.remove(&id).unwrap()) - .collect::>() - { - // retry the request - match req.request_block() { - Ok((peer_id, block_request)) => { - if let Ok(request_id) = cx.single_block_lookup_request(peer_id, block_request) { - self.single_block_lookups.insert(request_id, req); - } - } - Err(e) => { - trace!( - self.log, - "Single block request failed on peer disconnection"; - "block_root" => %req.hash, - "peer_id" => %peer_id, - "reason" => <&str>::from(e), - ); - } + // We try again if possible. + match response_type { + ResponseType::Block => self.request_parent_block(parent_lookup, cx), + ResponseType::Blob => self.request_parent_blobs(parent_lookup, cx), + }; + } + ParentVerifyError::PreviousFailure { parent_root } => { + debug!( + self.log, + "Parent chain ignored due to past failure"; + "block" => %parent_root, + ); + // Add the root block to failed chains + self.failed_chains.insert(parent_lookup.chain_hash()); + + cx.report_peer( + peer_id, + PeerAction::MidToleranceError, + "bbroot_failed_chains", + ); + } + ParentVerifyError::BenignFailure => { + trace!( + self.log, + "Requested peer could not respond to block request, requesting a new peer"; + ); + parent_lookup + .current_parent_request + .remove_peer_if_useless(&peer_id, response_type); + match response_type { + ResponseType::Block => self.request_parent_block(parent_lookup, cx), + ResponseType::Blob => self.request_parent_blobs(parent_lookup, cx), + }; } } + } + + /* Error responses */ + + pub fn peer_disconnected(&mut self, peer_id: &PeerId, cx: &mut SyncNetworkContext) { + self.single_block_lookups.retain_mut(|req| { + let should_remove_block = + should_remove_disconnected_peer(ResponseType::Block, peer_id, cx, req, &self.log); + let should_remove_blob = + should_remove_disconnected_peer(ResponseType::Blob, peer_id, cx, req, &self.log); + + matches!(should_remove_block, ShouldRemoveLookup::False) + && matches!(should_remove_blob, ShouldRemoveLookup::False) + }); /* Check disconnection for parent lookups */ - while let Some(pos) = self - .parent_lookups - .iter_mut() - .position(|req| req.check_peer_disconnected(peer_id).is_err()) - { + while let Some(pos) = self.parent_lookups.iter_mut().position(|req| { + req.check_block_peer_disconnected(peer_id).is_err() + || req.check_blob_peer_disconnected(peer_id).is_err() + }) { let parent_lookup = self.parent_lookups.remove(pos); trace!(self.log, "Parent lookup's peer disconnected"; &parent_lookup); - self.request_parent(parent_lookup, cx, ForceBlockRequest::False); + self.request_parent_block_and_blobs(parent_lookup, cx); } } @@ -385,29 +770,33 @@ impl BlockLookups { cx: &mut SyncNetworkContext, error: RPCError, ) { + let msg = error.as_static_str(); if let Some(pos) = self .parent_lookups .iter() - .position(|request| request.pending_response(id)) + .position(|request| request.pending_block_response(id)) { let mut parent_lookup = self.parent_lookups.remove(pos); - parent_lookup.download_failed(); - trace!(self.log, "Parent lookup request failed"; &parent_lookup); + parent_lookup.block_download_failed(); + trace!(self.log, "Parent lookup block request failed"; &parent_lookup, "error" => msg); - // `ResourceUnavailable` indicates we requested a parent block from prior to the 4844 fork epoch. 
- let force_block_request = if let RPCError::ErrorResponse( - RPCResponseErrorCode::ResourceUnavailable, - _, - ) = error - { - debug!(self.log, "RPC parent lookup for block and blobs failed. Retrying the request for just a block"; "peer_id" => %peer_id); - ForceBlockRequest::True - } else { - ForceBlockRequest::False - }; - self.request_parent(parent_lookup, cx, force_block_request); + self.request_parent_block(parent_lookup, cx); + } else { + return debug!(self.log, "RPC failure for a block parent lookup request that was not found"; "peer_id" => %peer_id, "error" => msg); + }; + + if let Some(pos) = self + .parent_lookups + .iter() + .position(|request| request.pending_blob_response(id)) + { + let mut parent_lookup = self.parent_lookups.remove(pos); + parent_lookup.blob_download_failed(); + trace!(self.log, "Parent lookup blobs request failed"; &parent_lookup, "error" => msg); + + self.request_parent_blobs(parent_lookup, cx); } else { - return debug!(self.log, "RPC failure for a parent lookup request that was not found"; "peer_id" => %peer_id); + return debug!(self.log, "RPC failure for a blobs parent lookup request that was not found"; "peer_id" => %peer_id, "error" => msg); }; metrics::set_gauge( &metrics::SYNC_PARENT_BLOCK_LOOKUPS, @@ -415,16 +804,37 @@ impl BlockLookups { ); } - pub fn single_block_lookup_failed(&mut self, id: Id, cx: &mut SyncNetworkContext) { - if let Some(mut request) = self.single_block_lookups.remove(&id) { - request.register_failure_downloading(); - trace!(self.log, "Single block lookup failed"; "block" => %request.hash); - if let Ok((peer_id, block_request)) = request.request_block() { - if let Ok(request_id) = cx.single_block_lookup_request(peer_id, block_request) { - self.single_block_lookups.insert(request_id, request); - } - } - } + pub fn single_block_lookup_failed( + &mut self, + id: Id, + peer_id: &PeerId, + cx: &mut SyncNetworkContext, + error: RPCError, + ) { + let msg = error.as_static_str(); + self.single_block_lookups.retain_mut(|req| { + let should_remove_block = should_remove_failed_lookup( + id, + ResponseType::Block, + msg, + peer_id, + cx, + req, + &self.log, + ); + let should_remove_blob = should_remove_failed_lookup( + id, + ResponseType::Blob, + msg, + peer_id, + cx, + req, + &self.log, + ); + + matches!(should_remove_block, ShouldRemoveLookup::False) + && matches!(should_remove_blob, ShouldRemoveLookup::False) + }); metrics::set_gauge( &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, @@ -434,33 +844,51 @@ impl BlockLookups { /* Processing responses */ - pub fn single_block_processed( + pub fn single_block_component_processed( &mut self, - id: Id, - result: BlockProcessResult, + target_id: Id, + result: BlockProcessingResult, + response_type: ResponseType, cx: &mut SyncNetworkContext, ) { - let mut req = match self.single_block_lookups.remove(&id) { + let lookup_components_opt = + self.single_block_lookups + .iter_mut() + .enumerate() + .find_map(|(index, req)| { + let block_match = req.id.block_request_id.as_ref() == Some(&target_id); + let blob_match = req.id.blob_request_id.as_ref() == Some(&target_id); + (block_match || blob_match).then_some((index, req)) + }); + let (index, request_ref) = match lookup_components_opt { Some(req) => req, None => { return debug!( self.log, - "Block processed for single block lookup not present" + "Block component processed for single block lookup not present" ); } }; - let root = req.hash; - let peer_id = match req.processing_peer() { + let root = request_ref.block_request_state.requested_block_root; + let peer_id = 
request_ref.processing_peer(response_type); + + let peer_id = match peer_id { Ok(peer) => peer, Err(_) => return, }; - match result { - BlockProcessResult::Ok => { - trace!(self.log, "Single block processing succeeded"; "block" => %root); - } - BlockProcessResult::Ignored => { + let should_remove_lookup = match result { + BlockProcessingResult::Ok(status) => match status { + AvailabilityProcessingStatus::Imported(root) => { + trace!(self.log, "Single block processing succeeded"; "block" => %root); + ShouldRemoveLookup::True + } + AvailabilityProcessingStatus::MissingComponents(_, _block_root) => { + should_remove_missing_components(request_ref, response_type, cx, &self.log) + } + }, + BlockProcessingResult::Ignored => { // Beacon processor signalled to ignore the block processing result. // This implies that the cpu is overloaded. Drop the request. warn!( @@ -468,19 +896,30 @@ impl BlockLookups { "Single block processing was ignored, cpu might be overloaded"; "action" => "dropping single block request" ); + ShouldRemoveLookup::True } - BlockProcessResult::Err(e) => { + BlockProcessingResult::Err(e) => { trace!(self.log, "Single block processing failed"; "block" => %root, "error" => %e); match e { BlockError::BlockIsAlreadyKnown => { // No error here + ShouldRemoveLookup::True } BlockError::BeaconChainError(e) => { // Internal error error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e); + ShouldRemoveLookup::True } BlockError::ParentUnknown(block) => { - self.search_parent(root, block, peer_id, cx); + let slot = block.slot(); + let parent_root = block.parent_root(); + let (block, blobs) = block.deconstruct(); + request_ref.add_unknown_parent_block(block); + if let Some(blobs) = blobs { + request_ref.add_unknown_parent_blobs(blobs); + } + self.search_parent(slot, root, parent_root, peer_id.to_peer_id(), cx); + ShouldRemoveLookup::False } ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { // These errors indicate that the execution layer is offline @@ -491,26 +930,59 @@ impl BlockLookups { "root" => %root, "error" => ?e ); + ShouldRemoveLookup::True + } + BlockError::AvailabilityCheck( + AvailabilityCheckError::KzgVerificationFailed, + ) + | BlockError::AvailabilityCheck(AvailabilityCheckError::Kzg(_)) + | BlockError::BlobValidation(_) => { + warn!(self.log, "Blob validation failure"; "root" => %root, "peer_id" => %peer_id); + if let Ok(blob_peer) = request_ref.processing_peer(ResponseType::Blob) { + cx.report_peer( + blob_peer.to_peer_id(), + PeerAction::MidToleranceError, + "single_blob_failure", + ); + // Try it again if possible. + retry_request_after_failure( + request_ref, + ResponseType::Blob, + peer_id.as_peer_id(), + cx, + &self.log, + ) + } else { + ShouldRemoveLookup::False + } } other => { warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?other, "peer_id" => %peer_id); - cx.report_peer( - peer_id, - PeerAction::MidToleranceError, - "single_block_failure", - ); - // Try it again if possible. - req.register_failure_processing(); - if let Ok((peer_id, request)) = req.request_block() { - if let Ok(request_id) = cx.single_block_lookup_request(peer_id, request) - { - // insert with the new id - self.single_block_lookups.insert(request_id, req); - } + if let Ok(block_peer) = request_ref.processing_peer(ResponseType::Block) { + cx.report_peer( + block_peer.to_peer_id(), + PeerAction::MidToleranceError, + "single_block_failure", + ); + + // Try it again if possible. 
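+                            // The retry helper re-issues the block request if attempts
+                            // remain and says whether this lookup entry should be dropped.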
+ retry_request_after_failure( + request_ref, + ResponseType::Block, + block_peer.as_peer_id(), + cx, + &self.log, + ) + } else { + ShouldRemoveLookup::False } } } } + }; + + if matches!(should_remove_lookup, ShouldRemoveLookup::True) { + self.single_block_lookups.remove(index); } metrics::set_gauge( @@ -522,31 +994,43 @@ impl BlockLookups { pub fn parent_block_processed( &mut self, chain_hash: Hash256, - result: BlockProcessResult, + result: BlockProcessingResult, + response_type: ResponseType, cx: &mut SyncNetworkContext, ) { - let (mut parent_lookup, peer_id) = if let Some((pos, peer)) = self + let index = self .parent_lookups .iter() .enumerate() - .find_map(|(pos, request)| { - request - .get_processing_peer(chain_hash) - .map(|peer| (pos, peer)) - }) { - (self.parent_lookups.remove(pos), peer) - } else { + .find(|(_, lookup)| lookup.chain_hash() == chain_hash) + .map(|(index, _)| index); + + let Some(mut parent_lookup) = index.map(|index|self.parent_lookups.remove(index)) else { return debug!(self.log, "Process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); }; + let peer_id = parent_lookup + .current_parent_request + .processing_peer(response_type); + + let peer_id = match peer_id { + Ok(peer) => peer, + Err(_) => return, + }; + match &result { - BlockProcessResult::Ok => { - trace!(self.log, "Parent block processing succeeded"; &parent_lookup) - } - BlockProcessResult::Err(e) => { + BlockProcessingResult::Ok(status) => match status { + AvailabilityProcessingStatus::Imported(block_root) => { + trace!(self.log, "Parent block processing succeeded"; &parent_lookup, "block_root" => ?block_root) + } + AvailabilityProcessingStatus::MissingComponents(_, block_root) => { + trace!(self.log, "Parent missing parts, triggering single block lookup "; &parent_lookup,"block_root" => ?block_root) + } + }, + BlockProcessingResult::Err(e) => { trace!(self.log, "Parent block processing failed"; &parent_lookup, "error" => %e) } - BlockProcessResult::Ignored => { + BlockProcessingResult::Ignored => { trace!( self.log, "Parent block processing job was ignored"; @@ -557,14 +1041,18 @@ impl BlockLookups { } match result { - BlockProcessResult::Err(BlockError::ParentUnknown(block)) => { - // need to keep looking for parents - // add the block back to the queue and continue the search - parent_lookup.add_block(block); - self.request_parent(parent_lookup, cx, ForceBlockRequest::False); - } - BlockProcessResult::Ok - | BlockProcessResult::Err(BlockError::BlockIsAlreadyKnown { .. }) => { + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + _, + block_root, + )) => { + self.search_block(block_root, peer_id, cx); + } + BlockProcessingResult::Err(BlockError::ParentUnknown(block)) => { + parent_lookup.add_unknown_parent_block(block); + self.request_parent_block_and_blobs(parent_lookup, cx); + } + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_)) + | BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown { .. 
}) => { // Check if the beacon processor is available let beacon_processor_send = match cx.processor_channel_if_enabled() { Some(channel) => channel, @@ -576,15 +1064,24 @@ impl BlockLookups { ); } }; - let (chain_hash, blocks, hashes, request) = parent_lookup.parts_for_processing(); + let (chain_hash, mut blocks, hashes, block_request) = + parent_lookup.parts_for_processing(); + if let Some(child_block) = self.single_block_lookups.iter_mut().find_map(|req| { + if req.block_request_state.requested_block_root == chain_hash { + req.get_downloaded_block() + } else { + None + } + }) { + blocks.push(child_block); + }; let process_id = ChainSegmentProcessId::ParentLookup(chain_hash); - let work = WorkEvent::chain_segment(process_id, blocks); match beacon_processor_send.try_send(work) { Ok(_) => { self.processing_parent_lookups - .insert(chain_hash, (hashes, request)); + .insert(chain_hash, (hashes, block_request)); } Err(e) => { error!( @@ -595,7 +1092,7 @@ impl BlockLookups { } } } - ref e @ BlockProcessResult::Err(BlockError::ExecutionPayloadError(ref epe)) + ref e @ BlockProcessingResult::Err(BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { // These errors indicate that the execution layer is offline @@ -607,25 +1104,10 @@ impl BlockLookups { "error" => ?e ); } - BlockProcessResult::Err(outcome) => { - // all else we consider the chain a failure and downvote the peer that sent - // us the last block - warn!( - self.log, "Invalid parent chain"; - "score_adjustment" => %PeerAction::MidToleranceError, - "outcome" => ?outcome, - "last_peer" => %peer_id, - ); - - // This currently can be a host of errors. We permit this due to the partial - // ambiguity. - cx.report_peer(peer_id, PeerAction::MidToleranceError, "parent_request_err"); - - // Try again if possible - parent_lookup.processing_failed(); - self.request_parent(parent_lookup, cx, ForceBlockRequest::False); + BlockProcessingResult::Err(outcome) => { + self.handle_invalid_block(outcome, peer_id.to_peer_id(), cx, parent_lookup); } - BlockProcessResult::Ignored => { + BlockProcessingResult::Ignored => { // Beacon processor signalled to ignore the block processing result. // This implies that the cpu is overloaded. Drop the request. warn!( @@ -642,6 +1124,30 @@ impl BlockLookups { ); } + fn handle_invalid_block( + &mut self, + outcome: BlockError<::EthSpec>, + peer_id: PeerId, + cx: &mut SyncNetworkContext, + mut parent_lookup: ParentLookup, + ) { + // all else we consider the chain a failure and downvote the peer that sent + // us the last block + warn!( + self.log, "Invalid parent chain"; + "score_adjustment" => %PeerAction::MidToleranceError, + "outcome" => ?outcome, + "last_peer" => %peer_id, + ); + // This currently can be a host of errors. We permit this due to the partial + // ambiguity. + cx.report_peer(peer_id, PeerAction::MidToleranceError, "parent_request_err"); + // Try again if possible + parent_lookup.block_processing_failed(); + parent_lookup.blob_processing_failed(); + self.request_parent_block_and_blobs(parent_lookup, cx); + } + pub fn parent_chain_processed( &mut self, chain_hash: Hash256, @@ -658,15 +1164,54 @@ impl BlockLookups { debug!(self.log, "Parent chain processed"; "chain_hash" => %chain_hash, "result" => ?result); match result { BatchProcessResult::Success { .. } => { - // nothing to do. 
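+                // A successful parent chain import may satisfy a pending single block
+                // lookup for the chain tip; if one exists, its downloaded block is
+                // re-sent for processing below.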
+ if let Some((index, _)) = self + .single_block_lookups + .iter() + .enumerate() + .find(|(_, req)| req.block_request_state.requested_block_root == chain_hash) + { + if let Some((lookup_id, block_wrapper)) = + self.single_block_lookups.get_mut(index).and_then(|lookup| { + lookup + .get_downloaded_block() + .map(|block| (lookup.id.clone(), block)) + }) + { + let LookupId { + block_request_id, + blob_request_id, + } = lookup_id; + let Some(id) = block_request_id.or(blob_request_id) else { + warn!(self.log, "No id found for single block lookup"; "chain_hash" => %chain_hash); + return; + }; + + // This is the correct block, send it for processing + if self + .send_block_for_processing( + chain_hash, + block_wrapper, + Duration::from_secs(0), //TODO(sean) pipe this through + BlockProcessType::SingleBlock { id }, + cx, + ) + .is_err() + { + // Remove to avoid inconsistencies + self.single_block_lookups.remove(index); + } + } + } } BatchProcessResult::FaultyFailure { imported_blocks: _, penalty, } => { self.failed_chains.insert(chain_hash); - for peer_id in request.used_peers { - cx.report_peer(peer_id, penalty, "parent_chain_failure") + let mut all_peers = request.block_request_state.state.used_peers.clone(); + all_peers.extend(request.blob_request_state.state.used_peers); + for peer_source in all_peers { + cx.report_peer(peer_source, penalty, "parent_chain_failure") } } BatchProcessResult::NonFaultyFailure => { @@ -712,13 +1257,83 @@ impl BlockLookups { } } - fn request_parent( + fn send_blobs_for_processing( + &self, + block_root: Hash256, + blobs: FixedBlobSidecarList, + duration: Duration, + process_type: BlockProcessType, + cx: &mut SyncNetworkContext, + ) -> Result<(), ()> { + let blob_count = blobs.iter().filter(|b| b.is_some()).count(); + if blob_count == 0 { + return Ok(()); + } + match cx.processor_channel_if_enabled() { + Some(beacon_processor_send) => { + trace!(self.log, "Sending blobs for processing"; "block" => ?block_root, "process_type" => ?process_type); + let event = WorkEvent::rpc_blobs(block_root, blobs, duration, process_type); + if let Err(e) = beacon_processor_send.try_send(event) { + error!( + self.log, + "Failed to send sync blobs to processor"; + "error" => ?e + ); + Err(()) + } else { + Ok(()) + } + } + None => { + trace!(self.log, "Dropping blobs ready for processing. 
Beacon processor not available"; "block_root" => %block_root); + Err(()) + } + } + } + + fn request_parent_block( &mut self, mut parent_lookup: ParentLookup, cx: &mut SyncNetworkContext, - force_block_request: ForceBlockRequest, ) { - match parent_lookup.request_parent(cx, force_block_request) { + let response = parent_lookup.request_parent_block(cx); + self.handle_response(parent_lookup, cx, response, ResponseType::Block); + } + + fn request_parent_blobs( + &mut self, + mut parent_lookup: ParentLookup, + cx: &mut SyncNetworkContext, + ) { + let response = parent_lookup.request_parent_blobs(cx); + self.handle_response(parent_lookup, cx, response, ResponseType::Blob); + } + + fn request_parent_block_and_blobs( + &mut self, + mut parent_lookup: ParentLookup, + cx: &mut SyncNetworkContext, + ) { + let block_res = parent_lookup.request_parent_block(cx); + match block_res { + Ok(()) => { + let blob_res = parent_lookup.request_parent_blobs(cx); + self.handle_response(parent_lookup, cx, blob_res, ResponseType::Blob) + } + Err(e) => { + self.handle_response(parent_lookup, cx, Err(e), ResponseType::Block); + } + } + } + + fn handle_response( + &mut self, + parent_lookup: ParentLookup, + cx: &mut SyncNetworkContext, + result: Result<(), parent_lookup::RequestError>, + response_type: ResponseType, + ) { + match result { Err(e) => { debug!(self.log, "Failed to request parent"; &parent_lookup, "error" => e.as_static()); match e { @@ -728,7 +1343,7 @@ impl BlockLookups { parent_lookup::RequestError::ChainTooLong => { self.failed_chains.insert(parent_lookup.chain_hash()); // This indicates faulty peers. - for &peer_id in parent_lookup.used_peers() { + for &peer_id in parent_lookup.used_peers(response_type) { cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) } } @@ -741,7 +1356,7 @@ impl BlockLookups { self.failed_chains.insert(parent_lookup.chain_hash()); } // This indicates faulty peers. - for &peer_id in parent_lookup.used_peers() { + for &peer_id in parent_lookup.used_peers(response_type) { cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) } } @@ -766,7 +1381,9 @@ impl BlockLookups { /// Drops all the single block requests and returns how many requests were dropped. pub fn drop_single_block_requests(&mut self) -> usize { - self.single_block_lookups.drain().len() + let requests_to_drop = self.single_block_lookups.len(); + self.single_block_lookups.clear(); + requests_to_drop } /// Drops all the parent chain requests and returns how many requests were dropped. 
@@ -774,3 +1391,176 @@ impl BlockLookups {
         self.parent_lookups.drain(..).len()
     }
 }
+
+fn handle_block_lookup_verify_error(
+    request_ref: &mut SingleBlockLookup,
+    response_type: ResponseType,
+    peer_id: PeerId,
+    e: LookupVerifyError,
+    cx: &mut SyncNetworkContext,
+    log: &Logger,
+) -> ShouldRemoveLookup {
+    let msg = if matches!(e, LookupVerifyError::BenignFailure) {
+        request_ref.remove_peer_if_useless(&peer_id, response_type);
+        "peer could not respond to request"
+    } else {
+        let msg = e.into();
+        cx.report_peer(peer_id, PeerAction::LowToleranceError, msg);
+        msg
+    };
+
+    debug!(log, "Single block lookup failed";
+        "peer_id" => %peer_id,
+        "error" => msg,
+        "block_root" => ?request_ref.block_request_state.requested_block_root,
+        "response_type" => ?response_type
+    );
+    retry_request_after_failure(request_ref, response_type, &peer_id, cx, log)
+}
+
+fn retry_request_after_failure(
+    request_ref: &mut SingleBlockLookup,
+    response_type: ResponseType,
+    initial_peer_id: &PeerId,
+    cx: &mut SyncNetworkContext,
+    log: &Logger,
+) -> ShouldRemoveLookup {
+    let requested_block_root = request_ref.block_request_state.requested_block_root;
+
+    // try the request again if possible
+    match response_type {
+        ResponseType::Block => {
+            let id = request_ref.request_block().map(|request_opt| {
+                request_opt
+                    .map(|(peer_id, request)| cx.single_block_lookup_request(peer_id, request))
+            });
+            match id {
+                Ok(Some(Ok(id))) => {
+                    request_ref.id.block_request_id = Some(id);
+                }
+                Ok(Some(Err(e))) => {
+                    debug!(log, "Single block lookup failed";
+                        "peer_id" => %initial_peer_id,
+                        "error" => ?e,
+                        "block_root" => ?requested_block_root,
+                        "response_type" => ?response_type);
+                    return ShouldRemoveLookup::True;
+                }
+                Ok(None) => {
+                    request_ref.id.block_request_id = None;
+                    // The lookup failed but the block or blob was found via other means.
+                }
+                Err(e) => {
+                    debug!(log, "Single block lookup failed";
+                        "peer_id" => %initial_peer_id,
+                        "error" => ?e,
+                        "block_root" => ?requested_block_root,
+                        "response_type" => ?response_type);
+                    return ShouldRemoveLookup::True;
+                }
+            }
+        }
+        ResponseType::Blob => {
+            let id = request_ref.request_blobs().map(|request_opt| {
+                request_opt
+                    .map(|(peer_id, request)| cx.single_blobs_lookup_request(peer_id, request))
+            });
+
+            match id {
+                Ok(Some(Ok(id))) => {
+                    request_ref.id.blob_request_id = Some(id);
+                }
+                Ok(Some(Err(e))) => {
+                    debug!(log, "Single block lookup failed";
+                        "peer_id" => %initial_peer_id,
+                        "error" => ?e,
+                        "block_root" => ?requested_block_root,
+                        "response_type" => ?response_type);
+                    return ShouldRemoveLookup::True;
+                }
+                Ok(None) => {
+                    request_ref.id.blob_request_id = None;
+                    // The lookup failed but the block or blob was found via other means.
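+                    // (`request_blobs` yields `Ok(None)` when there are no blob ids
+                    // left to request, so there is nothing to retry in that case.)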
+ } + Err(e) => { + debug!(log, "Single block lookup failed"; + "peer_id" => %initial_peer_id, + "error" => ?e, + "block_root" => ?requested_block_root, + "response_type" => ?response_type); + return ShouldRemoveLookup::True; + } + } + } + }; + ShouldRemoveLookup::False +} + +fn should_remove_disconnected_peer( + response_type: ResponseType, + peer_id: &PeerId, + cx: &mut SyncNetworkContext, + req: &mut SingleBlockLookup, + log: &Logger, +) -> ShouldRemoveLookup { + if req.check_peer_disconnected(peer_id, response_type).is_err() { + trace!(log, "Single lookup failed on peer disconnection"; "block_root" => ?req.block_request_state.requested_block_root, "response_type" => ?response_type); + retry_request_after_failure(req, response_type, peer_id, cx, log) + } else { + ShouldRemoveLookup::False + } +} + +fn should_remove_failed_lookup( + id: Id, + response_type: ResponseType, + msg: &'static str, + peer_id: &PeerId, + cx: &mut SyncNetworkContext, + req: &mut SingleBlockLookup, + log: &Logger, +) -> ShouldRemoveLookup { + if req.id.block_request_id == Some(id) || req.id.blob_request_id == Some(id) { + req.register_failure_downloading(response_type); + trace!(log, "Single lookup failed"; "block" => %req.block_request_state.requested_block_root, "error" => msg, "response_type" => ?response_type); + retry_request_after_failure(req, response_type, peer_id, cx, log) + } else { + ShouldRemoveLookup::False + } +} + +fn should_remove_missing_components( + request_ref: &mut SingleBlockLookup, + response_type: ResponseType, + cx: &mut SyncNetworkContext, + log: &Logger, +) -> ShouldRemoveLookup { + request_ref.set_component_processed(response_type); + + // If we get a missing component response after processing both a blob and a block response, the + // blobs must be what are missing. + if request_ref.both_components_processed() { + let Ok(blob_peer) = request_ref.processing_peer(ResponseType::Blob) else { + return ShouldRemoveLookup::False; + }; + if let PeerShouldHave::BlockAndBlobs(blob_peer) = blob_peer { + cx.report_peer( + blob_peer, + PeerAction::MidToleranceError, + "single_block_failure", + ); + } + request_ref.remove_peer_if_useless(blob_peer.as_peer_id(), ResponseType::Blob); + if !request_ref.downloading(ResponseType::Blob) { + // Try it again if possible. 
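+        // Only the blob request is retried here: with both components processed,
+        // the block is fine and the missing pieces can only be blobs.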
+ return retry_request_after_failure( + request_ref, + ResponseType::Blob, + blob_peer.as_peer_id(), + cx, + log, + ); + } + } + ShouldRemoveLookup::False +} diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index f066191c00a..5175450d9e0 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,18 +1,18 @@ -use super::RootBlockTuple; +use super::single_block_lookup::{LookupRequestError, LookupVerifyError, SingleBlockLookup}; +use super::{BlobRequestId, BlockRequestId, DownloadedBlocks, PeerShouldHave, ResponseType}; +use crate::sync::block_lookups::single_block_lookup::{State, UnknownParentComponents}; +use crate::sync::block_lookups::{RootBlobsTuple, RootBlockTuple}; +use crate::sync::{manager::SLOT_IMPORT_TOLERANCE, network_context::SyncNetworkContext}; use beacon_chain::blob_verification::AsBlock; use beacon_chain::blob_verification::BlockWrapper; +use beacon_chain::data_availability_checker::DataAvailabilityChecker; use beacon_chain::BeaconChainTypes; use lighthouse_network::PeerId; +use std::sync::Arc; use store::Hash256; use strum::IntoStaticStr; - -use crate::sync::block_lookups::ForceBlockRequest; -use crate::sync::{ - manager::{Id, SLOT_IMPORT_TOLERANCE}, - network_context::SyncNetworkContext, -}; - -use super::single_block_lookup::{self, SingleBlockRequest}; +use types::blob_sidecar::FixedBlobSidecarList; +use types::{BlobSidecar, SignedBeaconBlock}; /// How many attempts we try to find a parent of a block before we give up trying. pub(crate) const PARENT_FAIL_TOLERANCE: u8 = 5; @@ -26,19 +26,22 @@ pub(crate) struct ParentLookup { /// The root of the block triggering this parent request. chain_hash: Hash256, /// The blocks that have currently been downloaded. - downloaded_blocks: Vec>, + downloaded_blocks: Vec>, /// Request of the last parent. - current_parent_request: SingleBlockRequest, - /// Id of the last parent request. - current_parent_request_id: Option, + pub current_parent_request: SingleBlockLookup, } #[derive(Debug, PartialEq, Eq, IntoStaticStr)] -pub enum VerifyError { +pub enum ParentVerifyError { RootMismatch, NoBlockReturned, + NotEnoughBlobsReturned, ExtraBlocksReturned, + UnrequestedBlobId, + ExtraBlobsReturned, + InvalidIndex(u64), PreviousFailure { parent_root: Hash256 }, + BenignFailure, } #[derive(Debug, PartialEq, Eq)] @@ -55,62 +58,143 @@ pub enum RequestError { } impl ParentLookup { + pub fn new( + block_root: Hash256, + parent_root: Hash256, + peer_id: PeerShouldHave, + da_checker: Arc>, + ) -> Self { + let current_parent_request = + SingleBlockLookup::new(parent_root, Some(<_>::default()), &[peer_id], da_checker); + + Self { + chain_hash: block_root, + downloaded_blocks: vec![], + current_parent_request, + } + } + pub fn contains_block(&self, block_root: &Hash256) -> bool { self.downloaded_blocks .iter() .any(|(root, _d_block)| root == block_root) } - pub fn new(block_root: Hash256, block: BlockWrapper, peer_id: PeerId) -> Self { - let current_parent_request = SingleBlockRequest::new(block.parent_root(), peer_id); - - Self { - chain_hash: block_root, - downloaded_blocks: vec![(block_root, block)], - current_parent_request, - current_parent_request_id: None, - } + pub fn is_for_block(&self, block_root: Hash256) -> bool { + self.current_parent_request.is_for_block(block_root) } /// Attempts to request the next unknown parent. If the request fails, it should be removed. 
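+    // Note: the depth checks below use `downloaded_blocks.len() + 1` because the
+    // block that triggered this lookup is no longer pre-populated into
+    // `downloaded_blocks` when the `ParentLookup` is created (see `new` above).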
- pub fn request_parent( + pub fn request_parent_block( &mut self, cx: &mut SyncNetworkContext, - force_block_request: ForceBlockRequest, ) -> Result<(), RequestError> { // check to make sure this request hasn't failed - if self.downloaded_blocks.len() >= PARENT_DEPTH_TOLERANCE { + if self.downloaded_blocks.len() + 1 >= PARENT_DEPTH_TOLERANCE { return Err(RequestError::ChainTooLong); } - let (peer_id, request) = self.current_parent_request.request_block()?; - match cx.parent_lookup_request(peer_id, request, force_block_request) { - Ok(request_id) => { - self.current_parent_request_id = Some(request_id); - Ok(()) + if let Some((peer_id, request)) = self.current_parent_request.request_block()? { + match cx.parent_lookup_block_request(peer_id, request) { + Ok(request_id) => { + self.current_parent_request.id.block_request_id = Some(request_id); + return Ok(()); + } + Err(reason) => { + self.current_parent_request.id.block_request_id = None; + return Err(RequestError::SendFailed(reason)); + } } - Err(reason) => { - self.current_parent_request_id = None; - Err(RequestError::SendFailed(reason)) + } + Ok(()) + } + + pub fn request_parent_blobs( + &mut self, + cx: &mut SyncNetworkContext, + ) -> Result<(), RequestError> { + // check to make sure this request hasn't failed + if self.downloaded_blocks.len() + 1 >= PARENT_DEPTH_TOLERANCE { + return Err(RequestError::ChainTooLong); + } + + if let Some((peer_id, request)) = self.current_parent_request.request_blobs()? { + match cx.parent_lookup_blobs_request(peer_id, request) { + Ok(request_id) => { + self.current_parent_request.id.blob_request_id = Some(request_id); + return Ok(()); + } + Err(reason) => { + self.current_parent_request.id.blob_request_id = None; + return Err(RequestError::SendFailed(reason)); + } } } + Ok(()) } - pub fn check_peer_disconnected(&mut self, peer_id: &PeerId) -> Result<(), ()> { - self.current_parent_request.check_peer_disconnected(peer_id) + pub fn check_block_peer_disconnected(&mut self, peer_id: &PeerId) -> Result<(), ()> { + self.current_parent_request + .block_request_state + .state + .check_peer_disconnected(peer_id) } - pub fn add_block(&mut self, block: BlockWrapper) { + pub fn check_blob_peer_disconnected(&mut self, peer_id: &PeerId) -> Result<(), ()> { + self.current_parent_request + .blob_request_state + .state + .check_peer_disconnected(peer_id) + } + + pub fn add_unknown_parent_block(&mut self, block: BlockWrapper) { let next_parent = block.parent_root(); - let current_root = self.current_parent_request.hash; + + // Cache the block. + let current_root = self + .current_parent_request + .block_request_state + .requested_block_root; self.downloaded_blocks.push((current_root, block)); - self.current_parent_request.hash = next_parent; - self.current_parent_request.state = single_block_lookup::State::AwaitingDownload; - self.current_parent_request_id = None; + + // Update the block request. + self.current_parent_request + .block_request_state + .requested_block_root = next_parent; + self.current_parent_request.block_request_state.state.state = State::AwaitingDownload; + self.current_parent_request.id.block_request_id = None; + + // Update the blobs request. + self.current_parent_request.blob_request_state.state.state = State::AwaitingDownload; + self.current_parent_request.id.blob_request_id = None; + + // Reset the unknown parent components. 
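+        // (A fresh cache ensures components downloaded for the previous parent are
+        // not attributed to the new parent target.)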
+        self.current_parent_request.unknown_parent_components =
+            Some(UnknownParentComponents::default());
+    }
+
+    pub fn add_current_request_block(&mut self, block: Arc>) {
+        // Cache the block.
+        self.current_parent_request.add_unknown_parent_block(block);
+
+        // Update the request.
+        self.current_parent_request.id.block_request_id = None;
+    }
+
+    pub fn add_current_request_blobs(&mut self, blobs: FixedBlobSidecarList) {
+        // Cache the blobs.
+        self.current_parent_request.add_unknown_parent_blobs(blobs);
+
+        // Update the request.
+        self.current_parent_request.id.blob_request_id = None;
+    }
+
+    pub fn pending_block_response(&self, req_id: BlockRequestId) -> bool {
+        self.current_parent_request.id.block_request_id == Some(req_id)
     }

-    pub fn pending_response(&self, req_id: Id) -> bool {
-        self.current_parent_request_id == Some(req_id)
+    pub fn pending_blob_response(&self, req_id: BlobRequestId) -> bool {
+        self.current_parent_request.id.blob_request_id == Some(req_id)
     }

     /// Consumes the parent request and destructures it into its parts.
@@ -121,18 +205,17 @@ impl ParentLookup {
         Hash256,
         Vec>,
         Vec,
-        SingleBlockRequest,
+        SingleBlockLookup,
     ) {
         let ParentLookup {
             chain_hash,
             downloaded_blocks,
             current_parent_request,
-            current_parent_request_id: _,
         } = self;
         let block_count = downloaded_blocks.len();
         let mut blocks = Vec::with_capacity(block_count);
         let mut hashes = Vec::with_capacity(block_count);
-        for (hash, block) in downloaded_blocks {
+        for (hash, block) in downloaded_blocks.into_iter() {
             blocks.push(block);
             hashes.push(hash);
         }
@@ -144,23 +227,59 @@ impl ParentLookup {
         self.chain_hash
     }

-    pub fn download_failed(&mut self) {
-        self.current_parent_request.register_failure_downloading();
-        self.current_parent_request_id = None;
+    pub fn block_download_failed(&mut self) {
+        self.current_parent_request
+            .block_request_state
+            .state
+            .register_failure_downloading();
+        self.current_parent_request.id.block_request_id = None;
     }

-    pub fn processing_failed(&mut self) {
-        self.current_parent_request.register_failure_processing();
-        self.current_parent_request_id = None;
+    pub fn blob_download_failed(&mut self) {
+        self.current_parent_request
+            .blob_request_state
+            .state
+            .register_failure_downloading();
+        self.current_parent_request.id.blob_request_id = None;
+    }
+
+    pub fn block_processing_failed(&mut self) {
+        self.current_parent_request
+            .block_request_state
+            .state
+            .register_failure_processing();
+        if let Some(components) = self
+            .current_parent_request
+            .unknown_parent_components
+            .as_mut()
+        {
+            components.downloaded_block = None;
+        }
+        self.current_parent_request.id.block_request_id = None;
+    }
+
+    pub fn blob_processing_failed(&mut self) {
+        self.current_parent_request
+            .blob_request_state
+            .state
+            .register_failure_processing();
+        if let Some(components) = self
+            .current_parent_request
+            .unknown_parent_components
+            .as_mut()
+        {
+            components.downloaded_blobs = <_>::default();
+        }
+        self.current_parent_request.id.blob_request_id = None;
     }

     /// Verifies that the received block is what we requested. If so, parent lookup now waits for
     /// the processing result of the block.
     pub fn verify_block(
         &mut self,
-        block: Option>,
+        block: Option>>,
         failed_chains: &mut lru_cache::LRUTimeCache,
-    ) -> Result>, VerifyError> {
+    ) -> Result>, ParentVerifyError> {
         let root_and_block = self.current_parent_request.verify_block(block)?;

         // check if the parent of this block isn't in the failed cache.
If it is, this chain should @@ -170,50 +289,83 @@ impl ParentLookup { .map(|(_, block)| block.parent_root()) { if failed_chains.contains(&parent_root) { - self.current_parent_request.register_failure_downloading(); - self.current_parent_request_id = None; - return Err(VerifyError::PreviousFailure { parent_root }); + self.current_parent_request + .block_request_state + .state + .register_failure_downloading(); + self.current_parent_request.id.block_request_id = None; + return Err(ParentVerifyError::PreviousFailure { parent_root }); } } Ok(root_and_block) } - pub fn get_processing_peer(&self, chain_hash: Hash256) -> Option { - if self.chain_hash == chain_hash { - return self.current_parent_request.processing_peer().ok(); + pub fn verify_blob( + &mut self, + blob: Option>>, + failed_chains: &mut lru_cache::LRUTimeCache, + ) -> Result>, ParentVerifyError> { + let parent_root_opt = blob.as_ref().map(|b| b.block_parent_root); + let blobs = self.current_parent_request.verify_blob(blob)?; + + // check if the parent of this block isn't in the failed cache. If it is, this chain should + // be dropped and the peer downscored. + if let Some(parent_root) = parent_root_opt { + if failed_chains.contains(&parent_root) { + self.current_parent_request + .blob_request_state + .state + .register_failure_downloading(); + self.current_parent_request.id.blob_request_id = None; + return Err(ParentVerifyError::PreviousFailure { parent_root }); + } } - None - } - #[cfg(test)] - pub fn failed_attempts(&self) -> u8 { - self.current_parent_request.failed_attempts() + Ok(blobs) } - pub fn add_peer(&mut self, block_root: &Hash256, peer_id: &PeerId) -> bool { - self.current_parent_request.add_peer(block_root, peer_id) + pub fn add_peers(&mut self, peer_source: &[PeerShouldHave]) { + self.current_parent_request.add_peers(peer_source) } - pub fn used_peers(&self) -> impl Iterator + '_ { - self.current_parent_request.used_peers.iter() + pub fn used_peers(&self, response_type: ResponseType) -> impl Iterator + '_ { + match response_type { + ResponseType::Block => self + .current_parent_request + .block_request_state + .state + .used_peers + .iter(), + ResponseType::Blob => self + .current_parent_request + .blob_request_state + .state + .used_peers + .iter(), + } } } -impl From for VerifyError { - fn from(e: super::single_block_lookup::VerifyError) -> Self { - use super::single_block_lookup::VerifyError as E; +impl From for ParentVerifyError { + fn from(e: LookupVerifyError) -> Self { + use LookupVerifyError as E; match e { - E::RootMismatch => VerifyError::RootMismatch, - E::NoBlockReturned => VerifyError::NoBlockReturned, - E::ExtraBlocksReturned => VerifyError::ExtraBlocksReturned, + E::RootMismatch => ParentVerifyError::RootMismatch, + E::NoBlockReturned => ParentVerifyError::NoBlockReturned, + E::ExtraBlocksReturned => ParentVerifyError::ExtraBlocksReturned, + E::UnrequestedBlobId => ParentVerifyError::UnrequestedBlobId, + E::ExtraBlobsReturned => ParentVerifyError::ExtraBlobsReturned, + E::InvalidIndex(index) => ParentVerifyError::InvalidIndex(index), + E::NotEnoughBlobsReturned => ParentVerifyError::NotEnoughBlobsReturned, + E::BenignFailure => ParentVerifyError::BenignFailure, } } } -impl From for RequestError { - fn from(e: super::single_block_lookup::LookupRequestError) -> Self { - use super::single_block_lookup::LookupRequestError as E; +impl From for RequestError { + fn from(e: LookupRequestError) -> Self { + use LookupRequestError as E; match e { E::TooManyAttempts { cannot_process } => { 
RequestError::TooManyAttempts { cannot_process }
diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs
index 60911dbb395..7ccc3872407 100644
--- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs
+++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs
@@ -1,43 +1,210 @@
-use super::RootBlockTuple;
-use beacon_chain::blob_verification::AsBlock;
+use crate::sync::block_lookups::{BlobRequestId, BlockRequestId, RootBlobsTuple, RootBlockTuple};
+use crate::sync::network_context::SyncNetworkContext;
 use beacon_chain::blob_verification::BlockWrapper;
-use beacon_chain::get_block_root;
+use beacon_chain::data_availability_checker::DataAvailabilityChecker;
+use beacon_chain::{get_block_root, BeaconChainTypes};
+use lighthouse_network::rpc::methods::BlobsByRootRequest;
 use lighthouse_network::{rpc::BlocksByRootRequest, PeerId};
 use rand::seq::IteratorRandom;
 use ssz_types::VariableList;
 use std::collections::HashSet;
-use store::{EthSpec, Hash256};
+use std::ops::IndexMut;
+use std::sync::Arc;
+use store::Hash256;
 use strum::IntoStaticStr;
+use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList};
+use types::{BlobSidecar, EthSpec, SignedBeaconBlock};

-/// Object representing a single block lookup request.
-#[derive(PartialEq, Eq)]
-pub struct SingleBlockRequest {
-    /// The hash of the requested block.
-    pub hash: Hash256,
+use super::{PeerShouldHave, ResponseType};
+
+pub struct SingleBlockLookup {
+    pub id: LookupId,
+    pub block_request_state: BlockRequestState,
+    pub blob_request_state: BlobRequestState,
+    pub da_checker: Arc>,
+    /// Only necessary for requests triggered by an `UnknownBlockParent` or `UnknownBlobParent` because any
+    /// blocks or blobs without parents won't hit the data availability cache.
+    pub unknown_parent_components: Option>,
+    /// We may want to delay the actual request trigger to give us a chance to receive all block
+    /// components over gossip.
+    pub triggered: bool,
+}
+
+#[derive(Default, Clone)]
+pub struct LookupId {
+    pub block_request_id: Option,
+    pub blob_request_id: Option,
+}
+
+pub struct BlobRequestState {
+    pub requested_ids: Vec,
+    /// Where we store blobs until we receive the stream terminator.
+    pub blob_download_queue: FixedBlobSidecarList,
+    pub state: SingleLookupRequestState,
+}
+
+impl BlobRequestState {
+    pub fn new(peer_source: &[PeerShouldHave]) -> Self {
+        Self {
+            requested_ids: <_>::default(),
+            blob_download_queue: <_>::default(),
+            state: SingleLookupRequestState::new(peer_source),
+        }
+    }
+}
+
+pub struct BlockRequestState {
+    pub requested_block_root: Hash256,
+    pub state: SingleLookupRequestState,
+}
+
+impl BlockRequestState {
+    pub fn new(block_root: Hash256, peers: &[PeerShouldHave]) -> Self {
+        Self {
+            requested_block_root: block_root,
+            state: SingleLookupRequestState::new(peers),
+        }
+    }
+}
+
+impl SingleBlockLookup {
+    pub(crate) fn register_failure_downloading(&mut self, response_type: ResponseType) {
+        match response_type {
+            ResponseType::Block => self
+                .block_request_state
+                .state
+                .register_failure_downloading(),
+            ResponseType::Blob => self.blob_request_state.state.register_failure_downloading(),
+        }
+    }
+}
+
+impl SingleBlockLookup {
+    pub(crate) fn downloading(&mut self, response_type: ResponseType) -> bool {
+        match response_type {
+            ResponseType::Block => {
+                matches!(
+                    self.block_request_state.state.state,
+                    State::Downloading { ..
}
+                )
+            }
+            ResponseType::Blob => {
+                matches!(
+                    self.blob_request_state.state.state,
+                    State::Downloading { .. }
+                )
+            }
+        }
+    }
+
+    pub(crate) fn remove_peer_if_useless(&mut self, peer_id: &PeerId, response_type: ResponseType) {
+        match response_type {
+            ResponseType::Block => self
+                .block_request_state
+                .state
+                .remove_peer_if_useless(peer_id),
+            ResponseType::Blob => self
+                .blob_request_state
+                .state
+                .remove_peer_if_useless(peer_id),
+        }
+    }
+
+    pub(crate) fn check_peer_disconnected(
+        &mut self,
+        peer_id: &PeerId,
+        response_type: ResponseType,
+    ) -> Result<(), ()> {
+        match response_type {
+            ResponseType::Block => self
+                .block_request_state
+                .state
+                .check_peer_disconnected(peer_id),
+            ResponseType::Blob => self
+                .blob_request_state
+                .state
+                .check_peer_disconnected(peer_id),
+        }
+    }
+}
+
+/// For requests triggered by an `UnknownBlockParent` or `UnknownBlobParent`, this struct
+/// is used to cache components as they are sent to the networking layer. We can't use the
+/// data availability cache currently because any blocks or blobs without parents
+/// won't pass validation and therefore won't make it into the cache.
+#[derive(Default)]
+pub struct UnknownParentComponents {
+    pub downloaded_block: Option>>,
+    pub downloaded_blobs: FixedBlobSidecarList,
+}
+
+impl UnknownParentComponents {
+    pub fn new(
+        block: Option>>,
+        blobs: Option>,
+    ) -> Self {
+        Self {
+            downloaded_block: block,
+            downloaded_blobs: blobs.unwrap_or_default(),
+        }
+    }
+    pub fn add_unknown_parent_block(&mut self, block: Arc>) {
+        self.downloaded_block = Some(block);
+    }
+    pub fn add_unknown_parent_blobs(&mut self, blobs: FixedBlobSidecarList) {
+        for (index, blob_opt) in self.downloaded_blobs.iter_mut().enumerate() {
+            if let Some(Some(downloaded_blob)) = blobs.get(index) {
+                *blob_opt = Some(downloaded_blob.clone());
+            }
+        }
+    }
+    pub fn downloaded_indices(&self) -> HashSet {
+        self.downloaded_blobs
+            .iter()
+            .enumerate()
+            .filter_map(|(i, blob_opt)| blob_opt.as_ref().map(|_| i))
+            .collect::>()
+    }
+}
+
+/// Object representing the state of a single block or blob lookup request.
+#[derive(PartialEq, Eq, Debug)]
+pub struct SingleLookupRequestState {
     /// State of this request.
     pub state: State,
-    /// Peers that should have this block.
+    /// Peers that should have this block or blob.
     pub available_peers: HashSet,
+    /// Peers that may or may not have this block or blob.
+    pub potential_peers: HashSet,
     /// Peers from which we have requested this block.
     pub used_peers: HashSet,
-    /// How many times have we attempted to process this block.
+    /// How many times have we attempted to process this block or blob.
     failed_processing: u8,
-    /// How many times have we attempted to download this block.
+    /// How many times have we attempted to download this block or blob.
     failed_downloading: u8,
+    pub component_processed: bool,
 }

 #[derive(Debug, PartialEq, Eq)]
 pub enum State {
     AwaitingDownload,
-    Downloading { peer_id: PeerId },
-    Processing { peer_id: PeerId },
+    Downloading { peer_id: PeerShouldHave },
+    Processing { peer_id: PeerShouldHave },
 }

 #[derive(Debug, PartialEq, Eq, IntoStaticStr)]
-pub enum VerifyError {
+pub enum LookupVerifyError {
     RootMismatch,
     NoBlockReturned,
     ExtraBlocksReturned,
+    UnrequestedBlobId,
+    ExtraBlobsReturned,
+    NotEnoughBlobsReturned,
+    InvalidIndex(u64),
+    /// We don't have enough information to know
+    /// whether the peer is at fault or simply missed
+    /// what was requested on gossip.
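+    // (A benign failure only removes the peer from the lookup when other peers
+    // remain; see `remove_peer_if_useless`.)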
+    BenignFailure,
 }

 #[derive(Debug, PartialEq, Eq, IntoStaticStr)]
@@ -50,95 +217,251 @@ pub enum LookupRequestError {
     NoPeers,
 }

-impl SingleBlockRequest {
-    pub fn new(hash: Hash256, peer_id: PeerId) -> Self {
+impl SingleBlockLookup {
+    pub fn new(
+        requested_block_root: Hash256,
+        unknown_parent_components: Option>,
+        peers: &[PeerShouldHave],
+        da_checker: Arc>,
+    ) -> Self {
         Self {
-            hash,
-            state: State::AwaitingDownload,
-            available_peers: HashSet::from([peer_id]),
-            used_peers: HashSet::default(),
-            failed_processing: 0,
-            failed_downloading: 0,
+            id: <_>::default(),
+            block_request_state: BlockRequestState::new(requested_block_root, peers),
+            blob_request_state: BlobRequestState::new(peers),
+            da_checker,
+            unknown_parent_components,
+            triggered: false,
         }
     }

-    /// Registers a failure in processing a block.
-    pub fn register_failure_processing(&mut self) {
-        self.failed_processing = self.failed_processing.saturating_add(1);
-        self.state = State::AwaitingDownload;
+    pub fn is_for_block(&self, block_root: Hash256) -> bool {
+        self.block_request_state.requested_block_root == block_root
     }

-    /// Registers a failure in downloading a block. This might be a peer disconnection or a wrong
-    /// block.
-    pub fn register_failure_downloading(&mut self) {
-        self.failed_downloading = self.failed_downloading.saturating_add(1);
-        self.state = State::AwaitingDownload;
+    /// Send the necessary requests for blocks and blobs and update `self.id` with the latest
+    /// request `Id`s. Returns `Err(())` if neither the block request nor the blob request was
+    /// sent, whether because it could not be made or because it is no longer required.
+    pub fn request_block_and_blobs(&mut self, cx: &mut SyncNetworkContext) -> Result<(), ()> {
+        let block_request_id = if let Ok(Some((peer_id, block_request))) = self.request_block() {
+            cx.single_block_lookup_request(peer_id, block_request).ok()
+        } else {
+            None
+        };
+
+        let blob_request_id = if let Ok(Some((peer_id, blob_request))) = self.request_blobs() {
+            cx.single_blobs_lookup_request(peer_id, blob_request).ok()
+        } else {
+            None
+        };
+
+        if block_request_id.is_none() && blob_request_id.is_none() {
+            return Err(());
+        }
+
+        self.id = LookupId {
+            block_request_id,
+            blob_request_id,
+        };
+        Ok(())
     }

-    /// The total number of failures, whether it be processing or downloading.
- pub fn failed_attempts(&self) -> u8 { - self.failed_processing + self.failed_downloading + pub fn update_blobs_request(&mut self) { + self.blob_request_state.requested_ids = if let Some(components) = + self.unknown_parent_components.as_ref() + { + let blobs = components.downloaded_indices(); + self.da_checker + .get_missing_blob_ids( + self.block_request_state.requested_block_root, + components.downloaded_block.as_ref(), + Some(blobs), + ) + .unwrap_or_default() + } else { + self.da_checker + .get_missing_blob_ids_checking_cache(self.block_request_state.requested_block_root) + .unwrap_or_default() + }; } - pub fn add_peer(&mut self, hash: &Hash256, peer_id: &PeerId) -> bool { - let is_useful = &self.hash == hash; - if is_useful { - self.available_peers.insert(*peer_id); - } - is_useful + pub fn get_downloaded_block(&mut self) -> Option> { + self.unknown_parent_components + .as_mut() + .and_then(|components| { + let downloaded_block = components.downloaded_block.as_ref(); + let downloaded_indices = components.downloaded_indices(); + let missing_ids = self.da_checker.get_missing_blob_ids( + self.block_request_state.requested_block_root, + downloaded_block, + Some(downloaded_indices), + ); + let download_complete = + missing_ids.map_or(true, |missing_ids| missing_ids.is_empty()); + if download_complete { + let UnknownParentComponents { + downloaded_block, + downloaded_blobs, + } = components; + downloaded_block.as_ref().map(|block| { + BlockWrapper::BlockAndBlobs(block.clone(), std::mem::take(downloaded_blobs)) + }) + } else { + None + } + }) } - /// If a peer disconnects, this request could be failed. If so, an error is returned - pub fn check_peer_disconnected(&mut self, dc_peer_id: &PeerId) -> Result<(), ()> { - self.available_peers.remove(dc_peer_id); - if let State::Downloading { peer_id } = &self.state { - if peer_id == dc_peer_id { - // Peer disconnected before providing a block - self.register_failure_downloading(); - return Err(()); + pub fn add_unknown_parent_components( + &mut self, + components: UnknownParentComponents, + ) { + if let Some(ref mut existing_components) = self.unknown_parent_components { + let UnknownParentComponents { + downloaded_block, + downloaded_blobs, + } = components; + if let Some(block) = downloaded_block { + existing_components.add_unknown_parent_block(block); } + existing_components.add_unknown_parent_blobs(downloaded_blobs); + } else { + self.unknown_parent_components = Some(components); + } + } + pub fn add_unknown_parent_block(&mut self, block: Arc>) { + if let Some(ref mut components) = self.unknown_parent_components { + components.add_unknown_parent_block(block) + } else { + self.unknown_parent_components = Some(UnknownParentComponents { + downloaded_block: Some(block), + downloaded_blobs: FixedBlobSidecarList::default(), + }) + } + } + + pub fn add_unknown_parent_blobs(&mut self, blobs: FixedBlobSidecarList) { + if let Some(ref mut components) = self.unknown_parent_components { + components.add_unknown_parent_blobs(blobs) + } else { + self.unknown_parent_components = Some(UnknownParentComponents { + downloaded_block: None, + downloaded_blobs: blobs, + }) } - Ok(()) } /// Verifies if the received block matches the requested one. /// Returns the block for processing if the response is what we expected. 
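+    // The match below acts as a small state machine: any response while
+    // `AwaitingDownload`, or a second response while `Processing`, counts as
+    // extra; an empty response only penalizes a peer that should have the block.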
-    pub fn verify_block(
+    pub fn verify_block(
         &mut self,
-        block: Option>,
-    ) -> Result>, VerifyError> {
-        match self.state {
+        block: Option>>,
+    ) -> Result>, LookupVerifyError> {
+        match self.block_request_state.state.state {
             State::AwaitingDownload => {
-                self.register_failure_downloading();
-                Err(VerifyError::ExtraBlocksReturned)
+                self.block_request_state
+                    .state
+                    .register_failure_downloading();
+                Err(LookupVerifyError::ExtraBlocksReturned)
             }
-            State::Downloading { peer_id } => match block {
-                Some(block) => {
-                    // Compute the block root using this specific function so that we can get timing
-                    // metrics.
-                    let block_root = get_block_root(block.as_block());
-                    if block_root != self.hash {
-                        // return an error and drop the block
-                        // NOTE: we take this is as a download failure to prevent counting the
-                        // attempt as a chain failure, but simply a peer failure.
-                        self.register_failure_downloading();
-                        Err(VerifyError::RootMismatch)
+            State::Downloading { peer_id } => {
+                match block {
+                    Some(block) => {
+                        // Compute the block root using this specific function so that we can get timing
+                        // metrics.
+                        let block_root = get_block_root(&block);
+                        if block_root != self.block_request_state.requested_block_root {
+                            // return an error and drop the block
+                            // NOTE: we take this as a download failure to prevent counting the
+                            // attempt as a chain failure, but simply a peer failure.
+                            self.block_request_state
+                                .state
+                                .register_failure_downloading();
+                            Err(LookupVerifyError::RootMismatch)
+                        } else {
+                            // Return the block for processing.
+                            self.block_request_state.state.state = State::Processing { peer_id };
+                            Ok(Some((block_root, block)))
+                        }
+                    }
+                    None => {
+                        if peer_id.should_have_block() {
+                            self.block_request_state
+                                .state
+                                .register_failure_downloading();
+                            Err(LookupVerifyError::NoBlockReturned)
+                        } else {
+                            self.block_request_state.state.state = State::AwaitingDownload;
+                            Err(LookupVerifyError::BenignFailure)
+                        }
+                    }
+                }
+            }
+            State::Processing { peer_id: _ } => match block {
+                Some(_) => {
+                    // We sent the block for processing and received an extra block.
+                    self.block_request_state
+                        .state
+                        .register_failure_downloading();
+                    Err(LookupVerifyError::ExtraBlocksReturned)
+                }
+                None => {
+                    // This is simply the stream termination and we are already processing the
+                    // block
+                    Ok(None)
+                }
+            },
+        }
+    }
+
+    pub fn verify_blob(
+        &mut self,
+        blob: Option>>,
+    ) -> Result>, LookupVerifyError> {
+        match self.blob_request_state.state.state {
+            State::AwaitingDownload => {
+                self.blob_request_state.state.register_failure_downloading();
+                Err(LookupVerifyError::ExtraBlobsReturned)
+            }
+            State::Downloading {
+                peer_id: peer_source,
+            } => match blob {
+                Some(blob) => {
+                    let received_id = blob.id();
+                    if !self.blob_request_state.requested_ids.contains(&received_id) {
+                        self.blob_request_state.state.register_failure_downloading();
+                        Err(LookupVerifyError::UnrequestedBlobId)
                     } else {
-                        // Return the block for processing.
-                        self.state = State::Processing { peer_id };
-                        Ok(Some((block_root, block)))
+                        // State should remain downloading until we receive the stream terminator.
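+                        // Each verified blob is checked off `requested_ids` and slotted
+                        // into `blob_download_queue` by its index; the queue is only
+                        // handed over when the stream terminator (`None`) arrives.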
+ self.blob_request_state + .requested_ids + .retain(|id| *id != received_id); + let blob_index = blob.index; + + if blob_index >= T::EthSpec::max_blobs_per_block() as u64 { + return Err(LookupVerifyError::InvalidIndex(blob.index)); + } + *self + .blob_request_state + .blob_download_queue + .index_mut(blob_index as usize) = Some(blob); + Ok(None) } } None => { - self.register_failure_downloading(); - Err(VerifyError::NoBlockReturned) + self.blob_request_state.state.state = State::Processing { + peer_id: peer_source, + }; + Ok(Some(( + self.block_request_state.requested_block_root, + std::mem::take(&mut self.blob_request_state.blob_download_queue), + ))) } }, - State::Processing { peer_id: _ } => match block { + State::Processing { peer_id: _ } => match blob { Some(_) => { - // We sent the block for processing and received an extra block. - self.register_failure_downloading(); - Err(VerifyError::ExtraBlocksReturned) + // We sent the blob for processing and received an extra blob. + self.blob_request_state.state.register_failure_downloading(); + Err(LookupVerifyError::ExtraBlobsReturned) } None => { // This is simply the stream termination and we are already processing the @@ -149,42 +472,317 @@ impl SingleBlockRequest { } } - pub fn request_block(&mut self) -> Result<(PeerId, BlocksByRootRequest), LookupRequestError> { - debug_assert!(matches!(self.state, State::AwaitingDownload)); - if self.failed_attempts() >= MAX_ATTEMPTS { + pub fn request_block( + &mut self, + ) -> Result, LookupRequestError> { + let block_already_downloaded = + if let Some(components) = self.unknown_parent_components.as_ref() { + components.downloaded_block.is_some() + } else { + self.da_checker + .has_block(&self.block_request_state.requested_block_root) + }; + + if block_already_downloaded { + return Ok(None); + } + + debug_assert!(matches!( + self.block_request_state.state.state, + State::AwaitingDownload + )); + let request = BlocksByRootRequest { + block_roots: VariableList::from(vec![self.block_request_state.requested_block_root]), + }; + let response_type = ResponseType::Block; + if self.too_many_attempts(response_type) { Err(LookupRequestError::TooManyAttempts { - cannot_process: self.failed_processing >= self.failed_downloading, + cannot_process: self.cannot_process(response_type), }) - } else if let Some(&peer_id) = self.available_peers.iter().choose(&mut rand::thread_rng()) { - let request = BlocksByRootRequest { - block_roots: VariableList::from(vec![self.hash]), - }; - self.state = State::Downloading { peer_id }; - self.used_peers.insert(peer_id); - Ok((peer_id, request)) + } else if let Some(peer_id) = self.get_peer(response_type) { + self.add_used_peer(peer_id, response_type); + Ok(Some((peer_id.to_peer_id(), request))) + } else { + Err(LookupRequestError::NoPeers) + } + } + + pub fn request_blobs( + &mut self, + ) -> Result, LookupRequestError> { + self.update_blobs_request(); + + if self.blob_request_state.requested_ids.is_empty() { + return Ok(None); + } + + debug_assert!(matches!( + self.blob_request_state.state.state, + State::AwaitingDownload + )); + let request = BlobsByRootRequest { + blob_ids: VariableList::from(self.blob_request_state.requested_ids.clone()), + }; + let response_type = ResponseType::Blob; + if self.too_many_attempts(response_type) { + Err(LookupRequestError::TooManyAttempts { + cannot_process: self.cannot_process(response_type), + }) + } else if let Some(peer_id) = self.get_peer(response_type) { + self.add_used_peer(peer_id, response_type); + Ok(Some((peer_id.to_peer_id(), 
request))) } else { Err(LookupRequestError::NoPeers) } } - pub fn processing_peer(&self) -> Result { + fn too_many_attempts(&self, response_type: ResponseType) -> bool { + match response_type { + ResponseType::Block => self.block_request_state.state.failed_attempts() >= MAX_ATTEMPTS, + ResponseType::Blob => self.blob_request_state.state.failed_attempts() >= MAX_ATTEMPTS, + } + } + + fn cannot_process(&self, response_type: ResponseType) -> bool { + match response_type { + ResponseType::Block => { + self.block_request_state.state.failed_processing + >= self.block_request_state.state.failed_downloading + } + ResponseType::Blob => { + self.blob_request_state.state.failed_processing + >= self.blob_request_state.state.failed_downloading + } + } + } + + fn get_peer(&self, response_type: ResponseType) -> Option { + match response_type { + ResponseType::Block => self + .block_request_state + .state + .available_peers + .iter() + .choose(&mut rand::thread_rng()) + .copied() + .map(PeerShouldHave::BlockAndBlobs) + .or(self + .block_request_state + .state + .potential_peers + .iter() + .choose(&mut rand::thread_rng()) + .copied() + .map(PeerShouldHave::Neither)), + ResponseType::Blob => self + .blob_request_state + .state + .available_peers + .iter() + .choose(&mut rand::thread_rng()) + .copied() + .map(PeerShouldHave::BlockAndBlobs) + .or(self + .blob_request_state + .state + .potential_peers + .iter() + .choose(&mut rand::thread_rng()) + .copied() + .map(PeerShouldHave::Neither)), + } + } + + fn add_used_peer(&mut self, peer_id: PeerShouldHave, response_type: ResponseType) { + match response_type { + ResponseType::Block => { + self.block_request_state + .state + .used_peers + .insert(peer_id.to_peer_id()); + self.block_request_state.state.state = State::Downloading { peer_id }; + } + ResponseType::Blob => { + self.blob_request_state + .state + .used_peers + .insert(peer_id.to_peer_id()); + self.blob_request_state.state.state = State::Downloading { peer_id }; + } + } + } + + pub fn add_peers(&mut self, peers: &[PeerShouldHave]) { + for peer in peers { + match peer { + PeerShouldHave::BlockAndBlobs(peer_id) => { + self.block_request_state.state.add_peer(peer_id); + self.blob_request_state.state.add_peer(peer_id); + } + PeerShouldHave::Neither(peer_id) => { + self.block_request_state.state.add_potential_peer(peer_id); + self.blob_request_state.state.add_potential_peer(peer_id); + } + } + } + } + + pub fn processing_peer(&self, response_type: ResponseType) -> Result { + match response_type { + ResponseType::Block => self.block_request_state.state.processing_peer(), + ResponseType::Blob => self.blob_request_state.state.processing_peer(), + } + } + + pub fn downloading_peer(&self, response_type: ResponseType) -> Result { + match response_type { + ResponseType::Block => self.block_request_state.state.peer(), + ResponseType::Blob => self.blob_request_state.state.peer(), + } + } + + pub fn both_components_processed(&self) -> bool { + self.block_request_state.state.component_processed + && self.blob_request_state.state.component_processed + } + + pub fn set_component_processed(&mut self, response_type: ResponseType) { + match response_type { + ResponseType::Block => self.block_request_state.state.component_processed = true, + ResponseType::Blob => self.blob_request_state.state.component_processed = true, + } + } +} + +impl SingleLookupRequestState { + pub fn new(peers: &[PeerShouldHave]) -> Self { + let mut available_peers = HashSet::default(); + let mut potential_peers = HashSet::default(); + for peer in 
peers { + match peer { + PeerShouldHave::BlockAndBlobs(peer_id) => { + available_peers.insert(*peer_id); + } + PeerShouldHave::Neither(peer_id) => { + potential_peers.insert(*peer_id); + } + } + } + Self { + state: State::AwaitingDownload, + available_peers, + potential_peers, + used_peers: HashSet::default(), + failed_processing: 0, + failed_downloading: 0, + component_processed: false, + } + } + + /// Registers a failure in processing a block. + pub fn register_failure_processing(&mut self) { + self.failed_processing = self.failed_processing.saturating_add(1); + self.state = State::AwaitingDownload; + } + + /// Registers a failure in downloading a block. This might be a peer disconnection or a wrong + /// block. + pub fn register_failure_downloading(&mut self) { + self.failed_downloading = self.failed_downloading.saturating_add(1); + self.state = State::AwaitingDownload; + } + + /// The total number of failures, whether it be processing or downloading. + pub fn failed_attempts(&self) -> u8 { + self.failed_processing + self.failed_downloading + } + + pub fn add_peer(&mut self, peer_id: &PeerId) { + self.potential_peers.remove(peer_id); + self.available_peers.insert(*peer_id); + } + + pub fn add_potential_peer(&mut self, peer_id: &PeerId) { + if !self.available_peers.contains(peer_id) { + self.potential_peers.insert(*peer_id); + } + } + + /// If a peer disconnects, this request could be failed. If so, an error is returned + pub fn check_peer_disconnected(&mut self, dc_peer_id: &PeerId) -> Result<(), ()> { + self.available_peers.remove(dc_peer_id); + self.potential_peers.remove(dc_peer_id); + if let State::Downloading { peer_id } = &self.state { + if peer_id.as_peer_id() == dc_peer_id { + // Peer disconnected before providing a block + self.register_failure_downloading(); + return Err(()); + } + } + Ok(()) + } + + pub fn processing_peer(&self) -> Result { if let State::Processing { peer_id } = &self.state { Ok(*peer_id) } else { Err(()) } } + + pub fn peer(&self) -> Result { + match &self.state { + State::Processing { peer_id } => Ok(*peer_id), + State::Downloading { peer_id } => Ok(*peer_id), + _ => Err(()), + } + } + + pub fn remove_peer_if_useless(&mut self, peer_id: &PeerId) { + if !self.available_peers.is_empty() || self.potential_peers.len() > 1 { + self.potential_peers.remove(peer_id); + } + } } -impl slog::Value for SingleBlockRequest { +impl slog::Value + for SingleBlockLookup +{ fn serialize( &self, - record: &slog::Record, + _record: &slog::Record, key: slog::Key, serializer: &mut dyn slog::Serializer, ) -> slog::Result { serializer.emit_str("request", key)?; - serializer.emit_arguments("hash", &format_args!("{}", self.hash))?; + serializer.emit_arguments( + "hash", + &format_args!("{}", self.block_request_state.requested_block_root), + )?; + serializer.emit_arguments( + "blob_ids", + &format_args!("{:?}", self.blob_request_state.requested_ids), + )?; + serializer.emit_arguments( + "block_request_state.state", + &format_args!("{:?}", self.block_request_state.state), + )?; + serializer.emit_arguments( + "blob_request_state.state", + &format_args!("{:?}", self.blob_request_state.state), + )?; + slog::Result::Ok(()) + } +} + +impl slog::Value for SingleLookupRequestState { + fn serialize( + &self, + record: &slog::Record, + key: slog::Key, + serializer: &mut dyn slog::Serializer, + ) -> slog::Result { + serializer.emit_str("request_state", key)?; match &self.state { State::AwaitingDownload => { "awaiting_download".serialize(record, "state", serializer)? 
@@ -205,9 +803,16 @@ impl slog::Value for SingleBlockRequest { #[cfg(test)] mod tests { use super::*; + use beacon_chain::builder::Witness; + use beacon_chain::eth1_chain::CachingEth1Backend; + use sloggers::null::NullLoggerBuilder; + use sloggers::Build; + use slot_clock::{SlotClock, TestingSlotClock}; + use std::time::Duration; + use store::{HotColdDB, MemoryStore, StoreConfig}; use types::{ test_utils::{SeedableRng, TestRandom, XorShiftRng}, - MinimalEthSpec as E, SignedBeaconBlock, + ChainSpec, EthSpec, MinimalEthSpec as E, SignedBeaconBlock, Slot, }; fn rand_block() -> SignedBeaconBlock { @@ -219,13 +824,27 @@ mod tests { types::Signature::random_for_test(&mut rng), ) } + type T = Witness, E, MemoryStore, MemoryStore>; #[test] fn test_happy_path() { - let peer_id = PeerId::random(); + let peer_id = PeerShouldHave::BlockAndBlobs(PeerId::random()); let block = rand_block(); - - let mut sl = SingleBlockRequest::<4>::new(block.canonical_root(), peer_id); + let spec = E::default_spec(); + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + Duration::from_secs(spec.seconds_per_slot), + ); + let log = NullLoggerBuilder.build().expect("logger should build"); + let store = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log) + .expect("store"); + let da_checker = Arc::new( + DataAvailabilityChecker::new(slot_clock, None, store.into(), spec) + .expect("data availability checker"), + ); + let mut sl = + SingleBlockLookup::<4, T>::new(block.canonical_root(), None, &[peer_id], da_checker); sl.request_block().unwrap(); sl.verify_block(Some(block.into())).unwrap().unwrap(); } @@ -233,13 +852,32 @@ mod tests { #[test] fn test_block_lookup_failures() { const FAILURES: u8 = 3; - let peer_id = PeerId::random(); + let peer_id = PeerShouldHave::BlockAndBlobs(PeerId::random()); let block = rand_block(); + let spec = E::default_spec(); + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + Duration::from_secs(spec.seconds_per_slot), + ); + let log = NullLoggerBuilder.build().expect("logger should build"); + let store = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log) + .expect("store"); + + let da_checker = Arc::new( + DataAvailabilityChecker::new(slot_clock, None, store.into(), spec) + .expect("data availability checker"), + ); - let mut sl = SingleBlockRequest::::new(block.canonical_root(), peer_id); + let mut sl = SingleBlockLookup::::new( + block.canonical_root(), + None, + &[peer_id], + da_checker, + ); for _ in 1..FAILURES { sl.request_block().unwrap(); - sl.register_failure_downloading(); + sl.block_request_state.state.register_failure_downloading(); } // Now we receive the block and send it for processing @@ -247,7 +885,7 @@ mod tests { sl.verify_block(Some(block.into())).unwrap().unwrap(); // One processing failure maxes the available attempts - sl.register_failure_processing(); + sl.block_request_state.state.register_failure_processing(); assert_eq!( sl.request_block(), Err(LookupRequestError::TooManyAttempts { diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index e491d5c8435..3bc552291c4 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1,3 +1,4 @@ +#![cfg(feature = "spec-minimal")] use std::sync::Arc; use crate::service::RequestId; @@ -11,15 +12,18 @@ use beacon_chain::{ eth1_chain::CachingEth1Backend, test_utils::{build_log, 
BeaconChainHarness, EphemeralHarnessType}, }; +use execution_layer::BlobsBundleV1; pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; +use lighthouse_network::rpc::RPCResponseErrorCode; use lighthouse_network::{NetworkGlobals, Request}; -use slot_clock::TestingSlotClock; +use slot_clock::{SlotClock, TestingSlotClock}; use std::time::Duration; use store::MemoryStore; use tokio::sync::mpsc; use types::{ + map_fork_name, map_fork_name_with, test_utils::{SeedableRng, TestRandom, XorShiftRng}, - MinimalEthSpec as E, SignedBeaconBlock, + BeaconBlock, EthSpec, ForkName, FullPayloadDeneb, MinimalEthSpec as E, SignedBeaconBlock, }; type T = Witness, E, MemoryStore, MemoryStore>; @@ -28,10 +32,16 @@ struct TestRig { beacon_processor_rx: mpsc::Receiver>, network_rx: mpsc::UnboundedReceiver>, rng: XorShiftRng, + harness: BeaconChainHarness, } const D: Duration = Duration::new(0, 0); +enum NumBlobs { + Random, + None, +} + impl TestRig { fn test_setup(enable_log: bool) -> (BlockLookups, SyncNetworkContext, Self) { let log = build_log(slog::Level::Debug, enable_log); @@ -42,9 +52,14 @@ impl TestRig { .logger(log.clone()) .deterministic_keypairs(1) .fresh_ephemeral_store() + .testing_slot_clock(TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + Duration::from_secs(12), + )) .build(); - let chain = harness.chain; + let chain = harness.chain.clone(); let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(100); let (network_tx, network_rx) = mpsc::unbounded_channel(); @@ -53,8 +68,13 @@ impl TestRig { beacon_processor_rx, network_rx, rng, + harness, }; - let bl = BlockLookups::new(log.new(slog::o!("component" => "block_lookups"))); + + let bl = BlockLookups::new( + chain.data_availability_checker.clone(), + log.new(slog::o!("component" => "block_lookups")), + ); let cx = { let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log)); SyncNetworkContext::new( @@ -69,48 +89,137 @@ impl TestRig { (bl, cx, rig) } - fn rand_block(&mut self) -> SignedBeaconBlock { - SignedBeaconBlock::from_block( - types::BeaconBlock::Base(types::BeaconBlockBase { - ..<_>::random_for_test(&mut self.rng) - }), - types::Signature::random_for_test(&mut self.rng), - ) + fn rand_block(&mut self, fork_name: ForkName) -> SignedBeaconBlock { + self.rand_block_and_blobs(fork_name, NumBlobs::None).0 } - #[track_caller] - fn expect_block_request(&mut self) -> Id { - match self.network_rx.try_recv() { - Ok(NetworkMessage::SendRequest { - peer_id: _, - request: Request::BlocksByRoot(_request), - request_id: RequestId::Sync(SyncId::SingleBlock { id }), - }) => id, - other => { - panic!("Expected block request, found {:?}", other); + fn rand_block_and_blobs( + &mut self, + fork_name: ForkName, + num_blobs: NumBlobs, + ) -> (SignedBeaconBlock, Vec>) { + let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(&mut self.rng)); + let mut block = + SignedBeaconBlock::from_block(inner, types::Signature::random_for_test(&mut self.rng)); + let mut blob_sidecars = vec![]; + if let Ok(message) = block.message_deneb_mut() { + // Pick a random number of blobs: at least one for NumBlobs::Random, zero for NumBlobs::None. + let mut payload: &mut FullPayloadDeneb = &mut message.body.execution_payload; + let num_blobs = match num_blobs { + NumBlobs::Random => { + let mut num_blobs = rand::random::() % E::max_blobs_per_block(); + if num_blobs == 0 { + num_blobs += 1; + } + num_blobs + } + NumBlobs::None => 0, + }; + let (bundle, transactions) = execution_layer::test_utils::generate_random_blobs::( + num_blobs, +
&self.harness.chain.kzg.as_ref().unwrap(), + ) + .unwrap(); + + payload.execution_payload.transactions = <_>::default(); + for tx in Vec::from(transactions) { + payload.execution_payload.transactions.push(tx).unwrap(); + } + message.body.blob_kzg_commitments = bundle.commitments.clone(); + + let BlobsBundleV1 { + commitments, + proofs, + blobs, + } = bundle; + + let block_root = block.canonical_root(); + + for (index, ((blob, kzg_commitment), kzg_proof)) in blobs + .into_iter() + .zip(commitments.into_iter()) + .zip(proofs.into_iter()) + .enumerate() + { + blob_sidecars.push(BlobSidecar { + block_root, + index: index as u64, + slot: block.slot(), + block_parent_root: block.parent_root(), + proposer_index: block.message().proposer_index(), + blob: blob.clone(), + kzg_commitment: kzg_commitment.clone(), + kzg_proof: kzg_proof.clone(), + }); } } + + (block, blob_sidecars) } #[track_caller] - fn expect_parent_request(&mut self) -> Id { - match self.network_rx.try_recv() { - Ok(NetworkMessage::SendRequest { - peer_id: _, - request: Request::BlocksByRoot(_request), - request_id: RequestId::Sync(SyncId::ParentLookup { id }), - }) => id, - other => panic!("Expected parent request, found {:?}", other), + fn expect_block_request(&mut self, response_type: ResponseType) -> Id { + match response_type { + ResponseType::Block => match self.network_rx.try_recv() { + Ok(NetworkMessage::SendRequest { + peer_id: _, + request: Request::BlocksByRoot(_request), + request_id: RequestId::Sync(SyncId::SingleBlock { id }), + }) => id, + other => { + panic!("Expected block request, found {:?}", other); + } + }, + ResponseType::Blob => match self.network_rx.try_recv() { + Ok(NetworkMessage::SendRequest { + peer_id: _, + request: Request::BlobsByRoot(_request), + request_id: RequestId::Sync(SyncId::SingleBlock { id }), + }) => id, + other => { + panic!("Expected blob request, found {:?}", other); + } + }, } } #[track_caller] - fn expect_block_process(&mut self) { - match self.beacon_processor_rx.try_recv() { - Ok(work) => { - assert_eq!(work.work_type(), crate::beacon_processor::RPC_BLOCK); - } - other => panic!("Expected block process, found {:?}", other), + fn expect_parent_request(&mut self, response_type: ResponseType) -> Id { + match response_type { + ResponseType::Block => match self.network_rx.try_recv() { + Ok(NetworkMessage::SendRequest { + peer_id: _, + request: Request::BlocksByRoot(_request), + request_id: RequestId::Sync(SyncId::ParentLookup { id }), + }) => id, + other => panic!("Expected parent request, found {:?}", other), + }, + ResponseType::Blob => match self.network_rx.try_recv() { + Ok(NetworkMessage::SendRequest { + peer_id: _, + request: Request::BlobsByRoot(_request), + request_id: RequestId::Sync(SyncId::ParentLookup { id }), + }) => id, + other => panic!("Expected parent blobs request, found {:?}", other), + }, + } + } + + #[track_caller] + fn expect_block_process(&mut self, response_type: ResponseType) { + match response_type { + ResponseType::Block => match self.beacon_processor_rx.try_recv() { + Ok(work) => { + assert_eq!(work.work_type(), crate::beacon_processor::RPC_BLOCK); + } + other => panic!("Expected block process, found {:?}", other), + }, + ResponseType::Blob => match self.beacon_processor_rx.try_recv() { + Ok(work) => { + assert_eq!(work.work_type(), crate::beacon_processor::RPC_BLOB); + } + other => panic!("Expected blob process, found {:?}", other), + }, } } @@ -132,6 +241,14 @@ impl TestRig { ); } + #[track_caller] + fn expect_empty_beacon_processor(&mut self) { + assert_eq!( 
+ self.beacon_processor_rx.try_recv().expect_err("must err"), + mpsc::error::TryRecvError::Empty + ); + } + #[track_caller] pub fn expect_penalty(&mut self) { match self.network_rx.try_recv() { @@ -140,33 +257,59 @@ } } - pub fn block_with_parent(&mut self, parent_root: Hash256) -> SignedBeaconBlock { - SignedBeaconBlock::from_block( - types::BeaconBlock::Base(types::BeaconBlockBase { - parent_root, - ..<_>::random_for_test(&mut self.rng) - }), - types::Signature::random_for_test(&mut self.rng), - ) + pub fn block_with_parent( + &mut self, + parent_root: Hash256, + fork_name: ForkName, + ) -> SignedBeaconBlock { + let mut block = self.rand_block(fork_name); + *block.message_mut().parent_root_mut() = parent_root; + block + } + + pub fn block_with_parent_and_blobs( + &mut self, + parent_root: Hash256, + fork_name: ForkName, + num_blobs: NumBlobs, + ) -> (SignedBeaconBlock, Vec>) { + let (mut block, mut blobs) = self.rand_block_and_blobs(fork_name, num_blobs); + *block.message_mut().parent_root_mut() = parent_root; + let block_root = block.canonical_root(); + blobs.iter_mut().for_each(|blob| { + blob.block_parent_root = parent_root; + blob.block_root = block_root; + }); + (block, blobs) + } } #[test] fn test_single_block_lookup_happy_path() { + let response_type = ResponseType::Block; let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); - let block = rig.rand_block(); + let block = rig.rand_block(fork_name); let peer_id = PeerId::random(); - + let block_root = block.canonical_root(); // Trigger the request - bl.search_block(block.canonical_root(), peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block(block_root, PeerShouldHave::BlockAndBlobs(peer_id), &mut cx); + let id = rig.expect_block_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_block_request(ResponseType::Blob); + } // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. bl.single_block_lookup_response(id, peer_id, Some(block.into()), D, &mut cx); rig.expect_empty_network(); - rig.expect_block_process(); + rig.expect_block_process(response_type); // The request should still be active. assert_eq!(bl.single_block_lookups.len(), 1); @@ -174,45 +317,70 @@ fn test_single_block_lookup_happy_path() { // Send the stream termination. Peer should have not been penalized, and the request removed // after processing.
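// (A `None` response stands for the RPC stream termination; the lookup is only retired once the `Imported` result comes back from the processor.)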
bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); - bl.single_block_processed(id, Ok(()).into(), &mut cx); + bl.single_block_component_processed( + id, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), + response_type, + &mut cx, + ); rig.expect_empty_network(); assert_eq!(bl.single_block_lookups.len(), 0); } #[test] fn test_single_block_lookup_empty_response() { + let response_type = ResponseType::Block; let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); let block_hash = Hash256::random(); let peer_id = PeerId::random(); // Trigger the request - bl.search_block(block_hash, peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block(block_hash, PeerShouldHave::BlockAndBlobs(peer_id), &mut cx); + let id = rig.expect_block_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_block_request(ResponseType::Blob); + } // The peer does not have the block. It should be penalized. bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); rig.expect_penalty(); - rig.expect_block_request(); // it should be retried + rig.expect_block_request(response_type); // it should be retried } #[test] fn test_single_block_lookup_wrong_response() { + let response_type = ResponseType::Block; let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); let block_hash = Hash256::random(); let peer_id = PeerId::random(); // Trigger the request - bl.search_block(block_hash, peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block(block_hash, PeerShouldHave::BlockAndBlobs(peer_id), &mut cx); + let id = rig.expect_block_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_block_request(ResponseType::Blob); + } // Peer sends something else. It should be penalized. - let bad_block = rig.rand_block(); + let bad_block = rig.rand_block(fork_name); bl.single_block_lookup_response(id, peer_id, Some(bad_block.into()), D, &mut cx); rig.expect_penalty(); - rig.expect_block_request(); // should be retried + rig.expect_block_request(response_type); // should be retried // Send the stream termination. This should not produce an additional penalty.
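// (The penalty was already applied for the bad block, so a late terminator for the old request id must not be double-counted.)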
bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); @@ -221,70 +389,122 @@ fn test_single_block_lookup_wrong_response() { #[test] fn test_single_block_lookup_failure() { + let response_type = ResponseType::Block; let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); let block_hash = Hash256::random(); let peer_id = PeerId::random(); // Trigger the request - bl.search_block(block_hash, peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block(block_hash, PeerShouldHave::BlockAndBlobs(peer_id), &mut cx); + let id = rig.expect_block_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_block_request(ResponseType::Blob); + } // The request fails. RPC failures are handled elsewhere so we should not penalize the peer. - bl.single_block_lookup_failed(id, &mut cx); - rig.expect_block_request(); + bl.single_block_lookup_failed(id, &peer_id, &mut cx, RPCError::UnsupportedProtocol); + rig.expect_block_request(response_type); rig.expect_empty_network(); } #[test] fn test_single_block_lookup_becomes_parent_request() { + let response_type = ResponseType::Block; let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let block = rig.rand_block(); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let block = Arc::new(rig.rand_block(fork_name)); let peer_id = PeerId::random(); // Trigger the request - bl.search_block(block.canonical_root(), peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block( + block.canonical_root(), + PeerShouldHave::BlockAndBlobs(peer_id), + &mut cx, + ); + let id = rig.expect_block_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_block_request(ResponseType::Blob); + } // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. - bl.single_block_lookup_response(id, peer_id, Some(block.clone().into()), D, &mut cx); + bl.single_block_lookup_response(id, peer_id, Some(block.clone()), D, &mut cx); rig.expect_empty_network(); - rig.expect_block_process(); + rig.expect_block_process(response_type); // The request should still be active. assert_eq!(bl.single_block_lookups.len(), 1); // Send the stream termination. Peer should have not been penalized, and the request moved to a // parent request after processing. - bl.single_block_processed(id, BlockError::ParentUnknown(block.into()).into(), &mut cx); - assert_eq!(bl.single_block_lookups.len(), 0); - rig.expect_parent_request(); + bl.single_block_component_processed( + id, + BlockError::ParentUnknown(block.into()).into(), + response_type, + &mut cx, + ); + assert_eq!(bl.single_block_lookups.len(), 1); + rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test.
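+ // (Draining the extra blob request here keeps the network queue empty for the assertions that follow.)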
+ if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } rig.expect_empty_network(); assert_eq!(bl.parent_lookups.len(), 1); } #[test] fn test_parent_lookup_happy_path() { + let response_type = ResponseType::Block; let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, block.into(), peer_id, &mut cx); - let id = rig.expect_parent_request(); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // Peer sends the right block, it should be sent for processing. Peer should not be penalized. bl.parent_lookup_response(id, peer_id, Some(parent.into()), D, &mut cx); - rig.expect_block_process(); + rig.expect_block_process(response_type); rig.expect_empty_network(); // Processing succeeds, now the rest of the chain should be sent for processing. - bl.parent_block_processed(chain_hash, BlockError::BlockIsAlreadyKnown.into(), &mut cx); + bl.parent_block_processed( + chain_hash, + BlockError::BlockIsAlreadyKnown.into(), + response_type, + &mut cx, + ); rig.expect_parent_chain_process(); let process_result = BatchProcessResult::Success { was_non_empty: true, @@ -295,22 +515,35 @@ fn test_parent_lookup_happy_path() { #[test] fn test_parent_lookup_wrong_response() { + let response_type = ResponseType::Block; let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, block.into(), peer_id, &mut cx); - let id1 = rig.expect_parent_request(); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); + let id1 = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // Peer sends the wrong block, peer should be penalized and the block re-requested.
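// (For a parent lookup, a "wrong block" is any block whose root differs from the parent root that was requested.)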
- let bad_block = rig.rand_block(); + let bad_block = rig.rand_block(fork_name); bl.parent_lookup_response(id1, peer_id, Some(bad_block.into()), D, &mut cx); rig.expect_penalty(); - let id2 = rig.expect_parent_request(); + let id2 = rig.expect_parent_request(response_type); // Send the stream termination for the first request. This should not produce extra penalties. bl.parent_lookup_response(id1, peer_id, None, D, &mut cx); @@ -318,10 +551,15 @@ // Send the right block this time. bl.parent_lookup_response(id2, peer_id, Some(parent.into()), D, &mut cx); - rig.expect_block_process(); + rig.expect_block_process(response_type); // Processing succeeds, now the rest of the chain should be sent for processing. - bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); + bl.parent_block_processed( + chain_hash, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), + response_type, + &mut cx, + ); rig.expect_parent_chain_process(); let process_result = BatchProcessResult::Success { was_non_empty: true, @@ -332,28 +570,46 @@ #[test] fn test_parent_lookup_empty_response() { + let response_type = ResponseType::Block; let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, block.into(), peer_id, &mut cx); - let id1 = rig.expect_parent_request(); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); + let id1 = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // Peer sends an empty response, peer should be penalized and the block re-requested. bl.parent_lookup_response(id1, peer_id, None, D, &mut cx); rig.expect_penalty(); - let id2 = rig.expect_parent_request(); + let id2 = rig.expect_parent_request(response_type); // Send the right block this time. bl.parent_lookup_response(id2, peer_id, Some(parent.into()), D, &mut cx); - rig.expect_block_process(); + rig.expect_block_process(response_type); // Processing succeeds, now the rest of the chain should be sent for processing.
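// (An `Imported` result unblocks the lookup; the buffered chain segment is then handed off for batch processing.)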
- bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); + bl.parent_block_processed( + chain_hash, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), + response_type, + &mut cx, + ); rig.expect_parent_chain_process(); let process_result = BatchProcessResult::Success { was_non_empty: true, @@ -364,16 +620,29 @@ fn test_parent_lookup_empty_response() { #[test] fn test_parent_lookup_rpc_failure() { + let response_type = ResponseType::Block; let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, block.into(), peer_id, &mut cx); - let id1 = rig.expect_parent_request(); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); + let id1 = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // The request fails. It should be tried again. bl.parent_lookup_failed( @@ -385,14 +654,19 @@ "older than deneb".into(), ), ); - let id2 = rig.expect_parent_request(); + let id2 = rig.expect_parent_request(response_type); // Send the right block this time. bl.parent_lookup_response(id2, peer_id, Some(parent.into()), D, &mut cx); - rig.expect_block_process(); + rig.expect_block_process(response_type); // Processing succeeds, now the rest of the chain should be sent for processing.
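// (RPC failures are retried without a penalty, so the successful retry completes the lookup as usual.)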
- bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); + bl.parent_block_processed( + chain_hash, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), + response_type, + &mut cx, + ); rig.expect_parent_chain_process(); let process_result = BatchProcessResult::Success { was_non_empty: true, @@ -403,17 +677,29 @@ #[test] fn test_parent_lookup_too_many_attempts() { + let response_type = ResponseType::Block; let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); - let chain_hash = block.canonical_root(); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, block.into(), peer_id, &mut cx); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { - let id = rig.expect_parent_request(); + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) && i == 1 { + let _ = rig.expect_parent_request(ResponseType::Blob); + } match i % 2 { // make sure every error is accounted for 0 => { @@ -430,7 +716,7 @@ } _ => { // Send a bad block this time. It should be tried again.
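// (Odd iterations exercise the bad-block path and even iterations the RPC-failure path, so both failure counters are driven.)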
- let bad_block = rig.rand_block(); + let bad_block = rig.rand_block(fork_name); bl.parent_lookup_response(id, peer_id, Some(bad_block.into()), D, &mut cx); // Send the stream termination bl.parent_lookup_response(id, peer_id, None, D, &mut cx); @@ -438,7 +724,14 @@ } } if i < parent_lookup::PARENT_FAIL_TOLERANCE { - assert_eq!(bl.parent_lookups[0].failed_attempts(), dbg!(i)); + assert_eq!( + bl.parent_lookups[0] + .current_parent_request + .block_request_state + .state + .failed_attempts(), + dbg!(i) + ); } } @@ -447,18 +740,31 @@ #[test] fn test_parent_lookup_too_many_download_attempts_no_blacklist() { + let response_type = ResponseType::Block; let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let block_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(block_hash, block.into(), peer_id, &mut cx); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { assert!(!bl.failed_chains.contains(&block_hash)); - let id = rig.expect_parent_request(); + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) && i == 1 { + let _ = rig.expect_parent_request(ResponseType::Blob); + } if i % 2 != 0 { // The request fails. It should be tried again. bl.parent_lookup_failed( @@ -472,12 +778,19 @@ ); } else { // Send a bad block this time. It should be tried again.
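// (Download failures alone must never blacklist the chain; only repeated processing failures may do that.)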
- let bad_block = rig.rand_block(); + let bad_block = rig.rand_block(fork_name); bl.parent_lookup_response(id, peer_id, Some(bad_block.into()), D, &mut cx); rig.expect_penalty(); } if i < parent_lookup::PARENT_FAIL_TOLERANCE { - assert_eq!(bl.parent_lookups[0].failed_attempts(), dbg!(i)); + assert_eq!( + bl.parent_lookups[0] + .current_parent_request + .block_request_state + .state + .failed_attempts(), + dbg!(i) + ); } } @@ -488,20 +801,32 @@ #[test] fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { + let response_type = ResponseType::Block; const PROCESSING_FAILURES: u8 = parent_lookup::PARENT_FAIL_TOLERANCE / 2 + 1; let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); - let parent = Arc::new(rig.rand_block()); - let block = rig.block_with_parent(parent.canonical_root()); - let block_hash = block.canonical_root(); + let parent = Arc::new(rig.rand_block(fork_name)); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(block_hash, block.into(), peer_id, &mut cx); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); // Fail downloading the block - for _ in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { - let id = rig.expect_parent_request(); + for i in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) && i == 0 { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // The request fails. It should be tried again. bl.parent_lookup_failed( id, @@ -515,51 +840,81 @@ } // Now fail processing a block in the parent request - for _ in 0..PROCESSING_FAILURES { - let id = dbg!(rig.expect_parent_request()); - assert!(!bl.failed_chains.contains(&block_hash)); + for i in 0..PROCESSING_FAILURES { + let id = dbg!(rig.expect_parent_request(response_type)); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) && i != 0 { + let _ = rig.expect_parent_request(ResponseType::Blob); + }
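+ // (The chain must stay off the failed-chains blacklist until the processing failures have actually accumulated.)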
+ assert!(!bl.failed_chains.contains(&block_root)); // send the right parent but fail processing - bl.parent_lookup_response(id, peer_id, Some(parent.clone().into()), D, &mut cx); - bl.parent_block_processed(block_hash, BlockError::InvalidSignature.into(), &mut cx); + bl.parent_lookup_response(id, peer_id, Some(parent.clone()), D, &mut cx); + bl.parent_block_processed( + block_root, + BlockError::InvalidSignature.into(), + response_type, + &mut cx, + ); bl.parent_lookup_response(id, peer_id, None, D, &mut cx); rig.expect_penalty(); } - assert!(bl.failed_chains.contains(&block_hash)); + assert!(bl.failed_chains.contains(&block_root)); assert_eq!(bl.parent_lookups.len(), 0); } #[test] fn test_parent_lookup_too_deep() { + let response_type = ResponseType::Block; let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); let mut blocks = - Vec::>::with_capacity(parent_lookup::PARENT_DEPTH_TOLERANCE); + Vec::>>::with_capacity(parent_lookup::PARENT_DEPTH_TOLERANCE); while blocks.len() < parent_lookup::PARENT_DEPTH_TOLERANCE { let parent = blocks .last() .map(|b| b.canonical_root()) .unwrap_or_else(Hash256::random); - let block = rig.block_with_parent(parent); + let block = Arc::new(rig.block_with_parent(parent, fork_name)); blocks.push(block); } let peer_id = PeerId::random(); let trigger_block = blocks.pop().unwrap(); let chain_hash = trigger_block.canonical_root(); - bl.search_parent(chain_hash, trigger_block.into(), peer_id, &mut cx); + let trigger_block_root = trigger_block.canonical_root(); + let trigger_parent_root = trigger_block.parent_root(); + let trigger_slot = trigger_block.slot(); + bl.search_parent( + trigger_slot, + trigger_block_root, + trigger_parent_root, + peer_id, + &mut cx, + ); for block in blocks.into_iter().rev() { - let id = rig.expect_parent_request(); + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test.
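+ // (Each hop of the loop repeats the same request/respond/process cycle one parent deeper, up to PARENT_DEPTH_TOLERANCE.)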
+ if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // the block - bl.parent_lookup_response(id, peer_id, Some(block.clone().into()), D, &mut cx); + bl.parent_lookup_response(id, peer_id, Some(block.clone()), D, &mut cx); // the stream termination bl.parent_lookup_response(id, peer_id, None, D, &mut cx); // the processing request - rig.expect_block_process(); + rig.expect_block_process(response_type); // the processing result bl.parent_block_processed( chain_hash, BlockError::ParentUnknown(block.into()).into(), + response_type, &mut cx, ) } @@ -572,33 +927,56 @@ fn test_parent_lookup_disconnection() { let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); let peer_id = PeerId::random(); - let trigger_block = rig.rand_block(); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let trigger_block = rig.rand_block(fork_name); + let trigger_block_root = trigger_block.canonical_root(); + let trigger_parent_root = trigger_block.parent_root(); + let trigger_slot = trigger_block.slot(); bl.search_parent( - trigger_block.canonical_root(), - trigger_block.into(), + trigger_slot, + trigger_block_root, + trigger_parent_root, peer_id, &mut cx, ); + bl.peer_disconnected(&peer_id, &mut cx); assert!(bl.parent_lookups.is_empty()); } #[test] fn test_single_block_lookup_ignored_response() { + let response_type = ResponseType::Block; let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let block = rig.rand_block(); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let block = rig.rand_block(fork_name); let peer_id = PeerId::random(); // Trigger the request - bl.search_block(block.canonical_root(), peer_id, &mut cx); - let id = rig.expect_block_request(); + bl.search_block( + block.canonical_root(), + PeerShouldHave::BlockAndBlobs(peer_id), + &mut cx, + ); + let id = rig.expect_block_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_block_request(ResponseType::Blob); + } // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. bl.single_block_lookup_response(id, peer_id, Some(block.into()), D, &mut cx); rig.expect_empty_network(); - rig.expect_block_process(); + rig.expect_block_process(response_type); // The request should still be active. assert_eq!(bl.single_block_lookups.len(), 1); @@ -607,31 +985,50 @@ // after processing.
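// (An `Ignored` result appears to mean the processor returned no verdict, so the lookup is dropped without scoring any peer.)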
bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); // Send an Ignored response, the request should be dropped - bl.single_block_processed(id, BlockProcessResult::Ignored, &mut cx); + bl.single_block_component_processed(id, BlockProcessingResult::Ignored, response_type, &mut cx); rig.expect_empty_network(); assert_eq!(bl.single_block_lookups.len(), 0); } #[test] fn test_parent_lookup_ignored_response() { + let response_type = ResponseType::Block; let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); - let parent = rig.rand_block(); - let block = rig.block_with_parent(parent.canonical_root()); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); + let parent = rig.rand_block(fork_name); + let block = rig.block_with_parent(parent.canonical_root(), fork_name); let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); + let block_root = block.canonical_root(); + let parent_root = block.parent_root(); + let slot = block.slot(); // Trigger the request - bl.search_parent(chain_hash, block.into(), peer_id, &mut cx); - let id = rig.expect_parent_request(); + bl.search_parent(slot, block_root, parent_root, peer_id, &mut cx); + let id = rig.expect_parent_request(response_type); + + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // Peer sends the right block, it should be sent for processing. Peer should not be penalized. bl.parent_lookup_response(id, peer_id, Some(parent.into()), D, &mut cx); - rig.expect_block_process(); + rig.expect_block_process(response_type); rig.expect_empty_network(); // Return an Ignored result. The request should be dropped - bl.parent_block_processed(chain_hash, BlockProcessResult::Ignored, &mut cx); + bl.parent_block_processed( + chain_hash, + BlockProcessingResult::Ignored, + response_type, + &mut cx, + ); rig.expect_empty_network(); assert_eq!(bl.parent_lookups.len(), 0); } @@ -639,8 +1036,13 @@ /// This is a regression test.
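/// It re-triggers a parent lookup for a chain that is already in flight and checks, via /// `parent_lookups_consistency`, that no duplicate lookup is created.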
#[test] fn test_same_chain_race_condition() { + let response_type = ResponseType::Block; let (mut bl, mut cx, mut rig) = TestRig::test_setup(true); + let fork_name = rig + .harness + .spec + .fork_name_at_slot::(rig.harness.chain.slot().unwrap()); #[track_caller] fn parent_lookups_consistency(bl: &BlockLookups) { let hashes: Vec<_> = bl @@ -667,31 +1069,51 @@ fn test_same_chain_race_condition() { .last() .map(|b| b.canonical_root()) .unwrap_or_else(Hash256::random); - let block = Arc::new(rig.block_with_parent(parent)); + let block = Arc::new(rig.block_with_parent(parent, fork_name)); blocks.push(block); } let peer_id = PeerId::random(); let trigger_block = blocks.pop().unwrap(); let chain_hash = trigger_block.canonical_root(); - bl.search_parent(chain_hash, trigger_block.clone().into(), peer_id, &mut cx); + let trigger_block_root = trigger_block.canonical_root(); + let trigger_parent_root = trigger_block.parent_root(); + let trigger_slot = trigger_block.slot(); + bl.search_parent( + trigger_slot, + trigger_block_root, + trigger_parent_root, + peer_id, + &mut cx, + ); for (i, block) in blocks.into_iter().rev().enumerate() { - let id = rig.expect_parent_request(); + let id = rig.expect_parent_request(response_type); + // If we're in deneb, a blob request should have been triggered as well, + // we don't require a response because we're generating 0-blob blocks in this test. + if matches!(fork_name, ForkName::Deneb) { + let _ = rig.expect_parent_request(ResponseType::Blob); + } // the block - bl.parent_lookup_response(id, peer_id, Some(block.clone().into()), D, &mut cx); + bl.parent_lookup_response(id, peer_id, Some(block.clone()), D, &mut cx); // the stream termination bl.parent_lookup_response(id, peer_id, None, D, &mut cx); // the processing request - rig.expect_block_process(); + rig.expect_block_process(response_type); // the processing result if i + 2 == depth { // one block was removed - bl.parent_block_processed(chain_hash, BlockError::BlockIsAlreadyKnown.into(), &mut cx) + bl.parent_block_processed( + chain_hash, + BlockError::BlockIsAlreadyKnown.into(), + response_type, + &mut cx, + ) } else { bl.parent_block_processed( chain_hash, BlockError::ParentUnknown(block.into()).into(), + response_type, &mut cx, ) } @@ -703,7 +1125,16 @@ // Try to get this block again while the chain is being processed. We should not request it again.
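// (Even a brand-new peer must be deduplicated against the in-flight chain rather than spawning a second lookup.)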
let peer_id = PeerId::random(); - bl.search_parent(chain_hash, trigger_block.into(), peer_id, &mut cx); + let trigger_block_root = trigger_block.canonical_root(); + let trigger_parent_root = trigger_block.parent_root(); + let trigger_slot = trigger_block.slot(); + bl.search_parent( + trigger_slot, + trigger_block_root, + trigger_parent_root, + peer_id, + &mut cx, + ); parent_lookups_consistency(&bl); let process_result = BatchProcessResult::Success { @@ -712,3 +1143,1060 @@ fn test_same_chain_race_condition() { bl.parent_chain_processed(chain_hash, process_result, &mut cx); assert_eq!(bl.parent_lookups.len(), 0); } + +mod deneb_only { + use super::*; + use beacon_chain::blob_verification::BlobError; + use std::ops::IndexMut; + use std::str::FromStr; + + struct DenebTester { + bl: BlockLookups, + cx: SyncNetworkContext, + rig: TestRig, + block: Option>>, + blobs: Vec>>, + parent_block: Option>>, + parent_blobs: Vec>>, + peer_id: PeerId, + block_req_id: Option, + parent_block_req_id: Option, + blob_req_id: Option, + parent_blob_req_id: Option, + slot: Slot, + block_root: Hash256, + } + + enum RequestTrigger { + AttestationUnknownBlock, + GossipUnknownParentBlock, + GossipUnknownParentBlob, + GossipUnknownBlockOrBlob, + } + + impl DenebTester { + fn new(request_trigger: RequestTrigger) -> Option { + let fork_name = get_fork_name(); + if !matches!(fork_name, ForkName::Deneb) { + return None; + } + let (mut bl, mut cx, mut rig) = TestRig::test_setup(false); + rig.harness.chain.slot_clock.set_slot( + E::slots_per_epoch() * rig.harness.spec.deneb_fork_epoch.unwrap().as_u64(), + ); + let (block, blobs) = rig.rand_block_and_blobs(fork_name, NumBlobs::Random); + let block = Arc::new(block); + let slot = block.slot(); + let mut block_root = block.canonical_root(); + let mut block = Some(block); + let mut blobs = blobs.into_iter().map(Arc::new).collect::>(); + + let mut parent_block = None; + let mut parent_blobs = vec![]; + + let peer_id = PeerId::random(); + + // Trigger the request + let (block_req_id, blob_req_id, parent_block_req_id, parent_blob_req_id) = + match request_trigger { + RequestTrigger::AttestationUnknownBlock => { + bl.search_block( + block_root, + PeerShouldHave::BlockAndBlobs(peer_id), + &mut cx, + ); + let block_req_id = rig.expect_block_request(ResponseType::Block); + let blob_req_id = rig.expect_block_request(ResponseType::Blob); + (Some(block_req_id), Some(blob_req_id), None, None) + } + RequestTrigger::GossipUnknownParentBlock => { + let (child_block, child_blobs) = rig.block_with_parent_and_blobs( + block_root, + get_fork_name(), + NumBlobs::Random, + ); + parent_block = Some(Arc::new(child_block)); + parent_blobs = child_blobs.into_iter().map(Arc::new).collect::>(); + std::mem::swap(&mut parent_block, &mut block); + std::mem::swap(&mut parent_blobs, &mut blobs); + + let child_block = block.as_ref().expect("block").clone(); + let child_root = child_block.canonical_root(); + let parent_root = block_root; + block_root = child_root; + bl.search_child_block( + child_root, + Some(UnknownParentComponents::new(Some(child_block), None)), + &[PeerShouldHave::Neither(peer_id)], + &mut cx, + ); + + let blob_req_id = rig.expect_block_request(ResponseType::Blob); + rig.expect_empty_network(); // expect no block request + bl.search_parent(slot, child_root, parent_root, peer_id, &mut cx); + let parent_block_req_id = rig.expect_parent_request(ResponseType::Block); + let parent_blob_req_id = rig.expect_parent_request(ResponseType::Blob); + ( + None, + Some(blob_req_id), + 
Some(parent_block_req_id), + Some(parent_blob_req_id), + ) + } + RequestTrigger::GossipUnknownParentBlob => { + let (child_block, child_blobs) = rig.block_with_parent_and_blobs( + block_root, + get_fork_name(), + NumBlobs::Random, + ); + + parent_block = Some(Arc::new(child_block)); + parent_blobs = child_blobs.into_iter().map(Arc::new).collect::>(); + std::mem::swap(&mut parent_block, &mut block); + std::mem::swap(&mut parent_blobs, &mut blobs); + + let child_blob = blobs.first().cloned().unwrap(); + let parent_root = block_root; + let child_root = child_blob.block_root; + block_root = child_root; + + let mut blobs = FixedBlobSidecarList::default(); + *blobs.index_mut(0) = Some(child_blob); + bl.search_child_block( + child_root, + Some(UnknownParentComponents::new(None, Some(blobs))), + &[PeerShouldHave::Neither(peer_id)], + &mut cx, + ); + + let block_req_id = rig.expect_block_request(ResponseType::Block); + let blobs_req_id = rig.expect_block_request(ResponseType::Blob); + rig.expect_empty_network(); // expect no further requests + bl.search_parent(slot, child_root, parent_root, peer_id, &mut cx); + let parent_block_req_id = rig.expect_parent_request(ResponseType::Block); + let parent_blob_req_id = rig.expect_parent_request(ResponseType::Blob); + ( + Some(block_req_id), + Some(blobs_req_id), + Some(parent_block_req_id), + Some(parent_blob_req_id), + ) + } + RequestTrigger::GossipUnknownBlockOrBlob => { + bl.search_block(block_root, PeerShouldHave::Neither(peer_id), &mut cx); + let block_req_id = rig.expect_block_request(ResponseType::Block); + let blob_req_id = rig.expect_block_request(ResponseType::Blob); + (Some(block_req_id), Some(blob_req_id), None, None) + } + }; + + Some(Self { + bl, + cx, + rig, + block, + blobs, + parent_block, + parent_blobs, + peer_id, + block_req_id, + parent_block_req_id, + blob_req_id, + parent_blob_req_id, + slot, + block_root, + }) + } + + fn parent_block_response(mut self) -> Self { + self.rig.expect_empty_network(); + self.bl.parent_lookup_response( + self.parent_block_req_id.expect("parent request id"), + self.peer_id, + self.parent_block.clone(), + D, + &mut self.cx, + ); + + assert_eq!(self.bl.parent_lookups.len(), 1); + self + } + + fn parent_blob_response(mut self) -> Self { + for blob in &self.parent_blobs { + dbg!("sending blob"); + self.bl.parent_lookup_blob_response( + self.parent_blob_req_id.expect("parent blob request id"), + self.peer_id, + Some(blob.clone()), + D, + &mut self.cx, + ); + assert_eq!(self.bl.parent_lookups.len(), 1); + } + dbg!("sending stream terminator"); + self.bl.parent_lookup_blob_response( + self.parent_blob_req_id.expect("blob request id"), + self.peer_id, + None, + D, + &mut self.cx, + ); + + self + } + + fn block_response_triggering_process(self) -> Self { + let mut me = self.block_response(); + me.rig.expect_block_process(ResponseType::Block); + + // The request should still be active. + assert_eq!(me.bl.single_block_lookups.len(), 1); + me + } + + fn block_response(mut self) -> Self { + // The peer provides the correct block, should not be penalized. Now the block should be sent + // for processing. + self.bl.single_block_lookup_response( + self.block_req_id.expect("block request id"), + self.peer_id, + self.block.clone(), + D, + &mut self.cx, + ); + self.rig.expect_empty_network(); + + // The request should still be active.
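+ // (Downloads alone never retire a lookup; only a processing result can remove it.)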
+ assert_eq!(self.bl.single_block_lookups.len(), 1); + self + } + + fn blobs_response(mut self) -> Self { + for blob in &self.blobs { + self.bl.single_blob_lookup_response( + self.blob_req_id.expect("blob request id"), + self.peer_id, + Some(blob.clone()), + D, + &mut self.cx, + ); + assert_eq!(self.bl.single_block_lookups.len(), 1); + } + self.bl.single_blob_lookup_response( + self.blob_req_id.expect("blob request id"), + self.peer_id, + None, + D, + &mut self.cx, + ); + self + } + + fn blobs_response_was_valid(mut self) -> Self { + self.rig.expect_empty_network(); + if self.blobs.len() > 0 { + self.rig.expect_block_process(ResponseType::Blob); + } + self + } + + fn expect_empty_beacon_processor(mut self) -> Self { + self.rig.expect_empty_beacon_processor(); + self + } + + fn empty_block_response(mut self) -> Self { + self.bl.single_block_lookup_response( + self.block_req_id.expect("block request id"), + self.peer_id, + None, + D, + &mut self.cx, + ); + self + } + + fn empty_blobs_response(mut self) -> Self { + self.bl.single_blob_lookup_response( + self.blob_req_id.expect("blob request id"), + self.peer_id, + None, + D, + &mut self.cx, + ); + self + } + + fn empty_parent_block_response(mut self) -> Self { + self.bl.parent_lookup_response( + self.parent_block_req_id.expect("block request id"), + self.peer_id, + None, + D, + &mut self.cx, + ); + self + } + + fn empty_parent_blobs_response(mut self) -> Self { + self.bl.parent_lookup_blob_response( + self.parent_blob_req_id.expect("blob request id"), + self.peer_id, + None, + D, + &mut self.cx, + ); + self + } + + fn block_imported(mut self) -> Self { + // A successful import should remove the lookup and trigger no further requests. + self.bl.single_block_component_processed( + self.block_req_id.expect("block request id"), + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(self.block_root)), + ResponseType::Block, + &mut self.cx, + ); + self.rig.expect_empty_network(); + assert_eq!(self.bl.single_block_lookups.len(), 0); + self + } + + fn parent_block_imported(mut self) -> Self { + self.bl.parent_block_processed( + self.block_root, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(self.block_root)), + ResponseType::Block, + &mut self.cx, + ); + self.rig.expect_empty_network(); + assert_eq!(self.bl.parent_lookups.len(), 0); + self + } + + fn parent_block_unknown_parent(mut self) -> Self { + self.bl.parent_block_processed( + self.block_root, + BlockProcessingResult::Err(BlockError::ParentUnknown(BlockWrapper::Block( + self.parent_block.clone().expect("parent block"), + ))), + ResponseType::Block, + &mut self.cx, + ); + assert_eq!(self.bl.parent_lookups.len(), 1); + self + } + + fn invalid_parent_processed(mut self) -> Self { + self.bl.parent_block_processed( + self.block_root, + BlockProcessingResult::Err(BlockError::ProposalSignatureInvalid), + ResponseType::Block, + &mut self.cx, + ); + assert_eq!(self.bl.parent_lookups.len(), 1); + self + } + + fn invalid_block_processed(mut self) -> Self { + self.bl.single_block_component_processed( + self.block_req_id.expect("block request id"), + BlockProcessingResult::Err(BlockError::ProposalSignatureInvalid), + ResponseType::Block, + &mut self.cx, + ); + assert_eq!(self.bl.single_block_lookups.len(), 1); + self + } + + fn invalid_blob_processed(mut self) -> Self { + self.bl.single_block_component_processed( + self.blob_req_id.expect("blob request id"), + BlockProcessingResult::Err(BlockError::BlobValidation(
BlobError::ProposerSignatureInvalid, + )), + ResponseType::Blob, + &mut self.cx, + ); + assert_eq!(self.bl.single_block_lookups.len(), 1); + self + } + + fn missing_components_from_block_request(mut self) -> Self { + self.bl.single_block_component_processed( + self.block_req_id.expect("block request id"), + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + self.slot, + self.block_root, + )), + ResponseType::Block, + &mut self.cx, + ); + assert_eq!(self.bl.single_block_lookups.len(), 1); + self + } + + fn missing_components_from_blob_request(mut self) -> Self { + self.bl.single_block_component_processed( + self.blob_req_id.expect("blob request id"), + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + self.slot, + self.block_root, + )), + ResponseType::Blob, + &mut self.cx, + ); + assert_eq!(self.bl.single_block_lookups.len(), 1); + self + } + + fn expect_penalty(mut self) -> Self { + self.rig.expect_penalty(); + self + } + fn expect_no_penalty(mut self) -> Self { + self.rig.expect_empty_network(); + self + } + fn expect_block_request(mut self) -> Self { + let id = self.rig.expect_block_request(ResponseType::Block); + self.block_req_id = Some(id); + self + } + fn expect_blobs_request(mut self) -> Self { + let id = self.rig.expect_block_request(ResponseType::Blob); + self.blob_req_id = Some(id); + self + } + fn expect_parent_block_request(mut self) -> Self { + let id = self.rig.expect_parent_request(ResponseType::Block); + self.parent_block_req_id = Some(id); + self + } + fn expect_parent_blobs_request(mut self) -> Self { + let id = self.rig.expect_parent_request(ResponseType::Blob); + self.parent_blob_req_id = Some(id); + self + } + fn expect_no_blobs_request(mut self) -> Self { + self.rig.expect_empty_network(); + self + } + fn expect_no_block_request(mut self) -> Self { + self.rig.expect_empty_network(); + self + } + fn invalidate_blobs_too_few(mut self) -> Self { + self.blobs.pop().expect("blobs"); + self + } + fn invalidate_blobs_too_many(mut self) -> Self { + let first_blob = self.blobs.get(0).expect("blob").clone(); + self.blobs.push(first_blob); + self + } + fn expect_parent_chain_process(mut self) -> Self { + self.rig.expect_parent_chain_process(); + self + } + fn expect_block_process(mut self) -> Self { + self.rig.expect_block_process(ResponseType::Block); + self + } + } + + fn get_fork_name() -> ForkName { + ForkName::from_str( + &std::env::var(beacon_chain::test_utils::FORK_NAME_ENV_VAR).unwrap_or_else(|e| { + panic!( + "{} env var must be defined when using fork_from_env: {:?}", + beacon_chain::test_utils::FORK_NAME_ENV_VAR, + e + ) + }), + ) + .unwrap() + } + + #[test] + fn single_block_and_blob_lookup_block_returned_first_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .block_response_triggering_process() + .blobs_response() + .blobs_response_was_valid() + .block_imported(); + } + + #[test] + fn single_block_and_blob_lookup_blobs_returned_first_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .blobs_response() + .blobs_response_was_valid() + .block_response_triggering_process() + .block_imported(); + } + + #[test] + fn single_block_and_blob_lookup_empty_response_attestation() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { + return; + }; + + tester + .empty_block_response() + .expect_penalty() + .expect_block_request() + 
            .expect_no_blobs_request()
+            .empty_blobs_response()
+            .expect_empty_beacon_processor()
+            .expect_no_penalty()
+            .expect_no_block_request()
+            .expect_no_blobs_request()
+            .block_response_triggering_process()
+            .missing_components_from_block_request();
+    }
+
+    #[test]
+    fn single_block_response_then_empty_blob_response_attestation() {
+        let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else {
+            return;
+        };
+
+        tester
+            .block_response_triggering_process()
+            .missing_components_from_block_request()
+            .empty_blobs_response()
+            .missing_components_from_blob_request()
+            .expect_penalty()
+            .expect_blobs_request()
+            .expect_no_block_request();
+    }
+
+    #[test]
+    fn single_blob_response_then_empty_block_response_attestation() {
+        let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else {
+            return;
+        };
+
+        tester
+            .blobs_response()
+            .blobs_response_was_valid()
+            .expect_no_penalty()
+            .expect_no_block_request()
+            .expect_no_blobs_request()
+            .missing_components_from_blob_request()
+            .empty_block_response()
+            .expect_penalty()
+            .expect_block_request()
+            .expect_no_blobs_request();
+    }
+
+    #[test]
+    fn single_invalid_block_response_then_blob_response_attestation() {
+        let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else {
+            return;
+        };
+
+        tester
+            .block_response_triggering_process()
+            .invalid_block_processed()
+            .expect_penalty()
+            .expect_block_request()
+            .expect_no_blobs_request()
+            .blobs_response()
+            .missing_components_from_blob_request()
+            .expect_no_penalty()
+            .expect_no_block_request()
+            .expect_no_blobs_request();
+    }
+
+    #[test]
+    fn single_block_response_then_invalid_blob_response_attestation() {
+        let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else {
+            return;
+        };
+
+        tester
+            .block_response_triggering_process()
+            .missing_components_from_block_request()
+            .blobs_response()
+            .invalid_blob_processed()
+            .expect_penalty()
+            .expect_blobs_request()
+            .expect_no_block_request();
+    }
+
+    #[test]
+    fn single_block_response_then_too_few_blobs_response_attestation() {
+        let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else {
+            return;
+        };
+
+        tester
+            .block_response_triggering_process()
+            .missing_components_from_block_request()
+            .invalidate_blobs_too_few()
+            .blobs_response()
+            .missing_components_from_blob_request()
+            .expect_penalty()
+            .expect_blobs_request()
+            .expect_no_block_request();
+    }
+
+    #[test]
+    fn single_block_response_then_too_many_blobs_response_attestation() {
+        let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else {
+            return;
+        };
+
+        tester
+            .block_response_triggering_process()
+            .invalidate_blobs_too_many()
+            .blobs_response()
+            .expect_penalty()
+            .expect_blobs_request()
+            .expect_no_block_request();
+    }
+
+    #[test]
+    fn too_few_blobs_response_then_block_response_attestation() {
+        let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else {
+            return;
+        };
+
+        tester
+            .invalidate_blobs_too_few()
+            .blobs_response()
+            .blobs_response_was_valid()
+            .expect_no_penalty()
+            .expect_no_blobs_request()
+            .expect_no_block_request()
+            .block_response_triggering_process();
+    }
+
+    #[test]
+    fn too_many_blobs_response_then_block_response_attestation() {
+        let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else {
+            return;
+        };
+
+        tester
+            .invalidate_blobs_too_many()
+            .blobs_response()
+            .expect_penalty()
+            .expect_blobs_request()
+            .expect_no_block_request()
+            .block_response_triggering_process();
+    }
+
+    #[test]
+    fn single_block_and_blob_lookup_block_returned_first_gossip() {
+        let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else {
+            return;
+        };
+
+        tester
+            .block_response_triggering_process()
+            .blobs_response()
+            .blobs_response_was_valid()
+            .block_imported();
+    }
+
+    #[test]
+    fn single_block_and_blob_lookup_blobs_returned_first_gossip() {
+        let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else {
+            return;
+        };
+
+        tester
+            .blobs_response()
+            .blobs_response_was_valid()
+            .block_response_triggering_process()
+            .block_imported();
+    }
+
+    #[test]
+    fn single_block_and_blob_lookup_empty_response_gossip() {
+        let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else {
+            return;
+        };
+
+        tester
+            .empty_block_response()
+            .expect_block_request()
+            .expect_no_penalty()
+            .expect_no_blobs_request()
+            .empty_blobs_response()
+            .expect_no_penalty()
+            .expect_no_block_request()
+            .expect_no_blobs_request()
+            .block_response_triggering_process()
+            .missing_components_from_block_request();
+    }
+
+    #[test]
+    fn single_block_response_then_empty_blob_response_gossip() {
+        let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else {
+            return;
+        };
+
+        tester
+            .block_response_triggering_process()
+            .missing_components_from_block_request()
+            .empty_blobs_response()
+            .missing_components_from_blob_request()
+            .expect_blobs_request()
+            .expect_no_penalty()
+            .expect_no_block_request();
+    }
+
+    #[test]
+    fn single_blob_response_then_empty_block_response_gossip() {
+        let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else {
+            return;
+        };
+
+        tester
+            .blobs_response()
+            .blobs_response_was_valid()
+            .expect_no_penalty()
+            .expect_no_block_request()
+            .expect_no_blobs_request()
+            .missing_components_from_blob_request()
+            .empty_block_response()
+            .expect_block_request()
+            .expect_no_penalty()
+            .expect_no_blobs_request();
+    }
+
+    #[test]
+    fn single_invalid_block_response_then_blob_response_gossip() {
+        let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else {
+            return;
+        };
+
+        tester
+            .block_response_triggering_process()
+            .invalid_block_processed()
+            .expect_penalty()
+            .expect_block_request()
+            .expect_no_blobs_request()
+            .blobs_response()
+            .missing_components_from_blob_request()
+            .expect_no_penalty()
+            .expect_no_block_request()
+            .expect_no_blobs_request();
+    }
+
+    #[test]
+    fn single_block_response_then_invalid_blob_response_gossip() {
+        let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else {
+            return;
+        };
+
+        tester
+            .block_response_triggering_process()
+            .missing_components_from_block_request()
+            .blobs_response()
+            .invalid_blob_processed()
+            .expect_penalty()
+            .expect_blobs_request()
+            .expect_no_block_request();
+    }
+
+    #[test]
+    fn single_block_response_then_too_few_blobs_response_gossip() {
+        let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else {
+            return;
+        };
+
+        tester
+            .block_response_triggering_process()
+            .missing_components_from_block_request()
+            .invalidate_blobs_too_few()
+            .blobs_response()
+            .missing_components_from_blob_request()
+            .expect_blobs_request()
+            .expect_no_penalty()
+            .expect_no_block_request();
+    }
+
+    #[test]
+    fn single_block_response_then_too_many_blobs_response_gossip() {
+        let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else {
+
return; + }; + + tester + .block_response_triggering_process() + .invalidate_blobs_too_many() + .blobs_response() + .expect_penalty() + .expect_blobs_request() + .expect_no_block_request(); + } + #[test] + fn too_few_blobs_response_then_block_response_gossip() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { + return; + }; + + tester + .invalidate_blobs_too_few() + .blobs_response() + .blobs_response_was_valid() + .missing_components_from_blob_request() + .expect_no_penalty() + .expect_no_blobs_request() + .expect_no_block_request() + .block_response_triggering_process() + .missing_components_from_block_request() + .expect_blobs_request(); + } + + #[test] + fn too_many_blobs_response_then_block_response_gossip() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { + return; + }; + + tester + .invalidate_blobs_too_many() + .blobs_response() + .expect_penalty() + .expect_blobs_request() + .expect_no_block_request() + .block_response_triggering_process(); + } + + #[test] + fn parent_block_unknown_parent() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock) else { + return; + }; + + tester + .blobs_response() + .expect_empty_beacon_processor() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_unknown_parent() + .expect_parent_block_request() + .expect_parent_blobs_request() + .expect_empty_beacon_processor(); + } + + #[test] + fn parent_block_invalid_parent() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock) else { + return; + }; + + tester + .blobs_response() + .expect_empty_beacon_processor() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .invalid_parent_processed() + .expect_penalty() + .expect_parent_block_request() + .expect_parent_blobs_request() + .expect_empty_beacon_processor(); + } + + #[test] + fn parent_block_and_blob_lookup_parent_returned_first() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock) else { + return; + }; + + tester + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_imported() + .blobs_response() + .expect_parent_chain_process(); + } + + #[test] + fn parent_block_and_blob_lookup_child_returned_first() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock) else { + return; + }; + + tester + .blobs_response() + .expect_no_penalty() + .expect_no_block_request() + .expect_no_blobs_request() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_imported() + .expect_parent_chain_process(); + } + + #[test] + fn empty_parent_block_then_parent_blob() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock) else { + return; + }; + + tester + .empty_parent_block_response() + .expect_penalty() + .expect_parent_block_request() + .expect_no_blobs_request() + .parent_blob_response() + .expect_empty_beacon_processor() + .parent_block_response() + .expect_block_process() + .parent_block_imported() + .blobs_response() + .expect_parent_chain_process(); + } + + #[test] + fn empty_parent_blobs_then_parent_block() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock) else { + return; + }; + + tester + .blobs_response() + .empty_parent_blobs_response() + .expect_no_penalty() + .expect_no_blobs_request() + .expect_no_block_request() + .parent_block_response() + .expect_penalty() + 
.expect_parent_blobs_request() + .parent_blob_response() + .expect_block_process() + .parent_block_imported() + .expect_parent_chain_process(); + } + + #[test] + fn parent_blob_unknown_parent() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob) else { + return; + }; + + tester + .block_response() + .expect_empty_beacon_processor() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_unknown_parent() + .expect_parent_block_request() + .expect_parent_blobs_request() + .expect_empty_beacon_processor(); + } + + #[test] + fn parent_blob_invalid_parent() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob) else { + return; + }; + + tester + .block_response() + .expect_empty_beacon_processor() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .invalid_parent_processed() + .expect_penalty() + .expect_parent_block_request() + .expect_parent_blobs_request() + .expect_empty_beacon_processor(); + } + + #[test] + fn parent_block_and_blob_lookup_parent_returned_first_blob_trigger() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob) else { + return; + }; + + tester + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_imported() + .block_response() + .expect_parent_chain_process(); + } + + #[test] + fn parent_block_and_blob_lookup_child_returned_first_blob_trigger() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob) else { + return; + }; + + tester + .block_response() + .expect_no_penalty() + .expect_no_block_request() + .expect_no_blobs_request() + .parent_block_response() + .parent_blob_response() + .expect_block_process() + .parent_block_imported() + .expect_parent_chain_process(); + } + + #[test] + fn empty_parent_block_then_parent_blob_blob_trigger() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob) else { + return; + }; + + tester + .empty_parent_block_response() + .expect_penalty() + .expect_parent_block_request() + .expect_no_blobs_request() + .parent_blob_response() + .expect_empty_beacon_processor() + .parent_block_response() + .expect_block_process() + .parent_block_imported() + .block_response() + .expect_parent_chain_process(); + } + + #[test] + fn empty_parent_blobs_then_parent_block_blob_trigger() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob) else { + return; + }; + + tester + .block_response() + .empty_parent_blobs_response() + .expect_no_penalty() + .expect_no_blobs_request() + .expect_no_block_request() + .parent_block_response() + .expect_penalty() + .expect_parent_blobs_request() + .parent_blob_response() + .expect_block_process() + .parent_block_imported() + .expect_parent_chain_process(); + } +} diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index e6c5549cc9e..7e5362a6f0d 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -1,4 +1,5 @@ use beacon_chain::blob_verification::BlockWrapper; +use ssz_types::FixedVector; use std::{collections::VecDeque, sync::Arc}; use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; @@ -55,7 +56,22 @@ impl BlocksAndBlobsRequestInfo { if blob_list.is_empty() { responses.push(BlockWrapper::Block(block)) } else { - responses.push(BlockWrapper::BlockAndBlobs(block, blob_list)) + let mut blobs_fixed = vec![None; 
T::max_blobs_per_block()]; + for blob in blob_list { + let blob_index = blob.index as usize; + let Some(blob_opt) = blobs_fixed.get_mut(blob_index) else { + return Err("Invalid blob index"); + }; + if blob_opt.is_some() { + return Err("Repeat blob index"); + } else { + *blob_opt = Some(blob); + } + } + responses.push(BlockWrapper::BlockAndBlobs( + block, + FixedVector::from(blobs_fixed), + )) } } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 482bfab7083..9f9397f9c7c 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -34,17 +34,24 @@ //! search for the block and subsequently search for parents if needed. use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart}; -use super::block_lookups::BlockLookups; +use super::block_lookups::{BlockLookups, PeerShouldHave}; use super::network_context::{BlockOrBlob, SyncNetworkContext}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; +use crate::sync::block_lookups::delayed_lookup; +use crate::sync::block_lookups::delayed_lookup::DelayedLookupMessage; +pub use crate::sync::block_lookups::ResponseType; +use crate::sync::block_lookups::UnknownParentComponents; use crate::sync::range_sync::ByRangeRequestType; use beacon_chain::blob_verification::AsBlock; use beacon_chain::blob_verification::BlockWrapper; -use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState}; +use beacon_chain::{ + AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, EngineState, + MAXIMUM_GOSSIP_CLOCK_DISPARITY, +}; use futures::StreamExt; use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS; use lighthouse_network::rpc::RPCError; @@ -52,12 +59,14 @@ use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; use lighthouse_network::{PeerAction, PeerId}; use slog::{crit, debug, error, info, trace, warn, Logger}; +use slot_clock::SlotClock; use std::boxed::Box; +use std::ops::IndexMut; use std::ops::Sub; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; -use types::blob_sidecar::BlobIdentifier; +use types::blob_sidecar::FixedBlobSidecarList; use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync @@ -68,6 +77,9 @@ use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// gossip if no peers are further than this range ahead of us that we have not already downloaded /// blocks for. pub const SLOT_IMPORT_TOLERANCE: usize = 32; +/// The maximum number of messages the delay queue can handle in a single slot before messages are +/// dropped. +pub const DELAY_QUEUE_CHANNEL_SIZE: usize = 128; pub type Id = u32; @@ -81,11 +93,11 @@ pub enum RequestId { /// Request was from the backfill sync algorithm. BackFillBlocks { id: Id }, /// Backfill request that is composed by both a block range request and a blob range request. - BackFillBlobs { id: Id }, + BackFillBlockAndBlobs { id: Id }, /// The request was from a chain in the range sync algorithm. RangeBlocks { id: Id }, /// Range request that is composed by both a block range request and a blob range request. 
- RangeBlobs { id: Id }, + RangeBlockAndBlobs { id: Id }, } // TODO(diva) I'm updating functions what at a time, but this should be revisited because I think @@ -115,18 +127,24 @@ pub enum SyncMessage { }, /// A block with an unknown parent has been received. - UnknownBlock(PeerId, BlockWrapper, Hash256), + UnknownParentBlock(PeerId, BlockWrapper, Hash256), - /// A peer has sent an object that references a block that is unknown. This triggers the + /// A blob with an unknown parent has been received. + UnknownParentBlob(PeerId, Arc>), + + /// A peer has sent an attestation that references a block that is unknown. This triggers the /// manager to attempt to find the block matching the unknown hash. - UnknownBlockHash(PeerId, Hash256), + UnknownBlockHashFromAttestation(PeerId, Hash256), - /// A peer has sent us a block that we haven't received all the blobs for. This triggers - /// the manager to attempt to find the pending blobs for the given block root. - UnknownBlobHash { - peer_id: PeerId, - pending_blobs: Vec, - }, + /// A peer has sent a blob that references a block that is unknown or a peer has sent a block for + /// which we haven't received blobs. + /// + /// We will either attempt to find the block matching the unknown hash immediately or queue a lookup, + /// which will then trigger the request when we receive `MissingGossipBlockComponentsDelayed`. + MissingGossipBlockComponents(Slot, PeerId, Hash256), + + /// This message triggers a request for missing block components after a delay. + MissingGossipBlockComponentsDelayed(Hash256), /// A peer has disconnected. Disconnect(PeerId), @@ -145,9 +163,10 @@ pub enum SyncMessage { }, /// Block processed - BlockProcessed { + BlockComponentProcessed { process_type: BlockProcessType, - result: BlockProcessResult, + result: BlockProcessingResult, + response_type: ResponseType, }, } @@ -159,8 +178,8 @@ pub enum BlockProcessType { } #[derive(Debug)] -pub enum BlockProcessResult { - Ok, +pub enum BlockProcessingResult { + Ok(AvailabilityProcessingStatus), Err(BlockError), Ignored, } @@ -205,6 +224,8 @@ pub struct SyncManager { block_lookups: BlockLookups, + delayed_lookups: mpsc::Sender, + /// The logger for the import manager. 
log: Logger, } @@ -226,6 +247,8 @@ pub fn spawn( ); // generate the message channel let (sync_send, sync_recv) = mpsc::unbounded_channel::>(); + let (delayed_lookups_send, delayed_lookups_recv) = + mpsc::channel::(DELAY_QUEUE_CHANNEL_SIZE); // create an instance of the SyncManager let mut sync_manager = SyncManager { @@ -240,15 +263,29 @@ pub fn spawn( log.clone(), ), range_sync: RangeSync::new(beacon_chain.clone(), log.clone()), - backfill_sync: BackFillSync::new(beacon_chain, network_globals, log.clone()), - block_lookups: BlockLookups::new(log.clone()), + backfill_sync: BackFillSync::new(beacon_chain.clone(), network_globals, log.clone()), + block_lookups: BlockLookups::new( + beacon_chain.data_availability_checker.clone(), + log.clone(), + ), + delayed_lookups: delayed_lookups_send, log: log.clone(), }; + let log_clone = log.clone(); + let sync_send_clone = sync_send.clone(); + delayed_lookup::spawn_delayed_lookup_service( + &executor, + beacon_chain, + delayed_lookups_recv, + sync_send, + log, + ); + // spawn the sync manager thread - debug!(log, "Sync Manager started"); + debug!(log_clone, "Sync Manager started"); executor.spawn(async move { Box::pin(sync_manager.main()).await }, "sync"); - sync_send + sync_send_clone } impl SyncManager { @@ -291,8 +328,12 @@ impl SyncManager { trace!(self.log, "Sync manager received a failed RPC"); match request_id { RequestId::SingleBlock { id } => { - self.block_lookups - .single_block_lookup_failed(id, &mut self.network); + self.block_lookups.single_block_lookup_failed( + id, + &peer_id, + &mut self.network, + error, + ); } RequestId::ParentLookup { id } => { self.block_lookups @@ -313,7 +354,7 @@ impl SyncManager { } } - RequestId::BackFillBlobs { id } => { + RequestId::BackFillBlockAndBlobs { id } => { if let Some(batch_id) = self .network .backfill_request_failed(id, ByRangeRequestType::BlocksAndBlobs) @@ -342,7 +383,7 @@ impl SyncManager { self.update_sync_state() } } - RequestId::RangeBlobs { id } => { + RequestId::RangeBlockAndBlobs { id } => { if let Some((chain_id, batch_id)) = self .network .range_sync_request_failed(id, ByRangeRequestType::BlocksAndBlobs) @@ -567,48 +608,83 @@ impl SyncManager { beacon_block, seen_timestamp, } => { - self.rpc_block_or_blob_received( - request_id, + self.rpc_block_received(request_id, peer_id, beacon_block, seen_timestamp); + } + SyncMessage::RpcBlob { + request_id, + peer_id, + blob_sidecar, + seen_timestamp, + } => self.rpc_blob_received(request_id, peer_id, blob_sidecar, seen_timestamp), + SyncMessage::UnknownParentBlock(peer_id, block, block_root) => { + let block_slot = block.slot(); + let (block, blobs) = block.deconstruct(); + let parent_root = block.parent_root(); + let parent_components = UnknownParentComponents::new(Some(block), blobs); + self.handle_unknown_parent( peer_id, - beacon_block.into(), - seen_timestamp, + block_root, + parent_root, + block_slot, + Some(parent_components), ); } - SyncMessage::UnknownBlock(peer_id, block, block_root) => { - // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore - if !self.network_globals.sync_state.read().is_synced() { - let head_slot = self.chain.canonical_head.cached_head().head_slot(); - let unknown_block_slot = block.slot(); - - // if the block is far in the future, ignore it. If its within the slot tolerance of - // our current head, regardless of the syncing state, fetch it. 
- if (head_slot >= unknown_block_slot - && head_slot.sub(unknown_block_slot).as_usize() > SLOT_IMPORT_TOLERANCE) - || (head_slot < unknown_block_slot - && unknown_block_slot.sub(head_slot).as_usize() > SLOT_IMPORT_TOLERANCE) - { - return; - } - } - if self.network_globals.peers.read().is_connected(&peer_id) - && self.network.is_execution_engine_online() - { - self.block_lookups - .search_parent(block_root, block, peer_id, &mut self.network); + SyncMessage::UnknownParentBlob(peer_id, blob) => { + let blob_slot = blob.slot; + let block_root = blob.block_root; + let parent_root = blob.block_parent_root; + let blob_index = blob.index; + let mut blobs = FixedBlobSidecarList::default(); + *blobs.index_mut(blob_index as usize) = Some(blob); + self.handle_unknown_parent( + peer_id, + block_root, + parent_root, + blob_slot, + Some(UnknownParentComponents::new(None, Some(blobs))), + ); + } + SyncMessage::UnknownBlockHashFromAttestation(peer_id, block_hash) => { + // If we are not synced, ignore this block. + if self.synced_and_connected(&peer_id) { + self.block_lookups.search_block( + block_hash, + PeerShouldHave::BlockAndBlobs(peer_id), + &mut self.network, + ); } } - SyncMessage::UnknownBlockHash(peer_id, block_hash) => { + SyncMessage::MissingGossipBlockComponents(slot, peer_id, block_root) => { // If we are not synced, ignore this block. - if self.network_globals.sync_state.read().is_synced() - && self.network_globals.peers.read().is_connected(&peer_id) - && self.network.is_execution_engine_online() - { - self.block_lookups - .search_block(block_hash, peer_id, &mut self.network); + if self.synced_and_connected(&peer_id) { + if self.should_delay_lookup(slot) { + self.block_lookups + .search_block_delayed(block_root, PeerShouldHave::Neither(peer_id)); + if let Err(e) = self + .delayed_lookups + .try_send(DelayedLookupMessage::MissingComponents(block_root)) + { + warn!(self.log, "Delayed lookup dropped for block referenced by a blob"; + "block_root" => ?block_root, "error" => ?e); + } + } else { + self.block_lookups.search_block( + block_root, + PeerShouldHave::Neither(peer_id), + &mut self.network, + ) + } } } - SyncMessage::UnknownBlobHash { .. } => { - unimplemented!() + SyncMessage::MissingGossipBlockComponentsDelayed(block_root) => { + if self + .block_lookups + .trigger_lookup_by_root(block_root, &mut self.network) + .is_err() + { + // No request was made for block or blob so the lookup is dropped. 
+ self.block_lookups.remove_lookup_by_root(block_root); + } } SyncMessage::Disconnect(peer_id) => { self.peer_disconnect(&peer_id); @@ -618,17 +694,17 @@ impl SyncManager { request_id, error, } => self.inject_error(peer_id, request_id, error), - SyncMessage::BlockProcessed { + SyncMessage::BlockComponentProcessed { process_type, result, + response_type, } => match process_type { - BlockProcessType::SingleBlock { id } => { - self.block_lookups - .single_block_processed(id, result, &mut self.network) - } + BlockProcessType::SingleBlock { id } => self + .block_lookups + .single_block_component_processed(id, result, response_type, &mut self.network), BlockProcessType::ParentLookup { chain_hash } => self .block_lookups - .parent_block_processed(chain_hash, result, &mut self.network), + .parent_block_processed(chain_hash, result, response_type, &mut self.network), }, SyncMessage::BatchProcessed { sync_type, result } => match sync_type { ChainSegmentProcessId::RangeBatchId(chain_id, epoch, _) => { @@ -659,18 +735,95 @@ impl SyncManager { .block_lookups .parent_chain_processed(chain_hash, result, &mut self.network), }, - SyncMessage::RpcBlob { - request_id, - peer_id, - blob_sidecar, - seen_timestamp, - } => self.rpc_block_or_blob_received( - request_id, + } + } + + fn handle_unknown_parent( + &mut self, + peer_id: PeerId, + block_root: Hash256, + parent_root: Hash256, + slot: Slot, + parent_components: Option>, + ) { + if self.should_search_for_block(slot, &peer_id) { + self.block_lookups.search_parent( + slot, + block_root, + parent_root, peer_id, - blob_sidecar.into(), - seen_timestamp, - ), + &mut self.network, + ); + if self.should_delay_lookup(slot) { + self.block_lookups.search_child_delayed( + block_root, + parent_components, + &[PeerShouldHave::Neither(peer_id)], + ); + if let Err(e) = self + .delayed_lookups + .try_send(DelayedLookupMessage::MissingComponents(block_root)) + { + warn!(self.log, "Delayed lookups dropped for block"; "block_root" => ?block_root, "error" => ?e); + } + } else { + self.block_lookups.search_child_block( + block_root, + parent_components, + &[PeerShouldHave::Neither(peer_id)], + &mut self.network, + ); + } + } + } + + fn should_delay_lookup(&mut self, slot: Slot) -> bool { + let earliest_slot = self + .chain + .slot_clock + .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY); + let latest_slot = self + .chain + .slot_clock + .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY); + if let (Some(earliest_slot), Some(latest_slot)) = (earliest_slot, latest_slot) { + let msg_for_current_slot = slot >= earliest_slot && slot <= latest_slot; + let delay_threshold_unmet = self + .chain + .slot_clock + .seconds_from_current_slot_start() + .map_or(false, |secs_into_slot| { + secs_into_slot < self.chain.slot_clock.single_lookup_delay() + }); + msg_for_current_slot && delay_threshold_unmet + } else { + false + } + } + + fn should_search_for_block(&mut self, block_slot: Slot, peer_id: &PeerId) -> bool { + if !self.network_globals.sync_state.read().is_synced() { + let head_slot = self.chain.canonical_head.cached_head().head_slot(); + + // if the block is far in the future, ignore it. If its within the slot tolerance of + // our current head, regardless of the syncing state, fetch it. 
+ if (head_slot >= block_slot + && head_slot.sub(block_slot).as_usize() > SLOT_IMPORT_TOLERANCE) + || (head_slot < block_slot + && block_slot.sub(head_slot).as_usize() > SLOT_IMPORT_TOLERANCE) + { + return false; + } } + + self.network_globals.peers.read().is_connected(peer_id) + && self.network.is_execution_engine_online() + } + + fn synced_and_connected(&mut self, peer_id: &PeerId) -> bool { + self.network_globals.sync_state.read().is_synced() + && self.network_globals.peers.read().is_connected(peer_id) + && self.network.is_execution_engine_online() } fn handle_new_execution_engine_state(&mut self, engine_state: EngineState) { @@ -728,50 +881,30 @@ impl SyncManager { } } - fn rpc_block_or_blob_received( + fn rpc_block_received( &mut self, request_id: RequestId, peer_id: PeerId, - block_or_blob: BlockOrBlob, + block: Option>>, seen_timestamp: Duration, ) { match request_id { - RequestId::SingleBlock { id } => { - // TODO(diva) adjust when dealing with by root requests. This code is here to - // satisfy dead code analysis - match block_or_blob { - BlockOrBlob::Block(maybe_block) => { - self.block_lookups.single_block_lookup_response( - id, - peer_id, - maybe_block.map(BlockWrapper::Block), - seen_timestamp, - &mut self.network, - ) - } - BlockOrBlob::Sidecar(_) => unimplemented!("Mimatch between BlockWrapper and what the network receives needs to be handled first."), - } - } - RequestId::ParentLookup { id } => { - // TODO(diva) adjust when dealing with by root requests. This code is here to - // satisfy dead code analysis - match block_or_blob { - BlockOrBlob::Block(maybe_block) => self.block_lookups.parent_lookup_response( - id, - peer_id, - maybe_block.map(BlockWrapper::Block), - seen_timestamp, - &mut self.network, - ), - BlockOrBlob::Sidecar(_) => unimplemented!("Mimatch between BlockWrapper and what the network receives needs to be handled first."), - } - } + RequestId::SingleBlock { id } => self.block_lookups.single_block_lookup_response( + id, + peer_id, + block, + seen_timestamp, + &mut self.network, + ), + RequestId::ParentLookup { id } => self.block_lookups.parent_lookup_response( + id, + peer_id, + block, + seen_timestamp, + &mut self.network, + ), RequestId::BackFillBlocks { id } => { - let maybe_block = match block_or_blob { - BlockOrBlob::Block(maybe_block) => maybe_block, - BlockOrBlob::Sidecar(_) => todo!("I think this is unreachable"), - }; - let is_stream_terminator = maybe_block.is_none(); + let is_stream_terminator = block.is_none(); if let Some(batch_id) = self .network .backfill_sync_only_blocks_response(id, is_stream_terminator) @@ -781,7 +914,7 @@ impl SyncManager { batch_id, &peer_id, id, - maybe_block.map(|block| block.into()), + block.map(BlockWrapper::Block), ) { Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), Ok(ProcessResult::Successful) => {} @@ -794,14 +927,10 @@ impl SyncManager { } } RequestId::RangeBlocks { id } => { - let maybe_block = match block_or_blob { - BlockOrBlob::Block(maybe_block) => maybe_block, - BlockOrBlob::Sidecar(_) => todo!("I think this should be unreachable, since this is a range only-blocks request, and the network should not accept this chunk at all. 
Needs better handling"), - }; - let is_stream_terminator = maybe_block.is_none(); + let is_stream_terminator = block.is_none(); if let Some((chain_id, batch_id)) = self .network - .range_sync_block_response(id, is_stream_terminator) + .range_sync_block_only_response(id, is_stream_terminator) { self.range_sync.blocks_by_range_response( &mut self.network, @@ -809,17 +938,53 @@ impl SyncManager { chain_id, batch_id, id, - maybe_block.map(|block| block.into()), + block.map(BlockWrapper::Block), ); self.update_sync_state(); } } + RequestId::BackFillBlockAndBlobs { id } => { + self.backfill_block_and_blobs_response(id, peer_id, block.into()) + } + RequestId::RangeBlockAndBlobs { id } => { + self.range_block_and_blobs_response(id, peer_id, block.into()) + } + } + } - RequestId::BackFillBlobs { id } => { - self.backfill_block_and_blobs_response(id, peer_id, block_or_blob) + fn rpc_blob_received( + &mut self, + request_id: RequestId, + peer_id: PeerId, + blob: Option>>, + seen_timestamp: Duration, + ) { + match request_id { + RequestId::SingleBlock { id } => self.block_lookups.single_blob_lookup_response( + id, + peer_id, + blob, + seen_timestamp, + &mut self.network, + ), + RequestId::ParentLookup { id } => self.block_lookups.parent_lookup_blob_response( + id, + peer_id, + blob, + seen_timestamp, + &mut self.network, + ), + RequestId::BackFillBlocks { id: _ } => { + crit!(self.log, "Blob received during backfill block request"; "peer_id" => %peer_id ); + } + RequestId::RangeBlocks { id: _ } => { + crit!(self.log, "Blob received during range block request"; "peer_id" => %peer_id ); + } + RequestId::BackFillBlockAndBlobs { id } => { + self.backfill_block_and_blobs_response(id, peer_id, blob.into()) } - RequestId::RangeBlobs { id } => { - self.range_block_and_blobs_response(id, peer_id, block_or_blob) + RequestId::RangeBlockAndBlobs { id } => { + self.range_block_and_blobs_response(id, peer_id, blob.into()) } } } @@ -863,7 +1028,7 @@ impl SyncManager { "peer_id" => %peer_id, "batch_id" => resp.batch_id, "error" => e ); // TODO: penalize the peer for being a bad boy - let id = RequestId::RangeBlobs { id }; + let id = RequestId::RangeBlockAndBlobs { id }; self.inject_error(peer_id, id, RPCError::InvalidData(e.into())) } } @@ -915,7 +1080,7 @@ impl SyncManager { "peer_id" => %peer_id, "batch_id" => resp.batch_id, "error" => e ); // TODO: penalize the peer for being a bad boy - let id = RequestId::BackFillBlobs { id }; + let id = RequestId::BackFillBlockAndBlobs { id }; self.inject_error(peer_id, id, RPCError::InvalidData(e.into())) } } @@ -923,17 +1088,19 @@ impl SyncManager { } } -impl From>> for BlockProcessResult { - fn from(result: Result>) -> Self { +impl From>> + for BlockProcessingResult +{ + fn from(result: Result>) -> Self { match result { - Ok(_) => BlockProcessResult::Ok, - Err(e) => e.into(), + Ok(status) => BlockProcessingResult::Ok(status), + Err(e) => BlockProcessingResult::Err(e), } } } -impl From> for BlockProcessResult { +impl From> for BlockProcessingResult { fn from(e: BlockError) -> Self { - BlockProcessResult::Err(e) + BlockProcessingResult::Err(e) } } diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index 7b244bceceb..1dd33bd31c8 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -9,5 +9,6 @@ mod network_context; mod peer_sync_info; mod range_sync; +pub use block_lookups::UnknownParentComponents; pub use manager::{BatchProcessResult, SyncMessage}; pub use range_sync::{BatchOperationOutcome, ChainId}; 
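The `block_sidecar_coupling` change earlier in this diff pairs a block with its blobs by slotting each blob into a fixed-length, index-addressed list, rejecting out-of-range and duplicate indices. A minimal stand-alone sketch of that placement logic, using a toy `Blob` type and a `MAX_BLOBS_PER_BLOCK` constant in place of the Lighthouse `EthSpec`/`FixedVector` machinery:

    // Toy stand-ins; not the Lighthouse types.
    const MAX_BLOBS_PER_BLOCK: usize = 6;

    #[derive(Clone, Debug, PartialEq)]
    struct Blob {
        index: u64,
    }

    fn couple_blobs(blob_list: Vec<Blob>) -> Result<Vec<Option<Blob>>, &'static str> {
        // One fixed slot per possible blob index, mirroring the `FixedVector`.
        let mut blobs_fixed: Vec<Option<Blob>> = vec![None; MAX_BLOBS_PER_BLOCK];
        for blob in blob_list {
            let Some(slot) = blobs_fixed.get_mut(blob.index as usize) else {
                // An index at or beyond the maximum can never be valid.
                return Err("Invalid blob index");
            };
            if slot.is_some() {
                // Two blobs claiming the same index is a faulty response.
                return Err("Repeat blob index");
            }
            *slot = Some(blob);
        }
        Ok(blobs_fixed)
    }

    fn main() {
        assert!(couple_blobs(vec![Blob { index: 0 }, Blob { index: 2 }]).is_ok());
        assert_eq!(
            couple_blobs(vec![Blob { index: 9 }]).unwrap_err(),
            "Invalid blob index"
        );
        assert_eq!(
            couple_blobs(vec![Blob { index: 1 }, Blob { index: 1 }]).unwrap_err(),
            "Repeat blob index"
        );
    }

Keeping every index slot, even the empty ones, lets later code detect which blobs are still missing without re-deriving the expected count.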
diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index ced6aeb52ed..c969feb2752 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -7,11 +7,11 @@ use super::range_sync::{BatchId, ByRangeRequestType, ChainId}; use crate::beacon_processor::WorkEvent; use crate::service::{NetworkMessage, RequestId}; use crate::status::ToStatusMessage; -use crate::sync::block_lookups::ForceBlockRequest; +use crate::sync::block_lookups::{BlobRequestId, BlockRequestId}; use beacon_chain::blob_verification::BlockWrapper; use beacon_chain::{BeaconChain, BeaconChainTypes, EngineState}; use fnv::FnvHashMap; -use lighthouse_network::rpc::methods::BlobsByRangeRequest; +use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; use lighthouse_network::rpc::{BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason}; use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request}; use slog::{debug, trace, warn}; @@ -62,7 +62,7 @@ pub struct SyncNetworkContext { /// Channel to send work to the beacon processor. beacon_processor_send: mpsc::Sender>, - chain: Arc>, + pub chain: Arc>, /// Logger for the `SyncNetworkContext`. log: slog::Logger, @@ -71,7 +71,7 @@ pub struct SyncNetworkContext { /// Small enumeration to make dealing with block and blob requests easier. pub enum BlockOrBlob { Block(Option>>), - Sidecar(Option>>), + Blob(Option>>), } impl From>>> for BlockOrBlob { @@ -82,7 +82,7 @@ impl From>>> for BlockOrBlob { impl From>>> for BlockOrBlob { fn from(blob: Option>>) -> Self { - BlockOrBlob::Sidecar(blob) + BlockOrBlob::Blob(blob) } } @@ -187,7 +187,7 @@ impl SyncNetworkContext { // create the shared request id. This is fine since the rpc handles substream ids. let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::RangeBlobs { id }); + let request_id = RequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }); // Create the blob request based on the blob request. let blobs_request = Request::BlobsByRange(BlobsByRangeRequest { @@ -260,7 +260,7 @@ impl SyncNetworkContext { // create the shared request id. This is fine since the rpc handles substream ids. let id = self.next_id(); - let request_id = RequestId::Sync(SyncRequestId::BackFillBlobs { id }); + let request_id = RequestId::Sync(SyncRequestId::BackFillBlockAndBlobs { id }); // Create the blob request based on the blob request. let blobs_request = Request::BlobsByRange(BlobsByRangeRequest { @@ -289,7 +289,7 @@ impl SyncNetworkContext { } /// Response for a request that is only for blocks. 
-    pub fn range_sync_block_response(
+    pub fn range_sync_block_only_response(
        &mut self,
        request_id: Id,
        is_stream_terminator: bool,
@@ -313,7 +313,7 @@
        let info = &mut req.block_blob_info;
        match block_or_blob {
            BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block),
-            BlockOrBlob::Sidecar(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
+            BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
        }
        if info.is_finished() {
            // If the request is finished, dequeue everything
@@ -390,7 +390,7 @@
        let (_, info) = entry.get_mut();
        match block_or_blob {
            BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block),
-            BlockOrBlob::Sidecar(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
+            BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
        }
        if info.is_finished() {
            // If the request is finished, dequeue everything
@@ -409,83 +409,101 @@
        }
    }

-    /// Sends a blocks by root request for a single block lookup.
    pub fn single_block_lookup_request(
        &mut self,
        peer_id: PeerId,
        request: BlocksByRootRequest,
    ) -> Result {
-        let request = if self
-            .chain
-            .is_data_availability_check_required()
-            .map_err(|_| "Unable to read slot clock")?
-        {
-            trace!(
-                self.log,
-                "Sending BlobsByRoot Request";
-                "method" => "BlobsByRoot",
-                "count" => request.block_roots.len(),
-                "peer" => %peer_id
-            );
-            unimplemented!("There is no longer such thing as a single block lookup, since we nede to ask for blobs and blocks separetely");
-        } else {
-            trace!(
-                self.log,
-                "Sending BlocksByRoot Request";
-                "method" => "BlocksByRoot",
-                "count" => request.block_roots.len(),
-                "peer" => %peer_id
-            );
-            Request::BlocksByRoot(request)
-        };
        let id = self.next_id();
        let request_id = RequestId::Sync(SyncRequestId::SingleBlock { id });
+
+        trace!(
+            self.log,
+            "Sending BlocksByRoot Request";
+            "method" => "BlocksByRoot",
+            "count" => request.block_roots.len(),
+            "peer" => %peer_id
+        );
+
+        self.send_network_msg(NetworkMessage::SendRequest {
+            peer_id,
+            request: Request::BlocksByRoot(request),
+            request_id,
+        })?;
+        Ok(id)
+    }
+
+    /// Sends a blobs by root request for a single blob lookup.
+    pub fn single_blobs_lookup_request(
+        &mut self,
+        peer_id: PeerId,
+        request: BlobsByRootRequest,
+    ) -> Result {
+        let id = self.next_id();
+        let request_id = RequestId::Sync(SyncRequestId::SingleBlock { id });
+
+        trace!(
+            self.log,
+            "Sending BlobsByRoot Request";
+            "method" => "BlobsByRoot",
+            "count" => request.blob_ids.len(),
+            "peer" => %peer_id
+        );
+
        self.send_network_msg(NetworkMessage::SendRequest {
            peer_id,
-            request,
+            request: Request::BlobsByRoot(request),
            request_id,
        })?;
        Ok(id)
    }

    /// Sends a blocks by root request for a parent request.
-    pub fn parent_lookup_request(
+    pub fn parent_lookup_block_request(
        &mut self,
        peer_id: PeerId,
        request: BlocksByRootRequest,
-        force_block_request: ForceBlockRequest,
-    ) -> Result {
-        let request = if self
-            .chain
-            .is_data_availability_check_required()
-            .map_err(|_| "Unable to read slot clock")?
-            && matches!(force_block_request, ForceBlockRequest::False)
-        {
-            trace!(
-                self.log,
-                "Sending BlobsByRoot Request";
-                "method" => "BlobsByRoot",
-                "count" => request.block_roots.len(),
-                "peer" => %peer_id
-            );
-            unimplemented!(
-                "Parent requests now need to interleave blocks and blobs or something like that."
-            )
-        } else {
-            trace!(
-                self.log,
-                "Sending BlocksByRoot Request";
-                "method" => "BlocksByRoot",
-                "count" => request.block_roots.len(),
-                "peer" => %peer_id
-            );
-            Request::BlocksByRoot(request)
-        };
+    ) -> Result {
        let id = self.next_id();
        let request_id = RequestId::Sync(SyncRequestId::ParentLookup { id });
+
+        trace!(
+            self.log,
+            "Sending parent BlocksByRoot Request";
+            "method" => "BlocksByRoot",
+            "count" => request.block_roots.len(),
+            "peer" => %peer_id
+        );
+
+        self.send_network_msg(NetworkMessage::SendRequest {
+            peer_id,
+            request: Request::BlocksByRoot(request),
+            request_id,
+        })?;
+        Ok(id)
+    }
+
+    /// Sends a blobs by root request for a parent request.
+    pub fn parent_lookup_blobs_request(
+        &mut self,
+        peer_id: PeerId,
+        request: BlobsByRootRequest,
+    ) -> Result {
+        let id = self.next_id();
+        let request_id = RequestId::Sync(SyncRequestId::ParentLookup { id });
+
+        trace!(
+            self.log,
+            "Sending parent BlobsByRoot Request";
+            "method" => "BlobsByRoot",
+            "count" => request.blob_ids.len(),
+            "peer" => %peer_id
+        );
+
        self.send_network_msg(NetworkMessage::SendRequest {
            peer_id,
-            request,
+            request: Request::BlobsByRoot(request),
            request_id,
        })?;
        Ok(id)
diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs
index 9afbed96a62..58f137dfe58 100644
--- a/beacon_node/network/src/sync/range_sync/range.rs
+++ b/beacon_node/network/src/sync/range_sync/range.rs
@@ -685,7 +685,7 @@ mod tests {
        range.add_peer(&mut rig.cx, local_info, peer1, head_info);
        let ((chain1, batch1), id1) = match rig.grab_request(&peer1).0 {
            RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
-                (rig.cx.range_sync_block_response(id, true).unwrap(), id)
+                (rig.cx.range_sync_block_only_response(id, true).unwrap(), id)
            }
            other => panic!("unexpected request {:?}", other),
        };
@@ -704,7 +704,7 @@ mod tests {
        range.add_peer(&mut rig.cx, local_info, peer2, finalized_info);
        let ((chain2, batch2), id2) = match rig.grab_request(&peer2).0 {
            RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
-                (rig.cx.range_sync_block_response(id, true).unwrap(), id)
+                (rig.cx.range_sync_block_only_response(id, true).unwrap(), id)
            }
            other => panic!("unexpected request {:?}", other),
        };
diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs
index 0809ca560c1..74d8e6fa187 100644
--- a/beacon_node/store/src/hot_cold_store.rs
+++ b/beacon_node/store/src/hot_cold_store.rs
@@ -1952,7 +1952,7 @@ impl, Cold: ItemStore> HotColdDB
            && last_pruned_epoch.as_u64() + self.get_config().epochs_per_blob_prune
                > end_epoch.as_u64()
        {
-            info!(self.log, "Blobs sidecars are pruned");
+            debug!(self.log, "Blob sidecars are pruned");
            return Ok(());
        }
diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs
index 8f7bbc1b783..6bf74645000 100644
--- a/common/slot_clock/src/lib.rs
+++ b/common/slot_clock/src/lib.rs
@@ -137,4 +137,13 @@ pub trait SlotClock: Send + Sync + Sized + Clone {
        slot_clock.set_current_time(freeze_at);
        slot_clock
    }
+
+    /// Returns the delay between the start of the slot and when a request for block components
+    /// missed over gossip in the current slot should be made via RPC.
+    ///
+    /// Currently set equal to 1/2 of the `unagg_attestation_production_delay`, but this may be
+    /// changed in the future.
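+    /// For example, with 12-second mainnet slots the unaggregated attestation production delay
+    /// is 4 seconds (one third of a slot), so lookups triggered within roughly the first 2
+    /// seconds of a slot are delayed.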
+ fn single_lookup_delay(&self) -> Duration { + self.unagg_attestation_production_delay() / 2 + } } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index b78e486d512..19415f373bc 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -352,7 +352,7 @@ where spec: &ChainSpec, ) -> Result> { // Sanity check: the anchor must lie on an epoch boundary. - if anchor_block.slot() % E::slots_per_epoch() != 0 { + if anchor_state.slot() % E::slots_per_epoch() != 0 { return Err(Error::InvalidAnchor { block_slot: anchor_block.slot(), state_slot: anchor_state.slot(), @@ -388,6 +388,7 @@ where let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot()); let proto_array = ProtoArrayForkChoice::new::( + current_slot, finalized_block_slot, finalized_block_state_root, *fc_store.justified_checkpoint(), diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 157f072ad37..98d43e4850c 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -80,6 +80,7 @@ impl ForkChoiceTestDefinition { let junk_shuffling_id = AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero()); let mut fork_choice = ProtoArrayForkChoice::new::( + self.finalized_block_slot, self.finalized_block_slot, Hash256::zero(), self.justified_checkpoint, diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index fe831b3c357..5911e50fcdc 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -345,6 +345,7 @@ pub struct ProtoArrayForkChoice { impl ProtoArrayForkChoice { #[allow(clippy::too_many_arguments)] pub fn new( + current_slot: Slot, finalized_block_slot: Slot, finalized_block_state_root: Hash256, justified_checkpoint: Checkpoint, @@ -380,7 +381,7 @@ impl ProtoArrayForkChoice { }; proto_array - .on_block::(block, finalized_block_slot) + .on_block::(block, current_slot) .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?; Ok(Self { @@ -983,6 +984,7 @@ mod test_compute_deltas { }; let mut fc = ProtoArrayForkChoice::new::( + genesis_slot, genesis_slot, state_root, genesis_checkpoint, @@ -1108,6 +1110,7 @@ mod test_compute_deltas { }; let mut fc = ProtoArrayForkChoice::new::( + genesis_slot, genesis_slot, junk_state_root, genesis_checkpoint, diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index 75b8c6b0061..e89a78c4d84 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -23,7 +23,7 @@ impl From for Error { /// /// If the root of the supplied `state` is known, then it can be passed as `state_root`. If /// `state_root` is `None`, the root of `state` will be computed using a cached tree hash. -/// Providing the `state_root` makes this function several orders of magniude faster. +/// Providing the `state_root` makes this function several orders of magnitude faster. 
pub fn per_slot_processing( state: &mut BeaconState, state_root: Option, diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index dbc250de51b..2d25de4032b 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -6,13 +6,15 @@ use kzg::{KzgCommitment, KzgProof}; use serde_derive::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use ssz_types::VariableList; +use ssz_types::{FixedVector, VariableList}; use std::sync::Arc; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; /// Container of the data that identifies an individual blob. -#[derive(Serialize, Deserialize, Encode, Decode, TreeHash, Clone, Debug, PartialEq, Eq, Hash)] +#[derive( + Serialize, Deserialize, Encode, Decode, TreeHash, Copy, Clone, Debug, PartialEq, Eq, Hash, +)] pub struct BlobIdentifier { pub block_root: Hash256, pub index: u64, @@ -73,6 +75,8 @@ impl Ord for BlobSidecar { } pub type BlobSidecarList = VariableList>, ::MaxBlobsPerBlock>; +pub type FixedBlobSidecarList = + FixedVector>>, ::MaxBlobsPerBlock>; pub type Blobs = VariableList, ::MaxBlobsPerBlock>; impl SignedRoot for BlobSidecar {} diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 096c8ef1cf3..66fb9047bbe 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -105,7 +105,7 @@ pub trait EthSpec: /* * New in Deneb */ - type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq + Unpin; type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; type BytesPerFieldElement: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* @@ -255,6 +255,11 @@ pub trait EthSpec: fn max_blobs_per_block() -> usize { Self::MaxBlobsPerBlock::to_usize() } + + /// Returns the `BYTES_PER_BLOB` constant for this specification. + fn bytes_per_blob() -> usize { + Self::BytesPerBlob::to_usize() + } } /// Macro to inherit some type values from another EthSpec. diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 23a254079d2..cf7fd6819ea 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -1,3 +1,4 @@ +use crate::blob_sidecar::BlobIdentifier; use crate::*; use bls::Signature; use derivative::Derivative; @@ -248,6 +249,38 @@ impl> SignedBeaconBlock pub fn canonical_root(&self) -> Hash256 { self.message().tree_hash_root() } + + pub fn num_expected_blobs(&self) -> usize { + self.message() + .body() + .blob_kzg_commitments() + .map(|c| c.len()) + .unwrap_or(0) + } + + pub fn get_expected_blob_ids(&self, block_root: Option) -> Vec { + self.get_filtered_blob_ids(block_root, |_, _| true) + } + + /// If the filter returns `true` the id for the corresponding index and root will be included. + pub fn get_filtered_blob_ids( + &self, + block_root: Option, + filter: impl Fn(usize, Hash256) -> bool, + ) -> Vec { + let block_root = block_root.unwrap_or_else(|| self.canonical_root()); + let num_blobs_expected = self.num_expected_blobs(); + let mut blob_ids = Vec::with_capacity(num_blobs_expected); + for i in 0..num_blobs_expected { + if filter(i, block_root) { + blob_ids.push(BlobIdentifier { + block_root, + index: i as u64, + }); + } + } + blob_ids + } } // We can convert pre-Bellatrix blocks without payloads into blocks with payloads. 
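The `get_filtered_blob_ids` helper added above walks the expected blob indices for a block and keeps an identifier for each index the caller's filter accepts. A minimal stand-alone sketch of the same pattern, using a toy `BlobIdentifier` rather than the consensus type:

    // Toy stand-in for `types::blob_sidecar::BlobIdentifier`.
    #[derive(Debug, PartialEq)]
    struct BlobIdentifier {
        block_root: [u8; 32],
        index: u64,
    }

    /// Keep an identifier for every expected blob index the filter accepts.
    fn filtered_blob_ids(
        block_root: [u8; 32],
        num_expected_blobs: usize,
        filter: impl Fn(usize) -> bool,
    ) -> Vec<BlobIdentifier> {
        (0..num_expected_blobs)
            .filter(|i| filter(*i))
            .map(|index| BlobIdentifier {
                block_root,
                index: index as u64,
            })
            .collect()
    }

    fn main() {
        let root = [0u8; 32];
        // E.g. request only the indices not yet downloaded (the even ones here).
        let ids = filtered_blob_ids(root, 4, |i| i % 2 == 0);
        assert_eq!(ids.len(), 2);
        assert_eq!(ids[1].index, 2);
    }

This is why `get_expected_blob_ids` can be expressed as `get_filtered_blob_ids` with a filter that always returns `true`.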
diff --git a/consensus/types/src/test_utils/test_random/bitfield.rs b/consensus/types/src/test_utils/test_random/bitfield.rs index 5cb4e7d521f..3992421e375 100644 --- a/consensus/types/src/test_utils/test_random/bitfield.rs +++ b/consensus/types/src/test_utils/test_random/bitfield.rs @@ -4,8 +4,21 @@ use smallvec::smallvec; impl TestRandom for BitList { fn random_for_test(rng: &mut impl RngCore) -> Self { - let mut raw_bytes = smallvec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)]; + let initial_len = std::cmp::max(1, (N::to_usize() + 7) / 8); + let mut raw_bytes = smallvec![0; initial_len]; rng.fill_bytes(&mut raw_bytes); + + let non_zero_bytes = raw_bytes + .iter() + .enumerate() + .rev() + .find_map(|(i, byte)| (*byte > 0).then_some(i + 1)) + .unwrap_or(0); + + if non_zero_bytes < initial_len { + raw_bytes.truncate(non_zero_bytes); + } + Self::from_bytes(raw_bytes).expect("we generate a valid BitList") } }
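The `bitfield.rs` change above trims trailing zero bytes from the random buffer before calling `BitList::from_bytes`, since a valid `BitList` encoding carries its length-marker bit in the final byte and a trailing all-zero byte would make decoding fail. The trimming step in isolation, as a plain-`Vec<u8>` sketch rather than the `smallvec`-based code above:

    /// Drop trailing zero bytes; an all-zero input truncates to empty,
    /// mirroring the `unwrap_or(0)` in the diff above.
    fn trim_trailing_zeros(mut bytes: Vec<u8>) -> Vec<u8> {
        let non_zero_len = bytes
            .iter()
            .enumerate()
            .rev()
            .find_map(|(i, byte)| (*byte > 0).then_some(i + 1))
            .unwrap_or(0);
        bytes.truncate(non_zero_len);
        bytes
    }

    fn main() {
        assert_eq!(trim_trailing_zeros(vec![0b0000_0101, 0, 0]), vec![0b0000_0101]);
        assert_eq!(trim_trailing_zeros(vec![0, 0]), Vec::<u8>::new());
    }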