From 074c5eb09364621019b00d2e01668cecaf03da4b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 30 Nov 2021 15:36:51 +1100 Subject: [PATCH 01/92] Add invalid propagation --- consensus/proto_array/src/error.rs | 1 + consensus/proto_array/src/proto_array.rs | 56 ++++++++++++++++++++++-- 2 files changed, 54 insertions(+), 3 deletions(-) diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index adb10c035d6..a5802d7b05f 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -4,6 +4,7 @@ use types::{Checkpoint, Epoch, Hash256}; pub enum Error { FinalizedNodeUnknown(Hash256), JustifiedNodeUnknown(Hash256), + NodeUnknown(Hash256), InvalidFinalizedRootChange, InvalidNodeIndex(usize), InvalidParentIndex(usize), diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 465ef9d4fc7..cd3473b60e5 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -4,7 +4,7 @@ use serde_derive::{Deserialize, Serialize}; use ssz::four_byte_option_impl; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use types::{AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; // Define a "legacy" implementation of `Option` which uses four bytes for encoding the union @@ -250,14 +250,14 @@ impl ProtoArray { self.maybe_update_best_child_and_descendant(parent_index, node_index)?; if matches!(block.execution_status, ExecutionStatus::Valid(_)) { - self.propagate_execution_payload_verification(parent_index)?; + self.propagate_execution_payload_validation(parent_index)?; } } Ok(()) } - pub fn propagate_execution_payload_verification( + pub fn propagate_execution_payload_validation( &mut self, verified_node_index: usize, ) -> Result<(), Error> { @@ -288,6 +288,8 @@ impl ProtoArray { } // An ancestor of the valid payload was invalid. 
This is a serious error which // indicates a consensus failure in the execution node. This is unrecoverable. + // + // TODO(paul): relax this? ExecutionStatus::Invalid(ancestor_payload_block_hash) => { return Err(Error::InvalidAncestorOfValidPayload { ancestor_block_root: node.root, @@ -300,6 +302,54 @@ impl ProtoArray { } } + pub fn propagate_execution_payload_invalidation( + &mut self, + invalid_root: Hash256, + latest_valid_ancestor_root: Hash256, + ) -> Result, Error> { + let mut invalidated_roots = HashSet::default(); + let mut index = *self + .indices + .get(&invalid_root) + .ok_or(Error::NodeUnknown(invalid_root))?; + + loop { + let node = self + .nodes + .get_mut(index) + .ok_or(Error::InvalidNodeIndex(index))?; + + if node.root == latest_valid_ancestor_root { + // It might be new knowledge that this block is valid, ensure that it and all + // ancestors are marked as valid. + self.propagate_execution_payload_validation(index)?; + break; + } + + match &node.execution_status { + // TODO(paul): valid->invalid shouldn't happen.. + ExecutionStatus::Valid(hash) | ExecutionStatus::Unknown(hash) => { + invalidated_roots.insert(node.root); + node.execution_status = ExecutionStatus::Invalid(*hash) + } + // The block is already invalid, but keep going backwards to ensure all ancestors + // are updated. + ExecutionStatus::Invalid(_) => (), + // This block is pre-merge, therefore it has no execution status. Nor does its + // ancestors. + ExecutionStatus::Irrelevant(_) => break, + } + + if let Some(parent_index) = node.parent { + index = parent_index + } else { + break; + } + } + + Ok(invalidated_roots) + } + /// Follows the best-descendant links to find the best-block (i.e., head-block). 
/// /// ## Notes From a75d7b88add661e12b678a98661c951e699c5eb1 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 30 Nov 2021 15:54:00 +1100 Subject: [PATCH 02/92] Adjust deltas for invalid blocks --- consensus/proto_array/src/error.rs | 1 + consensus/proto_array/src/proto_array.rs | 15 +++++++++++---- .../proto_array/src/proto_array_fork_choice.rs | 4 ++++ 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index a5802d7b05f..2b18ae6f1e2 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -16,6 +16,7 @@ pub enum Error { DeltaOverflow(usize), ProposerBoostOverflow(usize), IndexOverflow(&'static str), + InvalidExecutionDeltaOverflow(usize), InvalidDeltaLen { deltas: usize, indices: usize, diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index cd3473b60e5..6a8ac5e7636 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -126,10 +126,17 @@ impl ProtoArray { continue; } - let mut node_delta = deltas - .get(node_index) - .copied() - .ok_or(Error::InvalidNodeDelta(node_index))?; + let mut node_delta = if node.execution_status.is_invalid() { + // If the node has an invalid execution payload, reduce its weight to zero. + 0_i64 + .checked_sub(node.weight as i64) + .ok_or(Error::InvalidExecutionDeltaOverflow(node_index))? + } else { + deltas + .get(node_index) + .copied() + .ok_or(Error::InvalidNodeDelta(node_index))? + }; // If we find the node for which the proposer boost was previously applied, decrease // the delta by the previous score amount. 
diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 891eafabe9a..0ea066ddcc0 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -49,6 +49,10 @@ impl ExecutionStatus { ExecutionStatus::Irrelevant(_) => None, } } + + pub fn is_invalid(&self) -> bool { + matches!(self, ExecutionStatus::Invalid(_)) + } } /// A block that is to be applied to the fork choice. From 5add0134a4549af87247ea7372db1eae3401a352 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 30 Nov 2021 16:21:09 +1100 Subject: [PATCH 03/92] Avoid invalid blocks when finding the head --- consensus/proto_array/src/error.rs | 3 +++ consensus/proto_array/src/proto_array.rs | 17 +++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index 2b18ae6f1e2..fe38dbc2c9d 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -30,6 +30,9 @@ pub enum Error { ancestor_block_root: Hash256, ancestor_payload_block_hash: Hash256, }, + InvalidJustifiedCheckpointExecutionStatus { + justified_root: Hash256, + }, } #[derive(Clone, PartialEq, Debug)] diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 6a8ac5e7636..f1f40b9f130 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -377,6 +377,19 @@ impl ProtoArray { .get(justified_index) .ok_or(Error::InvalidJustifiedIndex(justified_index))?; + // Since there are no valid descendants of a justified block with an invalid execution + // payload, there would be no head to choose from. + // + // Fork choice is effectively broken until a new justified root is set. It might not be + // practically possible to set a new justified root if we are unable to find a new head. + // + // This scenario is *unsupported*. 
It represents a serious consensus failure. + if justified_node.execution_status.is_invalid() { + return Err(Error::InvalidJustifiedCheckpointExecutionStatus { + justified_root: *justified_root, + }); + } + let best_descendant_index = justified_node.best_descendant.unwrap_or(justified_index); let best_node = self @@ -594,6 +607,10 @@ impl ProtoArray { /// Any node that has a different finalized or justified epoch should not be viable for the /// head. fn node_is_viable_for_head(&self, node: &ProtoNode) -> bool { + if node.execution_status.is_invalid() { + return false; + } + if let (Some(node_justified_checkpoint), Some(node_finalized_checkpoint)) = (node.justified_checkpoint, node.finalized_checkpoint) { From 4a224824c156558e8b1fe82c1f7cf12c5f53a21b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 30 Nov 2021 16:44:30 +1100 Subject: [PATCH 04/92] Invalidate descendants of invalid blocks too --- consensus/proto_array/src/error.rs | 4 ++ consensus/proto_array/src/proto_array.rs | 51 +++++++++++++++++++++--- 2 files changed, 49 insertions(+), 6 deletions(-) diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index fe38dbc2c9d..185fbf9ce89 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -30,6 +30,10 @@ pub enum Error { ancestor_block_root: Hash256, ancestor_payload_block_hash: Hash256, }, + ValidExecutionStatusBecameInvalid { + block_root: Hash256, + payload_block_hash: Hash256, + }, InvalidJustifiedCheckpointExecutionStatus { justified_root: Hash256, }, diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index f1f40b9f130..8a005d26ac6 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -313,13 +313,16 @@ impl ProtoArray { &mut self, invalid_root: Hash256, latest_valid_ancestor_root: Hash256, - ) -> Result, Error> { - let mut invalidated_roots = HashSet::default(); + ) -> Result<(), Error> { + 
let mut invalidated_indices: HashSet = <_>::default(); let mut index = *self .indices .get(&invalid_root) .ok_or(Error::NodeUnknown(invalid_root))?; + let first_potential_descendant = index + 1; + // Collect all *ancestors* which were declared invalid since they reside between the + // `invalid_root` and the `latest_valid_ancestor_root`. loop { let node = self .nodes @@ -334,9 +337,15 @@ impl ProtoArray { } match &node.execution_status { - // TODO(paul): valid->invalid shouldn't happen.. - ExecutionStatus::Valid(hash) | ExecutionStatus::Unknown(hash) => { - invalidated_roots.insert(node.root); + // It's illegal for an execution client to declare that some previously-valid block + // is now invalid. This is a consensus failure on their behalf. + ExecutionStatus::Valid(hash) => { + return Err(Error::ValidExecutionStatusBecameInvalid { + block_root: node.root, + payload_block_hash: *hash, + }) + } + ExecutionStatus::Unknown(hash) => { node.execution_status = ExecutionStatus::Invalid(*hash) } // The block is already invalid, but keep going backwards to ensure all ancestors @@ -347,6 +356,8 @@ impl ProtoArray { ExecutionStatus::Irrelevant(_) => break, } + invalidated_indices.insert(index); + if let Some(parent_index) = node.parent { index = parent_index } else { @@ -354,7 +365,35 @@ impl ProtoArray { } } - Ok(invalidated_roots) + // Collect all *descendants* which declared invalid since they're the descendant of a block + // with an invalid execution payload. 
+ for index in first_potential_descendant..self.nodes.len() { + let node = self + .nodes + .get_mut(index) + .ok_or(Error::InvalidNodeIndex(index))?; + + if let Some(parent_index) = node.parent { + if invalidated_indices.contains(&parent_index) { + match &node.execution_status { + ExecutionStatus::Valid(hash) => { + return Err(Error::ValidExecutionStatusBecameInvalid { + block_root: node.root, + payload_block_hash: *hash, + }) + } + ExecutionStatus::Unknown(hash) | ExecutionStatus::Invalid(hash) => { + node.execution_status = ExecutionStatus::Invalid(*hash) + } + ExecutionStatus::Irrelevant(_) => (), + } + + invalidated_indices.insert(index); + } + } + } + + Ok(()) } /// Follows the best-descendant links to find the best-block (i.e., head-block). From bf8bc8c6adeac6c7303f3b69665f149bae46ee0e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 1 Dec 2021 11:00:03 +1100 Subject: [PATCH 05/92] Pass latest_valid_hash from API --- beacon_node/execution_layer/src/engine_api.rs | 6 +- .../execution_layer/src/engine_api/http.rs | 10 +++- .../src/engine_api/json_structures.rs | 60 ++++++------------- beacon_node/execution_layer/src/lib.rs | 51 +++++++--------- .../test_utils/execution_block_generator.rs | 11 ++-- .../src/test_utils/handle_rpc.rs | 39 ++++++++---- .../src/test_utils/mock_execution_layer.rs | 11 ++-- .../execution_layer/src/test_utils/mod.rs | 7 +-- consensus/fork_choice/src/fork_choice.rs | 11 ++++ 9 files changed, 102 insertions(+), 104 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index f9654a497bc..71f90632db8 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -17,6 +17,7 @@ pub enum Error { Reqwest(reqwest::Error), BadResponse(String), RequestFailed(String), + InvalidExecutePayloadResponse(&'static str), JsonRpc(RpcError), Json(serde_json::Error), ServerMessage { code: i64, message: String }, @@ -74,15 +75,14 @@ pub 
trait EngineApi { #[derive(Clone, Copy, Debug, PartialEq)] pub enum ExecutePayloadResponseStatus { - Valid, - Invalid, + Valid { latest_valid_hash: Hash256 }, + Invalid { latest_valid_hash: Hash256 }, Syncing, } #[derive(Clone, Debug, PartialEq)] pub struct ExecutePayloadResponse { pub status: ExecutePayloadResponseStatus, - pub latest_valid_hash: Option, pub validation_error: Option, } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index c7c60a90062..4f0c19e2d67 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -8,6 +8,7 @@ use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde_json::json; +use std::convert::TryInto; use std::time::Duration; use types::EthSpec; @@ -147,7 +148,9 @@ impl EngineApi for HttpJsonRpc { ) .await?; - Ok(response.into()) + response + .try_into() + .map_err(Error::InvalidExecutePayloadResponse) } async fn get_payload_v1( @@ -780,10 +783,11 @@ mod test { .await .unwrap(); + let latest_valid_hash = Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(); + assert_eq!(response, ExecutePayloadResponse { - status: ExecutePayloadResponseStatus::Valid, - latest_valid_hash: Some(Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap()), + status: ExecutePayloadResponseStatus::Valid { latest_valid_hash }, validation_error: None } ); diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index ae6d730fa5a..556a854d3df 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,5 +1,6 @@ use super::*; use serde::{Deserialize, Serialize}; +use std::convert::TryFrom; use types::{EthSpec, FixedVector, 
Transaction, Unsigned, VariableList}; #[derive(Debug, PartialEq, Serialize, Deserialize)] @@ -261,44 +262,10 @@ pub struct JsonExecutePayloadV1Response { pub validation_error: Option, } -impl From for JsonExecutePayloadV1ResponseStatus { - fn from(e: ExecutePayloadResponseStatus) -> Self { - match e { - ExecutePayloadResponseStatus::Valid => JsonExecutePayloadV1ResponseStatus::Valid, - ExecutePayloadResponseStatus::Invalid => JsonExecutePayloadV1ResponseStatus::Invalid, - ExecutePayloadResponseStatus::Syncing => JsonExecutePayloadV1ResponseStatus::Syncing, - } - } -} -impl From for ExecutePayloadResponseStatus { - fn from(j: JsonExecutePayloadV1ResponseStatus) -> Self { - match j { - JsonExecutePayloadV1ResponseStatus::Valid => ExecutePayloadResponseStatus::Valid, - JsonExecutePayloadV1ResponseStatus::Invalid => ExecutePayloadResponseStatus::Invalid, - JsonExecutePayloadV1ResponseStatus::Syncing => ExecutePayloadResponseStatus::Syncing, - } - } -} +impl TryFrom for ExecutePayloadResponse { + type Error = &'static str; -impl From for JsonExecutePayloadV1Response { - fn from(e: ExecutePayloadResponse) -> Self { - // Use this verbose deconstruction pattern to ensure no field is left unused. - let ExecutePayloadResponse { - status, - latest_valid_hash, - validation_error, - } = e; - - Self { - status: status.into(), - latest_valid_hash, - validation_error, - } - } -} - -impl From for ExecutePayloadResponse { - fn from(j: JsonExecutePayloadV1Response) -> Self { + fn try_from(j: JsonExecutePayloadV1Response) -> Result { // Use this verbose deconstruction pattern to ensure no field is left unused. 
let JsonExecutePayloadV1Response { status, @@ -306,11 +273,22 @@ impl From for ExecutePayloadResponse { validation_error, } = j; - Self { - status: status.into(), - latest_valid_hash, + let status = match status { + JsonExecutePayloadV1ResponseStatus::Valid => latest_valid_hash + .map(|latest_valid_hash| ExecutePayloadResponseStatus::Valid { latest_valid_hash }) + .ok_or("valid response is missing latest_valid_hash")?, + JsonExecutePayloadV1ResponseStatus::Invalid => latest_valid_hash + .map(|latest_valid_hash| ExecutePayloadResponseStatus::Invalid { + latest_valid_hash, + }) + .ok_or("invalid response is missing latest_valid_hash")?, + JsonExecutePayloadV1ResponseStatus::Syncing => ExecutePayloadResponseStatus::Syncing, + }; + + Ok(Self { + status, validation_error, - } + }) } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 5c069f0b0b1..5f53aa2255d 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -321,7 +321,7 @@ impl ExecutionLayer { pub async fn execute_payload( &self, execution_payload: &ExecutionPayload, - ) -> Result<(ExecutePayloadResponseStatus, Option), Error> { + ) -> Result { debug!( self.log(), "Issuing engine_executePayload"; @@ -336,46 +336,38 @@ impl ExecutionLayer { .await; let mut errors = vec![]; - let mut valid = 0; - let mut invalid = 0; + let mut valid_responses: Vec = vec![]; + let mut invalid_responses: Vec = vec![]; let mut syncing = 0; - let mut invalid_latest_valid_hash = vec![]; for result in broadcast_results { - match result.map(|response| (response.latest_valid_hash, response.status)) { - Ok((Some(latest_hash), ExecutePayloadResponseStatus::Valid)) => { - if latest_hash == execution_payload.block_hash { - valid += 1; + match result.map(|response| response.status) { + Ok(ExecutePayloadResponseStatus::Valid { latest_valid_hash }) => { + if latest_valid_hash == execution_payload.block_hash { + valid_responses.push(latest_valid_hash); } 
else { - invalid += 1; errors.push(EngineError::Api { id: "unknown".to_string(), error: engine_api::Error::BadResponse( format!( - "execute_payload: response.status = Valid but invalid latest_valid_hash. Expected({:?}) Found({:?})", + "execute_payload: response.status.is_ok(), but mismatched latest_valid_hash. Expected({:?}) Found({:?})", execution_payload.block_hash, - latest_hash, + latest_valid_hash, ) ), }); - invalid_latest_valid_hash.push(latest_hash); } } - Ok((Some(latest_hash), ExecutePayloadResponseStatus::Invalid)) => { - invalid += 1; - invalid_latest_valid_hash.push(latest_hash); + Ok(ExecutePayloadResponseStatus::Invalid { latest_valid_hash }) => { + invalid_responses.push(latest_valid_hash); } - Ok((_, ExecutePayloadResponseStatus::Syncing)) => syncing += 1, - Ok((None, status)) => errors.push(EngineError::Api { - id: "unknown".to_string(), - error: engine_api::Error::BadResponse(format!( - "execute_payload: status {:?} returned with null latest_valid_hash", - status - )), - }), + Ok(ExecutePayloadResponseStatus::Syncing) => syncing += 1, Err(e) => errors.push(e), } } + let valid = valid_responses.len(); + let invalid = invalid_responses.len(); + if valid > 0 && invalid > 0 { crit!( self.log(), @@ -384,15 +376,12 @@ impl ExecutionLayer { ); } - if valid > 0 { - Ok(( - ExecutePayloadResponseStatus::Valid, - Some(execution_payload.block_hash), - )) - } else if invalid > 0 { - Ok((ExecutePayloadResponseStatus::Invalid, None)) + if let Some(latest_valid_hash) = valid_responses.first().copied() { + Ok(ExecutePayloadResponseStatus::Valid { latest_valid_hash }) + } else if let Some(latest_valid_hash) = invalid_responses.first().copied() { + Ok(ExecutePayloadResponseStatus::Invalid { latest_valid_hash }) } else if syncing > 0 { - Ok((ExecutePayloadResponseStatus::Syncing, None)) + Ok(ExecutePayloadResponseStatus::Syncing) } else { Err(Error::EngineErrors(errors)) } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs 
b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 552bea0ea48..f66a5c1d176 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -241,15 +241,15 @@ impl ExecutionBlockGenerator { } else { return ExecutePayloadResponse { status: ExecutePayloadResponseStatus::Syncing, - latest_valid_hash: None, validation_error: None, }; }; if payload.block_number != parent.block_number() + 1 { return ExecutePayloadResponse { - status: ExecutePayloadResponseStatus::Invalid, - latest_valid_hash: Some(parent.block_hash()), + status: ExecutePayloadResponseStatus::Invalid { + latest_valid_hash: parent.block_hash(), + }, validation_error: Some("invalid block number".to_string()), }; } @@ -258,8 +258,9 @@ impl ExecutionBlockGenerator { self.pending_payloads.insert(payload.block_hash, payload); ExecutePayloadResponse { - status: ExecutePayloadResponseStatus::Valid, - latest_valid_hash: Some(valid_hash), + status: ExecutePayloadResponseStatus::Valid { + latest_valid_hash: valid_hash, + }, validation_error: None, } } diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 131bc8ba0af..f19f1738341 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -57,19 +57,12 @@ pub async fn handle_rpc( ENGINE_EXECUTE_PAYLOAD_V1 => { let request: JsonExecutionPayloadV1 = get_param(params, 0)?; - let response = if let Some(status) = *ctx.static_execute_payload_response.lock() { - match status { - ExecutePayloadResponseStatus::Valid => ExecutePayloadResponse { - status, - latest_valid_hash: Some(request.block_hash), - validation_error: None, + let response = if *ctx.all_payloads_valid.lock() { + ExecutePayloadResponse { + status: ExecutePayloadResponseStatus::Valid { + latest_valid_hash: request.block_hash, }, 
- ExecutePayloadResponseStatus::Syncing => ExecutePayloadResponse { - status, - latest_valid_hash: None, - validation_error: None, - }, - _ => unimplemented!("invalid static executePayloadResponse"), + validation_error: None, } } else { ctx.execution_block_generator @@ -77,7 +70,27 @@ pub async fn handle_rpc( .execute_payload(request.into()) }; - Ok(serde_json::to_value(JsonExecutePayloadV1Response::from(response)).unwrap()) + let (status, latest_valid_hash) = match response.status { + ExecutePayloadResponseStatus::Valid { latest_valid_hash } => ( + JsonExecutePayloadV1ResponseStatus::Valid, + Some(latest_valid_hash), + ), + ExecutePayloadResponseStatus::Invalid { latest_valid_hash } => ( + JsonExecutePayloadV1ResponseStatus::Invalid, + Some(latest_valid_hash), + ), + ExecutePayloadResponseStatus::Syncing => { + (JsonExecutePayloadV1ResponseStatus::Syncing, None) + } + }; + + let json_response = JsonExecutePayloadV1Response { + status, + latest_valid_hash, + validation_error: None, + }; + + Ok(serde_json::to_value(json_response).unwrap()) } ENGINE_GET_PAYLOAD_V1 => { let request: JsonPayloadIdRequest = get_param(params, 0)?; diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 59345bc01f2..1bd80442cae 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -138,10 +138,13 @@ impl MockExecutionLayer { assert_eq!(payload.timestamp, timestamp); assert_eq!(payload.random, random); - let (payload_response, latest_valid_hash) = - self.el.execute_payload(&payload).await.unwrap(); - assert_eq!(payload_response, ExecutePayloadResponseStatus::Valid); - assert_eq!(latest_valid_hash, Some(payload.block_hash)); + let payload_response = self.el.execute_payload(&payload).await.unwrap(); + assert_eq!( + payload_response, + ExecutePayloadResponseStatus::Valid { + latest_valid_hash: 
payload.block_hash + } + ); self.el .notify_forkchoice_updated(block_hash, Hash256::zero(), None) diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index cd45d34a1f7..c4230d923cf 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -1,7 +1,6 @@ //! Provides a mock execution engine HTTP JSON-RPC API for use in testing. use crate::engine_api::http::JSONRPC_VERSION; -use crate::engine_api::ExecutePayloadResponseStatus; use bytes::Bytes; use environment::null_logger; use execution_block_generator::{Block, PoWBlock}; @@ -62,7 +61,7 @@ impl MockServer { last_echo_request: last_echo_request.clone(), execution_block_generator: RwLock::new(execution_block_generator), preloaded_responses, - static_execute_payload_response: <_>::default(), + all_payloads_valid: <_>::default(), _phantom: PhantomData, }); @@ -117,7 +116,7 @@ impl MockServer { } pub fn all_payloads_valid(&self) { - *self.ctx.static_execute_payload_response.lock() = Some(ExecutePayloadResponseStatus::Valid) + *self.ctx.all_payloads_valid.lock() = true; } pub fn insert_pow_block( @@ -187,7 +186,7 @@ pub struct Context { pub last_echo_request: Arc>>, pub execution_block_generator: RwLock>, pub preloaded_responses: Arc>>, - pub static_execute_payload_response: Arc>>, + pub all_payloads_valid: Arc>, pub _phantom: PhantomData, } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 3ab07c6af12..2fe81fe1da8 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -17,6 +17,7 @@ pub enum Error { ProtoArrayError(String), InvalidProtoArrayBytes(String), InvalidLegacyProtoArrayBytes(String), + FailedToProcessInvalidExecutionPayload(String), MissingProtoArrayBlock(Hash256), UnknownAncestor { ancestor_slot: Slot, @@ -464,6 +465,16 @@ where Ok(true) } + pub fn on_invalid_execution_payload( + &mut 
self, + invalid_root: Hash256, + latest_valid_ancestor_root: Hash256, + ) -> Result<(), Error> { + self.proto_array + .process_execution_payload_invalidation(invalid_root, latest_valid_ancestor_root) + .map_err(Error::FailedToProcessInvalidExecutionPayload) + } + /// Add `block` to the fork choice DAG. /// /// - `block_root` is the root of `block. From e372e06f23c1790bf7473eb673d62c8c58fdc181 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 1 Dec 2021 11:00:27 +1100 Subject: [PATCH 06/92] Add invalidation of payloads --- .../beacon_chain/src/execution_payload.rs | 34 ++++++++++++++++--- .../src/proto_array_fork_choice.rs | 10 ++++++ 2 files changed, 39 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index c19bba61268..545a4aaef1b 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -14,12 +14,13 @@ use crate::{ use execution_layer::ExecutePayloadResponseStatus; use fork_choice::PayloadVerificationStatus; use proto_array::{Block as ProtoBlock, ExecutionStatus}; -use slog::debug; +use slog::{crit, debug, warn}; use slot_clock::SlotClock; use state_processing::per_block_processing::{ compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, partially_verify_execution_payload, }; +use tree_hash::TreeHash; use types::*; /// Verify that `execution_payload` contained by `block` is considered valid by an execution @@ -57,10 +58,33 @@ pub fn execute_payload( .block_on(|execution_layer| execution_layer.execute_payload(execution_payload)); match execute_payload_response { - Ok((status, _latest_valid_hash)) => match status { - ExecutePayloadResponseStatus::Valid => Ok(PayloadVerificationStatus::Verified), - // TODO(merge): invalidate any invalid ancestors of this block in fork choice. 
- ExecutePayloadResponseStatus::Invalid => { + Ok(status) => match status { + ExecutePayloadResponseStatus::Valid { .. } => Ok(PayloadVerificationStatus::Verified), + ExecutePayloadResponseStatus::Invalid { latest_valid_hash } => { + // TODO(paul): pass this value to avoid double hashing? + let invalid_root = block.tree_hash_root(); + match chain + .fork_choice + .write() + .on_invalid_execution_payload(invalid_root, latest_valid_hash) + { + Ok(()) => warn!( + chain.log, + "Invalid execution payload in block"; + "latest_valid_hash" => ?latest_valid_hash, + "root" => ?invalid_root, + ), + Err(e) => { + crit!( + chain.log, + "Failed to process invalid payload"; + "latest_valid_hash" => ?latest_valid_hash, + "root" => ?invalid_root, + ); + + return Err(BeaconChainError::from(e).into()); + } + } Err(ExecutionPayloadError::RejectedByExecutionEngine.into()) } ExecutePayloadResponseStatus::Syncing => Ok(PayloadVerificationStatus::NotVerified), diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 0ea066ddcc0..14c13b37a69 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -154,6 +154,16 @@ impl ProtoArrayForkChoice { }) } + pub fn process_execution_payload_invalidation( + &mut self, + invalid_root: Hash256, + latest_valid_ancestor_root: Hash256, + ) -> Result<(), String> { + self.proto_array + .propagate_execution_payload_invalidation(invalid_root, latest_valid_ancestor_root) + .map_err(|e| format!("Failed to process invalid payload: {:?}", e)) + } + pub fn process_attestation( &mut self, validator_index: usize, From 0922b1726717471fa2b00a2418b5091788f55811 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 2 Dec 2021 14:21:44 +1100 Subject: [PATCH 07/92] Pass root to execute_payload --- beacon_node/beacon_chain/src/block_verification.rs | 3 ++- beacon_node/beacon_chain/src/execution_payload.rs | 10 ++++------ 2 files 
changed, 6 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index c2dc0028e99..a0296b01fec 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1125,7 +1125,8 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // // It is important that this function is called *after* `per_slot_processing`, since the // `randao` may change. - let payload_verification_status = execute_payload(chain, &state, block.message())?; + let payload_verification_status = + execute_payload(chain, &state, block.message(), block_root)?; // If the block is sufficiently recent, notify the validator monitor. if let Some(slot) = chain.slot_clock.now() { diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 545a4aaef1b..b0a2ba79dec 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -20,7 +20,6 @@ use state_processing::per_block_processing::{ compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, partially_verify_execution_payload, }; -use tree_hash::TreeHash; use types::*; /// Verify that `execution_payload` contained by `block` is considered valid by an execution @@ -36,6 +35,7 @@ pub fn execute_payload( chain: &BeaconChain, state: &BeaconState, block: BeaconBlockRef, + block_root: Hash256, ) -> Result> { if !is_execution_enabled(state, block.body()) { return Ok(PayloadVerificationStatus::Irrelevant); @@ -61,25 +61,23 @@ pub fn execute_payload( Ok(status) => match status { ExecutePayloadResponseStatus::Valid { .. } => Ok(PayloadVerificationStatus::Verified), ExecutePayloadResponseStatus::Invalid { latest_valid_hash } => { - // TODO(paul): pass this value to avoid double hashing? 
- let invalid_root = block.tree_hash_root(); match chain .fork_choice .write() - .on_invalid_execution_payload(invalid_root, latest_valid_hash) + .on_invalid_execution_payload(block_root, latest_valid_hash) { Ok(()) => warn!( chain.log, "Invalid execution payload in block"; "latest_valid_hash" => ?latest_valid_hash, - "root" => ?invalid_root, + "root" => ?block_root, ), Err(e) => { crit!( chain.log, "Failed to process invalid payload"; "latest_valid_hash" => ?latest_valid_hash, - "root" => ?invalid_root, + "root" => ?block_root, ); return Err(BeaconChainError::from(e).into()); From 1fe63766962975b78eaa6f3f81f1f70be9c12989 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 2 Dec 2021 14:22:40 +1100 Subject: [PATCH 08/92] Add FixedPayloadResponse --- .../src/test_utils/handle_rpc.rs | 28 +++++++++++-------- .../execution_layer/src/test_utils/mod.rs | 20 +++++++++++-- 2 files changed, 34 insertions(+), 14 deletions(-) diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index f19f1738341..c56e1481a63 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -1,4 +1,4 @@ -use super::Context; +use super::{Context, FixedPayloadResponse}; use crate::engine_api::{http::*, ExecutePayloadResponse, ExecutePayloadResponseStatus}; use crate::json_structures::*; use serde::de::DeserializeOwned; @@ -57,17 +57,23 @@ pub async fn handle_rpc( ENGINE_EXECUTE_PAYLOAD_V1 => { let request: JsonExecutionPayloadV1 = get_param(params, 0)?; - let response = if *ctx.all_payloads_valid.lock() { - ExecutePayloadResponse { - status: ExecutePayloadResponseStatus::Valid { - latest_valid_hash: request.block_hash, - }, - validation_error: None, - } - } else { - ctx.execution_block_generator + let response = match *ctx.fixed_payload_response.lock() { + FixedPayloadResponse::None => ctx + .execution_block_generator .write() - 
.execute_payload(request.into()) + .execute_payload(request.into()), + FixedPayloadResponse::Valid => { + let latest_valid_hash = request.block_hash; + // Try to import the block, ignore the response. + ctx.execution_block_generator + .write() + .execute_payload(request.into()); + ExecutePayloadResponse { + status: ExecutePayloadResponseStatus::Valid { latest_valid_hash }, + validation_error: None, + } + } + FixedPayloadResponse::Invalid => unimplemented!(), }; let (status, latest_valid_hash) = match response.status { diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index c4230d923cf..105a149d3ea 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -27,6 +27,12 @@ mod execution_block_generator; mod handle_rpc; mod mock_execution_layer; +pub enum FixedPayloadResponse { + None, + Valid, + Invalid, +} + pub struct MockServer { _shutdown_tx: oneshot::Sender<()>, listen_socket_addr: SocketAddr, @@ -61,7 +67,7 @@ impl MockServer { last_echo_request: last_echo_request.clone(), execution_block_generator: RwLock::new(execution_block_generator), preloaded_responses, - all_payloads_valid: <_>::default(), + fixed_payload_response: Arc::new(Mutex::new(FixedPayloadResponse::None)), _phantom: PhantomData, }); @@ -116,7 +122,15 @@ impl MockServer { } pub fn all_payloads_valid(&self) { - *self.ctx.all_payloads_valid.lock() = true; + *self.ctx.fixed_payload_response.lock() = FixedPayloadResponse::Valid; + } + + pub fn all_payloads_invalid(&self) { + *self.ctx.fixed_payload_response.lock() = FixedPayloadResponse::Invalid; + } + + pub fn full_payload_verification(&self) { + *self.ctx.fixed_payload_response.lock() = FixedPayloadResponse::None; } pub fn insert_pow_block( @@ -186,7 +200,7 @@ pub struct Context { pub last_echo_request: Arc>>, pub execution_block_generator: RwLock>, pub preloaded_responses: Arc>>, - pub all_payloads_valid: Arc>, + pub 
fixed_payload_response: Arc>, pub _phantom: PhantomData, } From 9b283895d20c5284cbc992e9b6175faff5969fac Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 2 Dec 2021 14:22:59 +1100 Subject: [PATCH 09/92] Add non-passing invalid payload tests --- beacon_node/beacon_chain/tests/main.rs | 1 + .../tests/payload_invalidation.rs | 109 ++++++++++++++++++ 2 files changed, 110 insertions(+) create mode 100644 beacon_node/beacon_chain/tests/payload_invalidation.rs diff --git a/beacon_node/beacon_chain/tests/main.rs b/beacon_node/beacon_chain/tests/main.rs index fa31af84060..1c61e9927fc 100644 --- a/beacon_node/beacon_chain/tests/main.rs +++ b/beacon_node/beacon_chain/tests/main.rs @@ -3,6 +3,7 @@ mod attestation_verification; mod block_verification; mod merge; mod op_verification; +mod payload_invalidation; mod store_tests; mod sync_committee_verification; mod tests; diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs new file mode 100644 index 00000000000..263e350d0b7 --- /dev/null +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -0,0 +1,109 @@ +// #![cfg(not(debug_assertions))] + +use beacon_chain::{ + test_utils::{BeaconChainHarness, EphemeralHarnessType}, + BlockError, ExecutionPayloadError, +}; +use std::collections::HashSet; +use types::*; + +const VALIDATOR_COUNT: usize = 32; + +type E = MainnetEthSpec; + +enum Payload { + Valid, + Invalid, +} + +struct InvalidPayloadRig { + harness: BeaconChainHarness>, + valid_blocks: HashSet, + invalid_blocks: HashSet, +} + +impl InvalidPayloadRig { + fn new() -> Self { + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.merge_fork_epoch = Some(Epoch::new(0)); + + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .spec(spec) + .deterministic_keypairs(VALIDATOR_COUNT) + .mock_execution_layer() + .fresh_ephemeral_store() + .build(); + + // Move to slot 1. 
+ harness.advance_slot(); + + Self { + harness, + valid_blocks: <_>::default(), + invalid_blocks: <_>::default(), + } + } + + fn move_to_terminal_block(&self) { + let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); + mock_execution_layer + .server + .execution_block_generator() + .move_to_terminal_block() + .unwrap(); + } + + fn import_block(&mut self, is_valid: Payload) -> Hash256 { + let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); + + let head = self.harness.chain.head().unwrap(); + let state = head.beacon_state; + let slot = state.slot() + 1; + let (block, _post_state) = self.harness.make_block(state, slot); + let block_root = block.canonical_root(); + + match is_valid { + Payload::Valid => { + mock_execution_layer.server.full_payload_verification(); + self.harness.process_block(slot, block.clone()).unwrap(); + self.valid_blocks.insert(block_root); + } + Payload::Invalid => { + mock_execution_layer.server.all_payloads_invalid(); + match self.harness.process_block(slot, block.clone()) { + Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::RejectedByExecutionEngine, + )) => (), + Err(other) => { + panic!("expected invalid payload, got {:?}", other) + } + Ok(_) => panic!("block with invalid payload was imported"), + }; + self.invalid_blocks.insert(block_root); + } + } + + block_root + } +} + +#[test] +fn invalid_during_processing() { + let mut rig = InvalidPayloadRig::new(); + rig.move_to_terminal_block(); + + let roots = &[ + rig.import_block(Payload::Valid), + rig.import_block(Payload::Invalid), + rig.import_block(Payload::Valid), + ]; + + // 0 should be present in the chain. + assert!(rig.harness.chain.get_block(&roots[0]).unwrap().is_some()); + // 1 should *not* be present in the chain. + assert_eq!(rig.harness.chain.get_block(&roots[1]).unwrap(), None); + // 2 should be the head. 
+ let head = rig.harness.chain.head_info().unwrap(); + assert_eq!(head.block_root, roots[2]); +} From 06eebc2202b6941032e4cb3a3cebb4957adc6d63 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 2 Dec 2021 15:25:34 +1100 Subject: [PATCH 10/92] Fix failing test --- .../beacon_chain/src/execution_payload.rs | 3 ++- .../tests/payload_invalidation.rs | 12 ++++++++- .../test_utils/execution_block_generator.rs | 2 +- .../src/test_utils/handle_rpc.rs | 5 +++- .../execution_layer/src/test_utils/mod.rs | 7 +++--- consensus/fork_choice/src/fork_choice.rs | 4 +-- consensus/proto_array/src/proto_array.rs | 25 ++++++++++++------- .../src/proto_array_fork_choice.rs | 4 +-- 8 files changed, 42 insertions(+), 20 deletions(-) diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index b0a2ba79dec..b12af1e2816 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -61,10 +61,11 @@ pub fn execute_payload( Ok(status) => match status { ExecutePayloadResponseStatus::Valid { .. 
} => Ok(PayloadVerificationStatus::Verified), ExecutePayloadResponseStatus::Invalid { latest_valid_hash } => { + let head_block_root = block.parent_root(); match chain .fork_choice .write() - .on_invalid_execution_payload(block_root, latest_valid_hash) + .on_invalid_execution_payload(head_block_root, latest_valid_hash) { Ok(()) => warn!( chain.log, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 263e350d0b7..e9432f333de 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -70,7 +70,17 @@ impl InvalidPayloadRig { self.valid_blocks.insert(block_root); } Payload::Invalid => { - mock_execution_layer.server.all_payloads_invalid(); + let parent = self + .harness + .chain + .get_block(&block.message().parent_root()) + .unwrap() + .unwrap(); + let parent_payload = parent.message().body().execution_payload().unwrap(); + mock_execution_layer + .server + .all_payloads_invalid(parent_payload.block_hash); + match self.harness.process_block(slot, block.clone()) { Err(BlockError::ExecutionPayloadError( ExecutionPayloadError::RejectedByExecutionEngine, diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index f66a5c1d176..454993091ec 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -232,7 +232,7 @@ impl ExecutionBlockGenerator { } pub fn get_payload(&mut self, id: &PayloadId) -> Option> { - self.payload_ids.remove(id) + self.payload_ids.get(id).cloned() } pub fn execute_payload(&mut self, payload: ExecutionPayload) -> ExecutePayloadResponse { diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index c56e1481a63..e3b2875cf11 
100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -73,7 +73,10 @@ pub async fn handle_rpc( validation_error: None, } } - FixedPayloadResponse::Invalid => unimplemented!(), + FixedPayloadResponse::Invalid { latest_valid_hash } => ExecutePayloadResponse { + status: ExecutePayloadResponseStatus::Invalid { latest_valid_hash }, + message: None, + }, }; let (status, latest_valid_hash) = match response.status { diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 105a149d3ea..b7f0b98a9f6 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -30,7 +30,7 @@ mod mock_execution_layer; pub enum FixedPayloadResponse { None, Valid, - Invalid, + Invalid { latest_valid_hash: Hash256 }, } pub struct MockServer { @@ -125,8 +125,9 @@ impl MockServer { *self.ctx.fixed_payload_response.lock() = FixedPayloadResponse::Valid; } - pub fn all_payloads_invalid(&self) { - *self.ctx.fixed_payload_response.lock() = FixedPayloadResponse::Invalid; + pub fn all_payloads_invalid(&self, latest_valid_hash: Hash256) { + *self.ctx.fixed_payload_response.lock() = + FixedPayloadResponse::Invalid { latest_valid_hash }; } pub fn full_payload_verification(&self) { diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 2fe81fe1da8..463cadec245 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -467,11 +467,11 @@ where pub fn on_invalid_execution_payload( &mut self, - invalid_root: Hash256, + head_block_root: Hash256, latest_valid_ancestor_root: Hash256, ) -> Result<(), Error> { self.proto_array - .process_execution_payload_invalidation(invalid_root, latest_valid_ancestor_root) + .process_execution_payload_invalidation(head_block_root, latest_valid_ancestor_root) 
.map_err(Error::FailedToProcessInvalidExecutionPayload) } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 8a005d26ac6..53471874ec9 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -311,14 +311,14 @@ impl ProtoArray { pub fn propagate_execution_payload_invalidation( &mut self, - invalid_root: Hash256, - latest_valid_ancestor_root: Hash256, + head_block_root: Hash256, + latest_valid_ancestor_hash: Hash256, ) -> Result<(), Error> { let mut invalidated_indices: HashSet = <_>::default(); let mut index = *self .indices - .get(&invalid_root) - .ok_or(Error::NodeUnknown(invalid_root))?; + .get(&head_block_root) + .ok_or(Error::NodeUnknown(head_block_root))?; let first_potential_descendant = index + 1; // Collect all *ancestors* which were declared invalid since they reside between the @@ -329,11 +329,18 @@ impl ProtoArray { .get_mut(index) .ok_or(Error::InvalidNodeIndex(index))?; - if node.root == latest_valid_ancestor_root { - // It might be new knowledge that this block is valid, ensure that it and all - // ancestors are marked as valid. - self.propagate_execution_payload_validation(index)?; - break; + match node.execution_status { + ExecutionStatus::Valid(hash) + | ExecutionStatus::Invalid(hash) + | ExecutionStatus::Unknown(hash) => { + if hash == latest_valid_ancestor_hash { + // It might be new knowledge that this block is valid, ensure that it and all + // ancestors are marked as valid. 
+ self.propagate_execution_payload_validation(index)?; + break; + } + } + ExecutionStatus::Irrelevant(_) => break, } match &node.execution_status { diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 14c13b37a69..1eaa7f8f65c 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -156,11 +156,11 @@ impl ProtoArrayForkChoice { pub fn process_execution_payload_invalidation( &mut self, - invalid_root: Hash256, + head_block_root: Hash256, latest_valid_ancestor_root: Hash256, ) -> Result<(), String> { self.proto_array - .propagate_execution_payload_invalidation(invalid_root, latest_valid_ancestor_root) + .propagate_execution_payload_invalidation(head_block_root, latest_valid_ancestor_root) .map_err(|e| format!("Failed to process invalid payload: {:?}", e)) } From e32d95e844c6c7ed57701ca6e0465ecb67699f5a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 2 Dec 2021 16:19:36 +1100 Subject: [PATCH 11/92] Add simple invalidation test --- .../tests/payload_invalidation.rs | 79 ++++++++++++++++--- .../src/test_utils/handle_rpc.rs | 10 +++ .../execution_layer/src/test_utils/mod.rs | 5 ++ 3 files changed, 81 insertions(+), 13 deletions(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index e9432f333de..4d89134495f 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -11,9 +11,11 @@ const VALIDATOR_COUNT: usize = 32; type E = MainnetEthSpec; +#[derive(PartialEq)] enum Payload { Valid, - Invalid, + Invalid { latest_valid_hash: Option }, + Syncing, } struct InvalidPayloadRig { @@ -45,6 +47,23 @@ impl InvalidPayloadRig { } } + fn block_hash(&self, block_root: Hash256) -> Hash256 { + self.harness + .chain + .get_block(&block_root) + .unwrap() + .unwrap() + .message() + .body() + 
.execution_payload() + .unwrap() + .block_hash + } + + fn fork_choice(&self) { + self.harness.chain.fork_choice().unwrap(); + } + fn move_to_terminal_block(&self) { let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); mock_execution_layer @@ -64,22 +83,23 @@ impl InvalidPayloadRig { let block_root = block.canonical_root(); match is_valid { - Payload::Valid => { - mock_execution_layer.server.full_payload_verification(); + Payload::Valid | Payload::Syncing => { + if is_valid == Payload::Syncing { + mock_execution_layer.server.all_payloads_syncing(); + } else { + mock_execution_layer.server.full_payload_verification(); + } self.harness.process_block(slot, block.clone()).unwrap(); self.valid_blocks.insert(block_root); + // TODO: check syncing blocks are optimistic. } - Payload::Invalid => { - let parent = self - .harness - .chain - .get_block(&block.message().parent_root()) - .unwrap() - .unwrap(); - let parent_payload = parent.message().body().execution_payload().unwrap(); + Payload::Invalid { latest_valid_hash } => { + let latest_valid_hash = latest_valid_hash + .unwrap_or_else(|| self.block_hash(block.message().parent_root())); + mock_execution_layer .server - .all_payloads_invalid(parent_payload.block_hash); + .all_payloads_invalid(latest_valid_hash); match self.harness.process_block(slot, block.clone()) { Err(BlockError::ExecutionPayloadError( @@ -105,7 +125,9 @@ fn invalid_during_processing() { let roots = &[ rig.import_block(Payload::Valid), - rig.import_block(Payload::Invalid), + rig.import_block(Payload::Invalid { + latest_valid_hash: None, + }), rig.import_block(Payload::Valid), ]; @@ -117,3 +139,34 @@ fn invalid_during_processing() { let head = rig.harness.chain.head_info().unwrap(); assert_eq!(head.block_root, roots[2]); } + +#[test] +fn invalid_after_optimistic_sync() { + let mut rig = InvalidPayloadRig::new(); + rig.move_to_terminal_block(); + + let mut roots = vec![ + rig.import_block(Payload::Syncing), + 
rig.import_block(Payload::Syncing), + rig.import_block(Payload::Syncing), + ]; + + for root in &roots { + assert!(rig.harness.chain.get_block(root).unwrap().is_some()); + } + + // 2 should be the head. + let head = rig.harness.chain.head_info().unwrap(); + assert_eq!(head.block_root, roots[2]); + + roots.push(rig.import_block(Payload::Invalid { + latest_valid_hash: Some(rig.block_hash(roots[1])), + })); + + // Running fork choice is necessary since a block has been invalidated. + rig.fork_choice(); + + // 1 should be the head, since 2 was invalidated. + let head = rig.harness.chain.head_info().unwrap(); + assert_eq!(head.block_root, roots[1]); +} diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index e3b2875cf11..d327741fd32 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -77,6 +77,16 @@ pub async fn handle_rpc( status: ExecutePayloadResponseStatus::Invalid { latest_valid_hash }, message: None, }, + FixedPayloadResponse::Syncing => { + // Try to import the block, ignore the response. 
+ ctx.execution_block_generator + .write() + .execute_payload(request.into()); + ExecutePayloadResponse { + status: ExecutePayloadResponseStatus::Syncing, + message: None, + } + } }; let (status, latest_valid_hash) = match response.status { diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index b7f0b98a9f6..8fe69193cbb 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -31,6 +31,7 @@ pub enum FixedPayloadResponse { None, Valid, Invalid { latest_valid_hash: Hash256 }, + Syncing, } pub struct MockServer { @@ -130,6 +131,10 @@ impl MockServer { FixedPayloadResponse::Invalid { latest_valid_hash }; } + pub fn all_payloads_syncing(&self) { + *self.ctx.fixed_payload_response.lock() = FixedPayloadResponse::Syncing; + } + pub fn full_payload_verification(&self) { *self.ctx.fixed_payload_response.lock() = FixedPayloadResponse::None; } From 9aad03146e03041984803520ff5f7428646fdc63 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 22 Dec 2021 10:26:55 +1100 Subject: [PATCH 12/92] Fix errors after rebase --- beacon_node/beacon_chain/tests/payload_invalidation.rs | 4 ++-- beacon_node/execution_layer/src/test_utils/handle_rpc.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 4d89134495f..3eed22e3b31 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -89,7 +89,7 @@ impl InvalidPayloadRig { } else { mock_execution_layer.server.full_payload_verification(); } - self.harness.process_block(slot, block.clone()).unwrap(); + self.harness.process_block(slot, block).unwrap(); self.valid_blocks.insert(block_root); // TODO: check syncing blocks are optimistic. 
} @@ -101,7 +101,7 @@ impl InvalidPayloadRig { .server .all_payloads_invalid(latest_valid_hash); - match self.harness.process_block(slot, block.clone()) { + match self.harness.process_block(slot, block) { Err(BlockError::ExecutionPayloadError( ExecutionPayloadError::RejectedByExecutionEngine, )) => (), diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index d327741fd32..3729d669530 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -75,7 +75,7 @@ pub async fn handle_rpc( } FixedPayloadResponse::Invalid { latest_valid_hash } => ExecutePayloadResponse { status: ExecutePayloadResponseStatus::Invalid { latest_valid_hash }, - message: None, + validation_error: None, }, FixedPayloadResponse::Syncing => { // Try to import the block, ignore the response. @@ -84,7 +84,7 @@ pub async fn handle_rpc( .execute_payload(request.into()); ExecutePayloadResponse { status: ExecutePayloadResponseStatus::Syncing, - message: None, + validation_error: None, } } }; From 1e86e2229f659289106f020593cf6eb2d4ecc5f9 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 8 Feb 2022 13:51:57 +1100 Subject: [PATCH 13/92] Unify invalid payload processing --- beacon_node/beacon_chain/src/beacon_chain.rs | 76 +++++++++++++++++++ beacon_node/beacon_chain/src/errors.rs | 3 + .../beacon_chain/src/execution_payload.rs | 33 +++----- .../tests/payload_invalidation.rs | 2 +- 4 files changed, 90 insertions(+), 24 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index a65a943b93c..012c1cdf1f7 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3173,6 +3173,82 @@ impl BeaconChain { Ok((block, state)) } + /// This method must be called whenever an execution engine indicates that a payload is + /// invalid. 
+ /// + /// All beacon blocks between `latest_root` and `latest_valid_hash` will be + invalidated in fork choice. Conversely, the `latest_valid_hash` and all ancestors will be + validated. + /// + /// ## Notes + /// + /// Use these rules to set `latest_root`: + /// + /// - When `forkchoiceUpdated` indicates an invalid block, set `latest_root` to be the + /// block root that was the head of the chain when `forkchoiceUpdated` was called. + /// - When `executePayload` returns an invalid block *during* block import, set + /// `latest_root` to be the parent of the beacon block containing the invalid + /// payload (because the block containing the payload is not present in fork choice). + /// - When `executePayload` returns an invalid block *after* block import, set + /// `latest_root` to be the root of the beacon block containing the invalid payload. + pub fn process_invalid_execution_payload( + &self, + latest_root: Hash256, + latest_valid_hash: Hash256, + ) -> Result<(), Error> { + debug!( + self.log, + "Invalid execution payload in block"; + "latest_valid_hash" => ?latest_valid_hash, + "latest_root" => ?latest_root, + ); + + // Allow fork choice to invalidate blocks. + let result = self + .fork_choice + .write() + .on_invalid_execution_payload(latest_root, latest_valid_hash); + + if let Err(e) = &result { + crit!( + self.log, + "Failed to process invalid payload"; + "error" => ?e, + "latest_valid_hash" => ?latest_valid_hash, + "latest_root" => ?latest_root, + ); + } + + // Run fork choice since it's possible that the payload invalidation might result in a new + // head. + self.fork_choice()?; + + // Check to ensure the justified checkpoint does not have an invalid payload. If so, try + // to kill the client. 
+ let head_info = self.head_info()?; + let justified_root = head_info.current_justified_checkpoint.root; + if let Some(proto_block) = self.fork_choice.read().get_block(&justified_root) { + if proto_block.execution_status.is_invalid() { + let mut shutdown_sender = self.shutdown_sender(); + shutdown_sender + .try_send(ShutdownReason::Failure( + "Justified block has an invalid execution payload.", + )) + .map_err(BeaconChainError::InvalidFinalizedPayloadShutdownError)?; + + // Return an error here to try and prevent progression by upstream functions. + return Err(Error::JustifiedHasInvalidPayload { justified_root }); + } + } else { + crit!( + self.log, + "Justified block is not in fork choice"; + ); + } + + result.map_err(Into::into) + } + /// Execute the fork choice algorithm and enthrone the result as the canonical head. pub fn fork_choice(&self) -> Result<(), Error> { metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 6920c06039d..7ac06a86d7e 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -143,6 +143,9 @@ pub enum BeaconChainError { HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), InvalidFinalizedPayloadShutdownError(TrySendError), + JustifiedHasInvalidPayload { + justified_root: Hash256, + }, } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index b12af1e2816..0109c0c65ec 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -14,7 +14,7 @@ use crate::{ use execution_layer::ExecutePayloadResponseStatus; use fork_choice::PayloadVerificationStatus; use proto_array::{Block as ProtoBlock, ExecutionStatus}; -use slog::{crit, debug, warn}; +use slog::debug; use slot_clock::SlotClock; use 
state_processing::per_block_processing::{ compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, @@ -61,29 +61,16 @@ pub fn execute_payload( Ok(status) => match status { ExecutePayloadResponseStatus::Valid { .. } => Ok(PayloadVerificationStatus::Verified), ExecutePayloadResponseStatus::Invalid { latest_valid_hash } => { - let head_block_root = block.parent_root(); - match chain - .fork_choice - .write() - .on_invalid_execution_payload(head_block_root, latest_valid_hash) - { - Ok(()) => warn!( - chain.log, - "Invalid execution payload in block"; - "latest_valid_hash" => ?latest_valid_hash, - "root" => ?block_root, - ), - Err(e) => { - crit!( - chain.log, - "Failed to process invalid payload"; - "latest_valid_hash" => ?latest_valid_hash, - "root" => ?block_root, - ); + debug!( + chain.log, + "Invalid execution payload in block"; + "block_root" => ?block_root, + ); + // This block has not yet been applied to fork choice, so the latest block that was + // imported to fork choice was the parent. 
+ let latest_root = block.parent_root(); + chain.process_invalid_execution_payload(latest_root, latest_valid_hash)?; - return Err(BeaconChainError::from(e).into()); - } - } Err(ExecutionPayloadError::RejectedByExecutionEngine.into()) } ExecutePayloadResponseStatus::Syncing => Ok(PayloadVerificationStatus::NotVerified), diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 3eed22e3b31..aab696ac183 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -28,7 +28,7 @@ impl InvalidPayloadRig { fn new() -> Self { let mut spec = E::default_spec(); spec.altair_fork_epoch = Some(Epoch::new(0)); - spec.merge_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec) From d4f67a885c49d555671c4cef113bc6fdecd43f4c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 8 Feb 2022 14:40:12 +1100 Subject: [PATCH 14/92] Tidy, improve tests --- .../tests/payload_invalidation.rs | 61 +++++++++++++++---- .../src/proto_array_fork_choice.rs | 8 +++ 2 files changed, 57 insertions(+), 12 deletions(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index aab696ac183..80ef3205fef 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -4,7 +4,6 @@ use beacon_chain::{ test_utils::{BeaconChainHarness, EphemeralHarnessType}, BlockError, ExecutionPayloadError, }; -use std::collections::HashSet; use types::*; const VALIDATOR_COUNT: usize = 32; @@ -20,8 +19,6 @@ enum Payload { struct InvalidPayloadRig { harness: BeaconChainHarness>, - valid_blocks: HashSet, - invalid_blocks: HashSet, } impl InvalidPayloadRig { @@ -40,11 +37,7 @@ impl InvalidPayloadRig { // Move to slot 1. 
harness.advance_slot(); - Self { - harness, - valid_blocks: <_>::default(), - invalid_blocks: <_>::default(), - } + Self { harness } } fn block_hash(&self, block_root: Hash256) -> Hash256 { @@ -89,9 +82,28 @@ impl InvalidPayloadRig { } else { mock_execution_layer.server.full_payload_verification(); } - self.harness.process_block(slot, block).unwrap(); - self.valid_blocks.insert(block_root); - // TODO: check syncing blocks are optimistic. + let root = self.harness.process_block(slot, block.clone()).unwrap(); + + let execution_status = self + .harness + .chain + .fork_choice + .read() + .get_block(&root.into()) + .unwrap() + .execution_status; + + match is_valid { + Payload::Syncing => assert!(execution_status.is_not_verified()), + Payload::Valid => assert!(execution_status.is_valid()), + Payload::Invalid { .. } => unreachable!(), + } + + assert_eq!( + self.harness.chain.get_block(&block_root).unwrap().unwrap(), + block, + "block from db must match block imported" + ); } Payload::Invalid { latest_valid_hash } => { let latest_valid_hash = latest_valid_hash @@ -110,7 +122,20 @@ impl InvalidPayloadRig { } Ok(_) => panic!("block with invalid payload was imported"), }; - self.invalid_blocks.insert(block_root); + + assert!( + self.harness + .chain + .fork_choice + .read() + .get_block(&block_root) + .is_none(), + "invalid block must not exist in fork choice" + ); + assert!( + self.harness.chain.get_block(&block_root).unwrap().is_none(), + "invalid block cannot be accessed via get_block" + ); } } @@ -118,6 +143,18 @@ impl InvalidPayloadRig { } } +#[test] +fn payload_valid_invalid_syncing() { + let mut rig = InvalidPayloadRig::new(); + rig.move_to_terminal_block(); + + rig.import_block(Payload::Valid); + rig.import_block(Payload::Invalid { + latest_valid_hash: None, + }); + rig.import_block(Payload::Syncing); +} + #[test] fn invalid_during_processing() { let mut rig = InvalidPayloadRig::new(); diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs 
b/consensus/proto_array/src/proto_array_fork_choice.rs index 1eaa7f8f65c..c27e14dca01 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -50,6 +50,14 @@ impl ExecutionStatus { } } + pub fn is_valid(&self) -> bool { + matches!(self, ExecutionStatus::Valid(_)) + } + + pub fn is_not_verified(&self) -> bool { + matches!(self, ExecutionStatus::Unknown(_)) + } + pub fn is_invalid(&self) -> bool { matches!(self, ExecutionStatus::Invalid(_)) } From c98caad1e11c130911d756fe968a2e94b7f531e3 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 9 Feb 2022 07:36:15 +1100 Subject: [PATCH 15/92] Add test for shutdown --- beacon_node/beacon_chain/src/beacon_chain.rs | 37 +++-- beacon_node/beacon_chain/src/errors.rs | 2 +- beacon_node/beacon_chain/src/lib.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 15 +- .../tests/payload_invalidation.rs | 153 ++++++++++++++++-- common/task_executor/src/lib.rs | 2 +- 6 files changed, 181 insertions(+), 30 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 012c1cdf1f7..ffbf66336cf 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -112,6 +112,10 @@ pub const FORK_CHOICE_DB_KEY: Hash256 = Hash256::zero(); /// Defines how old a block can be before it's no longer a candidate for the early attester cache. const EARLY_ATTESTER_CACHE_HISTORIC_SLOTS: u64 = 4; +/// Reported to the user when the justified block has an invalid execution payload. +pub const INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON: &str = + "Justified block has an invalid execution payload."; + /// Defines the behaviour when a block/block-root for a skipped slot is requested. pub enum WhenSlotSkipped { /// If the slot is a skip slot, return `None`. @@ -3203,13 +3207,12 @@ impl BeaconChain { "latest_root" => ?latest_root, ); - // Allow fork choice to invalidate blocks. 
- let result = self + // Update fork choice. + if let Err(e) = self .fork_choice .write() - .on_invalid_execution_payload(latest_root, latest_valid_hash); - - if let Err(e) = &result { + .on_invalid_execution_payload(latest_root, latest_valid_hash) + { crit!( self.log, "Failed to process invalid payload"; @@ -3221,7 +3224,16 @@ impl BeaconChain { // Run fork choice since it's possible that the payload invalidation might result in a new // head. - self.fork_choice()?; + // + // Don't return early though, since invalidating the justified checkpoint might cause an + // error here. + if let Err(e) = self.fork_choice() { + crit!( + self.log, + "Failed to run fork choice routine"; + "error" => ?e, + ); + } // Check to ensure the justified checkpoint does not have an invalid payload. If so, try // to kill the client. @@ -3229,15 +3241,22 @@ impl BeaconChain { let justified_root = head_info.current_justified_checkpoint.root; if let Some(proto_block) = self.fork_choice.read().get_block(&justified_root) { if proto_block.execution_status.is_invalid() { + crit!( + self.log, + "The justified checkpoint is invalid"; + "msg" => "ensure you are not connected to a malicious network. this error is not \ + recoverable, please reach out to the developers for assistance." + ); + let mut shutdown_sender = self.shutdown_sender(); shutdown_sender .try_send(ShutdownReason::Failure( - "Justified block has an invalid execution payload.", + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, )) .map_err(BeaconChainError::InvalidFinalizedPayloadShutdownError)?; // Return an error here to try and prevent progression by upstream functions. - return Err(Error::JustifiedHasInvalidPayload { justified_root }); + return Err(Error::JustifiedPayloadInvalid { justified_root }); } } else { crit!( @@ -3246,7 +3265,7 @@ impl BeaconChain { ); } - result.map_err(Into::into) + Ok(()) } /// Execute the fork choice algorithm and enthrone the result as the canonical head. 
diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 7ac06a86d7e..7aea717877d 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -143,7 +143,7 @@ pub enum BeaconChainError { HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), InvalidFinalizedPayloadShutdownError(TrySendError), - JustifiedHasInvalidPayload { + JustifiedPayloadInvalid { justified_root: Hash256, }, } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index d41c1a5cc52..74649bdee36 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -41,7 +41,7 @@ mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, ForkChoiceError, HeadInfo, HeadSafetyStatus, StateSkipConfig, WhenSlotSkipped, - MAXIMUM_GOSSIP_CLOCK_DISPARITY, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; pub use self::beacon_snapshot::BeaconSnapshot; pub use self::chain_config::ChainConfig; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 574895296dd..0142a0f0bb1 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -432,7 +432,7 @@ where spec: chain.spec.clone(), chain: Arc::new(chain), validator_keypairs, - shutdown_receiver, + shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), mock_execution_layer: self.mock_execution_layer, execution_layer_runtime: self.execution_layer_runtime, rng: make_rng(), @@ -449,7 +449,7 @@ pub struct BeaconChainHarness { pub chain: Arc>, pub spec: ChainSpec, - pub shutdown_receiver: Receiver, + pub shutdown_receiver: Arc>>, pub mock_execution_layer: Option>, pub execution_layer_runtime: Option, @@ -502,6 +502,17 @@ where epoch.start_slot(E::slots_per_epoch()).into() } + pub 
fn shutdown_reasons(&self) -> Vec { + let mutex = self.shutdown_receiver.clone(); + let mut receiver = mutex.lock(); + std::iter::from_fn(move || match receiver.try_next() { + Ok(Some(s)) => Some(s), + Ok(None) => panic!("shutdown sender dropped"), + Err(_) => None, + }) + .collect() + } + pub fn get_current_state(&self) -> BeaconState { self.chain.head().unwrap().beacon_state } diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 80ef3205fef..e10d3be769a 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -2,15 +2,18 @@ use beacon_chain::{ test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BlockError, ExecutionPayloadError, + BeaconChainError, BlockError, ExecutionPayloadError, HeadInfo, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; +use proto_array::ExecutionStatus; +use task_executor::ShutdownReason; use types::*; const VALIDATOR_COUNT: usize = 32; type E = MainnetEthSpec; -#[derive(PartialEq)] +#[derive(PartialEq, Clone)] enum Payload { Valid, Invalid { latest_valid_hash: Option }, @@ -19,6 +22,7 @@ enum Payload { struct InvalidPayloadRig { harness: BeaconChainHarness>, + enable_attestations: bool, } impl InvalidPayloadRig { @@ -37,7 +41,15 @@ impl InvalidPayloadRig { // Move to slot 1. 
harness.advance_slot(); - Self { harness } + Self { + harness, + enable_attestations: false, + } + } + + fn enable_attestations(mut self) -> Self { + self.enable_attestations = true; + self } fn block_hash(&self, block_root: Hash256) -> Hash256 { @@ -53,10 +65,24 @@ impl InvalidPayloadRig { .block_hash } + fn execution_status(&self, block_root: Hash256) -> ExecutionStatus { + self.harness + .chain + .fork_choice + .read() + .get_block(&block_root) + .unwrap() + .execution_status + } + fn fork_choice(&self) { self.harness.chain.fork_choice().unwrap(); } + fn head_info(&self) -> HeadInfo { + self.harness.chain.head_info().unwrap() + } + fn move_to_terminal_block(&self) { let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); mock_execution_layer @@ -66,13 +92,36 @@ impl InvalidPayloadRig { .unwrap(); } + fn move_to_first_justification(&mut self, is_valid: Payload) { + let slots_till_justification = E::slots_per_epoch() * 3; + for _ in 0..slots_till_justification { + self.import_block(is_valid.clone()); + } + + let justified_checkpoint = self.head_info().current_justified_checkpoint; + assert_eq!(justified_checkpoint.epoch, 2); + } + fn import_block(&mut self, is_valid: Payload) -> Hash256 { + self.import_block_parametric(is_valid, |error| { + matches!( + error, + BlockError::ExecutionPayloadError(ExecutionPayloadError::RejectedByExecutionEngine) + ) + }) + } + + fn import_block_parametric) -> bool>( + &mut self, + is_valid: Payload, + evaluate_error: F, + ) -> Hash256 { let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); let head = self.harness.chain.head().unwrap(); let state = head.beacon_state; let slot = state.slot() + 1; - let (block, _post_state) = self.harness.make_block(state, slot); + let (block, post_state) = self.harness.make_block(state, slot); let block_root = block.canonical_root(); match is_valid { @@ -84,14 +133,18 @@ impl InvalidPayloadRig { } let root = self.harness.process_block(slot, 
block.clone()).unwrap(); - let execution_status = self - .harness - .chain - .fork_choice - .read() - .get_block(&root.into()) - .unwrap() - .execution_status; + if self.enable_attestations { + let all_validators: Vec = (0..VALIDATOR_COUNT).collect(); + self.harness.attest_block( + &post_state, + block.state_root(), + block_root.into(), + &block, + &all_validators, + ); + } + + let execution_status = self.execution_status(root.into()); match is_valid { Payload::Syncing => assert!(execution_status.is_not_verified()), @@ -114,9 +167,7 @@ impl InvalidPayloadRig { .all_payloads_invalid(latest_valid_hash); match self.harness.process_block(slot, block) { - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::RejectedByExecutionEngine, - )) => (), + Err(error) if evaluate_error(&error) => (), Err(other) => { panic!("expected invalid payload, got {:?}", other) } @@ -144,7 +195,7 @@ impl InvalidPayloadRig { } #[test] -fn payload_valid_invalid_syncing() { +fn valid_invalid_syncing() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); @@ -155,6 +206,76 @@ fn payload_valid_invalid_syncing() { rig.import_block(Payload::Syncing); } +#[test] +fn invalid_payload_invalidates_parent() { + let mut rig = InvalidPayloadRig::new(); + rig.move_to_terminal_block(); + + let roots = vec![ + rig.import_block(Payload::Syncing), + rig.import_block(Payload::Syncing), + rig.import_block(Payload::Syncing), + ]; + + let latest_valid_hash = rig.block_hash(roots[0]); + + rig.import_block(Payload::Invalid { + latest_valid_hash: Some(latest_valid_hash), + }); + + assert!(rig.execution_status(roots[0]).is_valid()); + assert!(rig.execution_status(roots[1]).is_invalid()); + assert!(rig.execution_status(roots[2]).is_invalid()); + + assert_eq!(rig.head_info().block_root, roots[0]); +} + +#[test] +fn justified_checkpoint_becomes_invalid() { + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + 
rig.move_to_first_justification(Payload::Syncing); + + let justified_checkpoint = rig.head_info().current_justified_checkpoint; + let parent_root_of_justified = rig + .harness + .chain + .get_block(&justified_checkpoint.root) + .unwrap() + .unwrap() + .parent_root(); + + // No service should have triggered a shutdown, yet. + assert!(rig.harness.shutdown_reasons().is_empty()); + + // Import a block that will invalidate the justified checkpoint. + rig.import_block_parametric( + Payload::Invalid { + latest_valid_hash: Some(parent_root_of_justified), + }, + |error| { + matches!( + error, + // The block import should fail since the beacon chain knows the justified payload + // is invalid. + BlockError::BeaconChainError(BeaconChainError::JustifiedPayloadInvalid { .. }) + ) + }, + ); + + // The beacon chain should have triggered a shutdown. + assert_eq!( + rig.harness.shutdown_reasons(), + vec![ShutdownReason::Failure( + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON + )] + ); +} + +/* + * TODO: add a test where the latest_valid_hash is a pre-finalization hash. + */ + #[test] fn invalid_during_processing() { let mut rig = InvalidPayloadRig::new(); diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index 6874966abd1..2d3e941a3eb 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -7,7 +7,7 @@ use std::sync::Weak; use tokio::runtime::Runtime; /// Provides a reason when Lighthouse is shut down. -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, PartialEq)] pub enum ShutdownReason { /// The node shut down successfully. 
Success(&'static str), From 6cd0ac175c065dae429c82373a48c2d698c21356 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 9 Feb 2022 07:56:59 +1100 Subject: [PATCH 16/92] Add test for pre-finalized invalid --- .../tests/payload_invalidation.rs | 56 ++++++++++++++++--- consensus/proto_array/src/proto_array.rs | 3 + 2 files changed, 52 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index e10d3be769a..c01377a5443 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -2,7 +2,7 @@ use beacon_chain::{ test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainError, BlockError, ExecutionPayloadError, HeadInfo, + BeaconChainError, BlockError, ExecutionPayloadError, HeadInfo, WhenSlotSkipped, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use proto_array::ExecutionStatus; @@ -92,11 +92,15 @@ impl InvalidPayloadRig { .unwrap(); } - fn move_to_first_justification(&mut self, is_valid: Payload) { - let slots_till_justification = E::slots_per_epoch() * 3; - for _ in 0..slots_till_justification { + fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) { + for _ in 0..num_blocks { self.import_block(is_valid.clone()); } + } + + fn move_to_first_justification(&mut self, is_valid: Payload) { + let slots_till_justification = E::slots_per_epoch() * 3; + self.build_blocks(slots_till_justification, is_valid); let justified_checkpoint = self.head_info().current_justified_checkpoint; assert_eq!(justified_checkpoint.epoch, 2); @@ -272,9 +276,47 @@ fn justified_checkpoint_becomes_invalid() { ); } -/* - * TODO: add a test where the latest_valid_hash is a pre-finalization hash. 
- */ +#[test] +fn ancient_latest_valid_hash() { + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + rig.build_blocks(E::slots_per_epoch() * 4, Payload::Syncing); + + assert_eq!(rig.head_info().finalized_checkpoint.epoch, 2); + + let ancient_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + + // No service should have triggered a shutdown, yet. + assert!(rig.harness.shutdown_reasons().is_empty()); + + // Import a block that will invalidate the justified checkpoint. + rig.import_block_parametric( + Payload::Invalid { + latest_valid_hash: Some(ancient_block_root), + }, + |error| { + matches!( + error, + // The block import should fail since the beacon chain knows the justified payload + // is invalid. + BlockError::BeaconChainError(BeaconChainError::JustifiedPayloadInvalid { .. }) + ) + }, + ); + + // The beacon chain should have triggered a shutdown. + assert_eq!( + rig.harness.shutdown_reasons(), + vec![ShutdownReason::Failure( + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON + )] + ); +} #[test] fn invalid_during_processing() { diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 53471874ec9..cbc9c75655f 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -368,6 +368,9 @@ impl ProtoArray { if let Some(parent_index) = node.parent { index = parent_index } else { + // The root of the block tree has been reached (aka the finalized block), without + // matching `latest_valid_ancestor_hash`. It's not possible or useful to go any + // further back: the finalized checkpoint is invalid so all is lost! 
break; } } From b6453f7cf77b8509384bc9cd729a6cc2ba547ce6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 9 Feb 2022 15:32:24 +1100 Subject: [PATCH 17/92] Add another test --- .../tests/payload_invalidation.rs | 68 +++++++++++++++---- consensus/proto_array/src/proto_array.rs | 2 - 2 files changed, 56 insertions(+), 14 deletions(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index c01377a5443..41f6cc30f4b 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -92,10 +92,10 @@ impl InvalidPayloadRig { .unwrap(); } - fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) { - for _ in 0..num_blocks { - self.import_block(is_valid.clone()); - } + fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec { + (0..num_blocks) + .map(|_| self.import_block(is_valid.clone())) + .collect() } fn move_to_first_justification(&mut self, is_valid: Payload) { @@ -115,6 +115,13 @@ impl InvalidPayloadRig { }) } + fn block_root_at_slot(&self, slot: Slot) -> Option { + self.harness + .chain + .block_root_at_slot(slot, WhenSlotSkipped::None) + .unwrap() + } + fn import_block_parametric) -> bool>( &mut self, is_valid: Payload, @@ -277,19 +284,14 @@ fn justified_checkpoint_becomes_invalid() { } #[test] -fn ancient_latest_valid_hash() { +fn pre_finalized_latest_valid_hash() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); rig.build_blocks(E::slots_per_epoch() * 4, Payload::Syncing); assert_eq!(rig.head_info().finalized_checkpoint.epoch, 2); - let ancient_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); + let pre_finalized_block_root = rig.block_root_at_slot(Slot::new(1)).unwrap(); // No service should have triggered a shutdown, yet. 
assert!(rig.harness.shutdown_reasons().is_empty()); @@ -297,7 +299,7 @@ fn ancient_latest_valid_hash() { // Import a block that will invalidate the justified checkpoint. rig.import_block_parametric( Payload::Invalid { - latest_valid_hash: Some(ancient_block_root), + latest_valid_hash: Some(pre_finalized_block_root), }, |error| { matches!( @@ -318,6 +320,48 @@ fn ancient_latest_valid_hash() { ); } +/* + * TODO: test with a junk `latest_valid_hash`. + */ + +#[test] +fn latest_valid_hash_will_validate() { + const LATEST_VALID_SLOT: u64 = 3; + + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + let blocks = rig.build_blocks(4, Payload::Syncing); + + let latest_valid_root = rig + .block_root_at_slot(Slot::new(LATEST_VALID_SLOT)) + .unwrap(); + let latest_valid_hash = rig.block_hash(latest_valid_root); + + rig.import_block(Payload::Invalid { + latest_valid_hash: Some(latest_valid_hash), + }); + + assert_eq!(rig.head_info().slot, LATEST_VALID_SLOT); + + for slot in 0..=4 { + let slot = Slot::new(slot); + let root = if slot > 0 { + // If not the genesis slot, check the blocks we just produced. + blocks[slot.as_usize() - 1] + } else { + // Genesis slot + rig.block_root_at_slot(slot).unwrap() + }; + let execution_status = rig.execution_status(root); + + if slot > LATEST_VALID_SLOT { + assert!(execution_status.is_invalid()) + } else { + assert!(execution_status.is_valid()) + } + } +} + #[test] fn invalid_during_processing() { let mut rig = InvalidPayloadRig::new(); diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index cbc9c75655f..51b78edc9e9 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -295,8 +295,6 @@ impl ProtoArray { } // An ancestor of the valid payload was invalid. This is a serious error which // indicates a consensus failure in the execution node. This is unrecoverable. - // - // TODO(paul): relax this? 
ExecutionStatus::Invalid(ancestor_payload_block_hash) => { return Err(Error::InvalidAncestorOfValidPayload { ancestor_block_root: node.root, From a88815ff7dd0c5e039ebfc46751527c1ff0e8667 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 10 Feb 2022 17:39:34 +1100 Subject: [PATCH 18/92] Add handling for unknown latest valid hash --- beacon_node/beacon_chain/src/beacon_chain.rs | 7 ++ .../tests/payload_invalidation.rs | 86 +++++++++++++------ consensus/fork_choice/src/fork_choice.rs | 9 ++ consensus/proto_array/src/error.rs | 4 + consensus/proto_array/src/proto_array.rs | 60 ++++++++++++- .../src/proto_array_fork_choice.rs | 28 +++--- 6 files changed, 148 insertions(+), 46 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index ffbf66336cf..906a9ba793f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3239,6 +3239,13 @@ impl BeaconChain { // to kill the client. let head_info = self.head_info()?; let justified_root = head_info.current_justified_checkpoint.root; + // De-alias 0x00..00 to the genesis block. 
+        let justified_root = if justified_root == Hash256::zero() {
+            self.genesis_block_root
+        } else {
+            justified_root
+        };
+
         if let Some(proto_block) = self.fork_choice.read().get_block(&justified_root) {
             if proto_block.execution_status.is_invalid() {
                 crit!(
diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs
index 41f6cc30f4b..5ef85b26b29 100644
--- a/beacon_node/beacon_chain/tests/payload_invalidation.rs
+++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs
@@ -180,7 +180,7 @@ impl InvalidPayloadRig {
         match self.harness.process_block(slot, block) {
             Err(error) if evaluate_error(&error) => (),
             Err(other) => {
-                panic!("expected invalid payload, got {:?}", other)
+                panic!("evaluate_error returned false with {:?}", other)
             }
             Ok(_) => panic!("block with invalid payload was imported"),
         };
@@ -205,6 +205,7 @@ impl InvalidPayloadRig {
     }
 }
 
+/// Simple test of the different import types.
 #[test]
 fn valid_invalid_syncing() {
     let mut rig = InvalidPayloadRig::new();
@@ -217,6 +218,8 @@ fn valid_invalid_syncing() {
     rig.import_block(Payload::Syncing);
 }
 
+/// Ensure that an invalid payload can invalidate its parent too (given the right
+/// `latest_valid_hash`).
 #[test]
 fn invalid_payload_invalidates_parent() {
     let mut rig = InvalidPayloadRig::new();
@@ -241,6 +244,7 @@ fn invalid_payload_invalidates_parent() {
     assert_eq!(rig.head_info().block_root, roots[0]);
 }
 
+/// Ensure the client tries to exit when the justified checkpoint is invalidated.
 #[test]
 fn justified_checkpoint_becomes_invalid() {
     let mut rig = InvalidPayloadRig::new().enable_attestations();
@@ -255,6 +259,7 @@ fn justified_checkpoint_becomes_invalid() {
         .unwrap()
         .unwrap()
         .parent_root();
+    let parent_hash_of_justified = rig.block_hash(parent_root_of_justified);
 
     // No service should have triggered a shutdown, yet.
assert!(rig.harness.shutdown_reasons().is_empty()); @@ -262,7 +267,7 @@ fn justified_checkpoint_becomes_invalid() { // Import a block that will invalidate the justified checkpoint. rig.import_block_parametric( Payload::Invalid { - latest_valid_hash: Some(parent_root_of_justified), + latest_valid_hash: Some(parent_hash_of_justified), }, |error| { matches!( @@ -283,13 +288,17 @@ fn justified_checkpoint_becomes_invalid() { ); } +/// Ensure that a `latest_valid_hash` for a pre-finality block only revert a single block. #[test] fn pre_finalized_latest_valid_hash() { + let num_blocks = E::slots_per_epoch() * 4; + let finalized_epoch = 2; + let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.build_blocks(E::slots_per_epoch() * 4, Payload::Syncing); + let blocks = rig.build_blocks(num_blocks, Payload::Syncing); - assert_eq!(rig.head_info().finalized_checkpoint.epoch, 2); + assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); let pre_finalized_block_root = rig.block_root_at_slot(Slot::new(1)).unwrap(); @@ -297,33 +306,28 @@ fn pre_finalized_latest_valid_hash() { assert!(rig.harness.shutdown_reasons().is_empty()); // Import a block that will invalidate the justified checkpoint. - rig.import_block_parametric( - Payload::Invalid { - latest_valid_hash: Some(pre_finalized_block_root), - }, - |error| { - matches!( - error, - // The block import should fail since the beacon chain knows the justified payload - // is invalid. - BlockError::BeaconChainError(BeaconChainError::JustifiedPayloadInvalid { .. }) - ) - }, - ); + rig.import_block(Payload::Invalid { + latest_valid_hash: Some(pre_finalized_block_root), + }); - // The beacon chain should have triggered a shutdown. - assert_eq!( - rig.harness.shutdown_reasons(), - vec![ShutdownReason::Failure( - INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON - )] - ); -} + // The latest imported block should be the head. 
+ assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); -/* - * TODO: test with a junk `latest_valid_hash`. - */ + // The beacon chain should *not* have triggered a shutdown. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + // All blocks should still be unverified. + for i in E::slots_per_epoch() * finalized_epoch..num_blocks { + let slot = Slot::new(i); + let root = rig.block_root_at_slot(slot).unwrap(); + assert!(rig.execution_status(root).is_not_verified()); + } +} + +/// Ensure that a `latest_valid_hash` will: +/// +/// - Invalidate descendants of `latest_valid_root`. +/// - Validate `latest_valid_root` and its ancestors. #[test] fn latest_valid_hash_will_validate() { const LATEST_VALID_SLOT: u64 = 3; @@ -362,6 +366,32 @@ fn latest_valid_hash_will_validate() { } } +/// Check behaviour when the `latest_valid_hash` is a junk value. +#[test] +fn latest_valid_hash_is_junk() { + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + rig.build_blocks(E::slots_per_epoch() * 4, Payload::Syncing); + + assert_eq!(rig.head_info().finalized_checkpoint.epoch, 2); + + // No service should have triggered a shutdown, yet. + assert!(rig.harness.shutdown_reasons().is_empty()); + + let junk_hash = Hash256::from_low_u64_be(42); + rig.import_block(Payload::Invalid { + latest_valid_hash: Some(junk_hash), + }); + + // The beacon chain should have triggered a shutdown. + assert_eq!( + rig.harness.shutdown_reasons(), + vec![ShutdownReason::Failure( + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON + )] + ); +} + #[test] fn invalid_during_processing() { let mut rig = InvalidPayloadRig::new(); diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 463cadec245..868549b20bd 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -886,6 +886,15 @@ where } } + /// See `ProtoArray` documentation. 
+ pub fn execution_block_hash_to_beacon_block_root<'a>( + &'a self, + block_hash: &Hash256, + ) -> Option { + self.proto_array + .execution_block_hash_to_beacon_block_root(block_hash) + } + /// Return `true` if `block_root` is equal to the finalized root, or a known descendant of it. pub fn is_descendant_of_finalized(&self, block_root: Hash256) -> bool { self.proto_array diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index 185fbf9ce89..cdf80a9accb 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -37,6 +37,10 @@ pub enum Error { InvalidJustifiedCheckpointExecutionStatus { justified_root: Hash256, }, + UnknownLatestValidAncestorHash { + block_root: Hash256, + latest_valid_ancestor_hash: Hash256, + }, } #[derive(Clone, PartialEq, Debug)] diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 51b78edc9e9..f8c06b33b6e 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -319,6 +319,16 @@ impl ProtoArray { .ok_or(Error::NodeUnknown(head_block_root))?; let first_potential_descendant = index + 1; + // Set to `true` if both conditions are satisfied: + // + // 1. The `head_block_root` is a descendant of `latest_valid_ancestor_hash` + // 2. The `latest_valid_ancestor_hash` is equal to or a descendant of the finalized block. + let latest_valid_ancestor_is_descendant = self + .execution_block_hash_to_beacon_block_root(&latest_valid_ancestor_hash) + .map_or(false, |ancestor_root| { + self.is_descendant(ancestor_root, head_block_root) + }); + // Collect all *ancestors* which were declared invalid since they reside between the // `invalid_root` and the `latest_valid_ancestor_root`. 
         loop {
@@ -331,7 +341,19 @@ impl ProtoArray {
                 ExecutionStatus::Valid(hash)
                 | ExecutionStatus::Invalid(hash)
                 | ExecutionStatus::Unknown(hash) => {
-                    if hash == latest_valid_ancestor_hash {
+                    // If we're no longer processing the `head_block_root` and the last valid
+                    // ancestor is not a known descendant, exit now with an error.
+                    //
+                    // In effect, this means that if an unknown hash (junk or pre-finalization) is
+                    // supplied, we only invalidate a single block and no ancestors. The alternative
+                    // is to invalidate *all* ancestors, which would likely involve shutting down
+                    // the client due to an invalid justified checkpoint.
+                    if !latest_valid_ancestor_is_descendant && node.root != head_block_root {
+                        return Err(Error::UnknownLatestValidAncestorHash {
+                            block_root: node.root,
+                            latest_valid_ancestor_hash,
+                        });
+                    } else if hash == latest_valid_ancestor_hash {
                         // It might be new knowledge that this block is valid, ensure that it and all
                         // ancestors are marked as valid.
                         self.propagate_execution_payload_validation(index)?;
@@ -689,6 +711,42 @@ impl ProtoArray {
         self.iter_nodes(block_root)
             .map(|node| (node.root, node.slot))
     }
+
+    /// Returns `true` if the `descendant_root` has an ancestor with `ancestor_root`. Always
+    /// returns `false` if either input roots are unknown.
+    ///
+    /// ## Notes
+    ///
+    /// Still returns `true` if `ancestor_root` is known and `ancestor_root == descendant_root`.
+    pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool {
+        self.indices
+            .get(&ancestor_root)
+            .and_then(|ancestor_index| self.nodes.get(*ancestor_index))
+            .and_then(|ancestor| {
+                self.iter_block_roots(&descendant_root)
+                    .take_while(|(_root, slot)| *slot >= ancestor.slot)
+                    .find(|(_root, slot)| *slot == ancestor.slot)
+                    .map(|(root, _slot)| root == ancestor_root)
+            })
+            .unwrap_or(false)
+    }
+
+    /// Returns the first *beacon block root* which contains an execution payload with the given
+    /// `block_hash`, if any.
+ pub fn execution_block_hash_to_beacon_block_root<'a>( + &'a self, + block_hash: &Hash256, + ) -> Option { + self.nodes + .iter() + .rev() + .find(|node| { + node.execution_status + .block_hash() + .map_or(false, |node_block_hash| node_block_hash == *block_hash) + }) + .map(|node| node.root) + } } /// A helper method to calculate the proposer boost based on the given `validator_balances`. diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index c27e14dca01..cc0548ea256 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -289,25 +289,10 @@ impl ProtoArrayForkChoice { } } - /// Returns `true` if the `descendant_root` has an ancestor with `ancestor_root`. Always - /// returns `false` if either input roots are unknown. - /// - /// ## Notes - /// - /// Still returns `true` if `ancestor_root` is known and `ancestor_root == descendant_root`. + /// See `ProtoArray` documentation. pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool { self.proto_array - .indices - .get(&ancestor_root) - .and_then(|ancestor_index| self.proto_array.nodes.get(*ancestor_index)) - .and_then(|ancestor| { - self.proto_array - .iter_block_roots(&descendant_root) - .take_while(|(_root, slot)| *slot >= ancestor.slot) - .find(|(_root, slot)| *slot == ancestor.slot) - .map(|(root, _slot)| root == ancestor_root) - }) - .unwrap_or(false) + .is_descendant(ancestor_root, descendant_root) } pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> { @@ -347,6 +332,15 @@ impl ProtoArrayForkChoice { pub fn core_proto_array_mut(&mut self) -> &mut ProtoArray { &mut self.proto_array } + + /// See `ProtoArray` documentation. 
+ pub fn execution_block_hash_to_beacon_block_root<'a>( + &'a self, + block_hash: &Hash256, + ) -> Option { + self.proto_array + .execution_block_hash_to_beacon_block_root(block_hash) + } } /// Returns a list of `deltas`, where there is one delta for each of the indices in From 679b8becfe87e6c11bca7d5bff046bd5d4ef838b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 10 Feb 2022 17:43:00 +1100 Subject: [PATCH 19/92] Fix failing test, tidy --- .../tests/payload_invalidation.rs | 26 ++++++++++++------- consensus/fork_choice/src/fork_choice.rs | 9 ------- consensus/proto_array/src/proto_array.rs | 4 +-- .../src/proto_array_fork_choice.rs | 9 ------- 4 files changed, 19 insertions(+), 29 deletions(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 5ef85b26b29..507f7c0080a 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -369,11 +369,14 @@ fn latest_valid_hash_will_validate() { /// Check behaviour when the `latest_valid_hash` is a junk value. #[test] fn latest_valid_hash_is_junk() { + let num_blocks = E::slots_per_epoch() * 5; + let finalized_epoch = 3; + let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.build_blocks(E::slots_per_epoch() * 4, Payload::Syncing); + let blocks = rig.build_blocks(num_blocks, Payload::Syncing); - assert_eq!(rig.head_info().finalized_checkpoint.epoch, 2); + assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); // No service should have triggered a shutdown, yet. assert!(rig.harness.shutdown_reasons().is_empty()); @@ -383,13 +386,18 @@ fn latest_valid_hash_is_junk() { latest_valid_hash: Some(junk_hash), }); - // The beacon chain should have triggered a shutdown. 
- assert_eq!( - rig.harness.shutdown_reasons(), - vec![ShutdownReason::Failure( - INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON - )] - ); + // The latest imported block should be the head. + assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + + // The beacon chain should *not* have triggered a shutdown. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + + // All blocks should still be unverified. + for i in E::slots_per_epoch() * finalized_epoch..num_blocks { + let slot = Slot::new(i); + let root = rig.block_root_at_slot(slot).unwrap(); + assert!(rig.execution_status(root).is_not_verified()); + } } #[test] diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 868549b20bd..463cadec245 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -886,15 +886,6 @@ where } } - /// See `ProtoArray` documentation. - pub fn execution_block_hash_to_beacon_block_root<'a>( - &'a self, - block_hash: &Hash256, - ) -> Option { - self.proto_array - .execution_block_hash_to_beacon_block_root(block_hash) - } - /// Return `true` if `block_root` is equal to the finalized root, or a known descendant of it. pub fn is_descendant_of_finalized(&self, block_root: Hash256) -> bool { self.proto_array diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index f8c06b33b6e..176d68e7970 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -733,8 +733,8 @@ impl ProtoArray { /// Returns the first *beacon block root* which contains an execution payload with the given /// `block_hash`, if any. 
- pub fn execution_block_hash_to_beacon_block_root<'a>( - &'a self, + pub fn execution_block_hash_to_beacon_block_root( + &self, block_hash: &Hash256, ) -> Option { self.nodes diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index cc0548ea256..30294b160df 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -332,15 +332,6 @@ impl ProtoArrayForkChoice { pub fn core_proto_array_mut(&mut self) -> &mut ProtoArray { &mut self.proto_array } - - /// See `ProtoArray` documentation. - pub fn execution_block_hash_to_beacon_block_root<'a>( - &'a self, - block_hash: &Hash256, - ) -> Option { - self.proto_array - .execution_block_hash_to_beacon_block_root(block_hash) - } } /// Returns a list of `deltas`, where there is one delta for each of the indices in From da4a54f048664a4f5828c0ef8c120624b63e22b8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 11 Feb 2022 10:19:28 +1100 Subject: [PATCH 20/92] Fix compile error in tests --- consensus/fork_choice/tests/tests.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 42b56f6abf0..fc431f29aa7 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -122,17 +122,23 @@ impl ForkChoiceTest { } /// Assert there was a shutdown signal sent by the beacon chain. 
- pub fn assert_shutdown_signal_sent(mut self) -> Self { - self.harness.shutdown_receiver.close(); - let msg = self.harness.shutdown_receiver.try_next().unwrap(); + pub fn assert_shutdown_signal_sent(self) -> Self { + let mutex = self.harness.shutdown_receiver.clone(); + let mut shutdown_receiver = mutex.lock(); + + shutdown_receiver.close(); + let msg = shutdown_receiver.try_next().unwrap(); assert!(msg.is_some()); self } /// Assert no shutdown was signal sent by the beacon chain. - pub fn assert_shutdown_signal_not_sent(mut self) -> Self { - self.harness.shutdown_receiver.close(); - let msg = self.harness.shutdown_receiver.try_next().unwrap(); + pub fn assert_shutdown_signal_not_sent(self) -> Self { + let mutex = self.harness.shutdown_receiver.clone(); + let mut shutdown_receiver = mutex.lock(); + + shutdown_receiver.close(); + let msg = shutdown_receiver.try_next().unwrap(); assert!(msg.is_none()); self } From 4de63cb137e3c9f494be006b126ac8c4dd035c25 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Thu, 10 Feb 2022 21:46:20 -0600 Subject: [PATCH 21/92] Update Execution API to v1.0.0-alpha.6 --- beacon_node/beacon_chain/src/beacon_chain.rs | 23 ++- beacon_node/beacon_chain/src/errors.rs | 1 + .../beacon_chain/src/execution_payload.rs | 17 +- beacon_node/execution_layer/src/engine_api.rs | 20 +- .../execution_layer/src/engine_api/http.rs | 76 ++++---- .../src/engine_api/json_structures.rs | 99 +++++----- beacon_node/execution_layer/src/engines.rs | 8 +- beacon_node/execution_layer/src/lib.rs | 181 +++++++++++++----- .../test_utils/execution_block_generator.rs | 17 +- .../src/test_utils/handle_rpc.rs | 24 ++- .../src/test_utils/mock_execution_layer.rs | 6 +- .../execution_layer/src/test_utils/mod.rs | 8 +- 12 files changed, 291 insertions(+), 189 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6fe96540bad..9b2d8386d64 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ 
b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -52,7 +52,7 @@ use crate::{metrics, BeaconChainError}; use eth2::types::{ EventKind, SseBlock, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead, SyncDuty, }; -use execution_layer::ExecutionLayer; +use execution_layer::{ExecutionLayer, PayloadStatusV1Status}; use fork_choice::{AttestationFromBlock, ForkChoice}; use futures::channel::mpsc::Sender; use itertools::process_results; @@ -3593,6 +3593,12 @@ impl BeaconChain { ) .await { + if let BeaconChainError::ExecutionForkChoiceUpdateInvalidHead(Some( + ref _latest_valid_hashes, + )) = e + { + // TODO(merge): invalidate any invalid ancestors of this block in fork choice. + } debug!( log, "Failed to update execution head"; @@ -3630,14 +3636,25 @@ impl BeaconChain { .map(|ep| ep.block_hash) .unwrap_or_else(Hash256::zero); - execution_layer + let forkchoice_updated_response = execution_layer .notify_forkchoice_updated( head_execution_block_hash, finalized_execution_block_hash, None, ) .await - .map_err(Error::ExecutionForkChoiceUpdateFailed) + .map_err(Error::ExecutionForkChoiceUpdateFailed); + + match forkchoice_updated_response { + Ok((status, latest_valid_hash)) => match status { + PayloadStatusV1Status::Valid | PayloadStatusV1Status::Syncing => Ok(()), + PayloadStatusV1Status::Invalid => Err( + BeaconChainError::ExecutionForkChoiceUpdateInvalidHead(latest_valid_hash), + ), + status => panic!("Unrecognized status from forkchoice_updated: {:?}", status), + }, + Err(e) => Err(e), + } } /// Returns the status of the current head block, regarding the validity of the execution diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 6920c06039d..e82456ffdd4 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -137,6 +137,7 @@ pub enum BeaconChainError { AltairForkDisabled, ExecutionLayerMissing, ExecutionForkChoiceUpdateFailed(execution_layer::Error), + 
ExecutionForkChoiceUpdateInvalidHead(Option>), BlockRewardSlotError, BlockRewardAttestationError, BlockRewardSyncError, diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 21d51be99dc..befe9ec3a54 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -11,7 +11,7 @@ use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; -use execution_layer::ExecutePayloadResponseStatus; +use execution_layer::PayloadStatusV1Status; use fork_choice::PayloadVerificationStatus; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; @@ -53,17 +53,18 @@ pub fn execute_payload( .execution_layer .as_ref() .ok_or(ExecutionPayloadError::NoExecutionConnection)?; - let execute_payload_response = execution_layer - .block_on(|execution_layer| execution_layer.execute_payload(execution_payload)); + let new_payload_response = execution_layer + .block_on(|execution_layer| execution_layer.notify_new_payload(execution_payload)); - match execute_payload_response { + match new_payload_response { Ok((status, _latest_valid_hash)) => match status { - ExecutePayloadResponseStatus::Valid => Ok(PayloadVerificationStatus::Verified), - // TODO(merge): invalidate any invalid ancestors of this block in fork choice. - ExecutePayloadResponseStatus::Invalid => { + PayloadStatusV1Status::Valid => Ok(PayloadVerificationStatus::Verified), + PayloadStatusV1Status::Invalid => { + // TODO(merge): invalidate any invalid ancestors of this block in fork choice. 
Err(ExecutionPayloadError::RejectedByExecutionEngine.into()) } - ExecutePayloadResponseStatus::Syncing => Ok(PayloadVerificationStatus::NotVerified), + PayloadStatusV1Status::Syncing => Ok(PayloadVerificationStatus::NotVerified), + status => panic!("Unrecognized status from new_payload: {:?}", status), }, Err(_) => Err(ExecutionPayloadError::RejectedByExecutionEngine.into()), } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index f9654a497bc..d6877b13a26 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -55,10 +55,10 @@ pub trait EngineApi { block_hash: Hash256, ) -> Result, Error>; - async fn execute_payload_v1( + async fn new_payload_v1( &self, execution_payload: ExecutionPayload, - ) -> Result; + ) -> Result; async fn get_payload_v1( &self, @@ -73,15 +73,18 @@ pub trait EngineApi { } #[derive(Clone, Copy, Debug, PartialEq)] -pub enum ExecutePayloadResponseStatus { +pub enum PayloadStatusV1Status { Valid, Invalid, Syncing, + Accepted, + InvalidBlockHash, + InvalidTerminalBlock, } #[derive(Clone, Debug, PartialEq)] -pub struct ExecutePayloadResponse { - pub status: ExecutePayloadResponseStatus, +pub struct PayloadStatusV1 { + pub status: PayloadStatusV1Status, pub latest_valid_hash: Option, pub validation_error: Option, } @@ -110,13 +113,8 @@ pub struct PayloadAttributes { pub suggested_fee_recipient: Address, } -#[derive(Clone, Copy, Debug, PartialEq)] -pub enum ForkchoiceUpdatedResponseStatus { - Success, - Syncing, -} #[derive(Clone, Debug, PartialEq)] pub struct ForkchoiceUpdatedResponse { - pub status: ForkchoiceUpdatedResponseStatus, + pub payload_status: PayloadStatusV1, pub payload_id: Option, } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index c7c60a90062..9e0d6348e04 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ 
b/beacon_node/execution_layer/src/engine_api/http.rs @@ -27,8 +27,8 @@ pub const ETH_GET_BLOCK_BY_HASH_TIMEOUT: Duration = Duration::from_secs(1); pub const ETH_SYNCING: &str = "eth_syncing"; pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_millis(250); -pub const ENGINE_EXECUTE_PAYLOAD_V1: &str = "engine_executePayloadV1"; -pub const ENGINE_EXECUTE_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); +pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1"; +pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); @@ -133,18 +133,14 @@ impl EngineApi for HttpJsonRpc { .await } - async fn execute_payload_v1( + async fn new_payload_v1( &self, execution_payload: ExecutionPayload, - ) -> Result { + ) -> Result { let params = json!([JsonExecutionPayloadV1::from(execution_payload)]); - let response: JsonExecutePayloadV1Response = self - .rpc_request( - ENGINE_EXECUTE_PAYLOAD_V1, - params, - ENGINE_EXECUTE_PAYLOAD_TIMEOUT, - ) + let response: JsonPayloadStatusV1 = self + .rpc_request(ENGINE_NEW_PAYLOAD_V1, params, ENGINE_NEW_PAYLOAD_TIMEOUT) .await?; Ok(response.into()) @@ -486,12 +482,12 @@ mod test { } #[tokio::test] - async fn execute_payload_v1_request() { + async fn new_payload_v1_request() { Tester::new() .assert_request_equals( |client| async move { let _ = client - .execute_payload_v1::(ExecutionPayload { + .new_payload_v1::(ExecutionPayload { parent_hash: Hash256::repeat_byte(0), fee_recipient: Address::repeat_byte(1), state_root: Hash256::repeat_byte(1), @@ -512,7 +508,7 @@ mod test { json!({ "id": STATIC_ID, "jsonrpc": JSONRPC_VERSION, - "method": ENGINE_EXECUTE_PAYLOAD_V1, + "method": ENGINE_NEW_PAYLOAD_V1, "params": [{ "parentHash": HASH_00, "feeRecipient": ADDRESS_01, @@ -627,7 +623,11 @@ mod test { "id": STATIC_ID, "jsonrpc": JSONRPC_VERSION, "result": { - "status": "SUCCESS", + 
"payloadStatus": { + "status": "VALID", + "latestValidHash": HASH_00, + "validationError": "" + }, "payloadId": "0xa247243752eb10b4" } })], @@ -648,7 +648,11 @@ mod test { .await .unwrap(); assert_eq!(response, ForkchoiceUpdatedResponse { - status: ForkchoiceUpdatedResponseStatus::Success, + payload_status: PayloadStatusV1 { + status: PayloadStatusV1Status::Valid, + latest_valid_hash: Some(Hash256::zero()), + validation_error: Some(String::new()), + }, payload_id: Some(str_to_payload_id("0xa247243752eb10b4")), }); @@ -683,12 +687,12 @@ mod test { "logsBloom": LOGS_BLOOM_00, "random": HASH_00, "blockNumber":"0x1", - "gasLimit":"0x1c9c380", + "gasLimit":"0x1c95111", "gasUsed":"0x0", "timestamp":"0x5", "extraData":"0x", "baseFeePerGas":"0x7", - "blockHash":"0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858", + "blockHash":"0x6359b8381a370e2f54072a5784ddd78b6ed024991558c511d4452eb4f6ac898c", "transactions":[] } })], @@ -706,12 +710,12 @@ mod test { logs_bloom: vec![0; 256].into(), random: Hash256::zero(), block_number: 1, - gas_limit: u64::from_str_radix("1c9c380",16).unwrap(), + gas_limit: u64::from_str_radix("1c95111",16).unwrap(), gas_used: 0, timestamp: 5, extra_data: vec![].into(), base_fee_per_gas: Uint256::from(7), - block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + block_hash: Hash256::from_str("0x6359b8381a370e2f54072a5784ddd78b6ed024991558c511d4452eb4f6ac898c").unwrap(), transactions: vec![].into(), }; @@ -720,10 +724,10 @@ mod test { ) .await .assert_request_equals( - // engine_executePayloadV1 REQUEST validation + // engine_newPayloadV1 REQUEST validation |client| async move { let _ = client - .execute_payload_v1::(ExecutionPayload { + .new_payload_v1::(ExecutionPayload { parent_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: 
Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), @@ -744,7 +748,7 @@ mod test { json!({ "id": STATIC_ID, "jsonrpc": JSONRPC_VERSION, - "method": ENGINE_EXECUTE_PAYLOAD_V1, + "method": ENGINE_NEW_PAYLOAD_V1, "params": [{ "parentHash":"0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a", "feeRecipient":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", @@ -765,26 +769,27 @@ mod test { ) .await .with_preloaded_responses( - // engine_executePayloadV1 RESPONSE validation + // engine_newPayloadV1 RESPONSE validation vec![json!({ "jsonrpc": JSONRPC_VERSION, "id": STATIC_ID, "result":{ "status":"VALID", - "latestValidHash":"0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858" + "latestValidHash":"0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858", + "validationError":"", } })], |client| async move { let response = client - .execute_payload_v1::(ExecutionPayload::default()) + .new_payload_v1::(ExecutionPayload::default()) .await .unwrap(); assert_eq!(response, - ExecutePayloadResponse { - status: ExecutePayloadResponseStatus::Valid, + PayloadStatusV1 { + status: PayloadStatusV1Status::Valid, latest_valid_hash: Some(Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap()), - validation_error: None + validation_error: Some(String::new()), } ); }, @@ -819,14 +824,15 @@ mod test { .await .with_preloaded_responses( // engine_forkchoiceUpdatedV1 RESPONSE validation - // - // Note: this test was modified to provide `null` rather than `0x`. The geth vectors - // are invalid. 
vec![json!({ "jsonrpc": JSONRPC_VERSION, "id": STATIC_ID, "result": { - "status":"SUCCESS", + "payloadStatus": { + "status": "VALID", + "latestValidHash": HASH_00, + "validationError": "" + }, "payloadId": JSON_NULL, } })], @@ -843,7 +849,11 @@ mod test { .await .unwrap(); assert_eq!(response, ForkchoiceUpdatedResponse { - status: ForkchoiceUpdatedResponseStatus::Success, + payload_status: PayloadStatusV1 { + status: PayloadStatusV1Status::Valid, + latest_valid_hash: Some(Hash256::zero()), + validation_error: Some(String::new()), + }, payload_id: None, }); }, diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index ae6d730fa5a..4a604fdea6a 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -247,47 +247,60 @@ impl From for ForkChoiceState { #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] -pub enum JsonExecutePayloadV1ResponseStatus { +pub enum JsonPayloadStatusV1Status { Valid, Invalid, Syncing, + Accepted, + InvalidBlockHash, + InvalidTerminalBlock, } #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct JsonExecutePayloadV1Response { - pub status: JsonExecutePayloadV1ResponseStatus, +pub struct JsonPayloadStatusV1 { + pub status: JsonPayloadStatusV1Status, pub latest_valid_hash: Option, pub validation_error: Option, } -impl From for JsonExecutePayloadV1ResponseStatus { - fn from(e: ExecutePayloadResponseStatus) -> Self { +impl From for JsonPayloadStatusV1Status { + fn from(e: PayloadStatusV1Status) -> Self { match e { - ExecutePayloadResponseStatus::Valid => JsonExecutePayloadV1ResponseStatus::Valid, - ExecutePayloadResponseStatus::Invalid => JsonExecutePayloadV1ResponseStatus::Invalid, - ExecutePayloadResponseStatus::Syncing => JsonExecutePayloadV1ResponseStatus::Syncing, + 
PayloadStatusV1Status::Valid => JsonPayloadStatusV1Status::Valid, + PayloadStatusV1Status::Invalid => JsonPayloadStatusV1Status::Invalid, + PayloadStatusV1Status::Syncing => JsonPayloadStatusV1Status::Syncing, + PayloadStatusV1Status::Accepted => JsonPayloadStatusV1Status::Accepted, + PayloadStatusV1Status::InvalidBlockHash => JsonPayloadStatusV1Status::InvalidBlockHash, + PayloadStatusV1Status::InvalidTerminalBlock => { + JsonPayloadStatusV1Status::InvalidTerminalBlock + } } } } -impl From for ExecutePayloadResponseStatus { - fn from(j: JsonExecutePayloadV1ResponseStatus) -> Self { +impl From for PayloadStatusV1Status { + fn from(j: JsonPayloadStatusV1Status) -> Self { match j { - JsonExecutePayloadV1ResponseStatus::Valid => ExecutePayloadResponseStatus::Valid, - JsonExecutePayloadV1ResponseStatus::Invalid => ExecutePayloadResponseStatus::Invalid, - JsonExecutePayloadV1ResponseStatus::Syncing => ExecutePayloadResponseStatus::Syncing, + JsonPayloadStatusV1Status::Valid => PayloadStatusV1Status::Valid, + JsonPayloadStatusV1Status::Invalid => PayloadStatusV1Status::Invalid, + JsonPayloadStatusV1Status::Syncing => PayloadStatusV1Status::Syncing, + JsonPayloadStatusV1Status::Accepted => PayloadStatusV1Status::Accepted, + JsonPayloadStatusV1Status::InvalidBlockHash => PayloadStatusV1Status::InvalidBlockHash, + JsonPayloadStatusV1Status::InvalidTerminalBlock => { + PayloadStatusV1Status::InvalidTerminalBlock + } } } } -impl From for JsonExecutePayloadV1Response { - fn from(e: ExecutePayloadResponse) -> Self { +impl From for JsonPayloadStatusV1 { + fn from(p: PayloadStatusV1) -> Self { // Use this verbose deconstruction pattern to ensure no field is left unused. 
- let ExecutePayloadResponse { + let PayloadStatusV1 { status, latest_valid_hash, validation_error, - } = e; + } = p; Self { status: status.into(), @@ -297,10 +310,10 @@ impl From for JsonExecutePayloadV1Response { } } -impl From for ExecutePayloadResponse { - fn from(j: JsonExecutePayloadV1Response) -> Self { +impl From for PayloadStatusV1 { + fn from(j: JsonPayloadStatusV1) -> Self { // Use this verbose deconstruction pattern to ensure no field is left unused. - let JsonExecutePayloadV1Response { + let JsonPayloadStatusV1 { status, latest_valid_hash, validation_error, @@ -314,50 +327,23 @@ impl From for ExecutePayloadResponse { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "SCREAMING_SNAKE_CASE")] -pub enum JsonForkchoiceUpdatedV1ResponseStatus { - Success, - Syncing, -} #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonForkchoiceUpdatedV1Response { - pub status: JsonForkchoiceUpdatedV1ResponseStatus, + pub payload_status: JsonPayloadStatusV1, pub payload_id: Option, } -impl From for ForkchoiceUpdatedResponseStatus { - fn from(j: JsonForkchoiceUpdatedV1ResponseStatus) -> Self { - match j { - JsonForkchoiceUpdatedV1ResponseStatus::Success => { - ForkchoiceUpdatedResponseStatus::Success - } - JsonForkchoiceUpdatedV1ResponseStatus::Syncing => { - ForkchoiceUpdatedResponseStatus::Syncing - } - } - } -} -impl From for JsonForkchoiceUpdatedV1ResponseStatus { - fn from(f: ForkchoiceUpdatedResponseStatus) -> Self { - match f { - ForkchoiceUpdatedResponseStatus::Success => { - JsonForkchoiceUpdatedV1ResponseStatus::Success - } - ForkchoiceUpdatedResponseStatus::Syncing => { - JsonForkchoiceUpdatedV1ResponseStatus::Syncing - } - } - } -} impl From for ForkchoiceUpdatedResponse { fn from(j: JsonForkchoiceUpdatedV1Response) -> Self { // Use this verbose deconstruction pattern to ensure no field is left unused. 
- let JsonForkchoiceUpdatedV1Response { status, payload_id } = j; + let JsonForkchoiceUpdatedV1Response { + payload_status: status, + payload_id, + } = j; Self { - status: status.into(), + payload_status: status.into(), payload_id: payload_id.map(Into::into), } } @@ -365,10 +351,13 @@ impl From for ForkchoiceUpdatedResponse { impl From for JsonForkchoiceUpdatedV1Response { fn from(f: ForkchoiceUpdatedResponse) -> Self { // Use this verbose deconstruction pattern to ensure no field is left unused. - let ForkchoiceUpdatedResponse { status, payload_id } = f; + let ForkchoiceUpdatedResponse { + payload_status: status, + payload_id, + } = f; Self { - status: status.into(), + payload_status: status.into(), payload_id: payload_id.map(Into::into), } } diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 5db00d37f6a..03801f3168d 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -1,6 +1,8 @@ //! Provides generic behaviour for multiple execution engines, specifically fallback behaviour. 
-use crate::engine_api::{EngineApi, Error as EngineApiError, PayloadAttributes, PayloadId}; +use crate::engine_api::{ + EngineApi, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, PayloadId, +}; use futures::future::join_all; use lru::LruCache; use slog::{crit, debug, info, warn, Logger}; @@ -97,7 +99,7 @@ impl Engine { forkchoice_state: ForkChoiceState, payload_attributes: Option, log: &Logger, - ) -> Result, EngineApiError> { + ) -> Result { let response = self .api .forkchoice_updated_v1(forkchoice_state, payload_attributes) @@ -117,7 +119,7 @@ impl Engine { } } - Ok(response.payload_id) + Ok(response) } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index cb267e5f0a4..44a9d86d871 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -10,7 +10,7 @@ use lru::LruCache; use sensitive_url::SensitiveUrl; use slog::{crit, debug, error, info, Logger}; use slot_clock::SlotClock; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::future::Future; use std::sync::Arc; use std::time::Duration; @@ -21,7 +21,7 @@ use tokio::{ }; use types::{ChainSpec, Epoch, ProposerPreparationData}; -pub use engine_api::{http::HttpJsonRpc, ExecutePayloadResponseStatus}; +pub use engine_api::{http::HttpJsonRpc, PayloadStatusV1Status}; mod engine_api; mod engines; @@ -49,6 +49,7 @@ pub enum Error { NotSynced, ShuttingDown, FeeRecipientUnspecified, + ConsensusFailure, } impl From for Error { @@ -431,7 +432,8 @@ impl ExecutionLayer { Some(payload_attributes), self.log(), ) - .await? + .await + .map(|response| response.payload_id)? .ok_or(ApiError::PayloadIdUnavailable)? }; @@ -441,7 +443,7 @@ impl ExecutionLayer { .map_err(Error::EngineErrors) } - /// Maps to the `engine_executePayload` JSON-RPC call. + /// Maps to the `engine_newPayload` JSON-RPC call. 
/// /// ## Fallback Behaviour /// @@ -449,17 +451,18 @@ impl ExecutionLayer { /// failure) from all nodes and then return based on the first of these conditions which /// returns true: /// + /// - Error::ConsensusFailure if some nodes return valid and some return invalid /// - Valid, if any nodes return valid. /// - Invalid, if any nodes return invalid. /// - Syncing, if any nodes return syncing. /// - An error, if all nodes return an error. - pub async fn execute_payload( + pub async fn notify_new_payload( &self, execution_payload: &ExecutionPayload, - ) -> Result<(ExecutePayloadResponseStatus, Option), Error> { + ) -> Result<(PayloadStatusV1Status, Option>), Error> { debug!( self.log(), - "Issuing engine_executePayload"; + "Issuing engine_newPayload"; "parent_hash" => ?execution_payload.parent_hash, "block_hash" => ?execution_payload.block_hash, "block_number" => execution_payload.block_number, @@ -467,46 +470,53 @@ impl ExecutionLayer { let broadcast_results = self .engines() - .broadcast(|engine| engine.api.execute_payload_v1(execution_payload.clone())) + .broadcast(|engine| engine.api.new_payload_v1(execution_payload.clone())) .await; let mut errors = vec![]; let mut valid = 0; let mut invalid = 0; let mut syncing = 0; - let mut invalid_latest_valid_hash = vec![]; + let mut invalid_latest_valid_hash = HashSet::new(); for result in broadcast_results { - match result.map(|response| (response.latest_valid_hash, response.status)) { - Ok((Some(latest_hash), ExecutePayloadResponseStatus::Valid)) => { - if latest_hash == execution_payload.block_hash { - valid += 1; - } else { + match result { + Ok(response) => match (&response.latest_valid_hash, &response.status) { + (Some(latest_hash), &PayloadStatusV1Status::Valid) => { + if latest_hash == &execution_payload.block_hash { + valid += 1; + } else { + errors.push(EngineError::Api { + id: "unknown".to_string(), + error: engine_api::Error::BadResponse( + format!( + "new_payload: response.status = Valid but invalid 
latest_valid_hash. Expected({:?}) Found({:?})", + execution_payload.block_hash, + latest_hash, + ) + ), + }); + // I've commented out the lines below because this is a malformed response + // I don't think we should treat it the same as an INVALID response + // invalid += 1; + // invalid_latest_valid_hash.insert(*latest_hash); + } + } + (Some(latest_hash), &PayloadStatusV1Status::Invalid) => { invalid += 1; - errors.push(EngineError::Api { - id: "unknown".to_string(), - error: engine_api::Error::BadResponse( - format!( - "execute_payload: response.status = Valid but invalid latest_valid_hash. Expected({:?}) Found({:?})", - execution_payload.block_hash, - latest_hash, - ) - ), - }); - invalid_latest_valid_hash.push(latest_hash); + invalid_latest_valid_hash.insert(*latest_hash); } - } - Ok((Some(latest_hash), ExecutePayloadResponseStatus::Invalid)) => { - invalid += 1; - invalid_latest_valid_hash.push(latest_hash); - } - Ok((_, ExecutePayloadResponseStatus::Syncing)) => syncing += 1, - Ok((None, status)) => errors.push(EngineError::Api { - id: "unknown".to_string(), - error: engine_api::Error::BadResponse(format!( - "execute_payload: status {:?} returned with null latest_valid_hash", - status - )), - }), + (None, &PayloadStatusV1Status::InvalidBlockHash) + | (None, &PayloadStatusV1Status::InvalidTerminalBlock) => invalid += 1, + (None, &PayloadStatusV1Status::Syncing) + | (None, &PayloadStatusV1Status::Accepted) => syncing += 1, + _ => errors.push(EngineError::Api { + id: "unknown".to_string(), + error: engine_api::Error::BadResponse(format!( + "new_payload: response does not conform to engine API spec: {:?}", + response, + )), + }), + }, Err(e) => errors.push(e), } } @@ -515,19 +525,24 @@ impl ExecutionLayer { crit!( self.log(), "Consensus failure between execution nodes"; - "method" => "execute_payload" + "method" => "new_payload" ); + // In this situation, better to have a failure of liveness than vote on a potentially invalid chain + return 
Err(Error::ConsensusFailure); } if valid > 0 { Ok(( - ExecutePayloadResponseStatus::Valid, - Some(execution_payload.block_hash), + PayloadStatusV1Status::Valid, + Some(vec![execution_payload.block_hash]), )) } else if invalid > 0 { - Ok((ExecutePayloadResponseStatus::Invalid, None)) + Ok(( + PayloadStatusV1Status::Invalid, + Some(invalid_latest_valid_hash.into_iter().collect()), + )) } else if syncing > 0 { - Ok((ExecutePayloadResponseStatus::Syncing, None)) + Ok((PayloadStatusV1Status::Syncing, None)) } else { Err(Error::EngineErrors(errors)) } @@ -541,14 +556,17 @@ impl ExecutionLayer { /// failure) from all nodes and then return based on the first of these conditions which /// returns true: /// - /// - Ok, if any node returns successfully. + /// - Error::ConsensusFailure if some nodes return valid and some return invalid + /// - Valid, if any nodes return valid. + /// - Invalid, if any nodes return invalid. + /// - Syncing, if any nodes return syncing. /// - An error, if all nodes return an error. 
pub async fn notify_forkchoice_updated( &self, head_block_hash: Hash256, finalized_block_hash: Hash256, payload_attributes: Option, - ) -> Result<(), Error> { + ) -> Result<(PayloadStatusV1Status, Option>), Error> { debug!( self.log(), "Issuing engine_forkchoiceUpdated"; @@ -577,13 +595,74 @@ impl ExecutionLayer { }) .await; - if broadcast_results.iter().any(Result::is_ok) { - Ok(()) + let mut errors = vec![]; + let mut valid = 0; + let mut invalid = 0; + let mut syncing = 0; + let mut invalid_latest_valid_hash = HashSet::new(); + for result in broadcast_results { + match result { + Ok(response) => match (&response.payload_status.latest_valid_hash, &response.payload_status.status) { + (Some(latest_hash), &PayloadStatusV1Status::Valid) => { + if latest_hash == &head_block_hash { + valid += 1; + } else { + errors.push(EngineError::Api { + id: "unknown".to_string(), + error: engine_api::Error::BadResponse( + format!( + "forkchoice_updated: payload_status = Valid but invalid latest_valid_hash. 
Expected({:?}) Found({:?})", + head_block_hash, + *latest_hash, + ) + ), + }); + // I've commented out the lines below because this is a malformed response + // I don't think we should treat it the same as an INVALID response + // invalid += 1; + // invalid_latest_valid_hash.insert(*latest_hash); + } + } + (Some(latest_hash), &PayloadStatusV1Status::Invalid) => { + invalid += 1; + invalid_latest_valid_hash.insert(*latest_hash); + } + (None, &PayloadStatusV1Status::InvalidTerminalBlock) => invalid += 1, + (None, &PayloadStatusV1Status::Syncing) => syncing += 1, + _ => { + errors.push(EngineError::Api { + id: "unknown".to_string(), + error: engine_api::Error::BadResponse(format!( + "forkchoice_updated: response does not conform to engine API spec: {:?}", + response + )), + }) + } + } + Err(e) => errors.push(e), + } + } + + if valid > 0 && invalid > 0 { + crit!( + self.log(), + "Consensus failure between execution nodes"; + "method" => "forkchoice_updated" + ); + // In this situation, better to have a failure of liveness than vote on a potentially invalid chain + return Err(Error::ConsensusFailure); + } + + if valid > 0 { + Ok((PayloadStatusV1Status::Valid, Some(vec![head_block_hash]))) + } else if invalid > 0 { + Ok(( + PayloadStatusV1Status::Invalid, + Some(invalid_latest_valid_hash.into_iter().collect()), + )) + } else if syncing > 0 { + Ok((PayloadStatusV1Status::Syncing, None)) } else { - let errors = broadcast_results - .into_iter() - .filter_map(Result::err) - .collect(); Err(Error::EngineErrors(errors)) } } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 552bea0ea48..bb459297ba3 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,6 +1,5 @@ use crate::engine_api::{ - ExecutePayloadResponse, ExecutePayloadResponseStatus, 
ExecutionBlock, PayloadAttributes, - PayloadId, + ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status, }; use crate::engines::ForkChoiceState; use serde::{Deserialize, Serialize}; @@ -235,20 +234,20 @@ impl ExecutionBlockGenerator { self.payload_ids.remove(id) } - pub fn execute_payload(&mut self, payload: ExecutionPayload) -> ExecutePayloadResponse { + pub fn new_payload(&mut self, payload: ExecutionPayload) -> PayloadStatusV1 { let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash) { parent } else { - return ExecutePayloadResponse { - status: ExecutePayloadResponseStatus::Syncing, + return PayloadStatusV1 { + status: PayloadStatusV1Status::Syncing, latest_valid_hash: None, validation_error: None, }; }; if payload.block_number != parent.block_number() + 1 { - return ExecutePayloadResponse { - status: ExecutePayloadResponseStatus::Invalid, + return PayloadStatusV1 { + status: PayloadStatusV1Status::Invalid, latest_valid_hash: Some(parent.block_hash()), validation_error: Some("invalid block number".to_string()), }; @@ -257,8 +256,8 @@ impl ExecutionBlockGenerator { let valid_hash = payload.block_hash; self.pending_payloads.insert(payload.block_hash, payload); - ExecutePayloadResponse { - status: ExecutePayloadResponseStatus::Valid, + PayloadStatusV1 { + status: PayloadStatusV1Status::Valid, latest_valid_hash: Some(valid_hash), validation_error: None, } diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 131bc8ba0af..746d96e293c 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -1,5 +1,5 @@ use super::Context; -use crate::engine_api::{http::*, ExecutePayloadResponse, ExecutePayloadResponseStatus}; +use crate::engine_api::{http::*, PayloadStatusV1, PayloadStatusV1Status}; use crate::json_structures::*; use serde::de::DeserializeOwned; use 
serde_json::Value as JsonValue; @@ -54,30 +54,30 @@ pub async fn handle_rpc( ) .unwrap()) } - ENGINE_EXECUTE_PAYLOAD_V1 => { + ENGINE_NEW_PAYLOAD_V1 => { let request: JsonExecutionPayloadV1 = get_param(params, 0)?; - let response = if let Some(status) = *ctx.static_execute_payload_response.lock() { + let response = if let Some(status) = *ctx.static_new_payload_response.lock() { match status { - ExecutePayloadResponseStatus::Valid => ExecutePayloadResponse { + PayloadStatusV1Status::Valid => PayloadStatusV1 { status, latest_valid_hash: Some(request.block_hash), validation_error: None, }, - ExecutePayloadResponseStatus::Syncing => ExecutePayloadResponse { + PayloadStatusV1Status::Syncing => PayloadStatusV1 { status, latest_valid_hash: None, validation_error: None, }, - _ => unimplemented!("invalid static executePayloadResponse"), + _ => unimplemented!("invalid static newPayloadResponse"), } } else { ctx.execution_block_generator .write() - .execute_payload(request.into()) + .new_payload(request.into()) }; - Ok(serde_json::to_value(JsonExecutePayloadV1Response::from(response)).unwrap()) + Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap()) } ENGINE_GET_PAYLOAD_V1 => { let request: JsonPayloadIdRequest = get_param(params, 0)?; @@ -94,6 +94,8 @@ pub async fn handle_rpc( ENGINE_FORKCHOICE_UPDATED_V1 => { let forkchoice_state: JsonForkChoiceStateV1 = get_param(params, 0)?; let payload_attributes: Option = get_param(params, 1)?; + + let head_block_hash = forkchoice_state.head_block_hash; let id = ctx .execution_block_generator .write() @@ -103,7 +105,11 @@ pub async fn handle_rpc( )?; Ok(serde_json::to_value(JsonForkchoiceUpdatedV1Response { - status: JsonForkchoiceUpdatedV1ResponseStatus::Success, + payload_status: JsonPayloadStatusV1 { + status: JsonPayloadStatusV1Status::Valid, + latest_valid_hash: Some(head_block_hash), + validation_error: None, + }, payload_id: id.map(Into::into), }) .unwrap()) diff --git 
a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 295e82914bb..0622da473fc 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -146,9 +146,9 @@ impl MockExecutionLayer { assert_eq!(payload.random, random); let (payload_response, latest_valid_hash) = - self.el.execute_payload(&payload).await.unwrap(); - assert_eq!(payload_response, ExecutePayloadResponseStatus::Valid); - assert_eq!(latest_valid_hash, Some(payload.block_hash)); + self.el.notify_new_payload(&payload).await.unwrap(); + assert_eq!(payload_response, PayloadStatusV1Status::Valid); + assert_eq!(latest_valid_hash, Some(vec![payload.block_hash])); self.el .notify_forkchoice_updated(block_hash, Hash256::zero(), None) diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index cd45d34a1f7..a4b96177640 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -1,7 +1,7 @@ //! Provides a mock execution engine HTTP JSON-RPC API for use in testing. 
use crate::engine_api::http::JSONRPC_VERSION; -use crate::engine_api::ExecutePayloadResponseStatus; +use crate::engine_api::PayloadStatusV1Status; use bytes::Bytes; use environment::null_logger; use execution_block_generator::{Block, PoWBlock}; @@ -62,7 +62,7 @@ impl MockServer { last_echo_request: last_echo_request.clone(), execution_block_generator: RwLock::new(execution_block_generator), preloaded_responses, - static_execute_payload_response: <_>::default(), + static_new_payload_response: <_>::default(), _phantom: PhantomData, }); @@ -117,7 +117,7 @@ impl MockServer { } pub fn all_payloads_valid(&self) { - *self.ctx.static_execute_payload_response.lock() = Some(ExecutePayloadResponseStatus::Valid) + *self.ctx.static_new_payload_response.lock() = Some(PayloadStatusV1Status::Valid) } pub fn insert_pow_block( @@ -187,7 +187,7 @@ pub struct Context { pub last_echo_request: Arc>>, pub execution_block_generator: RwLock>, pub preloaded_responses: Arc>>, - pub static_execute_payload_response: Arc>>, + pub static_new_payload_response: Arc>>, pub _phantom: PhantomData, } From e56dee8fe686e7e38d04862c2e5518c48b43b502 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 11 Feb 2022 16:13:05 +1100 Subject: [PATCH 22/92] Add test, address bug --- .../tests/payload_invalidation.rs | 74 ++++++++++++++++++- consensus/proto_array/src/proto_array.rs | 61 ++++++++------- 2 files changed, 108 insertions(+), 27 deletions(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 507f7c0080a..a575d92de3c 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -2,8 +2,8 @@ use beacon_chain::{ test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainError, BlockError, ExecutionPayloadError, HeadInfo, WhenSlotSkipped, - INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, + BeaconChainError, BlockError, ExecutionPayloadError, HeadInfo, 
StateSkipConfig, + WhenSlotSkipped, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use proto_array::ExecutionStatus; use task_executor::ShutdownReason; @@ -400,6 +400,76 @@ fn latest_valid_hash_is_junk() { } } +/// Check behaviour when the `latest_valid_hash` is a junk value. +#[test] +fn invalidates_all_descendants() { + let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; + let finalized_epoch = 2; + + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + let blocks = rig.build_blocks(num_blocks, Payload::Syncing); + + assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); + assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + + // Apply a block which conflicts with the canonical chain. + let fork_slot = Slot::new(4 * E::slots_per_epoch() + 1); + let fork_parent_slot = fork_slot - 1; + let fork_parent_state = rig + .harness + .chain + .state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots) + .unwrap(); + assert_eq!(fork_parent_state.slot(), fork_parent_slot); + let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot); + let fork_block_root = rig.harness.chain.process_block(fork_block).unwrap(); + rig.fork_choice(); + + // The latest valid hash will be set to the grandparent of the fork block. This means that the + // parent of the fork block will become invalid. + let latest_valid_slot = fork_parent_slot - 1; + let latest_valid_root = rig + .harness + .chain + .block_root_at_slot(latest_valid_slot, WhenSlotSkipped::None) + .unwrap() + .unwrap(); + assert!(blocks.contains(&latest_valid_root)); + let latest_valid_hash = rig.block_hash(latest_valid_root); + + // The new block should not become the head, the old head should remain. 
+ assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + + rig.import_block(Payload::Invalid { + latest_valid_hash: Some(latest_valid_hash), + }); + + // The block before the fork should become the head. + dbg!(latest_valid_slot); + dbg!(fork_slot); + dbg!(fork_block_root); + dbg!(rig.head_info()); + assert_eq!(rig.head_info().block_root, latest_valid_root); + + // The fork block should be invalidated, even though it's not an ancestor of the block that + // triggered the INVALID response from the EL. + assert!(rig.execution_status(fork_block_root).is_invalid()); + + for root in blocks { + let slot = rig.harness.chain.get_block(&root).unwrap().unwrap().slot(); + let execution_status = rig.execution_status(root); + + if slot < fork_slot { + // Blocks prior to the fork are valid. + assert!(execution_status.is_valid()); + } else { + // Blocks after the fork are valid. + assert!(execution_status.is_valid()); + } + } +} + #[test] fn invalid_during_processing() { let mut rig = InvalidPayloadRig::new(); diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 176d68e7970..f77047ada2b 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -317,14 +317,16 @@ impl ProtoArray { .indices .get(&head_block_root) .ok_or(Error::NodeUnknown(head_block_root))?; - let first_potential_descendant = index + 1; + + // Try to map the ancestor payload *hash* to an ancestor beacon block *root*. + let latest_valid_ancestor_root = + self.execution_block_hash_to_beacon_block_root(&latest_valid_ancestor_hash); // Set to `true` if both conditions are satisfied: // // 1. The `head_block_root` is a descendant of `latest_valid_ancestor_hash` // 2. The `latest_valid_ancestor_hash` is equal to or a descendant of the finalized block. 
- let latest_valid_ancestor_is_descendant = self - .execution_block_hash_to_beacon_block_root(&latest_valid_ancestor_hash) + let latest_valid_ancestor_is_descendant = latest_valid_ancestor_root .map_or(false, |ancestor_root| { self.is_descendant(ancestor_root, head_block_root) }); @@ -373,6 +375,7 @@ impl ProtoArray { }) } ExecutionStatus::Unknown(hash) => { + dbg!(node.slot); node.execution_status = ExecutionStatus::Invalid(*hash) } // The block is already invalid, but keep going backwards to ensure all ancestors @@ -395,30 +398,38 @@ impl ProtoArray { } } - // Collect all *descendants* which declared invalid since they're the descendant of a block - // with an invalid execution payload. - for index in first_potential_descendant..self.nodes.len() { - let node = self - .nodes - .get_mut(index) - .ok_or(Error::InvalidNodeIndex(index))?; - - if let Some(parent_index) = node.parent { - if invalidated_indices.contains(&parent_index) { - match &node.execution_status { - ExecutionStatus::Valid(hash) => { - return Err(Error::ValidExecutionStatusBecameInvalid { - block_root: node.root, - payload_block_hash: *hash, - }) - } - ExecutionStatus::Unknown(hash) | ExecutionStatus::Invalid(hash) => { - node.execution_status = ExecutionStatus::Invalid(*hash) + if let Some(latest_valid_ancestor_root) = latest_valid_ancestor_root { + let latest_valid_ancestor_index = *self + .indices + .get(&latest_valid_ancestor_root) + .ok_or(Error::NodeUnknown(latest_valid_ancestor_root))?; + let first_potential_descendant = latest_valid_ancestor_index + 1; + + // Collect all *descendants* which declared invalid since they're the descendant of a block + // with an invalid execution payload. 
+ for index in first_potential_descendant..self.nodes.len() { + let node = self + .nodes + .get_mut(index) + .ok_or(Error::InvalidNodeIndex(index))?; + + if let Some(parent_index) = node.parent { + if invalidated_indices.contains(&parent_index) { + match &node.execution_status { + ExecutionStatus::Valid(hash) => { + return Err(Error::ValidExecutionStatusBecameInvalid { + block_root: node.root, + payload_block_hash: *hash, + }) + } + ExecutionStatus::Unknown(hash) | ExecutionStatus::Invalid(hash) => { + node.execution_status = ExecutionStatus::Invalid(*hash) + } + ExecutionStatus::Irrelevant(_) => (), } - ExecutionStatus::Irrelevant(_) => (), - } - invalidated_indices.insert(index); + invalidated_indices.insert(index); + } } } } From 353dd1fd10509739feb2f275d839439e23383016 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 14 Feb 2022 13:54:48 +1100 Subject: [PATCH 23/92] Remove dbgs --- beacon_node/beacon_chain/tests/payload_invalidation.rs | 4 ---- consensus/proto_array/src/proto_array.rs | 1 - 2 files changed, 5 deletions(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index a575d92de3c..e143adb8dab 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -446,10 +446,6 @@ fn invalidates_all_descendants() { }); // The block before the fork should become the head. 
- dbg!(latest_valid_slot); - dbg!(fork_slot); - dbg!(fork_block_root); - dbg!(rig.head_info()); assert_eq!(rig.head_info().block_root, latest_valid_root); // The fork block should be invalidated, even though it's not an ancestor of the block that diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index f77047ada2b..aa842c53b87 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -375,7 +375,6 @@ impl ProtoArray { }) } ExecutionStatus::Unknown(hash) => { - dbg!(node.slot); node.execution_status = ExecutionStatus::Invalid(*hash) } // The block is already invalid, but keep going backwards to ensure all ancestors From 4bc0c2049e106989f308169ed1bffcffede7f67c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 14 Feb 2022 17:44:40 +1100 Subject: [PATCH 24/92] Fix failing tests by null-ing best child/desc --- .../beacon_chain/tests/payload_invalidation.rs | 18 ++++++++++++------ consensus/proto_array/src/proto_array.rs | 13 ++++++++++++- 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index e143adb8dab..32c835aac44 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -405,6 +405,7 @@ fn latest_valid_hash_is_junk() { fn invalidates_all_descendants() { let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; let finalized_epoch = 2; + let finalized_slot = E::slots_per_epoch() * 2; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); @@ -414,7 +415,7 @@ fn invalidates_all_descendants() { assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); // Apply a block which conflicts with the canonical chain. 
- let fork_slot = Slot::new(4 * E::slots_per_epoch() + 1); + let fork_slot = Slot::new(4 * E::slots_per_epoch() + 3); let fork_parent_slot = fork_slot - 1; let fork_parent_state = rig .harness @@ -454,14 +455,19 @@ fn invalidates_all_descendants() { for root in blocks { let slot = rig.harness.chain.get_block(&root).unwrap().unwrap().slot(); - let execution_status = rig.execution_status(root); - if slot < fork_slot { - // Blocks prior to the fork are valid. + // Fork choice doesn't have info about pre-finalization, nothing to check here. + if slot < finalized_slot { + continue; + } + + let execution_status = rig.execution_status(root); + if slot <= latest_valid_slot { + // Blocks prior to the latest valid hash are valid. assert!(execution_status.is_valid()); } else { - // Blocks after the fork are valid. - assert!(execution_status.is_valid()); + // Blocks after the latest valid hash are invalid. + assert!(execution_status.is_invalid()); } } } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index aa842c53b87..d8b32830745 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -356,6 +356,15 @@ impl ProtoArray { latest_valid_ancestor_hash, }); } else if hash == latest_valid_ancestor_hash { + // If the `best_child` or `best_descendant` of the latest valid hash was + // invalidated, set those fields to `None`. + node.best_child = node + .best_child + .filter(|best_child| invalidated_indices.contains(&best_child)); + node.best_descendant = node.best_descendant.filter(|best_descendant| { + invalidated_indices.contains(&best_descendant) + }); + // It might be new knowledge that this block is valid, ensure that it and all // ancestors are marked as valid. 
self.propagate_execution_payload_validation(index)?; @@ -375,7 +384,9 @@ impl ProtoArray { }) } ExecutionStatus::Unknown(hash) => { - node.execution_status = ExecutionStatus::Invalid(*hash) + node.execution_status = ExecutionStatus::Invalid(*hash); + node.best_child = None; + node.best_descendant = None; } // The block is already invalid, but keep going backwards to ensure all ancestors // are updated. From a6d9ae47a2331b5c8b7e7251e1772ce51470e201 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 14 Feb 2022 17:52:29 +1100 Subject: [PATCH 25/92] Add another test --- .../tests/payload_invalidation.rs | 65 ++++++++++++++++++- 1 file changed, 64 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 32c835aac44..3d0694ede83 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -400,7 +400,7 @@ fn latest_valid_hash_is_junk() { } } -/// Check behaviour when the `latest_valid_hash` is a junk value. +/// Check that descendants of invalid blocks are also invalidated. #[test] fn invalidates_all_descendants() { let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; @@ -472,6 +472,69 @@ fn invalidates_all_descendants() { } } +/// Check that the head will switch after the canonical branch is invalidated. +#[test] +fn switches_heads() { + let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; + let finalized_epoch = 2; + let finalized_slot = E::slots_per_epoch() * 2; + + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + let blocks = rig.build_blocks(num_blocks, Payload::Syncing); + + assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); + assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + + // Apply a block which conflicts with the canonical chain. 
+ let fork_slot = Slot::new(4 * E::slots_per_epoch() + 3); + let fork_parent_slot = fork_slot - 1; + let fork_parent_state = rig + .harness + .chain + .state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots) + .unwrap(); + assert_eq!(fork_parent_state.slot(), fork_parent_slot); + let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot); + let fork_parent_root = fork_block.parent_root(); + let fork_block_root = rig.harness.chain.process_block(fork_block).unwrap(); + rig.fork_choice(); + + let latest_valid_slot = fork_parent_slot; + let latest_valid_hash = rig.block_hash(fork_parent_root); + + // The new block should not become the head, the old head should remain. + assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + + rig.import_block(Payload::Invalid { + latest_valid_hash: Some(latest_valid_hash), + }); + + // The fork block should become the head. + assert_eq!(rig.head_info().block_root, fork_block_root); + + // The fork block has not yet been validated. + assert!(rig.execution_status(fork_block_root).is_not_verified()); + + for root in blocks { + let slot = rig.harness.chain.get_block(&root).unwrap().unwrap().slot(); + + // Fork choice doesn't have info about pre-finalization, nothing to check here. + if slot < finalized_slot { + continue; + } + + let execution_status = rig.execution_status(root); + if slot <= latest_valid_slot { + // Blocks prior to the latest valid hash are valid. + assert!(execution_status.is_valid()); + } else { + // Blocks after the latest valid hash are invalid. 
+ assert!(execution_status.is_invalid()); + } + } +} + #[test] fn invalid_during_processing() { let mut rig = InvalidPayloadRig::new(); From b3d033db66a433e529f34694b5ccc6377ce67ac8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 15 Feb 2022 08:57:50 +1100 Subject: [PATCH 26/92] Address clippy lint --- consensus/proto_array/src/proto_array.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index d8b32830745..3aae97e5541 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -360,9 +360,9 @@ impl ProtoArray { // invalidated, set those fields to `None`. node.best_child = node .best_child - .filter(|best_child| invalidated_indices.contains(&best_child)); + .filter(|best_child| invalidated_indices.contains(best_child)); node.best_descendant = node.best_descendant.filter(|best_descendant| { - invalidated_indices.contains(&best_descendant) + invalidated_indices.contains(best_descendant) }); // It might be new knowledge that this block is valid, ensure that it and all From bcd74b804ef11e544fd2e6597f79f4bf9e04d0fc Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 15 Feb 2022 11:00:07 +1100 Subject: [PATCH 27/92] Remove panic from beacon chain --- beacon_node/beacon_chain/src/beacon_chain.rs | 38 ++++++++++++++------ beacon_node/beacon_chain/src/errors.rs | 6 +++- beacon_node/client/src/builder.rs | 1 + 3 files changed, 33 insertions(+), 12 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9b2d8386d64..67aed4b4841 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3590,16 +3590,11 @@ impl BeaconChain { store, new_finalized_checkpoint.root, new_head_execution_block_hash, + &log, ) .await { - if let BeaconChainError::ExecutionForkChoiceUpdateInvalidHead(Some( - ref 
_latest_valid_hashes, - )) = e - { - // TODO(merge): invalidate any invalid ancestors of this block in fork choice. - } - debug!( + crit!( log, "Failed to update execution head"; "error" => ?e @@ -3619,6 +3614,7 @@ impl BeaconChain { store: BeaconStore, finalized_beacon_block_root: Hash256, head_execution_block_hash: Hash256, + log: &Logger, ) -> Result<(), Error> { // Loading the finalized block from the store is not ideal. Perhaps it would be better to // store it on fork-choice so we can do a lookup without hitting the database. @@ -3648,10 +3644,30 @@ impl BeaconChain { match forkchoice_updated_response { Ok((status, latest_valid_hash)) => match status { PayloadStatusV1Status::Valid | PayloadStatusV1Status::Syncing => Ok(()), - PayloadStatusV1Status::Invalid => Err( - BeaconChainError::ExecutionForkChoiceUpdateInvalidHead(latest_valid_hash), - ), - status => panic!("Unrecognized status from forkchoice_updated: {:?}", status), + // The specification doesn't list `ACCEPTED` as a valid response to a fork choice + // update. This response *seems* innocent enough, so we won't return early with an + // error. However, we create a log to bring attention to the issue. + PayloadStatusV1Status::Accepted => { + warn!( + log, + "Fork choice update received ACCEPTED"; + "msg" => "execution engine provided an unexpected response to a fork \ + choice update. although this is not a serious issue, please raise \ + an issue." + ); + Ok(()) + } + PayloadStatusV1Status::Invalid + | PayloadStatusV1Status::InvalidTerminalBlock + | PayloadStatusV1Status::InvalidBlockHash => { + // TODO(bellatrix): process the invalid payload. 
+ // + // See: https://github.com/sigp/lighthouse/pull/2837 + Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { + status, + latest_valid_hash, + }) + } }, Err(e) => Err(e), } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index e82456ffdd4..4ca15979326 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -8,6 +8,7 @@ use crate::naive_aggregation_pool::Error as NaiveAggregationError; use crate::observed_aggregates::Error as ObservedAttestationsError; use crate::observed_attesters::Error as ObservedAttestersError; use crate::observed_block_producers::Error as ObservedBlockProducersError; +use execution_layer::PayloadStatusV1Status; use futures::channel::mpsc::TrySendError; use operation_pool::OpPoolError; use safe_arith::ArithError; @@ -137,7 +138,10 @@ pub enum BeaconChainError { AltairForkDisabled, ExecutionLayerMissing, ExecutionForkChoiceUpdateFailed(execution_layer::Error), - ExecutionForkChoiceUpdateInvalidHead(Option>), + ExecutionForkChoiceUpdateInvalid { + status: PayloadStatusV1Status, + latest_valid_hash: Option>, + }, BlockRewardSlotError, BlockRewardAttestationError, BlockRewardSyncError, diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index f5045418ab8..c3e0f8af5c0 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -681,6 +681,7 @@ where store, head.finalized_checkpoint.root, block_hash, + &log, ) .await; From f7401c67a4e40ce1c4c98991bd0302d786d5c99a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 15 Feb 2022 11:14:04 +1100 Subject: [PATCH 28/92] Handle errors on newPayload --- .../beacon_chain/src/block_verification.rs | 6 ++++- .../beacon_chain/src/execution_payload.rs | 23 +++++++++++++------ 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 
c2dc0028e99..ef60145d8b8 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -54,6 +54,7 @@ use crate::{ metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use eth2::types::EventKind; +use execution_layer::PayloadStatusV1Status; use fork_choice::{ForkChoice, ForkChoiceStore, PayloadVerificationStatus}; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; @@ -269,7 +270,10 @@ pub enum ExecutionPayloadError { /// ## Peer scoring /// /// The block is invalid and the peer is faulty - RejectedByExecutionEngine, + RejectedByExecutionEngine { + status: PayloadStatusV1Status, + latest_valid_hash: Option>, + }, /// The execution payload timestamp does not match the slot /// /// ## Peer scoring diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index befe9ec3a54..ffe463e8cb6 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -57,16 +57,25 @@ pub fn execute_payload( .block_on(|execution_layer| execution_layer.notify_new_payload(execution_payload)); match new_payload_response { - Ok((status, _latest_valid_hash)) => match status { + Ok((status, latest_valid_hash)) => match status { PayloadStatusV1Status::Valid => Ok(PayloadVerificationStatus::Verified), - PayloadStatusV1Status::Invalid => { - // TODO(merge): invalidate any invalid ancestors of this block in fork choice. - Err(ExecutionPayloadError::RejectedByExecutionEngine.into()) + PayloadStatusV1Status::Syncing | PayloadStatusV1Status::Accepted => { + Ok(PayloadVerificationStatus::NotVerified) + } + PayloadStatusV1Status::Invalid + | PayloadStatusV1Status::InvalidTerminalBlock + | PayloadStatusV1Status::InvalidBlockHash => { + // TODO(bellatrix): process the invalid payload. 
+ // + // See: https://github.com/sigp/lighthouse/pull/2837 + Err(ExecutionPayloadError::RejectedByExecutionEngine { + status, + latest_valid_hash, + } + .into()) } - PayloadStatusV1Status::Syncing => Ok(PayloadVerificationStatus::NotVerified), - status => panic!("Unrecognized status from new_payload: {:?}", status), }, - Err(_) => Err(ExecutionPayloadError::RejectedByExecutionEngine.into()), + Err(e) => Err(ExecutionPayloadError::RequestFailed(e).into()), } } From a2f9fba4c8ab91cd371d2742fb4e788544e99e05 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 15 Feb 2022 11:14:15 +1100 Subject: [PATCH 29/92] Clean commented out code --- beacon_node/execution_layer/src/lib.rs | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 44a9d86d871..9bc7ac48ff6 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -482,6 +482,12 @@ impl ExecutionLayer { match result { Ok(response) => match (&response.latest_valid_hash, &response.status) { (Some(latest_hash), &PayloadStatusV1Status::Valid) => { + // According to a strict interpretation of the spec, the EE should never + // respond with `VALID` *and* a `latest_valid_hash`. + // + // For the sake of being liberal with what we accept, we will accept a + // `latest_valid_hash` *only if* it matches the submitted payload. + // Otherwise, register an error. 
if latest_hash == &execution_payload.block_hash { valid += 1; } else { @@ -495,10 +501,6 @@ impl ExecutionLayer { ) ), }); - // I've commented out the lines below because this is a malformed response - // I don't think we should treat it the same as an INVALID response - // invalid += 1; - // invalid_latest_valid_hash.insert(*latest_hash); } } (Some(latest_hash), &PayloadStatusV1Status::Invalid) => { @@ -607,6 +609,12 @@ impl ExecutionLayer { if latest_hash == &head_block_hash { valid += 1; } else { + // According to a strict interpretation of the spec, the EE should never + // respond with `VALID` *and* a `latest_valid_hash`. + // + // For the sake of being liberal with what we accept, we will accept a + // `latest_valid_hash` *only if* it matches the submitted payload. + // Otherwise, register an error. errors.push(EngineError::Api { id: "unknown".to_string(), error: engine_api::Error::BadResponse( @@ -617,10 +625,6 @@ impl ExecutionLayer { ) ), }); - // I've commented out the lines below because this is a malformed response - // I don't think we should treat it the same as an INVALID response - // invalid += 1; - // invalid_latest_valid_hash.insert(*latest_hash); } } (Some(latest_hash), &PayloadStatusV1Status::Invalid) => { From e9ece6c9bd5809173e40667f45bc6893557596f7 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 15 Feb 2022 15:14:53 +1100 Subject: [PATCH 30/92] Start adding merge_sim --- Cargo.lock | 22 ++ Cargo.toml | 1 + beacon_node/execution_layer/src/lib.rs | 4 +- testing/merge_sim/.gitignore | 1 + testing/merge_sim/Cargo.toml | 16 ++ testing/merge_sim/build.rs | 62 +++++ testing/merge_sim/src/genesis_json.rs | 42 +++ testing/merge_sim/src/main.rs | 366 +++++++++++++++++++++++++ 8 files changed, 512 insertions(+), 2 deletions(-) create mode 100644 testing/merge_sim/.gitignore create mode 100644 testing/merge_sim/Cargo.toml create mode 100644 testing/merge_sim/build.rs create mode 100644 testing/merge_sim/src/genesis_json.rs create mode 100644 
testing/merge_sim/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 4e33767dec7..f11254fd84d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2723,6 +2723,12 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" + [[package]] name = "jsonrpc-core" version = "18.0.0" @@ -3533,6 +3539,22 @@ dependencies = [ "autocfg 1.1.0", ] +[[package]] +name = "merge_sim" +version = "0.1.0" +dependencies = [ + "environment", + "execution_layer", + "exit-future", + "futures", + "json", + "sensitive_url", + "task_executor", + "tempfile", + "tokio", + "types", +] + [[package]] name = "merkle_proof" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index d27c1dc132e..ffc6c38f8e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -74,6 +74,7 @@ members = [ "testing/ef_tests", "testing/eth1_test_rig", + "testing/merge_sim", "testing/node_test_rig", "testing/simulator", "testing/test-test_logger", diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 9bc7ac48ff6..cde89fb2f66 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -21,7 +21,7 @@ use tokio::{ }; use types::{ChainSpec, Epoch, ProposerPreparationData}; -pub use engine_api::{http::HttpJsonRpc, PayloadStatusV1Status}; +pub use engine_api::{http::HttpJsonRpc, PayloadAttributes, PayloadStatusV1Status}; mod engine_api; mod engines; @@ -250,7 +250,7 @@ impl ExecutionLayer { } /// Performs a single execution of the watchdog routine. - async fn watchdog_task(&self) { + pub async fn watchdog_task(&self) { // Disable logging since this runs frequently and may get annoying. 
self.engines().upcheck_not_synced(Logging::Disabled).await; } diff --git a/testing/merge_sim/.gitignore b/testing/merge_sim/.gitignore new file mode 100644 index 00000000000..07ea3a7ff28 --- /dev/null +++ b/testing/merge_sim/.gitignore @@ -0,0 +1 @@ +execution_clients/ diff --git a/testing/merge_sim/Cargo.toml b/testing/merge_sim/Cargo.toml new file mode 100644 index 00000000000..fff73ac88ae --- /dev/null +++ b/testing/merge_sim/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "merge_sim" +version = "0.1.0" +edition = "2021" + +[dependencies] +tempfile = "3.1.0" +json = "0.12.4" +task_executor = { path = "../../common/task_executor" } +tokio = { version = "1.14.0", features = ["rt-multi-thread", "macros"] } +futures = "0.3.7" +exit-future = "0.2.0" +environment = { path = "../../lighthouse/environment" } +execution_layer = { path = "../../beacon_node/execution_layer" } +sensitive_url = { path = "../../common/sensitive_url" } +types = { path = "../../consensus/types" } diff --git a/testing/merge_sim/build.rs b/testing/merge_sim/build.rs new file mode 100644 index 00000000000..cfa2f21a5fa --- /dev/null +++ b/testing/merge_sim/build.rs @@ -0,0 +1,62 @@ +use std::env; +use std::fs; +use std::path::PathBuf; +use std::process::Command; + +const GETH_BRANCH: &str = "merge-kiln"; +const GETH_REPO_URL: &str = "https://github.com/MariusVanDerWijden/go-ethereum"; + +fn main() { + let manifest_dir: PathBuf = env::var("CARGO_MANIFEST_DIR").unwrap().into(); + let execution_clients_dir = manifest_dir.join("execution_clients"); + + if !execution_clients_dir.exists() { + fs::create_dir(&execution_clients_dir).unwrap(); + } + + build_geth(&execution_clients_dir); +} + +fn build_geth(execution_clients_dir: &PathBuf) { + let repo_dir = execution_clients_dir.join("go-ethereum"); + + if !repo_dir.exists() { + // Clone the repo + assert!(Command::new("git") + .arg("clone") + .arg(GETH_REPO_URL) + .current_dir(&execution_clients_dir) + .output() + .expect("failed to clone geth repo") + 
.status + .success()); + } + + // Checkout the correct branch + assert!(Command::new("git") + .arg("checkout") + .arg(GETH_BRANCH) + .current_dir(&repo_dir) + .output() + .expect("failed to checkout geth branch") + .status + .success()); + + // Update the branch + assert!(Command::new("git") + .arg("pull") + .current_dir(&repo_dir) + .output() + .expect("failed to update geth branch") + .status + .success()); + + // Build geth + assert!(Command::new("make") + .arg("geth") + .current_dir(&repo_dir) + .output() + .expect("failed to make geth") + .status + .success()); +} diff --git a/testing/merge_sim/src/genesis_json.rs b/testing/merge_sim/src/genesis_json.rs new file mode 100644 index 00000000000..71e2c29ea7c --- /dev/null +++ b/testing/merge_sim/src/genesis_json.rs @@ -0,0 +1,42 @@ +use json::JsonValue; + +/// Sourced from: +/// +/// https://notes.ethereum.org/rmVErCfCRPKGqGkUe89-Kg +pub fn geth_genesis_json() -> JsonValue { + json::object! { + "config": { + "chainId":1, + "homesteadBlock":0, + "eip150Block":0, + "eip155Block":0, + "eip158Block":0, + "byzantiumBlock":0, + "constantinopleBlock":0, + "petersburgBlock":0, + "istanbulBlock":0, + "muirGlacierBlock":0, + "berlinBlock":0, + "londonBlock":0, + "clique": { + "period": 5, + "epoch": 30000 + }, + "terminalTotalDifficulty":0 + }, + "nonce":"0x42", + "timestamp":"0x0", + "extraData":"0x0000000000000000000000000000000000000000000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "gasLimit":"0x1C9C380", + "difficulty":"0x400000000", + "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase":"0x0000000000000000000000000000000000000000", + "alloc":{ + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b":{"balance":"0x6d6172697573766477000000"} + }, + "number":"0x0", + "gasUsed":"0x0", + 
"parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", + "baseFeePerGas":"0x7" + } +} diff --git a/testing/merge_sim/src/main.rs b/testing/merge_sim/src/main.rs new file mode 100644 index 00000000000..a6d61a1ad9e --- /dev/null +++ b/testing/merge_sim/src/main.rs @@ -0,0 +1,366 @@ +use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatusV1Status}; +use genesis_json::geth_genesis_json; +use sensitive_url::SensitiveUrl; +use std::net::{TcpListener, UdpSocket}; +use std::path::PathBuf; +use std::process::{Child, Command, Output}; +use std::sync::Arc; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use std::{env, fs::File}; +use task_executor::TaskExecutor; +use tempfile::TempDir; +use tokio::time::sleep; +use types::{Address, ChainSpec, EthSpec, Hash256, MainnetEthSpec, Uint256}; + +mod genesis_json; + +const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(10); + +struct ExecutionEngine { + engine: E, + datadir: TempDir, + http_port: u16, + child: Child, +} + +impl Drop for ExecutionEngine { + fn drop(&mut self) { + self.child.kill().unwrap() + } +} + +impl ExecutionEngine { + pub fn new(engine: E) -> Self { + let datadir = E::init_datadir(); + let http_port = unused_port("tcp").unwrap(); + let child = E::start_client(&datadir, http_port); + Self { + engine, + datadir, + http_port, + child, + } + } + + pub fn http_url(&self) -> SensitiveUrl { + SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_port)).unwrap() + } +} + +struct Geth; + +impl Geth { + fn binary_path() -> PathBuf { + let manifest_dir: PathBuf = env::var("CARGO_MANIFEST_DIR").unwrap().into(); + manifest_dir + .join("execution_clients") + .join("go-ethereum") + .join("build") + .join("bin") + .join("geth") + } +} + +pub trait GenericExecutionEngine { + fn init_datadir() -> TempDir; + fn start_client(datadir: &TempDir, http_port: u16) -> Child; +} + +impl GenericExecutionEngine for Geth { + fn init_datadir() -> TempDir { + let 
datadir = TempDir::new().unwrap(); + + let genesis_json_path = datadir.path().join("genesis.json"); + let mut file = File::create(&genesis_json_path).unwrap(); + let json = geth_genesis_json(); + json.write(&mut file).unwrap(); + + let output = Command::new(Self::binary_path()) + .arg("--datadir") + .arg(datadir.path().to_str().unwrap()) + .arg("init") + .arg(genesis_json_path.to_str().unwrap()) + .output() + .expect("failed to init geth"); + + check_command_output(output, "geth init failed"); + + datadir + } + + fn start_client(datadir: &TempDir, http_port: u16) -> Child { + Command::new(Self::binary_path()) + .arg("--datadir") + .arg(datadir.path().to_str().unwrap()) + .arg("--http") + .arg("--http.api") + .arg("engine,eth") + .arg("--http.port") + .arg(http_port.to_string()) + .spawn() + .expect("failed to start beacon node") + } +} + +struct TestRig { + runtime: Arc, + execution_layer: ExecutionLayer, + execution_engine: ExecutionEngine, + spec: ChainSpec, + _runtime_shutdown: exit_future::Signal, +} + +impl TestRig { + pub fn new(execution_engine: ExecutionEngine) -> Self { + let log = environment::null_logger().unwrap(); + let runtime = Arc::new( + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap(), + ); + let (runtime_shutdown, exit) = exit_future::signal(); + let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); + + let mut urls = vec![]; + urls.push(execution_engine.http_url()); + + let fee_recipient = None; + let execution_layer = + ExecutionLayer::from_urls(urls, fee_recipient, executor, log).unwrap(); + + let mut spec = MainnetEthSpec::default_spec(); + spec.terminal_total_difficulty = Uint256::zero(); + + Self { + runtime, + execution_layer, + execution_engine, + spec, + _runtime_shutdown: runtime_shutdown, + } + } + + pub fn perform_tests_blocking(&self) { + self.execution_layer + .block_on_generic(|_| async { 
self.perform_tests().await }) + .unwrap() + } + + pub async fn wait_until_synced(&self) { + let start_instant = Instant::now(); + + loop { + // Run the routine to check for online nodes. + self.execution_layer.watchdog_task().await; + + if self.execution_layer.is_synced().await { + break; + } else { + if start_instant + EXECUTION_ENGINE_START_TIMEOUT > Instant::now() { + sleep(Duration::from_millis(500)).await; + } else { + panic!("timeout waiting for execution engines to come online") + } + } + } + } + + pub async fn perform_tests(&self) { + self.wait_until_synced().await; + + let terminal_pow_block_hash = self + .execution_layer + .get_terminal_pow_block_hash(&self.spec) + .await + .unwrap() + .unwrap(); + + /* + * Produce a valid payload atop the terminal block. + */ + + let parent_hash = terminal_pow_block_hash; + let timestamp = timestamp_now(); + let random = Hash256::zero(); + let finalized_block_hash = Hash256::zero(); + let proposer_index = 0; + let valid_payload = self + .execution_layer + .get_payload::( + parent_hash, + timestamp, + random, + finalized_block_hash, + proposer_index, + ) + .await + .unwrap(); + + /* + * Indicate that the payload is the head of the chain, before submitting a + * `notify_new_payload`. + */ + let head_block_hash = valid_payload.block_hash; + let finalized_block_hash = Hash256::zero(); + let payload_attributes = None; + let (status, _) = self + .execution_layer + .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Syncing); + + /* + * Provide the valid payload back to the EE again. + */ + + let (status, _) = self + .execution_layer + .notify_new_payload(&valid_payload) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Valid); + + /* + * Indicate that the payload is the head of the chain. + * + * Do not provide payload attributes (we'll test that later). 
+ */ + let head_block_hash = valid_payload.block_hash; + let finalized_block_hash = Hash256::zero(); + let payload_attributes = None; + let (status, _) = self + .execution_layer + .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Valid); + + /* + * Provide an invalidated payload to the EE. + */ + + let mut invalid_payload = valid_payload.clone(); + invalid_payload.random = Hash256::from_low_u64_be(42); + let (status, _) = self + .execution_layer + .notify_new_payload(&invalid_payload) + .await + .unwrap(); + assert!(matches!( + status, + PayloadStatusV1Status::Invalid | PayloadStatusV1Status::InvalidBlockHash + )); + + /* + * Produce another payload atop the previous one. + */ + + let parent_hash = valid_payload.block_hash; + let timestamp = valid_payload.timestamp + 1; + let random = Hash256::zero(); + let finalized_block_hash = Hash256::zero(); + let proposer_index = 0; + let second_payload = self + .execution_layer + .get_payload::( + parent_hash, + timestamp, + random, + finalized_block_hash, + proposer_index, + ) + .await + .unwrap(); + + /* + * Provide the second payload back to the EE again. + */ + + let (status, _) = self + .execution_layer + .notify_new_payload(&second_payload) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Valid); + + /* + * Indicate that the payload is the head of the chain, providing payload attributes. 
+ */ + let head_block_hash = valid_payload.block_hash; + let finalized_block_hash = Hash256::zero(); + let payload_attributes = PayloadAttributes { + timestamp: second_payload.timestamp + 1, + random: Hash256::zero(), + suggested_fee_recipient: Address::zero(), + }; + let (status, _) = self + .execution_layer + .notify_forkchoice_updated( + head_block_hash, + finalized_block_hash, + Some(payload_attributes), + ) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Valid); + } +} + +fn main() { + let geth_rig = TestRig::new(ExecutionEngine::new(Geth)); + geth_rig.perform_tests_blocking(); +} + +fn check_command_output(output: Output, failure_msg: &'static str) { + if !output.status.success() { + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + + dbg!(stdout); + dbg!(stderr); + panic!("{}", failure_msg); + } +} + +/// A bit of hack to find an unused port. +/// +/// Does not guarantee that the given port is unused after the function exits, just that it was +/// unused before the function started (i.e., it does not reserve a port). +pub fn unused_port(transport: &str) -> Result { + let local_addr = match transport { + "tcp" => { + let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| { + format!("Failed to create TCP listener to find unused port: {:?}", e) + })?; + listener.local_addr().map_err(|e| { + format!( + "Failed to read TCP listener local_addr to find unused port: {:?}", + e + ) + })? + } + "udp" => { + let socket = UdpSocket::bind("127.0.0.1:0") + .map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?; + socket.local_addr().map_err(|e| { + format!( + "Failed to read UDP socket local_addr to find unused port: {:?}", + e + ) + })? + } + _ => return Err("Invalid transport to find unused port".into()), + }; + Ok(local_addr.port()) +} + +/// Returns the duration since the unix epoch. 
+pub fn timestamp_now() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_else(|_| Duration::from_secs(0)) + .as_secs() +} From e4cb559d2a1cfa8c6c027e458980da10384264e4 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 15 Feb 2022 15:15:08 +1100 Subject: [PATCH 31/92] Fix pattern match on exec layer --- beacon_node/execution_layer/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index cde89fb2f66..42013ad23b3 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -605,6 +605,7 @@ impl ExecutionLayer { for result in broadcast_results { match result { Ok(response) => match (&response.payload_status.latest_valid_hash, &response.payload_status.status) { + (None, &PayloadStatusV1Status::Valid) => valid += 1, (Some(latest_hash), &PayloadStatusV1Status::Valid) => { if latest_hash == &head_block_hash { valid += 1; From 717d5f10323844a3ab9eb8d5ee77fbdb25943b43 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 15 Feb 2022 15:23:53 +1100 Subject: [PATCH 32/92] Rename merge_sim to execution_engine_integration --- Cargo.lock | 32 ++-- Cargo.toml | 2 +- .../.gitignore | 0 .../Cargo.toml | 2 +- .../build.rs | 0 .../src/execution_engine.rs | 138 ++++++++++++++++ .../src/genesis_json.rs | 0 .../execution_engine_integration/src/main.rs | 11 ++ .../src/test_rig.rs} | 148 +----------------- 9 files changed, 171 insertions(+), 162 deletions(-) rename testing/{merge_sim => execution_engine_integration}/.gitignore (100%) rename testing/{merge_sim => execution_engine_integration}/Cargo.toml (92%) rename testing/{merge_sim => execution_engine_integration}/build.rs (100%) create mode 100644 testing/execution_engine_integration/src/execution_engine.rs rename testing/{merge_sim => execution_engine_integration}/src/genesis_json.rs (100%) create mode 100644 testing/execution_engine_integration/src/main.rs rename 
testing/{merge_sim/src/main.rs => execution_engine_integration/src/test_rig.rs} (62%) diff --git a/Cargo.lock b/Cargo.lock index f11254fd84d..7803103d511 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1804,6 +1804,22 @@ dependencies = [ "uint 0.9.3", ] +[[package]] +name = "execution_engine_integration" +version = "0.1.0" +dependencies = [ + "environment", + "execution_layer", + "exit-future", + "futures", + "json", + "sensitive_url", + "task_executor", + "tempfile", + "tokio", + "types", +] + [[package]] name = "execution_layer" version = "0.1.0" @@ -3539,22 +3555,6 @@ dependencies = [ "autocfg 1.1.0", ] -[[package]] -name = "merge_sim" -version = "0.1.0" -dependencies = [ - "environment", - "execution_layer", - "exit-future", - "futures", - "json", - "sensitive_url", - "task_executor", - "tempfile", - "tokio", - "types", -] - [[package]] name = "merkle_proof" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index ffc6c38f8e2..a65d8773523 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -74,7 +74,7 @@ members = [ "testing/ef_tests", "testing/eth1_test_rig", - "testing/merge_sim", + "testing/execution_engine_integration", "testing/node_test_rig", "testing/simulator", "testing/test-test_logger", diff --git a/testing/merge_sim/.gitignore b/testing/execution_engine_integration/.gitignore similarity index 100% rename from testing/merge_sim/.gitignore rename to testing/execution_engine_integration/.gitignore diff --git a/testing/merge_sim/Cargo.toml b/testing/execution_engine_integration/Cargo.toml similarity index 92% rename from testing/merge_sim/Cargo.toml rename to testing/execution_engine_integration/Cargo.toml index fff73ac88ae..75589bf7715 100644 --- a/testing/merge_sim/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "merge_sim" +name = "execution_engine_integration" version = "0.1.0" edition = "2021" diff --git a/testing/merge_sim/build.rs b/testing/execution_engine_integration/build.rs similarity index 100% 
rename from testing/merge_sim/build.rs rename to testing/execution_engine_integration/build.rs diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs new file mode 100644 index 00000000000..abe57b914cc --- /dev/null +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -0,0 +1,138 @@ +use crate::genesis_json::geth_genesis_json; +use sensitive_url::SensitiveUrl; +use std::net::{TcpListener, UdpSocket}; +use std::path::PathBuf; +use std::process::{Child, Command, Output}; +use std::{env, fs::File}; +use tempfile::TempDir; + +pub struct ExecutionEngine { + #[allow(dead_code)] + engine: E, + #[allow(dead_code)] + datadir: TempDir, + http_port: u16, + child: Child, +} + +impl Drop for ExecutionEngine { + fn drop(&mut self) { + self.child.kill().unwrap() + } +} + +impl ExecutionEngine { + pub fn new(engine: E) -> Self { + let datadir = E::init_datadir(); + let http_port = unused_port("tcp").unwrap(); + let child = E::start_client(&datadir, http_port); + Self { + engine, + datadir, + http_port, + child, + } + } + + pub fn http_url(&self) -> SensitiveUrl { + SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_port)).unwrap() + } +} + +pub struct Geth; + +impl Geth { + fn binary_path() -> PathBuf { + let manifest_dir: PathBuf = env::var("CARGO_MANIFEST_DIR").unwrap().into(); + manifest_dir + .join("execution_clients") + .join("go-ethereum") + .join("build") + .join("bin") + .join("geth") + } +} + +pub trait GenericExecutionEngine { + fn init_datadir() -> TempDir; + fn start_client(datadir: &TempDir, http_port: u16) -> Child; +} + +impl GenericExecutionEngine for Geth { + fn init_datadir() -> TempDir { + let datadir = TempDir::new().unwrap(); + + let genesis_json_path = datadir.path().join("genesis.json"); + let mut file = File::create(&genesis_json_path).unwrap(); + let json = geth_genesis_json(); + json.write(&mut file).unwrap(); + + let output = 
Command::new(Self::binary_path()) + .arg("--datadir") + .arg(datadir.path().to_str().unwrap()) + .arg("init") + .arg(genesis_json_path.to_str().unwrap()) + .output() + .expect("failed to init geth"); + + check_command_output(output, "geth init failed"); + + datadir + } + + fn start_client(datadir: &TempDir, http_port: u16) -> Child { + Command::new(Self::binary_path()) + .arg("--datadir") + .arg(datadir.path().to_str().unwrap()) + .arg("--http") + .arg("--http.api") + .arg("engine,eth") + .arg("--http.port") + .arg(http_port.to_string()) + .spawn() + .expect("failed to start beacon node") + } +} + +fn check_command_output(output: Output, failure_msg: &'static str) { + if !output.status.success() { + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + + dbg!(stdout); + dbg!(stderr); + panic!("{}", failure_msg); + } +} + +/// A bit of hack to find an unused port. +/// +/// Does not guarantee that the given port is unused after the function exits, just that it was +/// unused before the function started (i.e., it does not reserve a port). +pub fn unused_port(transport: &str) -> Result { + let local_addr = match transport { + "tcp" => { + let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| { + format!("Failed to create TCP listener to find unused port: {:?}", e) + })?; + listener.local_addr().map_err(|e| { + format!( + "Failed to read TCP listener local_addr to find unused port: {:?}", + e + ) + })? + } + "udp" => { + let socket = UdpSocket::bind("127.0.0.1:0") + .map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?; + socket.local_addr().map_err(|e| { + format!( + "Failed to read UDP socket local_addr to find unused port: {:?}", + e + ) + })? 
+ } + _ => return Err("Invalid transport to find unused port".into()), + }; + Ok(local_addr.port()) +} diff --git a/testing/merge_sim/src/genesis_json.rs b/testing/execution_engine_integration/src/genesis_json.rs similarity index 100% rename from testing/merge_sim/src/genesis_json.rs rename to testing/execution_engine_integration/src/genesis_json.rs diff --git a/testing/execution_engine_integration/src/main.rs b/testing/execution_engine_integration/src/main.rs new file mode 100644 index 00000000000..ac0c011ca4e --- /dev/null +++ b/testing/execution_engine_integration/src/main.rs @@ -0,0 +1,11 @@ +use execution_engine::{ExecutionEngine, Geth}; +use test_rig::TestRig; + +mod execution_engine; +mod genesis_json; +mod test_rig; + +fn main() { + let geth_rig = TestRig::new(ExecutionEngine::new(Geth)); + geth_rig.perform_tests_blocking(); +} diff --git a/testing/merge_sim/src/main.rs b/testing/execution_engine_integration/src/test_rig.rs similarity index 62% rename from testing/merge_sim/src/main.rs rename to testing/execution_engine_integration/src/test_rig.rs index a6d61a1ad9e..345c780f6fb 100644 --- a/testing/merge_sim/src/main.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -1,110 +1,18 @@ +use crate::execution_engine::{ExecutionEngine, GenericExecutionEngine}; use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatusV1Status}; -use genesis_json::geth_genesis_json; -use sensitive_url::SensitiveUrl; -use std::net::{TcpListener, UdpSocket}; -use std::path::PathBuf; -use std::process::{Child, Command, Output}; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; -use std::{env, fs::File}; use task_executor::TaskExecutor; -use tempfile::TempDir; use tokio::time::sleep; use types::{Address, ChainSpec, EthSpec, Hash256, MainnetEthSpec, Uint256}; -mod genesis_json; - const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(10); -struct ExecutionEngine { - engine: E, - datadir: TempDir, - http_port: u16, - 
child: Child, -} - -impl Drop for ExecutionEngine { - fn drop(&mut self) { - self.child.kill().unwrap() - } -} - -impl ExecutionEngine { - pub fn new(engine: E) -> Self { - let datadir = E::init_datadir(); - let http_port = unused_port("tcp").unwrap(); - let child = E::start_client(&datadir, http_port); - Self { - engine, - datadir, - http_port, - child, - } - } - - pub fn http_url(&self) -> SensitiveUrl { - SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_port)).unwrap() - } -} - -struct Geth; - -impl Geth { - fn binary_path() -> PathBuf { - let manifest_dir: PathBuf = env::var("CARGO_MANIFEST_DIR").unwrap().into(); - manifest_dir - .join("execution_clients") - .join("go-ethereum") - .join("build") - .join("bin") - .join("geth") - } -} - -pub trait GenericExecutionEngine { - fn init_datadir() -> TempDir; - fn start_client(datadir: &TempDir, http_port: u16) -> Child; -} - -impl GenericExecutionEngine for Geth { - fn init_datadir() -> TempDir { - let datadir = TempDir::new().unwrap(); - - let genesis_json_path = datadir.path().join("genesis.json"); - let mut file = File::create(&genesis_json_path).unwrap(); - let json = geth_genesis_json(); - json.write(&mut file).unwrap(); - - let output = Command::new(Self::binary_path()) - .arg("--datadir") - .arg(datadir.path().to_str().unwrap()) - .arg("init") - .arg(genesis_json_path.to_str().unwrap()) - .output() - .expect("failed to init geth"); - - check_command_output(output, "geth init failed"); - - datadir - } - - fn start_client(datadir: &TempDir, http_port: u16) -> Child { - Command::new(Self::binary_path()) - .arg("--datadir") - .arg(datadir.path().to_str().unwrap()) - .arg("--http") - .arg("--http.api") - .arg("engine,eth") - .arg("--http.port") - .arg(http_port.to_string()) - .spawn() - .expect("failed to start beacon node") - } -} - -struct TestRig { +pub struct TestRig { + #[allow(dead_code)] runtime: Arc, execution_layer: ExecutionLayer, + #[allow(dead_code)] execution_engine: ExecutionEngine, spec: 
ChainSpec, _runtime_shutdown: exit_future::Signal, @@ -309,54 +217,6 @@ impl TestRig { } } -fn main() { - let geth_rig = TestRig::new(ExecutionEngine::new(Geth)); - geth_rig.perform_tests_blocking(); -} - -fn check_command_output(output: Output, failure_msg: &'static str) { - if !output.status.success() { - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - - dbg!(stdout); - dbg!(stderr); - panic!("{}", failure_msg); - } -} - -/// A bit of hack to find an unused port. -/// -/// Does not guarantee that the given port is unused after the function exits, just that it was -/// unused before the function started (i.e., it does not reserve a port). -pub fn unused_port(transport: &str) -> Result { - let local_addr = match transport { - "tcp" => { - let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| { - format!("Failed to create TCP listener to find unused port: {:?}", e) - })?; - listener.local_addr().map_err(|e| { - format!( - "Failed to read TCP listener local_addr to find unused port: {:?}", - e - ) - })? - } - "udp" => { - let socket = UdpSocket::bind("127.0.0.1:0") - .map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?; - socket.local_addr().map_err(|e| { - format!( - "Failed to read UDP socket local_addr to find unused port: {:?}", - e - ) - })? - } - _ => return Err("Invalid transport to find unused port".into()), - }; - Ok(local_addr.port()) -} - /// Returns the duration since the unix epoch. 
pub fn timestamp_now() -> u64 { SystemTime::now() From 09d02d02c15d3c9fb6f8bd56f8c5f064e8bf78e0 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 15 Feb 2022 16:00:29 +1100 Subject: [PATCH 33/92] Tidy, improve testing --- testing/execution_engine_integration/build.rs | 4 +- .../src/execution_engine.rs | 15 +- .../execution_engine_integration/src/main.rs | 4 +- .../src/test_rig.rs | 183 +++++++++++++++--- 4 files changed, 171 insertions(+), 35 deletions(-) diff --git a/testing/execution_engine_integration/build.rs b/testing/execution_engine_integration/build.rs index cfa2f21a5fa..bedf74fbd15 100644 --- a/testing/execution_engine_integration/build.rs +++ b/testing/execution_engine_integration/build.rs @@ -1,6 +1,6 @@ use std::env; use std::fs; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::process::Command; const GETH_BRANCH: &str = "merge-kiln"; @@ -17,7 +17,7 @@ fn main() { build_geth(&execution_clients_dir); } -fn build_geth(execution_clients_dir: &PathBuf) { +fn build_geth(execution_clients_dir: &Path) { let repo_dir = execution_clients_dir.join("go-ethereum"); if !repo_dir.exists() { diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs index abe57b914cc..684d8d63be4 100644 --- a/testing/execution_engine_integration/src/execution_engine.rs +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -6,6 +6,11 @@ use std::process::{Child, Command, Output}; use std::{env, fs::File}; use tempfile::TempDir; +pub trait GenericExecutionEngine: Clone { + fn init_datadir() -> TempDir; + fn start_client(datadir: &TempDir, http_port: u16) -> Child; +} + pub struct ExecutionEngine { #[allow(dead_code)] engine: E, @@ -39,6 +44,7 @@ impl ExecutionEngine { } } +#[derive(Clone)] pub struct Geth; impl Geth { @@ -53,11 +59,6 @@ impl Geth { } } -pub trait GenericExecutionEngine { - fn init_datadir() -> TempDir; - fn start_client(datadir: &TempDir, http_port: u16) 
-> Child; -} - impl GenericExecutionEngine for Geth { fn init_datadir() -> TempDir { let datadir = TempDir::new().unwrap(); @@ -81,6 +82,8 @@ impl GenericExecutionEngine for Geth { } fn start_client(datadir: &TempDir, http_port: u16) -> Child { + let network_port = unused_port("tcp").unwrap(); + Command::new(Self::binary_path()) .arg("--datadir") .arg(datadir.path().to_str().unwrap()) @@ -89,6 +92,8 @@ impl GenericExecutionEngine for Geth { .arg("engine,eth") .arg("--http.port") .arg(http_port.to_string()) + .arg("--port") + .arg(network_port.to_string()) .spawn() .expect("failed to start beacon node") } diff --git a/testing/execution_engine_integration/src/main.rs b/testing/execution_engine_integration/src/main.rs index ac0c011ca4e..d5ed480e962 100644 --- a/testing/execution_engine_integration/src/main.rs +++ b/testing/execution_engine_integration/src/main.rs @@ -1,4 +1,4 @@ -use execution_engine::{ExecutionEngine, Geth}; +use execution_engine::Geth; use test_rig::TestRig; mod execution_engine; @@ -6,6 +6,6 @@ mod genesis_json; mod test_rig; fn main() { - let geth_rig = TestRig::new(ExecutionEngine::new(Geth)); + let geth_rig = TestRig::new(Geth); geth_rig.perform_tests_blocking(); } diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 345c780f6fb..18b520ce9da 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -8,18 +8,23 @@ use types::{Address, ChainSpec, EthSpec, Hash256, MainnetEthSpec, Uint256}; const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(10); -pub struct TestRig { - #[allow(dead_code)] - runtime: Arc, +struct ExecutionPair { execution_layer: ExecutionLayer, #[allow(dead_code)] execution_engine: ExecutionEngine, +} + +pub struct TestRig { + #[allow(dead_code)] + runtime: Arc, + ee_a: ExecutionPair, + ee_b: ExecutionPair, spec: ChainSpec, _runtime_shutdown: exit_future::Signal, } impl 
TestRig { - pub fn new(execution_engine: ExecutionEngine) -> Self { + pub fn new(generic_engine: E) -> Self { let log = environment::null_logger().unwrap(); let runtime = Arc::new( tokio::runtime::Builder::new_multi_thread() @@ -31,27 +36,46 @@ impl TestRig { let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); - let mut urls = vec![]; - urls.push(execution_engine.http_url()); - let fee_recipient = None; - let execution_layer = - ExecutionLayer::from_urls(urls, fee_recipient, executor, log).unwrap(); + + let ee_a = { + let execution_engine = ExecutionEngine::new(generic_engine.clone()); + let urls = vec![execution_engine.http_url()]; + let execution_layer = + ExecutionLayer::from_urls(urls, fee_recipient, executor.clone(), log.clone()) + .unwrap(); + ExecutionPair { + execution_engine, + execution_layer, + } + }; + + let ee_b = { + let execution_engine = ExecutionEngine::new(generic_engine); + let urls = vec![execution_engine.http_url()]; + let execution_layer = + ExecutionLayer::from_urls(urls, fee_recipient, executor, log).unwrap(); + ExecutionPair { + execution_engine, + execution_layer, + } + }; let mut spec = MainnetEthSpec::default_spec(); spec.terminal_total_difficulty = Uint256::zero(); Self { runtime, - execution_layer, - execution_engine, + ee_a, + ee_b, spec, _runtime_shutdown: runtime_shutdown, } } pub fn perform_tests_blocking(&self) { - self.execution_layer + self.ee_a + .execution_layer .block_on_generic(|_| async { self.perform_tests().await }) .unwrap() } @@ -59,14 +83,14 @@ impl TestRig { pub async fn wait_until_synced(&self) { let start_instant = Instant::now(); - loop { - // Run the routine to check for online nodes. - self.execution_layer.watchdog_task().await; + for pair in [&self.ee_a, &self.ee_b] { + loop { + // Run the routine to check for online nodes. 
+ pair.execution_layer.watchdog_task().await; - if self.execution_layer.is_synced().await { - break; - } else { - if start_instant + EXECUTION_ENGINE_START_TIMEOUT > Instant::now() { + if pair.execution_layer.is_synced().await { + break; + } else if start_instant + EXECUTION_ENGINE_START_TIMEOUT > Instant::now() { sleep(Duration::from_millis(500)).await; } else { panic!("timeout waiting for execution engines to come online") @@ -78,14 +102,31 @@ impl TestRig { pub async fn perform_tests(&self) { self.wait_until_synced().await; + /* + * Read the terminal block hash from both pairs, check it's equal. + */ + let terminal_pow_block_hash = self + .ee_a .execution_layer .get_terminal_pow_block_hash(&self.spec) .await .unwrap() .unwrap(); + assert_eq!( + terminal_pow_block_hash, + self.ee_b + .execution_layer + .get_terminal_pow_block_hash(&self.spec) + .await + .unwrap() + .unwrap() + ); + /* + * Execution Engine A: + * * Produce a valid payload atop the terminal block. */ @@ -95,6 +136,7 @@ impl TestRig { let finalized_block_hash = Hash256::zero(); let proposer_index = 0; let valid_payload = self + .ee_a .execution_layer .get_payload::( parent_hash, @@ -107,6 +149,8 @@ impl TestRig { .unwrap(); /* + * Execution Engine A: + * * Indicate that the payload is the head of the chain, before submitting a * `notify_new_payload`. */ @@ -114,6 +158,7 @@ impl TestRig { let finalized_block_hash = Hash256::zero(); let payload_attributes = None; let (status, _) = self + .ee_a .execution_layer .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) .await @@ -121,10 +166,13 @@ impl TestRig { assert_eq!(status, PayloadStatusV1Status::Syncing); /* + * Execution Engine A: + * * Provide the valid payload back to the EE again. 
*/ let (status, _) = self + .ee_a .execution_layer .notify_new_payload(&valid_payload) .await @@ -132,6 +180,8 @@ impl TestRig { assert_eq!(status, PayloadStatusV1Status::Valid); /* + * Execution Engine A: + * * Indicate that the payload is the head of the chain. * * Do not provide payload attributes (we'll test that later). @@ -140,6 +190,7 @@ impl TestRig { let finalized_block_hash = Hash256::zero(); let payload_attributes = None; let (status, _) = self + .ee_a .execution_layer .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) .await @@ -147,12 +198,15 @@ impl TestRig { assert_eq!(status, PayloadStatusV1Status::Valid); /* + * Execution Engine A: + * * Provide an invalidated payload to the EE. */ let mut invalid_payload = valid_payload.clone(); invalid_payload.random = Hash256::from_low_u64_be(42); let (status, _) = self + .ee_a .execution_layer .notify_new_payload(&invalid_payload) .await @@ -163,6 +217,8 @@ impl TestRig { )); /* + * Execution Engine A: + * * Produce another payload atop the previous one. */ @@ -172,6 +228,7 @@ impl TestRig { let finalized_block_hash = Hash256::zero(); let proposer_index = 0; let second_payload = self + .ee_a .execution_layer .get_payload::( parent_hash, @@ -184,10 +241,13 @@ impl TestRig { .unwrap(); /* + * Execution Engine A: + * * Provide the second payload back to the EE again. */ let (status, _) = self + .ee_a .execution_layer .notify_new_payload(&second_payload) .await @@ -195,22 +255,93 @@ impl TestRig { assert_eq!(status, PayloadStatusV1Status::Valid); /* + * Execution Engine A: + * * Indicate that the payload is the head of the chain, providing payload attributes. 
*/ let head_block_hash = valid_payload.block_hash; let finalized_block_hash = Hash256::zero(); - let payload_attributes = PayloadAttributes { + let payload_attributes = Some(PayloadAttributes { timestamp: second_payload.timestamp + 1, random: Hash256::zero(), suggested_fee_recipient: Address::zero(), - }; + }); let (status, _) = self + .ee_a .execution_layer - .notify_forkchoice_updated( - head_block_hash, - finalized_block_hash, - Some(payload_attributes), - ) + .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Valid); + + /* + * Execution Engine B: + * + * Provide the second payload, without providing the first. + */ + let (status, _) = self + .ee_b + .execution_layer + .notify_new_payload(&second_payload) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Syncing); + + /* + * Execution Engine B: + * + * Set the second payload as the head, without providing payload attributes. + */ + let head_block_hash = second_payload.block_hash; + let finalized_block_hash = Hash256::zero(); + let payload_attributes = None; + let (status, _) = self + .ee_b + .execution_layer + .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Syncing); + + /* + * Execution Engine B: + * + * Provide the first payload to the EE. + */ + + let (status, _) = self + .ee_b + .execution_layer + .notify_new_payload(&valid_payload) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Valid); + + /* + * Execution Engine B: + * + * Provide the second payload, now the first has been provided. + */ + let (status, _) = self + .ee_b + .execution_layer + .notify_new_payload(&second_payload) + .await + .unwrap(); + assert_eq!(status, PayloadStatusV1Status::Valid); + + /* + * Execution Engine B: + * + * Set the second payload as the head, without providing payload attributes. 
+ */ + let head_block_hash = second_payload.block_hash; + let finalized_block_hash = Hash256::zero(); + let payload_attributes = None; + let (status, _) = self + .ee_b + .execution_layer + .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) .await .unwrap(); assert_eq!(status, PayloadStatusV1Status::Valid); From e2a09cc85108be18a48a5211bd92d55ef66901d6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 15 Feb 2022 16:07:21 +1100 Subject: [PATCH 34/92] Add comments, tidy --- .../execution_engine_integration/src/execution_engine.rs | 7 +++++++ testing/execution_engine_integration/src/main.rs | 4 ++++ testing/execution_engine_integration/src/test_rig.rs | 6 ++++++ 3 files changed, 17 insertions(+) diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs index 684d8d63be4..5b0b9f4b83c 100644 --- a/testing/execution_engine_integration/src/execution_engine.rs +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -6,11 +6,13 @@ use std::process::{Child, Command, Output}; use std::{env, fs::File}; use tempfile::TempDir; +/// Defined for each EE type (e.g., Geth, Nethermind, etc). pub trait GenericExecutionEngine: Clone { fn init_datadir() -> TempDir; fn start_client(datadir: &TempDir, http_port: u16) -> Child; } +/// Holds handle to a running EE process, plus some other metadata. pub struct ExecutionEngine { #[allow(dead_code)] engine: E, @@ -22,6 +24,7 @@ pub struct ExecutionEngine { impl Drop for ExecutionEngine { fn drop(&mut self) { + // Ensure the EE process is killed on drop. 
self.child.kill().unwrap() } } @@ -44,6 +47,10 @@ impl ExecutionEngine { } } +/* + * Geth-specific Implementation + */ + #[derive(Clone)] pub struct Geth; diff --git a/testing/execution_engine_integration/src/main.rs b/testing/execution_engine_integration/src/main.rs index d5ed480e962..62fd1eac7d4 100644 --- a/testing/execution_engine_integration/src/main.rs +++ b/testing/execution_engine_integration/src/main.rs @@ -1,3 +1,7 @@ +/// This binary will run integration tests between Lighthouse and other execution engines. +/// +/// If successful, the binary will exit with success (code 0). Any other return code indicates a +/// failure. use execution_engine::Geth; use test_rig::TestRig; diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 18b520ce9da..e8253036fbf 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -9,11 +9,17 @@ use types::{Address, ChainSpec, EthSpec, Hash256, MainnetEthSpec, Uint256}; const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(10); struct ExecutionPair { + /// The Lighthouse `ExecutionLayer` struct, connected to the `execution_engine` via HTTP. execution_layer: ExecutionLayer, + /// A handle to external EE process, once this is dropped the process will be killed. #[allow(dead_code)] execution_engine: ExecutionEngine, } +/// A rig that holds two EE processes for testing. +/// +/// There are two EEs held here so that we can test out-of-order application of payloads, and other +/// edge-cases. 
pub struct TestRig { #[allow(dead_code)] runtime: Arc, From 9e36525fa2f621dc9d345194d25f6dbf0724bc24 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 15 Feb 2022 19:07:39 +1100 Subject: [PATCH 35/92] Add to Makefile and tests --- Makefile | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a92da9bcc8a..bc607304af5 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,7 @@ EF_TESTS = "testing/ef_tests" STATE_TRANSITION_VECTORS = "testing/state_transition_vectors" +EXECUTION_ENGINE_INTEGRATION = "testing/execution_engine_integration" GIT_TAG := $(shell git describe --tags --candidates 1) BIN_DIR = "bin" @@ -123,12 +124,16 @@ run-state-transition-tests: # Downloads and runs the EF test vectors. test-ef: make-ef-tests run-ef-tests +# Runs tests checking interop between Lighthouse and execution clients. +test-exec-engine: + make -C $(EXECUTION_ENGINE_INTEGRATION) test + # Runs the full workspace tests in release, without downloading any additional # test vectors. test: test-release # Runs the entire test suite, downloading test vectors if required. -test-full: cargo-fmt test-release test-debug test-ef +test-full: cargo-fmt test-release test-debug test-ef test-exec-engine # Lints the code for bad style and potentially unsafe arithmetic using Clippy. # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. 
From 8416d592ecf9201d7d2021f6ad67595ae3b25cce Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 15 Feb 2022 19:10:36 +1100 Subject: [PATCH 36/92] Add to github actions --- .github/workflows/test-suite.yml | 10 ++++++++++ testing/execution_engine_integration/Makefile | 5 +++++ 2 files changed, 15 insertions(+) create mode 100644 testing/execution_engine_integration/Makefile diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 8b590f4e6ec..89f66d50d69 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -184,6 +184,16 @@ jobs: run: | cd scripts/tests ./doppelganger_protection.sh failure + execution-engine-integration-ubuntu: + name: execution-engine-integration-ubuntu + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Run exec engine integration tests in release + run: make test-exec-engine check-benchmarks: name: check-benchmarks runs-on: ubuntu-latest diff --git a/testing/execution_engine_integration/Makefile b/testing/execution_engine_integration/Makefile new file mode 100644 index 00000000000..70620650666 --- /dev/null +++ b/testing/execution_engine_integration/Makefile @@ -0,0 +1,5 @@ +test: + cargo run --release --locked + +clean: + rm -rf execution_clients From 2094c7d46bd3631055eeace6b05c0de4d1c7c313 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 15 Feb 2022 19:31:37 +1100 Subject: [PATCH 37/92] Remove github actions changes --- .github/workflows/test-suite.yml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 89f66d50d69..8b590f4e6ec 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -184,16 +184,6 @@ jobs: run: | cd scripts/tests ./doppelganger_protection.sh failure - execution-engine-integration-ubuntu: - name: execution-engine-integration-ubuntu 
- runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Run exec engine integration tests in release - run: make test-exec-engine check-benchmarks: name: check-benchmarks runs-on: ubuntu-latest From b7fa2581781c7d8f956fe6dd31d904f598db4ef5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 15 Feb 2022 19:33:29 +1100 Subject: [PATCH 38/92] Add exec tests to github actions again --- .github/workflows/test-suite.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 8b590f4e6ec..89f66d50d69 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -184,6 +184,16 @@ jobs: run: | cd scripts/tests ./doppelganger_protection.sh failure + execution-engine-integration-ubuntu: + name: execution-engine-integration-ubuntu + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Run exec engine integration tests in release + run: make test-exec-engine check-benchmarks: name: check-benchmarks runs-on: ubuntu-latest From 6d38c37922b8db7922a43b9a23f2ac82a9280471 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 16 Feb 2022 08:13:28 +1100 Subject: [PATCH 39/92] Use serde_json instead of json --- Cargo.lock | 8 +------- testing/execution_engine_integration/Cargo.toml | 2 +- .../execution_engine_integration/src/execution_engine.rs | 2 +- testing/execution_engine_integration/src/genesis_json.rs | 8 ++++---- 4 files changed, 7 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fff51167014..e2c2b63f99c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1812,8 +1812,8 @@ dependencies = [ "execution_layer", "exit-future", "futures", - "json", "sensitive_url", + "serde_json", "task_executor", "tempfile", "tokio", @@ -2739,12 +2739,6 @@ 
dependencies = [ "wasm-bindgen", ] -[[package]] -name = "json" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" - [[package]] name = "jsonrpc-core" version = "18.0.0" diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 75589bf7715..a8987ca41c1 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" [dependencies] tempfile = "3.1.0" -json = "0.12.4" +serde_json = "1.0.58" task_executor = { path = "../../common/task_executor" } tokio = { version = "1.14.0", features = ["rt-multi-thread", "macros"] } futures = "0.3.7" diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs index 5b0b9f4b83c..5e637a1caa6 100644 --- a/testing/execution_engine_integration/src/execution_engine.rs +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -73,7 +73,7 @@ impl GenericExecutionEngine for Geth { let genesis_json_path = datadir.path().join("genesis.json"); let mut file = File::create(&genesis_json_path).unwrap(); let json = geth_genesis_json(); - json.write(&mut file).unwrap(); + serde_json::to_writer(&mut file, &json).unwrap(); let output = Command::new(Self::binary_path()) .arg("--datadir") diff --git a/testing/execution_engine_integration/src/genesis_json.rs b/testing/execution_engine_integration/src/genesis_json.rs index 71e2c29ea7c..87fdaec14a4 100644 --- a/testing/execution_engine_integration/src/genesis_json.rs +++ b/testing/execution_engine_integration/src/genesis_json.rs @@ -1,10 +1,10 @@ -use json::JsonValue; +use serde_json::{json, Value}; /// Sourced from: /// /// https://notes.ethereum.org/rmVErCfCRPKGqGkUe89-Kg -pub fn geth_genesis_json() -> JsonValue { - json::object! 
{ +pub fn geth_genesis_json() -> Value { + json!({ "config": { "chainId":1, "homesteadBlock":0, @@ -38,5 +38,5 @@ pub fn geth_genesis_json() -> JsonValue { "gasUsed":"0x0", "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", "baseFeePerGas":"0x7" - } + }) } From 0e5d0724ad39515b262312e267d862a9f95d06ec Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 16 Feb 2022 08:23:07 +1100 Subject: [PATCH 40/92] Switch away from binary, suppress logs --- testing/execution_engine_integration/Cargo.toml | 2 ++ .../src/execution_engine.rs | 15 +++++++++++++-- testing/execution_engine_integration/src/lib.rs | 12 ++++++++++++ testing/execution_engine_integration/src/main.rs | 15 --------------- .../execution_engine_integration/tests/tests.rs | 7 +++++++ testing/web3signer_tests/src/lib.rs | 2 +- 6 files changed, 35 insertions(+), 18 deletions(-) create mode 100644 testing/execution_engine_integration/src/lib.rs delete mode 100644 testing/execution_engine_integration/src/main.rs create mode 100644 testing/execution_engine_integration/tests/tests.rs diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index a8987ca41c1..1ab71613e66 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -3,6 +3,8 @@ name = "execution_engine_integration" version = "0.1.0" edition = "2021" +build = "build.rs" + [dependencies] tempfile = "3.1.0" serde_json = "1.0.58" diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs index 5e637a1caa6..8804655a549 100644 --- a/testing/execution_engine_integration/src/execution_engine.rs +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -1,8 +1,8 @@ -use crate::genesis_json::geth_genesis_json; +use crate::{genesis_json::geth_genesis_json, SUPPRESS_LOGS}; use sensitive_url::SensitiveUrl; use std::net::{TcpListener, 
UdpSocket}; use std::path::PathBuf; -use std::process::{Child, Command, Output}; +use std::process::{Child, Command, Output, Stdio}; use std::{env, fs::File}; use tempfile::TempDir; @@ -101,6 +101,8 @@ impl GenericExecutionEngine for Geth { .arg(http_port.to_string()) .arg("--port") .arg(network_port.to_string()) + .stdout(build_stdio()) + .stderr(build_stdio()) .spawn() .expect("failed to start beacon node") } @@ -148,3 +150,12 @@ pub fn unused_port(transport: &str) -> Result { }; Ok(local_addr.port()) } + +/// Builds the stdout/stderr handler for commands which might output to the terminal. +fn build_stdio() -> Stdio { + if SUPPRESS_LOGS { + Stdio::null() + } else { + Stdio::inherit() + } +} diff --git a/testing/execution_engine_integration/src/lib.rs b/testing/execution_engine_integration/src/lib.rs new file mode 100644 index 00000000000..19a73e6bf29 --- /dev/null +++ b/testing/execution_engine_integration/src/lib.rs @@ -0,0 +1,12 @@ +/// This library provides integration testing between Lighthouse and other execution engines. +/// +/// See the `tests/tests.rs` file to run tests. +mod execution_engine; +mod genesis_json; +mod test_rig; + +pub use execution_engine::Geth; +pub use test_rig::TestRig; + +/// Set to `false` to send logs to the console during tests. Logs are useful when debugging. +const SUPPRESS_LOGS: bool = true; diff --git a/testing/execution_engine_integration/src/main.rs b/testing/execution_engine_integration/src/main.rs deleted file mode 100644 index 62fd1eac7d4..00000000000 --- a/testing/execution_engine_integration/src/main.rs +++ /dev/null @@ -1,15 +0,0 @@ -/// This binary will run integration tests between Lighthouse and other execution engines. -/// -/// If successful, the binary will exit with success (code 0). Any other return code indicates a -/// failure. 
-use execution_engine::Geth; -use test_rig::TestRig; - -mod execution_engine; -mod genesis_json; -mod test_rig; - -fn main() { - let geth_rig = TestRig::new(Geth); - geth_rig.perform_tests_blocking(); -} diff --git a/testing/execution_engine_integration/tests/tests.rs b/testing/execution_engine_integration/tests/tests.rs new file mode 100644 index 00000000000..a20890be0f4 --- /dev/null +++ b/testing/execution_engine_integration/tests/tests.rs @@ -0,0 +1,7 @@ +use execution_engine_integration::{Geth, TestRig}; + +#[test] +fn geth() { + let geth_rig = TestRig::new(Geth); + geth_rig.perform_tests_blocking(); +} diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index d73e4a762d0..128c4a6fe92 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -45,7 +45,7 @@ mod tests { /// assume it failed to start. const UPCHECK_TIMEOUT: Duration = Duration::from_secs(20); - /// Set to `true` to send the Web3Signer logs to the console during tests. Logs are useful when + /// Set to `false` to send the Web3Signer logs to the console during tests. Logs are useful when /// debugging. 
const SUPPRESS_WEB3SIGNER_LOGS: bool = true; From 51b90d857b137059f9e91ee955d726b8d9f28244 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 16 Feb 2022 08:24:09 +1100 Subject: [PATCH 41/92] Update Makefile for tests --- testing/execution_engine_integration/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/execution_engine_integration/Makefile b/testing/execution_engine_integration/Makefile index 70620650666..8bb2b592332 100644 --- a/testing/execution_engine_integration/Makefile +++ b/testing/execution_engine_integration/Makefile @@ -1,5 +1,5 @@ test: - cargo run --release --locked + cargo test --release --locked clean: rm -rf execution_clients From 16d9219f4fb931232556ae7ae6c829d5d3c93ff8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 16 Feb 2022 08:26:38 +1100 Subject: [PATCH 42/92] Tidy --- testing/execution_engine_integration/tests/tests.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testing/execution_engine_integration/tests/tests.rs b/testing/execution_engine_integration/tests/tests.rs index a20890be0f4..30c68f6df0e 100644 --- a/testing/execution_engine_integration/tests/tests.rs +++ b/testing/execution_engine_integration/tests/tests.rs @@ -2,6 +2,5 @@ use execution_engine_integration::{Geth, TestRig}; #[test] fn geth() { - let geth_rig = TestRig::new(Geth); - geth_rig.perform_tests_blocking(); + TestRig::new(Geth).perform_tests_blocking() } From b3bac84b503f3296e032c3b07834fc96e777b109 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 16 Feb 2022 08:46:18 +1100 Subject: [PATCH 43/92] Add `unused_port` crate --- Cargo.lock | 9 +++ Cargo.toml | 1 + beacon_node/Cargo.toml | 1 + beacon_node/lighthouse_network/Cargo.toml | 1 + .../lighthouse_network/src/discovery/mod.rs | 10 +--- .../lighthouse_network/tests/common/mod.rs | 36 +---------- beacon_node/src/config.rs | 47 +-------------- common/unused_port/Cargo.toml | 8 +++ common/unused_port/src/lib.rs | 55 +++++++++++++++++ 
lighthouse/Cargo.toml | 1 + lighthouse/tests/beacon_node.rs | 59 +++++-------------- lighthouse/tests/boot_node.rs | 14 +---- testing/eth1_test_rig/Cargo.toml | 1 + testing/eth1_test_rig/src/ganache.rs | 24 +------- .../execution_engine_integration/Cargo.toml | 1 + .../src/execution_engine.rs | 38 +----------- 16 files changed, 108 insertions(+), 198 deletions(-) create mode 100644 common/unused_port/Cargo.toml create mode 100644 common/unused_port/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index e2c2b63f99c..1c0d21c7d56 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -361,6 +361,7 @@ dependencies = [ "store", "task_executor", "types", + "unused_port", ] [[package]] @@ -1499,6 +1500,7 @@ dependencies = [ "serde_json", "tokio", "types", + "unused_port", "web3", ] @@ -1818,6 +1820,7 @@ dependencies = [ "tempfile", "tokio", "types", + "unused_port", ] [[package]] @@ -3341,6 +3344,7 @@ dependencies = [ "task_executor", "tempfile", "types", + "unused_port", "validator_client", "validator_dir", ] @@ -3396,6 +3400,7 @@ dependencies = [ "tokio-util", "types", "unsigned-varint 0.6.0", + "unused_port", "void", ] @@ -6615,6 +6620,10 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "unused_port" +version = "0.1.0" + [[package]] name = "url" version = "2.2.2" diff --git a/Cargo.toml b/Cargo.toml index a65d8773523..aee6755da14 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,6 +39,7 @@ members = [ "common/task_executor", "common/target_check", "common/test_random_derive", + "common/unused_port", "common/validator_dir", "common/warp_utils", "common/fallback", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 9710c8ccff9..46ff5ba2286 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -39,3 +39,4 @@ slasher = { path = "../slasher" } monitoring_api = { path = "../common/monitoring_api" } sensitive_url = { path = 
"../common/sensitive_url" } http_api = { path = "http_api" } +unused_port = { path = "../common/unused_port" } diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index bb30aac55af..0cc53c09e41 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -39,6 +39,7 @@ regex = "1.3.9" strum = { version = "0.21.0", features = ["derive"] } superstruct = "0.4.0" prometheus-client = "0.15.0" +unused_port = { path = "../../common/unused_port" } [dependencies.libp2p] git = "https://github.com/sigp/rust-libp2p" diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 5cc059c2a81..4f7ec432b72 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1049,17 +1049,11 @@ mod tests { use crate::rpc::methods::{MetaData, MetaDataV2}; use enr::EnrBuilder; use slog::{o, Drain}; - use std::net::UdpSocket; use types::{BitVector, MinimalEthSpec, SubnetId}; + use unused_port::unused_udp_port; type E = MinimalEthSpec; - pub fn unused_port() -> u16 { - let socket = UdpSocket::bind("127.0.0.1:0").expect("should create udp socket"); - let local_addr = socket.local_addr().expect("should read udp socket"); - local_addr.port() - } - pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { let decorator = slog_term::TermDecorator::new().build(); let drain = slog_term::FullFormat::new(decorator).build().fuse(); @@ -1075,7 +1069,7 @@ mod tests { async fn build_discovery() -> Discovery { let keypair = libp2p::identity::Keypair::generate_secp256k1(); let config = NetworkConfig { - discovery_port: unused_port(), + discovery_port: unused_udp_port().unwrap(), ..Default::default() }; let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap(); diff --git a/beacon_node/lighthouse_network/tests/common/mod.rs 
b/beacon_node/lighthouse_network/tests/common/mod.rs index 7deb2108b07..5656cf07897 100644 --- a/beacon_node/lighthouse_network/tests/common/mod.rs +++ b/beacon_node/lighthouse_network/tests/common/mod.rs @@ -6,12 +6,12 @@ use lighthouse_network::Multiaddr; use lighthouse_network::Service as LibP2PService; use lighthouse_network::{Libp2pEvent, NetworkConfig}; use slog::{debug, error, o, Drain}; -use std::net::{TcpListener, UdpSocket}; use std::sync::Arc; use std::sync::Weak; use std::time::Duration; use tokio::runtime::Runtime; use types::{ChainSpec, EnrForkId, EthSpec, ForkContext, Hash256, MinimalEthSpec}; +use unused_port::unused_tcp_port; #[allow(clippy::type_complexity)] #[allow(unused)] @@ -61,38 +61,6 @@ pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { } } -// A bit of hack to find an unused port. -/// -/// Does not guarantee that the given port is unused after the function exits, just that it was -/// unused before the function started (i.e., it does not reserve a port). -pub fn unused_port(transport: &str) -> Result { - let local_addr = match transport { - "tcp" => { - let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| { - format!("Failed to create TCP listener to find unused port: {:?}", e) - })?; - listener.local_addr().map_err(|e| { - format!( - "Failed to read TCP listener local_addr to find unused port: {:?}", - e - ) - })? - } - "udp" => { - let socket = UdpSocket::bind("127.0.0.1:0") - .map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?; - socket.local_addr().map_err(|e| { - format!( - "Failed to read UDP socket local_addr to find unused port: {:?}", - e - ) - })? 
- } - _ => return Err("Invalid transport to find unused port".into()), - }; - Ok(local_addr.port()) -} - pub fn build_config(port: u16, mut boot_nodes: Vec) -> NetworkConfig { let mut config = NetworkConfig::default(); let path = TempBuilder::new() @@ -121,7 +89,7 @@ pub async fn build_libp2p_instance( boot_nodes: Vec, log: slog::Logger, ) -> Libp2pInstance { - let port = unused_port("tcp").unwrap(); + let port = unused_tcp_port().unwrap(); let config = build_config(port, boot_nodes); // launch libp2p service diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 7487acbde09..33603b94e27 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -11,10 +11,10 @@ use std::cmp; use std::cmp::max; use std::fs; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; -use std::net::{TcpListener, UdpSocket}; use std::path::{Path, PathBuf}; use std::str::FromStr; use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes, GRAFFITI_BYTES_LEN}; +use unused_port::{unused_tcp_port, unused_udp_port}; /// Gets the fully-initialized global client. /// @@ -293,9 +293,9 @@ pub fn get_config( client_config.network.enr_address = None } client_config.network.libp2p_port = - unused_port("tcp").map_err(|e| format!("Failed to get port for libp2p: {}", e))?; + unused_tcp_port().map_err(|e| format!("Failed to get port for libp2p: {}", e))?; client_config.network.discovery_port = - unused_port("udp").map_err(|e| format!("Failed to get port for discovery: {}", e))?; + unused_udp_port().map_err(|e| format!("Failed to get port for discovery: {}", e))?; client_config.http_api.listen_port = 0; client_config.http_metrics.listen_port = 0; } @@ -785,44 +785,3 @@ pub fn get_data_dir(cli_args: &ArgMatches) -> PathBuf { }) .unwrap_or_else(|| PathBuf::from(".")) } - -/// A bit of hack to find an unused port. 
-/// -/// Does not guarantee that the given port is unused after the function exits, just that it was -/// unused before the function started (i.e., it does not reserve a port). -/// -/// Used for passing unused ports to libp2 so that lighthouse won't have to update -/// its own ENR. -/// -/// NOTE: It is possible that libp2p/discv5 is unable to bind to the -/// ports returned by this function as the OS has a buffer period where -/// it doesn't allow binding to the same port even after the socket is closed. -/// We might have to use SO_REUSEADDR socket option from `std::net2` crate in -/// that case. -pub fn unused_port(transport: &str) -> Result { - let local_addr = match transport { - "tcp" => { - let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| { - format!("Failed to create TCP listener to find unused port: {:?}", e) - })?; - listener.local_addr().map_err(|e| { - format!( - "Failed to read TCP listener local_addr to find unused port: {:?}", - e - ) - })? - } - "udp" => { - let socket = UdpSocket::bind("127.0.0.1:0") - .map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?; - socket.local_addr().map_err(|e| { - format!( - "Failed to read UDP socket local_addr to find unused port: {:?}", - e - ) - })? 
- } - _ => return Err("Invalid transport to find unused port".into()), - }; - Ok(local_addr.port()) -} diff --git a/common/unused_port/Cargo.toml b/common/unused_port/Cargo.toml new file mode 100644 index 00000000000..06c1ca8f58e --- /dev/null +++ b/common/unused_port/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "unused_port" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/common/unused_port/src/lib.rs b/common/unused_port/src/lib.rs new file mode 100644 index 00000000000..4a8cf17380d --- /dev/null +++ b/common/unused_port/src/lib.rs @@ -0,0 +1,55 @@ +use std::net::{TcpListener, UdpSocket}; + +#[derive(Copy, Clone)] +pub enum Transport { + Tcp, + Udp, +} + +/// A convenience function for `unused_port(Transport::Tcp)`. +pub fn unused_tcp_port() -> Result { + unused_port(Transport::Tcp) +} + +/// A convenience function for `unused_port(Transport::Tcp)`. +pub fn unused_udp_port() -> Result { + unused_port(Transport::Udp) +} + +/// A bit of hack to find an unused port. +/// +/// Does not guarantee that the given port is unused after the function exits, just that it was +/// unused before the function started (i.e., it does not reserve a port). +/// +/// ## Notes +/// +/// It is possible that users are unable to bind to the ports returned by this function as the OS +/// has a buffer period where it doesn't allow binding to the same port even after the socket is +/// closed. We might have to use SO_REUSEADDR socket option from `std::net2` crate in that case. +pub fn unused_port(transport: Transport) -> Result { + let local_addr = match transport { + Transport::Tcp => { + let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| { + format!("Failed to create TCP listener to find unused port: {:?}", e) + })?; + listener.local_addr().map_err(|e| { + format!( + "Failed to read TCP listener local_addr to find unused port: {:?}", + e + ) + })? 
+ } + Transport::Udp => { + let socket = UdpSocket::bind("127.0.0.1:0") + .map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?; + socket.local_addr().map_err(|e| { + format!( + "Failed to read UDP socket local_addr to find unused port: {:?}", + e + ) + })? + } + }; + + Ok(local_addr.port()) +} diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index b0ee994ec61..5cf04b3b4fc 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -44,6 +44,7 @@ serde_json = "1.0.59" task_executor = { path = "../common/task_executor" } malloc_utils = { path = "../common/malloc_utils" } directory = { path = "../common/directory" } +unused_port = { path = "../common/unused_port" } [dev-dependencies] tempfile = "3.1.0" diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index f630ed8e73b..cb69e5ca3cd 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -12,6 +12,7 @@ use std::str::FromStr; use std::string::ToString; use tempfile::TempDir; use types::{Address, Checkpoint, Epoch, Hash256}; +use unused_port::{unused_tcp_port, unused_udp_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; @@ -279,7 +280,7 @@ fn network_listen_address_flag() { } #[test] fn network_port_flag() { - let port = unused_port("tcp").expect("Unable to find unused port."); + let port = unused_tcp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("port", Some(port.to_string().as_str())) .run() @@ -290,8 +291,8 @@ fn network_port_flag() { } #[test] fn network_port_and_discovery_port_flags() { - let port1 = unused_port("tcp").expect("Unable to find unused port."); - let port2 = unused_port("udp").expect("Unable to find unused port."); + let port1 = unused_tcp_port().expect("Unable to find unused port."); + let port2 = unused_udp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("port", Some(port1.to_string().as_str())) .flag("discovery-port", 
Some(port2.to_string().as_str())) @@ -414,7 +415,7 @@ fn zero_ports_flag() { // Tests for ENR flags. #[test] fn enr_udp_port_flags() { - let port = unused_port("udp").expect("Unable to find unused port."); + let port = unused_udp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-udp-port", Some(port.to_string().as_str())) .run_with_zero_port() @@ -422,7 +423,7 @@ fn enr_udp_port_flags() { } #[test] fn enr_tcp_port_flags() { - let port = unused_port("tcp").expect("Unable to find unused port."); + let port = unused_tcp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-tcp-port", Some(port.to_string().as_str())) .run_with_zero_port() @@ -431,8 +432,8 @@ fn enr_tcp_port_flags() { #[test] fn enr_match_flag() { let addr = "127.0.0.2".parse::().unwrap(); - let port1 = unused_port("udp").expect("Unable to find unused port."); - let port2 = unused_port("udp").expect("Unable to find unused port."); + let port1 = unused_udp_port().expect("Unable to find unused port."); + let port2 = unused_udp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some("127.0.0.2")) @@ -449,7 +450,7 @@ fn enr_match_flag() { #[test] fn enr_address_flag() { let addr = "192.167.1.1".parse::().unwrap(); - let port = unused_port("udp").expect("Unable to find unused port."); + let port = unused_udp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-address", Some("192.167.1.1")) .flag("enr-udp-port", Some(port.to_string().as_str())) @@ -463,7 +464,7 @@ fn enr_address_flag() { fn enr_address_dns_flag() { let addr = "127.0.0.1".parse::().unwrap(); let ipv6addr = "::1".parse::().unwrap(); - let port = unused_port("udp").expect("Unable to find unused port."); + let port = unused_udp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("enr-address", Some("localhost")) .flag("enr-udp-port", Some(port.to_string().as_str())) @@ -502,8 
+503,8 @@ fn http_address_flag() { } #[test] fn http_port_flag() { - let port1 = unused_port("tcp").expect("Unable to find unused port."); - let port2 = unused_port("tcp").expect("Unable to find unused port."); + let port1 = unused_tcp_port().expect("Unable to find unused port."); + let port2 = unused_tcp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("http-port", Some(port1.to_string().as_str())) .flag("port", Some(port2.to_string().as_str())) @@ -573,8 +574,8 @@ fn metrics_address_flag() { } #[test] fn metrics_port_flag() { - let port1 = unused_port("tcp").expect("Unable to find unused port."); - let port2 = unused_port("tcp").expect("Unable to find unused port."); + let port1 = unused_tcp_port().expect("Unable to find unused port."); + let port2 = unused_tcp_port().expect("Unable to find unused port."); CommandLineTest::new() .flag("metrics", None) .flag("metrics-port", Some(port1.to_string().as_str())) @@ -856,35 +857,3 @@ fn ensure_panic_on_failed_launch() { assert_eq!(slasher_config.chunk_size, 10); }); } - -/// A bit of hack to find an unused port. -/// -/// Does not guarantee that the given port is unused after the function exits, just that it was -/// unused before the function started (i.e., it does not reserve a port). -pub fn unused_port(transport: &str) -> Result { - let local_addr = match transport { - "tcp" => { - let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| { - format!("Failed to create TCP listener to find unused port: {:?}", e) - })?; - listener.local_addr().map_err(|e| { - format!( - "Failed to read TCP listener local_addr to find unused port: {:?}", - e - ) - })? - } - "udp" => { - let socket = UdpSocket::bind("127.0.0.1:0") - .map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?; - socket.local_addr().map_err(|e| { - format!( - "Failed to read UDP socket local_addr to find unused port: {:?}", - e - ) - })? 
- } - _ => return Err("Invalid transport to find unused port".into()), - }; - Ok(local_addr.port()) -} diff --git a/lighthouse/tests/boot_node.rs b/lighthouse/tests/boot_node.rs index 7b3c3acb3ca..6ea25ca096d 100644 --- a/lighthouse/tests/boot_node.rs +++ b/lighthouse/tests/boot_node.rs @@ -12,6 +12,7 @@ use std::path::{Path, PathBuf}; use std::process::Command; use std::str::FromStr; use tempfile::TempDir; +use unused_port::unused_udp_port; const IP_ADDRESS: &str = "192.168.2.108"; @@ -51,15 +52,6 @@ impl CommandLineTestExec for CommandLineTest { } } -fn unused_port() -> u16 { - let socket = - UdpSocket::bind("127.0.0.1:0").expect("should create udp socket to find unused port"); - let local_addr = socket - .local_addr() - .expect("should read udp socket to find unused port"); - local_addr.port() -} - #[test] fn enr_address_arg() { let mut test = CommandLineTest::new(); @@ -70,7 +62,7 @@ fn enr_address_arg() { #[test] fn port_flag() { - let port = unused_port(); + let port = unused_udp_port(); CommandLineTest::new() .flag("port", Some(port.to_string().as_str())) .run_with_ip() @@ -130,7 +122,7 @@ fn boot_nodes_flag() { #[test] fn enr_port_flag() { - let port = unused_port(); + let port = unused_udp_port(); CommandLineTest::new() .flag("enr-port", Some(port.to_string().as_str())) .run_with_ip() diff --git a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml index a04f63f3725..787a571e8f5 100644 --- a/testing/eth1_test_rig/Cargo.toml +++ b/testing/eth1_test_rig/Cargo.toml @@ -10,3 +10,4 @@ web3 = { version = "0.17.0", default-features = false, features = ["http-tls", " types = { path = "../../consensus/types"} serde_json = "1.0.58" deposit_contract = { path = "../../common/deposit_contract"} +unused_port = { path = "../../common/unused_port" } diff --git a/testing/eth1_test_rig/src/ganache.rs b/testing/eth1_test_rig/src/ganache.rs index c48f011a689..505c010437d 100644 --- a/testing/eth1_test_rig/src/ganache.rs +++ 
b/testing/eth1_test_rig/src/ganache.rs @@ -1,9 +1,9 @@ use serde_json::json; use std::io::prelude::*; use std::io::BufReader; -use std::net::TcpListener; use std::process::{Child, Command, Stdio}; use std::time::{Duration, Instant}; +use unused_port::unused_tcp_port; use web3::{transports::Http, Transport, Web3}; /// How long we will wait for ganache to indicate that it is ready. @@ -72,7 +72,7 @@ impl GanacheInstance { /// Start a new `ganache-cli` process, waiting until it indicates that it is ready to accept /// RPC connections. pub fn new(network_id: u64, chain_id: u64) -> Result { - let port = unused_port()?; + let port = unused_tcp_port()?; let binary = match cfg!(windows) { true => "ganache-cli.cmd", false => "ganache-cli", @@ -108,7 +108,7 @@ impl GanacheInstance { } pub fn fork(&self) -> Result { - let port = unused_port()?; + let port = unused_tcp_port()?; let binary = match cfg!(windows) { true => "ganache-cli.cmd", false => "ganache-cli", @@ -188,24 +188,6 @@ fn endpoint(port: u16) -> String { format!("http://localhost:{}", port) } -/// A bit of hack to find an unused TCP port. -/// -/// Does not guarantee that the given port is unused after the function exists, just that it was -/// unused before the function started (i.e., it does not reserve a port). 
-pub fn unused_port() -> Result { - let listener = TcpListener::bind("127.0.0.1:0") - .map_err(|e| format!("Failed to create TCP listener to find unused port: {:?}", e))?; - - let local_addr = listener.local_addr().map_err(|e| { - format!( - "Failed to read TCP listener local_addr to find unused port: {:?}", - e - ) - })?; - - Ok(local_addr.port()) -} - impl Drop for GanacheInstance { fn drop(&mut self) { if cfg!(windows) { diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 1ab71613e66..cd9836dd6cd 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -16,3 +16,4 @@ environment = { path = "../../lighthouse/environment" } execution_layer = { path = "../../beacon_node/execution_layer" } sensitive_url = { path = "../../common/sensitive_url" } types = { path = "../../consensus/types" } +unused_port = { path = "../../common/unused_port" } diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs index 8804655a549..7d13c279660 100644 --- a/testing/execution_engine_integration/src/execution_engine.rs +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -1,10 +1,10 @@ use crate::{genesis_json::geth_genesis_json, SUPPRESS_LOGS}; use sensitive_url::SensitiveUrl; -use std::net::{TcpListener, UdpSocket}; use std::path::PathBuf; use std::process::{Child, Command, Output, Stdio}; use std::{env, fs::File}; use tempfile::TempDir; +use unused_port::unused_tcp_port; /// Defined for each EE type (e.g., Geth, Nethermind, etc). 
pub trait GenericExecutionEngine: Clone { @@ -32,7 +32,7 @@ impl Drop for ExecutionEngine { impl ExecutionEngine { pub fn new(engine: E) -> Self { let datadir = E::init_datadir(); - let http_port = unused_port("tcp").unwrap(); + let http_port = unused_tcp_port().unwrap(); let child = E::start_client(&datadir, http_port); Self { engine, @@ -89,7 +89,7 @@ impl GenericExecutionEngine for Geth { } fn start_client(datadir: &TempDir, http_port: u16) -> Child { - let network_port = unused_port("tcp").unwrap(); + let network_port = unused_tcp_port().unwrap(); Command::new(Self::binary_path()) .arg("--datadir") @@ -119,38 +119,6 @@ fn check_command_output(output: Output, failure_msg: &'static str) { } } -/// A bit of hack to find an unused port. -/// -/// Does not guarantee that the given port is unused after the function exits, just that it was -/// unused before the function started (i.e., it does not reserve a port). -pub fn unused_port(transport: &str) -> Result { - let local_addr = match transport { - "tcp" => { - let listener = TcpListener::bind("127.0.0.1:0").map_err(|e| { - format!("Failed to create TCP listener to find unused port: {:?}", e) - })?; - listener.local_addr().map_err(|e| { - format!( - "Failed to read TCP listener local_addr to find unused port: {:?}", - e - ) - })? - } - "udp" => { - let socket = UdpSocket::bind("127.0.0.1:0") - .map_err(|e| format!("Failed to create UDP socket to find unused port: {:?}", e))?; - socket.local_addr().map_err(|e| { - format!( - "Failed to read UDP socket local_addr to find unused port: {:?}", - e - ) - })? - } - _ => return Err("Invalid transport to find unused port".into()), - }; - Ok(local_addr.port()) -} - /// Builds the stdout/stderr handler for commands which might output to the terminal. 
fn build_stdio() -> Stdio { if SUPPRESS_LOGS { From 26809f8cf2ce91ea87515f67d2121e2ff7c96875 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 16 Feb 2022 09:39:28 +1100 Subject: [PATCH 44/92] Fix warnings and compile errors --- lighthouse/tests/beacon_node.rs | 1 - lighthouse/tests/boot_node.rs | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index cb69e5ca3cd..37c43594538 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -5,7 +5,6 @@ use lighthouse_network::PeerId; use std::fs::File; use std::io::Write; use std::net::{IpAddr, Ipv4Addr}; -use std::net::{TcpListener, UdpSocket}; use std::path::PathBuf; use std::process::Command; use std::str::FromStr; diff --git a/lighthouse/tests/boot_node.rs b/lighthouse/tests/boot_node.rs index 6ea25ca096d..1c11ae046e6 100644 --- a/lighthouse/tests/boot_node.rs +++ b/lighthouse/tests/boot_node.rs @@ -7,7 +7,7 @@ use lighthouse_network::discovery::ENR_FILENAME; use lighthouse_network::Enr; use std::fs::File; use std::io::Write; -use std::net::{Ipv4Addr, UdpSocket}; +use std::net::Ipv4Addr; use std::path::{Path, PathBuf}; use std::process::Command; use std::str::FromStr; @@ -62,7 +62,7 @@ fn enr_address_arg() { #[test] fn port_flag() { - let port = unused_udp_port(); + let port = unused_udp_port().unwrap(); CommandLineTest::new() .flag("port", Some(port.to_string().as_str())) .run_with_ip() @@ -122,7 +122,7 @@ fn boot_nodes_flag() { #[test] fn enr_port_flag() { - let port = unused_udp_port(); + let port = unused_udp_port().unwrap(); CommandLineTest::new() .flag("enr-port", Some(port.to_string().as_str())) .run_with_ip() From 6eb5b2b9dc1fc1f43800015f7a071878c9f4eb18 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 16 Feb 2022 12:16:26 +1100 Subject: [PATCH 45/92] Unify payload status parsing --- beacon_node/execution_layer/src/lib.rs | 243 ++---------------- .../execution_layer/src/payload_status.rs 
| 182 +++++++++++++ 2 files changed, 200 insertions(+), 225 deletions(-) create mode 100644 beacon_node/execution_layer/src/payload_status.rs diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 5f97c80768f..fcb5c63b0e0 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -7,10 +7,11 @@ use engine_api::{Error as ApiError, *}; use engines::{Engine, EngineError, Engines, ForkChoiceState, Logging}; use lru::LruCache; +use payload_status::{process_multiple_payload_statuses, PayloadStatus}; use sensitive_url::SensitiveUrl; -use slog::{crit, debug, error, info, warn, Logger}; +use slog::{crit, debug, error, info, Logger}; use slot_clock::SlotClock; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::future::Future; use std::sync::Arc; use std::time::Duration; @@ -25,6 +26,7 @@ pub use engine_api::{http::HttpJsonRpc, PayloadAttributes, PayloadStatusV1Status mod engine_api; mod engines; +mod payload_status; pub mod test_utils; /// Each time the `ExecutionLayer` retrieves a block from an execution node, it stores that block @@ -59,25 +61,6 @@ impl From for Error { } } -/// Provides a simpler, easier to parse version of `PayloadStatusV1` for upstream users. It -/// primarily ensures that the `latest_valid_hash` is always present when required. 
-#[derive(Debug, Clone, PartialEq)] -pub enum PayloadStatus { - Valid, - Invalid { - latest_valid_hash: Hash256, - validation_error: Option, - }, - Syncing, - Accepted, - InvalidBlockHash { - validation_error: Option, - }, - InvalidTerminalBlock { - validation_error: Option, - }, -} - #[derive(Clone)] pub struct ProposerPreparationDataEntry { update_epoch: Epoch, @@ -493,137 +476,11 @@ impl ExecutionLayer { .broadcast(|engine| engine.api.new_payload_v1(execution_payload.clone())) .await; - let mut errors = vec![]; - let mut valid_statuses = vec![]; - let mut invalid_statuses = vec![]; - let mut other_statuses = vec![]; - - for result in broadcast_results { - match result { - Err(e) => errors.push(e), - Ok(response) => match &response.status { - PayloadStatusV1Status::Valid => { - if response - .latest_valid_hash - .map_or(false, |h| h == execution_payload.block_hash) - { - // The response is only valid if `latest_valid_hash` is not `null` and - // equal to the provided `block_hash`. - valid_statuses.push(PayloadStatus::Valid) - } else { - errors.push(EngineError::Api { - id: "unknown".to_string(), - error: engine_api::Error::BadResponse( - format!( - "new_payload: response.status = VALID but invalid latest_valid_hash. Expected({:?}) Found({:?})", - execution_payload.block_hash, - response.latest_valid_hash, - ) - ), - }); - } - } - PayloadStatusV1Status::Invalid => { - if let Some(latest_valid_hash) = response.latest_valid_hash { - // The response is only valid if `latest_valid_hash` is not `null`. 
- invalid_statuses.push(PayloadStatus::Invalid { - latest_valid_hash, - validation_error: response.validation_error.clone(), - }) - } else { - errors.push(EngineError::Api { - id: "unknown".to_string(), - error: engine_api::Error::BadResponse( - "new_payload: response.status = INVALID but null latest_valid_hash".to_string() - ), - }); - } - } - PayloadStatusV1Status::InvalidBlockHash => { - // In the interests of being liberal with what we accept, only raise a - // warning here. - if response.latest_valid_hash.is_some() { - warn!( - self.log(), - "Malformed response from execution engine"; - "msg" => "expected a null latest_valid_hash", - "status" => ?response.status - ) - } - - invalid_statuses.push(PayloadStatus::InvalidBlockHash { - validation_error: response.validation_error.clone(), - }); - } - PayloadStatusV1Status::InvalidTerminalBlock => { - // In the interests of being liberal with what we accept, only raise a - // warning here. - if response.latest_valid_hash.is_some() { - warn!( - self.log(), - "Malformed response from execution engine"; - "msg" => "expected a null latest_valid_hash", - "status" => ?response.status - ) - } - - invalid_statuses.push(PayloadStatus::InvalidTerminalBlock { - validation_error: response.validation_error.clone(), - }); - } - PayloadStatusV1Status::Syncing => { - // In the interests of being liberal with what we accept, only raise a - // warning here. - if response.latest_valid_hash.is_some() { - warn!( - self.log(), - "Malformed response from execution engine"; - "msg" => "expected a null latest_valid_hash", - "status" => ?response.status - ) - } - - other_statuses.push(PayloadStatus::Syncing) - } - PayloadStatusV1Status::Accepted => { - // In the interests of being liberal with what we accept, only raise a - // warning here. 
- if response.latest_valid_hash.is_some() { - warn!( - self.log(), - "Malformed response from execution engine"; - "msg" => "expected a null latest_valid_hash", - "status" => ?response.status - ) - } - - other_statuses.push(PayloadStatus::Accepted) - } - }, - } - } - - if !valid_statuses.is_empty() && !invalid_statuses.is_empty() { - crit!( - self.log(), - "Consensus failure between execution nodes"; - "invalid_statuses" => ?invalid_statuses, - "valid_statuses" => ?valid_statuses, - "method" => "new_payload", - ); - - // Choose to exit and ignore the valid response. This preferences correctness over - // liveness. - return Err(Error::ConsensusFailure); - } - - valid_statuses - .first() - .or_else(|| invalid_statuses.first()) - .or_else(|| other_statuses.first()) - .cloned() - .map(Result::Ok) - .unwrap_or_else(|| Err(Error::EngineErrors(errors))) + process_multiple_payload_statuses( + execution_payload.block_hash, + broadcast_results.into_iter(), + self.log(), + ) } /// Maps to the `engine_consensusValidated` JSON-RPC call. @@ -644,7 +501,7 @@ impl ExecutionLayer { head_block_hash: Hash256, finalized_block_hash: Hash256, payload_attributes: Option, - ) -> Result<(PayloadStatusV1Status, Option>), Error> { + ) -> Result { debug!( self.log(), "Issuing engine_forkchoiceUpdated"; @@ -673,79 +530,15 @@ impl ExecutionLayer { }) .await; - let mut errors = vec![]; - let mut valid = 0; - let mut invalid = 0; - let mut syncing = 0; - let mut invalid_latest_valid_hash = HashSet::new(); - for result in broadcast_results { - match result { - Ok(response) => match (&response.payload_status.latest_valid_hash, &response.payload_status.status) { - (None, &PayloadStatusV1Status::Valid) => valid += 1, - (Some(latest_hash), &PayloadStatusV1Status::Valid) => { - if latest_hash == &head_block_hash { - valid += 1; - } else { - // According to a strict interpretation of the spec, the EE should never - // respond with `VALID` *and* a `latest_valid_hash`. 
- // - // For the sake of being liberal with what we accept, we will accept a - // `latest_valid_hash` *only if* it matches the submitted payload. - // Otherwise, register an error. - errors.push(EngineError::Api { - id: "unknown".to_string(), - error: engine_api::Error::BadResponse( - format!( - "forkchoice_updated: payload_status = Valid but invalid latest_valid_hash. Expected({:?}) Found({:?})", - head_block_hash, - *latest_hash, - ) - ), - }); - } - } - (Some(latest_hash), &PayloadStatusV1Status::Invalid) => { - invalid += 1; - invalid_latest_valid_hash.insert(*latest_hash); - } - (None, &PayloadStatusV1Status::InvalidTerminalBlock) => invalid += 1, - (None, &PayloadStatusV1Status::Syncing) => syncing += 1, - _ => { - errors.push(EngineError::Api { - id: "unknown".to_string(), - error: engine_api::Error::BadResponse(format!( - "forkchoice_updated: response does not conform to engine API spec: {:?}", - response - )), - }) - } - } - Err(e) => errors.push(e), - } - } - - if valid > 0 && invalid > 0 { - crit!( - self.log(), - "Consensus failure between execution nodes"; - "method" => "forkchoice_updated" - ); - // In this situation, better to have a failure of liveness than vote on a potentially invalid chain - return Err(Error::ConsensusFailure); - } + // TODO: process payload_ids - if valid > 0 { - Ok((PayloadStatusV1Status::Valid, Some(vec![head_block_hash]))) - } else if invalid > 0 { - Ok(( - PayloadStatusV1Status::Invalid, - Some(invalid_latest_valid_hash.into_iter().collect()), - )) - } else if syncing > 0 { - Ok((PayloadStatusV1Status::Syncing, None)) - } else { - Err(Error::EngineErrors(errors)) - } + process_multiple_payload_statuses( + head_block_hash, + broadcast_results + .into_iter() + .map(|result| result.map(|response| response.payload_status)), + self.log(), + ) } /// Used during block production to determine if the merge has been triggered. 
diff --git a/beacon_node/execution_layer/src/payload_status.rs b/beacon_node/execution_layer/src/payload_status.rs new file mode 100644 index 00000000000..2ada8b84d8d --- /dev/null +++ b/beacon_node/execution_layer/src/payload_status.rs @@ -0,0 +1,182 @@ +use crate::engine_api::{Error as ApiError, PayloadStatusV1, PayloadStatusV1Status}; +use crate::engines::EngineError; +use crate::Error; +use slog::{crit, warn, Logger}; +use types::Hash256; + +/// Provides a simpler, easier to parse version of `PayloadStatusV1` for upstream users. +/// +/// It primarily ensures that the `latest_valid_hash` is always present when relevant. +#[derive(Debug, Clone, PartialEq)] +pub enum PayloadStatus { + Valid, + Invalid { + latest_valid_hash: Hash256, + validation_error: Option, + }, + Syncing, + Accepted, + InvalidBlockHash { + validation_error: Option, + }, + InvalidTerminalBlock { + validation_error: Option, + }, +} + +/// Processes the responses from multiple execution engines, finding the "best" status and returning +/// it (if any). +/// +/// This function has the following basic goals: +/// +/// - Detect a consensus failure between nodes. +/// - Find the most-synced node by preferring a definite response (valid/invalid) over a +/// syncing/accepted response or error. +/// +/// # Details +/// +/// - If there are conflicting valid/invalid responses, always return an error. +/// - If there are syncing/accepted responses but valid/invalid responses exist, return the +/// valid/invalid responses since they're definite. +/// - If there are multiple valid responses, return the first one processed. +/// - If there are multiple invalid responses, return the first one processed. +/// - Syncing/accepted responses are grouped, if there are multiple of them, return the first one +/// processed. +/// - If there are no responses (only errors or nothing), return an error. 
+pub fn process_multiple_payload_statuses( + head_block_hash: Hash256, + statuses: impl Iterator>, + log: &Logger, +) -> Result { + let mut errors = vec![]; + let mut valid_statuses = vec![]; + let mut invalid_statuses = vec![]; + let mut other_statuses = vec![]; + + for status in statuses { + match status { + Err(e) => errors.push(e), + Ok(response) => match &response.status { + PayloadStatusV1Status::Valid => { + if response + .latest_valid_hash + .map_or(false, |h| h == head_block_hash) + { + // The response is only valid if `latest_valid_hash` is not `null` and + // equal to the provided `block_hash`. + valid_statuses.push(PayloadStatus::Valid) + } else { + errors.push(EngineError::Api { + id: "unknown".to_string(), + error: ApiError::BadResponse( + format!( + "new_payload: response.status = VALID but invalid latest_valid_hash. Expected({:?}) Found({:?})", + head_block_hash, + response.latest_valid_hash, + ) + ), + }); + } + } + PayloadStatusV1Status::Invalid => { + if let Some(latest_valid_hash) = response.latest_valid_hash { + // The response is only valid if `latest_valid_hash` is not `null`. + invalid_statuses.push(PayloadStatus::Invalid { + latest_valid_hash, + validation_error: response.validation_error.clone(), + }) + } else { + errors.push(EngineError::Api { + id: "unknown".to_string(), + error: ApiError::BadResponse( + "new_payload: response.status = INVALID but null latest_valid_hash" + .to_string(), + ), + }); + } + } + PayloadStatusV1Status::InvalidBlockHash => { + // In the interests of being liberal with what we accept, only raise a + // warning here. 
+ if response.latest_valid_hash.is_some() { + warn!( + log, + "Malformed response from execution engine"; + "msg" => "expected a null latest_valid_hash", + "status" => ?response.status + ) + } + + invalid_statuses.push(PayloadStatus::InvalidBlockHash { + validation_error: response.validation_error.clone(), + }); + } + PayloadStatusV1Status::InvalidTerminalBlock => { + // In the interests of being liberal with what we accept, only raise a + // warning here. + if response.latest_valid_hash.is_some() { + warn!( + log, + "Malformed response from execution engine"; + "msg" => "expected a null latest_valid_hash", + "status" => ?response.status + ) + } + + invalid_statuses.push(PayloadStatus::InvalidTerminalBlock { + validation_error: response.validation_error.clone(), + }); + } + PayloadStatusV1Status::Syncing => { + // In the interests of being liberal with what we accept, only raise a + // warning here. + if response.latest_valid_hash.is_some() { + warn!( + log, + "Malformed response from execution engine"; + "msg" => "expected a null latest_valid_hash", + "status" => ?response.status + ) + } + + other_statuses.push(PayloadStatus::Syncing) + } + PayloadStatusV1Status::Accepted => { + // In the interests of being liberal with what we accept, only raise a + // warning here. + if response.latest_valid_hash.is_some() { + warn!( + log, + "Malformed response from execution engine"; + "msg" => "expected a null latest_valid_hash", + "status" => ?response.status + ) + } + + other_statuses.push(PayloadStatus::Accepted) + } + }, + } + } + + if !valid_statuses.is_empty() && !invalid_statuses.is_empty() { + crit!( + log, + "Consensus failure between execution nodes"; + "invalid_statuses" => ?invalid_statuses, + "valid_statuses" => ?valid_statuses, + ); + + // Choose to exit and ignore the valid response. This preferences correctness over + // liveness. 
+ return Err(Error::ConsensusFailure); + } + + valid_statuses + .first() + .or_else(|| invalid_statuses.first()) + .or_else(|| other_statuses.first()) + .cloned() + .map(Result::Ok) + .unwrap_or_else(|| Err(Error::EngineErrors(errors))) +} From 246a1b6f87058420c8b9a32eb6d843df0ef3aa51 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 16 Feb 2022 12:32:19 +1100 Subject: [PATCH 46/92] Use `PayloadStatus` in beacon chain --- beacon_node/beacon_chain/src/beacon_chain.rs | 26 +++++++++++-------- .../beacon_chain/src/block_verification.rs | 7 ++--- beacon_node/beacon_chain/src/errors.rs | 5 ++-- .../beacon_chain/src/execution_payload.rs | 25 +++++++++--------- .../tests/payload_invalidation.rs | 4 ++- beacon_node/execution_layer/src/lib.rs | 3 ++- 6 files changed, 37 insertions(+), 33 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index c9483869504..0e1941bb3e4 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -52,7 +52,7 @@ use crate::{metrics, BeaconChainError}; use eth2::types::{ EventKind, SseBlock, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead, SyncDuty, }; -use execution_layer::{ExecutionLayer, PayloadStatusV1Status}; +use execution_layer::{ExecutionLayer, PayloadStatus}; use fork_choice::{AttestationFromBlock, ForkChoice}; use futures::channel::mpsc::Sender; use itertools::process_results; @@ -3744,12 +3744,12 @@ impl BeaconChain { .map_err(Error::ExecutionForkChoiceUpdateFailed); match forkchoice_updated_response { - Ok((status, latest_valid_hash)) => match status { - PayloadStatusV1Status::Valid | PayloadStatusV1Status::Syncing => Ok(()), + Ok(status) => match status { + PayloadStatus::Valid | PayloadStatus::Syncing => Ok(()), // The specification doesn't list `ACCEPTED` as a valid response to a fork choice // update. This response *seems* innocent enough, so we won't return early with an // error. 
However, we create a log to bring attention to the issue. - PayloadStatusV1Status::Accepted => { + PayloadStatus::Accepted => { warn!( log, "Fork choice update received ACCEPTED"; @@ -3759,16 +3759,20 @@ impl BeaconChain { ); Ok(()) } - PayloadStatusV1Status::Invalid - | PayloadStatusV1Status::InvalidTerminalBlock - | PayloadStatusV1Status::InvalidBlockHash => { + PayloadStatus::Invalid { + latest_valid_hash, .. + } => { // TODO(bellatrix): process the invalid payload. // // See: https://github.com/sigp/lighthouse/pull/2837 - Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { - status, - latest_valid_hash, - }) + Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) + } + PayloadStatus::InvalidTerminalBlock { .. } + | PayloadStatus::InvalidBlockHash { .. } => { + // TODO(bellatrix): process the invalid payload. + // + // See: https://github.com/sigp/lighthouse/pull/2837 + Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } }, Err(e) => Err(e), diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index a87d0d7a7ea..ef0ca74b6be 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -54,7 +54,7 @@ use crate::{ metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use eth2::types::EventKind; -use execution_layer::PayloadStatusV1Status; +use execution_layer::PayloadStatus; use fork_choice::{ForkChoice, ForkChoiceStore, PayloadVerificationStatus}; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; @@ -270,10 +270,7 @@ pub enum ExecutionPayloadError { /// ## Peer scoring /// /// The block is invalid and the peer is faulty - RejectedByExecutionEngine { - status: PayloadStatusV1Status, - latest_valid_hash: Option>, - }, + RejectedByExecutionEngine { status: PayloadStatus }, /// The execution payload timestamp does not match the slot /// /// ## Peer scoring diff --git 
a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index cb1f663b369..c3880300682 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -8,7 +8,7 @@ use crate::naive_aggregation_pool::Error as NaiveAggregationError; use crate::observed_aggregates::Error as ObservedAttestationsError; use crate::observed_attesters::Error as ObservedAttestersError; use crate::observed_block_producers::Error as ObservedBlockProducersError; -use execution_layer::PayloadStatusV1Status; +use execution_layer::PayloadStatus; use futures::channel::mpsc::TrySendError; use operation_pool::OpPoolError; use safe_arith::ArithError; @@ -139,8 +139,7 @@ pub enum BeaconChainError { ExecutionLayerMissing, ExecutionForkChoiceUpdateFailed(execution_layer::Error), ExecutionForkChoiceUpdateInvalid { - status: PayloadStatusV1Status, - latest_valid_hash: Option>, + status: PayloadStatus, }, BlockRewardSlotError, BlockRewardAttestationError, diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index cc90ce94272..67dd756c091 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -11,7 +11,7 @@ use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; -use execution_layer::PayloadStatusV1Status; +use execution_layer::PayloadStatus; use fork_choice::PayloadVerificationStatus; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; @@ -58,24 +58,25 @@ pub fn notify_new_payload( .block_on(|execution_layer| execution_layer.notify_new_payload(execution_payload)); match new_payload_response { - Ok((status, latest_valid_hash)) => match status { - PayloadStatusV1Status::Valid => Ok(PayloadVerificationStatus::Verified), - PayloadStatusV1Status::Syncing | PayloadStatusV1Status::Accepted => { + Ok(status) => match status { + 
PayloadStatus::Valid => Ok(PayloadVerificationStatus::Verified), + PayloadStatus::Syncing | PayloadStatus::Accepted => { Ok(PayloadVerificationStatus::NotVerified) } - PayloadStatusV1Status::Invalid - | PayloadStatusV1Status::InvalidTerminalBlock - | PayloadStatusV1Status::InvalidBlockHash => { + PayloadStatus::Invalid { + latest_valid_hash, .. + } => { // This block has not yet been applied to fork choice, so the latest block that was // imported to fork choice was the parent. let latest_root = block.parent_root(); chain.process_invalid_execution_payload(latest_root, latest_valid_hash)?; - Err(ExecutionPayloadError::RejectedByExecutionEngine { - status, - latest_valid_hash, - } - .into()) + Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()) + } + PayloadStatus::InvalidTerminalBlock { .. } | PayloadStatus::InvalidBlockHash { .. } => { + // There is no `latest_valid_hash` provided by the execution engine, so there's no + // scope for invalidating ancestors of this block. Just return an error. + Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()) } }, Err(e) => Err(ExecutionPayloadError::RequestFailed(e).into()), diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 3d0694ede83..e8de39c5286 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -110,7 +110,9 @@ impl InvalidPayloadRig { self.import_block_parametric(is_valid, |error| { matches!( error, - BlockError::ExecutionPayloadError(ExecutionPayloadError::RejectedByExecutionEngine) + BlockError::ExecutionPayloadError( + ExecutionPayloadError::RejectedByExecutionEngine { .. 
} + ) ) }) } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index fcb5c63b0e0..82b38142615 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -7,7 +7,7 @@ use engine_api::{Error as ApiError, *}; use engines::{Engine, EngineError, Engines, ForkChoiceState, Logging}; use lru::LruCache; -use payload_status::{process_multiple_payload_statuses, PayloadStatus}; +use payload_status::process_multiple_payload_statuses; use sensitive_url::SensitiveUrl; use slog::{crit, debug, error, info, Logger}; use slot_clock::SlotClock; @@ -23,6 +23,7 @@ use tokio::{ use types::{ChainSpec, Epoch, ProposerPreparationData}; pub use engine_api::{http::HttpJsonRpc, PayloadAttributes, PayloadStatusV1Status}; +pub use payload_status::PayloadStatus; mod engine_api; mod engines; From 21455917fe605338204076f1df9f75684444a283 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 16 Feb 2022 17:19:36 +1100 Subject: [PATCH 47/92] Fix test compile errors --- .../src/test_rig.rs | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index e8253036fbf..eff865fac08 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -1,5 +1,5 @@ use crate::execution_engine::{ExecutionEngine, GenericExecutionEngine}; -use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatusV1Status}; +use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatus}; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; @@ -163,13 +163,13 @@ impl TestRig { let head_block_hash = valid_payload.block_hash; let finalized_block_hash = Hash256::zero(); let payload_attributes = None; - let (status, _) = self + let status = self .ee_a .execution_layer 
.notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Syncing); + assert_eq!(status, PayloadStatus::Syncing); /* * Execution Engine A: @@ -177,13 +177,13 @@ impl TestRig { * Provide the valid payload back to the EE again. */ - let (status, _) = self + let status = self .ee_a .execution_layer .notify_new_payload(&valid_payload) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Valid); + assert_eq!(status, PayloadStatus::Valid); /* * Execution Engine A: @@ -195,13 +195,13 @@ impl TestRig { let head_block_hash = valid_payload.block_hash; let finalized_block_hash = Hash256::zero(); let payload_attributes = None; - let (status, _) = self + let status = self .ee_a .execution_layer .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Valid); + assert_eq!(status, PayloadStatus::Valid); /* * Execution Engine A: @@ -211,7 +211,7 @@ impl TestRig { let mut invalid_payload = valid_payload.clone(); invalid_payload.random = Hash256::from_low_u64_be(42); - let (status, _) = self + let status = self .ee_a .execution_layer .notify_new_payload(&invalid_payload) @@ -219,7 +219,7 @@ impl TestRig { .unwrap(); assert!(matches!( status, - PayloadStatusV1Status::Invalid | PayloadStatusV1Status::InvalidBlockHash + PayloadStatus::Invalid { latest_valid_hash, .. } if latest_valid_hash == valid_payload.block_hash )); /* @@ -252,13 +252,13 @@ impl TestRig { * Provide the second payload back to the EE again. 
*/ - let (status, _) = self + let status = self .ee_a .execution_layer .notify_new_payload(&second_payload) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Valid); + assert_eq!(status, PayloadStatus::Valid); /* * Execution Engine A: @@ -272,26 +272,26 @@ impl TestRig { random: Hash256::zero(), suggested_fee_recipient: Address::zero(), }); - let (status, _) = self + let status = self .ee_a .execution_layer .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Valid); + assert_eq!(status, PayloadStatus::Valid); /* * Execution Engine B: * * Provide the second payload, without providing the first. */ - let (status, _) = self + let status = self .ee_b .execution_layer .notify_new_payload(&second_payload) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Syncing); + assert_eq!(status, PayloadStatus::Syncing); /* * Execution Engine B: @@ -301,13 +301,13 @@ impl TestRig { let head_block_hash = second_payload.block_hash; let finalized_block_hash = Hash256::zero(); let payload_attributes = None; - let (status, _) = self + let status = self .ee_b .execution_layer .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Syncing); + assert_eq!(status, PayloadStatus::Syncing); /* * Execution Engine B: @@ -315,26 +315,26 @@ impl TestRig { * Provide the first payload to the EE. */ - let (status, _) = self + let status = self .ee_b .execution_layer .notify_new_payload(&valid_payload) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Valid); + assert_eq!(status, PayloadStatus::Valid); /* * Execution Engine B: * * Provide the second payload, now the first has been provided. 
*/ - let (status, _) = self + let status = self .ee_b .execution_layer .notify_new_payload(&second_payload) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Valid); + assert_eq!(status, PayloadStatus::Valid); /* * Execution Engine B: @@ -344,13 +344,13 @@ impl TestRig { let head_block_hash = second_payload.block_hash; let finalized_block_hash = Hash256::zero(); let payload_attributes = None; - let (status, _) = self + let status = self .ee_b .execution_layer .notify_forkchoice_updated(head_block_hash, finalized_block_hash, payload_attributes) .await .unwrap(); - assert_eq!(status, PayloadStatusV1Status::Valid); + assert_eq!(status, PayloadStatus::Valid); } } From 09f49963ed4959bd56689225f5aedb531447a4e4 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 16 Feb 2022 18:09:10 +1100 Subject: [PATCH 48/92] Tidy comments around fork choice response --- beacon_node/execution_layer/src/lib.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 42013ad23b3..10ae6b3eb02 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -605,17 +605,16 @@ impl ExecutionLayer { for result in broadcast_results { match result { Ok(response) => match (&response.payload_status.latest_valid_hash, &response.payload_status.status) { + // TODO(bellatrix) a strict interpretation of the v1.0.0.alpha.6 spec says that + // `latest_valid_hash` *cannot* be `None`. However, we accept it to maintain + // Geth compatibility for the short term. See: + // + // https://github.com/ethereum/go-ethereum/issues/24404 (None, &PayloadStatusV1Status::Valid) => valid += 1, (Some(latest_hash), &PayloadStatusV1Status::Valid) => { if latest_hash == &head_block_hash { valid += 1; } else { - // According to a strict interpretation of the spec, the EE should never - // respond with `VALID` *and* a `latest_valid_hash`. 
- // - // For the sake of being liberal with what we accept, we will accept a - // `latest_valid_hash` *only if* it matches the submitted payload. - // Otherwise, register an error. errors.push(EngineError::Api { id: "unknown".to_string(), error: engine_api::Error::BadResponse( From 5e054baf9c0ef2992fec5fb2ba0bca5a8dcc5c54 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 17 Feb 2022 09:27:24 +1100 Subject: [PATCH 49/92] Don't panic in drop --- testing/execution_engine_integration/src/execution_engine.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs index 7d13c279660..cff36a025bd 100644 --- a/testing/execution_engine_integration/src/execution_engine.rs +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -25,7 +25,9 @@ pub struct ExecutionEngine { impl Drop for ExecutionEngine { fn drop(&mut self) { // Ensure the EE process is killed on drop. - self.child.kill().unwrap() + if let Err(e) = self.child.kill() { + eprintln!("failed to kill child: {:?}", e) + } } } From b461bf9bbf7925504d5e0643ebba65650c32b8f3 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 17 Feb 2022 09:47:17 +1100 Subject: [PATCH 50/92] Partially add invalidation call to fork choice --- beacon_node/beacon_chain/src/beacon_chain.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0e1941bb3e4..76d1e006352 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3762,9 +3762,13 @@ impl BeaconChain { PayloadStatus::Invalid { latest_valid_hash, .. } => { - // TODO(bellatrix): process the invalid payload. 
- // - // See: https://github.com/sigp/lighthouse/pull/2837 + // The execution engine has stated that all blocks between the + // `head_execution_block_hash` and `latest_valid_hash` are invalid. + self.process_invalid_execution_payload( + head_execution_block_hash, + latest_valid_hash, + )?; + Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } PayloadStatus::InvalidTerminalBlock { .. } From 22439c2526967d75fe61e0431507a8c2974ec066 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 17 Feb 2022 11:02:55 +1100 Subject: [PATCH 51/92] Simplify forkchoice update method on beacon chain --- beacon_node/beacon_chain/src/beacon_chain.rs | 74 +++++++++++--------- beacon_node/beacon_chain/src/errors.rs | 1 + beacon_node/client/src/builder.rs | 25 ++++--- 3 files changed, 52 insertions(+), 48 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 76d1e006352..06c47993d3d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3677,52 +3677,53 @@ impl BeaconChain { // If this is a post-merge block, update the execution layer. if let Some(new_head_execution_block_hash) = new_head_execution_block_hash_opt { if is_merge_transition_complete { - let execution_layer = self - .execution_layer - .clone() - .ok_or(Error::ExecutionLayerMissing)?; - let store = self.store.clone(); - let log = self.log.clone(); - - // Spawn the update task, without waiting for it to complete. 
- execution_layer.spawn( - move |execution_layer| async move { - if let Err(e) = Self::update_execution_engine_forkchoice( - execution_layer, - store, - new_finalized_checkpoint.root, - new_head_execution_block_hash, - &log, - ) - .await - { - crit!( - log, - "Failed to update execution head"; - "error" => ?e - ); - } - }, - "update_execution_engine_forkchoice", - ) + if let Err(e) = self.update_execution_engine_forkchoice_blocking( + new_finalized_checkpoint.root, + new_head_execution_block_hash, + ) { + crit!( + self.log, + "Failed to update execution head"; + "error" => ?e + ); + } } } Ok(()) } - pub async fn update_execution_engine_forkchoice( - execution_layer: ExecutionLayer, - store: BeaconStore, + pub fn update_execution_engine_forkchoice_blocking( + &self, + finalized_beacon_block_root: Hash256, + head_execution_block_hash: Hash256, + ) -> Result<(), Error> { + let execution_layer = self + .execution_layer + .as_ref() + .ok_or(Error::ExecutionLayerMissing)?; + + execution_layer + .block_on_generic(|_| { + self.update_execution_engine_forkchoice_async( + finalized_beacon_block_root, + head_execution_block_hash, + ) + }) + .map_err(Error::ForkchoiceUpdate)? + } + + pub async fn update_execution_engine_forkchoice_async( + &self, finalized_beacon_block_root: Hash256, head_execution_block_hash: Hash256, - log: &Logger, ) -> Result<(), Error> { // Loading the finalized block from the store is not ideal. Perhaps it would be better to // store it on fork-choice so we can do a lookup without hitting the database. // // See: https://github.com/sigp/lighthouse/pull/2627#issuecomment-927537245 - let finalized_block = store + let finalized_block = self + .store .get_block(&finalized_beacon_block_root)? 
.ok_or(Error::MissingBeaconBlock(finalized_beacon_block_root))?; @@ -3734,7 +3735,10 @@ impl BeaconChain { .map(|ep| ep.block_hash) .unwrap_or_else(Hash256::zero); - let forkchoice_updated_response = execution_layer + let forkchoice_updated_response = self + .execution_layer + .as_ref() + .ok_or(Error::ExecutionLayerMissing)? .notify_forkchoice_updated( head_execution_block_hash, finalized_execution_block_hash, @@ -3751,7 +3755,7 @@ impl BeaconChain { // error. However, we create a log to bring attention to the issue. PayloadStatus::Accepted => { warn!( - log, + self.log, "Fork choice update received ACCEPTED"; "msg" => "execution engine provided an unexpected response to a fork \ choice update. although this is not a serious issue, please raise \ diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index c3880300682..68ae36739d6 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -150,6 +150,7 @@ pub enum BeaconChainError { JustifiedPayloadInvalid { justified_root: Hash256, }, + ForkchoiceUpdate(execution_layer::Error), } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index c3e0f8af5c0..32d29127d4a 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -662,9 +662,6 @@ where ); if let Some(execution_layer) = beacon_chain.execution_layer.as_ref() { - let store = beacon_chain.store.clone(); - let inner_execution_layer = execution_layer.clone(); - let head = beacon_chain .head_info() .map_err(|e| format!("Unable to read beacon chain head: {:?}", e))?; @@ -672,18 +669,20 @@ where // Issue the head to the execution engine on startup. This ensures it can start // syncing. if let Some(block_hash) = head.execution_payload_block_hash { + // Spawn a new task using the "async" fork choice update method, rather than + // using the "blocking" method. 
+ // + // Using the blocking method may cause a panic if this code is run inside an + // async context. + let inner_chain = beacon_chain.clone(); runtime_context.executor.spawn( async move { - let result = BeaconChain::< - Witness, - >::update_execution_engine_forkchoice( - inner_execution_layer, - store, - head.finalized_checkpoint.root, - block_hash, - &log, - ) - .await; + let result = inner_chain + .update_execution_engine_forkchoice_async( + head.finalized_checkpoint.root, + block_hash, + ) + .await; // No need to exit early if setting the head fails. It will be set again if/when the // node comes online. From dd57c3101e7f52f49d4541fb5c8fe0a284bad092 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 17 Feb 2022 11:21:31 +1100 Subject: [PATCH 52/92] Add invalidation to fork choice --- beacon_node/beacon_chain/src/beacon_chain.rs | 12 ++++++++---- .../beacon_chain/src/execution_payload.rs | 7 ++++--- consensus/fork_choice/src/fork_choice.rs | 2 +- consensus/proto_array/src/error.rs | 2 +- consensus/proto_array/src/proto_array.rs | 19 +++++++++++++++---- .../src/proto_array_fork_choice.rs | 2 +- 6 files changed, 30 insertions(+), 14 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 06c47993d3d..e92d84354cd 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3198,7 +3198,7 @@ impl BeaconChain { pub fn process_invalid_execution_payload( &self, latest_root: Hash256, - latest_valid_hash: Hash256, + latest_valid_hash: Option, ) -> Result<(), Error> { debug!( self.log, @@ -3770,16 +3770,20 @@ impl BeaconChain { // `head_execution_block_hash` and `latest_valid_hash` are invalid. self.process_invalid_execution_payload( head_execution_block_hash, - latest_valid_hash, + Some(latest_valid_hash), )?; Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } PayloadStatus::InvalidTerminalBlock { .. 
} | PayloadStatus::InvalidBlockHash { .. } => {
-                // TODO(bellatrix): process the invalid payload.
+                // The execution engine has stated that the head block is invalid, however it
+                // hasn't returned a latest valid ancestor.
                 //
-                // See: https://github.com/sigp/lighthouse/pull/2837
+                // Using a `None` latest valid ancestor will result in only the head block
+                // being invalidated (no ancestors).
+                self.process_invalid_execution_payload(head_execution_block_hash, None)?;
+
                 Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status })
                 }
             },
diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs
index 67dd756c091..c89758ab74e 100644
--- a/beacon_node/beacon_chain/src/execution_payload.rs
+++ b/beacon_node/beacon_chain/src/execution_payload.rs
@@ -69,13 +69,14 @@ pub fn notify_new_payload(
             // This block has not yet been applied to fork choice, so the latest block that was
             // imported to fork choice was the parent.
             let latest_root = block.parent_root();
-            chain.process_invalid_execution_payload(latest_root, latest_valid_hash)?;
+            chain.process_invalid_execution_payload(latest_root, Some(latest_valid_hash))?;

             Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into())
         }
         PayloadStatus::InvalidTerminalBlock { .. } | PayloadStatus::InvalidBlockHash { .. } => {
-            // There is no `latest_valid_hash` provided by the execution engine, so there's no
-            // scope for invalidating ancestors of this block. Just return an error.
+            // Returning an error here should be sufficient to invalidate the block. We have no
+            // information to indicate its parent is invalid, so no need to run
+            // `BeaconChain::process_invalid_execution_payload`.
Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()) } }, diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 463cadec245..d27db101de3 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -468,7 +468,7 @@ where pub fn on_invalid_execution_payload( &mut self, head_block_root: Hash256, - latest_valid_ancestor_root: Hash256, + latest_valid_ancestor_root: Option, ) -> Result<(), Error> { self.proto_array .process_execution_payload_invalidation(head_block_root, latest_valid_ancestor_root) diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index cdf80a9accb..60b39e0a68c 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -39,7 +39,7 @@ pub enum Error { }, UnknownLatestValidAncestorHash { block_root: Hash256, - latest_valid_ancestor_hash: Hash256, + latest_valid_ancestor_hash: Option, }, } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 3aae97e5541..dcd990cb4f7 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -310,7 +310,7 @@ impl ProtoArray { pub fn propagate_execution_payload_invalidation( &mut self, head_block_root: Hash256, - latest_valid_ancestor_hash: Hash256, + latest_valid_ancestor_hash: Option, ) -> Result<(), Error> { let mut invalidated_indices: HashSet = <_>::default(); let mut index = *self @@ -319,8 +319,8 @@ impl ProtoArray { .ok_or(Error::NodeUnknown(head_block_root))?; // Try to map the ancestor payload *hash* to an ancestor beacon block *root*. 
- let latest_valid_ancestor_root = - self.execution_block_hash_to_beacon_block_root(&latest_valid_ancestor_hash); + let latest_valid_ancestor_root = latest_valid_ancestor_hash + .and_then(|hash| self.execution_block_hash_to_beacon_block_root(&hash)); // Set to `true` if both conditions are satisfied: // @@ -355,9 +355,14 @@ impl ProtoArray { block_root: node.root, latest_valid_ancestor_hash, }); - } else if hash == latest_valid_ancestor_hash { + } else if Some(hash) == latest_valid_ancestor_hash { // If the `best_child` or `best_descendant` of the latest valid hash was // invalidated, set those fields to `None`. + // + // In theory, an invalid `best_child` necessarily infers an invalid + // `best_descendant`. However, we check each variable independently to + // defend against errors which might result in an invalid block being set as + // head. node.best_child = node .best_child .filter(|best_child| invalidated_indices.contains(best_child)); @@ -385,6 +390,12 @@ impl ProtoArray { } ExecutionStatus::Unknown(hash) => { node.execution_status = ExecutionStatus::Invalid(*hash); + + // It's impossible for an invalid block to lead to a "best" block, so set these + // fields to `None`. + // + // Failing to set these values will result in `Self::node_leads_to_viable_head` + // returning `false` for *valid* ancestors of invalid blocks. 
node.best_child = None; node.best_descendant = None; } diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 30294b160df..1bc6a81a233 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -165,7 +165,7 @@ impl ProtoArrayForkChoice { pub fn process_execution_payload_invalidation( &mut self, head_block_root: Hash256, - latest_valid_ancestor_root: Hash256, + latest_valid_ancestor_root: Option, ) -> Result<(), String> { self.proto_array .propagate_execution_payload_invalidation(head_block_root, latest_valid_ancestor_root) From e38e819791ea448980f5c9f2a9ccae41df2fa1ff Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 17 Feb 2022 11:29:00 +1100 Subject: [PATCH 53/92] Bump `cc` version --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1c0d21c7d56..c9a9e69683d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -627,9 +627,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.72" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" [[package]] name = "cexpr" From 3059f2285d71f57c54d64425ab5b9c4bab246b78 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 17 Feb 2022 11:42:01 +1100 Subject: [PATCH 54/92] Update geth responses --- testing/execution_engine_integration/src/test_rig.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index eff865fac08..23b50f94f75 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -217,10 +217,7 @@ impl TestRig { 
.notify_new_payload(&invalid_payload) .await .unwrap(); - assert!(matches!( - status, - PayloadStatus::Invalid { latest_valid_hash, .. } if latest_valid_hash == valid_payload.block_hash - )); + assert!(matches!(status, PayloadStatus::InvalidBlockHash { .. })); /* * Execution Engine A: @@ -291,7 +288,7 @@ impl TestRig { .notify_new_payload(&second_payload) .await .unwrap(); - assert_eq!(status, PayloadStatus::Syncing); + assert_eq!(status, PayloadStatus::Accepted); /* * Execution Engine B: From 5c543a08ef951362f230c82564aef5fdd6499edc Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 17 Feb 2022 13:16:08 +1100 Subject: [PATCH 55/92] Use windows-2019 for CI --- .github/workflows/test-suite.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 89f66d50d69..04f7659fe29 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -44,7 +44,7 @@ jobs: run: make test-release release-tests-windows: name: release-tests-windows - runs-on: windows-latest + runs-on: windows-2019 needs: cargo-fmt steps: - uses: actions/checkout@v1 From d86bbd3b66453e8ae8b720ee8bb41f3f2f84db30 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 17 Feb 2022 13:16:08 +1100 Subject: [PATCH 56/92] Use windows-2019 for CI --- .github/workflows/test-suite.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 89f66d50d69..04f7659fe29 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -44,7 +44,7 @@ jobs: run: make test-release release-tests-windows: name: release-tests-windows - runs-on: windows-latest + runs-on: windows-2019 needs: cargo-fmt steps: - uses: actions/checkout@v1 From bbdec5cca948feb43b588bfd3712f56db05c24b6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 18 Feb 2022 08:43:04 +1100 Subject: [PATCH 57/92] Skip windows tests for exec 
payload tests --- .../tests/tests.rs | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/testing/execution_engine_integration/tests/tests.rs b/testing/execution_engine_integration/tests/tests.rs index 30c68f6df0e..d4fcb29dca8 100644 --- a/testing/execution_engine_integration/tests/tests.rs +++ b/testing/execution_engine_integration/tests/tests.rs @@ -1,6 +1,16 @@ -use execution_engine_integration::{Geth, TestRig}; +#[cfg(not(target_family = "windows"))] +mod not_windows { + use execution_engine_integration::{Geth, TestRig}; + #[test] + fn geth() { + TestRig::new(Geth).perform_tests_blocking() + } +} -#[test] -fn geth() { - TestRig::new(Geth).perform_tests_blocking() +#[cfg(target_family = "windows")] +mod windows { + #[test] + fn all_tests_skipped_on_windows() { + // + } } From 903d03e188d3c50285a38565d8beb8224a7bc06b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 18 Feb 2022 09:17:15 +1100 Subject: [PATCH 58/92] Update fork choice response --- .../test_utils/execution_block_generator.rs | 55 +++++++++++-------- .../src/test_utils/handle_rpc.rs | 13 +---- 2 files changed, 33 insertions(+), 35 deletions(-) diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index e0135f24a36..b0da7b510d4 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,4 +1,7 @@ use crate::engine_api::{ + json_structures::{ + JsonForkchoiceUpdatedV1Response, JsonPayloadStatusV1, JsonPayloadStatusV1Status, + }, ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status, }; use crate::engines::ForkChoiceState; @@ -267,39 +270,34 @@ impl ExecutionBlockGenerator { &mut self, forkchoice_state: ForkChoiceState, payload_attributes: Option, - ) -> Result, String> { + ) -> Result { if let Some(payload) = self 
.pending_payloads .remove(&forkchoice_state.head_block_hash) { self.insert_block(Block::PoS(payload))?; } - if !self.blocks.contains_key(&forkchoice_state.head_block_hash) { - return Err(format!( - "block hash {:?} unknown", - forkchoice_state.head_block_hash - )); - } - if !self.blocks.contains_key(&forkchoice_state.safe_block_hash) { - return Err(format!( - "block hash {:?} unknown", - forkchoice_state.head_block_hash - )); - } - if forkchoice_state.finalized_block_hash != Hash256::zero() + let unknown_head_block_hash = !self.blocks.contains_key(&forkchoice_state.head_block_hash); + let unknown_safe_block_hash = !self.blocks.contains_key(&forkchoice_state.safe_block_hash); + let unknown_finalized_block_hash = forkchoice_state.finalized_block_hash != Hash256::zero() && !self .blocks - .contains_key(&forkchoice_state.finalized_block_hash) - { - return Err(format!( - "finalized block hash {:?} is unknown", - forkchoice_state.finalized_block_hash - )); + .contains_key(&forkchoice_state.finalized_block_hash); + + if unknown_head_block_hash || unknown_safe_block_hash || unknown_finalized_block_hash { + return Ok(JsonForkchoiceUpdatedV1Response { + payload_status: JsonPayloadStatusV1 { + status: JsonPayloadStatusV1Status::Syncing, + latest_valid_hash: None, + validation_error: None, + }, + payload_id: None, + }); } - match payload_attributes { - None => Ok(None), + let id = match payload_attributes { + None => None, Some(attributes) => { if !self.blocks.iter().any(|(_, block)| { block.block_hash() == self.terminal_block_hash @@ -342,9 +340,18 @@ impl ExecutionBlockGenerator { self.payload_ids.insert(id, execution_payload); - Ok(Some(id)) + Some(id) } - } + }; + + Ok(JsonForkchoiceUpdatedV1Response { + payload_status: JsonPayloadStatusV1 { + status: JsonPayloadStatusV1Status::Valid, + latest_valid_hash: Some(forkchoice_state.head_block_hash), + validation_error: None, + }, + payload_id: id.map(Into::into), + }) } } diff --git 
a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 746d96e293c..3a7d3c77b7d 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -95,8 +95,7 @@ pub async fn handle_rpc( let forkchoice_state: JsonForkChoiceStateV1 = get_param(params, 0)?; let payload_attributes: Option = get_param(params, 1)?; - let head_block_hash = forkchoice_state.head_block_hash; - let id = ctx + let response = ctx .execution_block_generator .write() .forkchoice_updated_v1( @@ -104,15 +103,7 @@ pub async fn handle_rpc( payload_attributes.map(|json| json.into()), )?; - Ok(serde_json::to_value(JsonForkchoiceUpdatedV1Response { - payload_status: JsonPayloadStatusV1 { - status: JsonPayloadStatusV1Status::Valid, - latest_valid_hash: Some(head_block_hash), - validation_error: None, - }, - payload_id: id.map(Into::into), - }) - .unwrap()) + Ok(serde_json::to_value(response).unwrap()) } other => Err(format!( "The method {} does not exist/is not available", From ae1d54eb9822e8788f167e273e8dff710e476651 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 18 Feb 2022 09:35:45 +1100 Subject: [PATCH 59/92] Handle payload import whilst syncing in tests --- .../tests/payload_invalidation.rs | 7 ++- .../src/test_utils/handle_rpc.rs | 43 +++++++++------- .../execution_layer/src/test_utils/mod.rs | 50 +++++++++++++++++-- 3 files changed, 78 insertions(+), 22 deletions(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index e8de39c5286..3b13ef4797b 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -140,7 +140,12 @@ impl InvalidPayloadRig { match is_valid { Payload::Valid | Payload::Syncing => { if is_valid == Payload::Syncing { - mock_execution_layer.server.all_payloads_syncing(); + // Importing a 
payload whilst returning `SYNCING` simulates an EE that obtains + // the block via it's own means (e.g., devp2p). + let should_import_payload = true; + mock_execution_layer + .server + .all_payloads_syncing(should_import_payload); } else { mock_execution_layer.server.full_payload_verification(); } diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 3a7d3c77b7d..f3a0d441d0f 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -1,5 +1,5 @@ use super::Context; -use crate::engine_api::{http::*, PayloadStatusV1, PayloadStatusV1Status}; +use crate::engine_api::{http::*, *}; use crate::json_structures::*; use serde::de::DeserializeOwned; use serde_json::Value as JsonValue; @@ -57,26 +57,33 @@ pub async fn handle_rpc( ENGINE_NEW_PAYLOAD_V1 => { let request: JsonExecutionPayloadV1 = get_param(params, 0)?; - let response = if let Some(status) = *ctx.static_new_payload_response.lock() { - match status { - PayloadStatusV1Status::Valid => PayloadStatusV1 { - status, - latest_valid_hash: Some(request.block_hash), - validation_error: None, - }, - PayloadStatusV1Status::Syncing => PayloadStatusV1 { - status, - latest_valid_hash: None, - validation_error: None, - }, - _ => unimplemented!("invalid static newPayloadResponse"), - } + let (static_response, should_import) = + if let Some(mut response) = ctx.static_new_payload_response.lock().clone() { + if response.status.status == PayloadStatusV1Status::Valid { + response.status.latest_valid_hash = ctx + .execution_block_generator + .read() + .latest_execution_block() + .map(|b| b.block_hash); + } + + (Some(response.status), response.should_import) + } else { + (None, true) + }; + + let dynamic_response = if should_import { + Some( + ctx.execution_block_generator + .write() + .new_payload(request.into()), + ) } else { - ctx.execution_block_generator - .write() - 
.new_payload(request.into()) + None }; + let response = static_response.or(dynamic_response).unwrap(); + Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap()) } ENGINE_GET_PAYLOAD_V1 => { diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index c3a36ac47bd..438e72f2c97 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -1,6 +1,6 @@ //! Provides a mock execution engine HTTP JSON-RPC API for use in testing. -use crate::engine_api::{http::JSONRPC_VERSION, PayloadStatusV1Status}; +use crate::engine_api::{http::JSONRPC_VERSION, PayloadStatusV1, PayloadStatusV1Status}; use bytes::Bytes; use environment::null_logger; use execution_block_generator::{Block, PoWBlock}; @@ -123,7 +123,45 @@ impl MockServer { } pub fn all_payloads_valid(&self) { - *self.ctx.static_new_payload_response.lock() = Some(PayloadStatusV1Status::Valid) + let response = StaticNewPayloadResponse { + status: PayloadStatusV1 { + status: PayloadStatusV1Status::Valid, + latest_valid_hash: None, + validation_error: None, + }, + should_import: true, + }; + *self.ctx.static_new_payload_response.lock() = Some(response) + } + + /// Setting `should_import = true` simulates an EE that initially returns `SYNCING` but obtains + /// the block via it's own means (e.g., devp2p). 
+ pub fn all_payloads_syncing(&self, should_import: bool) { + let response = StaticNewPayloadResponse { + status: PayloadStatusV1 { + status: PayloadStatusV1Status::Syncing, + latest_valid_hash: None, + validation_error: None, + }, + should_import, + }; + *self.ctx.static_new_payload_response.lock() = Some(response) + } + + pub fn all_payloads_invalid(&self, latest_valid_hash: Hash256) { + let response = StaticNewPayloadResponse { + status: PayloadStatusV1 { + status: PayloadStatusV1Status::Invalid, + latest_valid_hash: Some(latest_valid_hash), + validation_error: Some("static response".into()), + }, + should_import: true, + }; + *self.ctx.static_new_payload_response.lock() = Some(response) + } + + pub fn full_payload_verification(&self) { + *self.ctx.static_new_payload_response.lock() = None } pub fn insert_pow_block( @@ -184,6 +222,12 @@ struct MissingIdField; impl warp::reject::Reject for MissingIdField {} +#[derive(Debug, Clone, PartialEq)] +pub struct StaticNewPayloadResponse { + status: PayloadStatusV1, + should_import: bool, +} + /// A wrapper around all the items required to spawn the HTTP server. /// /// The server will gracefully handle the case where any fields are `None`. 
@@ -193,7 +237,7 @@ pub struct Context { pub last_echo_request: Arc>>, pub execution_block_generator: RwLock>, pub preloaded_responses: Arc>>, - pub static_new_payload_response: Arc>>, + pub static_new_payload_response: Arc>>, pub _phantom: PhantomData, } From 24e1d5deb4227803d5dc2874bc152073c4ef3208 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 18 Feb 2022 09:36:05 +1100 Subject: [PATCH 60/92] Log crit when a payload id can't be generated --- beacon_node/execution_layer/src/lib.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 8f581c349c5..2894050dcf3 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -438,7 +438,16 @@ impl ExecutionLayer { ) .await .map(|response| response.payload_id)? - .ok_or(ApiError::PayloadIdUnavailable)? + .ok_or_else(|| { + crit!( + self.log(), + "Exec engine unable to produce payload"; + "msg" => "no payload id, the engine is likely syncing. / + this has potentially caused a missed block proposal.", + ); + + ApiError::PayloadIdUnavailable + })? 
}; engine.api.get_payload_v1(payload_id).await From 1b761473869b1155821cd6d4784c02d21d70e78c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 18 Feb 2022 09:39:36 +1100 Subject: [PATCH 61/92] Remove unused field --- beacon_node/beacon_chain/src/block_verification.rs | 3 +-- beacon_node/beacon_chain/src/execution_payload.rs | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index ef0ca74b6be..4ffff5739e0 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1126,8 +1126,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // // It is important that this function is called *after* `per_slot_processing`, since the // `randao` may change. - let payload_verification_status = - notify_new_payload(chain, &state, block.message(), block_root)?; + let payload_verification_status = notify_new_payload(chain, &state, block.message())?; // If the block is sufficiently recent, notify the validator monitor. 
if let Some(slot) = chain.slot_clock.now() { diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index c89758ab74e..5b31bc6e26d 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -35,7 +35,6 @@ pub fn notify_new_payload( chain: &BeaconChain, state: &BeaconState, block: BeaconBlockRef, - block_root: Hash256, ) -> Result> { if !is_execution_enabled(state, block.body()) { return Ok(PayloadVerificationStatus::Irrelevant); From d54aa7d59254a8bcf3e4a7ac31f15050f8e5a53a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 18 Feb 2022 10:12:36 +1100 Subject: [PATCH 62/92] Fix the mock EL FC response --- beacon_node/execution_layer/src/test_utils/handle_rpc.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index f3a0d441d0f..1ee29ce7a9d 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -60,11 +60,7 @@ pub async fn handle_rpc( let (static_response, should_import) = if let Some(mut response) = ctx.static_new_payload_response.lock().clone() { if response.status.status == PayloadStatusV1Status::Valid { - response.status.latest_valid_hash = ctx - .execution_block_generator - .read() - .latest_execution_block() - .map(|b| b.block_hash); + response.status.latest_valid_hash = Some(request.block_hash) } (Some(response.status), response.should_import) From b8964c14a2549320f1ddcb3ee29b5359b9b7aee2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 21 Feb 2022 09:18:19 +1100 Subject: [PATCH 63/92] Update comment --- beacon_node/beacon_chain/src/beacon_chain.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs 
index e92d84354cd..9623586c379 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3180,9 +3180,13 @@ impl BeaconChain { /// This method must be called whenever an execution engine indicates that a payload is /// invalid. /// - /// All beacon blocks between `latest_root` and `latest_valid_hash` will be - /// invalidated in fork choice. Conversely, the `last_valid_hash` and all ancestors will be - /// validated. + /// If the `latest_root` is known to fork-choice it will be invalidated. If it is not known, an + /// error will be returned. + /// + /// If `latest_valid_hash` is `None` or references a block unknown to fork choice, no other + /// blocks will be invalidated. If `latest_valid_hash` is a block known to fork choice, all + /// blocks between the `latest_root` and the `latest_valid_hash` will be invalidated (which may + /// cause further, second-order invalidations). /// /// ## Notes /// From 3bdf1cc9977745160b629ecabce461edbc7ce099 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 21 Feb 2022 09:34:08 +1100 Subject: [PATCH 64/92] Tidy, check justified epoch --- beacon_node/beacon_chain/src/beacon_chain.rs | 31 ++++++++++++-------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9623586c379..87095d9732d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3242,12 +3242,14 @@ impl BeaconChain { // Check to ensure the justified checkpoint does not have an invalid payload. If so, try // to kill the client. let head_info = self.head_info()?; - let justified_root = head_info.current_justified_checkpoint.root; - // De-alias 0x00..00 to the genesis block. - let justified_root = if justified_root == Hash256::zero() { - self.genesis_block_root - } else { - justified_root + // De-alias 0x00..00 to the genesis block at genesis. 
+ let justified_root = { + let justified_checkpoint = head_info.current_justified_checkpoint; + if justified_checkpoint.root == Hash256::zero() && justified_checkpoint.epoch == 0 { + self.genesis_block_root + } else { + justified_checkpoint.root + } }; if let Some(proto_block) = self.fork_choice.read().get_block(&justified_root) { @@ -3256,15 +3258,20 @@ impl BeaconChain { self.log, "The justified checkpoint is invalid"; "msg" => "ensure you are not connected to a malicious network. this error is not \ - recoverable, please reach out to the developers for assistance." + recoverable, please reach out to the lighthouse developers for assistance." ); let mut shutdown_sender = self.shutdown_sender(); - shutdown_sender - .try_send(ShutdownReason::Failure( - INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, - )) - .map_err(BeaconChainError::InvalidFinalizedPayloadShutdownError)?; + if let Err(e) = shutdown_sender.try_send(ShutdownReason::Failure( + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, + )) { + crit!( + self.log, + "Unable trigger client shut down"; + "msg" => "shut down may already be under way", + "error" => ?e + ); + } // Return an error here to try and prevent progression by upstream functions. 
return Err(Error::JustifiedPayloadInvalid { justified_root }); From eb23e17ce7855e1670be0a45373653f4cb063b53 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 21 Feb 2022 11:14:25 +1100 Subject: [PATCH 65/92] Add ExecutionBlockHash newtype --- beacon_node/beacon_chain/src/beacon_chain.rs | 24 +++-- .../beacon_chain/src/block_verification.rs | 14 +-- .../beacon_chain/src/execution_payload.rs | 6 +- .../tests/payload_invalidation.rs | 11 +- beacon_node/client/src/builder.rs | 1 + beacon_node/execution_layer/src/engine_api.rs | 14 +-- .../execution_layer/src/engine_api/http.rs | 60 ++++++----- .../src/engine_api/json_structures.rs | 14 +-- beacon_node/execution_layer/src/engines.rs | 12 +-- beacon_node/execution_layer/src/lib.rs | 30 +++--- .../execution_layer/src/payload_status.rs | 6 +- .../test_utils/execution_block_generator.rs | 44 ++++---- .../src/test_utils/mock_execution_layer.rs | 8 +- .../execution_layer/src/test_utils/mod.rs | 14 +-- consensus/fork_choice/src/fork_choice.rs | 8 +- consensus/proto_array/src/error.rs | 8 +- consensus/proto_array/src/proto_array.rs | 9 +- .../src/proto_array_fork_choice.rs | 15 +-- .../src/per_block_processing/errors.rs | 4 +- consensus/types/src/chain_spec.rs | 12 +-- consensus/types/src/execution_block_hash.rs | 101 ++++++++++++++++++ consensus/types/src/execution_payload.rs | 4 +- .../types/src/execution_payload_header.rs | 4 +- consensus/types/src/lib.rs | 2 + lcli/src/create_payload_header.rs | 2 +- lcli/src/new_testnet.rs | 2 +- testing/ef_tests/src/cases/fork_choice.rs | 8 +- .../src/test_rig.rs | 16 +-- 28 files changed, 288 insertions(+), 165 deletions(-) create mode 100644 consensus/types/src/execution_block_hash.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 87095d9732d..6f351e64608 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -205,7 +205,7 @@ pub struct HeadInfo { pub 
genesis_validators_root: Hash256, pub proposer_shuffling_decision_root: Hash256, pub is_merge_transition_complete: bool, - pub execution_payload_block_hash: Option, + pub execution_payload_block_hash: Option, } pub trait BeaconChainTypes: Send + Sync + 'static { @@ -224,15 +224,15 @@ pub enum HeadSafetyStatus { /// /// If the block is post-terminal-block, `Some(execution_payload.block_hash)` is included with /// the variant. - Safe(Option), + Safe(Option), /// The head block execution payload has not yet been verified by an EL. /// /// The `execution_payload.block_hash` of the head block is returned. - Unsafe(Hash256), + Unsafe(ExecutionBlockHash), /// The head block execution payload was deemed to be invalid by an EL. /// /// The `execution_payload.block_hash` of the head block is returned. - Invalid(Hash256), + Invalid(ExecutionBlockHash), } pub type BeaconForkChoice = ForkChoice< @@ -3202,7 +3202,7 @@ impl BeaconChain { pub fn process_invalid_execution_payload( &self, latest_root: Hash256, - latest_valid_hash: Option, + latest_valid_hash: Option, ) -> Result<(), Error> { debug!( self.log, @@ -3690,6 +3690,7 @@ impl BeaconChain { if is_merge_transition_complete { if let Err(e) = self.update_execution_engine_forkchoice_blocking( new_finalized_checkpoint.root, + beacon_block_root, new_head_execution_block_hash, ) { crit!( @@ -3707,7 +3708,8 @@ impl BeaconChain { pub fn update_execution_engine_forkchoice_blocking( &self, finalized_beacon_block_root: Hash256, - head_execution_block_hash: Hash256, + head_block_root: Hash256, + head_execution_block_hash: ExecutionBlockHash, ) -> Result<(), Error> { let execution_layer = self .execution_layer @@ -3718,6 +3720,7 @@ impl BeaconChain { .block_on_generic(|_| { self.update_execution_engine_forkchoice_async( finalized_beacon_block_root, + head_block_root, head_execution_block_hash, ) }) @@ -3727,7 +3730,8 @@ impl BeaconChain { pub async fn update_execution_engine_forkchoice_async( &self, finalized_beacon_block_root: Hash256, 
- head_execution_block_hash: Hash256, + head_block_root: Hash256, + head_execution_block_hash: ExecutionBlockHash, ) -> Result<(), Error> { // Loading the finalized block from the store is not ideal. Perhaps it would be better to // store it on fork-choice so we can do a lookup without hitting the database. @@ -3744,7 +3748,7 @@ impl BeaconChain { .execution_payload() .ok() .map(|ep| ep.block_hash) - .unwrap_or_else(Hash256::zero); + .unwrap_or_else(ExecutionBlockHash::zero); let forkchoice_updated_response = self .execution_layer @@ -3780,7 +3784,7 @@ impl BeaconChain { // The execution engine has stated that all blocks between the // `head_execution_block_hash` and `latest_valid_hash` are invalid. self.process_invalid_execution_payload( - head_execution_block_hash, + head_block_root, Some(latest_valid_hash), )?; @@ -3793,7 +3797,7 @@ impl BeaconChain { // // Using a `None` latest valid ancestor will result in only the head block // being invalidated (no ancestors). - self.process_invalid_execution_payload(head_execution_block_hash, None)?; + self.process_invalid_execution_payload(head_block_root, None)?; Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 4ffff5739e0..4ac587fd762 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -76,9 +76,9 @@ use std::time::Duration; use store::{Error as DBError, HotColdDB, HotStateSummary, KeyValueStore, StoreOp}; use tree_hash::TreeHash; use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256, - InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, - SignedBeaconBlockHeader, Slot, + BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, + ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, 
PublicKeyBytes, RelativeEpoch, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; /// Maximum block slot number. Block with slots bigger than this constant will NOT be processed. @@ -283,7 +283,7 @@ pub enum ExecutionPayloadError { /// /// The block is invalid and the peer sent us a block that passes gossip propagation conditions, /// but is invalid upon further verification. - InvalidTerminalPoWBlock { parent_hash: Hash256 }, + InvalidTerminalPoWBlock { parent_hash: ExecutionBlockHash }, /// The `TERMINAL_BLOCK_HASH` is set, but the block has not reached the /// `TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH`. /// @@ -302,8 +302,8 @@ pub enum ExecutionPayloadError { /// The block is invalid and the peer sent us a block that passes gossip propagation conditions, /// but is invalid upon further verification. InvalidTerminalBlockHash { - terminal_block_hash: Hash256, - payload_parent_hash: Hash256, + terminal_block_hash: ExecutionBlockHash, + payload_parent_hash: ExecutionBlockHash, }, /// The execution node failed to provide a parent block to a known block. This indicates an /// issue with the execution node. @@ -311,7 +311,7 @@ pub enum ExecutionPayloadError { /// ## Peer scoring /// /// The peer is not necessarily invalid. 
- PoWParentMissing(Hash256), + PoWParentMissing(ExecutionBlockHash), } impl From for ExecutionPayloadError { diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 5b31bc6e26d..4b249ea7601 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -103,7 +103,7 @@ pub fn validate_merge_block( let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); let execution_payload = block.execution_payload()?; - if spec.terminal_block_hash != Hash256::zero() { + if spec.terminal_block_hash != ExecutionBlockHash::zero() { if block_epoch < spec.terminal_block_hash_activation_epoch { return Err(ExecutionPayloadError::InvalidActivationEpoch { activation_epoch: spec.terminal_block_hash_activation_epoch, @@ -267,7 +267,7 @@ pub async fn prepare_execution_payload( .ok_or(BlockProductionError::ExecutionLayerMissing)?; let parent_hash = if !is_merge_transition_complete(state) { - let is_terminal_block_hash_set = spec.terminal_block_hash != Hash256::zero(); + let is_terminal_block_hash_set = spec.terminal_block_hash != ExecutionBlockHash::zero(); let is_activation_epoch_reached = state.current_epoch() >= spec.terminal_block_hash_activation_epoch; @@ -318,7 +318,7 @@ pub async fn prepare_execution_payload( parent_hash, timestamp, random, - finalized_block_hash.unwrap_or_else(Hash256::zero), + finalized_block_hash.unwrap_or_else(ExecutionBlockHash::zero), proposer_index, ) .await diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 3b13ef4797b..7be683f0a18 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -16,7 +16,9 @@ type E = MainnetEthSpec; #[derive(PartialEq, Clone)] enum Payload { Valid, - Invalid { latest_valid_hash: Option }, + Invalid { + latest_valid_hash: Option, + }, Syncing, } @@ 
-52,7 +54,7 @@ impl InvalidPayloadRig { self } - fn block_hash(&self, block_root: Hash256) -> Hash256 { + fn block_hash(&self, block_root: Hash256) -> ExecutionBlockHash { self.harness .chain .get_block(&block_root) @@ -308,13 +310,14 @@ fn pre_finalized_latest_valid_hash() { assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); let pre_finalized_block_root = rig.block_root_at_slot(Slot::new(1)).unwrap(); + let pre_finalized_block_hash = rig.block_hash(pre_finalized_block_root); // No service should have triggered a shutdown, yet. assert!(rig.harness.shutdown_reasons().is_empty()); // Import a block that will invalidate the justified checkpoint. rig.import_block(Payload::Invalid { - latest_valid_hash: Some(pre_finalized_block_root), + latest_valid_hash: Some(pre_finalized_block_hash), }); // The latest imported block should be the head. @@ -388,7 +391,7 @@ fn latest_valid_hash_is_junk() { // No service should have triggered a shutdown, yet. assert!(rig.harness.shutdown_reasons().is_empty()); - let junk_hash = Hash256::from_low_u64_be(42); + let junk_hash = ExecutionBlockHash::repeat_byte(42); rig.import_block(Payload::Invalid { latest_valid_hash: Some(junk_hash), }); diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 32d29127d4a..a02338b57bd 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -680,6 +680,7 @@ where let result = inner_chain .update_execution_engine_forkchoice_async( head.finalized_checkpoint.root, + head.block_root, block_hash, ) .await; diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index c2f8fb690b8..605679dd7e7 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; pub const LATEST_TAG: &str = "latest"; use crate::engines::ForkChoiceState; -pub use types::{Address, EthSpec, 
ExecutionPayload, Hash256, Uint256}; +pub use types::{Address, EthSpec, ExecutionBlockHash, ExecutionPayload, Hash256, Uint256}; pub mod http; pub mod json_structures; @@ -23,9 +23,9 @@ pub enum Error { ServerMessage { code: i64, message: String }, Eip155Failure, IsSyncing, - ExecutionBlockNotFound(Hash256), + ExecutionBlockNotFound(ExecutionBlockHash), ExecutionHeadBlockNotFound, - ParentHashEqualsBlockHash(Hash256), + ParentHashEqualsBlockHash(ExecutionBlockHash), PayloadIdUnavailable, } @@ -53,7 +53,7 @@ pub trait EngineApi { async fn get_block_by_hash<'a>( &self, - block_hash: Hash256, + block_hash: ExecutionBlockHash, ) -> Result, Error>; async fn new_payload_v1( @@ -86,7 +86,7 @@ pub enum PayloadStatusV1Status { #[derive(Clone, Debug, PartialEq)] pub struct PayloadStatusV1 { pub status: PayloadStatusV1Status, - pub latest_valid_hash: Option, + pub latest_valid_hash: Option, pub validation_error: Option, } @@ -100,10 +100,10 @@ pub enum BlockByNumberQuery<'a> { #[serde(rename_all = "camelCase")] pub struct ExecutionBlock { #[serde(rename = "hash")] - pub block_hash: Hash256, + pub block_hash: ExecutionBlockHash, #[serde(rename = "number", with = "eth2_serde_utils::u64_hex_be")] pub block_number: u64, - pub parent_hash: Hash256, + pub parent_hash: ExecutionBlockHash, pub total_difficulty: Uint256, } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index ce4c3beff04..8d82b8d311b 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -125,7 +125,7 @@ impl EngineApi for HttpJsonRpc { async fn get_block_by_hash<'a>( &self, - block_hash: Hash256, + block_hash: ExecutionBlockHash, ) -> Result, Error> { let params = json!([block_hash, RETURN_FULL_TRANSACTION_OBJECTS]); @@ -413,7 +413,9 @@ mod test { Tester::new() .assert_request_equals( |client| async move { - let _ = client.get_block_by_hash(Hash256::repeat_byte(1)).await; + let _ = 
client + .get_block_by_hash(ExecutionBlockHash::repeat_byte(1)) + .await; }, json!({ "id": STATIC_ID, @@ -433,9 +435,9 @@ mod test { let _ = client .forkchoice_updated_v1( ForkChoiceState { - head_block_hash: Hash256::repeat_byte(1), - safe_block_hash: Hash256::repeat_byte(1), - finalized_block_hash: Hash256::zero(), + head_block_hash: ExecutionBlockHash::repeat_byte(1), + safe_block_hash: ExecutionBlockHash::repeat_byte(1), + finalized_block_hash: ExecutionBlockHash::zero(), }, Some(PayloadAttributes { timestamp: 5, @@ -488,7 +490,7 @@ mod test { |client| async move { let _ = client .new_payload_v1::(ExecutionPayload { - parent_hash: Hash256::repeat_byte(0), + parent_hash: ExecutionBlockHash::repeat_byte(0), fee_recipient: Address::repeat_byte(1), state_root: Hash256::repeat_byte(1), receipts_root: Hash256::repeat_byte(0), @@ -500,7 +502,7 @@ mod test { timestamp: 42, extra_data: vec![].into(), base_fee_per_gas: Uint256::from(1), - block_hash: Hash256::repeat_byte(1), + block_hash: ExecutionBlockHash::repeat_byte(1), transactions: vec![].into(), }) .await; @@ -538,9 +540,9 @@ mod test { let _ = client .forkchoice_updated_v1( ForkChoiceState { - head_block_hash: Hash256::repeat_byte(0), - safe_block_hash: Hash256::repeat_byte(0), - finalized_block_hash: Hash256::repeat_byte(1), + head_block_hash: ExecutionBlockHash::repeat_byte(0), + safe_block_hash: ExecutionBlockHash::repeat_byte(0), + finalized_block_hash: ExecutionBlockHash::repeat_byte(1), }, None, ) @@ -588,9 +590,9 @@ mod test { let _ = client .forkchoice_updated_v1( ForkChoiceState { - head_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), - safe_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), - finalized_block_hash: Hash256::zero(), + head_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + safe_block_hash: 
ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + finalized_block_hash: ExecutionBlockHash::zero(), }, Some(PayloadAttributes { timestamp: 5, @@ -635,9 +637,9 @@ mod test { let response = client .forkchoice_updated_v1( ForkChoiceState { - head_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), - safe_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), - finalized_block_hash: Hash256::zero(), + head_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + safe_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + finalized_block_hash: ExecutionBlockHash::zero(), }, Some(PayloadAttributes { timestamp: 5, @@ -650,7 +652,7 @@ mod test { assert_eq!(response, ForkchoiceUpdatedResponse { payload_status: PayloadStatusV1 { status: PayloadStatusV1Status::Valid, - latest_valid_hash: Some(Hash256::zero()), + latest_valid_hash: Some(ExecutionBlockHash::zero()), validation_error: Some(String::new()), }, payload_id: @@ -703,7 +705,7 @@ mod test { .unwrap(); let expected = ExecutionPayload { - parent_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), receipts_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), @@ -715,7 +717,7 @@ mod test { timestamp: 5, extra_data: vec![].into(), base_fee_per_gas: Uint256::from(7), - block_hash: 
Hash256::from_str("0x6359b8381a370e2f54072a5784ddd78b6ed024991558c511d4452eb4f6ac898c").unwrap(), + block_hash: ExecutionBlockHash::from_str("0x6359b8381a370e2f54072a5784ddd78b6ed024991558c511d4452eb4f6ac898c").unwrap(), transactions: vec![].into(), }; @@ -728,7 +730,7 @@ mod test { |client| async move { let _ = client .new_payload_v1::(ExecutionPayload { - parent_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + parent_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), receipts_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), @@ -740,7 +742,7 @@ mod test { timestamp: 5, extra_data: vec![].into(), base_fee_per_gas: Uint256::from(7), - block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), transactions: vec![].into(), }) .await; @@ -788,7 +790,7 @@ mod test { assert_eq!(response, PayloadStatusV1 { status: PayloadStatusV1Status::Valid, - latest_valid_hash: Some(Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap()), + latest_valid_hash: Some(ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap()), validation_error: Some(String::new()), } ); @@ -801,9 +803,9 @@ mod test { let _ = client .forkchoice_updated_v1( ForkChoiceState { - head_block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), - safe_block_hash: 
Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), - finalized_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + head_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + safe_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + finalized_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), }, None, ) @@ -840,9 +842,9 @@ mod test { let response = client .forkchoice_updated_v1( ForkChoiceState { - head_block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), - safe_block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), - finalized_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + head_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + safe_block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + finalized_block_hash: ExecutionBlockHash::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), }, None, ) @@ -851,7 +853,7 @@ mod test { assert_eq!(response, ForkchoiceUpdatedResponse { payload_status: PayloadStatusV1 { status: PayloadStatusV1Status::Valid, - latest_valid_hash: Some(Hash256::zero()), + latest_valid_hash: Some(ExecutionBlockHash::zero()), validation_error: Some(String::new()), }, payload_id: None, diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 03d981d439d..e9559e894cc 100644 --- 
a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,6 +1,6 @@ use super::*; use serde::{Deserialize, Serialize}; -use types::{EthSpec, FixedVector, Transaction, Unsigned, VariableList}; +use types::{EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -58,7 +58,7 @@ pub struct JsonPayloadIdResponse { #[derive(Debug, PartialEq, Default, Serialize, Deserialize)] #[serde(bound = "T: EthSpec", rename_all = "camelCase")] pub struct JsonExecutionPayloadV1 { - pub parent_hash: Hash256, + pub parent_hash: ExecutionBlockHash, pub fee_recipient: Address, pub state_root: Hash256, pub receipts_root: Hash256, @@ -76,7 +76,7 @@ pub struct JsonExecutionPayloadV1 { #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, pub base_fee_per_gas: Uint256, - pub block_hash: Hash256, + pub block_hash: ExecutionBlockHash, #[serde(with = "serde_transactions")] pub transactions: VariableList, T::MaxTransactionsPerPayload>, @@ -206,9 +206,9 @@ impl From for PayloadAttributes { #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonForkChoiceStateV1 { - pub head_block_hash: Hash256, - pub safe_block_hash: Hash256, - pub finalized_block_hash: Hash256, + pub head_block_hash: ExecutionBlockHash, + pub safe_block_hash: ExecutionBlockHash, + pub finalized_block_hash: ExecutionBlockHash, } impl From for JsonForkChoiceStateV1 { @@ -260,7 +260,7 @@ pub enum JsonPayloadStatusV1Status { #[serde(rename_all = "camelCase")] pub struct JsonPayloadStatusV1 { pub status: JsonPayloadStatusV1Status, - pub latest_valid_hash: Option, + pub latest_valid_hash: Option, pub validation_error: Option, } diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 03801f3168d..d8e19baae13 100644 --- 
a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -8,7 +8,7 @@ use lru::LruCache; use slog::{crit, debug, info, warn, Logger}; use std::future::Future; use tokio::sync::{Mutex, RwLock}; -use types::{Address, Hash256}; +use types::{Address, ExecutionBlockHash, Hash256}; /// The number of payload IDs that will be stored for each `Engine`. /// @@ -25,9 +25,9 @@ enum EngineState { #[derive(Copy, Clone, PartialEq, Debug)] pub struct ForkChoiceState { - pub head_block_hash: Hash256, - pub safe_block_hash: Hash256, - pub finalized_block_hash: Hash256, + pub head_block_hash: ExecutionBlockHash, + pub safe_block_hash: ExecutionBlockHash, + pub finalized_block_hash: ExecutionBlockHash, } /// Used to enable/disable logging on some tasks. @@ -48,7 +48,7 @@ impl Logging { #[derive(Hash, PartialEq, std::cmp::Eq)] struct PayloadIdCacheKey { - pub head_block_hash: Hash256, + pub head_block_hash: ExecutionBlockHash, pub timestamp: u64, pub random: Hash256, pub suggested_fee_recipient: Address, @@ -75,7 +75,7 @@ impl Engine { pub async fn get_payload_id( &self, - head_block_hash: Hash256, + head_block_hash: ExecutionBlockHash, timestamp: u64, random: Hash256, suggested_fee_recipient: Address, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 2894050dcf3..f9c505b17ce 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -20,7 +20,7 @@ use tokio::{ sync::{Mutex, MutexGuard}, time::{sleep, sleep_until, Instant}, }; -use types::{ChainSpec, Epoch, ProposerPreparationData}; +use types::{ChainSpec, Epoch, ExecutionBlockHash, ProposerPreparationData}; pub use engine_api::{http::HttpJsonRpc, PayloadAttributes, PayloadStatusV1Status}; pub use payload_status::PayloadStatus; @@ -72,7 +72,7 @@ struct Inner { engines: Engines, suggested_fee_recipient: Option
, proposer_preparation_data: Mutex>, - execution_blocks: Mutex>, + execution_blocks: Mutex>, executor: TaskExecutor, log: Logger, } @@ -141,7 +141,9 @@ impl ExecutionLayer { } /// Note: this function returns a mutex guard, be careful to avoid deadlocks. - async fn execution_blocks(&self) -> MutexGuard<'_, LruCache> { + async fn execution_blocks( + &self, + ) -> MutexGuard<'_, LruCache> { self.inner.execution_blocks.lock().await } @@ -388,10 +390,10 @@ impl ExecutionLayer { /// will be contacted. pub async fn get_payload( &self, - parent_hash: Hash256, + parent_hash: ExecutionBlockHash, timestamp: u64, random: Hash256, - finalized_block_hash: Hash256, + finalized_block_hash: ExecutionBlockHash, proposer_index: u64, ) -> Result, Error> { let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; @@ -508,8 +510,8 @@ impl ExecutionLayer { /// - An error, if all nodes return an error. pub async fn notify_forkchoice_updated( &self, - head_block_hash: Hash256, - finalized_block_hash: Hash256, + head_block_hash: ExecutionBlockHash, + finalized_block_hash: ExecutionBlockHash, payload_attributes: Option, ) -> Result { debug!( @@ -561,12 +563,12 @@ impl ExecutionLayer { pub async fn get_terminal_pow_block_hash( &self, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, Error> { let hash_opt = self .engines() .first_success(|engine| async move { let terminal_block_hash = spec.terminal_block_hash; - if terminal_block_hash != Hash256::zero() { + if terminal_block_hash != ExecutionBlockHash::zero() { if self .get_pow_block(engine, terminal_block_hash) .await? 
@@ -610,7 +612,7 @@ impl ExecutionLayer { &self, engine: &Engine, spec: &ChainSpec, - ) -> Result, ApiError> { + ) -> Result, ApiError> { let mut block = engine .api .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) @@ -622,7 +624,7 @@ impl ExecutionLayer { loop { let block_reached_ttd = block.total_difficulty >= spec.terminal_total_difficulty; if block_reached_ttd { - if block.parent_hash == Hash256::zero() { + if block.parent_hash == ExecutionBlockHash::zero() { return Ok(Some(block.block_hash)); } let parent = self @@ -670,7 +672,7 @@ impl ExecutionLayer { /// https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/merge/fork-choice.md pub async fn is_valid_terminal_pow_block_hash( &self, - block_hash: Hash256, + block_hash: ExecutionBlockHash, spec: &ChainSpec, ) -> Result, Error> { let broadcast_results = self @@ -749,7 +751,7 @@ impl ExecutionLayer { async fn get_pow_block( &self, engine: &Engine, - hash: Hash256, + hash: ExecutionBlockHash, ) -> Result, ApiError> { if let Some(cached) = self.execution_blocks().await.get(&hash).copied() { // The block was in the cache, no need to request it from the execution @@ -843,7 +845,7 @@ mod test { MockExecutionLayer::default_params() .move_to_terminal_block() .with_terminal_block(|spec, el, _| async move { - let missing_terminal_block = Hash256::repeat_byte(42); + let missing_terminal_block = ExecutionBlockHash::repeat_byte(42); assert_eq!( el.is_valid_terminal_pow_block_hash(missing_terminal_block, &spec) diff --git a/beacon_node/execution_layer/src/payload_status.rs b/beacon_node/execution_layer/src/payload_status.rs index 2ada8b84d8d..aa65f489232 100644 --- a/beacon_node/execution_layer/src/payload_status.rs +++ b/beacon_node/execution_layer/src/payload_status.rs @@ -2,7 +2,7 @@ use crate::engine_api::{Error as ApiError, PayloadStatusV1, PayloadStatusV1Statu use crate::engines::EngineError; use crate::Error; use slog::{crit, warn, Logger}; -use types::Hash256; +use types::ExecutionBlockHash; /// 
Provides a simpler, easier to parse version of `PayloadStatusV1` for upstream users. /// @@ -11,7 +11,7 @@ use types::Hash256; pub enum PayloadStatus { Valid, Invalid { - latest_valid_hash: Hash256, + latest_valid_hash: ExecutionBlockHash, validation_error: Option, }, Syncing, @@ -44,7 +44,7 @@ pub enum PayloadStatus { /// processed. /// - If there are no responses (only errors or nothing), return an error. pub fn process_multiple_payload_statuses( - head_block_hash: Hash256, + head_block_hash: ExecutionBlockHash, statuses: impl Iterator>, log: &Logger, ) -> Result { diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index b0da7b510d4..52accad3a1f 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -9,7 +9,7 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -use types::{EthSpec, ExecutionPayload, Hash256, Uint256}; +use types::{EthSpec, ExecutionBlockHash, ExecutionPayload, Hash256, Uint256}; const GAS_LIMIT: u64 = 16384; const GAS_USED: u64 = GAS_LIMIT - 1; @@ -29,14 +29,14 @@ impl Block { } } - pub fn parent_hash(&self) -> Hash256 { + pub fn parent_hash(&self) -> ExecutionBlockHash { match self { Block::PoW(block) => block.parent_hash, Block::PoS(payload) => payload.parent_hash, } } - pub fn block_hash(&self) -> Hash256 { + pub fn block_hash(&self) -> ExecutionBlockHash { match self { Block::PoW(block) => block.block_hash, Block::PoS(payload) => payload.block_hash, @@ -72,8 +72,8 @@ impl Block { #[serde(rename_all = "camelCase")] pub struct PoWBlock { pub block_number: u64, - pub block_hash: Hash256, - pub parent_hash: Hash256, + pub block_hash: ExecutionBlockHash, + pub parent_hash: ExecutionBlockHash, pub total_difficulty: Uint256, } @@ -81,18 +81,18 @@ pub struct 
ExecutionBlockGenerator { /* * Common database */ - blocks: HashMap>, - block_hashes: HashMap, + blocks: HashMap>, + block_hashes: HashMap, /* * PoW block parameters */ pub terminal_total_difficulty: Uint256, pub terminal_block_number: u64, - pub terminal_block_hash: Hash256, + pub terminal_block_hash: ExecutionBlockHash, /* * PoS block parameters */ - pub pending_payloads: HashMap>, + pub pending_payloads: HashMap>, pub next_payload_id: u64, pub payload_ids: HashMap>, } @@ -101,7 +101,7 @@ impl ExecutionBlockGenerator { pub fn new( terminal_total_difficulty: Uint256, terminal_block_number: u64, - terminal_block_hash: Hash256, + terminal_block_hash: ExecutionBlockHash, ) -> Self { let mut gen = Self { blocks: <_>::default(), @@ -144,11 +144,11 @@ impl ExecutionBlockGenerator { .map(|block| block.as_execution_block(self.terminal_total_difficulty)) } - pub fn block_by_hash(&self, hash: Hash256) -> Option> { + pub fn block_by_hash(&self, hash: ExecutionBlockHash) -> Option> { self.blocks.get(&hash).cloned() } - pub fn execution_block_by_hash(&self, hash: Hash256) -> Option { + pub fn execution_block_by_hash(&self, hash: ExecutionBlockHash) -> Option { self.block_by_hash(hash) .map(|block| block.as_execution_block(self.terminal_total_difficulty)) } @@ -190,7 +190,7 @@ impl ExecutionBlockGenerator { pub fn insert_pow_block(&mut self, block_number: u64) -> Result<(), String> { let parent_hash = if block_number == 0 { - Hash256::zero() + ExecutionBlockHash::zero() } else if let Some(hash) = self.block_hashes.get(&(block_number - 1)) { *hash } else { @@ -280,7 +280,8 @@ impl ExecutionBlockGenerator { let unknown_head_block_hash = !self.blocks.contains_key(&forkchoice_state.head_block_hash); let unknown_safe_block_hash = !self.blocks.contains_key(&forkchoice_state.safe_block_hash); - let unknown_finalized_block_hash = forkchoice_state.finalized_block_hash != Hash256::zero() + let unknown_finalized_block_hash = forkchoice_state.finalized_block_hash + != 
ExecutionBlockHash::zero() && !self .blocks .contains_key(&forkchoice_state.finalized_block_hash); @@ -332,11 +333,12 @@ impl ExecutionBlockGenerator { timestamp: attributes.timestamp, extra_data: "block gen was here".as_bytes().to_vec().into(), base_fee_per_gas: Uint256::one(), - block_hash: Hash256::zero(), + block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), }; - execution_payload.block_hash = execution_payload.tree_hash_root(); + execution_payload.block_hash = + ExecutionBlockHash::from_root(execution_payload.tree_hash_root()); self.payload_ids.insert(id, execution_payload); @@ -363,7 +365,7 @@ pub fn generate_pow_block( terminal_total_difficulty: Uint256, terminal_block_number: u64, block_number: u64, - parent_hash: Hash256, + parent_hash: ExecutionBlockHash, ) -> Result { if block_number > terminal_block_number { return Err(format!( @@ -385,12 +387,12 @@ pub fn generate_pow_block( let mut block = PoWBlock { block_number, - block_hash: Hash256::zero(), + block_hash: ExecutionBlockHash::zero(), parent_hash, total_difficulty, }; - block.block_hash = block.tree_hash_root(); + block.block_hash = ExecutionBlockHash::from_root(block.tree_hash_root()); Ok(block) } @@ -409,7 +411,7 @@ mod test { let mut generator: ExecutionBlockGenerator = ExecutionBlockGenerator::new( TERMINAL_DIFFICULTY.into(), TERMINAL_BLOCK, - Hash256::zero(), + ExecutionBlockHash::zero(), ); for i in 0..=TERMINAL_BLOCK { @@ -427,7 +429,7 @@ mod test { let expected_parent = i .checked_sub(1) .map(|i| generator.block_by_number(i).unwrap().block_hash()) - .unwrap_or_else(Hash256::zero); + .unwrap_or_else(ExecutionBlockHash::zero); assert_eq!(block.parent_hash(), expected_parent); assert_eq!( diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 22adb52032f..a15ab25254b 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ 
b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -58,7 +58,7 @@ impl MockExecutionLayer { Self::new( DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, - Hash256::zero(), + ExecutionBlockHash::zero(), Epoch::new(0), ) } @@ -66,7 +66,7 @@ impl MockExecutionLayer { pub fn new( terminal_total_difficulty: Uint256, terminal_block: u64, - terminal_block_hash: Hash256, + terminal_block_hash: ExecutionBlockHash, terminal_block_hash_activation_epoch: Epoch, ) -> Self { let el_runtime = ExecutionLayerRuntime::default(); @@ -117,7 +117,7 @@ impl MockExecutionLayer { self.el .notify_forkchoice_updated( parent_hash, - Hash256::zero(), + ExecutionBlockHash::zero(), Some(PayloadAttributes { timestamp, random, @@ -149,7 +149,7 @@ impl MockExecutionLayer { assert_eq!(status, PayloadStatus::Valid); self.el - .notify_forkchoice_updated(block_hash, Hash256::zero(), None) + .notify_forkchoice_updated(block_hash, ExecutionBlockHash::zero(), None) .await .unwrap(); diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 438e72f2c97..7ae24b2d662 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -14,7 +14,7 @@ use std::marker::PhantomData; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::sync::Arc; use tokio::{runtime, sync::oneshot}; -use types::{EthSpec, Hash256, Uint256}; +use types::{EthSpec, ExecutionBlockHash, Hash256, Uint256}; use warp::Filter; pub use execution_block_generator::{generate_pow_block, ExecutionBlockGenerator}; @@ -47,7 +47,7 @@ impl MockServer { &runtime::Handle::current(), DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, - Hash256::zero(), + ExecutionBlockHash::zero(), ) } @@ -55,7 +55,7 @@ impl MockServer { handle: &runtime::Handle, terminal_difficulty: Uint256, terminal_block: u64, - terminal_block_hash: Hash256, + terminal_block_hash: ExecutionBlockHash, ) -> Self { let 
last_echo_request = Arc::new(RwLock::new(None)); let preloaded_responses = Arc::new(Mutex::new(vec![])); @@ -148,7 +148,7 @@ impl MockServer { *self.ctx.static_new_payload_response.lock() = Some(response) } - pub fn all_payloads_invalid(&self, latest_valid_hash: Hash256) { + pub fn all_payloads_invalid(&self, latest_valid_hash: ExecutionBlockHash) { let response = StaticNewPayloadResponse { status: PayloadStatusV1 { status: PayloadStatusV1Status::Invalid, @@ -167,8 +167,8 @@ impl MockServer { pub fn insert_pow_block( &self, block_number: u64, - block_hash: Hash256, - parent_hash: Hash256, + block_hash: ExecutionBlockHash, + parent_hash: ExecutionBlockHash, total_difficulty: Uint256, ) { let block = Block::PoW(PoWBlock { @@ -187,7 +187,7 @@ impl MockServer { .unwrap() } - pub fn get_block(&self, block_hash: Hash256) -> Option> { + pub fn get_block(&self, block_hash: ExecutionBlockHash) -> Option> { self.ctx .execution_block_generator .read() diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index d27db101de3..7aa2ab3f6ff 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -6,8 +6,8 @@ use std::marker::PhantomData; use std::time::Duration; use types::{ consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, BeaconBlock, BeaconState, - BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, IndexedAttestation, - RelativeEpoch, SignedBeaconBlock, Slot, + BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, }; #[derive(Debug)] @@ -468,7 +468,7 @@ where pub fn on_invalid_execution_payload( &mut self, head_block_root: Hash256, - latest_valid_ancestor_root: Option, + latest_valid_ancestor_root: Option, ) -> Result<(), Error> { self.proto_array .process_execution_payload_invalidation(head_block_root, latest_valid_ancestor_root) @@ -603,7 +603,7 @@ where let 
execution_status = if let Ok(execution_payload) = block.body().execution_payload() { let block_hash = execution_payload.block_hash; - if block_hash == Hash256::zero() { + if block_hash == ExecutionBlockHash::zero() { // The block is post-merge-fork, but pre-terminal-PoW block. We don't need to verify // the payload. ExecutionStatus::irrelevant() diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index 60b39e0a68c..1d0ab9b1049 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -1,4 +1,4 @@ -use types::{Checkpoint, Epoch, Hash256}; +use types::{Checkpoint, Epoch, ExecutionBlockHash, Hash256}; #[derive(Clone, PartialEq, Debug)] pub enum Error { @@ -28,18 +28,18 @@ pub enum Error { InvalidBestNode(Box), InvalidAncestorOfValidPayload { ancestor_block_root: Hash256, - ancestor_payload_block_hash: Hash256, + ancestor_payload_block_hash: ExecutionBlockHash, }, ValidExecutionStatusBecameInvalid { block_root: Hash256, - payload_block_hash: Hash256, + payload_block_hash: ExecutionBlockHash, }, InvalidJustifiedCheckpointExecutionStatus { justified_root: Hash256, }, UnknownLatestValidAncestorHash { block_root: Hash256, - latest_valid_ancestor_hash: Option, + latest_valid_ancestor_hash: Option, }, } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index dcd990cb4f7..90124df8432 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -5,7 +5,10 @@ use ssz::four_byte_option_impl; use ssz::Encode; use ssz_derive::{Decode, Encode}; use std::collections::{HashMap, HashSet}; -use types::{AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; +use types::{ + AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + Slot, +}; // Define a "legacy" implementation of `Option` which uses four bytes for encoding the union // selector. 
@@ -310,7 +313,7 @@ impl ProtoArray { pub fn propagate_execution_payload_invalidation( &mut self, head_block_root: Hash256, - latest_valid_ancestor_hash: Option, + latest_valid_ancestor_hash: Option, ) -> Result<(), Error> { let mut invalidated_indices: HashSet = <_>::default(); let mut index = *self @@ -767,7 +770,7 @@ impl ProtoArray { /// `block_hash`, if any. pub fn execution_block_hash_to_beacon_block_root( &self, - block_hash: &Hash256, + block_hash: &ExecutionBlockHash, ) -> Option { self.nodes .iter() diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 1bc6a81a233..21e7b360188 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -5,7 +5,10 @@ use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; -use types::{AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; +use types::{ + AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + Slot, +}; pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; @@ -21,11 +24,11 @@ pub struct VoteTracker { #[ssz(enum_behaviour = "union")] pub enum ExecutionStatus { /// An EL has determined that the payload is valid. - Valid(Hash256), + Valid(ExecutionBlockHash), /// An EL has determined that the payload is invalid. - Invalid(Hash256), + Invalid(ExecutionBlockHash), /// An EL has not yet verified the execution payload. - Unknown(Hash256), + Unknown(ExecutionBlockHash), /// The block is either prior to the merge fork, or after the merge fork but before the terminal /// PoW block has been found. 
/// @@ -41,7 +44,7 @@ impl ExecutionStatus { ExecutionStatus::Irrelevant(false) } - pub fn block_hash(&self) -> Option { + pub fn block_hash(&self) -> Option { match self { ExecutionStatus::Valid(hash) | ExecutionStatus::Invalid(hash) @@ -165,7 +168,7 @@ impl ProtoArrayForkChoice { pub fn process_execution_payload_invalidation( &mut self, head_block_root: Hash256, - latest_valid_ancestor_root: Option, + latest_valid_ancestor_root: Option, ) -> Result<(), String> { self.proto_array .propagate_execution_payload_invalidation(head_block_root, latest_valid_ancestor_root) diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index abfbb621d9e..e214b6e63d8 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -58,8 +58,8 @@ pub enum BlockProcessingError { InconsistentBlockFork(InconsistentFork), InconsistentStateFork(InconsistentFork), ExecutionHashChainIncontiguous { - expected: Hash256, - found: Hash256, + expected: ExecutionBlockHash, + found: ExecutionBlockHash, }, ExecutionRandaoMismatch { expected: Hash256, diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index d391fe01e1c..29c67808cc8 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -144,7 +144,7 @@ pub struct ChainSpec { /// The Merge fork epoch is optional, with `None` representing "Merge never happens". pub bellatrix_fork_epoch: Option, pub terminal_total_difficulty: Uint256, - pub terminal_block_hash: Hash256, + pub terminal_block_hash: ExecutionBlockHash, pub terminal_block_hash_activation_epoch: Epoch, /* @@ -549,7 +549,7 @@ impl ChainSpec { // `Uint256::MAX` which is `2*256- 1`. 
.checked_add(Uint256::one()) .expect("addition does not overflow"), - terminal_block_hash: Hash256::zero(), + terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), /* @@ -746,7 +746,7 @@ impl ChainSpec { // `Uint256::MAX` which is `2*256- 1`. .checked_add(Uint256::one()) .expect("addition does not overflow"), - terminal_block_hash: Hash256::zero(), + terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), /* @@ -787,7 +787,7 @@ pub struct Config { pub terminal_total_difficulty: Uint256, // TODO(merge): remove this default #[serde(default = "default_terminal_block_hash")] - pub terminal_block_hash: Hash256, + pub terminal_block_hash: ExecutionBlockHash, // TODO(merge): remove this default #[serde(default = "default_terminal_block_hash_activation_epoch")] pub terminal_block_hash_activation_epoch: Epoch, @@ -870,8 +870,8 @@ const fn default_terminal_total_difficulty() -> Uint256 { ]) } -fn default_terminal_block_hash() -> Hash256 { - Hash256::zero() +fn default_terminal_block_hash() -> ExecutionBlockHash { + ExecutionBlockHash::zero() } fn default_terminal_block_hash_activation_epoch() -> Epoch { diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs new file mode 100644 index 00000000000..dbfe2181592 --- /dev/null +++ b/consensus/types/src/execution_block_hash.rs @@ -0,0 +1,101 @@ +use crate::test_utils::TestRandom; +use crate::Hash256; +use rand::RngCore; +use serde_derive::{Deserialize, Serialize}; +use ssz::{Decode, DecodeError, Encode}; +use std::fmt; + +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash)] +#[serde(transparent)] +pub struct ExecutionBlockHash(Hash256); + +impl ExecutionBlockHash { + pub fn zero() -> Self { + Self(Hash256::zero()) + } + + pub fn repeat_byte(b: u8) -> Self { + 
Self(Hash256::repeat_byte(b)) + } + + pub fn from_root(root: Hash256) -> Self { + Self(root) + } + + pub fn into_root(self) -> Hash256 { + self.0 + } +} + +impl Encode for ExecutionBlockHash { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.0.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.0.ssz_append(buf) + } +} + +impl Decode for ExecutionBlockHash { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + Hash256::from_ssz_bytes(bytes).map(Self) + } +} + +impl tree_hash::TreeHash for ExecutionBlockHash { + fn tree_hash_type() -> tree_hash::TreeHashType { + Hash256::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> Vec { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + Hash256::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} + +impl TestRandom for ExecutionBlockHash { + fn random_for_test(rng: &mut impl RngCore) -> Self { + Self(Hash256::random_for_test(rng)) + } +} + +impl std::str::FromStr for ExecutionBlockHash { + type Err = String; + + fn from_str(s: &str) -> Result { + Hash256::from_str(s) + .map(Self) + .map_err(|e| format!("{:?}", e)) + } +} + +impl fmt::Display for ExecutionBlockHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 781fb7460f1..fc37c1193bf 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -15,7 +15,7 @@ pub type Transaction = VariableList; #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] pub struct ExecutionPayload { - pub parent_hash: 
Hash256, + pub parent_hash: ExecutionBlockHash, pub fee_recipient: Address, pub state_root: Hash256, pub receipts_root: Hash256, @@ -34,7 +34,7 @@ pub struct ExecutionPayload { pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::quoted_u256")] pub base_fee_per_gas: Uint256, - pub block_hash: Hash256, + pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: VariableList, T::MaxTransactionsPerPayload>, diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index aa022f6420c..1c173093a42 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -9,7 +9,7 @@ use tree_hash_derive::TreeHash; Default, Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] pub struct ExecutionPayloadHeader { - pub parent_hash: Hash256, + pub parent_hash: ExecutionBlockHash, pub fee_recipient: Address, pub state_root: Hash256, pub receipts_root: Hash256, @@ -28,7 +28,7 @@ pub struct ExecutionPayloadHeader { pub extra_data: VariableList, #[serde(with = "eth2_serde_utils::quoted_u256")] pub base_fee_per_gas: Uint256, - pub block_hash: Hash256, + pub block_hash: ExecutionBlockHash, pub transactions_root: Hash256, } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 7e5c66bdb8b..4058250d2a3 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -37,6 +37,7 @@ pub mod deposit_message; pub mod enr_fork_id; pub mod eth1_data; pub mod eth_spec; +pub mod execution_block_hash; pub mod execution_payload; pub mod execution_payload_header; pub mod fork; @@ -113,6 +114,7 @@ pub use crate::deposit_message::DepositMessage; pub use crate::enr_fork_id::EnrForkId; pub use crate::eth1_data::Eth1Data; pub use crate::eth_spec::EthSpecId; +pub use crate::execution_block_hash::ExecutionBlockHash; pub use crate::execution_payload::{ExecutionPayload, 
Transaction}; pub use crate::execution_payload_header::ExecutionPayloadHeader; pub use crate::fork::Fork; diff --git a/lcli/src/create_payload_header.rs b/lcli/src/create_payload_header.rs index 814a57f264c..04122d0e6b1 100644 --- a/lcli/src/create_payload_header.rs +++ b/lcli/src/create_payload_header.rs @@ -23,7 +23,7 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { base_fee_per_gas, timestamp: genesis_time, block_hash: eth1_block_hash, - random: eth1_block_hash, + random: eth1_block_hash.into_root(), ..ExecutionPayloadHeader::default() }; let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?; diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 83dcc2e7198..5254ff5a62e 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -108,7 +108,7 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul let genesis_state = interop_genesis_state::( &keypairs, genesis_time, - eth1_block_hash, + eth1_block_hash.into_root(), execution_payload_header, &spec, )?; diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 608429a9cb2..9744434f53d 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -14,15 +14,15 @@ use ssz_derive::Decode; use state_processing::state_advance::complete_state_advance; use std::time::Duration; use types::{ - Attestation, BeaconBlock, BeaconState, Checkpoint, Epoch, EthSpec, ForkName, Hash256, - IndexedAttestation, SignedBeaconBlock, Slot, Uint256, + Attestation, BeaconBlock, BeaconState, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, + ForkName, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, }; #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] #[serde(deny_unknown_fields)] pub struct PowBlock { - pub block_hash: Hash256, - pub parent_hash: Hash256, + pub block_hash: ExecutionBlockHash, + pub parent_hash: ExecutionBlockHash, pub 
total_difficulty: Uint256, } diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 23b50f94f75..26dbc1bfdd1 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; use tokio::time::sleep; -use types::{Address, ChainSpec, EthSpec, Hash256, MainnetEthSpec, Uint256}; +use types::{Address, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, MainnetEthSpec, Uint256}; const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(10); @@ -139,7 +139,7 @@ impl TestRig { let parent_hash = terminal_pow_block_hash; let timestamp = timestamp_now(); let random = Hash256::zero(); - let finalized_block_hash = Hash256::zero(); + let finalized_block_hash = ExecutionBlockHash::zero(); let proposer_index = 0; let valid_payload = self .ee_a @@ -161,7 +161,7 @@ impl TestRig { * `notify_new_payload`. */ let head_block_hash = valid_payload.block_hash; - let finalized_block_hash = Hash256::zero(); + let finalized_block_hash = ExecutionBlockHash::zero(); let payload_attributes = None; let status = self .ee_a @@ -193,7 +193,7 @@ impl TestRig { * Do not provide payload attributes (we'll test that later). 
*/ let head_block_hash = valid_payload.block_hash; - let finalized_block_hash = Hash256::zero(); + let finalized_block_hash = ExecutionBlockHash::zero(); let payload_attributes = None; let status = self .ee_a @@ -228,7 +228,7 @@ impl TestRig { let parent_hash = valid_payload.block_hash; let timestamp = valid_payload.timestamp + 1; let random = Hash256::zero(); - let finalized_block_hash = Hash256::zero(); + let finalized_block_hash = ExecutionBlockHash::zero(); let proposer_index = 0; let second_payload = self .ee_a @@ -263,7 +263,7 @@ impl TestRig { * Indicate that the payload is the head of the chain, providing payload attributes. */ let head_block_hash = valid_payload.block_hash; - let finalized_block_hash = Hash256::zero(); + let finalized_block_hash = ExecutionBlockHash::zero(); let payload_attributes = Some(PayloadAttributes { timestamp: second_payload.timestamp + 1, random: Hash256::zero(), @@ -296,7 +296,7 @@ impl TestRig { * Set the second payload as the head, without providing payload attributes. */ let head_block_hash = second_payload.block_hash; - let finalized_block_hash = Hash256::zero(); + let finalized_block_hash = ExecutionBlockHash::zero(); let payload_attributes = None; let status = self .ee_b @@ -339,7 +339,7 @@ impl TestRig { * Set the second payload as the head, without providing payload attributes. 
*/ let head_block_hash = second_payload.block_hash; - let finalized_block_hash = Hash256::zero(); + let finalized_block_hash = ExecutionBlockHash::zero(); let payload_attributes = None; let status = self .ee_b From a7cd9574f12fd03d62213136db4b9833795c0110 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 22 Feb 2022 14:55:45 +1100 Subject: [PATCH 66/92] Fix misc test compile issues --- beacon_node/beacon_chain/src/beacon_chain.rs | 15 +++++++++++++-- beacon_node/beacon_chain/src/errors.rs | 3 +++ .../beacon_chain/tests/attestation_production.rs | 1 + .../tests/attestation_verification.rs | 1 + .../beacon_chain/tests/block_verification.rs | 4 ++++ beacon_node/beacon_chain/tests/merge.rs | 4 ++-- beacon_node/beacon_chain/tests/op_verification.rs | 1 + .../tests/sync_committee_verification.rs | 1 + beacon_node/beacon_chain/tests/tests.rs | 1 + 9 files changed, 27 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6f351e64608..54c31a64b14 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3281,6 +3281,7 @@ impl BeaconChain { self.log, "Justified block is not in fork choice"; ); + return Err(Error::JustifiedMissingFromForkChoice { justified_root }); } Ok(()) @@ -3763,7 +3764,7 @@ impl BeaconChain { .map_err(Error::ExecutionForkChoiceUpdateFailed); match forkchoice_updated_response { - Ok(status) => match status { + Ok(status) => match &status { PayloadStatus::Valid | PayloadStatus::Syncing => Ok(()), // The specification doesn't list `ACCEPTED` as a valid response to a fork choice // update. This response *seems* innocent enough, so we won't return early with an @@ -3781,17 +3782,27 @@ impl BeaconChain { PayloadStatus::Invalid { latest_valid_hash, .. 
} => { + warn!( + self.log, + "Fork choice update invalidated payload"; + "status" => ?status + ); // The execution engine has stated that all blocks between the // `head_execution_block_hash` and `latest_valid_hash` are invalid. self.process_invalid_execution_payload( head_block_root, - Some(latest_valid_hash), + Some(*latest_valid_hash), )?; Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } PayloadStatus::InvalidTerminalBlock { .. } | PayloadStatus::InvalidBlockHash { .. } => { + warn!( + self.log, + "Fork choice update invalidated payload"; + "status" => ?status + ); // The execution engine has stated that the head block is invalid, however it // hasn't returned a latest valid ancestor. // diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 68ae36739d6..9f90c908502 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -151,6 +151,9 @@ pub enum BeaconChainError { justified_root: Hash256, }, ForkchoiceUpdate(execution_layer::Error), + JustifiedMissingFromForkChoice { + justified_root: Hash256, + }, } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 4d862cbac72..189d3baded2 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -26,6 +26,7 @@ fn produces_attestations() { .default_spec() .keypairs(KEYPAIRS[..].to_vec()) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let chain = &harness.chain; diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 3c675ec6a4a..00bf9fa9aab 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -42,6 +42,7 @@ fn 
get_harness(validator_count: usize) -> BeaconChainHarness BeaconChainHarness(chain: &[ExecutionPayload]) { for ep in chain { assert!(*ep != ExecutionPayload::default()); - assert!(ep.block_hash != Hash256::zero()); + assert!(ep.block_hash != ExecutionBlockHash::zero()); // Check against previous `ExecutionPayload`. if let Some(prev_ep) = prev_ep { @@ -40,7 +40,7 @@ fn merge_with_terminal_block_hash_override() { spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK, 0, - Hash256::zero(), + ExecutionBlockHash::zero(), ) .unwrap() .block_hash; diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index ec22a4804a1..c9df6aa31db 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -40,6 +40,7 @@ fn get_harness(store: Arc, validator_count: usize) -> TestHarness { .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_disk_store(store) + .mock_execution_layer() .build(); harness.advance_slot(); harness diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 2596ff18c16..13ca6196204 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -30,6 +30,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness BeaconChainHarness Date: Tue, 22 Feb 2022 14:56:03 +1100 Subject: [PATCH 67/92] Ensure default payload exec status --- consensus/fork_choice/src/fork_choice.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 7aa2ab3f6ff..2ce84c3d443 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -300,9 +300,15 @@ where let execution_status = anchor_block.message_merge().map_or_else( 
|()| ExecutionStatus::irrelevant(), |message| { - // Assume that this payload is valid, since the anchor should be a trusted block and - // state. - ExecutionStatus::Valid(message.body.execution_payload.block_hash) + let execution_payload = &message.body.execution_payload; + if execution_payload == &<_>::default() { + // A default payload does not have execution enabled. + ExecutionStatus::irrelevant() + } else { + // Assume that this payload is valid, since the anchor should be a trusted block and + // state. + ExecutionStatus::Valid(message.body.execution_payload.block_hash) + } }, ); From 6dfabb0da44d7140cf636dd82ec4bbb86ebb1f7e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 22 Feb 2022 15:07:46 +1100 Subject: [PATCH 68/92] Add mock EL to store tests --- beacon_node/beacon_chain/tests/store_tests.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 5c020df492f..d3038ac48d3 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -66,6 +66,7 @@ fn get_harness( .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_disk_store(store) + .mock_execution_layer() .build(); harness.advance_slot(); harness @@ -554,6 +555,7 @@ fn delete_blocks_and_states() { .default_spec() .keypairs(validators_keypairs) .fresh_disk_store(store.clone()) + .mock_execution_layer() .build(); let unforked_blocks: u64 = 4 * E::slots_per_epoch(); @@ -680,6 +682,7 @@ fn multi_epoch_fork_valid_blocks_test( .default_spec() .keypairs(validators_keypairs) .fresh_disk_store(store) + .mock_execution_layer() .build(); let num_fork1_blocks: u64 = num_fork1_blocks_.try_into().unwrap(); @@ -974,6 +977,7 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { .default_spec() .keypairs(validators_keypairs) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let slots_per_epoch = 
rig.slots_per_epoch(); let (mut state, state_root) = rig.get_current_state_and_root(); @@ -1083,6 +1087,7 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { .default_spec() .keypairs(validators_keypairs) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let slots_per_epoch = rig.slots_per_epoch(); let (state, state_root) = rig.get_current_state_and_root(); @@ -1212,6 +1217,7 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { .default_spec() .keypairs(validators_keypairs) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let slots_per_epoch = rig.slots_per_epoch(); let (mut state, state_root) = rig.get_current_state_and_root(); @@ -1306,6 +1312,7 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { .default_spec() .keypairs(validators_keypairs) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let (state, state_root) = rig.get_current_state_and_root(); @@ -1448,6 +1455,7 @@ fn prunes_skipped_slots_states() { .default_spec() .keypairs(validators_keypairs) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let (state, state_root) = rig.get_current_state_and_root(); @@ -1571,6 +1579,7 @@ fn finalizes_non_epoch_start_slot() { .default_spec() .keypairs(validators_keypairs) .fresh_ephemeral_store() + .mock_execution_layer() .build(); let (state, state_root) = rig.get_current_state_and_root(); @@ -2140,6 +2149,7 @@ fn finalizes_after_resuming_from_db() { .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_disk_store(store.clone()) + .mock_execution_layer() .build(); harness.advance_slot(); @@ -2183,6 +2193,7 @@ fn finalizes_after_resuming_from_db() { .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) .resumed_disk_store(store) + .mock_execution_layer() .build(); assert_chains_pretty_much_the_same(&original_chain, &resumed_harness.chain); @@ -2254,6 +2265,7 @@ fn revert_minority_fork_on_resume() { .spec(spec1) 
.keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_disk_store(store1) + .mock_execution_layer() .build(); // Chain with fork epoch configured. @@ -2263,6 +2275,7 @@ fn revert_minority_fork_on_resume() { .spec(spec2.clone()) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_disk_store(store2) + .mock_execution_layer() .build(); // Apply the same blocks to both chains initially. @@ -2358,6 +2371,7 @@ fn revert_minority_fork_on_resume() { .set_slot(end_slot.as_u64()); builder })) + .mock_execution_layer() .build(); // Head should now be just before the fork. From 0a152cf91f1b26df8b6feaaa4f678f22cbb8a91f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 22 Feb 2022 15:57:53 +1100 Subject: [PATCH 69/92] Fix failing test --- .../tests/payload_invalidation.rs | 2 ++ .../src/proto_array_fork_choice.rs | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 7be683f0a18..044fd669161 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -370,6 +370,8 @@ fn latest_valid_hash_will_validate() { if slot > LATEST_VALID_SLOT { assert!(execution_status.is_invalid()) + } else if slot == 0 { + assert!(execution_status.is_irrelevant()) } else { assert!(execution_status.is_valid()) } diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 21e7b360188..d9828090969 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -53,17 +53,36 @@ impl ExecutionStatus { } } + /// Returns `true` if the block: + /// + /// - Has execution enabled + /// - Has a valid payload pub fn is_valid(&self) -> bool { matches!(self, ExecutionStatus::Valid(_)) } + /// Returns `true` if the block: + /// + /// - Has execution enabled + /// - Has a 
payload that has not yet been verified by an EL. pub fn is_not_verified(&self) -> bool { matches!(self, ExecutionStatus::Unknown(_)) } + /// Returns `true` if the block: + /// + /// - Has execution enabled + /// - Has an invalid payload. pub fn is_invalid(&self) -> bool { matches!(self, ExecutionStatus::Invalid(_)) } + + /// Returns `true` if the block: + /// + /// - Does not have execution enabled (before or after Bellatrix fork) + pub fn is_irrelevant(&self) -> bool { + matches!(self, ExecutionStatus::Irrelevant(_)) + } } /// A block that is to be applied to the fork choice. From be3e2e3e4a2a28c7813b0ce326868fce519edb25 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 22 Feb 2022 16:12:34 +1100 Subject: [PATCH 70/92] Un-comment release-only flag in test --- beacon_node/beacon_chain/tests/payload_invalidation.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 044fd669161..2115fffe050 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1,4 +1,4 @@ -// #![cfg(not(debug_assertions))] +#![cfg(not(debug_assertions))] use beacon_chain::{ test_utils::{BeaconChainHarness, EphemeralHarnessType}, From fe35997273751f8d3e302b6303cdc39e65517751 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 23 Feb 2022 08:34:53 +1100 Subject: [PATCH 71/92] Tidy, add comment --- .../execution_layer/src/test_utils/mod.rs | 3 ++ consensus/fork_choice/src/fork_choice.rs | 1 + consensus/fork_choice/tests/tests.rs | 16 +++++------ consensus/proto_array/src/proto_array.rs | 28 +++++++++++++++++++ .../src/proto_array_fork_choice.rs | 1 + 5 files changed, 41 insertions(+), 8 deletions(-) diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 7ae24b2d662..e98b14e465d 100644 --- 
a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -27,6 +27,7 @@ mod execution_block_generator; mod handle_rpc; mod mock_execution_layer; +/// Used for returning a static response for new payload calls. pub enum FixedPayloadResponse { None, Valid, @@ -160,6 +161,8 @@ impl MockServer { *self.ctx.static_new_payload_response.lock() = Some(response) } + /// Disables any static payload response so the execution block generator will do its own + /// verification. pub fn full_payload_verification(&self) { *self.ctx.static_new_payload_response.lock() = None } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 2ce84c3d443..3f1ba7cb1ce 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -471,6 +471,7 @@ where Ok(true) } + /// See `ProtoArrayForkChoice::process_execution_payload_invalidation` for documentation. pub fn on_invalid_execution_payload( &mut self, head_block_root: Hash256, diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index fc431f29aa7..0f7457f8eb3 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -122,24 +122,24 @@ impl ForkChoiceTest { } /// Assert there was a shutdown signal sent by the beacon chain. - pub fn assert_shutdown_signal_sent(self) -> Self { + pub fn shutdown_signal_sent(&self) -> bool { let mutex = self.harness.shutdown_receiver.clone(); let mut shutdown_receiver = mutex.lock(); shutdown_receiver.close(); let msg = shutdown_receiver.try_next().unwrap(); - assert!(msg.is_some()); + msg.is_some() + } + + /// Assert there was a shutdown signal sent by the beacon chain. + pub fn assert_shutdown_signal_sent(self) -> Self { + assert!(self.shutdown_signal_sent()); self } /// Assert no shutdown was signal sent by the beacon chain. 
pub fn assert_shutdown_signal_not_sent(self) -> Self { - let mutex = self.harness.shutdown_receiver.clone(); - let mut shutdown_receiver = mutex.lock(); - - shutdown_receiver.close(); - let msg = shutdown_receiver.try_next().unwrap(); - assert!(msg.is_none()); + assert!(!self.shutdown_signal_sent()); self } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 90124df8432..e9b20632e84 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -267,6 +267,12 @@ impl ProtoArray { Ok(()) } + /// Updates the `verified_node_index` and all descendants to have validated execution payloads. + /// + /// Returns an error if: + /// + /// - The `verified_node_index` is unknown. + /// - Any of the to-be-validated payloads are already invalid. pub fn propagate_execution_payload_validation( &mut self, verified_node_index: usize, @@ -310,6 +316,28 @@ impl ProtoArray { } } + /// Potentially updates blocks to have an invalid payload and therefore be ineligible for the + /// head. + /// + /// The `head_block_root` is the block *root* of the latest invalid block. The + /// `latest_valid_hash` is a payload *hash* and should be: + /// + /// - `Some(hash)` if the block with that payload *hash* is known to be valid and is an + /// ancestor of `head_block_root`. + /// - `None` if the latest valid ancestor of `head_block_root` is unknown. + /// + /// ## Details + /// + /// If `head_block_root` is not known to fork choice, an error is returned. + /// + /// If `latest_valid_hash` is `Some(hash)` where `hash` is either not known to fork choice + /// (perhaps it's junk or pre-finalization), then only the `head_block_root` block will be + /// invalidated (no ancestors). No error will be returned in this case. 
+ /// + /// If `latest_valid_hash` is `Some(hash)` where `hash` is a known ancestor of + /// `head_block_root`, then all blocks between `head_block_root` and `latest_valid_hash` will + /// be invalidated. Additionally, all blocks that descend from a newly-invalidated block will + /// also be invalidated. pub fn propagate_execution_payload_invalidation( &mut self, head_block_root: Hash256, diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index d9828090969..1c140f38a2c 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -184,6 +184,7 @@ impl ProtoArrayForkChoice { }) } + /// See `ProtoArray::propagate_execution_payload_invalidation` for documentation. pub fn process_execution_payload_invalidation( &mut self, head_block_root: Hash256, From 031d71f68be942f953e31c9f32a9f6d98dcce167 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 23 Feb 2022 08:35:18 +1100 Subject: [PATCH 72/92] Check for finalized desc --- consensus/proto_array/src/proto_array.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index e9b20632e84..eddc6c4f825 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -357,9 +357,10 @@ impl ProtoArray { // // 1. The `head_block_root` is a descendant of `latest_valid_ancestor_hash` // 2. The `latest_valid_ancestor_hash` is equal to or a descendant of the finalized block. 
- let latest_valid_ancestor_is_descendant = latest_valid_ancestor_root - .map_or(false, |ancestor_root| { + let latest_valid_ancestor_is_descendant = + latest_valid_ancestor_root.map_or(false, |ancestor_root| { self.is_descendant(ancestor_root, head_block_root) + && self.is_descendant(self.finalized_checkpoint.root, ancestor_root) }); // Collect all *ancestors* which were declared invalid since they reside between the From a915ce6953505087b95e15c1a7f53b3facdac098 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 23 Feb 2022 11:18:58 +1100 Subject: [PATCH 73/92] Avoid reading finalized block from DB --- beacon_node/beacon_chain/src/beacon_chain.rs | 98 +++++++++----------- beacon_node/beacon_chain/src/errors.rs | 7 ++ beacon_node/client/src/builder.rs | 23 ++++- 3 files changed, 72 insertions(+), 56 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 54c31a64b14..981a51d9b67 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3302,19 +3302,51 @@ impl BeaconChain { } fn fork_choice_internal(&self) -> Result<(), Error> { - // Determine the root of the block that is the head of the chain. - let beacon_block_root = self - .fork_choice - .write() - .get_head(self.slot()?, &self.spec)?; + // Atomically obtain the head block root and the finalized block. + let (beacon_block_root, finalized_block) = { + let mut fork_choice = self.fork_choice.write(); + + // Determine the root of the block that is the head of the chain. 
+ let beacon_block_root = fork_choice.get_head(self.slot()?, &self.spec)?; + + let finalized_checkpoint = fork_choice.finalized_checkpoint(); + let finalized_block = fork_choice.get_block(&finalized_checkpoint.root).ok_or( + BeaconChainError::FinalizedBlockMissingFromForkChoice(finalized_checkpoint.root), + )?; + + (beacon_block_root, finalized_block) + }; let current_head = self.head_info()?; let old_finalized_checkpoint = current_head.finalized_checkpoint; + // Exit early if the head hasn't changed. if beacon_block_root == current_head.block_root { return Ok(()); } + // Check to ensure that this finalized block hasn't been marked as invalid. + if let ExecutionStatus::Invalid(block_hash) = finalized_block.execution_status { + crit!( + self.log, + "Finalized block has an invalid payload"; + "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \ + You may be on a hostile network.", + "block_hash" => ?block_hash + ); + let mut shutdown_sender = self.shutdown_sender(); + shutdown_sender + .try_send(ShutdownReason::Failure( + "Finalized block has an invalid execution payload.", + )) + .map_err(BeaconChainError::InvalidFinalizedPayloadShutdownError)?; + + // Exit now, the node is in an invalid state. + return Err(Error::InvalidFinalizedPayload { + finalized_root: finalized_block.root, + }); + } + let lag_timer = metrics::start_timer(&metrics::FORK_CHOICE_SET_HEAD_LAG_TIMES); // At this point we know that the new head block is not the same as the previous one @@ -3562,33 +3594,6 @@ impl BeaconChain { } if new_finalized_checkpoint.epoch != old_finalized_checkpoint.epoch { - // Check to ensure that this finalized block hasn't been marked as invalid. 
- let finalized_block = self - .fork_choice - .read() - .get_block(&new_finalized_checkpoint.root) - .ok_or(BeaconChainError::FinalizedBlockMissingFromForkChoice( - new_finalized_checkpoint.root, - ))?; - if let ExecutionStatus::Invalid(block_hash) = finalized_block.execution_status { - crit!( - self.log, - "Finalized block has an invalid payload"; - "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \ - You may be on a hostile network.", - "block_hash" => ?block_hash - ); - let mut shutdown_sender = self.shutdown_sender(); - shutdown_sender - .try_send(ShutdownReason::Failure( - "Finalized block has an invalid execution payload.", - )) - .map_err(BeaconChainError::InvalidFinalizedPayloadShutdownError)?; - - // Exit now, the node is in an invalid state. - return Ok(()); - } - // Due to race conditions, it's technically possible that the head we load here is // different to the one earlier in this function. // @@ -3689,8 +3694,12 @@ impl BeaconChain { // If this is a post-merge block, update the execution layer. 
if let Some(new_head_execution_block_hash) = new_head_execution_block_hash_opt { if is_merge_transition_complete { + let finalized_execution_block_hash = finalized_block + .execution_status + .block_hash() + .unwrap_or_else(ExecutionBlockHash::zero); if let Err(e) = self.update_execution_engine_forkchoice_blocking( - new_finalized_checkpoint.root, + finalized_execution_block_hash, beacon_block_root, new_head_execution_block_hash, ) { @@ -3708,7 +3717,7 @@ impl BeaconChain { pub fn update_execution_engine_forkchoice_blocking( &self, - finalized_beacon_block_root: Hash256, + finalized_execution_block_hash: ExecutionBlockHash, head_block_root: Hash256, head_execution_block_hash: ExecutionBlockHash, ) -> Result<(), Error> { @@ -3720,7 +3729,7 @@ impl BeaconChain { execution_layer .block_on_generic(|_| { self.update_execution_engine_forkchoice_async( - finalized_beacon_block_root, + finalized_execution_block_hash, head_block_root, head_execution_block_hash, ) @@ -3730,27 +3739,10 @@ impl BeaconChain { pub async fn update_execution_engine_forkchoice_async( &self, - finalized_beacon_block_root: Hash256, + finalized_execution_block_hash: ExecutionBlockHash, head_block_root: Hash256, head_execution_block_hash: ExecutionBlockHash, ) -> Result<(), Error> { - // Loading the finalized block from the store is not ideal. Perhaps it would be better to - // store it on fork-choice so we can do a lookup without hitting the database. - // - // See: https://github.com/sigp/lighthouse/pull/2627#issuecomment-927537245 - let finalized_block = self - .store - .get_block(&finalized_beacon_block_root)? 
- .ok_or(Error::MissingBeaconBlock(finalized_beacon_block_root))?; - - let finalized_execution_block_hash = finalized_block - .message() - .body() - .execution_payload() - .ok() - .map(|ep| ep.block_hash) - .unwrap_or_else(ExecutionBlockHash::zero); - let forkchoice_updated_response = self .execution_layer .as_ref() diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 9f90c908502..e0c99abee26 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -146,6 +146,9 @@ pub enum BeaconChainError { BlockRewardSyncError, HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), + InvalidFinalizedPayload { + finalized_root: Hash256, + }, InvalidFinalizedPayloadShutdownError(TrySendError), JustifiedPayloadInvalid { justified_root: Hash256, @@ -154,6 +157,10 @@ pub enum BeaconChainError { JustifiedMissingFromForkChoice { justified_root: Hash256, }, + FinalizedCheckpointMismatch { + head_state: Checkpoint, + fork_choice: Hash256, + }, } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index a02338b57bd..f8c21b86230 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -31,8 +31,8 @@ use std::time::Duration; use timer::spawn_timer; use tokio::sync::{mpsc::UnboundedSender, oneshot}; use types::{ - test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec, Hash256, - SignedBeaconBlock, + test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec, + ExecutionBlockHash, Hash256, SignedBeaconBlock, }; /// Interval between polling the eth1 node for genesis information. @@ -669,6 +669,23 @@ where // Issue the head to the execution engine on startup. This ensures it can start // syncing. 
if let Some(block_hash) = head.execution_payload_block_hash { + let finalized_root = head.finalized_checkpoint.root; + let finalized_block = beacon_chain + .store + .get_block(&finalized_root) + .map_err(|e| format!("Failed to read finalized block from DB: {:?}", e))? + .ok_or(format!( + "Finalized block missing from store: {:?}", + finalized_root + ))?; + let finalized_execution_block_hash = finalized_block + .message() + .body() + .execution_payload() + .ok() + .map(|ep| ep.block_hash) + .unwrap_or_else(ExecutionBlockHash::zero); + // Spawn a new task using the "async" fork choice update method, rather than // using the "blocking" method. // @@ -679,7 +696,7 @@ where async move { let result = inner_chain .update_execution_engine_forkchoice_async( - head.finalized_checkpoint.root, + finalized_execution_block_hash, head.block_root, block_hash, ) From 0a77253c503cdb6445fa7be2360e84a286de96ef Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 23 Feb 2022 11:21:51 +1100 Subject: [PATCH 74/92] Remove todo about payload ids --- beacon_node/execution_layer/src/lib.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index f9c505b17ce..70f2d62dcf3 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -542,8 +542,6 @@ impl ExecutionLayer { }) .await; - // TODO(bellatrix): process payload_ids so we can use them for producing payloads later. 
- process_multiple_payload_statuses( head_block_hash, broadcast_results From 6366e949d8d7da8836c68c184e6edd85e04eddd5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 23 Feb 2022 11:28:50 +1100 Subject: [PATCH 75/92] Get justified root atomically --- beacon_node/beacon_chain/src/beacon_chain.rs | 72 ++++++++++---------- 1 file changed, 35 insertions(+), 37 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 981a51d9b67..0c364c07460 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3239,49 +3239,47 @@ impl BeaconChain { ); } - // Check to ensure the justified checkpoint does not have an invalid payload. If so, try - // to kill the client. - let head_info = self.head_info()?; - // De-alias 0x00..00 to the genesis block at genesis. - let justified_root = { - let justified_checkpoint = head_info.current_justified_checkpoint; - if justified_checkpoint.root == Hash256::zero() && justified_checkpoint.epoch == 0 { - self.genesis_block_root - } else { - justified_checkpoint.root - } + // Atomically obtain the justified root from fork choice. + let justified_block = { + let fork_choice = self.fork_choice.read(); + // De-alias 0x00..00 to the genesis block at genesis. + let justified_root = { + let justified_checkpoint = fork_choice.justified_checkpoint(); + if justified_checkpoint.root == Hash256::zero() && justified_checkpoint.epoch == 0 { + self.genesis_block_root + } else { + justified_checkpoint.root + } + }; + fork_choice + .get_block(&justified_root) + .ok_or_else(|| Error::JustifiedMissingFromForkChoice { justified_root })? 
}; - if let Some(proto_block) = self.fork_choice.read().get_block(&justified_root) { - if proto_block.execution_status.is_invalid() { + if justified_block.execution_status.is_invalid() { + crit!( + self.log, + "The justified checkpoint is invalid"; + "msg" => "ensure you are not connected to a malicious network. this error is not \ + recoverable, please reach out to the lighthouse developers for assistance." + ); + + let mut shutdown_sender = self.shutdown_sender(); + if let Err(e) = shutdown_sender.try_send(ShutdownReason::Failure( + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, + )) { crit!( self.log, - "The justified checkpoint is invalid"; - "msg" => "ensure you are not connected to a malicious network. this error is not \ - recoverable, please reach out to the lighthouse developers for assistance." + "Unable trigger client shut down"; + "msg" => "shut down may already be under way", + "error" => ?e ); - - let mut shutdown_sender = self.shutdown_sender(); - if let Err(e) = shutdown_sender.try_send(ShutdownReason::Failure( - INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, - )) { - crit!( - self.log, - "Unable trigger client shut down"; - "msg" => "shut down may already be under way", - "error" => ?e - ); - } - - // Return an error here to try and prevent progression by upstream functions. - return Err(Error::JustifiedPayloadInvalid { justified_root }); } - } else { - crit!( - self.log, - "Justified block is not in fork choice"; - ); - return Err(Error::JustifiedMissingFromForkChoice { justified_root }); + + // Return an error here to try and prevent progression by upstream functions. 
+ return Err(Error::JustifiedPayloadInvalid { + justified_root: justified_block.root, + }); } Ok(()) From 70a10e1b187a35b86c1b1b362433be162c7f3f40 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 23 Feb 2022 11:31:26 +1100 Subject: [PATCH 76/92] Fix clippy lint --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0c364c07460..5d5f36de10f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3253,7 +3253,7 @@ impl BeaconChain { }; fork_choice .get_block(&justified_root) - .ok_or_else(|| Error::JustifiedMissingFromForkChoice { justified_root })? + .ok_or(Error::JustifiedMissingFromForkChoice { justified_root })? }; if justified_block.execution_status.is_invalid() { From d74b96c1930323e2e040bdb40feab7ed115584ed Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 25 Feb 2022 15:22:52 +1100 Subject: [PATCH 77/92] Apply suggestions from code review Co-authored-by: realbigsean Co-authored-by: Michael Sproul --- beacon_node/beacon_chain/src/beacon_chain.rs | 6 +++--- .../beacon_chain/src/execution_payload.rs | 2 +- .../beacon_chain/tests/payload_invalidation.rs | 6 +++--- beacon_node/execution_layer/src/lib.rs | 4 ++-- consensus/proto_array/src/proto_array.rs | 16 ++++++++-------- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5d5f36de10f..f5b60031906 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3242,7 +3242,7 @@ impl BeaconChain { // Atomically obtain the justified root from fork choice. let justified_block = { let fork_choice = self.fork_choice.read(); - // De-alias 0x00..00 to the genesis block at genesis. + // De-alias 0x00..00 to the genesis block root. 
let justified_root = { let justified_checkpoint = fork_choice.justified_checkpoint(); if justified_checkpoint.root == Hash256::zero() && justified_checkpoint.epoch == 0 { @@ -3260,7 +3260,7 @@ impl BeaconChain { crit!( self.log, "The justified checkpoint is invalid"; - "msg" => "ensure you are not connected to a malicious network. this error is not \ + "msg" => "ensure you are not connected to a malicious network. This error is not \ recoverable, please reach out to the lighthouse developers for assistance." ); @@ -3270,7 +3270,7 @@ impl BeaconChain { )) { crit!( self.log, - "Unable trigger client shut down"; + "Unable to trigger client shut down"; "msg" => "shut down may already be under way", "error" => ?e ); diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 4b249ea7601..4e3e91abc58 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -73,7 +73,7 @@ pub fn notify_new_payload( Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()) } PayloadStatus::InvalidTerminalBlock { .. } | PayloadStatus::InvalidBlockHash { .. } => { - // Returning an error here should sufficient to invalidate the block. We have no + // Returning an error here should be sufficient to invalidate the block. We have no // information to indicate it's parent is invalid, so no need to run // `BeaconChain::process_invalid_execution_payload`. 
Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()) diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 2115fffe050..198f6741570 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -227,7 +227,7 @@ fn valid_invalid_syncing() { rig.import_block(Payload::Syncing); } -/// Ensure that an invalid payload can invalidate its parent too (give then right +/// Ensure that an invalid payload can invalidate its parent too (given the right /// `latest_valid_hash`. #[test] fn invalid_payload_invalidates_parent() { @@ -297,7 +297,7 @@ fn justified_checkpoint_becomes_invalid() { ); } -/// Ensure that a `latest_valid_hash` for a pre-finality block only revert a single block. +/// Ensure that a `latest_valid_hash` for a pre-finality block only reverts a single block. #[test] fn pre_finalized_latest_valid_hash() { let num_blocks = E::slots_per_epoch() * 4; @@ -315,7 +315,7 @@ fn pre_finalized_latest_valid_hash() { // No service should have triggered a shutdown, yet. assert!(rig.harness.shutdown_reasons().is_empty()); - // Import a block that will invalidate the justified checkpoint. + // Import a pre-finalized block. rig.import_block(Payload::Invalid { latest_valid_hash: Some(pre_finalized_block_hash), }); diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 70f2d62dcf3..354127461ee 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -444,8 +444,8 @@ impl ExecutionLayer { crit!( self.log(), "Exec engine unable to produce payload"; - "msg" => "no payload id, the engine is likely syncing. / - this has potentially caused a missed block proposal.", + "msg" => "No payload ID, the engine is likely syncing. 
\ + This has the potential to cause a missed block proposal.", ); ApiError::PayloadIdUnavailable diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index eddc6c4f825..8930d775277 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -267,7 +267,7 @@ impl ProtoArray { Ok(()) } - /// Updates the `verified_node_index` and all descendants to have validated execution payloads. + /// Updates the `verified_node_index` and all ancestors to have validated execution payloads. /// /// Returns an error if: /// @@ -324,14 +324,14 @@ impl ProtoArray { /// /// - `Some(hash)` if the block with that payload *hash* is known to be valid and is an /// ancestor of `head_block_root`. - /// - `None` if the latest valid ancestor of `head_block_root` is unkown. + /// - `None` if the latest valid ancestor of `head_block_root` is unknown. /// /// ## Details /// /// If `head_block_root` is not known to fork choice, an error is returned. /// /// If `latest_valid_hash` is `Some(hash)` where `hash` is either not known to fork choice - /// (perhaps it's junk orpre-finalization), then only the `head_block_root` block will be + /// (perhaps it's junk or pre-finalization), then only the `head_block_root` block will be /// invalidated (no ancestors). No error will be returned in this case. /// /// If `latest_valid_hash` is `Some(hash)` where `hash` is a known ancestor of @@ -364,7 +364,7 @@ impl ProtoArray { }); // Collect all *ancestors* which were declared invalid since they reside between the - // `invalid_root` and the `latest_valid_ancestor_root`. + // `head_block_root` and the `latest_valid_ancestor_root`. loop { let node = self .nodes @@ -376,7 +376,7 @@ impl ProtoArray { | ExecutionStatus::Invalid(hash) | ExecutionStatus::Unknown(hash) => { // If we're no longer processing the `head_block_root` and the last valid - // ancestor is known, exit now with an error. 
+ // ancestor is unknown, exit now with an error. // // In effect, this means that if an unknown hash (junk or pre-finalization) is // supplied, we only invalidate a single block and no ancestors. The alternative @@ -434,7 +434,7 @@ impl ProtoArray { // The block is already invalid, but keep going backwards to ensure all ancestors // are updated. ExecutionStatus::Invalid(_) => (), - // This block is pre-merge, therefore it has no execution status. Nor does its + // This block is pre-merge, therefore it has no execution status. Nor do its // ancestors. ExecutionStatus::Irrelevant(_) => break, } @@ -458,7 +458,7 @@ impl ProtoArray { .ok_or(Error::NodeUnknown(latest_valid_ancestor_root))?; let first_potential_descendant = latest_valid_ancestor_index + 1; - // Collect all *descendants* which declared invalid since they're the descendant of a block + // Collect all *descendants* which have been declared invalid since they're the descendant of a block // with an invalid execution payload. for index in first_potential_descendant..self.nodes.len() { let node = self @@ -777,7 +777,7 @@ impl ProtoArray { } /// Returns `true` if the `descendant_root` has an ancestor with `ancestor_root`. Always - /// returns `false` if either input roots are unknown. + /// returns `false` if either input root is unknown. /// /// ## Notes /// From 2ae6d78158f27155c36b3073807f1b4150be4048 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 25 Feb 2022 15:23:39 +1100 Subject: [PATCH 78/92] Add merge to makefile --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index bc607304af5..8507767c3c0 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ PINNED_NIGHTLY ?= nightly # List of all hard forks. This list is used to set env variables for several tests so that # they run for different forks. -FORKS=phase0 altair +FORKS=phase0 altair merge # Builds the Lighthouse binary in release (optimized). 
# From 1ed7431ba2752b3bbefda99811392b138cf0d449 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 25 Feb 2022 16:37:37 +1100 Subject: [PATCH 79/92] Add just/fin block getters to fork choice Add just/fin block getters to fork choice --- beacon_node/beacon_chain/src/beacon_chain.rs | 23 ++-------------- beacon_node/beacon_chain/src/errors.rs | 3 -- consensus/fork_choice/src/fork_choice.rs | 29 ++++++++++++++++++++ 3 files changed, 31 insertions(+), 24 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 8ed855fd4f6..8cfe515f07b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3240,21 +3240,7 @@ impl BeaconChain { } // Atomically obtain the justified root from fork choice. - let justified_block = { - let fork_choice = self.fork_choice.read(); - // De-alias 0x00..00 to the genesis block root. - let justified_root = { - let justified_checkpoint = fork_choice.justified_checkpoint(); - if justified_checkpoint.root == Hash256::zero() && justified_checkpoint.epoch == 0 { - self.genesis_block_root - } else { - justified_checkpoint.root - } - }; - fork_choice - .get_block(&justified_root) - .ok_or(Error::JustifiedMissingFromForkChoice { justified_root })? - }; + let justified_block = self.fork_choice.read().get_justified_block()?; if justified_block.execution_status.is_invalid() { crit!( @@ -3307,12 +3293,7 @@ impl BeaconChain { // Determine the root of the block that is the head of the chain. let beacon_block_root = fork_choice.get_head(self.slot()?, &self.spec)?; - let finalized_checkpoint = fork_choice.finalized_checkpoint(); - let finalized_block = fork_choice.get_block(&finalized_checkpoint.root).ok_or( - BeaconChainError::FinalizedBlockMissingFromForkChoice(finalized_checkpoint.root), - )?; - - (beacon_block_root, finalized_block) + (beacon_block_root, fork_choice.get_finalized_block()?) 
}; let current_head = self.head_info()?; diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index e0c99abee26..d2d9ebef6ac 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -154,9 +154,6 @@ pub enum BeaconChainError { justified_root: Hash256, }, ForkchoiceUpdate(execution_layer::Error), - JustifiedMissingFromForkChoice { - justified_root: Hash256, - }, FinalizedCheckpointMismatch { head_state: Checkpoint, fork_choice: Hash256, diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 94ce93ccad5..9f98dadf3b0 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -44,6 +44,12 @@ pub enum Error { block_root: Hash256, payload_verification_status: PayloadVerificationStatus, }, + MissingJustifiedBlock { + justified_checkpoint: Checkpoint, + }, + MissingFinalizedBlock { + finalized_checkpoint: Checkpoint, + }, } impl From for Error { @@ -893,6 +899,29 @@ where } } + /// Returns the `ProtoBlock` for the justified checkpoint. + /// + /// ## Notes + /// + /// This does *not* return the "best justified checkpoint". It returns the justified checkpoint + /// that is used for computing balances. + pub fn get_justified_block(&self) -> Result> { + let justified_checkpoint = self.justified_checkpoint(); + self.get_block(&justified_checkpoint.root) + .ok_or(Error::MissingJustifiedBlock { + justified_checkpoint, + }) + } + + /// Returns the `ProtoBlock` for the finalized checkpoint. + pub fn get_finalized_block(&self) -> Result> { + let finalized_checkpoint = self.finalized_checkpoint(); + self.get_block(&finalized_checkpoint.root) + .ok_or(Error::MissingFinalizedBlock { + finalized_checkpoint, + }) + } + /// Return `true` if `block_root` is equal to the finalized root, or a known descendant of it. 
pub fn is_descendant_of_finalized(&self, block_root: Hash256) -> bool { self.proto_array From 43e7b5992a32d4e8abd72c6c78ba32129b35b022 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 26 Feb 2022 11:45:56 +1100 Subject: [PATCH 80/92] Add execution block hashes to errors --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 ++ beacon_node/beacon_chain/src/errors.rs | 2 ++ 2 files changed, 4 insertions(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 8cfe515f07b..4cd5cfe07be 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3265,6 +3265,7 @@ impl BeaconChain { // Return an error here to try and prevent progression by upstream functions. return Err(Error::JustifiedPayloadInvalid { justified_root: justified_block.root, + execution_block_hash: justified_block.execution_status.block_hash(), }); } @@ -3323,6 +3324,7 @@ impl BeaconChain { // Exit now, the node is in an invalid state. 
return Err(Error::InvalidFinalizedPayload { finalized_root: finalized_block.root, + execution_block_hash: block_hash, }); } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index d2d9ebef6ac..e8cc157ce42 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -148,10 +148,12 @@ pub enum BeaconChainError { FinalizedBlockMissingFromForkChoice(Hash256), InvalidFinalizedPayload { finalized_root: Hash256, + execution_block_hash: ExecutionBlockHash, }, InvalidFinalizedPayloadShutdownError(TrySendError), JustifiedPayloadInvalid { justified_root: Hash256, + execution_block_hash: Option, }, ForkchoiceUpdate(execution_layer::Error), FinalizedCheckpointMismatch { From c39a1d9e6a7f996d7f34145dd55d728cd228271d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 26 Feb 2022 11:49:44 +1100 Subject: [PATCH 81/92] Update beacon_node/beacon_chain/src/execution_payload.rs Co-authored-by: Michael Sproul --- beacon_node/beacon_chain/src/execution_payload.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 4e3e91abc58..30a0d2b1982 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -74,7 +74,7 @@ pub fn notify_new_payload( } PayloadStatus::InvalidTerminalBlock { .. } | PayloadStatus::InvalidBlockHash { .. } => { // Returning an error here should be sufficient to invalidate the block. We have no - // information to indicate it's parent is invalid, so no need to run + // information to indicate its parent is invalid, so no need to run // `BeaconChain::process_invalid_execution_payload`. 
Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()) } From 5142db0f0b1faad4b0571be6ba2e28127c9daeec Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 26 Feb 2022 11:51:50 +1100 Subject: [PATCH 82/92] Lower log level --- beacon_node/execution_layer/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 354127461ee..326c46f8700 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -441,7 +441,7 @@ impl ExecutionLayer { .await .map(|response| response.payload_id)? .ok_or_else(|| { - crit!( + error!( self.log(), "Exec engine unable to produce payload"; "msg" => "No payload ID, the engine is likely syncing. \ From ae2968485665b67a31cf73da130c524253ed66b3 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 26 Feb 2022 12:04:28 +1100 Subject: [PATCH 83/92] Add test for just/fin block getter --- consensus/fork_choice/tests/tests.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index aa6c6a118b2..160800ca508 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -485,6 +485,22 @@ fn is_safe_to_update(slot: Slot, spec: &ChainSpec) -> bool { slot % E::slots_per_epoch() < spec.safe_slots_to_update_justified } +#[test] +fn justified_and_finalized_blocks() { + let tester = ForkChoiceTest::new(); + let fork_choice = tester.harness.chain.fork_choice.read(); + + let justified_checkpoint = fork_choice.justified_checkpoint(); + assert_eq!(justified_checkpoint.epoch, 0); + assert!(justified_checkpoint.root != Hash256::zero()); + assert!(fork_choice.get_justified_block().is_ok()); + + let finalized_checkpoint = fork_choice.finalized_checkpoint(); + assert_eq!(finalized_checkpoint.epoch, 0); + assert!(finalized_checkpoint.root != Hash256::zero()); + 
assert!(fork_choice.get_finalized_block().is_ok()); +} + /// - The new justified checkpoint descends from the current. /// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` #[test] From 7eed12208656484ad735b3184b21bdbff7c24b9f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 26 Feb 2022 12:08:49 +1100 Subject: [PATCH 84/92] Log errors whilst processing payload status --- beacon_node/execution_layer/src/payload_status.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/beacon_node/execution_layer/src/payload_status.rs b/beacon_node/execution_layer/src/payload_status.rs index aa65f489232..e0b1a01b43a 100644 --- a/beacon_node/execution_layer/src/payload_status.rs +++ b/beacon_node/execution_layer/src/payload_status.rs @@ -172,6 +172,15 @@ pub fn process_multiple_payload_statuses( return Err(Error::ConsensusFailure); } + // Log any errors to assist with troubleshooting. + for error in &errors { + warn!( + log, + "Error whilst processing payload status"; + "error" => ?error, + ); + } + valid_statuses .first() .or_else(|| invalid_statuses.first()) From 8da2668ae5dd9ca083fc1cca4f04d84a509fb07c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 26 Feb 2022 12:10:58 +1100 Subject: [PATCH 85/92] Remove unused struct --- beacon_node/execution_layer/src/test_utils/mod.rs | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index e98b14e465d..9d6eb5cf04b 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -14,7 +14,7 @@ use std::marker::PhantomData; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::sync::Arc; use tokio::{runtime, sync::oneshot}; -use types::{EthSpec, ExecutionBlockHash, Hash256, Uint256}; +use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::Filter; pub use execution_block_generator::{generate_pow_block, 
ExecutionBlockGenerator}; @@ -27,14 +27,6 @@ mod execution_block_generator; mod handle_rpc; mod mock_execution_layer; -/// Used for returning a static response for new payload calls. -pub enum FixedPayloadResponse { - None, - Valid, - Invalid { latest_valid_hash: Hash256 }, - Syncing, -} - pub struct MockServer { _shutdown_tx: oneshot::Sender<()>, listen_socket_addr: SocketAddr, From c7d0c6c368acc6c5b706dd72123403b095717e59 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 26 Feb 2022 12:16:10 +1100 Subject: [PATCH 86/92] Simplify best child/desc code --- consensus/proto_array/src/proto_array.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 8bd3c6964c1..a24d0a68dd9 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -395,12 +395,18 @@ impl ProtoArray { // `best_descendant`. However, we check each variable independently to // defend against errors which might result in an invalid block being set as // head. - node.best_child = node + if node .best_child - .filter(|best_child| invalidated_indices.contains(best_child)); - node.best_descendant = node.best_descendant.filter(|best_descendant| { - invalidated_indices.contains(best_descendant) - }); + .map_or(false, |i| invalidated_indices.contains(&i)) + { + node.best_child = None + } + if node + .best_descendant + .map_or(false, |i| invalidated_indices.contains(&i)) + { + node.best_descendant = None + } // It might be new knowledge that this block is valid, ensure that it and all // ancestors are marked as valid. 
From 4e3a07858113566c557a437f269a4481261a4377 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 26 Feb 2022 14:04:40 +1100 Subject: [PATCH 87/92] Adjust weights --- consensus/proto_array/src/proto_array.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index a24d0a68dd9..6501529fbef 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -129,7 +129,9 @@ impl ProtoArray { continue; } - let mut node_delta = if node.execution_status.is_invalid() { + let execution_status_is_invalid = node.execution_status.is_invalid(); + + let mut node_delta = if execution_status_is_invalid { // If the node has an invalid execution payload, reduce its weight to zero. 0_i64 .checked_sub(node.weight as i64) @@ -145,17 +147,24 @@ impl ProtoArray { // the delta by the previous score amount. if self.previous_proposer_boost.root != Hash256::zero() && self.previous_proposer_boost.root == node.root + // Invalid nodes will always have a weight of zero so there's no need to subtract + // the proposer boost delta. + && !execution_status_is_invalid { node_delta = node_delta .checked_sub(self.previous_proposer_boost.score as i64) .ok_or(Error::DeltaOverflow(node_index))?; } // If we find the node matching the current proposer boost root, increase - // the delta by the new score amount. + // the delta by the new score amount (unless the block has an invalid execution status). // // https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance if let Some(proposer_score_boost) = spec.proposer_score_boost { - if proposer_boost_root != Hash256::zero() && proposer_boost_root == node.root { + if proposer_boost_root != Hash256::zero() + && proposer_boost_root == node.root + // Invalid nodes (or their ancestors) should not receive a proposer boost. 
+ && !execution_status_is_invalid + { proposer_score = calculate_proposer_boost::(new_balances, proposer_score_boost) .ok_or(Error::ProposerBoostOverflow(node_index))?; @@ -166,7 +175,10 @@ impl ProtoArray { } // Apply the delta to the node. - if node_delta < 0 { + if execution_status_is_invalid { + // Invalid nodes always have a weight of 0. + node.weight = 0 + } else if node_delta < 0 { // Note: I am conflicted about whether to use `saturating_sub` or `checked_sub` // here. // From b3be437c0e1827d4e6f9c94436b904f33e5bcee3 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 26 Feb 2022 14:09:28 +1100 Subject: [PATCH 88/92] Add error for irrelevant descendant --- consensus/proto_array/src/error.rs | 3 +++ consensus/proto_array/src/proto_array.rs | 6 +++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index 1d0ab9b1049..7e1b73bedc3 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -41,6 +41,9 @@ pub enum Error { block_root: Hash256, latest_valid_ancestor_hash: Option, }, + IrrelevantDescendant { + block_root: Hash256, + }, } #[derive(Clone, PartialEq, Debug)] diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 6501529fbef..9ff0621a715 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -496,7 +496,11 @@ impl ProtoArray { ExecutionStatus::Unknown(hash) | ExecutionStatus::Invalid(hash) => { node.execution_status = ExecutionStatus::Invalid(*hash) } - ExecutionStatus::Irrelevant(_) => (), + ExecutionStatus::Irrelevant(_) => { + return Err(Error::IrrelevantDescendant { + block_root: node.root, + }) + } } invalidated_indices.insert(index); From e9905aa07f20421ee4b988107e3e317bf653dcfe Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 26 Feb 2022 14:29:36 +1100 Subject: [PATCH 89/92] Invalidate more payloads --- 
consensus/proto_array/src/proto_array.rs | 98 ++++++++++++++---------- 1 file changed, 56 insertions(+), 42 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 9ff0621a715..35e7c9df50e 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -356,6 +356,14 @@ impl ProtoArray { latest_valid_ancestor_hash: Option, ) -> Result<(), Error> { let mut invalidated_indices: HashSet = <_>::default(); + + /* + * Step 1: + * + * Find the `head_block_root` and maybe iterate backwards and invalidate ancestors. Record + * all invalidated block indices in `invalidated_indices`. + */ + let mut index = *self .indices .get(&head_block_root) @@ -388,17 +396,15 @@ impl ProtoArray { | ExecutionStatus::Invalid(hash) | ExecutionStatus::Unknown(hash) => { // If we're no longer processing the `head_block_root` and the last valid - // ancestor is unknown, exit now with an error. + // ancestor is unknown, exit this loop and proceed to invalidate and + // descendants of `head_block_root`. // // In effect, this means that if an unknown hash (junk or pre-finalization) is - // supplied, we only invalidate a single block and no ancestors. The alternative - // is to invalidate *all* ancestors, which would likely involve shutting down - // the client due to an invalid justified checkpoint. + // supplied, don't validate any ancestors. The alternative is to invalidate + // *all* ancestors, which would likely involve shutting down the client due to + // an invalid justified checkpoint. if !latest_valid_ancestor_is_descendant && node.root != head_block_root { - return Err(Error::UnknownLatestValidAncestorHash { - block_root: node.root, - latest_valid_ancestor_hash, - }); + break; } else if Some(hash) == latest_valid_ancestor_hash { // If the `best_child` or `best_descendant` of the latest valid hash was // invalidated, set those fields to `None`. 
@@ -469,42 +475,50 @@ impl ProtoArray { } } - if let Some(latest_valid_ancestor_root) = latest_valid_ancestor_root { - let latest_valid_ancestor_index = *self - .indices - .get(&latest_valid_ancestor_root) - .ok_or(Error::NodeUnknown(latest_valid_ancestor_root))?; - let first_potential_descendant = latest_valid_ancestor_index + 1; - - // Collect all *descendants* which have been declared invalid since they're the descendant of a block - // with an invalid execution payload. - for index in first_potential_descendant..self.nodes.len() { - let node = self - .nodes - .get_mut(index) - .ok_or(Error::InvalidNodeIndex(index))?; - - if let Some(parent_index) = node.parent { - if invalidated_indices.contains(&parent_index) { - match &node.execution_status { - ExecutionStatus::Valid(hash) => { - return Err(Error::ValidExecutionStatusBecameInvalid { - block_root: node.root, - payload_block_hash: *hash, - }) - } - ExecutionStatus::Unknown(hash) | ExecutionStatus::Invalid(hash) => { - node.execution_status = ExecutionStatus::Invalid(*hash) - } - ExecutionStatus::Irrelevant(_) => { - return Err(Error::IrrelevantDescendant { - block_root: node.root, - }) - } - } + /* + * Step 2: + * + * Start at either the `latest_valid_ancestor` or the `head_block_root` and iterate + * *forwards* to invalidate all descendants of all blocks in `invalidated_indices`. + */ + + let starting_block_root = latest_valid_ancestor_root + .filter(|_| latest_valid_ancestor_is_descendant) + .unwrap_or(head_block_root); + let latest_valid_ancestor_index = *self + .indices + .get(&starting_block_root) + .ok_or(Error::NodeUnknown(starting_block_root))?; + let first_potential_descendant = latest_valid_ancestor_index + 1; + + // Collect all *descendants* which have been declared invalid since they're the descendant of a block + // with an invalid execution payload. 
+ for index in first_potential_descendant..self.nodes.len() { + let node = self + .nodes + .get_mut(index) + .ok_or(Error::InvalidNodeIndex(index))?; - invalidated_indices.insert(index); + if let Some(parent_index) = node.parent { + if invalidated_indices.contains(&parent_index) { + match &node.execution_status { + ExecutionStatus::Valid(hash) => { + return Err(Error::ValidExecutionStatusBecameInvalid { + block_root: node.root, + payload_block_hash: *hash, + }) + } + ExecutionStatus::Unknown(hash) | ExecutionStatus::Invalid(hash) => { + node.execution_status = ExecutionStatus::Invalid(*hash) + } + ExecutionStatus::Irrelevant(_) => { + return Err(Error::IrrelevantDescendant { + block_root: node.root, + }) + } } + + invalidated_indices.insert(index); } } } From 031ab86fb6ddd165590b6ef0409cec37b295cf00 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 28 Feb 2022 10:53:38 +1100 Subject: [PATCH 90/92] Update consensus/proto_array/src/proto_array.rs Co-authored-by: Michael Sproul --- consensus/proto_array/src/proto_array.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 35e7c9df50e..f4f76a8f639 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -397,7 +397,7 @@ impl ProtoArray { | ExecutionStatus::Unknown(hash) => { // If we're no longer processing the `head_block_root` and the last valid // ancestor is unknown, exit this loop and proceed to invalidate and - // descendants of `head_block_root`. + // descendants of `head_block_root`/`latest_valid_ancestor_root`. // // In effect, this means that if an unknown hash (junk or pre-finalization) is // supplied, don't validate any ancestors. 
The alternative is to invalidate From 6dee54c860eefe1bbc5a70d7ebc5353b1dc8f288 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 28 Feb 2022 11:02:56 +1100 Subject: [PATCH 91/92] Update fork choice docs --- consensus/proto_array/src/proto_array.rs | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index f4f76a8f639..b0e8991a785 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -328,15 +328,24 @@ impl ProtoArray { } } - /// Potentially updates blocks to have an invalid payload and therefore be ineligible for the - /// head. + /// Invalidate the relevant ancestors and descendants of a block with an invalid execution + /// payload. + /// + /// The `head_block_root` should be the beacon block root of the block with the invalid + /// execution payload, _or_ its parent where the block with the invalid payload has not yet + /// been applied to `self`. + /// + /// The `latest_valid_hash` should be the hash of most recent *valid* execution payload + /// contained in an ancestor block of `head_block_root`. /// - /// The `head_block_root` is the block *root* of the latest invalid block. The - /// `latest_valid_hash` is a payload *hash* and should be: + /// This function will invalidate: /// - /// - `Some(hash)` if the block with that payload *hash* is known to be valid and is an - /// ancestor of `head_block_root`. - /// - `None` if the latest valid ancestor of `head_block_root` is unknown. + /// * The block matching `head_block_root` _unless_ that block has a payload matching `latest_valid_hash`. + /// * All ancestors of `head_block_root` back to the block with payload matching + /// `latest_valid_hash` (endpoint > exclusive). In the case where the `head_block_root` is the parent + /// of the invalid block and itself matches `latest_valid_hash`, no ancestors will be invalidated. 
+ /// * All descendants of `latest_valid_hash` if supplied and consistent with `head_block_root`, + /// or else all descendants of `head_block_root`. /// /// ## Details /// From 1ab1c433e6968f689ff036965b191adc015b3144 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 28 Feb 2022 10:43:49 +1100 Subject: [PATCH 92/92] Add execution_status FC tests Start adding execution_status tests Complete test 01 Add tests 2, 3 Appease clippy --- consensus/proto_array/src/bin.rs | 12 + .../src/fork_choice_test_definition.rs | 91 +- .../execution_status.rs | 1092 +++++++++++++++++ .../ffg_updates.rs | 120 +- .../fork_choice_test_definition/no_votes.rs | 38 +- .../src/fork_choice_test_definition/votes.rs | 236 ++-- .../src/proto_array_fork_choice.rs | 9 + 7 files changed, 1391 insertions(+), 207 deletions(-) create mode 100644 consensus/proto_array/src/fork_choice_test_definition/execution_status.rs diff --git a/consensus/proto_array/src/bin.rs b/consensus/proto_array/src/bin.rs index ba83714ce77..e1d307affb4 100644 --- a/consensus/proto_array/src/bin.rs +++ b/consensus/proto_array/src/bin.rs @@ -6,6 +6,18 @@ fn main() { write_test_def_to_yaml("no_votes.yaml", get_no_votes_test_definition()); write_test_def_to_yaml("ffg_01.yaml", get_ffg_case_01_test_definition()); write_test_def_to_yaml("ffg_02.yaml", get_ffg_case_02_test_definition()); + write_test_def_to_yaml( + "execution_status_01.yaml", + get_execution_status_test_definition_01(), + ); + write_test_def_to_yaml( + "execution_status_02.yaml", + get_execution_status_test_definition_02(), + ); + write_test_def_to_yaml( + "execution_status_03.yaml", + get_execution_status_test_definition_03(), + ); } fn write_test_def_to_yaml(filename: &str, def: ForkChoiceTestDefinition) { diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index e28fc67718f..fd90d539037 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ 
b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -1,11 +1,16 @@ +mod execution_status; mod ffg_updates; mod no_votes; mod votes; use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; use serde_derive::{Deserialize, Serialize}; -use types::{AttestationShufflingId, Checkpoint, Epoch, EthSpec, Hash256, MainnetEthSpec, Slot}; +use types::{ + AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + MainnetEthSpec, Slot, +}; +pub use execution_status::*; pub use ffg_updates::*; pub use no_votes::*; pub use votes::*; @@ -18,6 +23,13 @@ pub enum Operation { justified_state_balances: Vec, expected_head: Hash256, }, + ProposerBoostFindHead { + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, + justified_state_balances: Vec, + expected_head: Hash256, + proposer_boost_root: Hash256, + }, InvalidFindHead { justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint, @@ -40,6 +52,14 @@ pub enum Operation { prune_threshold: usize, expected_len: usize, }, + InvalidatePayload { + head_block_root: Hash256, + latest_valid_ancestor_root: Option, + }, + AssertWeight { + block_root: Hash256, + weight: u64, + }, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -52,9 +72,11 @@ pub struct ForkChoiceTestDefinition { impl ForkChoiceTestDefinition { pub fn run(self) { + let mut spec = MainnetEthSpec::default_spec(); + spec.proposer_score_boost = Some(50); + let junk_shuffling_id = AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero()); - let execution_status = ExecutionStatus::irrelevant(); let mut fork_choice = ProtoArrayForkChoice::new( self.finalized_block_slot, Hash256::zero(), @@ -62,7 +84,7 @@ impl ForkChoiceTestDefinition { self.finalized_checkpoint, junk_shuffling_id.clone(), junk_shuffling_id, - execution_status, + ExecutionStatus::Unknown(ExecutionBlockHash::zero()), ) .expect("should create fork choice struct"); @@ -80,7 +102,7 @@ impl ForkChoiceTestDefinition { 
finalized_checkpoint, &justified_state_balances, Hash256::zero(), - &MainnetEthSpec::default_spec(), + &spec, ) .map_err(|e| e) .unwrap_or_else(|e| { @@ -89,7 +111,34 @@ impl ForkChoiceTestDefinition { assert_eq!( head, expected_head, - "Operation at index {} failed checks. Operation: {:?}", + "Operation at index {} failed head check. Operation: {:?}", + op_index, op + ); + check_bytes_round_trip(&fork_choice); + } + Operation::ProposerBoostFindHead { + justified_checkpoint, + finalized_checkpoint, + justified_state_balances, + expected_head, + proposer_boost_root, + } => { + let head = fork_choice + .find_head::( + justified_checkpoint, + finalized_checkpoint, + &justified_state_balances, + proposer_boost_root, + &spec, + ) + .map_err(|e| e) + .unwrap_or_else(|e| { + panic!("find_head op at index {} returned error {}", op_index, e) + }); + + assert_eq!( + head, expected_head, + "Operation at index {} failed head check. Operation: {:?}", op_index, op ); check_bytes_round_trip(&fork_choice); @@ -104,7 +153,7 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, &justified_state_balances, Hash256::zero(), - &MainnetEthSpec::default_spec(), + &spec, ); assert!( @@ -138,7 +187,10 @@ impl ForkChoiceTestDefinition { ), justified_checkpoint, finalized_checkpoint, - execution_status, + // All blocks are imported optimistically. 
+ execution_status: ExecutionStatus::Unknown(ExecutionBlockHash::from_root( + root, + )), }; fork_choice.process_block(block).unwrap_or_else(|e| { panic!( @@ -183,22 +235,41 @@ impl ForkChoiceTestDefinition { expected_len ); } + Operation::InvalidatePayload { + head_block_root, + latest_valid_ancestor_root, + } => fork_choice + .process_execution_payload_invalidation( + head_block_root, + latest_valid_ancestor_root, + ) + .unwrap(), + Operation::AssertWeight { block_root, weight } => assert_eq!( + fork_choice.get_weight(&block_root).unwrap(), + weight, + "block weight" + ), } } } } -/// Gives a hash that is not the zero hash (unless i is `usize::max_value)`. -fn get_hash(i: u64) -> Hash256 { +/// Gives a root that is not the zero hash (unless i is `usize::max_value)`. +fn get_root(i: u64) -> Hash256 { Hash256::from_low_u64_be(i + 1) } +/// Gives a hash that is not the zero hash (unless i is `usize::max_value)`. +fn get_hash(i: u64) -> ExecutionBlockHash { + ExecutionBlockHash::from_root(get_root(i)) +} + /// Gives a checkpoint with a root that is not the zero hash (unless i is `usize::max_value)`. /// `Epoch` will always equal `i`. fn get_checkpoint(i: u64) -> Checkpoint { Checkpoint { epoch: Epoch::new(i), - root: get_hash(i), + root: get_root(i), } } diff --git a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs new file mode 100644 index 00000000000..f1b0e512d7d --- /dev/null +++ b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs @@ -0,0 +1,1092 @@ +use super::*; + +pub fn get_execution_status_test_definition_01() -> ForkChoiceTestDefinition { + let balances = vec![1; 2]; + let mut ops = vec![]; + + // Ensure that the head starts at the finalized block. 
+ ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(0), + }); + + // Add a block with a hash of 2. + // + // 0 + // / + // 2 + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(2), + parent_root: get_root(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is 2 + // + // 0 + // / + // head-> 2 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared + // to the previous block). 
+ // + // 0 + // / \ + // 2 1 + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is still 2 + // + // 0 + // / \ + // head-> 2 1 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + // Add a vote to block 1 + // + // 0 + // / \ + // 2 1 <- +vote + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(1), + target_epoch: Epoch::new(2), + }); + + // Ensure that the head is now 1, because 1 has a vote. + // + // 0 + // / \ + // 2 1 <- head + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(1), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + + // Add a vote to block 2 + // + // 0 + // / \ + // +vote-> 2 1 + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_root(2), + target_epoch: Epoch::new(2), + }); + + // Ensure that the head is 2 since 1 and 2 both have a vote + // + // 0 + // / \ + // head-> 2 1 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + 
root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 2, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 1, + }); + + // Add block 3. + // + // 0 + // / \ + // 2 1 + // | + // 3 + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(3), + parent_root: get_root(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is still 2 + // + // 0 + // / \ + // head-> 2 1 + // | + // 3 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 2, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(3), + weight: 0, + }); + + // Move validator #0 vote from 1 to 3 + // + // 0 + // / \ + // 2 1 <- -vote + // | + // 3 <- +vote + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(3), + target_epoch: Epoch::new(3), + }); + + // Ensure that the head is still 2 + // + // 0 + // / \ + // head-> 2 1 + // | + // 3 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + 
expected_head: get_root(2), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 2, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(3), + weight: 1, + }); + + // Invalidate the payload of 3. + // + // 0 + // / \ + // 2 1 + // | + // 3 <- INVALID + ops.push(Operation::InvalidatePayload { + head_block_root: get_root(3), + latest_valid_ancestor_root: Some(get_hash(1)), + }); + + // Ensure that the head is still 2. + // + // 0 + // / \ + // head-> 2 1 + // | + // 3 <- INVALID + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + // Invalidation of 3 should have removed upstream weight. + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 1, + }); + // Invalidation of 3 should have removed upstream weight. + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 0, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 1, + }); + // Invalidation should have removed weight. + ops.push(Operation::AssertWeight { + block_root: get_root(3), + weight: 0, + }); + + // Move a vote from 2 to 1. This is slashable, but that's not relevant here. 
+ // + // 0 + // / \ + // -vote-> 2 1 <- +vote + // | + // 3 <- INVALID + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_root(1), + target_epoch: Epoch::new(3), + }); + + // Ensure that the head has switched back to 1 + // + // 0 + // / \ + // 2 1 <-head + // | + // 3 <- INVALID + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances, + expected_head: get_root(1), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + // Invalidation should have removed weight. + ops.push(Operation::AssertWeight { + block_root: get_root(3), + weight: 0, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + operations: ops, + } +} + +pub fn get_execution_status_test_definition_02() -> ForkChoiceTestDefinition { + let balances = vec![1; 2]; + let mut ops = vec![]; + + // Ensure that the head starts at the finalized block. + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(0), + }); + + // Add a block with a hash of 2. 
+ // + // 0 + // / + // 2 + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(2), + parent_root: get_root(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is 2 + // + // 0 + // / + // head-> 2 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared + // to the previous block). + // + // 0 + // / \ + // 2 1 + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is still 2 + // + // 0 + // / \ + // head-> 2 1 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + // Add a vote to block 1 + // + // 0 + // / \ + // 2 1 <- +vote + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(1), + target_epoch: Epoch::new(2), + }); + + // Ensure that the head is now 1, because 1 has a vote. 
+ // + // 0 + // / \ + // 2 1 <- head + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(1), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + + // Add a vote to block 2 + // + // 0 + // / \ + // +vote-> 2 1 + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_root(2), + target_epoch: Epoch::new(2), + }); + + // Ensure that the head is 2 since 1 and 2 both have a vote + // + // 0 + // / \ + // head-> 2 1 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 2, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 1, + }); + + // Add block 3. 
+ // + // 0 + // / \ + // 2 1 + // | + // 3 + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(3), + parent_root: get_root(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is still 2 + // + // 0 + // / \ + // head-> 2 1 + // | + // 3 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 2, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 1, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(3), + weight: 0, + }); + + // Move validator #0 vote from 1 to 3 + // + // 0 + // / \ + // 2 1 <- -vote + // | + // 3 <- +vote + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(3), + target_epoch: Epoch::new(3), + }); + + // Move validator #1 vote from 2 to 3 + // + // 0 + // / \ + // -vote->2 1 + // | + // 3 <- +vote + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_root(3), + target_epoch: Epoch::new(3), + }); + + // Ensure that the head is now 3. 
+ // + // 0 + // / \ + // 2 1 + // | + // 3 <-head + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(3), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 2, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 2, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(3), + weight: 2, + }); + + // Invalidate the payload of 3. + // + // 0 + // / \ + // 2 1 + // | + // 3 <- INVALID + ops.push(Operation::InvalidatePayload { + head_block_root: get_root(3), + latest_valid_ancestor_root: Some(get_hash(1)), + }); + + // Ensure that the head is now 2. + // + // 0 + // / \ + // head-> 2 1 + // | + // 3 <- INVALID + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances, + expected_head: get_root(2), + }); + + // Invalidation of 3 should have removed upstream weight. + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 0, + }); + // Invalidation of 3 should have removed upstream weight. + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 0, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + // Invalidation should have removed weight. 
+ ops.push(Operation::AssertWeight { + block_root: get_root(3), + weight: 0, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + operations: ops, + } +} + +pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { + let balances = vec![1_000; 2_000]; + let mut ops = vec![]; + + // Ensure that the head starts at the finalized block. + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(0), + }); + + // Add a block with a hash of 2. + // + // 0 + // / + // 2 + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(2), + parent_root: get_root(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is 2 + // + // 0 + // / + // head-> 2 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared + // to the previous block). 
+ // + // 0 + // / \ + // 2 1 + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_root(1), + parent_root: get_root(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is still 2 + // + // 0 + // / \ + // head-> 2 1 + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(2), + }); + + // Add a vote to block 1 + // + // 0 + // / \ + // 2 1 <- +vote + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_root(1), + target_epoch: Epoch::new(2), + }); + + // Ensure that the head is now 1, because 1 has a vote. + // + // 0 + // / \ + // 2 1 <- head + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(1), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 1_000, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 1_000, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + + // Add another vote to 1 + // + // 0 + // / \ + // 2 1 <- +vote + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_root(1), + target_epoch: Epoch::new(2), + }); + + // Ensure that the head is 1. 
+ // + // 0 + // / \ + // 2 1 <- head + ops.push(Operation::FindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(1), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 2_000, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 2_000, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + + // Add block 3. + // + // 0 + // / \ + // 2 1 + // | + // 3 + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(3), + parent_root: get_root(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + }); + + // Ensure that the head is now 3, applying a proposer boost to 3 as well. + // + // 0 + // / \ + // 2 1 + // | + // 3 <- head + ops.push(Operation::ProposerBoostFindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances.clone(), + expected_head: get_root(3), + proposer_boost_root: get_root(3), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 33_000, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 33_000, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(3), + // This is a "magic number" generated from `calculate_proposer_boost`. + weight: 31_000, + }); + + // Invalidate the payload of 3. 
+ // + // 0 + // / \ + // 2 1 + // | + // 3 <- INVALID + ops.push(Operation::InvalidatePayload { + head_block_root: get_root(3), + latest_valid_ancestor_root: Some(get_hash(1)), + }); + + // Ensure that the head is now 1, maintaining the proposer boost on the invalid block. + // + // 0 + // / \ + // 2 1 <- head + // | + // 3 <- INVALID + ops.push(Operation::ProposerBoostFindHead { + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + justified_state_balances: balances, + expected_head: get_root(1), + proposer_boost_root: get_root(3), + }); + + ops.push(Operation::AssertWeight { + block_root: get_root(0), + weight: 2_000, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(1), + weight: 2_000, + }); + ops.push(Operation::AssertWeight { + block_root: get_root(2), + weight: 0, + }); + // The proposer boost should be reverted due to the invalid payload. + ops.push(Operation::AssertWeight { + block_root: get_root(3), + weight: 0, + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_root(0), + }, + operations: ops, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_01() { + let test = get_execution_status_test_definition_01(); + test.run(); + } + + #[test] + fn test_02() { + let test = get_execution_status_test_definition_02(); + test.run(); + } + + #[test] + fn test_03() { + let test = get_execution_status_test_definition_03(); + test.run(); + } +} diff --git a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs index a129064504d..77211a86a7d 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs +++ 
b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs @@ -9,7 +9,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(0), + expected_head: get_root(0), }); // Build the following tree (stick? lol). @@ -23,22 +23,22 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { // 3 <- just: 2, fin: 1 ops.push(Operation::ProcessBlock { slot: Slot::new(1), - root: get_hash(1), - parent_root: get_hash(0), + root: get_root(1), + parent_root: get_root(0), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(2), - root: get_hash(2), - parent_root: get_hash(1), + root: get_root(2), + parent_root: get_root(1), justified_checkpoint: get_checkpoint(1), finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(3), - root: get_hash(3), - parent_root: get_hash(2), + root: get_root(3), + parent_root: get_root(2), justified_checkpoint: get_checkpoint(2), finalized_checkpoint: get_checkpoint(1), }); @@ -56,7 +56,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(3), + expected_head: get_root(3), }); // Ensure that with justified epoch 1 we find 2 @@ -72,7 +72,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(1), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }); // Ensure that with justified epoch 2 we find 3 @@ -88,7 +88,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: 
get_checkpoint(2), finalized_checkpoint: get_checkpoint(1), justified_state_balances: balances, - expected_head: get_hash(3), + expected_head: get_root(3), }); // END OF TESTS @@ -109,7 +109,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(0), + expected_head: get_root(0), }); // Build the following tree. @@ -129,48 +129,48 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { // Left branch ops.push(Operation::ProcessBlock { slot: Slot::new(1), - root: get_hash(1), - parent_root: get_hash(0), + root: get_root(1), + parent_root: get_root(0), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(2), - root: get_hash(3), - parent_root: get_hash(1), + root: get_root(3), + parent_root: get_root(1), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(1), + root: get_root(1), }, finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(3), - root: get_hash(5), - parent_root: get_hash(3), + root: get_root(5), + parent_root: get_root(3), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(1), + root: get_root(1), }, finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(4), - root: get_hash(7), - parent_root: get_hash(5), + root: get_root(7), + parent_root: get_root(5), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(1), + root: get_root(1), }, finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(5), - root: get_hash(9), - parent_root: get_hash(7), + root: get_root(9), + parent_root: get_root(7), justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(3), + root: get_root(3), }, 
finalized_checkpoint: get_checkpoint(0), }); @@ -178,42 +178,42 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { // Right branch ops.push(Operation::ProcessBlock { slot: Slot::new(1), - root: get_hash(2), - parent_root: get_hash(0), + root: get_root(2), + parent_root: get_root(0), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(2), - root: get_hash(4), - parent_root: get_hash(2), + root: get_root(4), + parent_root: get_root(2), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(3), - root: get_hash(6), - parent_root: get_hash(4), + root: get_root(6), + parent_root: get_root(4), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(4), - root: get_hash(8), - parent_root: get_hash(6), + root: get_root(8), + parent_root: get_root(6), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(2), + root: get_root(2), }, finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(5), - root: get_hash(10), - parent_root: get_hash(8), + root: get_root(10), + parent_root: get_root(8), justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(4), + root: get_root(4), }, finalized_checkpoint: get_checkpoint(0), }); @@ -235,23 +235,23 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(10), + expected_head: get_root(10), }); // Same as above, but with justified epoch 2. 
ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(4), + root: get_root(4), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(10), + expected_head: get_root(10), }); // Same as above, but with justified epoch 3 (should be invalid). ops.push(Operation::InvalidFindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(3), - root: get_hash(6), + root: get_root(6), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), @@ -272,7 +272,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { // 9 10 ops.push(Operation::ProcessAttestation { validator_index: 0, - block_root: get_hash(1), + block_root: get_root(1), target_epoch: Epoch::new(0), }); @@ -293,23 +293,23 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Save as above but justified epoch 2. ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(3), + root: get_root(3), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Save as above but justified epoch 3 (should fail). 
ops.push(Operation::InvalidFindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(3), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), @@ -330,7 +330,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { // 9 10 ops.push(Operation::ProcessAttestation { validator_index: 1, - block_root: get_hash(2), + block_root: get_root(2), target_epoch: Epoch::new(0), }); @@ -351,23 +351,23 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(10), + expected_head: get_root(10), }); // Same as above but justified epoch 2. ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(4), + root: get_root(4), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(10), + expected_head: get_root(10), }); // Same as above but justified epoch 3 (should fail). ops.push(Operation::InvalidFindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(3), - root: get_hash(6), + root: get_root(6), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), @@ -389,27 +389,27 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(0), - root: get_hash(1), + root: get_root(1), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Same as above but justified epoch 2. 
ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(3), + root: get_root(3), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Same as above but justified epoch 3 (should fail). ops.push(Operation::InvalidFindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(3), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), @@ -432,23 +432,23 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(10), + expected_head: get_root(10), }); // Same as above but justified epoch 2. ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(4), + root: get_root(4), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), - expected_head: get_hash(10), + expected_head: get_root(10), }); // Same as above but justified epoch 3 (should fail). 
ops.push(Operation::InvalidFindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(3), - root: get_hash(6), + root: get_root(6), }, finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances, diff --git a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs index 0fbcafc5d4a..a60b3e6b368 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs @@ -24,7 +24,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // 2 Operation::ProcessBlock { slot: Slot::new(1), - root: get_hash(2), + root: get_root(2), parent_root: Hash256::zero(), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), @@ -50,7 +50,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }, // Add block 1 // @@ -59,8 +59,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // 2 1 Operation::ProcessBlock { slot: Slot::new(1), - root: get_hash(1), - parent_root: get_hash(0), + root: get_root(1), + parent_root: get_root(0), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), root: Hash256::zero(), @@ -85,7 +85,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }, // Add block 3 // @@ -96,8 +96,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // 3 Operation::ProcessBlock { slot: Slot::new(2), - root: get_hash(3), - parent_root: get_hash(1), + root: get_root(3), + parent_root: get_root(1), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), root: Hash256::zero(), @@ -124,7 +124,7 @@ pub fn get_no_votes_test_definition() -> 
ForkChoiceTestDefinition { root: Hash256::zero(), }, justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }, // Add block 4 // @@ -135,8 +135,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // 4 3 Operation::ProcessBlock { slot: Slot::new(2), - root: get_hash(4), - parent_root: get_hash(2), + root: get_root(4), + parent_root: get_root(2), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), root: Hash256::zero(), @@ -163,7 +163,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, justified_state_balances: balances.clone(), - expected_head: get_hash(4), + expected_head: get_root(4), }, // Add block 5 with a justified epoch of 2 // @@ -176,8 +176,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // 5 <- justified epoch = 2 Operation::ProcessBlock { slot: Slot::new(3), - root: get_hash(5), - parent_root: get_hash(4), + root: get_root(5), + parent_root: get_root(4), justified_checkpoint: get_checkpoint(2), finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), @@ -203,7 +203,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, justified_state_balances: balances.clone(), - expected_head: get_hash(4), + expected_head: get_root(4), }, // Ensure there is an error when starting from a block that has the wrong justified epoch. 
// @@ -217,7 +217,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { Operation::InvalidFindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), @@ -241,7 +241,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, justified_state_balances: balances.clone(), - expected_head: get_hash(5), + expected_head: get_root(5), }, // Add block 6 // @@ -256,8 +256,8 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // 6 Operation::ProcessBlock { slot: Slot::new(4), - root: get_hash(6), - parent_root: get_hash(5), + root: get_root(6), + parent_root: get_root(5), justified_checkpoint: get_checkpoint(2), finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), @@ -282,7 +282,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { root: Hash256::zero(), }, justified_state_balances: balances, - expected_head: get_hash(6), + expected_head: get_root(6), }, ]; diff --git a/consensus/proto_array/src/fork_choice_test_definition/votes.rs b/consensus/proto_array/src/fork_choice_test_definition/votes.rs index f65177a8497..58ac6af60ba 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/votes.rs @@ -8,14 +8,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(0), + expected_head: get_root(0), }); // Add a block with a hash of 2. 
@@ -25,15 +25,15 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 2 ops.push(Operation::ProcessBlock { slot: Slot::new(1), - root: get_hash(2), - parent_root: get_hash(0), + root: get_root(2), + parent_root: get_root(0), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, }); @@ -45,14 +45,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }); // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared @@ -63,15 +63,15 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 2 1 ops.push(Operation::ProcessBlock { slot: Slot::new(1), - root: get_hash(1), - parent_root: get_hash(0), + root: get_root(1), + parent_root: get_root(0), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, }); @@ -83,14 +83,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }); // Add a vote to block 1 @@ -100,7 +100,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 2 1 <- +vote 
ops.push(Operation::ProcessAttestation { validator_index: 0, - block_root: get_hash(1), + block_root: get_root(1), target_epoch: Epoch::new(2), }); @@ -112,14 +112,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(1), + expected_head: get_root(1), }); // Add a vote to block 2 @@ -129,7 +129,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // +vote-> 2 1 ops.push(Operation::ProcessAttestation { validator_index: 1, - block_root: get_hash(2), + block_root: get_root(2), target_epoch: Epoch::new(2), }); @@ -141,14 +141,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }); // Add block 3. 
@@ -160,15 +160,15 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 3 ops.push(Operation::ProcessBlock { slot: Slot::new(2), - root: get_hash(3), - parent_root: get_hash(1), + root: get_root(3), + parent_root: get_root(1), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, }); @@ -182,14 +182,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }); // Move validator #0 vote from 1 to 3 @@ -201,7 +201,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 3 <- +vote ops.push(Operation::ProcessAttestation { validator_index: 0, - block_root: get_hash(3), + block_root: get_root(3), target_epoch: Epoch::new(3), }); @@ -215,14 +215,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(2), + expected_head: get_root(2), }); // Move validator #1 vote from 2 to 1 (this is an equivocation, but fork choice doesn't @@ -235,7 +235,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 3 ops.push(Operation::ProcessAttestation { validator_index: 1, - block_root: get_hash(1), + block_root: get_root(1), target_epoch: Epoch::new(3), }); @@ -249,14 +249,14 @@ pub fn get_votes_test_definition() -> 
ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(3), + expected_head: get_root(3), }); // Add block 4. @@ -270,15 +270,15 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 4 ops.push(Operation::ProcessBlock { slot: Slot::new(3), - root: get_hash(4), - parent_root: get_hash(3), + root: get_root(4), + parent_root: get_root(3), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, }); @@ -294,14 +294,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(4), + expected_head: get_root(4), }); // Add block 5, which has a justified epoch of 2. 
@@ -317,15 +317,15 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 5 <- justified epoch = 2 ops.push(Operation::ProcessBlock { slot: Slot::new(4), - root: get_hash(5), - parent_root: get_hash(4), + root: get_root(5), + parent_root: get_root(4), justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(1), + root: get_root(1), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(1), + root: get_root(1), }, }); @@ -343,14 +343,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(4), + expected_head: get_root(4), }); // Add block 6, which has a justified epoch of 0. @@ -366,15 +366,15 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 5 6 <- justified epoch = 0 ops.push(Operation::ProcessBlock { slot: Slot::new(0), - root: get_hash(6), - parent_root: get_hash(4), + root: get_root(6), + parent_root: get_root(4), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, }); @@ -391,12 +391,12 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // +2 vote-> 5 6 ops.push(Operation::ProcessAttestation { validator_index: 0, - block_root: get_hash(5), + block_root: get_root(5), target_epoch: Epoch::new(4), }); ops.push(Operation::ProcessAttestation { validator_index: 1, - block_root: get_hash(5), + block_root: get_root(5), target_epoch: Epoch::new(4), }); @@ -420,41 +420,41 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 9 ops.push(Operation::ProcessBlock { slot: Slot::new(0), - root: 
get_hash(7), - parent_root: get_hash(5), + root: get_root(7), + parent_root: get_root(5), justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, }); ops.push(Operation::ProcessBlock { slot: Slot::new(0), - root: get_hash(8), - parent_root: get_hash(7), + root: get_root(8), + parent_root: get_root(7), justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, }); ops.push(Operation::ProcessBlock { slot: Slot::new(0), - root: get_hash(9), - parent_root: get_hash(8), + root: get_root(9), + parent_root: get_root(8), justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, }); @@ -479,14 +479,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, justified_state_balances: balances.clone(), - expected_head: get_hash(6), + expected_head: get_root(6), }); // Change fork-choice justified epoch to 1, and the start block to 5 and ensure that 9 is @@ -512,14 +512,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Change fork-choice 
justified epoch to 1, and the start block to 5 and ensure that 9 is @@ -544,12 +544,12 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 9 <- +2 votes ops.push(Operation::ProcessAttestation { validator_index: 0, - block_root: get_hash(9), + block_root: get_root(9), target_epoch: Epoch::new(5), }); ops.push(Operation::ProcessAttestation { validator_index: 1, - block_root: get_hash(9), + block_root: get_root(9), target_epoch: Epoch::new(5), }); @@ -572,15 +572,15 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 9 10 ops.push(Operation::ProcessBlock { slot: Slot::new(0), - root: get_hash(10), - parent_root: get_hash(8), + root: get_root(10), + parent_root: get_root(8), justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, }); @@ -588,14 +588,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Introduce 2 more validators into the system @@ -620,12 +620,12 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 9 10 <- +2 votes ops.push(Operation::ProcessAttestation { validator_index: 2, - block_root: get_hash(10), + block_root: get_root(10), target_epoch: Epoch::new(5), }); ops.push(Operation::ProcessAttestation { validator_index: 3, - block_root: get_hash(10), + block_root: get_root(10), target_epoch: Epoch::new(5), }); @@ -649,14 +649,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: 
get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances.clone(), - expected_head: get_hash(10), + expected_head: get_root(10), }); // Set the balances of the last two validators to zero @@ -674,14 +674,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Set the balances of the last two validators back to 1 @@ -699,14 +699,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances.clone(), - expected_head: get_hash(10), + expected_head: get_root(10), }); // Remove the last two validators @@ -725,19 +725,19 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Ensure that pruning below the prune threshold does not prune. 
ops.push(Operation::Prune { - finalized_root: get_hash(5), + finalized_root: get_root(5), prune_threshold: usize::max_value(), expected_len: 11, }); @@ -746,14 +746,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Ensure that pruning above the prune threshold does prune. @@ -775,7 +775,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // / \ // 9 10 ops.push(Operation::Prune { - finalized_root: get_hash(5), + finalized_root: get_root(5), prune_threshold: 1, expected_len: 6, }); @@ -784,14 +784,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances.clone(), - expected_head: get_hash(9), + expected_head: get_root(9), }); // Add block 11 @@ -807,15 +807,15 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // 11 ops.push(Operation::ProcessBlock { slot: Slot::new(0), - root: get_hash(11), - parent_root: get_hash(9), + root: get_root(11), + parent_root: get_root(9), justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, }); @@ -833,25 +833,25 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { ops.push(Operation::FindHead { justified_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, 
finalized_checkpoint: Checkpoint { epoch: Epoch::new(2), - root: get_hash(5), + root: get_root(5), }, justified_state_balances: balances, - expected_head: get_hash(11), + expected_head: get_root(11), }); ForkChoiceTestDefinition { finalized_block_slot: Slot::new(0), justified_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, finalized_checkpoint: Checkpoint { epoch: Epoch::new(1), - root: get_hash(0), + root: get_root(0), }, operations: ops, } diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 1c140f38a2c..1f5b997f670 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -312,6 +312,15 @@ impl ProtoArrayForkChoice { } } + /// Returns the weight of a given block. + pub fn get_weight(&self, block_root: &Hash256) -> Option { + let block_index = self.proto_array.indices.get(block_root)?; + self.proto_array + .nodes + .get(*block_index) + .map(|node| node.weight) + } + /// See `ProtoArray` documentation. pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool { self.proto_array