diff --git a/Cargo.lock b/Cargo.lock index 0d37ea75b..c5dff17cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6481,62 +6481,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "reth-beacon-consensus" -version = "1.1.5" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-genesis", - "alloy-primitives", - "alloy-rpc-types-engine", - "assert_matches", - "futures", - "itertools 0.13.0", - "metrics", - "reth-blockchain-tree", - "reth-blockchain-tree-api", - "reth-chainspec", - "reth-codecs", - "reth-config", - "reth-consensus", - "reth-db", - "reth-db-api", - "reth-downloaders", - "reth-engine-primitives", - "reth-errors", - "reth-ethereum-consensus", - "reth-ethereum-engine-primitives", - "reth-evm", - "reth-evm-ethereum", - "reth-exex-types", - "reth-metrics", - "reth-network-p2p", - "reth-node-types", - "reth-payload-builder", - "reth-payload-builder-primitives", - "reth-payload-primitives", - "reth-payload-validator", - "reth-primitives", - "reth-primitives-traits", - "reth-provider", - "reth-prune", - "reth-prune-types", - "reth-rpc-types-compat", - "reth-stages", - "reth-stages-api", - "reth-static-file", - "reth-tasks", - "reth-testing-utils", - "reth-tokio-util", - "reth-tracing", - "schnellru", - "thiserror 2.0.9", - "tokio", - "tokio-stream", - "tracing", -] - [[package]] name = "reth-bench" version = "1.1.5" diff --git a/Cargo.toml b/Cargo.toml index 0f0ea08e5..47d802c5a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,6 @@ members = [ "crates/cli/runner/", "crates/cli/util/", "crates/config/", - "crates/consensus/beacon/", "crates/consensus/common/", "crates/consensus/consensus/", "crates/consensus/debug-client/", @@ -304,7 +303,6 @@ overflow-checks = true op-reth = { path = "crates/optimism/bin" } reth = { path = "bin/reth" } reth-basic-payload-builder = { path = "crates/payload/basic" } -reth-beacon-consensus = { path = "crates/consensus/beacon" } reth-bench = { path = "bin/reth-bench" } reth-blockchain-tree = { path = "crates/blockchain-tree" } reth-blockchain-tree-api = { path = "crates/blockchain-tree-api" } diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml deleted file mode 100644 index b937eb2b4..000000000 --- a/crates/consensus/beacon/Cargo.toml +++ /dev/null @@ -1,94 +0,0 @@ -[package] -name = "reth-beacon-consensus" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true - -[lints] -workspace = true - -[dependencies] -# reth -reth-ethereum-consensus.workspace = true -reth-blockchain-tree-api.workspace = true -reth-codecs.workspace = true -reth-db-api.workspace = true -reth-primitives.workspace = true -reth-primitives-traits.workspace = true -reth-stages-api.workspace = true -reth-errors.workspace = true -reth-provider.workspace = true -reth-tasks.workspace = true -reth-payload-builder.workspace = true -reth-payload-builder-primitives.workspace = true -reth-payload-primitives.workspace = true -reth-payload-validator.workspace = true -reth-prune.workspace = true -reth-static-file.workspace = true -reth-tokio-util.workspace = true -reth-engine-primitives.workspace = true -reth-network-p2p.workspace = true -reth-node-types.workspace = true -reth-chainspec = { workspace = true, optional = true } - -# ethereum -alloy-primitives.workspace = true -alloy-rpc-types-engine = { workspace = true, features = ["std"] } -alloy-eips.workspace = true -alloy-consensus.workspace = true - -# async -tokio = { workspace = true, 
features = ["sync"] } -tokio-stream.workspace = true -futures.workspace = true - -# metrics -reth-metrics.workspace = true -metrics.workspace = true - -# misc -tracing.workspace = true -thiserror.workspace = true -schnellru.workspace = true -itertools.workspace = true - -[dev-dependencies] -# reth -reth-payload-builder = { workspace = true, features = ["test-utils"] } -reth-primitives = { workspace = true, features = ["test-utils"] } -reth-consensus = { workspace = true, features = ["test-utils"] } -reth-stages = { workspace = true, features = ["test-utils"] } -reth-blockchain-tree = { workspace = true, features = ["test-utils"] } -reth-db = { workspace = true, features = ["test-utils"] } -reth-db-api.workspace = true -reth-provider = { workspace = true, features = ["test-utils"] } -reth-evm = { workspace = true, features = ["test-utils"] } -reth-network-p2p = { workspace = true, features = ["test-utils"] } -reth-rpc-types-compat.workspace = true -reth-tracing.workspace = true -reth-downloaders.workspace = true -reth-evm-ethereum.workspace = true -reth-ethereum-engine-primitives.workspace = true -reth-config.workspace = true -reth-testing-utils.workspace = true -reth-exex-types.workspace = true -reth-prune-types.workspace = true -reth-chainspec.workspace = true -alloy-genesis.workspace = true -assert_matches.workspace = true - -[features] -optimism = [ - "reth-blockchain-tree/optimism", - "reth-codecs/op", - "reth-chainspec", - "reth-db-api/optimism", - "reth-db/optimism", - "reth-downloaders/optimism", - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-downloaders/optimism", -] diff --git a/crates/consensus/beacon/src/engine/error.rs b/crates/consensus/beacon/src/engine/error.rs deleted file mode 100644 index 6fabfbf03..000000000 --- a/crates/consensus/beacon/src/engine/error.rs +++ /dev/null @@ -1,44 +0,0 @@ -use crate::engine::hooks::EngineHookError; -pub use reth_engine_primitives::BeaconForkChoiceUpdateError; -use reth_errors::{DatabaseError, RethError}; -use reth_stages_api::PipelineError; - -/// Beacon engine result. -pub type BeaconEngineResult = Result; - -/// The error type for the beacon consensus engine service -/// [`BeaconConsensusEngine`](crate::BeaconConsensusEngine) -/// -/// Represents all possible error cases for the beacon consensus engine. -#[derive(Debug, thiserror::Error)] -pub enum BeaconConsensusEngineError { - /// Pipeline channel closed. - #[error("pipeline channel closed")] - PipelineChannelClosed, - /// Pipeline error. - #[error(transparent)] - Pipeline(#[from] Box), - /// Pruner channel closed. - #[error("pruner channel closed")] - PrunerChannelClosed, - /// Hook error. - #[error(transparent)] - Hook(#[from] EngineHookError), - /// Common error. Wrapper around [`RethError`]. - #[error(transparent)] - Common(#[from] RethError), -} - -// box the pipeline error as it is a large enum. -impl From for BeaconConsensusEngineError { - fn from(e: PipelineError) -> Self { - Self::Pipeline(Box::new(e)) - } -} - -// for convenience in the beacon engine -impl From for BeaconConsensusEngineError { - fn from(e: DatabaseError) -> Self { - Self::Common(e.into()) - } -} diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs deleted file mode 100644 index e4f291c06..000000000 --- a/crates/consensus/beacon/src/engine/handle.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! 
`BeaconConsensusEngine` external API - -pub use reth_engine_primitives::BeaconConsensusEngineHandle; diff --git a/crates/consensus/beacon/src/engine/hooks/controller.rs b/crates/consensus/beacon/src/engine/hooks/controller.rs deleted file mode 100644 index 544a4c564..000000000 --- a/crates/consensus/beacon/src/engine/hooks/controller.rs +++ /dev/null @@ -1,390 +0,0 @@ -use crate::hooks::{ - EngineHook, EngineHookContext, EngineHookDBAccessLevel, EngineHookError, EngineHookEvent, - EngineHooks, -}; -use std::{ - collections::VecDeque, - task::{Context, Poll}, -}; -use tracing::debug; - -#[derive(Debug)] -pub(crate) struct PolledHook { - pub(crate) name: &'static str, - pub(crate) event: EngineHookEvent, - pub(crate) db_access_level: EngineHookDBAccessLevel, -} - -/// Manages hooks under the control of the engine. -/// -/// This type polls the initialized hooks one by one, respecting the DB access level -/// (i.e. [`crate::hooks::EngineHookDBAccessLevel::ReadWrite`] that enforces running at most one -/// such hook). -pub(crate) struct EngineHooksController { - /// Collection of hooks. - /// - /// Hooks might be removed from the collection, and returned upon completion. - /// In the current implementation, it only happens when moved to `active_db_write_hook`. - hooks: VecDeque>, - /// Currently running hook with DB write access, if any. - active_db_write_hook: Option>, -} - -impl EngineHooksController { - /// Creates a new [`EngineHooksController`]. - pub(crate) fn new(hooks: EngineHooks) -> Self { - Self { hooks: hooks.inner.into(), active_db_write_hook: None } - } - - /// Polls currently running hook with DB write access, if any. - /// - /// Returns [`Poll::Ready`] if currently running hook with DB write access returned - /// an [event][`crate::hooks::EngineHookEvent`]. - /// - /// Returns [`Poll::Pending`] in all other cases: - /// 1. No hook with DB write access is running. - /// 2. Currently running hook with DB write access returned [`Poll::Pending`] on polling. - /// 3. Currently running hook with DB write access returned [`Poll::Ready`] on polling, but no - /// action to act upon. - pub(crate) fn poll_active_db_write_hook( - &mut self, - cx: &mut Context<'_>, - args: EngineHookContext, - ) -> Poll> { - let Some(mut hook) = self.active_db_write_hook.take() else { return Poll::Pending }; - - match hook.poll(cx, args)? { - Poll::Ready(event) => { - let result = PolledHook { - name: hook.name(), - event, - db_access_level: hook.db_access_level(), - }; - - debug!( - target: "consensus::engine::hooks", - hook = hook.name(), - ?result, - "Polled running hook with db write access" - ); - - if result.event.is_finished() { - self.hooks.push_back(hook); - } else { - self.active_db_write_hook = Some(hook); - } - - return Poll::Ready(Ok(result)) - } - Poll::Pending => { - self.active_db_write_hook = Some(hook); - } - } - - Poll::Pending - } - - /// Polls next engine from the collection. - /// - /// Returns [`Poll::Ready`] if next hook returned an [event][`crate::hooks::EngineHookEvent`]. - /// - /// Returns [`Poll::Pending`] in all other cases: - /// 1. Next hook is [`Option::None`], i.e. taken, meaning it's currently running and has a DB - /// write access. - /// 2. Next hook needs a DB write access, but either there's another hook with DB write access - /// running, or `db_write_active` passed into arguments is `true`. - /// 3. Next hook returned [`Poll::Pending`] on polling. - /// 4. Next hook returned [`Poll::Ready`] on polling, but no action to act upon. 
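// A minimal sketch (not part of the removed crate) of how a hook set for this
// controller is typically assembled before being handed to the engine.
// `static_file_producer`, `pruner` and `task_spawner` are assumed bindings from
// the surrounding launch code; `StaticFileHook` and `PruneHook` are the hook
// implementations defined later in this module tree.
let mut hooks = EngineHooks::new();
hooks.add(StaticFileHook::new(static_file_producer, Box::new(task_spawner.clone())));
hooks.add(PruneHook::new(pruner, Box::new(task_spawner)));
// The engine wraps the collection in the crate-internal `EngineHooksController`,
// which polls hooks one by one and keeps at most one read-write hook running.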
- pub(crate) fn poll_next_hook( - &mut self, - cx: &mut Context<'_>, - args: EngineHookContext, - db_write_active: bool, - ) -> Poll> { - let Some(mut hook) = self.hooks.pop_front() else { return Poll::Pending }; - - let result = self.poll_next_hook_inner(cx, &mut hook, args, db_write_active); - - if matches!( - result, - Poll::Ready(Ok(PolledHook { - event: EngineHookEvent::Started, - db_access_level: EngineHookDBAccessLevel::ReadWrite, - .. - })) - ) { - // If a read-write hook started, set `active_db_write_hook` to it - self.active_db_write_hook = Some(hook); - } else { - // Otherwise, push it back to the collection of hooks to poll it next time - self.hooks.push_back(hook); - } - - result - } - - fn poll_next_hook_inner( - &self, - cx: &mut Context<'_>, - hook: &mut Box, - args: EngineHookContext, - db_write_active: bool, - ) -> Poll> { - // Hook with DB write access level is not allowed to run due to any of the following - // reasons: - // - An already running hook with DB write access level - // - Active DB write according to passed argument - // - Missing a finalized block number. We might be on an optimistic sync scenario where we - // cannot skip the FCU with the finalized hash, otherwise CL might misbehave. - if hook.db_access_level().is_read_write() && - (self.active_db_write_hook.is_some() || - db_write_active || - args.finalized_block_number.is_none()) - { - return Poll::Pending - } - - if let Poll::Ready(event) = hook.poll(cx, args)? { - let result = - PolledHook { name: hook.name(), event, db_access_level: hook.db_access_level() }; - - debug!( - target: "consensus::engine::hooks", - hook = hook.name(), - ?result, - "Polled next hook" - ); - - return Poll::Ready(Ok(result)) - } - debug!(target: "consensus::engine::hooks", hook = hook.name(), "Next hook is not ready"); - - Poll::Pending - } - - /// Returns a running hook with DB write access, if there's any. 
- pub(crate) fn active_db_write_hook(&self) -> Option<&dyn EngineHook> { - self.active_db_write_hook.as_ref().map(|hook| hook.as_ref()) - } -} - -#[cfg(test)] -mod tests { - use crate::hooks::{ - EngineHook, EngineHookContext, EngineHookDBAccessLevel, EngineHookEvent, EngineHooks, - EngineHooksController, - }; - use futures::poll; - use reth_errors::{RethError, RethResult}; - use std::{ - collections::VecDeque, - future::poll_fn, - task::{Context, Poll}, - }; - - struct TestHook { - results: VecDeque>, - name: &'static str, - access_level: EngineHookDBAccessLevel, - } - - impl TestHook { - fn new_ro(name: &'static str) -> Self { - Self { - results: Default::default(), - name, - access_level: EngineHookDBAccessLevel::ReadOnly, - } - } - fn new_rw(name: &'static str) -> Self { - Self { - results: Default::default(), - name, - access_level: EngineHookDBAccessLevel::ReadWrite, - } - } - - fn add_result(&mut self, result: RethResult) { - self.results.push_back(result); - } - } - - impl EngineHook for TestHook { - fn name(&self) -> &'static str { - self.name - } - - fn poll( - &mut self, - _cx: &mut Context<'_>, - _ctx: EngineHookContext, - ) -> Poll> { - self.results.pop_front().map_or(Poll::Pending, Poll::Ready) - } - - fn db_access_level(&self) -> EngineHookDBAccessLevel { - self.access_level - } - } - - #[tokio::test] - async fn poll_active_db_write_hook() { - let mut controller = EngineHooksController::new(EngineHooks::new()); - - let context = EngineHookContext { tip_block_number: 2, finalized_block_number: Some(1) }; - - // No currently running hook with DB write access is set - let result = poll!(poll_fn(|cx| controller.poll_active_db_write_hook(cx, context))); - assert!(result.is_pending()); - - // Currently running hook with DB write access returned `Pending` on polling - controller.active_db_write_hook = Some(Box::new(TestHook::new_rw("read-write"))); - - let result = poll!(poll_fn(|cx| controller.poll_active_db_write_hook(cx, context))); - assert!(result.is_pending()); - - // Currently running hook with DB write access returned `Ready` on polling, but didn't - // return `EngineHookEvent::Finished` yet. - // Currently running hooks with DB write should still be set. - let mut hook = TestHook::new_rw("read-write"); - hook.add_result(Ok(EngineHookEvent::Started)); - controller.active_db_write_hook = Some(Box::new(hook)); - - let result = poll!(poll_fn(|cx| controller.poll_active_db_write_hook(cx, context))); - assert_eq!( - result.map(|result| { - let polled_hook = result.unwrap(); - polled_hook.event.is_started() && polled_hook.db_access_level.is_read_write() - }), - Poll::Ready(true) - ); - assert!(controller.active_db_write_hook.is_some()); - assert!(controller.hooks.is_empty()); - - // Currently running hook with DB write access returned `Ready` on polling and - // `EngineHookEvent::Finished` inside. - // Currently running hooks with DB write should be moved to collection of hooks. 
- let mut hook = TestHook::new_rw("read-write"); - hook.add_result(Ok(EngineHookEvent::Finished(Ok(())))); - controller.active_db_write_hook = Some(Box::new(hook)); - - let result = poll!(poll_fn(|cx| controller.poll_active_db_write_hook(cx, context))); - assert_eq!( - result.map(|result| { - let polled_hook = result.unwrap(); - polled_hook.event.is_finished() && polled_hook.db_access_level.is_read_write() - }), - Poll::Ready(true) - ); - assert!(controller.active_db_write_hook.is_none()); - assert!(controller.hooks.pop_front().is_some()); - } - - #[tokio::test] - async fn poll_next_hook_db_write_active() { - let context = EngineHookContext { tip_block_number: 2, finalized_block_number: Some(1) }; - - let mut hook_rw = TestHook::new_rw("read-write"); - hook_rw.add_result(Ok(EngineHookEvent::Started)); - - let hook_ro_name = "read-only"; - let mut hook_ro = TestHook::new_ro(hook_ro_name); - hook_ro.add_result(Ok(EngineHookEvent::Started)); - - let mut hooks = EngineHooks::new(); - hooks.add(hook_rw); - hooks.add(hook_ro); - let mut controller = EngineHooksController::new(hooks); - - // Read-write hook can't be polled when external DB write is active - let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, true))); - assert!(result.is_pending()); - assert!(controller.active_db_write_hook.is_none()); - - // Read-only hook can be polled when external DB write is active - let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, true))); - assert_eq!( - result.map(|result| { - let polled_hook = result.unwrap(); - polled_hook.name == hook_ro_name && - polled_hook.event.is_started() && - polled_hook.db_access_level.is_read_only() - }), - Poll::Ready(true) - ); - } - - #[tokio::test] - async fn poll_next_hook_db_write_inactive() { - let context = EngineHookContext { tip_block_number: 2, finalized_block_number: Some(1) }; - - let hook_rw_1_name = "read-write-1"; - let mut hook_rw_1 = TestHook::new_rw(hook_rw_1_name); - hook_rw_1.add_result(Ok(EngineHookEvent::Started)); - - let hook_rw_2_name = "read-write-2"; - let mut hook_rw_2 = TestHook::new_rw(hook_rw_2_name); - hook_rw_2.add_result(Ok(EngineHookEvent::Started)); - - let hook_ro_name = "read-only"; - let mut hook_ro = TestHook::new_ro(hook_ro_name); - hook_ro.add_result(Ok(EngineHookEvent::Started)); - hook_ro.add_result(Err(RethError::msg("something went wrong"))); - - let mut hooks = EngineHooks::new(); - hooks.add(hook_rw_1); - hooks.add(hook_rw_2); - hooks.add(hook_ro); - - let mut controller = EngineHooksController::new(hooks); - let hooks_len = controller.hooks.len(); - - // Read-write hook can be polled because external DB write is not active - assert_eq!(controller.hooks.front().map(|hook| hook.name()), Some(hook_rw_1_name)); - let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, false))); - assert_eq!( - result.map(|result| { - let polled_hook = result.unwrap(); - polled_hook.name == hook_rw_1_name && - polled_hook.event.is_started() && - polled_hook.db_access_level.is_read_write() - }), - Poll::Ready(true) - ); - assert_eq!( - controller.active_db_write_hook.as_ref().map(|hook| hook.name()), - Some(hook_rw_1_name) - ); - - // Read-write hook cannot be polled because another read-write hook is running - assert_eq!(controller.hooks.front().map(|hook| hook.name()), Some(hook_rw_2_name)); - let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, false))); - assert!(result.is_pending()); - - // Read-only hook can be polled in parallel with already running read-write hook - 
assert_eq!(controller.hooks.front().map(|hook| hook.name()), Some(hook_ro_name)); - let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, false))); - assert_eq!( - result.map(|result| { - let polled_hook = result.unwrap(); - polled_hook.name == hook_ro_name && - polled_hook.event.is_started() && - polled_hook.db_access_level.is_read_only() - }), - Poll::Ready(true) - ); - - // Read-write hook still cannot be polled because another read-write hook is running - assert_eq!(controller.hooks.front().map(|hook| hook.name()), Some(hook_rw_2_name)); - let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, false))); - assert!(result.is_pending()); - - // Read-only hook has finished with error - assert_eq!(controller.hooks.front().map(|hook| hook.name()), Some(hook_ro_name)); - let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, false))); - assert_eq!(result.map(|result| { result.is_err() }), Poll::Ready(true)); - - assert!(controller.active_db_write_hook.is_some()); - assert_eq!(controller.hooks.len(), hooks_len - 1) - } -} diff --git a/crates/consensus/beacon/src/engine/hooks/mod.rs b/crates/consensus/beacon/src/engine/hooks/mod.rs deleted file mode 100644 index 828a6f968..000000000 --- a/crates/consensus/beacon/src/engine/hooks/mod.rs +++ /dev/null @@ -1,129 +0,0 @@ -use alloy_primitives::BlockNumber; -use reth_errors::{RethError, RethResult}; -use std::{ - fmt, - task::{Context, Poll}, -}; - -mod controller; -pub(crate) use controller::{EngineHooksController, PolledHook}; - -mod prune; -pub use prune::PruneHook; - -mod static_file; -pub use static_file::StaticFileHook; - -/// Collection of [engine hooks][`EngineHook`]. -#[derive(Default)] -pub struct EngineHooks { - inner: Vec>, -} - -impl fmt::Debug for EngineHooks { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("EngineHooks").field("inner", &self.inner.len()).finish() - } -} - -impl EngineHooks { - /// Creates a new empty collection of [engine hooks][`EngineHook`]. - pub fn new() -> Self { - Self { inner: Vec::new() } - } - - /// Adds a new [engine hook][`EngineHook`] to the collection. - pub fn add(&mut self, hook: H) { - self.inner.push(Box::new(hook)) - } -} - -/// Hook that will be run during the main loop of -/// [consensus engine][`crate::engine::BeaconConsensusEngine`]. -pub trait EngineHook: Send + Sync + 'static { - /// Returns a human-readable name for the hook. - fn name(&self) -> &'static str; - - /// Advances the hook execution, emitting an [event][`EngineHookEvent`]. - fn poll( - &mut self, - cx: &mut Context<'_>, - ctx: EngineHookContext, - ) -> Poll>; - - /// Returns [db access level][`EngineHookDBAccessLevel`] the hook needs. - fn db_access_level(&self) -> EngineHookDBAccessLevel; -} - -/// Engine context passed to the [hook polling function][`EngineHook::poll`]. -#[derive(Copy, Clone, Debug)] -pub struct EngineHookContext { - /// Tip block number. - pub tip_block_number: BlockNumber, - /// Finalized block number, if known. - pub finalized_block_number: Option, -} - -/// An event emitted when [hook][`EngineHook`] is polled. -#[derive(Debug)] -pub enum EngineHookEvent { - /// Hook is not ready. - /// - /// If this is returned, the hook is idle. - NotReady, - /// Hook started. - /// - /// If this is returned, the hook is running. - Started, - /// Hook finished. - /// - /// If this is returned, the hook is idle. - Finished(Result<(), EngineHookError>), -} - -impl EngineHookEvent { - /// Returns `true` if the event is [`EngineHookEvent::Started`]. 
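// A minimal sketch of implementing the `EngineHook` trait above. The hook and
// its `LogTipHook` name are hypothetical; it reports `Started` on the first
// poll and `Finished` afterwards, mirroring the `TestHook` used in the
// controller tests. `EngineHookDBAccessLevel` is defined further below.
struct LogTipHook {
    started: bool,
}

impl EngineHook for LogTipHook {
    fn name(&self) -> &'static str {
        "LogTip"
    }

    fn poll(
        &mut self,
        _cx: &mut Context<'_>,
        ctx: EngineHookContext,
    ) -> Poll<RethResult<EngineHookEvent>> {
        if self.started {
            // Report completion so the controller can re-queue the hook.
            Poll::Ready(Ok(EngineHookEvent::Finished(Ok(()))))
        } else {
            self.started = true;
            tracing::debug!(tip = ctx.tip_block_number, "hook observed new tip");
            Poll::Ready(Ok(EngineHookEvent::Started))
        }
    }

    fn db_access_level(&self) -> EngineHookDBAccessLevel {
        // Read-only hooks may run concurrently with one read-write hook.
        EngineHookDBAccessLevel::ReadOnly
    }
}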
- pub const fn is_started(&self) -> bool { - matches!(self, Self::Started) - } - - /// Returns `true` if the event is [`EngineHookEvent::Finished`]. - pub const fn is_finished(&self) -> bool { - matches!(self, Self::Finished(_)) - } -} - -/// An error returned by [hook][`EngineHook`]. -#[derive(Debug, thiserror::Error)] -pub enum EngineHookError { - /// Hook channel closed. - #[error("hook channel closed")] - ChannelClosed, - /// Common error. Wrapper around [`RethError`]. - #[error(transparent)] - Common(#[from] RethError), - /// An internal error occurred. - #[error(transparent)] - Internal(#[from] Box), -} - -/// Level of database access the hook needs for execution. -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum EngineHookDBAccessLevel { - /// Read-only database access. - ReadOnly, - /// Read-write database access. - ReadWrite, -} - -impl EngineHookDBAccessLevel { - /// Returns `true` if the hook needs read-only access to the database. - pub const fn is_read_only(&self) -> bool { - matches!(self, Self::ReadOnly) - } - - /// Returns `true` if the hook needs read-write access to the database. - pub const fn is_read_write(&self) -> bool { - matches!(self, Self::ReadWrite) - } -} diff --git a/crates/consensus/beacon/src/engine/hooks/prune.rs b/crates/consensus/beacon/src/engine/hooks/prune.rs deleted file mode 100644 index 409fc98b8..000000000 --- a/crates/consensus/beacon/src/engine/hooks/prune.rs +++ /dev/null @@ -1,203 +0,0 @@ -//! Prune hook for the engine implementation. - -use crate::{ - engine::hooks::{EngineHook, EngineHookContext, EngineHookError, EngineHookEvent}, - hooks::EngineHookDBAccessLevel, -}; -use alloy_primitives::BlockNumber; -use futures::FutureExt; -use metrics::Counter; -use reth_errors::{RethError, RethResult}; -use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter}; -use reth_prune::{Pruner, PrunerError, PrunerWithResult}; -use reth_tasks::TaskSpawner; -use std::{ - fmt::{self, Debug}, - task::{ready, Context, Poll}, -}; -use tokio::sync::oneshot; - -/// Manages pruning under the control of the engine. -/// -/// This type controls the [Pruner]. -pub struct PruneHook { - /// The current state of the pruner. - pruner_state: PrunerState, - /// The type that can spawn the pruner task. - pruner_task_spawner: Box, - metrics: Metrics, -} - -impl fmt::Debug for PruneHook -where - PF: DatabaseProviderFactory + fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("PruneHook") - .field("pruner_state", &self.pruner_state) - .field("metrics", &self.metrics) - .finish() - } -} - -impl PruneHook { - /// Create a new instance - pub fn new( - pruner: Pruner, - pruner_task_spawner: Box, - ) -> Self { - Self { - pruner_state: PrunerState::Idle(Some(pruner)), - pruner_task_spawner, - metrics: Metrics::default(), - } - } - - /// Advances the pruner state. - /// - /// This checks for the result in the channel, or returns pending if the pruner is idle. 
- fn poll_pruner(&mut self, cx: &mut Context<'_>) -> Poll> { - let result = match self.pruner_state { - PrunerState::Idle(_) => return Poll::Pending, - PrunerState::Running(ref mut fut) => { - ready!(fut.poll_unpin(cx)) - } - }; - - let event = match result { - Ok((pruner, result)) => { - self.pruner_state = PrunerState::Idle(Some(pruner)); - - match result { - Ok(_) => EngineHookEvent::Finished(Ok(())), - Err(err) => EngineHookEvent::Finished(Err(err.into())), - } - } - Err(_) => { - // failed to receive the pruner - EngineHookEvent::Finished(Err(EngineHookError::ChannelClosed)) - } - }; - - Poll::Ready(Ok(event)) - } -} - -impl PruneHook -where - PF: DatabaseProviderFactory - + 'static, -{ - /// This will try to spawn the pruner if it is idle: - /// 1. Check if pruning is needed through [`Pruner::is_pruning_needed`]. - /// - /// 2.1. If pruning is needed, pass tip block number to the [`Pruner::run`] and spawn it in a - /// separate task. Set pruner state to [`PrunerState::Running`]. - /// 2.2. If pruning is not needed, set pruner state back to [`PrunerState::Idle`]. - /// - /// If pruner is already running, do nothing. - fn try_spawn_pruner(&mut self, tip_block_number: BlockNumber) -> Option { - match &mut self.pruner_state { - PrunerState::Idle(pruner) => { - let mut pruner = pruner.take()?; - - // Check tip for pruning - if pruner.is_pruning_needed(tip_block_number) { - let (tx, rx) = oneshot::channel(); - self.pruner_task_spawner.spawn_critical_blocking( - "pruner task", - Box::pin(async move { - let result = pruner.run(tip_block_number); - let _ = tx.send((pruner, result)); - }), - ); - self.metrics.runs_total.increment(1); - self.pruner_state = PrunerState::Running(rx); - - Some(EngineHookEvent::Started) - } else { - self.pruner_state = PrunerState::Idle(Some(pruner)); - Some(EngineHookEvent::NotReady) - } - } - PrunerState::Running(_) => None, - } - } -} - -impl EngineHook for PruneHook -where - PF: DatabaseProviderFactory - + 'static, -{ - fn name(&self) -> &'static str { - "Prune" - } - - fn poll( - &mut self, - cx: &mut Context<'_>, - ctx: EngineHookContext, - ) -> Poll> { - // Try to spawn a pruner - match self.try_spawn_pruner(ctx.tip_block_number) { - Some(EngineHookEvent::NotReady) => return Poll::Pending, - Some(event) => return Poll::Ready(Ok(event)), - None => (), - } - - // Poll pruner and check its status - self.poll_pruner(cx) - } - - fn db_access_level(&self) -> EngineHookDBAccessLevel { - EngineHookDBAccessLevel::ReadWrite - } -} - -/// The possible pruner states within the sync controller. -/// -/// [`PrunerState::Idle`] means that the pruner is currently idle. -/// [`PrunerState::Running`] means that the pruner is currently running. -/// -/// NOTE: The differentiation between these two states is important, because when the pruner is -/// running, it acquires the write lock over the database. This means that we cannot forward to the -/// blockchain tree any messages that would result in database writes, since it would result in a -/// deadlock. -enum PrunerState { - /// Pruner is idle. 
- Idle(Option>), - /// Pruner is running and waiting for a response - Running(oneshot::Receiver>), -} - -impl fmt::Debug for PrunerState -where - PF: DatabaseProviderFactory + Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::Idle(f0) => f.debug_tuple("Idle").field(&f0).finish(), - Self::Running(f0) => f.debug_tuple("Running").field(&f0).finish(), - } - } -} - -#[derive(reth_metrics::Metrics)] -#[metrics(scope = "consensus.engine.prune")] -struct Metrics { - /// The number of times the pruner was run. - runs_total: Counter, -} - -impl From for EngineHookError { - fn from(err: PrunerError) -> Self { - match err { - PrunerError::PruneSegment(_) | PrunerError::InconsistentData(_) => { - Self::Internal(Box::new(err)) - } - PrunerError::Database(err) => RethError::Database(err).into(), - PrunerError::Provider(err) => RethError::Provider(err).into(), - } - } -} diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs deleted file mode 100644 index 99387492c..000000000 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ /dev/null @@ -1,209 +0,0 @@ -//! `StaticFile` hook for the engine implementation. - -use crate::{ - engine::hooks::{EngineHook, EngineHookContext, EngineHookError, EngineHookEvent}, - hooks::EngineHookDBAccessLevel, -}; -use alloy_primitives::BlockNumber; -use futures::FutureExt; -use reth_codecs::Compact; -use reth_db_api::table::Value; -use reth_errors::RethResult; -use reth_primitives::{static_file::HighestStaticFiles, NodePrimitives}; -use reth_provider::{ - BlockReader, ChainStateBlockReader, DatabaseProviderFactory, StageCheckpointReader, - StaticFileProviderFactory, -}; -use reth_static_file::{StaticFileProducer, StaticFileProducerWithResult}; -use reth_tasks::TaskSpawner; -use std::task::{ready, Context, Poll}; -use tokio::sync::oneshot; -use tracing::trace; - -/// Manages producing static files under the control of the engine. -/// -/// This type controls the [`StaticFileProducer`]. -#[derive(Debug)] -pub struct StaticFileHook { - /// The current state of the `static_file_producer`. - state: StaticFileProducerState, - /// The type that can spawn the `static_file_producer` task. - task_spawner: Box, -} - -impl StaticFileHook -where - Provider: StaticFileProviderFactory - + DatabaseProviderFactory< - Provider: StaticFileProviderFactory< - Primitives: NodePrimitives< - SignedTx: Value + Compact, - BlockHeader: Value + Compact, - Receipt: Value + Compact, - >, - > + StageCheckpointReader - + BlockReader - + ChainStateBlockReader, - > + 'static, -{ - /// Create a new instance - pub fn new( - static_file_producer: StaticFileProducer, - task_spawner: Box, - ) -> Self { - Self { state: StaticFileProducerState::Idle(Some(static_file_producer)), task_spawner } - } - - /// Advances the `static_file_producer` state. - /// - /// This checks for the result in the channel, or returns pending if the `static_file_producer` - /// is idle. 
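// Both `PruneHook` above and this `StaticFileHook` drive their worker through
// the same state-machine shape: the worker is moved into a spawned blocking
// task and handed back over a oneshot channel together with its result, so the
// hook can return to the idle state. A stripped-down sketch of that pattern
// (the `WorkerState`/`spawn_worker` names are hypothetical, and a plain thread
// stands in for `TaskSpawner::spawn_critical_blocking`):
enum WorkerState<W, R> {
    /// Worker is parked and can be spawned again.
    Idle(Option<W>),
    /// Worker is running; the receiver yields it back together with its result.
    Running(oneshot::Receiver<(W, R)>),
}

fn spawn_worker<W, R, F>(worker: W, run: F) -> WorkerState<W, R>
where
    W: Send + 'static,
    R: Send + 'static,
    F: FnOnce(&mut W) -> R + Send + 'static,
{
    let (tx, rx) = oneshot::channel();
    std::thread::spawn(move || {
        let mut worker = worker;
        let result = run(&mut worker);
        // Return ownership of the worker so the caller can go back to `Idle`.
        let _ = tx.send((worker, result));
    });
    WorkerState::Running(rx)
}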
- fn poll_static_file_producer( - &mut self, - cx: &mut Context<'_>, - ) -> Poll> { - let result = match self.state { - StaticFileProducerState::Idle(_) => return Poll::Pending, - StaticFileProducerState::Running(ref mut fut) => { - ready!(fut.poll_unpin(cx)) - } - }; - - let event = match result { - Ok((static_file_producer, result)) => { - self.state = StaticFileProducerState::Idle(Some(static_file_producer)); - - match result { - Ok(_) => EngineHookEvent::Finished(Ok(())), - Err(err) => EngineHookEvent::Finished(Err(EngineHookError::Common(err.into()))), - } - } - Err(_) => { - // failed to receive the static_file_producer - EngineHookEvent::Finished(Err(EngineHookError::ChannelClosed)) - } - }; - - Poll::Ready(Ok(event)) - } - - /// This will try to spawn the `static_file_producer` if it is idle: - /// 1. Check if producing static files is needed through - /// [`StaticFileProducer::get_static_file_targets`](reth_static_file::StaticFileProducerInner::get_static_file_targets) - /// and then [`StaticFileTargets::any`](reth_static_file::StaticFileTargets::any). - /// - /// 2.1. If producing static files is needed, pass static file request to the - /// [`StaticFileProducer::run`](reth_static_file::StaticFileProducerInner::run) and - /// spawn it in a separate task. Set static file producer state to - /// [`StaticFileProducerState::Running`]. - /// 2.2. If producing static files is not needed, set static file producer state back to - /// [`StaticFileProducerState::Idle`]. - /// - /// If `static_file_producer` is already running, do nothing. - fn try_spawn_static_file_producer( - &mut self, - finalized_block_number: BlockNumber, - ) -> RethResult> { - Ok(match &mut self.state { - StaticFileProducerState::Idle(static_file_producer) => { - let Some(static_file_producer) = static_file_producer.take() else { - trace!(target: "consensus::engine::hooks::static_file", "StaticFileProducer is already running but the state is idle"); - return Ok(None) - }; - - let Some(locked_static_file_producer) = static_file_producer.try_lock_arc() else { - trace!(target: "consensus::engine::hooks::static_file", "StaticFileProducer lock is already taken"); - return Ok(None) - }; - - let finalized_block_number = locked_static_file_producer - .last_finalized_block()? - .map(|on_disk| finalized_block_number.min(on_disk)) - .unwrap_or(finalized_block_number); - - let targets = - locked_static_file_producer.get_static_file_targets(HighestStaticFiles { - headers: Some(finalized_block_number), - receipts: Some(finalized_block_number), - transactions: Some(finalized_block_number), - })?; - - // Check if the moving data to static files has been requested. 
- if targets.any() { - let (tx, rx) = oneshot::channel(); - self.task_spawner.spawn_critical_blocking( - "static_file_producer task", - Box::pin(async move { - let result = locked_static_file_producer.run(targets); - let _ = tx.send((static_file_producer, result)); - }), - ); - self.state = StaticFileProducerState::Running(rx); - - Some(EngineHookEvent::Started) - } else { - self.state = StaticFileProducerState::Idle(Some(static_file_producer)); - Some(EngineHookEvent::NotReady) - } - } - StaticFileProducerState::Running(_) => None, - }) - } -} - -impl EngineHook for StaticFileHook -where - Provider: StaticFileProviderFactory - + DatabaseProviderFactory< - Provider: StaticFileProviderFactory< - Primitives: NodePrimitives< - SignedTx: Value + Compact, - BlockHeader: Value + Compact, - Receipt: Value + Compact, - >, - > + StageCheckpointReader - + BlockReader - + ChainStateBlockReader, - > + 'static, -{ - fn name(&self) -> &'static str { - "StaticFile" - } - - fn poll( - &mut self, - cx: &mut Context<'_>, - ctx: EngineHookContext, - ) -> Poll> { - let Some(finalized_block_number) = ctx.finalized_block_number else { - trace!(target: "consensus::engine::hooks::static_file", ?ctx, "Finalized block number is not available"); - return Poll::Pending - }; - - // Try to spawn a static_file_producer - match self.try_spawn_static_file_producer(finalized_block_number)? { - Some(EngineHookEvent::NotReady) => return Poll::Pending, - Some(event) => return Poll::Ready(Ok(event)), - None => (), - } - - // Poll static_file_producer and check its status - self.poll_static_file_producer(cx) - } - - fn db_access_level(&self) -> EngineHookDBAccessLevel { - EngineHookDBAccessLevel::ReadOnly - } -} - -/// The possible `static_file_producer` states within the sync controller. -/// -/// [`StaticFileProducerState::Idle`] means that the static file producer is currently idle. -/// [`StaticFileProducerState::Running`] means that the static file producer is currently running. -#[derive(Debug)] -enum StaticFileProducerState { - /// [`StaticFileProducer`] is idle. - Idle(Option>), - /// [`StaticFileProducer`] is running and waiting for a response - Running(oneshot::Receiver>), -} diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs deleted file mode 100644 index 384820ca9..000000000 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ /dev/null @@ -1,125 +0,0 @@ -use alloy_eips::eip1898::BlockWithParent; -use alloy_primitives::B256; -use reth_metrics::{ - metrics::{Counter, Gauge}, - Metrics, -}; -use schnellru::{ByLength, LruMap}; -use std::fmt::Debug; -use tracing::warn; - -/// The max hit counter for invalid headers in the cache before it is forcefully evicted. -/// -/// In other words, if a header is referenced more than this number of times, it will be evicted to -/// allow for reprocessing. -const INVALID_HEADER_HIT_EVICTION_THRESHOLD: u8 = 128; - -/// Keeps track of invalid headers. -#[derive(Debug)] -pub struct InvalidHeaderCache { - /// This maps a header hash to a reference to its invalid ancestor. - headers: LruMap, - /// Metrics for the cache. - metrics: InvalidHeaderCacheMetrics, -} - -impl InvalidHeaderCache { - /// Invalid header cache constructor. 
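// A short usage sketch of this cache via the methods below, as the engine
// drives it (the engine constructs it with `MAX_INVALID_HEADERS = 512`, see
// `mod.rs`). `bad_block` is an assumed `BlockWithParent` for a block that
// failed validation and `child_hash` an assumed `B256` of a later descendant:
let mut cache = InvalidHeaderCache::new(512);
// Remember the block that failed validation.
cache.insert(bad_block);
// A descendant arriving later can be rejected cheaply if one of its ancestors
// is known to be invalid; each lookup bumps the entry's hit counter and evicts
// the entry once `INVALID_HEADER_HIT_EVICTION_THRESHOLD` is reached.
if let Some(invalid_ancestor) = cache.get(&child_hash) {
    // respond with an INVALID payload status referencing `invalid_ancestor`
    let _ = invalid_ancestor;
}
// Link the descendant to the already-known invalid ancestor.
cache.insert_with_invalid_ancestor(child_hash, bad_block);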
- pub fn new(max_length: u32) -> Self { - Self { headers: LruMap::new(ByLength::new(max_length)), metrics: Default::default() } - } - - fn insert_entry(&mut self, hash: B256, header: BlockWithParent) { - self.headers.insert(hash, HeaderEntry { header, hit_count: 0 }); - } - - /// Returns the invalid ancestor's header if it exists in the cache. - /// - /// If this is called, the hit count for the entry is incremented. - /// If the hit count exceeds the threshold, the entry is evicted and `None` is returned. - pub fn get(&mut self, hash: &B256) -> Option { - { - let entry = self.headers.get(hash)?; - entry.hit_count += 1; - if entry.hit_count < INVALID_HEADER_HIT_EVICTION_THRESHOLD { - return Some(entry.header) - } - } - // if we get here, the entry has been hit too many times, so we evict it - self.headers.remove(hash); - self.metrics.hit_evictions.increment(1); - None - } - - /// Inserts an invalid block into the cache, with a given invalid ancestor. - pub fn insert_with_invalid_ancestor( - &mut self, - header_hash: B256, - invalid_ancestor: BlockWithParent, - ) { - if self.get(&header_hash).is_none() { - warn!(target: "consensus::engine", hash=?header_hash, ?invalid_ancestor, "Bad block with existing invalid ancestor"); - self.insert_entry(header_hash, invalid_ancestor); - - // update metrics - self.metrics.known_ancestor_inserts.increment(1); - self.metrics.count.set(self.headers.len() as f64); - } - } - - /// Inserts an invalid ancestor into the map. - pub fn insert(&mut self, invalid_ancestor: BlockWithParent) { - if self.get(&invalid_ancestor.block.hash).is_none() { - warn!(target: "consensus::engine", ?invalid_ancestor, "Bad block with hash"); - self.insert_entry(invalid_ancestor.block.hash, invalid_ancestor); - - // update metrics - self.metrics.unique_inserts.increment(1); - self.metrics.count.set(self.headers.len() as f64); - } - } -} - -struct HeaderEntry { - /// Keeps track how many times this header has been hit. - hit_count: u8, - /// The actual header entry - header: BlockWithParent, -} - -/// Metrics for the invalid headers cache. -#[derive(Metrics)] -#[metrics(scope = "consensus.engine.beacon.invalid_headers")] -struct InvalidHeaderCacheMetrics { - /// The total number of invalid headers in the cache. - count: Gauge, - /// The number of inserts with a known ancestor. - known_ancestor_inserts: Counter, - /// The number of unique invalid header inserts (i.e. without a known ancestor). - unique_inserts: Counter, - /// The number of times a header was evicted from the cache because it was hit too many times. 
- hit_evictions: Counter, -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_consensus::Header; - use reth_primitives::SealedHeader; - - #[test] - fn test_hit_eviction() { - let mut cache = InvalidHeaderCache::new(10); - let header = Header::default(); - let header = SealedHeader::seal(header); - cache.insert(header.block_with_parent()); - assert_eq!(cache.headers.get(&header.hash()).unwrap().hit_count, 0); - - for hit in 1..INVALID_HEADER_HIT_EVICTION_THRESHOLD { - assert!(cache.get(&header.hash()).is_some()); - assert_eq!(cache.headers.get(&header.hash()).unwrap().hit_count, hit); - } - - assert!(cache.get(&header.hash()).is_none()); - } -} diff --git a/crates/consensus/beacon/src/engine/metrics.rs b/crates/consensus/beacon/src/engine/metrics.rs deleted file mode 100644 index 67bae71be..000000000 --- a/crates/consensus/beacon/src/engine/metrics.rs +++ /dev/null @@ -1,32 +0,0 @@ -use reth_metrics::{ - metrics::{Counter, Gauge, Histogram}, - Metrics, -}; - -/// Beacon consensus engine metrics. -#[derive(Metrics)] -#[metrics(scope = "consensus.engine.beacon")] -pub(crate) struct EngineMetrics { - /// The number of times the pipeline was run. - pub(crate) pipeline_runs: Counter, - /// The total count of forkchoice updated messages received. - pub(crate) forkchoice_updated_messages: Counter, - /// The total count of new payload messages received. - pub(crate) new_payload_messages: Counter, - /// Latency for making canonical already canonical block - pub(crate) make_canonical_already_canonical_latency: Histogram, - /// Latency for making canonical committed block - pub(crate) make_canonical_committed_latency: Histogram, - /// Latency for making canonical returns error - pub(crate) make_canonical_error_latency: Histogram, - /// Latency for all making canonical results - pub(crate) make_canonical_latency: Histogram, -} - -/// Metrics for the `EngineSyncController`. -#[derive(Metrics)] -#[metrics(scope = "consensus.engine.beacon")] -pub(crate) struct EngineSyncMetrics { - /// How many blocks are currently being downloaded. 
- pub(crate) active_block_downloads: Gauge, -} diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs deleted file mode 100644 index 0412e9877..000000000 --- a/crates/consensus/beacon/src/engine/mod.rs +++ /dev/null @@ -1,2961 +0,0 @@ -use alloy_consensus::{BlockHeader, Header}; -use alloy_eips::{merge::EPOCH_SLOTS, BlockNumHash}; -use alloy_primitives::{BlockNumber, B256}; -use alloy_rpc_types_engine::{ - ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, PayloadStatusEnum, - PayloadValidationError, -}; -use futures::{stream::BoxStream, Future, StreamExt}; -use itertools::Either; -use reth_blockchain_tree_api::{ - error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, - BlockStatus, BlockValidationKind, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, -}; -use reth_engine_primitives::{ - BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, - ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus, OnForkChoiceUpdated, - PayloadTypes, -}; -use reth_errors::{BlockValidationError, ProviderResult, RethError, RethResult}; -use reth_network_p2p::{ - sync::{NetworkSyncUpdater, SyncState}, - EthBlockClient, -}; -use reth_node_types::{Block, BlockTy, HeaderTy, NodeTypesWithEngine}; -use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_builder_primitives::PayloadBuilder; -use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; -use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{Head, SealedBlock, SealedHeader}; -use reth_provider::{ - providers::{ProviderNodeTypes, TreeNodeTypes}, - BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, - StageCheckpointReader, -}; -use reth_stages_api::{ControlFlow, Pipeline, PipelineTarget, StageId}; -use reth_tasks::TaskSpawner; -use reth_tokio_util::EventSender; -use std::{ - pin::Pin, - sync::Arc, - task::{Context, Poll}, - time::{Duration, Instant}, -}; -use tokio::sync::{ - mpsc::{self, UnboundedSender}, - oneshot, -}; -use tokio_stream::wrappers::UnboundedReceiverStream; -use tracing::*; - -mod error; -pub use error::{BeaconConsensusEngineError, BeaconEngineResult, BeaconForkChoiceUpdateError}; - -mod invalid_headers; -pub use invalid_headers::InvalidHeaderCache; - -pub use reth_engine_primitives::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress}; - -mod handle; -pub use handle::BeaconConsensusEngineHandle; - -mod metrics; -use metrics::EngineMetrics; - -pub mod sync; -use sync::{EngineSyncController, EngineSyncEvent}; - -/// Hooks for running during the main loop of -/// [consensus engine][`crate::engine::BeaconConsensusEngine`]. -pub mod hooks; -use hooks::{EngineHookContext, EngineHookEvent, EngineHooks, EngineHooksController, PolledHook}; - -#[cfg(test)] -pub mod test_utils; - -/// The maximum number of invalid headers that can be tracked by the engine. -const MAX_INVALID_HEADERS: u32 = 512u32; - -/// The largest gap for which the tree will be used for sync. See docs for `pipeline_run_threshold` -/// for more information. -/// -/// This is the default threshold, the distance to the head that the tree will be used for sync. -/// If the distance exceeds this threshold, the pipeline will be used for sync. -pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; - -/// Helper trait expressing requirements for node types to be used in engine. 
-pub trait EngineNodeTypes: ProviderNodeTypes + NodeTypesWithEngine {} - -impl EngineNodeTypes for T where T: ProviderNodeTypes + NodeTypesWithEngine {} - -/// Represents a pending forkchoice update. -/// -/// This type encapsulates the necessary components for a pending forkchoice update -/// in the context of a beacon consensus engine. -/// -/// It consists of: -/// - The current fork choice state. -/// - Optional payload attributes specific to the engine type. -/// - Sender for the result of an oneshot channel, conveying the outcome of the fork choice update. -type PendingForkchoiceUpdate = - (ForkchoiceState, Option, oneshot::Sender>); - -/// The beacon consensus engine is the driver that switches between historical and live sync. -/// -/// The beacon consensus engine is itself driven by messages from the Consensus Layer, which are -/// received by Engine API (JSON-RPC). -/// -/// The consensus engine is idle until it receives the first -/// [`BeaconEngineMessage::ForkchoiceUpdated`] message from the CL which would initiate the sync. At -/// first, the consensus engine would run the [Pipeline] until the latest known block hash. -/// Afterward, it would attempt to create/restore the [`BlockchainTreeEngine`] from the blocks -/// that are currently available. In case the restoration is successful, the consensus engine would -/// run in a live sync mode, populating the [`BlockchainTreeEngine`] with new blocks as they arrive -/// via engine API and downloading any missing blocks from the network to fill potential gaps. -/// -/// The consensus engine has two data input sources: -/// -/// ## New Payload (`engine_newPayloadV{}`) -/// -/// The engine receives new payloads from the CL. If the payload is connected to the canonical -/// chain, it will be fully validated added to a chain in the [`BlockchainTreeEngine`]: `VALID` -/// -/// If the payload's chain is disconnected (at least 1 block is missing) then it will be buffered: -/// `SYNCING` ([`BlockStatus::Disconnected`]). -/// -/// ## Forkchoice Update (FCU) (`engine_forkchoiceUpdatedV{}`) -/// -/// This contains the latest forkchoice state and the payload attributes. The engine will attempt to -/// make a new canonical chain based on the `head_hash` of the update and trigger payload building -/// if the `payload_attrs` are present and the FCU is `VALID`. -/// -/// The `head_hash` forms a chain by walking backwards from the `head_hash` towards the canonical -/// blocks of the chain. -/// -/// Making a new canonical chain can result in the following relevant outcomes: -/// -/// ### The chain is connected -/// -/// All blocks of the `head_hash`'s chain are present in the [`BlockchainTreeEngine`] and are -/// committed to the canonical chain. This also includes reorgs. -/// -/// ### The chain is disconnected -/// -/// In this case the [`BlockchainTreeEngine`] doesn't know how the new chain connects to the -/// existing canonical chain. It could be a simple commit (new blocks extend the current head) or a -/// re-org that requires unwinding the canonical chain. -/// -/// This further distinguishes between two variants: -/// -/// #### `head_hash`'s block exists -/// -/// The `head_hash`'s block was already received/downloaded, but at least one block is missing to -/// form a _connected_ chain. The engine will attempt to download the missing blocks from the -/// network by walking backwards (`parent_hash`), and then try to make the block canonical as soon -/// as the chain becomes connected. 
-/// -/// However, it still can be the case that the chain and the FCU is `INVALID`. -/// -/// #### `head_hash` block is missing -/// -/// This is similar to the previous case, but the `head_hash`'s block is missing. At which point the -/// engine doesn't know where the new head will point to: new chain could be a re-org or a simple -/// commit. The engine will download the missing head first and then proceed as in the previous -/// case. -/// -/// # Panics -/// -/// If the future is polled more than once. Leads to undefined state. -/// -/// Note: soon deprecated. See `reth_engine_service::EngineService`. -#[must_use = "Future does nothing unless polled"] -#[allow(missing_debug_implementations)] -pub struct BeaconConsensusEngine -where - N: EngineNodeTypes, - Client: EthBlockClient, - BT: BlockchainTreeEngine - + BlockReader - + BlockIdReader - + CanonChainTracker - + StageCheckpointReader, -{ - /// Controls syncing triggered by engine updates. - sync: EngineSyncController, - /// The type we can use to query both the database and the blockchain tree. - blockchain: BT, - /// Used for emitting updates about whether the engine is syncing or not. - sync_state_updater: Box, - /// The Engine API message receiver. - engine_message_stream: BoxStream<'static, BeaconEngineMessage>, - /// A clone of the handle - handle: BeaconConsensusEngineHandle, - /// Tracks the received forkchoice state updates received by the CL. - forkchoice_state_tracker: ForkchoiceStateTracker, - /// The payload store. - payload_builder: PayloadBuilderHandle, - /// Validator for execution payloads - payload_validator: ExecutionPayloadValidator, - /// Current blockchain tree action. - blockchain_tree_action: Option>, - /// Pending forkchoice update. - /// It is recorded if we cannot process the forkchoice update because - /// a hook with database read-write access is active. - /// This is a temporary solution to always process missed FCUs. - pending_forkchoice_update: - Option::PayloadAttributes>>, - /// Tracks the header of invalid payloads that were rejected by the engine because they're - /// invalid. - invalid_headers: InvalidHeaderCache, - /// After downloading a block corresponding to a recent forkchoice update, the engine will - /// check whether or not we can connect the block to the current canonical chain. If we can't, - /// we need to download and execute the missing parents of that block. - /// - /// When the block can't be connected, its block number will be compared to the canonical head, - /// resulting in a heuristic for the number of missing blocks, or the size of the gap between - /// the new block and the canonical head. - /// - /// If the gap is larger than this threshold, the engine will download and execute the missing - /// blocks using the pipeline. Otherwise, the engine, sync controller, and blockchain tree will - /// be used to download and execute the missing blocks. - pipeline_run_threshold: u64, - hooks: EngineHooksController, - /// Sender for engine events. - event_sender: EventSender, - /// Consensus engine metrics. - metrics: EngineMetrics, -} - -impl BeaconConsensusEngine -where - N: TreeNodeTypes, - BT: BlockchainTreeEngine - + BlockReader, Header = HeaderTy> - + BlockIdReader - + CanonChainTracker
> - + StageCheckpointReader - + ChainSpecProvider - + 'static, - Client: EthBlockClient + 'static, -{ - /// Create a new instance of the [`BeaconConsensusEngine`]. - #[allow(clippy::too_many_arguments)] - pub fn new( - client: Client, - pipeline: Pipeline, - blockchain: BT, - task_spawner: Box, - sync_state_updater: Box, - max_block: Option, - payload_builder: PayloadBuilderHandle, - target: Option, - pipeline_run_threshold: u64, - hooks: EngineHooks, - ) -> RethResult<(Self, BeaconConsensusEngineHandle)> { - let (to_engine, rx) = mpsc::unbounded_channel(); - Self::with_channel( - client, - pipeline, - blockchain, - task_spawner, - sync_state_updater, - max_block, - payload_builder, - target, - pipeline_run_threshold, - to_engine, - Box::pin(UnboundedReceiverStream::from(rx)), - hooks, - ) - } - - /// Create a new instance of the [`BeaconConsensusEngine`] using the given channel to configure - /// the [`BeaconEngineMessage`] communication channel. - /// - /// By default the engine is started with idle pipeline. - /// The pipeline can be launched immediately in one of the following ways descending in - /// priority: - /// - Explicit [`Option::Some`] target block hash provided via a constructor argument. - /// - The process was previously interrupted amidst the pipeline run. This is checked by - /// comparing the checkpoints of the first ([`StageId::Headers`]) and last - /// ([`StageId::Finish`]) stages. In this case, the latest available header in the database is - /// used as the target. - /// - /// Propagates any database related error. - #[allow(clippy::too_many_arguments)] - pub fn with_channel( - client: Client, - pipeline: Pipeline, - blockchain: BT, - task_spawner: Box, - sync_state_updater: Box, - max_block: Option, - payload_builder: PayloadBuilderHandle, - target: Option, - pipeline_run_threshold: u64, - to_engine: UnboundedSender>, - engine_message_stream: BoxStream<'static, BeaconEngineMessage>, - hooks: EngineHooks, - ) -> RethResult<(Self, BeaconConsensusEngineHandle)> { - let event_sender = EventSender::default(); - let handle = BeaconConsensusEngineHandle::new(to_engine); - let sync = EngineSyncController::new( - pipeline, - client, - task_spawner.clone(), - max_block, - blockchain.chain_spec(), - event_sender.clone(), - ); - let mut this = Self { - sync, - payload_validator: ExecutionPayloadValidator::new(blockchain.chain_spec()), - blockchain, - sync_state_updater, - engine_message_stream, - handle: handle.clone(), - forkchoice_state_tracker: Default::default(), - payload_builder, - invalid_headers: InvalidHeaderCache::new(MAX_INVALID_HEADERS), - blockchain_tree_action: None, - pending_forkchoice_update: None, - pipeline_run_threshold, - hooks: EngineHooksController::new(hooks), - event_sender, - metrics: EngineMetrics::default(), - }; - - let maybe_pipeline_target = match target { - // Provided target always takes precedence. - target @ Some(_) => target, - None => this.check_pipeline_consistency()?, - }; - - if let Some(target) = maybe_pipeline_target { - this.sync.set_pipeline_sync_target(target.into()); - } - - Ok((this, handle)) - } - - /// Returns current [`EngineHookContext`] that's used for polling engine hooks. - fn current_engine_hook_context(&self) -> RethResult { - Ok(EngineHookContext { - tip_block_number: self.blockchain.canonical_tip().number, - finalized_block_number: self - .blockchain - .finalized_block_number() - .map_err(RethError::Provider)?, - }) - } - - /// Set the next blockchain tree action. 
- fn set_blockchain_tree_action(&mut self, action: BlockchainTreeAction) { - let previous_action = self.blockchain_tree_action.replace(action); - debug_assert!(previous_action.is_none(), "Pre-existing action found"); - } - - /// Pre-validate forkchoice update and check whether it can be processed. - /// - /// This method returns the update outcome if validation fails or - /// the node is syncing and the update cannot be processed at the moment. - fn pre_validate_forkchoice_update( - &mut self, - state: ForkchoiceState, - ) -> ProviderResult> { - if state.head_block_hash.is_zero() { - return Ok(Some(OnForkChoiceUpdated::invalid_state())) - } - - // check if the new head hash is connected to any ancestor that we previously marked as - // invalid - let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash); - if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? { - return Ok(Some(OnForkChoiceUpdated::with_invalid(status))) - } - - if self.sync.is_pipeline_active() { - // We can only process new forkchoice updates if the pipeline is idle, since it requires - // exclusive access to the database - trace!(target: "consensus::engine", "Pipeline is syncing, skipping forkchoice update"); - return Ok(Some(OnForkChoiceUpdated::syncing())) - } - - Ok(None) - } - - /// Process the result of attempting to make forkchoice state head hash canonical. - /// - /// # Returns - /// - /// A forkchoice state update outcome or fatal error. - fn on_forkchoice_updated_make_canonical_result( - &mut self, - state: ForkchoiceState, - mut attrs: Option<::PayloadAttributes>, - make_canonical_result: Result, - elapsed: Duration, - ) -> Result { - match make_canonical_result { - Ok(outcome) => { - let should_update_head = match &outcome { - CanonicalOutcome::AlreadyCanonical { head, header } => { - self.on_head_already_canonical(head, header, &mut attrs) - } - CanonicalOutcome::Committed { head } => { - // new VALID update that moved the canonical chain forward - debug!(target: "consensus::engine", hash=?state.head_block_hash, number=head.number, "Canonicalized new head"); - true - } - }; - - if should_update_head { - let head = outcome.header(); - let _ = self.update_head(head.clone()); - self.event_sender.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( - Box::new(head.clone()), - elapsed, - )); - } - - // Validate that the forkchoice state is consistent. - let on_updated = if let Some(invalid_fcu_response) = - self.ensure_consistent_forkchoice_state(state)? - { - trace!(target: "consensus::engine", ?state, "Forkchoice state is inconsistent"); - invalid_fcu_response - } else if let Some(attrs) = attrs { - // the CL requested to build a new payload on top of this new VALID head - let head = outcome.into_header().unseal(); - self.process_payload_attributes( - attrs, - head, - state, - EngineApiMessageVersion::default(), - ) - } else { - OnForkChoiceUpdated::valid(PayloadStatus::new( - PayloadStatusEnum::Valid, - Some(state.head_block_hash), - )) - }; - Ok(on_updated) - } - Err(err) => { - if err.is_fatal() { - error!(target: "consensus::engine", %err, "Encountered fatal error"); - Err(err) - } else { - Ok(OnForkChoiceUpdated::valid( - self.on_failed_canonical_forkchoice_update(&state, err)?, - )) - } - } - } - } - - /// Invoked when head hash references a `VALID` block that is already canonical. - /// - /// Returns `true` if the head needs to be updated. 
- fn on_head_already_canonical( - &self, - head: &BlockNumHash, - header: &SealedHeader, - attrs: &mut Option<::PayloadAttributes>, - ) -> bool { - // On Optimism, the proposers are allowed to reorg their own chain at will. - #[cfg(feature = "optimism")] - if reth_chainspec::EthChainSpec::is_optimism(&self.blockchain.chain_spec()) { - debug!( - target: "consensus::engine", - fcu_head_num=?header.number, - current_head_num=?head.number, - "[Optimism] Allowing beacon reorg to old head" - ); - return true - } - - // 2. Client software MAY skip an update of the forkchoice state and MUST NOT begin a - // payload build process if `forkchoiceState.headBlockHash` references a `VALID` ancestor - // of the head of canonical chain, i.e. the ancestor passed payload validation process - // and deemed `VALID`. In the case of such an event, client software MUST return - // `{payloadStatus: {status: VALID, latestValidHash: forkchoiceState.headBlockHash, - // validationError: null}, payloadId: null}` - if head != &header.num_hash() { - attrs.take(); - } - - debug!( - target: "consensus::engine", - fcu_head_num=?header.number, - current_head_num=?head.number, - "Ignoring beacon update to old head" - ); - false - } - - /// Invoked when we receive a new forkchoice update message. Calls into the blockchain tree - /// to resolve chain forks and ensure that the Execution Layer is working with the latest valid - /// chain. - /// - /// These responses should adhere to the [Engine API Spec for - /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). - /// - /// Returns an error if an internal error occurred like a database error. - fn on_forkchoice_updated( - &mut self, - state: ForkchoiceState, - attrs: Option<::PayloadAttributes>, - tx: oneshot::Sender>, - ) { - self.metrics.forkchoice_updated_messages.increment(1); - self.blockchain.on_forkchoice_update_received(&state); - trace!(target: "consensus::engine", ?state, "Received new forkchoice state update"); - - match self.pre_validate_forkchoice_update(state) { - Ok(on_updated_result) => { - if let Some(on_updated) = on_updated_result { - // Pre-validate forkchoice state update and return if it's invalid - // or cannot be processed at the moment. - self.on_forkchoice_updated_status(state, on_updated, tx); - } else if let Some(hook) = self.hooks.active_db_write_hook() { - // We can only process new forkchoice updates if no hook with db write is - // running, since it requires exclusive access to the - // database - let replaced_pending = - self.pending_forkchoice_update.replace((state, attrs, tx)); - warn!( - target: "consensus::engine", - hook = %hook.name(), - head_block_hash = ?state.head_block_hash, - safe_block_hash = ?state.safe_block_hash, - finalized_block_hash = ?state.finalized_block_hash, - replaced_pending = ?replaced_pending.map(|(state, _, _)| state), - "Hook is in progress, delaying forkchoice update. \ - This may affect the performance of your node as a validator." - ); - } else { - self.set_blockchain_tree_action( - BlockchainTreeAction::MakeForkchoiceHeadCanonical { state, attrs, tx }, - ); - } - } - Err(error) => { - let _ = tx.send(Err(error.into())); - } - } - } - - /// Called after the forkchoice update status has been resolved. - /// Depending on the outcome, the method updates the sync state and notifies the listeners - /// about new processed FCU. 
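The removed `on_forkchoice_updated` entry point gates every update through a fixed sequence of checks before any canonicalization is attempted: a zero head hash, a known-invalid ancestor, an active pipeline, or an active db-write hook each short-circuit the call. Below is a minimal, self-contained sketch of that ordering; all names here (`Hash`, `FcuOutcome`, `gate_forkchoice_update`) are illustrative stand-ins rather than the reth API, and the pipeline/hook cases are collapsed into a single deferred outcome.

```rust
/// Simplified stand-ins for the state consulted by the removed
/// `on_forkchoice_updated` / `pre_validate_forkchoice_update` pair.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Hash([u8; 32]);

impl Hash {
    const ZERO: Self = Self([0u8; 32]);
    fn is_zero(&self) -> bool {
        *self == Self::ZERO
    }
}

#[derive(Debug, PartialEq)]
enum FcuOutcome {
    /// Head hash was zero: respond with an invalid-state error.
    InvalidState,
    /// Head descends from a known-invalid block: respond INVALID.
    InvalidAncestor,
    /// Pipeline (or a db-write hook) holds the database: respond SYNCING / defer.
    Deferred,
    /// All gates passed: attempt to make the head canonical.
    MakeCanonical,
}

/// Decision order mirrored from the removed code: zero head, invalid ancestor,
/// exclusive-db users, and only then canonicalization.
fn gate_forkchoice_update(
    head: Hash,
    has_invalid_ancestor: impl Fn(Hash) -> bool,
    pipeline_active: bool,
    db_write_hook_active: bool,
) -> FcuOutcome {
    if head.is_zero() {
        return FcuOutcome::InvalidState;
    }
    if has_invalid_ancestor(head) {
        return FcuOutcome::InvalidAncestor;
    }
    if pipeline_active || db_write_hook_active {
        // The pipeline and db-write hooks need exclusive database access, so
        // the update is answered with SYNCING (or parked until the hook ends).
        return FcuOutcome::Deferred;
    }
    FcuOutcome::MakeCanonical
}

fn main() {
    let ok = gate_forkchoice_update(Hash([1; 32]), |_| false, false, false);
    assert_eq!(ok, FcuOutcome::MakeCanonical);
    let busy = gate_forkchoice_update(Hash([1; 32]), |_| false, true, false);
    assert_eq!(busy, FcuOutcome::Deferred);
    println!("{ok:?}, {busy:?}");
}
```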
- fn on_forkchoice_updated_status( - &mut self, - state: ForkchoiceState, - on_updated: OnForkChoiceUpdated, - tx: oneshot::Sender>, - ) { - // send the response to the CL ASAP - let status = on_updated.forkchoice_status(); - let _ = tx.send(Ok(on_updated)); - - // update the forkchoice state tracker - self.forkchoice_state_tracker.set_latest(state, status); - - match status { - ForkchoiceStatus::Invalid => {} - ForkchoiceStatus::Valid => { - // FCU head is valid, we're no longer syncing - self.sync_state_updater.update_sync_state(SyncState::Idle); - // node's fully synced, clear active download requests - self.sync.clear_block_download_requests(); - } - ForkchoiceStatus::Syncing => { - // we're syncing - self.sync_state_updater.update_sync_state(SyncState::Syncing); - } - } - - // notify listeners about new processed FCU - self.event_sender.notify(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, status)); - } - - /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less - /// than the checkpoint of the first stage). - /// - /// This will return the pipeline target if: - /// * the pipeline was interrupted during its previous run - /// * a new stage was added - /// * stage data was dropped manually through `reth stage drop ...` - /// - /// # Returns - /// - /// A target block hash if the pipeline is inconsistent, otherwise `None`. - fn check_pipeline_consistency(&self) -> RethResult> { - // If no target was provided, check if the stages are congruent - check if the - // checkpoint of the last stage matches the checkpoint of the first. - let first_stage_checkpoint = self - .blockchain - .get_stage_checkpoint(*StageId::ALL.first().unwrap())? - .unwrap_or_default() - .block_number; - - // Skip the first stage as we've already retrieved it and comparing all other checkpoints - // against it. - for stage_id in StageId::ALL.iter().skip(1) { - let stage_checkpoint = - self.blockchain.get_stage_checkpoint(*stage_id)?.unwrap_or_default().block_number; - - // If the checkpoint of any stage is less than the checkpoint of the first stage, - // retrieve and return the block hash of the latest header and use it as the target. - if stage_checkpoint < first_stage_checkpoint { - debug!( - target: "consensus::engine", - first_stage_checkpoint, - inconsistent_stage_id = %stage_id, - inconsistent_stage_checkpoint = stage_checkpoint, - "Pipeline sync progress is inconsistent" - ); - return Ok(self.blockchain.block_hash(first_stage_checkpoint)?) - } - } - - Ok(None) - } - - /// Returns a new [`BeaconConsensusEngineHandle`] that can be cloned and shared. - /// - /// The [`BeaconConsensusEngineHandle`] can be used to interact with this - /// [`BeaconConsensusEngine`] - pub fn handle(&self) -> BeaconConsensusEngineHandle { - self.handle.clone() - } - - /// Returns true if the distance from the local tip to the block is greater than the configured - /// threshold. - /// - /// If the `local_tip` is greater than the `block`, then this will return false. - #[inline] - const fn exceeds_pipeline_run_threshold(&self, local_tip: u64, block: u64) -> bool { - block > local_tip && block - local_tip > self.pipeline_run_threshold - } - - /// Returns the finalized hash to sync to if the distance from the local tip to the block is - /// greater than the configured threshold and we're not synced to the finalized block yet - /// yet (if we've seen that block already). 
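The removed `check_pipeline_consistency` compares every stage checkpoint against the first stage's checkpoint and, on any mismatch, re-targets the pipeline at the first stage's block. A simplified sketch of that comparison follows; the `(&str, u64)` pairs and `find_inconsistent_target` are illustrative, not the real `StageId` or provider types.

```rust
/// Illustrative (stage id, checkpoint block number) pairs, ordered as the
/// pipeline runs them; the first entry plays the role of `StageId::Headers`.
fn find_inconsistent_target(checkpoints: &[(&str, u64)]) -> Option<u64> {
    let first = checkpoints.first()?.1;
    for (stage, checkpoint) in checkpoints.iter().skip(1) {
        // Any later stage lagging behind the first stage means the previous
        // run was interrupted (or a stage was added / dropped), so the
        // pipeline should be re-run up to the first stage's checkpoint.
        if *checkpoint < first {
            eprintln!("stage {stage} is behind: {checkpoint} < {first}");
            return Some(first);
        }
    }
    None
}

fn main() {
    // Headers finished block 100, but Execution only reached 60:
    let stages = [("Headers", 100), ("Bodies", 100), ("Execution", 60)];
    assert_eq!(find_inconsistent_target(&stages), Some(100));

    let consistent = [("Headers", 100), ("Bodies", 100), ("Execution", 100)];
    assert_eq!(find_inconsistent_target(&consistent), None);
}
```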
- /// - /// If this is invoked after a new block has been downloaded, the downloaded block could be the - /// (missing) finalized block. - fn can_pipeline_sync_to_finalized( - &self, - canonical_tip_num: u64, - target_block_number: u64, - downloaded_block: Option, - ) -> Option { - let sync_target_state = self.forkchoice_state_tracker.sync_target_state(); - - // check if the distance exceeds the threshold for pipeline sync - let mut exceeds_pipeline_run_threshold = - self.exceeds_pipeline_run_threshold(canonical_tip_num, target_block_number); - - // check if the downloaded block is the tracked finalized block - if let Some(ref buffered_finalized) = sync_target_state - .as_ref() - .and_then(|state| self.blockchain.buffered_header_by_hash(state.finalized_block_hash)) - { - // if we have buffered the finalized block, we should check how far - // we're off - exceeds_pipeline_run_threshold = - self.exceeds_pipeline_run_threshold(canonical_tip_num, buffered_finalized.number); - } - - // If this is invoked after we downloaded a block we can check if this block is the - // finalized block - if let (Some(downloaded_block), Some(ref state)) = (downloaded_block, sync_target_state) { - if downloaded_block.hash == state.finalized_block_hash { - // we downloaded the finalized block - exceeds_pipeline_run_threshold = - self.exceeds_pipeline_run_threshold(canonical_tip_num, downloaded_block.number); - } - } - - // if the number of missing blocks is greater than the max, run the - // pipeline - if exceeds_pipeline_run_threshold { - if let Some(state) = sync_target_state { - // if we have already canonicalized the finalized block, we should - // skip the pipeline run - match self.blockchain.header_by_hash_or_number(state.finalized_block_hash.into()) { - Err(err) => { - warn!(target: "consensus::engine", %err, "Failed to get finalized block header"); - } - Ok(None) => { - // ensure the finalized block is known (not the zero hash) - if !state.finalized_block_hash.is_zero() { - // we don't have the block yet and the distance exceeds the allowed - // threshold - return Some(state.finalized_block_hash) - } - - // OPTIMISTIC SYNCING - // - // It can happen when the node is doing an - // optimistic sync, where the CL has no knowledge of the finalized hash, - // but is expecting the EL to sync as high - // as possible before finalizing. - // - // This usually doesn't happen on ETH mainnet since CLs use the more - // secure checkpoint syncing. - // - // However, optimism chains will do this. The risk of a reorg is however - // low. - debug!(target: "consensus::engine", hash=?state.head_block_hash, "Setting head hash as an optimistic pipeline target."); - return Some(state.head_block_hash) - } - Ok(Some(_)) => { - // we're fully synced to the finalized block - // but we want to continue downloading the missing parent - } - } - } - } - - None - } - - /// Returns how far the local tip is from the given block. If the local tip is at the same - /// height or its block number is greater than the given block, this returns None. 
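The threshold logic in the removed `exceeds_pipeline_run_threshold` and `can_pipeline_sync_to_finalized` reduces to: run the pipeline only when the target is far enough ahead of the local tip and the finalized block is not yet known locally, falling back to the head hash as an optimistic target when the CL reports a zero finalized hash. A condensed sketch follows; `pipeline_target`, the `Hash` alias, and the `THRESHOLD` constant are illustrative assumptions, not reth defaults.

```rust
type Hash = [u8; 32];

/// True if `block` is ahead of `local_tip` by more than `threshold` blocks,
/// mirroring the removed `exceeds_pipeline_run_threshold`.
const fn exceeds_threshold(local_tip: u64, block: u64, threshold: u64) -> bool {
    block > local_tip && block - local_tip > threshold
}

/// Condensed version of the removed `can_pipeline_sync_to_finalized` decision:
/// return a pipeline target only when the gap is large enough and the finalized
/// block is not already known locally. A zero finalized hash means the CL is
/// syncing optimistically, so the head hash becomes the target instead.
fn pipeline_target(
    canonical_tip: u64,
    target_number: u64,
    threshold: u64,
    finalized_hash: Hash,
    head_hash: Hash,
    finalized_is_known: bool,
) -> Option<Hash> {
    if !exceeds_threshold(canonical_tip, target_number, threshold) || finalized_is_known {
        return None;
    }
    if finalized_hash == [0u8; 32] {
        // Optimistic sync: no finalized hash from the CL yet, target the head.
        return Some(head_hash);
    }
    Some(finalized_hash)
}

fn main() {
    const THRESHOLD: u64 = 32; // illustrative, not reth's configured default

    // Far behind and the finalized block is unknown locally: run the pipeline.
    assert_eq!(
        pipeline_target(100, 5_000, THRESHOLD, [2; 32], [1; 32], false),
        Some([2; 32])
    );
    // Small gap: stay on live (tree) sync and keep downloading blocks.
    assert_eq!(pipeline_target(100, 120, THRESHOLD, [2; 32], [1; 32], false), None);
    // Finalized block already known: keep downloading the missing parent instead.
    assert_eq!(pipeline_target(100, 5_000, THRESHOLD, [2; 32], [1; 32], true), None);
}
```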
- #[inline] - const fn distance_from_local_tip(&self, local_tip: u64, block: u64) -> Option { - if block > local_tip { - Some(block - local_tip) - } else { - None - } - } - - /// If validation fails, the response MUST contain the latest valid hash: - /// - /// - The block hash of the ancestor of the invalid payload satisfying the following two - /// conditions: - /// - It is fully validated and deemed VALID - /// - Any other ancestor of the invalid payload with a higher blockNumber is INVALID - /// - 0x0000000000000000000000000000000000000000000000000000000000000000 if the above - /// conditions are satisfied by a `PoW` block. - /// - null if client software cannot determine the ancestor of the invalid payload satisfying - /// the above conditions. - fn latest_valid_hash_for_invalid_payload( - &mut self, - parent_hash: B256, - ) -> ProviderResult> { - // Check if parent exists in side chain or in canonical chain. - if self.blockchain.find_block_by_hash(parent_hash, BlockSource::Any)?.is_some() { - return Ok(Some(parent_hash)) - } - - // iterate over ancestors in the invalid cache - // until we encounter the first valid ancestor - let mut current_hash = parent_hash; - let mut current_block = self.invalid_headers.get(¤t_hash); - while let Some(block) = current_block { - current_hash = block.parent; - current_block = self.invalid_headers.get(¤t_hash); - - // If current_header is None, then the current_hash does not have an invalid - // ancestor in the cache, check its presence in blockchain tree - if current_block.is_none() && - self.blockchain.find_block_by_hash(current_hash, BlockSource::Any)?.is_some() - { - return Ok(Some(current_hash)) - } - } - Ok(None) - } - - /// Prepares the invalid payload response for the given hash, checking the - /// database for the parent hash and populating the payload status with the latest valid hash - /// according to the engine api spec. - fn prepare_invalid_response(&mut self, mut parent_hash: B256) -> ProviderResult { - // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal - // PoW block, which we need to identify by looking at the parent's block difficulty - if let Ok(Some(parent)) = self.blockchain.header_by_hash_or_number(parent_hash.into()) { - if !parent.is_zero_difficulty() { - parent_hash = B256::ZERO; - } - } - - let valid_parent_hash = self.latest_valid_hash_for_invalid_payload(parent_hash)?; - Ok(PayloadStatus::from_status(PayloadStatusEnum::Invalid { - validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(), - }) - .with_latest_valid_hash(valid_parent_hash.unwrap_or_default())) - } - - /// Checks if the given `check` hash points to an invalid header, inserting the given `head` - /// block into the invalid header cache if the `check` hash has a known invalid ancestor. - /// - /// Returns a payload status response according to the engine API spec if the block is known to - /// be invalid. 
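The `latestValidHash` computation in the removed `latest_valid_hash_for_invalid_payload` walks the invalid-header cache parent by parent until it reaches a hash with no invalid entry that the chain actually knows. A self-contained sketch of that walk follows; the `HashMap` and `HashSet` stand in for the LRU cache and the provider lookup and are not the real types.

```rust
use std::collections::{HashMap, HashSet};

type Hash = [u8; 32];

/// Walk `parent_hash` up through `invalid` (child -> parent) and return the
/// first ancestor that has no invalid entry and is known to `chain`,
/// i.e. the hash to report as `latestValidHash` for an invalid payload.
fn latest_valid_hash(
    parent_hash: Hash,
    invalid: &HashMap<Hash, Hash>,
    chain: &HashSet<Hash>,
) -> Option<Hash> {
    // Parent itself is valid and known: report it directly.
    if chain.contains(&parent_hash) {
        return Some(parent_hash);
    }
    let mut current = parent_hash;
    while let Some(parent) = invalid.get(&current) {
        current = *parent;
        // First hash with no invalid entry: valid only if the chain has it.
        if !invalid.contains_key(&current) && chain.contains(&current) {
            return Some(current);
        }
    }
    None
}

fn main() {
    let h = |b: u8| -> Hash { [b; 32] };
    // Chain knows block 1; blocks 2 and 3 are cached as invalid (3 -> 2 -> 1).
    let chain: HashSet<Hash> = [h(1)].into_iter().collect();
    let invalid: HashMap<Hash, Hash> = [(h(3), h(2)), (h(2), h(1))].into_iter().collect();

    assert_eq!(latest_valid_hash(h(3), &invalid, &chain), Some(h(1)));
    assert_eq!(latest_valid_hash(h(9), &invalid, &chain), None);
}
```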
- fn check_invalid_ancestor_with_head( - &mut self, - check: B256, - head: B256, - ) -> ProviderResult> { - // check if the check hash was previously marked as invalid - let Some(block) = self.invalid_headers.get(&check) else { return Ok(None) }; - - // populate the latest valid hash field - let status = self.prepare_invalid_response(block.parent)?; - - // insert the head block into the invalid header cache - self.invalid_headers.insert_with_invalid_ancestor(head, block); - - Ok(Some(status)) - } - - /// Checks if the given `head` points to an invalid header, which requires a specific response - /// to a forkchoice update. - fn check_invalid_ancestor(&mut self, head: B256) -> ProviderResult> { - // check if the head was previously marked as invalid - let Some(block) = self.invalid_headers.get(&head) else { return Ok(None) }; - - // populate the latest valid hash field - Ok(Some(self.prepare_invalid_response(block.parent)?)) - } - - /// Record latency metrics for one call to make a block canonical - /// Takes start time of the call and result of the make canonical call - /// - /// Handles cases for error, already canonical and committed blocks - fn record_make_canonical_latency( - &self, - start: Instant, - outcome: &Result, - ) -> Duration { - let elapsed = start.elapsed(); - self.metrics.make_canonical_latency.record(elapsed); - match outcome { - Ok(CanonicalOutcome::AlreadyCanonical { .. }) => { - self.metrics.make_canonical_already_canonical_latency.record(elapsed) - } - Ok(CanonicalOutcome::Committed { .. }) => { - self.metrics.make_canonical_committed_latency.record(elapsed) - } - Err(_) => self.metrics.make_canonical_error_latency.record(elapsed), - } - elapsed - } - - /// Ensures that the given forkchoice state is consistent, assuming the head block has been - /// made canonical. - /// - /// If the forkchoice state is consistent, this will return Ok(None). Otherwise, this will - /// return an instance of [`OnForkChoiceUpdated`] that is INVALID. - /// - /// This also updates the safe and finalized blocks in the [`CanonChainTracker`], if they are - /// consistent with the head block. - fn ensure_consistent_forkchoice_state( - &self, - state: ForkchoiceState, - ) -> ProviderResult> { - // Ensure that the finalized block, if not zero, is known and in the canonical chain - // after the head block is canonicalized. - // - // This ensures that the finalized block is consistent with the head block, i.e. the - // finalized block is an ancestor of the head block. - if !state.finalized_block_hash.is_zero() && - !self.blockchain.is_canonical(state.finalized_block_hash)? - { - return Ok(Some(OnForkChoiceUpdated::invalid_state())) - } - - // Finalized block is consistent, so update it in the canon chain tracker. - self.update_finalized_block(state.finalized_block_hash)?; - - // Also ensure that the safe block, if not zero, is known and in the canonical chain - // after the head block is canonicalized. - // - // This ensures that the safe block is consistent with the head block, i.e. the safe - // block is an ancestor of the head block. - if !state.safe_block_hash.is_zero() && - !self.blockchain.is_canonical(state.safe_block_hash)? - { - return Ok(Some(OnForkChoiceUpdated::invalid_state())) - } - - // Safe block is consistent, so update it in the canon chain tracker. - self.update_safe_block(state.safe_block_hash)?; - - Ok(None) - } - - /// Sets the state of the canon chain tracker based to the given head. - /// - /// This expects the given head to be the new canonical head. 
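The rule enforced by the removed `ensure_consistent_forkchoice_state` is that any non-zero safe or finalized hash must already be canonical once the head has been canonicalized; otherwise the whole update is answered as an invalid state. Below is a minimal sketch of that rule, with a closure standing in for the provider's canonical lookup (all names illustrative).

```rust
type Hash = [u8; 32];
const ZERO: Hash = [0u8; 32];

/// Returns true under the removed engine's rule: once the head has been made
/// canonical, every non-zero safe/finalized hash must itself be canonical
/// (i.e. an ancestor of the head); otherwise the FCU is an invalid state.
fn is_consistent_forkchoice_state(
    safe: Hash,
    finalized: Hash,
    is_canonical: impl Fn(Hash) -> bool,
) -> bool {
    for hash in [finalized, safe] {
        if hash != ZERO && !is_canonical(hash) {
            return false;
        }
    }
    true
}

fn main() {
    // Pretend blocks whose first byte is <= 3 are canonical.
    let canonical = |h: Hash| h[0] <= 3;
    assert!(is_consistent_forkchoice_state([2; 32], [1; 32], canonical));
    assert!(!is_consistent_forkchoice_state([9; 32], [1; 32], canonical));
    // Zero hashes mean "unset" in the Engine API and are always accepted.
    assert!(is_consistent_forkchoice_state(ZERO, ZERO, canonical));
}
```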
- /// - /// Additionally, updates the head used for p2p handshakes. - /// - /// This also updates the tracked safe and finalized blocks, and should be called before - /// returning a VALID forkchoice update response - fn update_canon_chain(&self, head: SealedHeader, update: &ForkchoiceState) -> RethResult<()> { - self.update_head(head)?; - self.update_finalized_block(update.finalized_block_hash)?; - self.update_safe_block(update.safe_block_hash)?; - Ok(()) - } - - /// Updates the state of the canon chain tracker based on the given head. - /// - /// This expects the given head to be the new canonical head. - /// Additionally, updates the head used for p2p handshakes. - /// - /// This should be called before returning a VALID forkchoice update response - #[inline] - fn update_head(&self, head: SealedHeader) -> RethResult<()> { - let mut head_block = Head { - number: head.number, - hash: head.hash(), - difficulty: head.difficulty, - timestamp: head.timestamp, - // NOTE: this will be set later - total_difficulty: Default::default(), - }; - - // we update the tracked header first - self.blockchain.set_canonical_head(head); - - head_block.total_difficulty = - self.blockchain.header_td_by_number(head_block.number)?.ok_or_else(|| { - RethError::Provider(ProviderError::TotalDifficultyNotFound(head_block.number)) - })?; - self.sync_state_updater.update_status(head_block); - - Ok(()) - } - - /// Updates the tracked safe block if we have it - /// - /// Returns an error if the block is not found. - #[inline] - fn update_safe_block(&self, safe_block_hash: B256) -> ProviderResult<()> { - if !safe_block_hash.is_zero() { - if self.blockchain.safe_block_hash()? == Some(safe_block_hash) { - // nothing to update - return Ok(()) - } - - let safe = self - .blockchain - .find_block_by_hash(safe_block_hash, BlockSource::Any)? - .ok_or(ProviderError::UnknownBlockHash(safe_block_hash))?; - self.blockchain.set_safe(SealedHeader::new(safe.split().0, safe_block_hash)); - } - Ok(()) - } - - /// Updates the tracked finalized block if we have it - /// - /// Returns an error if the block is not found. - #[inline] - fn update_finalized_block(&self, finalized_block_hash: B256) -> ProviderResult<()> { - if !finalized_block_hash.is_zero() { - if self.blockchain.finalized_block_hash()? == Some(finalized_block_hash) { - // nothing to update - return Ok(()) - } - - let finalized = self - .blockchain - .find_block_by_hash(finalized_block_hash, BlockSource::Any)? - .ok_or(ProviderError::UnknownBlockHash(finalized_block_hash))?; - self.blockchain.finalize_block(finalized.header().number())?; - self.blockchain - .set_finalized(SealedHeader::new(finalized.split().0, finalized_block_hash)); - } - Ok(()) - } - - /// Handler for a failed a forkchoice update due to a canonicalization error. - /// - /// This will determine if the state's head is invalid, and if so, return immediately. - /// - /// If the newest head is not invalid, then this will trigger a new pipeline run to sync the gap - /// - /// See [`Self::on_forkchoice_updated`] and [`BlockchainTreeEngine::make_canonical`]. - fn on_failed_canonical_forkchoice_update( - &mut self, - state: &ForkchoiceState, - error: CanonicalError, - ) -> ProviderResult { - debug_assert!(self.sync.is_pipeline_idle(), "pipeline must be idle"); - - // check if the new head was previously invalidated, if so then we deem this FCU - // as invalid - if let Some(invalid_ancestor) = self.check_invalid_ancestor(state.head_block_hash)? 
{ - warn!(target: "consensus::engine", %error, ?state, ?invalid_ancestor, head=?state.head_block_hash, "Failed to canonicalize the head hash, head is also considered invalid"); - debug!(target: "consensus::engine", head=?state.head_block_hash, current_error=%error, "Head was previously marked as invalid"); - return Ok(invalid_ancestor) - } - - match &error { - CanonicalError::Validation(BlockValidationError::BlockPreMerge { .. }) => { - warn!(target: "consensus::engine", %error, ?state, "Failed to canonicalize the head hash"); - return Ok(PayloadStatus::from_status(PayloadStatusEnum::Invalid { - validation_error: error.to_string(), - }) - .with_latest_valid_hash(B256::ZERO)) - } - CanonicalError::BlockchainTree(BlockchainTreeError::BlockHashNotFoundInChain { - .. - }) => { - // This just means we couldn't find the block when attempting to make it canonical, - // so we should not warn the user, since this will result in us attempting to sync - // to a new target and is considered normal operation during sync - } - CanonicalError::OptimisticTargetRevert(block_number) => { - self.sync.set_pipeline_sync_target(PipelineTarget::Unwind(*block_number)); - return Ok(PayloadStatus::from_status(PayloadStatusEnum::Syncing)) - } - _ => { - warn!(target: "consensus::engine", %error, ?state, "Failed to canonicalize the head hash"); - // TODO(mattsse) better error handling before attempting to sync (FCU could be - // invalid): only trigger sync if we can't determine whether the FCU is invalid - } - } - - // we assume the FCU is valid and at least the head is missing, - // so we need to start syncing to it - // - // find the appropriate target to sync to, if we don't have the safe block hash then we - // start syncing to the safe block via pipeline first - let target = if self.forkchoice_state_tracker.is_empty() && - // check that safe block is valid and missing - !state.safe_block_hash.is_zero() && - self.blockchain.block_number(state.safe_block_hash).ok().flatten().is_none() - { - state.safe_block_hash - } else { - state.head_block_hash - }; - - // we need to first check the buffer for the target and its ancestors - let target = self.lowest_buffered_ancestor_or(target); - - // if the threshold is zero, we should not download the block first, and just use the - // pipeline. Otherwise we use the tree to insert the block first - if self.pipeline_run_threshold == 0 { - // use the pipeline to sync to the target - trace!(target: "consensus::engine", %target, "Triggering pipeline run to sync missing ancestors of the new head"); - self.sync.set_pipeline_sync_target(target.into()); - } else { - // trigger a full block download for missing hash, or the parent of its lowest buffered - // ancestor - trace!(target: "consensus::engine", request=%target, "Triggering full block download for missing ancestors of the new head"); - self.sync.download_full_block(target); - } - - debug!(target: "consensus::engine", %target, "Syncing to new target"); - Ok(PayloadStatus::from_status(PayloadStatusEnum::Syncing)) - } - - /// Return the parent hash of the lowest buffered ancestor for the requested block, if there - /// are any buffered ancestors. If there are no buffered ancestors, and the block itself does - /// not exist in the buffer, this returns the hash that is passed in. - /// - /// Returns the parent hash of the block itself if the block is buffered and has no other - /// buffered ancestors. 
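On a non-fatal canonicalization failure, the removed handler picks a sync target (the safe hash when no forkchoice state has been tracked yet and the safe block is set but unknown, otherwise the head), resolves it to the lowest buffered ancestor, and then chooses between a pipeline run and a full-block download based on the threshold. A condensed sketch of that selection follows; `SyncAction`, `choose_sync_action`, and the closure lookup are illustrative stand-ins.

```rust
type Hash = [u8; 32];

#[derive(Debug, PartialEq)]
enum SyncAction {
    /// Threshold of zero: hand the target straight to the pipeline.
    RunPipeline(Hash),
    /// Otherwise download the target (or the parent of its lowest buffered
    /// ancestor) and let the blockchain tree insert it.
    DownloadFullBlock(Hash),
}

/// Mirrors the target choice in the removed `on_failed_canonical_forkchoice_update`:
/// prefer the safe hash when no forkchoice state has been tracked yet and the
/// safe block is set but unknown, otherwise sync to the head hash.
fn choose_sync_action(
    head: Hash,
    safe: Hash,
    no_state_tracked_yet: bool,
    safe_block_known: bool,
    lowest_buffered_ancestor_or: impl Fn(Hash) -> Hash,
    pipeline_run_threshold: u64,
) -> SyncAction {
    let zero = [0u8; 32];
    let target = if no_state_tracked_yet && safe != zero && !safe_block_known {
        safe
    } else {
        head
    };
    let target = lowest_buffered_ancestor_or(target);
    if pipeline_run_threshold == 0 {
        SyncAction::RunPipeline(target)
    } else {
        SyncAction::DownloadFullBlock(target)
    }
}

fn main() {
    let identity = |h: Hash| h; // no buffered ancestors in this toy example
    let action = choose_sync_action([7; 32], [5; 32], true, false, identity, 0);
    assert_eq!(action, SyncAction::RunPipeline([5; 32]));
    let action = choose_sync_action([7; 32], [5; 32], false, false, identity, 32);
    assert_eq!(action, SyncAction::DownloadFullBlock([7; 32]));
}
```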
- fn lowest_buffered_ancestor_or(&self, hash: B256) -> B256 { - self.blockchain - .lowest_buffered_ancestor(hash) - .map(|block| block.parent_hash) - .unwrap_or_else(|| hash) - } - - /// When the Consensus layer receives a new block via the consensus gossip protocol, - /// the transactions in the block are sent to the execution layer in the form of a - /// [`ExecutionPayload`]. The Execution layer executes the transactions and validates the - /// state in the block header, then passes validation data back to Consensus layer, that - /// adds the block to the head of its own blockchain and attests to it. The block is then - /// broadcast over the consensus p2p network in the form of a "Beacon block". - /// - /// These responses should adhere to the [Engine API Spec for - /// `engine_newPayload`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification). - /// - /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and - /// returns an error if an internal error occurred. - #[instrument(level = "trace", skip(self, payload, sidecar), fields(block_hash = ?payload.block_hash(), block_number = %payload.block_number(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")] - fn on_new_payload( - &mut self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, - ) -> Result, BeaconOnNewPayloadError> { - self.metrics.new_payload_messages.increment(1); - - // Ensures that the given payload does not violate any consensus rules that concern the - // block's layout, like: - // - missing or invalid base fee - // - invalid extra data - // - invalid transactions - // - incorrect hash - // - the versioned hashes passed with the payload do not exactly match transaction - // versioned hashes - // - the block does not contain blob transactions if it is pre-cancun - // - // This validates the following engine API rule: - // - // 3. Given the expected array of blob versioned hashes client software **MUST** run its - // validation by taking the following steps: - // - // 1. Obtain the actual array by concatenating blob versioned hashes lists - // (`tx.blob_versioned_hashes`) of each [blob - // transaction](https://eips.ethereum.org/EIPS/eip-4844#new-transaction-type) included - // in the payload, respecting the order of inclusion. If the payload has no blob - // transactions the expected array **MUST** be `[]`. - // - // 2. Return `{status: INVALID, latestValidHash: null, validationError: errorMessage | - // null}` if the expected and the actual arrays don't match. - // - // This validation **MUST** be instantly run in all cases even during active sync process. - let parent_hash = payload.parent_hash(); - let block = match self.payload_validator.ensure_well_formed_payload(payload, sidecar) { - Ok(block) => block, - Err(error) => { - error!(target: "consensus::engine", %error, "Invalid payload"); - // we need to convert the error to a payload status (response to the CL) - - let latest_valid_hash = - if error.is_block_hash_mismatch() || error.is_invalid_versioned_hashes() { - // Engine-API rules: - // > `latestValidHash: null` if the blockHash validation has failed () - // > `latestValidHash: null` if the expected and the actual arrays don't match () - None - } else { - self.latest_valid_hash_for_invalid_payload(parent_hash) - .map_err(BeaconOnNewPayloadError::internal)? 
- }; - - let status = PayloadStatusEnum::from(error); - return Ok(Either::Left(PayloadStatus::new(status, latest_valid_hash))) - } - }; - - let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block.hash()); - if lowest_buffered_ancestor == block.hash() { - lowest_buffered_ancestor = block.parent_hash; - } - - // now check the block itself - if let Some(status) = self - .check_invalid_ancestor_with_head(lowest_buffered_ancestor, block.hash()) - .map_err(BeaconOnNewPayloadError::internal)? - { - Ok(Either::Left(status)) - } else { - Ok(Either::Right(block)) - } - } - - /// Validates the payload attributes with respect to the header and fork choice state. - /// - /// Note: At this point, the fork choice update is considered to be VALID, however, we can still - /// return an error if the payload attributes are invalid. - fn process_payload_attributes( - &self, - attrs: ::PayloadAttributes, - head: Header, - state: ForkchoiceState, - version: EngineApiMessageVersion, - ) -> OnForkChoiceUpdated { - // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp - // of a block referenced by forkchoiceState.headBlockHash. If this condition isn't held - // client software MUST respond with -38003: `Invalid payload attributes` and MUST NOT - // begin a payload build process. In such an event, the forkchoiceState update MUST NOT - // be rolled back. - if attrs.timestamp() <= head.timestamp { - return OnForkChoiceUpdated::invalid_payload_attributes() - } - - // 8. Client software MUST begin a payload build process building on top of - // forkchoiceState.headBlockHash and identified via buildProcessId value if - // payloadAttributes is not null and the forkchoice state has been updated successfully. - // The build process is specified in the Payload building section. - match <::PayloadBuilderAttributes as PayloadBuilderAttributes>::try_new( - state.head_block_hash, - attrs, - version as u8 - ) { - Ok(attributes) => { - // send the payload to the builder and return the receiver for the pending payload - // id, initiating payload job is handled asynchronously - let pending_payload_id = self.payload_builder.send_new_payload(attributes); - - // Client software MUST respond to this method call in the following way: - // { - // payloadStatus: { - // status: VALID, - // latestValidHash: forkchoiceState.headBlockHash, - // validationError: null - // }, - // payloadId: buildProcessId - // } - // - // if the payload is deemed VALID and the build process has begun. - OnForkChoiceUpdated::updated_with_pending_payload_id( - PayloadStatus::new(PayloadStatusEnum::Valid, Some(state.head_block_hash)), - pending_payload_id, - ) - } - Err(_) => OnForkChoiceUpdated::invalid_payload_attributes(), - } - } - - /// When the pipeline is active, the tree is unable to commit any additional blocks since the - /// pipeline holds exclusive access to the database. - /// - /// In this scenario we buffer the payload in the tree if the payload is valid, once the - /// pipeline is finished, the tree is then able to also use the buffered payloads to commit to a - /// (newer) canonical chain. - /// - /// This will return `SYNCING` if the block was buffered successfully, and an error if an error - /// occurred while buffering the block. 
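The only hard precondition the removed `process_payload_attributes` checks before starting a build job is that `payloadAttributes.timestamp` is strictly greater than the head block's timestamp; anything else yields the `-38003: Invalid payload attributes` error and no build begins. A trivial sketch of that check (enum and function names are illustrative):

```rust
/// Outcome of validating payload attributes against the new head, as in the
/// removed `process_payload_attributes` (names simplified).
#[derive(Debug, PartialEq)]
enum AttributesOutcome {
    /// Attributes accepted: a payload build job would be started and its id returned.
    StartBuild,
    /// Engine API error -38003: timestamp not strictly greater than the head's.
    InvalidPayloadAttributes,
}

fn validate_attributes(attrs_timestamp: u64, head_timestamp: u64) -> AttributesOutcome {
    if attrs_timestamp <= head_timestamp {
        AttributesOutcome::InvalidPayloadAttributes
    } else {
        AttributesOutcome::StartBuild
    }
}

fn main() {
    assert_eq!(
        validate_attributes(1_700_000_012, 1_700_000_000),
        AttributesOutcome::StartBuild
    );
    // Equal timestamps are rejected, exactly like older ones.
    assert_eq!(
        validate_attributes(1_700_000_000, 1_700_000_000),
        AttributesOutcome::InvalidPayloadAttributes
    );
}
```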
- #[instrument(level = "trace", skip_all, target = "consensus::engine", ret)] - fn try_buffer_payload( - &mut self, - block: SealedBlock, - ) -> Result { - self.blockchain.buffer_block_without_senders(block)?; - Ok(PayloadStatus::from_status(PayloadStatusEnum::Syncing)) - } - - /// Attempts to insert a new payload into the tree. - /// - /// Caution: This expects that the pipeline is idle. - #[instrument(level = "trace", skip_all, target = "consensus::engine", ret)] - fn try_insert_new_payload( - &mut self, - block: SealedBlock, - ) -> Result { - debug_assert!(self.sync.is_pipeline_idle(), "pipeline must be idle"); - - let block_hash = block.hash(); - let start = Instant::now(); - let status = self - .blockchain - .insert_block_without_senders(block.clone(), BlockValidationKind::Exhaustive)?; - - let elapsed = start.elapsed(); - let mut latest_valid_hash = None; - let status = match status { - InsertPayloadOk::Inserted(BlockStatus::Valid(attachment)) => { - latest_valid_hash = Some(block_hash); - let block = Arc::new(block); - let event = if attachment.is_canonical() { - BeaconConsensusEngineEvent::CanonicalBlockAdded(block, elapsed) - } else { - BeaconConsensusEngineEvent::ForkBlockAdded(block, elapsed) - }; - self.event_sender.notify(event); - PayloadStatusEnum::Valid - } - InsertPayloadOk::AlreadySeen(BlockStatus::Valid(_)) => { - latest_valid_hash = Some(block_hash); - PayloadStatusEnum::Valid - } - InsertPayloadOk::Inserted(BlockStatus::Disconnected { .. }) | - InsertPayloadOk::AlreadySeen(BlockStatus::Disconnected { .. }) => { - // check if the block's parent is already marked as invalid - if let Some(status) = - self.check_invalid_ancestor_with_head(block.parent_hash, block.hash()).map_err( - |error| InsertBlockError::new(block, InsertBlockErrorKind::Provider(error)), - )? - { - return Ok(status) - } - - // not known to be invalid, but we don't know anything else - PayloadStatusEnum::Syncing - } - }; - Ok(PayloadStatus::new(status, latest_valid_hash)) - } - - /// This handles downloaded blocks that are shown to be disconnected from the canonical chain. - /// - /// This mainly compares the missing parent of the downloaded block with the current canonical - /// tip, and decides whether or not the pipeline should be run. - /// - /// The canonical tip is compared to the missing parent using `exceeds_pipeline_run_threshold`, - /// which returns true if the missing parent is sufficiently ahead of the canonical tip. If so, - /// the pipeline is run. Otherwise, we need to insert blocks using the blockchain tree, and - /// must download blocks outside of the pipeline. In this case, the distance is used to - /// determine how many blocks we should download at once. 
- fn on_disconnected_block( - &mut self, - downloaded_block: BlockNumHash, - missing_parent: BlockNumHash, - head: BlockNumHash, - ) { - // compare the missing parent with the canonical tip - if let Some(target) = self.can_pipeline_sync_to_finalized( - head.number, - missing_parent.number, - Some(downloaded_block), - ) { - // we don't have the block yet and the distance exceeds the allowed - // threshold - self.sync.set_pipeline_sync_target(target.into()); - // we can exit early here because the pipeline will take care of syncing - return - } - - // continue downloading the missing parent - // - // this happens if either: - // * the missing parent block num < canonical tip num - // * this case represents a missing block on a fork that is shorter than the canonical - // chain - // * the missing parent block num >= canonical tip num, but the number of missing blocks is - // less than the pipeline threshold - // * this case represents a potentially long range of blocks to download and execute - if let Some(distance) = self.distance_from_local_tip(head.number, missing_parent.number) { - self.sync.download_block_range(missing_parent.hash, distance) - } else { - // This happens when the missing parent is on an outdated - // sidechain - self.sync.download_full_block(missing_parent.hash); - } - } - - /// Attempt to form a new canonical chain based on the current sync target. - /// - /// This is invoked when we successfully __downloaded__ a new block from the network which - /// resulted in [`BlockStatus::Valid`]. - /// - /// Note: This will not succeed if the sync target has changed since the block download request - /// was issued and the new target is still disconnected and additional missing blocks are - /// downloaded - fn try_make_sync_target_canonical( - &mut self, - inserted: BlockNumHash, - ) -> Result<(), (B256, CanonicalError)> { - let Some(target) = self.forkchoice_state_tracker.sync_target_state() else { return Ok(()) }; - - // optimistically try to make the head of the current FCU target canonical, the sync - // target might have changed since the block download request was issued - // (new FCU received) - let start = Instant::now(); - let make_canonical_result = self.blockchain.make_canonical(target.head_block_hash); - let elapsed = self.record_make_canonical_latency(start, &make_canonical_result); - match make_canonical_result { - Ok(outcome) => { - if let CanonicalOutcome::Committed { head } = &outcome { - self.event_sender.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( - Box::new(head.clone()), - elapsed, - )); - } - - let new_head = outcome.into_header(); - debug!(target: "consensus::engine", hash=?new_head.hash(), number=new_head.number, "Canonicalized new head"); - - // we can update the FCU blocks - if let Err(err) = self.update_canon_chain(new_head, &target) { - debug!(target: "consensus::engine", ?err, ?target, "Failed to update the canonical chain tracker"); - } - - // we're no longer syncing - self.sync_state_updater.update_sync_state(SyncState::Idle); - - // clear any active block requests - self.sync.clear_block_download_requests(); - Ok(()) - } - Err(err) => { - // if we failed to make the FCU's head canonical, because we don't have that - // block yet, then we can try to make the inserted block canonical if we know - // it's part of the canonical chain: if it's the safe or the finalized block - if err.is_block_hash_not_found() { - // if the inserted block is the currently targeted `finalized` or `safe` - // block, we will attempt to make them canonical, - 
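For a downloaded block whose parent is missing, the removed `on_disconnected_block` chooses between re-running the pipeline, downloading a bounded range, or fetching a single full block, depending on where the missing parent sits relative to the canonical tip. The sketch below folds the finalized-block refinement into a plain distance check; the names and threshold are illustrative.

```rust
#[derive(Debug, PartialEq)]
enum DisconnectedAction {
    /// Gap to the target is large: let the pipeline sync.
    RunPipeline,
    /// Missing parent is ahead of the tip by a small gap: download that range.
    DownloadRange(u64),
    /// Missing parent is at or below the tip (an outdated sidechain): fetch it alone.
    DownloadFullBlock,
}

/// Mirrors the control flow of the removed `on_disconnected_block`, minus the
/// finalized-block tracking.
fn on_disconnected_block(
    canonical_tip: u64,
    missing_parent_number: u64,
    pipeline_run_threshold: u64,
) -> DisconnectedAction {
    let exceeds = missing_parent_number > canonical_tip
        && missing_parent_number - canonical_tip > pipeline_run_threshold;
    if exceeds {
        return DisconnectedAction::RunPipeline;
    }
    if missing_parent_number > canonical_tip {
        DisconnectedAction::DownloadRange(missing_parent_number - canonical_tip)
    } else {
        DisconnectedAction::DownloadFullBlock
    }
}

fn main() {
    assert_eq!(on_disconnected_block(100, 5_000, 32), DisconnectedAction::RunPipeline);
    assert_eq!(on_disconnected_block(100, 110, 32), DisconnectedAction::DownloadRange(10));
    assert_eq!(on_disconnected_block(100, 90, 32), DisconnectedAction::DownloadFullBlock);
}
```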
// because they are also part of the canonical chain and - // their missing block range might already be downloaded (buffered). - if let Some(target_hash) = - ForkchoiceStateHash::find(&target, inserted.hash).filter(|h| !h.is_head()) - { - // TODO: do not ignore this - let _ = self.blockchain.make_canonical(*target_hash.as_ref()); - } - } else if let Some(block_number) = err.optimistic_revert_block_number() { - self.sync.set_pipeline_sync_target(PipelineTarget::Unwind(block_number)); - } - - Err((target.head_block_hash, err)) - } - } - } - - /// Event handler for events emitted by the [`EngineSyncController`]. - /// - /// This returns a result to indicate whether the engine future should resolve (fatal error). - fn on_sync_event( - &mut self, - event: EngineSyncEvent, - ) -> Result { - let outcome = match event { - EngineSyncEvent::FetchedFullBlock(block) => { - trace!(target: "consensus::engine", hash=?block.hash(), number=%block.number, "Downloaded full block"); - // Insert block only if the block's parent is not marked as invalid - if self - .check_invalid_ancestor_with_head(block.parent_hash, block.hash()) - .map_err(|error| BeaconConsensusEngineError::Common(error.into()))? - .is_none() - { - self.set_blockchain_tree_action( - BlockchainTreeAction::InsertDownloadedPayload { block }, - ); - } - EngineEventOutcome::Processed - } - EngineSyncEvent::PipelineStarted(target) => { - trace!(target: "consensus::engine", ?target, continuous = target.is_none(), "Started the pipeline"); - self.metrics.pipeline_runs.increment(1); - self.sync_state_updater.update_sync_state(SyncState::Syncing); - EngineEventOutcome::Processed - } - EngineSyncEvent::PipelineFinished { result, reached_max_block } => { - trace!(target: "consensus::engine", ?result, ?reached_max_block, "Pipeline finished"); - // Any pipeline error at this point is fatal. - let ctrl = result?; - if reached_max_block { - // Terminate the sync early if it's reached the maximum user-configured block. - EngineEventOutcome::ReachedMaxBlock - } else { - self.on_pipeline_outcome(ctrl)?; - EngineEventOutcome::Processed - } - } - EngineSyncEvent::PipelineTaskDropped => { - error!(target: "consensus::engine", "Failed to receive spawned pipeline"); - return Err(BeaconConsensusEngineError::PipelineChannelClosed) - } - }; - - Ok(outcome) - } - - /// Invoked when the pipeline has successfully finished. - /// - /// Updates the internal sync state depending on the pipeline configuration, - /// the outcome of the pipeline run and the last observed forkchoice state. - fn on_pipeline_outcome(&mut self, ctrl: ControlFlow) -> RethResult<()> { - // Pipeline unwound, memorize the invalid block and wait for CL for next sync target. - if let ControlFlow::Unwind { bad_block, .. } = ctrl { - warn!(target: "consensus::engine", invalid_num_hash=?bad_block.block, "Bad block detected in unwind"); - // update the `invalid_headers` cache with the new invalid header - self.invalid_headers.insert(*bad_block); - return Ok(()) - } - - let sync_target_state = match self.forkchoice_state_tracker.sync_target_state() { - Some(current_state) => current_state, - None => { - // This is only possible if the node was run with `debug.tip` - // argument and without CL. 
- warn!(target: "consensus::engine", "No fork choice state available"); - return Ok(()) - } - }; - - if sync_target_state.finalized_block_hash.is_zero() { - self.set_canonical_head(ctrl.block_number().unwrap_or_default())?; - self.blockchain.update_block_hashes_and_clear_buffered()?; - self.blockchain.connect_buffered_blocks_to_canonical_hashes()?; - // We are on an optimistic syncing process, better to wait for the next FCU to handle - return Ok(()) - } - - // Next, we check if we need to schedule another pipeline run or transition - // to live sync via tree. - // This can arise if we buffer the forkchoice head, and if the head is an - // ancestor of an invalid block. - // - // * The forkchoice head could be buffered if it were first sent as a `newPayload` request. - // - // In this case, we won't have the head hash in the database, so we would - // set the pipeline sync target to a known-invalid head. - // - // This is why we check the invalid header cache here. - let lowest_buffered_ancestor = - self.lowest_buffered_ancestor_or(sync_target_state.head_block_hash); - - // this inserts the head into invalid headers cache - // if the lowest buffered ancestor is invalid - if self - .check_invalid_ancestor_with_head( - lowest_buffered_ancestor, - sync_target_state.head_block_hash, - )? - .is_some() - { - warn!( - target: "consensus::engine", - invalid_ancestor = %lowest_buffered_ancestor, - head = %sync_target_state.head_block_hash, - "Current head has an invalid ancestor" - ); - return Ok(()) - } - - // get the block number of the finalized block, if we have it - let newest_finalized = self - .blockchain - .buffered_header_by_hash(sync_target_state.finalized_block_hash) - .map(|header| header.number); - - // The block number that the pipeline finished at - if the progress or newest - // finalized is None then we can't check the distance anyways. - // - // If both are Some, we perform another distance check and return the desired - // pipeline target - let pipeline_target = - ctrl.block_number().zip(newest_finalized).and_then(|(progress, finalized_number)| { - // Determines whether or not we should run the pipeline again, in case - // the new gap is large enough to warrant - // running the pipeline. - self.can_pipeline_sync_to_finalized(progress, finalized_number, None) - }); - - // If the distance is large enough, we should run the pipeline again to prevent - // the tree update from executing too many blocks and blocking. - if let Some(target) = pipeline_target { - // run the pipeline to the target since the distance is sufficient - self.sync.set_pipeline_sync_target(target.into()); - } else if let Some(number) = - self.blockchain.block_number(sync_target_state.finalized_block_hash)? - { - // Finalized block is in the database, attempt to restore the tree with - // the most recent canonical hashes. - self.blockchain.connect_buffered_blocks_to_canonical_hashes_and_finalize(number).inspect_err(|error| { - error!(target: "consensus::engine", %error, "Error restoring blockchain tree state"); - })?; - } else { - // We don't have the finalized block in the database, so we need to - // trigger another pipeline run. - self.sync.set_pipeline_sync_target(sync_target_state.finalized_block_hash.into()); - } - - Ok(()) - } - - fn set_canonical_head(&self, max_block: BlockNumber) -> RethResult<()> { - let max_header = self.blockchain.sealed_header(max_block) - .inspect_err(|error| { - error!(target: "consensus::engine", %error, "Error getting canonical header for continuous sync"); - })? 
- .ok_or_else(|| ProviderError::HeaderNotFound(max_block.into()))?; - self.blockchain.set_canonical_head(max_header); - - Ok(()) - } - - fn on_hook_result(&self, polled_hook: PolledHook) -> Result<(), BeaconConsensusEngineError> { - if let EngineHookEvent::Finished(Err(error)) = &polled_hook.event { - error!( - target: "consensus::engine", - name = %polled_hook.name, - ?error, - "Hook finished with error" - ) - } - - if polled_hook.db_access_level.is_read_write() { - match polled_hook.event { - EngineHookEvent::NotReady => {} - EngineHookEvent::Started => { - // If the hook has read-write access to the database, it means that the engine - // can't process any FCU messages from CL. To prevent CL from sending us - // unneeded updates, we need to respond `true` on `eth_syncing` request. - self.sync_state_updater.update_sync_state(SyncState::Syncing) - } - EngineHookEvent::Finished(_) => { - // Hook with read-write access to the database has finished running, so engine - // can process new FCU messages from CL again. It's safe to - // return `false` on `eth_syncing` request. - self.sync_state_updater.update_sync_state(SyncState::Idle); - // If the hook had read-write access to the database, it means that the engine - // may have accumulated some buffered blocks. - if let Err(error) = - self.blockchain.connect_buffered_blocks_to_canonical_hashes() - { - error!(target: "consensus::engine", %error, "Error connecting buffered blocks to canonical hashes on hook result"); - return Err(RethError::Canonical(error).into()) - } - } - } - } - - Ok(()) - } - - /// Process the next set blockchain tree action. - /// The handler might set next blockchain tree action to perform, - /// so the state change should be handled accordingly. - fn on_blockchain_tree_action( - &mut self, - action: BlockchainTreeAction, - ) -> RethResult { - match action { - BlockchainTreeAction::MakeForkchoiceHeadCanonical { state, attrs, tx } => { - let start = Instant::now(); - let result = self.blockchain.make_canonical(state.head_block_hash); - let elapsed = self.record_make_canonical_latency(start, &result); - match self - .on_forkchoice_updated_make_canonical_result(state, attrs, result, elapsed) - { - Ok(on_updated) => { - trace!(target: "consensus::engine", status = ?on_updated, ?state, "Returning forkchoice status"); - let fcu_status = on_updated.forkchoice_status(); - self.on_forkchoice_updated_status(state, on_updated, tx); - - if fcu_status.is_valid() { - let tip_number = self.blockchain.canonical_tip().number; - if self.sync.has_reached_max_block(tip_number) { - // Terminate the sync early if it's reached - // the maximum user configured block. 
- return Ok(EngineEventOutcome::ReachedMaxBlock) - } - } - } - Err(error) => { - let _ = tx.send(Err(RethError::Canonical(error.clone()))); - if error.is_fatal() { - return Err(RethError::Canonical(error)) - } - } - }; - } - BlockchainTreeAction::InsertNewPayload { block, tx } => { - let block_hash = block.hash(); - let block_num_hash = block.num_hash(); - let result = if self.sync.is_pipeline_idle() { - // we can only insert new payloads if the pipeline is _not_ running, because it - // holds exclusive access to the database - self.try_insert_new_payload(block) - } else { - self.try_buffer_payload(block) - }; - - let status = match result { - Ok(status) => status, - Err(error) => { - warn!(target: "consensus::engine", %error, "Error while processing payload"); - - let (block, error) = error.split(); - if !error.is_invalid_block() { - // TODO: revise if any error should be considered fatal at this point. - let _ = - tx.send(Err(BeaconOnNewPayloadError::Internal(Box::new(error)))); - return Ok(EngineEventOutcome::Processed) - } - - // If the error was due to an invalid payload, the payload is added to the - // invalid headers cache and `Ok` with [PayloadStatusEnum::Invalid] is - // returned. - warn!(target: "consensus::engine", invalid_hash=?block.hash(), invalid_number=?block.number, %error, "Invalid block error on new payload"); - let latest_valid_hash = if error.is_block_pre_merge() { - // zero hash must be returned if block is pre-merge - Some(B256::ZERO) - } else { - self.latest_valid_hash_for_invalid_payload(block.parent_hash)? - }; - // keep track of the invalid header - self.invalid_headers.insert(block.sealed_header().block_with_parent()); - PayloadStatus::new( - PayloadStatusEnum::Invalid { validation_error: error.to_string() }, - latest_valid_hash, - ) - } - }; - - if status.is_valid() { - if let Some(target) = self.forkchoice_state_tracker.sync_target_state() { - // if we're currently syncing and the inserted block is the targeted - // FCU head block, we can try to make it canonical. - if block_hash == target.head_block_hash { - self.set_blockchain_tree_action( - BlockchainTreeAction::MakeNewPayloadCanonical { - payload_num_hash: block_num_hash, - status, - tx, - }, - ); - return Ok(EngineEventOutcome::Processed) - } - } - // block was successfully inserted, so we can cancel the full block - // request, if any exists - self.sync.cancel_full_block_request(block_hash); - } - - trace!(target: "consensus::engine", ?status, "Returning payload status"); - let _ = tx.send(Ok(status)); - } - BlockchainTreeAction::MakeNewPayloadCanonical { payload_num_hash, status, tx } => { - let status = match self.try_make_sync_target_canonical(payload_num_hash) { - Ok(()) => status, - Err((_hash, error)) => { - if error.is_fatal() { - let response = - Err(BeaconOnNewPayloadError::Internal(Box::new(error.clone()))); - let _ = tx.send(response); - return Err(RethError::Canonical(error)) - } else if error.optimistic_revert_block_number().is_some() { - // engine already set the pipeline unwind target on - // `try_make_sync_target_canonical` - PayloadStatus::from_status(PayloadStatusEnum::Syncing) - } else { - // If we could not make the sync target block canonical, - // we should return the error as an invalid payload status. 
- PayloadStatus::new( - PayloadStatusEnum::Invalid { validation_error: error.to_string() }, - // TODO: return a proper latest valid hash - // See: - self.forkchoice_state_tracker.last_valid_head(), - ) - } - } - }; - - trace!(target: "consensus::engine", ?status, "Returning payload status"); - let _ = tx.send(Ok(status)); - } - - BlockchainTreeAction::InsertDownloadedPayload { block } => { - let downloaded_num_hash = block.num_hash(); - match self.blockchain.insert_block_without_senders( - block, - BlockValidationKind::SkipStateRootValidation, - ) { - Ok(status) => { - match status { - InsertPayloadOk::Inserted(BlockStatus::Valid(_)) => { - // block is connected to the canonical chain and is valid. - // if it's not connected to current canonical head, the state root - // has not been validated. - if let Err((hash, error)) = - self.try_make_sync_target_canonical(downloaded_num_hash) - { - if error.is_fatal() { - error!(target: "consensus::engine", %error, "Encountered fatal error while making sync target canonical: {:?}, {:?}", error, hash); - } else if !error.is_block_hash_not_found() { - debug!( - target: "consensus::engine", - "Unexpected error while making sync target canonical: {:?}, {:?}", - error, - hash - ) - } - } - } - InsertPayloadOk::Inserted(BlockStatus::Disconnected { - head, - missing_ancestor: missing_parent, - }) => { - // block is not connected to the canonical head, we need to download - // its missing branch first - self.on_disconnected_block( - downloaded_num_hash, - missing_parent, - head, - ); - } - _ => (), - } - } - Err(err) => { - warn!(target: "consensus::engine", %err, "Failed to insert downloaded block"); - if err.kind().is_invalid_block() { - let (block, err) = err.split(); - warn!(target: "consensus::engine", invalid_number=?block.number, invalid_hash=?block.hash(), %err, "Marking block as invalid"); - - self.invalid_headers.insert(block.sealed_header().block_with_parent()); - } - } - } - } - }; - Ok(EngineEventOutcome::Processed) - } -} - -/// On initialization, the consensus engine will poll the message receiver and return -/// [`Poll::Pending`] until the first forkchoice update message is received. -/// -/// As soon as the consensus engine receives the first forkchoice updated message and updates the -/// local forkchoice state, it will launch the pipeline to sync to the head hash. -/// While the pipeline is syncing, the consensus engine will keep processing messages from the -/// receiver and forwarding them to the blockchain tree. -impl Future for BeaconConsensusEngine -where - N: TreeNodeTypes, - Client: EthBlockClient + 'static, - BT: BlockchainTreeEngine - + BlockReader, Header = HeaderTy> - + BlockIdReader - + CanonChainTracker
> - + StageCheckpointReader - + ChainSpecProvider - + Unpin - + 'static, -{ - type Output = Result<(), BeaconConsensusEngineError>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.get_mut(); - - // Control loop that advances the state - 'main: loop { - // Poll a running hook with db write access (if any) and CL messages first, draining - // both and then proceeding to polling other parts such as SyncController and hooks. - loop { - // Poll a running hook with db write access first, as we will not be able to process - // any engine messages until it's finished. - if let Poll::Ready(result) = - this.hooks.poll_active_db_write_hook(cx, this.current_engine_hook_context()?)? - { - this.on_hook_result(result)?; - continue - } - - // Process any blockchain tree action result as set forth during engine message - // processing. - if let Some(action) = this.blockchain_tree_action.take() { - match this.on_blockchain_tree_action(action) { - Ok(EngineEventOutcome::Processed) => {} - Ok(EngineEventOutcome::ReachedMaxBlock) => return Poll::Ready(Ok(())), - Err(error) => { - error!(target: "consensus::engine", %error, "Encountered fatal error"); - return Poll::Ready(Err(error.into())) - } - }; - - // Blockchain tree action handler might set next action to take. - continue - } - - // If the db write hook is no longer active and we have a pending forkchoice update, - // process it first. - if this.hooks.active_db_write_hook().is_none() { - if let Some((state, attrs, tx)) = this.pending_forkchoice_update.take() { - this.set_blockchain_tree_action( - BlockchainTreeAction::MakeForkchoiceHeadCanonical { state, attrs, tx }, - ); - continue - } - } - - // Process one incoming message from the CL. We don't drain the messages right away, - // because we want to sneak a polling of running hook in between them. - // - // These messages can affect the state of the SyncController and they're also time - // sensitive, hence they are polled first. - if let Poll::Ready(Some(msg)) = this.engine_message_stream.poll_next_unpin(cx) { - match msg { - BeaconEngineMessage::ForkchoiceUpdated { - state, - payload_attrs, - tx, - version: _version, - } => { - this.on_forkchoice_updated(state, payload_attrs, tx); - } - BeaconEngineMessage::NewPayload { payload, sidecar, tx } => { - match this.on_new_payload(payload, sidecar) { - Ok(Either::Right(block)) => { - this.set_blockchain_tree_action( - BlockchainTreeAction::InsertNewPayload { block, tx }, - ); - } - Ok(Either::Left(status)) => { - let _ = tx.send(Ok(status)); - } - Err(error) => { - let _ = tx.send(Err(error)); - } - } - } - BeaconEngineMessage::TransitionConfigurationExchanged => { - this.blockchain.on_transition_configuration_exchanged(); - } - } - continue - } - - // Both running hook with db write access and engine messages are pending, - // proceed to other polls - break - } - - // process sync events if any - if let Poll::Ready(sync_event) = this.sync.poll(cx) { - match this.on_sync_event(sync_event)? { - // Sync event was successfully processed - EngineEventOutcome::Processed => (), - // Max block has been reached, exit the engine loop - EngineEventOutcome::ReachedMaxBlock => return Poll::Ready(Ok(())), - } - - // this could have taken a while, so we start the next cycle to handle any new - // engine messages - continue 'main - } - - // at this point, all engine messages and sync events are fully drained - - // Poll next hook if all conditions are met: - // 1. Engine and sync messages are fully drained (both pending) - // 2. 
Latest FCU status is not INVALID - if !this.forkchoice_state_tracker.is_latest_invalid() { - if let Poll::Ready(result) = this.hooks.poll_next_hook( - cx, - this.current_engine_hook_context()?, - this.sync.is_pipeline_active(), - )? { - this.on_hook_result(result)?; - - // ensure we're polling until pending while also checking for new engine - // messages before polling the next hook - continue 'main - } - } - - // incoming engine messages and sync events are drained, so we can yield back - // control - return Poll::Pending - } - } -} - -enum BlockchainTreeAction { - MakeForkchoiceHeadCanonical { - state: ForkchoiceState, - attrs: Option, - tx: oneshot::Sender>, - }, - InsertNewPayload { - block: SealedBlock, - tx: oneshot::Sender>, - }, - MakeNewPayloadCanonical { - payload_num_hash: BlockNumHash, - status: PayloadStatus, - tx: oneshot::Sender>, - }, - /// Action to insert a new block that we successfully downloaded from the network. - /// There are several outcomes for inserting a downloaded block into the tree: - /// - /// ## [`BlockStatus::Valid`] - /// - /// The block is connected to the current canonical chain and is valid. - /// If the block is an ancestor of the current forkchoice head, then we can try again to - /// make the chain canonical. - /// - /// ## [`BlockStatus::Disconnected`] - /// - /// The block is not connected to the canonical chain, and we need to download the - /// missing parent first. - /// - /// ## Insert Error - /// - /// If the insertion into the tree failed, then the block was well-formed (valid hash), - /// but its chain is invalid, which means the FCU that triggered the - /// download is invalid. Here we can stop because there's nothing to do here - /// and the engine needs to wait for another FCU. - InsertDownloadedPayload { block: SealedBlock }, -} - -/// Represents outcomes of processing an engine event -#[derive(Debug)] -enum EngineEventOutcome { - /// Engine event was processed successfully, engine should continue. - Processed, - /// Engine event was processed successfully and reached max block. - ReachedMaxBlock, -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - test_utils::{spawn_consensus_engine, TestConsensusEngineBuilder}, - BeaconForkChoiceUpdateError, - }; - use alloy_rpc_types_engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; - use assert_matches::assert_matches; - use reth_chainspec::{ChainSpecBuilder, MAINNET}; - use reth_node_types::FullNodePrimitives; - use reth_primitives::BlockExt; - use reth_provider::{BlockWriter, ProviderFactory, StorageLocation}; - use reth_rpc_types_compat::engine::payload::block_to_payload_v1; - use reth_stages::{ExecOutput, PipelineError, StageError}; - use reth_stages_api::StageCheckpoint; - use reth_testing_utils::generators::{self, Rng}; - use std::{collections::VecDeque, sync::Arc}; - use tokio::sync::oneshot::error::TryRecvError; - - // Pipeline error is propagated. 
- #[tokio::test] - async fn pipeline_error_is_propagated() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Err(StageError::ChannelClosed)])) - .disable_blockchain_tree_sync() - .with_max_block(1) - .build(); - - let res = spawn_consensus_engine(consensus_engine); - - let _ = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - ..Default::default() - }) - .await; - assert_matches!( - res.await, - Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed)) - ); - } - - // Test that the consensus engine is idle until first forkchoice updated is received. - #[tokio::test] - async fn is_idle_until_forkchoice_is_set() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Err(StageError::ChannelClosed)])) - .disable_blockchain_tree_sync() - .with_max_block(1) - .build(); - - let mut rx = spawn_consensus_engine(consensus_engine); - - // consensus engine is idle - tokio::time::sleep(Duration::from_millis(100)).await; - assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); - - // consensus engine is still idle because no FCUs were received - let _ = env - .send_new_payload( - block_to_payload_v1(SealedBlock::<_>::default()), - ExecutionPayloadSidecar::none(), - ) - .await; - - assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); - - // consensus engine is still idle because pruning is running - let _ = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - ..Default::default() - }) - .await; - assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); - - // consensus engine receives a forkchoice state and triggers the pipeline when pruning is - // finished - loop { - match rx.try_recv() { - Ok(result) => { - assert_matches!( - result, - Err(BeaconConsensusEngineError::Pipeline(n)) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed)) - ); - break - } - Err(TryRecvError::Empty) => { - let _ = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - ..Default::default() - }) - .await; - } - Err(err) => panic!("receive error: {err}"), - } - } - } - - // Test that the consensus engine runs the pipeline again if the tree cannot be restored. - // The consensus engine will propagate the second result (error) only if it runs the pipeline - // for the second time. 
- #[tokio::test] - async fn runs_pipeline_again_if_tree_not_restored() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(1), done: true }), - Err(StageError::ChannelClosed), - ])) - .disable_blockchain_tree_sync() - .with_max_block(2) - .build(); - - let rx = spawn_consensus_engine(consensus_engine); - - let _ = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - finalized_block_hash: rng.gen(), - ..Default::default() - }) - .await; - - assert_matches!( - rx.await, - Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed)) - ); - } - - #[tokio::test] - async fn terminates_upon_reaching_max_block() { - let mut rng = generators::rng(); - let max_block = 1000; - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(max_block), - done: true, - })])) - .with_max_block(max_block) - .disable_blockchain_tree_sync() - .build(); - - let rx = spawn_consensus_engine(consensus_engine); - - let _ = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - ..Default::default() - }) - .await; - assert_matches!(rx.await, Ok(Ok(()))); - } - - fn insert_blocks< - 'a, - N: ProviderNodeTypes< - Primitives: FullNodePrimitives< - BlockBody = reth_primitives::BlockBody, - BlockHeader = reth_primitives::Header, - >, - >, - >( - provider_factory: ProviderFactory, - mut blocks: impl Iterator, - ) { - let provider = provider_factory.provider_rw().unwrap(); - blocks - .try_for_each(|b| { - provider - .insert_block( - b.clone().try_seal_with_senders().expect("invalid tx signature in block"), - StorageLocation::Database, - ) - .map(drop) - }) - .expect("failed to insert"); - provider.commit().unwrap(); - } - - mod fork_choice_updated { - use super::*; - use alloy_primitives::U256; - use alloy_rpc_types_engine::ForkchoiceUpdateError; - use generators::BlockParams; - use reth_db::{tables, test_utils::create_test_static_files_dir, Database}; - use reth_db_api::transaction::DbTxMut; - use reth_provider::{providers::StaticFileProvider, test_utils::MockNodeTypesWithDB}; - use reth_testing_utils::generators::random_block; - - #[tokio::test] - async fn empty_head() { - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - let res = env.send_forkchoice_updated(ForkchoiceState::default()).await; - assert_matches!( - res, - Err(BeaconForkChoiceUpdateError::ForkchoiceUpdateError( - ForkchoiceUpdateError::InvalidState - )) - ); - - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn 
valid_forkchoice() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1].into_iter(), - ); - env.db - .update(|tx| { - tx.put::( - StageId::Finish.to_string(), - StageCheckpoint::new(block1.number), - ) - }) - .unwrap() - .unwrap(); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - let forkchoice = ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }; - - let result = env.send_forkchoice_updated(forkchoice).await.unwrap(); - let expected_result = ForkchoiceUpdated::new(PayloadStatus::new( - PayloadStatusEnum::Valid, - Some(block1.hash()), - )); - assert_eq!(result, expected_result); - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn unknown_head_hash() { - let mut rng = generators::rng(); - - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - ])) - .disable_blockchain_tree_sync() - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let block1 = random_block( - &mut rng, - 1, - BlockParams { parent: Some(genesis.hash()), ..Default::default() }, - ); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1].into_iter(), - ); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - let next_head = random_block( - &mut rng, - 2, - BlockParams { - parent: Some(block1.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - let next_forkchoice_state = ForkchoiceState { - head_block_hash: next_head.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }; - - // if we `await` in the assert, the forkchoice will poll after we've inserted the block, - // and it will return VALID instead of SYNCING - let invalid_rx = env.send_forkchoice_updated(next_forkchoice_state).await; - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - // Insert next head immediately after sending forkchoice update - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - 
StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - std::iter::once(&next_head), - ); - - let expected_result = ForkchoiceUpdated::from_status(PayloadStatusEnum::Syncing); - assert_matches!(invalid_rx, Ok(result) => assert_eq!(result, expected_result)); - - let result = env.send_forkchoice_retry_on_syncing(next_forkchoice_state).await.unwrap(); - let expected_result = ForkchoiceUpdated::from_status(PayloadStatusEnum::Valid) - .with_latest_valid_hash(next_head.hash()); - assert_eq!(result, expected_result); - - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn unknown_finalized_hash() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .disable_blockchain_tree_sync() - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1].into_iter(), - ); - - let engine = spawn_consensus_engine(consensus_engine); - - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - let expected_result = ForkchoiceUpdated::from_status(PayloadStatusEnum::Syncing); - assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); - drop(engine); - } - - #[tokio::test] - async fn forkchoice_updated_pre_merge() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .london_activated() - .paris_at_ttd(U256::from(3), 3) - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - ])) - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let mut block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - block1.set_difficulty(U256::from(1)); - - // a second pre-merge block - let mut block2 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - block2.set_difficulty(U256::from(1)); - - // a transition block - let mut block3 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - block3.set_difficulty(U256::from(1)); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - 
StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1, &block2, &block3].into_iter(), - ); - - let _engine = spawn_consensus_engine(consensus_engine); - - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - - assert_matches!(res, Ok(result) => { - let ForkchoiceUpdated { payload_status, .. } = result; - assert_matches!(payload_status.status, PayloadStatusEnum::Invalid { .. }); - assert_eq!(payload_status.latest_valid_hash, Some(B256::ZERO)); - }); - } - - #[tokio::test] - async fn forkchoice_updated_invalid_pow() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .london_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - ])) - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - - let (_temp_dir, temp_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(temp_dir_path).unwrap(), - ), - [&genesis, &block1].into_iter(), - ); - - let _engine = spawn_consensus_engine(consensus_engine); - - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - let expected_result = ForkchoiceUpdated::from_status(PayloadStatusEnum::Invalid { - validation_error: BlockValidationError::BlockPreMerge { hash: block1.hash() } - .to_string(), - }) - .with_latest_valid_hash(B256::ZERO); - assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); - } - } - - mod new_payload { - use super::*; - use alloy_genesis::Genesis; - use alloy_primitives::U256; - use generators::BlockParams; - use reth_db::test_utils::create_test_static_files_dir; - use reth_primitives::EthereumHardfork; - use reth_provider::{ - providers::StaticFileProvider, - test_utils::{blocks::BlockchainTestData, MockNodeTypesWithDB}, - }; - use reth_testing_utils::{generators::random_block, GenesisAllocator}; - #[tokio::test] - async fn new_payload_before_forkchoice() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - // Send new payload - let res = env - .send_new_payload( - block_to_payload_v1(random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - )), - ExecutionPayloadSidecar::none(), - ) - .await; - - // Invalid, because this is a genesis block - assert_matches!(res, Ok(result) => assert_matches!(result.status, 
PayloadStatusEnum::Invalid { .. })); - - // Send new payload - let res = env - .send_new_payload( - block_to_payload_v1(random_block( - &mut rng, - 1, - BlockParams { ommers_count: Some(0), ..Default::default() }, - )), - ExecutionPayloadSidecar::none(), - ) - .await; - - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); - assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); - - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn payload_known() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - let block2 = random_block( - &mut rng, - 2, - BlockParams { - parent: Some(block1.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1, &block2].into_iter(), - ); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - // Send forkchoice - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Valid) - .with_latest_valid_hash(block1.hash()); - assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. 
}) => assert_eq!(payload_status, expected_result)); - - // Send new payload - let result = env - .send_new_payload_retry_on_syncing( - block_to_payload_v1(block2.clone()), - ExecutionPayloadSidecar::none(), - ) - .await - .unwrap(); - - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Valid) - .with_latest_valid_hash(block2.hash()); - assert_eq!(result, expected_result); - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn simple_validate_block() { - let mut rng = generators::rng(); - let amount = U256::from(1000000000000000000u64); - let mut allocator = GenesisAllocator::default().with_rng(&mut rng); - for _ in 0..16 { - // add 16 new accounts - allocator.new_funded_account(amount); - } - - let alloc = allocator.build(); - - let genesis = Genesis::default().extend_accounts(alloc); - - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(genesis) - .shanghai_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_real_pipeline() - .with_real_executor() - .with_real_consensus() - .build(); - - let genesis = SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default()); - let block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(chain_spec.genesis_hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - - // TODO: add transactions that transfer from the alloc accounts, generating the new - // block tx and state root - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1].into_iter(), - ); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - // Send forkchoice - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Valid) - .with_latest_valid_hash(block1.hash()); - assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. 
}) => assert_eq!(payload_status, expected_result)); - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn payload_parent_unknown() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - std::iter::once(&genesis), - ); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - // Send forkchoice - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: genesis.hash(), - finalized_block_hash: genesis.hash(), - ..Default::default() - }) - .await; - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Valid) - .with_latest_valid_hash(genesis.hash()); - assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. }) => assert_eq!(payload_status, expected_result)); - - // Send new payload - let parent = rng.gen(); - let block = random_block( - &mut rng, - 2, - BlockParams { parent: Some(parent), ommers_count: Some(0), ..Default::default() }, - ); - let res = env - .send_new_payload(block_to_payload_v1(block), ExecutionPayloadSidecar::none()) - .await; - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); - assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); - - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn payload_pre_merge() { - let data = BlockchainTestData::default(); - let mut block1 = data.blocks[0].0.block.clone(); - block1.set_difficulty( - MAINNET.fork(EthereumHardfork::Paris).ttd().unwrap() - U256::from(1), - ); - block1 = block1.unseal::().seal_slow(); - let (block2, exec_result2) = data.blocks[1].clone(); - let mut block2 = block2.unseal().block; - block2.body.withdrawals = None; - block2.header.parent_hash = block1.hash(); - block2.header.base_fee_per_gas = Some(100); - block2.header.difficulty = U256::ZERO; - let block2 = block2.clone().seal_slow(); - - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .london_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .with_executor_results(Vec::from([exec_result2])) - .build(); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&data.genesis, &block1].into_iter(), - ); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - // Send forkchoice - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - - let 
expected_result = PayloadStatus::from_status(PayloadStatusEnum::Invalid { - validation_error: BlockValidationError::BlockPreMerge { hash: block1.hash() } - .to_string(), - }) - .with_latest_valid_hash(B256::ZERO); - assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. }) => assert_eq!(payload_status, expected_result)); - - // Send new payload - let result = env - .send_new_payload_retry_on_syncing( - block_to_payload_v1(block2.clone()), - ExecutionPayloadSidecar::none(), - ) - .await - .unwrap(); - - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Invalid { - validation_error: BlockValidationError::BlockPreMerge { hash: block2.hash() } - .to_string(), - }) - .with_latest_valid_hash(B256::ZERO); - assert_eq!(result, expected_result); - - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - } -} diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs deleted file mode 100644 index adbb531b2..000000000 --- a/crates/consensus/beacon/src/engine/sync.rs +++ /dev/null @@ -1,672 +0,0 @@ -//! Sync management for the engine implementation. - -use crate::{ - engine::metrics::EngineSyncMetrics, BeaconConsensusEngineEvent, - ConsensusEngineLiveSyncProgress, EthBeaconConsensus, -}; -use alloy_consensus::Header; -use alloy_primitives::{BlockNumber, B256}; -use futures::FutureExt; -use reth_network_p2p::{ - full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, - BlockClient, -}; -use reth_node_types::{BodyTy, HeaderTy}; -use reth_primitives::{BlockBody, EthPrimitives, NodePrimitives, SealedBlock}; -use reth_provider::providers::ProviderNodeTypes; -use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; -use reth_tasks::TaskSpawner; -use reth_tokio_util::EventSender; -use std::{ - cmp::{Ordering, Reverse}, - collections::{binary_heap::PeekMut, BinaryHeap}, - sync::Arc, - task::{ready, Context, Poll}, -}; -use tokio::sync::oneshot; -use tracing::trace; - -/// Manages syncing under the control of the engine. -/// -/// This type controls the [Pipeline] and supports (single) full block downloads. -/// -/// Caution: If the pipeline is running, this type will not emit blocks downloaded from the network -/// [`EngineSyncEvent::FetchedFullBlock`] until the pipeline is idle to prevent commits to the -/// database while the pipeline is still active. -pub(crate) struct EngineSyncController -where - N: ProviderNodeTypes, - Client: BlockClient, -{ - /// A downloader that can download full blocks from the network. - full_block_client: FullBlockClient, - /// The type that can spawn the pipeline task. - pipeline_task_spawner: Box, - /// The current state of the pipeline. - /// The pipeline is used for large ranges. - pipeline_state: PipelineState, - /// Pending target block for the pipeline to sync - pending_pipeline_target: Option, - /// In-flight full block requests in progress. - inflight_full_block_requests: Vec>, - /// In-flight full block _range_ requests in progress. - inflight_block_range_requests: Vec>, - /// Sender for engine events. - event_sender: EventSender>, - /// Buffered blocks from downloads - this is a min-heap of blocks, using the block number for - /// ordering. This means the blocks will be popped from the heap with ascending block numbers. - range_buffered_blocks: BinaryHeap, BodyTy>>>, - /// Max block after which the consensus engine would terminate the sync. Used for debugging - /// purposes. - max_block: Option, - /// Engine sync metrics. 
- metrics: EngineSyncMetrics, -} - -impl EngineSyncController -where - N: ProviderNodeTypes, - Client: BlockClient, -{ - /// Create a new instance - pub(crate) fn new( - pipeline: Pipeline, - client: Client, - pipeline_task_spawner: Box, - max_block: Option, - chain_spec: Arc, - event_sender: EventSender>, - ) -> Self { - Self { - full_block_client: FullBlockClient::new( - client, - Arc::new(EthBeaconConsensus::new(chain_spec)), - ), - pipeline_task_spawner, - pipeline_state: PipelineState::Idle(Some(pipeline)), - pending_pipeline_target: None, - inflight_full_block_requests: Vec::new(), - inflight_block_range_requests: Vec::new(), - range_buffered_blocks: BinaryHeap::new(), - event_sender, - max_block, - metrics: EngineSyncMetrics::default(), - } - } -} - -impl EngineSyncController -where - N: ProviderNodeTypes, - Client: BlockClient
, Body = BodyTy> + 'static, -{ - /// Sets the metrics for the active downloads - fn update_block_download_metrics(&self) { - self.metrics.active_block_downloads.set(self.inflight_full_block_requests.len() as f64); - // TODO: full block range metrics - } - - /// Sets the max block value for testing - #[cfg(test)] - pub(crate) fn set_max_block(&mut self, block: BlockNumber) { - self.max_block = Some(block); - } - - /// Cancels all download requests that are in progress and buffered blocks. - pub(crate) fn clear_block_download_requests(&mut self) { - self.inflight_full_block_requests.clear(); - self.inflight_block_range_requests.clear(); - self.range_buffered_blocks.clear(); - self.update_block_download_metrics(); - } - - /// Cancels the full block request with the given hash. - pub(crate) fn cancel_full_block_request(&mut self, hash: B256) { - self.inflight_full_block_requests.retain(|req| *req.hash() != hash); - self.update_block_download_metrics(); - } - - /// Returns `true` if a pipeline target is queued and will be triggered on the next `poll`. - #[allow(dead_code)] - pub(crate) const fn is_pipeline_sync_pending(&self) -> bool { - self.pending_pipeline_target.is_some() && self.pipeline_state.is_idle() - } - - /// Returns `true` if the pipeline is idle. - pub(crate) const fn is_pipeline_idle(&self) -> bool { - self.pipeline_state.is_idle() - } - - /// Returns `true` if the pipeline is active. - pub(crate) const fn is_pipeline_active(&self) -> bool { - !self.is_pipeline_idle() - } - - /// Returns true if there's already a request for the given hash. - pub(crate) fn is_inflight_request(&self, hash: B256) -> bool { - self.inflight_full_block_requests.iter().any(|req| *req.hash() == hash) - } - - /// Starts requesting a range of blocks from the network, in reverse from the given hash. - /// - /// If the `count` is 1, this will use the `download_full_block` method instead, because it - /// downloads headers and bodies for the block concurrently. - pub(crate) fn download_block_range(&mut self, hash: B256, count: u64) { - if count == 1 { - self.download_full_block(hash); - } else { - trace!( - target: "consensus::engine", - ?hash, - ?count, - "start downloading full block range." - ); - - // notify listeners that we're downloading a block range - self.event_sender.notify(BeaconConsensusEngineEvent::LiveSyncProgress( - ConsensusEngineLiveSyncProgress::DownloadingBlocks { - remaining_blocks: count, - target: hash, - }, - )); - let request = self.full_block_client.get_full_block_range(hash, count); - self.inflight_block_range_requests.push(request); - } - - // // TODO: need more metrics for block ranges - // self.update_block_download_metrics(); - } - - /// Starts requesting a full block from the network. - /// - /// Returns `true` if the request was started, `false` if there's already a request for the - /// given hash. - pub(crate) fn download_full_block(&mut self, hash: B256) -> bool { - if self.is_inflight_request(hash) { - return false - } - trace!( - target: "consensus::engine::sync", - ?hash, - "Start downloading full block" - ); - - // notify listeners that we're downloading a block - self.event_sender.notify(BeaconConsensusEngineEvent::LiveSyncProgress( - ConsensusEngineLiveSyncProgress::DownloadingBlocks { - remaining_blocks: 1, - target: hash, - }, - )); - - let request = self.full_block_client.get_full_block(hash); - self.inflight_full_block_requests.push(request); - - self.update_block_download_metrics(); - - true - } - - /// Sets a new target to sync the pipeline to. 
- /// - /// But ensures the target is not the zero hash. - pub(crate) fn set_pipeline_sync_target(&mut self, target: PipelineTarget) { - if target.sync_target().is_some_and(|target| target.is_zero()) { - trace!( - target: "consensus::engine::sync", - "Pipeline target cannot be zero hash." - ); - // precaution to never sync to the zero hash - return - } - self.pending_pipeline_target = Some(target); - } - - /// Check if the engine reached max block as specified by `max_block` parameter. - /// - /// Note: this is mainly for debugging purposes. - pub(crate) fn has_reached_max_block(&self, progress: BlockNumber) -> bool { - let has_reached_max_block = self.max_block.is_some_and(|target| progress >= target); - if has_reached_max_block { - trace!( - target: "consensus::engine::sync", - ?progress, - max_block = ?self.max_block, - "Consensus engine reached max block" - ); - } - has_reached_max_block - } - - /// Advances the pipeline state. - /// - /// This checks for the result in the channel, or returns pending if the pipeline is idle. - fn poll_pipeline(&mut self, cx: &mut Context<'_>) -> Poll> { - let res = match self.pipeline_state { - PipelineState::Idle(_) => return Poll::Pending, - PipelineState::Running(ref mut fut) => { - ready!(fut.poll_unpin(cx)) - } - }; - let ev = match res { - Ok((pipeline, result)) => { - let minimum_block_number = pipeline.minimum_block_number(); - let reached_max_block = - self.has_reached_max_block(minimum_block_number.unwrap_or_default()); - self.pipeline_state = PipelineState::Idle(Some(pipeline)); - EngineSyncEvent::PipelineFinished { result, reached_max_block } - } - Err(_) => { - // failed to receive the pipeline - EngineSyncEvent::PipelineTaskDropped - } - }; - Poll::Ready(ev) - } - - /// This will spawn the pipeline if it is idle and a target is set or if the pipeline is set to - /// run continuously. - fn try_spawn_pipeline(&mut self) -> Option> { - match &mut self.pipeline_state { - PipelineState::Idle(pipeline) => { - let target = self.pending_pipeline_target.take()?; - let (tx, rx) = oneshot::channel(); - - let pipeline = pipeline.take().expect("exists"); - self.pipeline_task_spawner.spawn_critical_blocking( - "pipeline task", - Box::pin(async move { - let result = pipeline.run_as_fut(Some(target)).await; - let _ = tx.send(result); - }), - ); - self.pipeline_state = PipelineState::Running(rx); - - // we also clear any pending full block requests because we expect them to be - // outdated (included in the range the pipeline is syncing anyway) - self.clear_block_download_requests(); - - Some(EngineSyncEvent::PipelineStarted(Some(target))) - } - PipelineState::Running(_) => None, - } - } - - /// Advances the sync process. 
- pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { - // try to spawn a pipeline if a target is set - if let Some(event) = self.try_spawn_pipeline() { - return Poll::Ready(event) - } - - // make sure we poll the pipeline if it's active, and return any ready pipeline events - if !self.is_pipeline_idle() { - // advance the pipeline - if let Poll::Ready(event) = self.poll_pipeline(cx) { - return Poll::Ready(event) - } - } - - // advance all full block requests - for idx in (0..self.inflight_full_block_requests.len()).rev() { - let mut request = self.inflight_full_block_requests.swap_remove(idx); - if let Poll::Ready(block) = request.poll_unpin(cx) { - trace!(target: "consensus::engine", block=?block.num_hash(), "Received single full block, buffering"); - self.range_buffered_blocks.push(Reverse(OrderedSealedBlock(block))); - } else { - // still pending - self.inflight_full_block_requests.push(request); - } - } - - // advance all full block range requests - for idx in (0..self.inflight_block_range_requests.len()).rev() { - let mut request = self.inflight_block_range_requests.swap_remove(idx); - if let Poll::Ready(blocks) = request.poll_unpin(cx) { - trace!(target: "consensus::engine", len=?blocks.len(), first=?blocks.first().map(|b| b.num_hash()), last=?blocks.last().map(|b| b.num_hash()), "Received full block range, buffering"); - self.range_buffered_blocks - .extend(blocks.into_iter().map(OrderedSealedBlock).map(Reverse)); - } else { - // still pending - self.inflight_block_range_requests.push(request); - } - } - - self.update_block_download_metrics(); - - // drain an element of the block buffer if there are any - if let Some(block) = self.range_buffered_blocks.pop() { - // peek ahead and pop duplicates - while let Some(peek) = self.range_buffered_blocks.peek_mut() { - if peek.0 .0.hash() == block.0 .0.hash() { - PeekMut::pop(peek); - } else { - break - } - } - return Poll::Ready(EngineSyncEvent::FetchedFullBlock(block.0 .0)) - } - - Poll::Pending - } -} - -/// A wrapper type around [`SealedBlock`] that implements the [Ord] trait by block number. -#[derive(Debug, Clone, PartialEq, Eq)] -struct OrderedSealedBlock(SealedBlock); - -impl PartialOrd for OrderedSealedBlock -where - H: reth_primitives_traits::BlockHeader + 'static, - B: reth_primitives_traits::BlockBody + 'static, -{ - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for OrderedSealedBlock -where - H: reth_primitives_traits::BlockHeader + 'static, - B: reth_primitives_traits::BlockBody + 'static, -{ - fn cmp(&self, other: &Self) -> Ordering { - self.0.number().cmp(&other.0.number()) - } -} - -/// The event type emitted by the [`EngineSyncController`]. -#[derive(Debug)] -pub(crate) enum EngineSyncEvent { - /// A full block has been downloaded from the network. - FetchedFullBlock(SealedBlock), - /// Pipeline started syncing - /// - /// This is none if the pipeline is triggered without a specific target. - PipelineStarted(Option), - /// Pipeline finished - /// - /// If this is returned, the pipeline is idle. - PipelineFinished { - /// Final result of the pipeline run. - result: Result, - /// Whether the pipeline reached the configured `max_block`. - /// - /// Note: this is only relevant in debugging scenarios. - reached_max_block: bool, - }, - /// Pipeline task was dropped after it was started, unable to receive it because channel - /// closed. This would indicate a panicked pipeline task - PipelineTaskDropped, -} - -/// The possible pipeline states within the sync controller. 
-/// -/// [`PipelineState::Idle`] means that the pipeline is currently idle. -/// [`PipelineState::Running`] means that the pipeline is currently running. -/// -/// NOTE: The differentiation between these two states is important, because when the pipeline is -/// running, it acquires the write lock over the database. This means that we cannot forward to the -/// blockchain tree any messages that would result in database writes, since it would result in a -/// deadlock. -enum PipelineState { - /// Pipeline is idle. - Idle(Option>), - /// Pipeline is running and waiting for a response - Running(oneshot::Receiver>), -} - -impl PipelineState { - /// Returns `true` if the state matches idle. - const fn is_idle(&self) -> bool { - matches!(self, Self::Idle(_)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_consensus::Header; - use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; - use assert_matches::assert_matches; - use futures::poll; - use reth_chainspec::{ChainSpec, ChainSpecBuilder, MAINNET}; - use reth_network_p2p::{either::Either, test_utils::TestFullBlockClient, EthBlockClient}; - use reth_primitives::{BlockBody, SealedHeader}; - use reth_provider::{ - test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, - ExecutionOutcome, - }; - use reth_prune_types::PruneModes; - use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; - use reth_stages_api::StageCheckpoint; - use reth_static_file::StaticFileProducer; - use reth_tasks::TokioTaskExecutor; - use std::{collections::VecDeque, future::poll_fn, ops::Range}; - use tokio::sync::watch; - - struct TestPipelineBuilder { - pipeline_exec_outputs: VecDeque>, - executor_results: Vec, - max_block: Option, - } - - impl TestPipelineBuilder { - /// Create a new [`TestPipelineBuilder`]. - const fn new() -> Self { - Self { - pipeline_exec_outputs: VecDeque::new(), - executor_results: Vec::new(), - max_block: None, - } - } - - /// Set the pipeline execution outputs to use for the test consensus engine. - fn with_pipeline_exec_outputs( - mut self, - pipeline_exec_outputs: VecDeque>, - ) -> Self { - self.pipeline_exec_outputs = pipeline_exec_outputs; - self - } - - /// Set the executor results to use for the test consensus engine. - #[allow(dead_code)] - fn with_executor_results(mut self, executor_results: Vec) -> Self { - self.executor_results = executor_results; - self - } - - /// Sets the max block for the pipeline to run. - #[allow(dead_code)] - const fn with_max_block(mut self, max_block: BlockNumber) -> Self { - self.max_block = Some(max_block); - self - } - - /// Builds the pipeline. - fn build(self, chain_spec: Arc) -> Pipeline { - reth_tracing::init_test_tracing(); - - // Setup pipeline - let (tip_tx, _tip_rx) = watch::channel(B256::default()); - let mut pipeline = Pipeline::::builder() - .add_stages(TestStages::new(self.pipeline_exec_outputs, Default::default())) - .with_tip_sender(tip_tx); - - if let Some(max_block) = self.max_block { - pipeline = pipeline.with_max_block(max_block); - } - - let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec); - - let static_file_producer = - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); - - pipeline.build(provider_factory, static_file_producer) - } - } - - struct TestSyncControllerBuilder { - max_block: Option, - client: Option, - } - - impl TestSyncControllerBuilder { - /// Create a new [`TestSyncControllerBuilder`]. 
- const fn new() -> Self { - Self { max_block: None, client: None } - } - - /// Sets the max block for the pipeline to run. - #[allow(dead_code)] - const fn with_max_block(mut self, max_block: BlockNumber) -> Self { - self.max_block = Some(max_block); - self - } - - /// Sets the client to use for network operations. - fn with_client(mut self, client: Client) -> Self { - self.client = Some(client); - self - } - - /// Builds the sync controller. - fn build( - self, - pipeline: Pipeline, - chain_spec: Arc, - ) -> EngineSyncController> - where - N: ProviderNodeTypes, - Client: EthBlockClient + 'static, - { - let client = self - .client - .map(Either::Left) - .unwrap_or_else(|| Either::Right(TestFullBlockClient::default())); - - EngineSyncController::new( - pipeline, - client, - Box::::default(), - self.max_block, - chain_spec, - Default::default(), - ) - } - } - - #[tokio::test] - async fn pipeline_started_after_setting_target() { - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let client = TestFullBlockClient::default(); - insert_headers_into_client(&client, SealedHeader::default(), 0..10); - // force the pipeline to be "done" after 5 blocks - let pipeline = TestPipelineBuilder::new() - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(5), - done: true, - })])) - .build(chain_spec.clone()); - - let mut sync_controller = TestSyncControllerBuilder::new() - .with_client(client.clone()) - .build(pipeline, chain_spec); - - let tip = client.highest_block().expect("there should be blocks here"); - sync_controller.set_pipeline_sync_target(tip.hash().into()); - - let sync_future = poll_fn(|cx| sync_controller.poll(cx)); - let next_event = poll!(sync_future); - - // can assert that the first event here is PipelineStarted because we set the sync target, - // and we should get Ready because the pipeline should be spawned immediately - assert_matches!(next_event, Poll::Ready(EngineSyncEvent::PipelineStarted(Some(target))) => { - assert_eq!(target.sync_target().unwrap(), tip.hash()); - }); - - // the next event should be the pipeline finishing in a good state - let sync_future = poll_fn(|cx| sync_controller.poll(cx)); - let next_ready = sync_future.await; - assert_matches!(next_ready, EngineSyncEvent::PipelineFinished { result, reached_max_block } => { - assert_matches!(result, Ok(control_flow) => assert_eq!(control_flow, ControlFlow::Continue { block_number: 5 })); - // no max block configured - assert!(!reached_max_block); - }); - } - - fn insert_headers_into_client( - client: &TestFullBlockClient, - genesis_header: SealedHeader, - range: Range, - ) { - let mut sealed_header = genesis_header; - let body = BlockBody::default(); - for _ in range { - let (mut header, hash) = sealed_header.split(); - // update to the next header - header.parent_hash = hash; - header.number += 1; - header.timestamp += 1; - sealed_header = SealedHeader::seal(header); - client.insert(sealed_header.clone(), body.clone()); - } - } - - #[tokio::test] - async fn controller_sends_range_request() { - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let client = TestFullBlockClient::default(); - let header = Header { - base_fee_per_gas: Some(7), - gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, - ..Default::default() - }; - let header = SealedHeader::seal(header); - 
insert_headers_into_client(&client, header, 0..10); - - // set up a pipeline - let pipeline = TestPipelineBuilder::new().build(chain_spec.clone()); - - let mut sync_controller = TestSyncControllerBuilder::new() - .with_client(client.clone()) - .build(pipeline, chain_spec); - - let tip = client.highest_block().expect("there should be blocks here"); - - // call the download range method - sync_controller.download_block_range(tip.hash(), tip.number); - - // ensure we have one in flight range request - assert_eq!(sync_controller.inflight_block_range_requests.len(), 1); - - // ensure the range request is made correctly - let first_req = sync_controller.inflight_block_range_requests.first().unwrap(); - assert_eq!(first_req.start_hash(), tip.hash()); - assert_eq!(first_req.count(), tip.number); - - // ensure they are in ascending order - for num in 1..=10 { - let sync_future = poll_fn(|cx| sync_controller.poll(cx)); - let next_ready = sync_future.await; - assert_matches!(next_ready, EngineSyncEvent::FetchedFullBlock(block) => { - assert_eq!(block.number, num); - }); - } - } -} diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs deleted file mode 100644 index 56de724ad..000000000 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ /dev/null @@ -1,467 +0,0 @@ -#![allow(missing_docs)] -use crate::{ - engine::hooks::PruneHook, hooks::EngineHooks, BeaconConsensusEngine, - BeaconConsensusEngineError, BeaconConsensusEngineHandle, BeaconForkChoiceUpdateError, - EthBeaconConsensus, MIN_BLOCKS_FOR_PIPELINE_RUN, -}; -use alloy_primitives::{BlockNumber, B256}; -use alloy_rpc_types_engine::{ - ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, -}; -use reth_blockchain_tree::{ - config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, -}; -use reth_chainspec::ChainSpec; -use reth_config::config::StageConfig; -use reth_consensus::{test_utils::TestConsensus, ConsensusError, FullConsensus}; -use reth_db::{test_utils::TempDatabase, DatabaseEnv as DE}; -use reth_downloaders::{ - bodies::bodies::BodiesDownloaderBuilder, - headers::reverse_headers::ReverseHeadersDownloaderBuilder, -}; -use reth_engine_primitives::{BeaconOnNewPayloadError, EngineApiMessageVersion}; -use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_evm::{either::Either, test_utils::MockExecutorProvider}; -use reth_evm_ethereum::execute::EthExecutorProvider; -use reth_exex_types::FinishedExExHeight; -use reth_network_p2p::{ - sync::NoopSyncStateUpdater, test_utils::NoopFullBlockClient, EthBlockClient, -}; -use reth_payload_builder::test_utils::spawn_test_payload_service; -use reth_primitives::SealedHeader; -use reth_provider::{ - providers::BlockchainProvider, - test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, - ExecutionOutcome, -}; -use reth_prune::Pruner; -use reth_prune_types::PruneModes; -use reth_stages::{sets::DefaultStages, test_utils::TestStages, ExecOutput, Pipeline, StageError}; -use reth_static_file::StaticFileProducer; -use reth_tasks::TokioTaskExecutor; -use std::{collections::VecDeque, sync::Arc}; -use tokio::sync::{oneshot, watch}; - -type DatabaseEnv = TempDatabase; - -type TestBeaconConsensusEngine = BeaconConsensusEngine< - MockNodeTypesWithDB, - BlockchainProvider, - Arc>, ->; - -#[derive(Debug)] -pub struct TestEnv { - pub db: DB, - // Keep the tip receiver around, so it's not dropped. 
- #[allow(dead_code)] - tip_rx: watch::Receiver, - engine_handle: BeaconConsensusEngineHandle, -} - -impl TestEnv { - const fn new( - db: DB, - tip_rx: watch::Receiver, - engine_handle: BeaconConsensusEngineHandle, - ) -> Self { - Self { db, tip_rx, engine_handle } - } - - pub async fn send_new_payload>( - &self, - payload: T, - sidecar: ExecutionPayloadSidecar, - ) -> Result { - self.engine_handle.new_payload(payload.into(), sidecar).await - } - - /// Sends the `ExecutionPayload` message to the consensus engine and retries if the engine - /// is syncing. - pub async fn send_new_payload_retry_on_syncing>( - &self, - payload: T, - sidecar: ExecutionPayloadSidecar, - ) -> Result { - let payload: ExecutionPayload = payload.into(); - loop { - let result = self.send_new_payload(payload.clone(), sidecar.clone()).await?; - if !result.is_syncing() { - return Ok(result) - } - } - } - - pub async fn send_forkchoice_updated( - &self, - state: ForkchoiceState, - ) -> Result { - self.engine_handle - .fork_choice_updated(state, None, EngineApiMessageVersion::default()) - .await - } - - /// Sends the `ForkchoiceUpdated` message to the consensus engine and retries if the engine - /// is syncing. - pub async fn send_forkchoice_retry_on_syncing( - &self, - state: ForkchoiceState, - ) -> Result { - loop { - let result = self - .engine_handle - .fork_choice_updated(state, None, EngineApiMessageVersion::default()) - .await?; - if !result.is_syncing() { - return Ok(result) - } - } - } -} - -// TODO: add with_consensus in case we want to use the TestConsensus purposeful failure - this -// would require similar patterns to how we use with_client and the downloader -/// Represents either a real consensus engine, or a test consensus engine. -#[derive(Debug, Default)] -enum TestConsensusConfig { - /// Test consensus engine - #[default] - Test, - /// Real consensus engine - Real, -} - -/// Represents either test pipeline outputs, or real pipeline configuration. -#[derive(Debug)] -enum TestPipelineConfig { - /// Test pipeline outputs. - Test(VecDeque>), - /// Real pipeline configuration. - Real, -} - -impl Default for TestPipelineConfig { - fn default() -> Self { - Self::Test(VecDeque::new()) - } -} - -/// Represents either test executor results, or real executor configuration. -#[derive(Debug)] -enum TestExecutorConfig { - /// Test executor results. - Test(Vec), - /// Real executor configuration. - Real, -} - -impl Default for TestExecutorConfig { - fn default() -> Self { - Self::Test(Vec::new()) - } -} - -/// The basic configuration for a `TestConsensusEngine`, without generics for the client or -/// consensus engine. -#[derive(Debug)] -pub struct TestConsensusEngineBuilder { - chain_spec: Arc, - pipeline_config: TestPipelineConfig, - executor_config: TestExecutorConfig, - pipeline_run_threshold: Option, - max_block: Option, - consensus: TestConsensusConfig, -} - -impl TestConsensusEngineBuilder { - /// Create a new `TestConsensusEngineBuilder` with the given `ChainSpec`. - pub fn new(chain_spec: Arc) -> Self { - Self { - chain_spec, - pipeline_config: Default::default(), - executor_config: Default::default(), - pipeline_run_threshold: None, - max_block: None, - consensus: Default::default(), - } - } - - /// Set the pipeline execution outputs to use for the test consensus engine. 
- pub fn with_pipeline_exec_outputs( - mut self, - pipeline_exec_outputs: VecDeque>, - ) -> Self { - self.pipeline_config = TestPipelineConfig::Test(pipeline_exec_outputs); - self - } - - /// Set the executor results to use for the test consensus engine. - pub fn with_executor_results(mut self, executor_results: Vec) -> Self { - self.executor_config = TestExecutorConfig::Test(executor_results); - self - } - - /// Sets the max block for the pipeline to run. - pub const fn with_max_block(mut self, max_block: BlockNumber) -> Self { - self.max_block = Some(max_block); - self - } - - /// Uses the real pipeline instead of a pipeline with empty exec outputs. - pub fn with_real_pipeline(mut self) -> Self { - self.pipeline_config = TestPipelineConfig::Real; - self - } - - /// Uses the real executor instead of a executor with empty results. - pub fn with_real_executor(mut self) -> Self { - self.executor_config = TestExecutorConfig::Real; - self - } - - /// Uses a real consensus engine instead of a test consensus engine. - pub const fn with_real_consensus(mut self) -> Self { - self.consensus = TestConsensusConfig::Real; - self - } - - /// Disables blockchain tree driven sync. This is the same as setting the pipeline run - /// threshold to 0. - pub const fn disable_blockchain_tree_sync(mut self) -> Self { - self.pipeline_run_threshold = Some(0); - self - } - - /// Sets the client to use for network operations. - #[allow(dead_code)] - pub const fn with_client( - self, - client: Client, - ) -> NetworkedTestConsensusEngineBuilder - where - Client: EthBlockClient + 'static, - { - NetworkedTestConsensusEngineBuilder { base_config: self, client: Some(client) } - } - - /// Builds the test consensus engine into a `TestConsensusEngine` and `TestEnv`. - pub fn build( - self, - ) -> (TestBeaconConsensusEngine, TestEnv>) { - let networked = NetworkedTestConsensusEngineBuilder { base_config: self, client: None }; - - networked.build() - } -} - -/// A builder for `TestConsensusEngine`, allows configuration of mocked pipeline outputs and -/// mocked executor results. -/// -/// This optionally includes a client for network operations. -#[derive(Debug)] -pub struct NetworkedTestConsensusEngineBuilder { - base_config: TestConsensusEngineBuilder, - client: Option, -} - -impl NetworkedTestConsensusEngineBuilder -where - Client: EthBlockClient + 'static, -{ - /// Set the pipeline execution outputs to use for the test consensus engine. - #[allow(dead_code)] - pub fn with_pipeline_exec_outputs( - mut self, - pipeline_exec_outputs: VecDeque>, - ) -> Self { - self.base_config.pipeline_config = TestPipelineConfig::Test(pipeline_exec_outputs); - self - } - - /// Set the executor results to use for the test consensus engine. - #[allow(dead_code)] - pub fn with_executor_results(mut self, executor_results: Vec) -> Self { - self.base_config.executor_config = TestExecutorConfig::Test(executor_results); - self - } - - /// Sets the max block for the pipeline to run. - #[allow(dead_code)] - pub const fn with_max_block(mut self, max_block: BlockNumber) -> Self { - self.base_config.max_block = Some(max_block); - self - } - - /// Uses the real pipeline instead of a pipeline with empty exec outputs. - #[allow(dead_code)] - pub fn with_real_pipeline(mut self) -> Self { - self.base_config.pipeline_config = TestPipelineConfig::Real; - self - } - - /// Uses the real executor instead of a executor with empty results. 
- #[allow(dead_code)] - pub fn with_real_executor(mut self) -> Self { - self.base_config.executor_config = TestExecutorConfig::Real; - self - } - - /// Disables blockchain tree driven sync. This is the same as setting the pipeline run - /// threshold to 0. - #[allow(dead_code)] - pub const fn disable_blockchain_tree_sync(mut self) -> Self { - self.base_config.pipeline_run_threshold = Some(0); - self - } - - /// Sets the client to use for network operations. - #[allow(dead_code)] - pub fn with_client( - self, - client: ClientType, - ) -> NetworkedTestConsensusEngineBuilder - where - ClientType: EthBlockClient + 'static, - { - NetworkedTestConsensusEngineBuilder { base_config: self.base_config, client: Some(client) } - } - - /// Builds the test consensus engine into a `TestConsensusEngine` and `TestEnv`. - pub fn build(self) -> (TestBeaconConsensusEngine, TestEnv>) { - reth_tracing::init_test_tracing(); - let provider_factory = - create_test_provider_factory_with_chain_spec(self.base_config.chain_spec.clone()); - - let consensus: Arc> = - match self.base_config.consensus { - TestConsensusConfig::Real => { - Arc::new(EthBeaconConsensus::new(Arc::clone(&self.base_config.chain_spec))) - } - TestConsensusConfig::Test => Arc::new(TestConsensus::default()), - }; - let payload_builder = spawn_test_payload_service::(); - - // use either noop client or a user provided client (for example TestFullBlockClient) - let client = Arc::new( - self.client - .map(Either::Left) - .unwrap_or_else(|| Either::Right(NoopFullBlockClient::default())), - ); - - // use either test executor or real executor - let executor_factory = match self.base_config.executor_config { - TestExecutorConfig::Test(results) => { - let executor_factory = MockExecutorProvider::default(); - executor_factory.extend(results); - Either::Left(executor_factory) - } - TestExecutorConfig::Real => { - Either::Right(EthExecutorProvider::ethereum(self.base_config.chain_spec.clone())) - } - }; - - let static_file_producer = - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); - - // Setup pipeline - let (tip_tx, tip_rx) = watch::channel(B256::default()); - let mut pipeline = match self.base_config.pipeline_config { - TestPipelineConfig::Test(outputs) => Pipeline::::builder() - .add_stages(TestStages::new(outputs, Default::default())) - .with_tip_sender(tip_tx), - TestPipelineConfig::Real => { - let header_downloader = ReverseHeadersDownloaderBuilder::default() - .build(client.clone(), consensus.clone().as_header_validator()) - .into_task(); - - let body_downloader = BodiesDownloaderBuilder::default() - .build( - client.clone(), - consensus.clone().as_consensus(), - provider_factory.clone(), - ) - .into_task(); - - Pipeline::::builder().add_stages(DefaultStages::new( - provider_factory.clone(), - tip_rx.clone(), - consensus.clone().as_consensus(), - header_downloader, - body_downloader, - executor_factory.clone(), - StageConfig::default(), - PruneModes::default(), - )) - } - }; - - if let Some(max_block) = self.base_config.max_block { - pipeline = pipeline.with_max_block(max_block); - } - - let pipeline = pipeline.build(provider_factory.clone(), static_file_producer); - - // Setup blockchain tree - let externals = TreeExternals::new(provider_factory.clone(), consensus, executor_factory); - let tree = Arc::new(ShareableBlockchainTree::new( - BlockchainTree::new(externals, BlockchainTreeConfig::new(1, 2, 3, 2)) - .expect("failed to create tree"), - )); - let header = self.base_config.chain_spec.genesis_header().clone(); - let 
genesis_block = SealedHeader::seal(header); - - let blockchain_provider = BlockchainProvider::with_blocks( - provider_factory.clone(), - tree, - genesis_block, - None, - None, - ); - - let pruner = Pruner::new_with_factory( - provider_factory.clone(), - vec![], - 5, - self.base_config.chain_spec.prune_delete_limit, - None, - watch::channel(FinishedExExHeight::NoExExs).1, - ); - - let mut hooks = EngineHooks::new(); - hooks.add(PruneHook::new(pruner, Box::::default())); - - let (mut engine, handle) = BeaconConsensusEngine::new( - client, - pipeline, - blockchain_provider, - Box::::default(), - Box::::default(), - None, - payload_builder, - None, - self.base_config.pipeline_run_threshold.unwrap_or(MIN_BLOCKS_FOR_PIPELINE_RUN), - hooks, - ) - .expect("failed to create consensus engine"); - - if let Some(max_block) = self.base_config.max_block { - engine.sync.set_max_block(max_block) - } - - (engine, TestEnv::new(provider_factory.db_ref().clone(), tip_rx, handle)) - } -} - -pub fn spawn_consensus_engine( - engine: TestBeaconConsensusEngine, -) -> oneshot::Receiver> -where - Client: EthBlockClient + 'static, -{ - let (tx, rx) = oneshot::channel(); - tokio::spawn(async move { - let result = engine.await; - tx.send(result).expect("failed to forward consensus engine result"); - }); - rx -} diff --git a/crates/consensus/beacon/src/lib.rs b/crates/consensus/beacon/src/lib.rs deleted file mode 100644 index f62a75f94..000000000 --- a/crates/consensus/beacon/src/lib.rs +++ /dev/null @@ -1,14 +0,0 @@ -//! Beacon consensus implementation. - -#![doc( - html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", - html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", - issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -pub use reth_ethereum_consensus::EthBeaconConsensus; - -mod engine; -pub use engine::*;
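// Editor's note (hedged, standalone sketch; not part of the deleted diff above).
// The removed `EngineSyncController` buffered downloaded blocks in a
// `BinaryHeap<Reverse<OrderedSealedBlock<...>>>` so that `pop` drains blocks in
// ascending block-number order; the reth generics in that span were garbled by
// extraction. The sketch below reproduces only the min-heap-by-number technique
// with simplified stand-in types (`SealedBlockStub`, `OrderedBlock` are illustrative
// names, not reth APIs).
use std::cmp::{Ordering, Reverse};
use std::collections::BinaryHeap;

/// Simplified stand-in for a sealed block: just the fields the ordering needs.
#[derive(Debug, Clone, PartialEq, Eq)]
struct SealedBlockStub {
    number: u64,
    hash: [u8; 32],
}

/// Wrapper that orders blocks by block number, mirroring the deleted
/// `OrderedSealedBlock` wrapper.
#[derive(Debug, Clone, PartialEq, Eq)]
struct OrderedBlock(SealedBlockStub);

impl PartialOrd for OrderedBlock {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for OrderedBlock {
    fn cmp(&self, other: &Self) -> Ordering {
        self.0.number.cmp(&other.0.number)
    }
}

fn main() {
    // `BinaryHeap` is a max-heap; wrapping entries in `Reverse` turns it into a
    // min-heap, so the lowest block number is popped first.
    let mut buffered: BinaryHeap<Reverse<OrderedBlock>> = BinaryHeap::new();
    for number in [7u64, 3, 5] {
        buffered.push(Reverse(OrderedBlock(SealedBlockStub { number, hash: [0; 32] })));
    }
    while let Some(Reverse(OrderedBlock(block))) = buffered.pop() {
        // Prints 3, 5, 7: ascending order, which is what the sync controller
        // relied on when handing buffered range blocks back to the engine.
        println!("draining block #{}", block.number);
    }
}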