From d19032fca1a49e0d826cc1f1dd8475e25493ecb5 Mon Sep 17 00:00:00 2001
From: Arsenii Kulikov
Date: Fri, 8 Nov 2024 01:29:49 +0400
Subject: [PATCH] chore: remove auto-seal consensus (#12385)

---
 .github/assets/check_wasm.sh             |   1 -
 Cargo.lock                               |  34 --
 Cargo.toml                               |   2 -
 crates/consensus/auto-seal/Cargo.toml    |  57 --
 crates/consensus/auto-seal/src/client.rs | 132 -----
 crates/consensus/auto-seal/src/lib.rs    | 690 -----
 crates/consensus/auto-seal/src/mode.rs   | 166 ------
 crates/consensus/auto-seal/src/task.rs   | 221 --------
 crates/ethereum/node/Cargo.toml          |   1 -
 crates/ethereum/node/src/node.rs         |   7 +-
 crates/ethereum/node/tests/e2e/dev.rs    |  12 -
 crates/node/builder/Cargo.toml           |   1 -
 crates/node/builder/src/launch/common.rs |  11 +-
 crates/node/builder/src/launch/engine.rs |   9 +-
 crates/node/builder/src/launch/mod.rs    |  46 +-
 crates/optimism/node/Cargo.toml          |   2 -
 crates/optimism/node/src/node.rs         |   6 +-
 docs/repo/layout.md                      |   1 -
 18 files changed, 11 insertions(+), 1388 deletions(-)
 delete mode 100644 crates/consensus/auto-seal/Cargo.toml
 delete mode 100644 crates/consensus/auto-seal/src/client.rs
 delete mode 100644 crates/consensus/auto-seal/src/lib.rs
 delete mode 100644 crates/consensus/auto-seal/src/mode.rs
 delete mode 100644 crates/consensus/auto-seal/src/task.rs

diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh
index 3b2f2d6b7..c34f82d2e 100755
--- a/.github/assets/check_wasm.sh
+++ b/.github/assets/check_wasm.sh
@@ -11,7 +11,6 @@ exclude_crates=(
   # The following are not working yet, but known to be fixable
   reth-exex-types # https://github.com/paradigmxyz/reth/issues/9946
   # The following require investigation if they can be fixed
-  reth-auto-seal-consensus
   reth-basic-payload-builder
   reth-beacon-consensus
   reth-bench
diff --git a/Cargo.lock b/Cargo.lock
index 34a6fe0f9..60bf29c2a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6385,37 +6385,6 @@ dependencies = [
  "tracing",
 ]
 
-[[package]]
-name = "reth-auto-seal-consensus"
-version = "1.1.1"
-dependencies = [
- "alloy-eips",
- "alloy-primitives",
- "alloy-rpc-types-engine",
- "futures-util",
- "reth-beacon-consensus",
- "reth-chainspec",
- "reth-consensus",
- "reth-engine-primitives",
- "reth-evm",
- "reth-execution-errors",
- "reth-execution-types",
- "reth-network-p2p",
- "reth-network-peers",
- "reth-optimism-consensus",
- "reth-primitives",
- "reth-provider",
- "reth-revm",
- "reth-stages-api",
- "reth-tokio-util",
- "reth-transaction-pool",
- "reth-trie",
- "revm-primitives",
- "tokio",
- "tokio-stream",
- "tracing",
-]
-
 [[package]]
 name = "reth-basic-payload-builder"
 version = "1.1.1"
@@ -7924,7 +7893,6 @@ dependencies = [
  "futures",
  "jsonrpsee",
  "rayon",
- "reth-auto-seal-consensus",
  "reth-beacon-consensus",
  "reth-blockchain-tree",
  "reth-chain-state",
@@ -8043,7 +8011,6 @@ dependencies = [
  "futures",
  "rand 0.8.5",
  "reth",
- "reth-auto-seal-consensus",
  "reth-basic-payload-builder",
  "reth-beacon-consensus",
  "reth-chainspec",
@@ -8266,7 +8233,6 @@ dependencies = [
  "op-alloy-rpc-types-engine",
  "parking_lot",
  "reth",
- "reth-auto-seal-consensus",
  "reth-basic-payload-builder",
  "reth-beacon-consensus",
  "reth-chainspec",
diff --git a/Cargo.toml b/Cargo.toml
index 8ff8c1eb7..dc783f071 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -20,7 +20,6 @@ members = [
     "crates/cli/runner/",
     "crates/cli/util/",
     "crates/config/",
-    "crates/consensus/auto-seal/",
     "crates/consensus/beacon/",
     "crates/consensus/common/",
     "crates/consensus/consensus/",
@@ -299,7 +298,6 @@ overflow-checks = true
 # reth
 op-reth = { path = 
"crates/optimism/bin" } reth = { path = "bin/reth" } -reth-auto-seal-consensus = { path = "crates/consensus/auto-seal" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-beacon-consensus = { path = "crates/consensus/beacon" } reth-bench = { path = "bin/reth-bench" } diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml deleted file mode 100644 index f2bfb43bc..000000000 --- a/crates/consensus/auto-seal/Cargo.toml +++ /dev/null @@ -1,57 +0,0 @@ -[package] -name = "reth-auto-seal-consensus" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true -description = "A consensus impl for local testing purposes" - -[lints] -workspace = true - -[dependencies] -# reth -reth-chainspec.workspace = true -reth-beacon-consensus.workspace = true -reth-primitives.workspace = true -reth-execution-errors.workspace = true -reth-execution-types.workspace = true -reth-network-p2p.workspace = true -reth-provider.workspace = true -reth-stages-api.workspace = true -reth-revm.workspace = true -reth-transaction-pool.workspace = true -reth-evm.workspace = true -reth-engine-primitives.workspace = true -reth-consensus.workspace = true -reth-network-peers.workspace = true -reth-tokio-util.workspace = true -reth-trie.workspace = true - -# ethereum -alloy-eips.workspace = true -alloy-primitives.workspace = true -revm-primitives.workspace = true -alloy-rpc-types-engine.workspace = true - -# optimism -reth-optimism-consensus = { workspace = true, optional = true } - -# async -futures-util.workspace = true -tokio = { workspace = true, features = ["sync", "time"] } -tokio-stream.workspace = true -tracing.workspace = true - -[features] -optimism = [ - "reth-provider/optimism", - "reth-optimism-consensus", - "reth-beacon-consensus/optimism", - "reth-execution-types/optimism", - "reth-optimism-consensus?/optimism", - "reth-primitives/optimism", - "revm-primitives/optimism" -] diff --git a/crates/consensus/auto-seal/src/client.rs b/crates/consensus/auto-seal/src/client.rs deleted file mode 100644 index 0083192d7..000000000 --- a/crates/consensus/auto-seal/src/client.rs +++ /dev/null @@ -1,132 +0,0 @@ -//! This includes download client implementations for auto sealing miners. - -use crate::Storage; -use alloy_eips::BlockHashOrNumber; -use alloy_primitives::B256; -use reth_network_p2p::{ - bodies::client::{BodiesClient, BodiesFut}, - download::DownloadClient, - headers::client::{HeadersClient, HeadersDirection, HeadersFut, HeadersRequest}, - priority::Priority, -}; -use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{BlockBody, Header}; -use std::fmt::Debug; -use tracing::{trace, warn}; - -/// A download client that polls the miner for transactions and assembles blocks to be returned in -/// the download process. -/// -/// When polled, the miner will assemble blocks when miners produce ready transactions and store the -/// blocks in memory. -#[derive(Debug, Clone)] -pub struct AutoSealClient { - storage: Storage, -} - -impl AutoSealClient { - pub(crate) const fn new(storage: Storage) -> Self { - Self { storage } - } - - async fn fetch_headers(&self, request: HeadersRequest) -> Vec
{ - trace!(target: "consensus::auto", ?request, "received headers request"); - - let storage = self.storage.read().await; - let HeadersRequest { start, limit, direction } = request; - let mut headers = Vec::new(); - - let mut block: BlockHashOrNumber = match start { - BlockHashOrNumber::Hash(start) => start.into(), - BlockHashOrNumber::Number(num) => { - if let Some(hash) = storage.block_hash(num) { - hash.into() - } else { - warn!(target: "consensus::auto", num, "no matching block found"); - return headers - } - } - }; - - for _ in 0..limit { - // fetch from storage - if let Some(header) = storage.header_by_hash_or_number(block) { - match direction { - HeadersDirection::Falling => block = header.parent_hash.into(), - HeadersDirection::Rising => { - let next = header.number + 1; - block = next.into() - } - } - headers.push(header); - } else { - break - } - } - - trace!(target: "consensus::auto", ?headers, "returning headers"); - - headers - } - - async fn fetch_bodies(&self, hashes: Vec) -> Vec { - trace!(target: "consensus::auto", ?hashes, "received bodies request"); - let storage = self.storage.read().await; - let mut bodies = Vec::new(); - for hash in hashes { - if let Some(body) = storage.bodies.get(&hash).cloned() { - bodies.push(body); - } else { - break - } - } - - trace!(target: "consensus::auto", ?bodies, "returning bodies"); - - bodies - } -} - -impl HeadersClient for AutoSealClient { - type Output = HeadersFut; - - fn get_headers_with_priority( - &self, - request: HeadersRequest, - _priority: Priority, - ) -> Self::Output { - let this = self.clone(); - Box::pin(async move { - let headers = this.fetch_headers(request).await; - Ok(WithPeerId::new(PeerId::random(), headers)) - }) - } -} - -impl BodiesClient for AutoSealClient { - type Output = BodiesFut; - - fn get_block_bodies_with_priority( - &self, - hashes: Vec, - _priority: Priority, - ) -> Self::Output { - let this = self.clone(); - Box::pin(async move { - let bodies = this.fetch_bodies(hashes).await; - Ok(WithPeerId::new(PeerId::random(), bodies)) - }) - } -} - -impl DownloadClient for AutoSealClient { - fn report_bad_message(&self, _peer_id: PeerId) { - warn!("Reported a bad message on a miner, we should never produce bad blocks"); - // noop - } - - fn num_connected_peers(&self) -> usize { - // no such thing as connected peers when we are mining ourselves - 1 - } -} diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs deleted file mode 100644 index a2f9fa4fa..000000000 --- a/crates/consensus/auto-seal/src/lib.rs +++ /dev/null @@ -1,690 +0,0 @@ -//! A [Consensus] implementation for local testing purposes -//! that automatically seals blocks. -//! -//! The Mining task polls a [`MiningMode`], and will return a list of transactions that are ready to -//! be mined. -//! -//! These downloaders poll the miner, assemble the block, and return transactions that are ready to -//! be mined. 
- -#![doc( - html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", - html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", - issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -use alloy_eips::{eip1898::BlockHashOrNumber, eip7685::Requests}; -use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; -use reth_beacon_consensus::BeaconEngineMessage; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; -use reth_engine_primitives::EngineTypes; -use reth_execution_errors::{ - BlockExecutionError, BlockValidationError, InternalBlockExecutionError, -}; -use reth_execution_types::ExecutionOutcome; -use reth_primitives::{ - proofs, Block, BlockBody, BlockWithSenders, Header, SealedBlock, SealedHeader, - TransactionSigned, Withdrawals, -}; -use reth_provider::{BlockReaderIdExt, StateProviderFactory, StateRootProvider}; -use reth_revm::database::StateProviderDatabase; -use reth_transaction_pool::TransactionPool; -use reth_trie::HashedPostState; -use revm_primitives::calc_excess_blob_gas; -use std::{ - collections::HashMap, - fmt::Debug, - sync::Arc, - time::{SystemTime, UNIX_EPOCH}, -}; -use tokio::sync::{mpsc::UnboundedSender, RwLock, RwLockReadGuard, RwLockWriteGuard}; -use tracing::trace; - -mod client; -mod mode; -mod task; - -pub use crate::client::AutoSealClient; -pub use mode::{FixedBlockTimeMiner, MiningMode, ReadyTransactionMiner}; -use reth_evm::execute::{BlockExecutorProvider, Executor}; -pub use task::MiningTask; - -/// A consensus implementation intended for local development and testing purposes. -#[derive(Debug, Clone)] -#[allow(dead_code)] -pub struct AutoSealConsensus { - /// Configuration - chain_spec: Arc, -} - -impl AutoSealConsensus { - /// Create a new instance of [`AutoSealConsensus`] - pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } - } -} - -impl Consensus for AutoSealConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { - Ok(()) - } - - fn validate_header_against_parent( - &self, - _header: &SealedHeader, - _parent: &SealedHeader, - ) -> Result<(), ConsensusError> { - Ok(()) - } - - fn validate_header_with_total_difficulty( - &self, - _header: &Header, - _total_difficulty: U256, - ) -> Result<(), ConsensusError> { - Ok(()) - } - - fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { - Ok(()) - } - - fn validate_block_post_execution( - &self, - _block: &BlockWithSenders, - _input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError> { - Ok(()) - } -} - -/// Builder type for configuring the setup -#[derive(Debug)] -pub struct AutoSealBuilder { - client: Client, - consensus: AutoSealConsensus, - pool: Pool, - mode: MiningMode, - storage: Storage, - to_engine: UnboundedSender>, - evm_config: EvmConfig, -} - -// === impl AutoSealBuilder === - -impl - AutoSealBuilder -where - Client: BlockReaderIdExt, - Pool: TransactionPool, - Engine: EngineTypes, - ChainSpec: EthChainSpec, -{ - /// Creates a new builder instance to configure all parts. 
- pub fn new( - chain_spec: Arc, - client: Client, - pool: Pool, - to_engine: UnboundedSender>, - mode: MiningMode, - evm_config: EvmConfig, - ) -> Self { - let latest_header = client.latest_header().ok().flatten().unwrap_or_else(|| { - SealedHeader::new(chain_spec.genesis_header().clone(), chain_spec.genesis_hash()) - }); - - Self { - storage: Storage::new(latest_header), - client, - consensus: AutoSealConsensus::new(chain_spec), - pool, - mode, - to_engine, - evm_config, - } - } - - /// Sets the [`MiningMode`] it operates in, default is [`MiningMode::Auto`] - pub fn mode(mut self, mode: MiningMode) -> Self { - self.mode = mode; - self - } - - /// Consumes the type and returns all components - #[track_caller] - pub fn build( - self, - ) -> ( - AutoSealConsensus, - AutoSealClient, - MiningTask, - ) { - let Self { client, consensus, pool, mode, storage, to_engine, evm_config } = self; - let auto_client = AutoSealClient::new(storage.clone()); - let task = MiningTask::new( - Arc::clone(&consensus.chain_spec), - mode, - to_engine, - storage, - client, - pool, - evm_config, - ); - (consensus, auto_client, task) - } -} - -/// In memory storage -#[derive(Debug, Clone, Default)] -pub(crate) struct Storage { - inner: Arc>, -} - -// == impl Storage === - -impl Storage { - /// Initializes the [Storage] with the given best block. This should be initialized with the - /// highest block in the chain, if there is a chain already stored on-disk. - fn new(best_block: SealedHeader) -> Self { - let (header, best_hash) = best_block.split(); - let mut storage = StorageInner { - best_hash, - total_difficulty: header.difficulty, - best_block: header.number, - ..Default::default() - }; - storage.headers.insert(header.number, header); - storage.bodies.insert(best_hash, BlockBody::default()); - Self { inner: Arc::new(RwLock::new(storage)) } - } - - /// Returns the write lock of the storage - pub(crate) async fn write(&self) -> RwLockWriteGuard<'_, StorageInner> { - self.inner.write().await - } - - /// Returns the read lock of the storage - pub(crate) async fn read(&self) -> RwLockReadGuard<'_, StorageInner> { - self.inner.read().await - } -} - -/// In-memory storage for the chain the auto seal engine is building. -#[derive(Default, Debug)] -pub(crate) struct StorageInner { - /// Headers buffered for download. - pub(crate) headers: HashMap, - /// A mapping between block hash and number. - pub(crate) hash_to_number: HashMap, - /// Bodies buffered for download. - pub(crate) bodies: HashMap, - /// Tracks best block - pub(crate) best_block: u64, - /// Tracks hash of best block - pub(crate) best_hash: B256, - /// The total difficulty of the chain until this block - pub(crate) total_difficulty: U256, -} - -// === impl StorageInner === - -impl StorageInner { - /// Returns the block hash for the given block number if it exists. - pub(crate) fn block_hash(&self, num: u64) -> Option { - self.hash_to_number.iter().find_map(|(k, v)| num.eq(v).then_some(*k)) - } - - /// Returns the matching header if it exists. - pub(crate) fn header_by_hash_or_number( - &self, - hash_or_num: BlockHashOrNumber, - ) -> Option
{ - let num = match hash_or_num { - BlockHashOrNumber::Hash(hash) => self.hash_to_number.get(&hash).copied()?, - BlockHashOrNumber::Number(num) => num, - }; - self.headers.get(&num).cloned() - } - - /// Inserts a new header+body pair - pub(crate) fn insert_new_block(&mut self, mut header: Header, body: BlockBody) { - header.number = self.best_block + 1; - header.parent_hash = self.best_hash; - - self.best_hash = header.hash_slow(); - self.best_block = header.number; - self.total_difficulty += header.difficulty; - - trace!(target: "consensus::auto", num=self.best_block, hash=?self.best_hash, "inserting new block"); - self.headers.insert(header.number, header); - self.bodies.insert(self.best_hash, body); - self.hash_to_number.insert(self.best_hash, self.best_block); - } - - /// Fills in pre-execution header fields based on the current best block and given - /// transactions. - pub(crate) fn build_header_template( - &self, - timestamp: u64, - transactions: &[TransactionSigned], - ommers: &[Header], - withdrawals: Option<&Withdrawals>, - requests: Option<&Requests>, - chain_spec: &ChainSpec, - ) -> Header - where - ChainSpec: EthChainSpec + EthereumHardforks, - { - // check previous block for base fee - let base_fee_per_gas = self.headers.get(&self.best_block).and_then(|parent| { - parent.next_block_base_fee(chain_spec.base_fee_params_at_timestamp(timestamp)) - }); - - let blob_gas_used = chain_spec.is_cancun_active_at_timestamp(timestamp).then(|| { - transactions - .iter() - .filter_map(|tx| tx.transaction.as_eip4844()) - .map(|blob_tx| blob_tx.blob_gas()) - .sum::() - }); - - let mut header = Header { - parent_hash: self.best_hash, - ommers_hash: proofs::calculate_ommers_root(ommers), - transactions_root: proofs::calculate_transaction_root(transactions), - withdrawals_root: withdrawals.map(|w| proofs::calculate_withdrawals_root(w)), - difficulty: U256::from(2), - number: self.best_block + 1, - gas_limit: chain_spec.max_gas_limit(), - timestamp, - base_fee_per_gas, - blob_gas_used, - requests_hash: requests.map(|r| r.requests_hash()), - ..Default::default() - }; - - if chain_spec.is_cancun_active_at_timestamp(timestamp) { - let parent = self.headers.get(&self.best_block); - header.parent_beacon_block_root = - parent.and_then(|parent| parent.parent_beacon_block_root); - header.blob_gas_used = Some(0); - - let (parent_excess_blob_gas, parent_blob_gas_used) = match parent { - Some(parent) if chain_spec.is_cancun_active_at_timestamp(parent.timestamp) => ( - parent.excess_blob_gas.unwrap_or_default(), - parent.blob_gas_used.unwrap_or_default(), - ), - _ => (0, 0), - }; - header.excess_blob_gas = - Some(calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) - } - - header - } - - /// Builds and executes a new block with the given transactions, on the provided executor. - /// - /// This returns the header of the executed block, as well as the poststate from execution. - #[allow(clippy::too_many_arguments)] - pub(crate) fn build_and_execute( - &mut self, - transactions: Vec, - ommers: Vec
, - provider: &Provider, - chain_spec: Arc, - executor: &Executor, - ) -> Result<(SealedHeader, ExecutionOutcome), BlockExecutionError> - where - Executor: BlockExecutorProvider, - Provider: StateProviderFactory, - ChainSpec: EthChainSpec + EthereumHardforks, - { - let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); - - // if shanghai is active, include empty withdrawals - let withdrawals = - chain_spec.is_shanghai_active_at_timestamp(timestamp).then_some(Withdrawals::default()); - // if prague is active, include empty requests - let requests = - chain_spec.is_prague_active_at_timestamp(timestamp).then_some(Requests::default()); - - let header = self.build_header_template( - timestamp, - &transactions, - &ommers, - withdrawals.as_ref(), - requests.as_ref(), - &chain_spec, - ); - - let block = Block { - header, - body: BlockBody { - transactions, - ommers: ommers.clone(), - withdrawals: withdrawals.clone(), - }, - } - .with_recovered_senders() - .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?; - - trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); - - let mut db = StateProviderDatabase::new( - provider.latest().map_err(InternalBlockExecutionError::LatestBlock)?, - ); - - // execute the block - let block_execution_output = - executor.executor(&mut db).execute((&block, U256::ZERO).into())?; - let gas_used = block_execution_output.gas_used; - let execution_outcome = ExecutionOutcome::from((block_execution_output, block.number)); - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); - - // todo(onbjerg): we should not pass requests around as this is building a block, which - // means we need to extract the requests from the execution output and compute the requests - // root here - - let Block { mut header, body, .. 
} = block.block; - let body = BlockBody { transactions: body.transactions, ommers, withdrawals }; - - trace!(target: "consensus::auto", ?execution_outcome, ?header, ?body, "executed block, calculating state root and completing header"); - - // now we need to update certain header fields with the results of the execution - header.state_root = db.state_root(hashed_state)?; - header.gas_used = gas_used; - - let receipts = execution_outcome.receipts_by_block(header.number); - - // update logs bloom - let receipts_with_bloom = - receipts.iter().map(|r| r.as_ref().unwrap().bloom_slow()).collect::>(); - header.logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | *r); - - // update receipts root - header.receipts_root = { - #[cfg(feature = "optimism")] - let receipts_root = execution_outcome - .generic_receipts_root_slow(header.number, |receipts| { - reth_optimism_consensus::calculate_receipt_root_no_memo_optimism( - receipts, - &chain_spec, - header.timestamp, - ) - }) - .expect("Receipts is present"); - - #[cfg(not(feature = "optimism"))] - let receipts_root = - execution_outcome.receipts_root_slow(header.number).expect("Receipts is present"); - - receipts_root - }; - trace!(target: "consensus::auto", root=?header.state_root, ?body, "calculated root"); - - // finally insert into storage - self.insert_new_block(header.clone(), body); - - // set new header with hash that should have been updated by insert_new_block - let new_header = SealedHeader::new(header, self.best_hash); - - Ok((new_header, execution_outcome)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use reth_chainspec::{ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition}; - use reth_primitives::Transaction; - - #[test] - fn test_block_hash() { - let mut storage = StorageInner::default(); - - // Define two block hashes and their corresponding block numbers. - let block_hash_1: BlockHash = B256::random(); - let block_number_1: BlockNumber = 1; - let block_hash_2: BlockHash = B256::random(); - let block_number_2: BlockNumber = 2; - - // Insert the block number and hash pairs into the `hash_to_number` map. - storage.hash_to_number.insert(block_hash_1, block_number_1); - storage.hash_to_number.insert(block_hash_2, block_number_2); - - // Verify that `block_hash` returns the correct block hash for the given block number. - assert_eq!(storage.block_hash(block_number_1), Some(block_hash_1)); - assert_eq!(storage.block_hash(block_number_2), Some(block_hash_2)); - - // Test that `block_hash` returns `None` for a non-existent block number. - let block_number_3: BlockNumber = 3; - assert_eq!(storage.block_hash(block_number_3), None); - } - - #[test] - fn test_header_by_hash_or_number() { - let mut storage = StorageInner::default(); - - // Define block numbers, headers, and hashes. - let block_number_1: u64 = 1; - let block_number_2: u64 = 2; - let header_1 = Header { number: block_number_1, ..Default::default() }; - let header_2 = Header { number: block_number_2, ..Default::default() }; - let block_hash_1: BlockHash = B256::random(); - let block_hash_2: BlockHash = B256::random(); - - // Insert headers and hash-to-number mappings. - storage.headers.insert(block_number_1, header_1.clone()); - storage.headers.insert(block_number_2, header_2.clone()); - storage.hash_to_number.insert(block_hash_1, block_number_1); - storage.hash_to_number.insert(block_hash_2, block_number_2); - - // Test header retrieval by block number. 
- assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Number(block_number_1)), - Some(header_1.clone()) - ); - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Number(block_number_2)), - Some(header_2.clone()) - ); - - // Test header retrieval by block hash. - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Hash(block_hash_1)), - Some(header_1) - ); - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Hash(block_hash_2)), - Some(header_2) - ); - - // Test non-existent block number and hash. - assert_eq!(storage.header_by_hash_or_number(BlockHashOrNumber::Number(999)), None); - let non_existent_hash: BlockHash = B256::random(); - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Hash(non_existent_hash)), - None - ); - } - - #[test] - fn test_insert_new_block() { - let mut storage = StorageInner::default(); - - // Define headers and block bodies. - let header_1 = Header { difficulty: U256::from(100), ..Default::default() }; - let body_1 = BlockBody::default(); - let header_2 = Header { difficulty: U256::from(200), ..Default::default() }; - let body_2 = BlockBody::default(); - - // Insert the first block. - storage.insert_new_block(header_1.clone(), body_1.clone()); - let best_block_1 = storage.best_block; - let best_hash_1 = storage.best_hash; - - // Verify the block was inserted correctly. - assert_eq!( - storage.headers.get(&best_block_1), - Some(&Header { number: 1, ..header_1.clone() }) - ); - assert_eq!(storage.bodies.get(&best_hash_1), Some(&body_1)); - assert_eq!(storage.hash_to_number.get(&best_hash_1), Some(&best_block_1)); - - // Insert the second block. - storage.insert_new_block(header_2.clone(), body_2.clone()); - let best_block_2 = storage.best_block; - let best_hash_2 = storage.best_hash; - - // Verify the second block was inserted correctly. - assert_eq!( - storage.headers.get(&best_block_2), - Some(&Header { - number: 2, - parent_hash: Header { number: 1, ..header_1 }.hash_slow(), - ..header_2 - }) - ); - assert_eq!(storage.bodies.get(&best_hash_2), Some(&body_2)); - assert_eq!(storage.hash_to_number.get(&best_hash_2), Some(&best_block_2)); - - // Check that the total difficulty was updated. 
- assert_eq!(storage.total_difficulty, header_1.difficulty + header_2.difficulty); - } - - #[test] - fn test_build_basic_header_template() { - let mut storage = StorageInner::default(); - let chain_spec = ChainSpec::default(); - - let best_block_number = 1; - let best_block_hash = B256::random(); - let timestamp = 1_600_000_000; - - // Set up best block information - storage.best_block = best_block_number; - storage.best_hash = best_block_hash; - - // Build header template - let header = storage.build_header_template( - timestamp, - &[], // no transactions - &[], // no ommers - None, // no withdrawals - None, // no requests - &chain_spec, - ); - - // Verify basic fields - assert_eq!(header.parent_hash, best_block_hash); - assert_eq!(header.number, best_block_number + 1); - assert_eq!(header.timestamp, timestamp); - assert_eq!(header.gas_limit, chain_spec.max_gas_limit); - } - - #[test] - fn test_ommers_and_transactions_roots() { - let storage = StorageInner::default(); - let chain_spec = ChainSpec::default(); - let timestamp = 1_600_000_000; - - // Setup ommers and transactions - let ommers = vec![Header::default()]; - let transactions = vec![TransactionSigned::default()]; - - // Build header template - let header = storage.build_header_template( - timestamp, - &transactions, - &ommers, - None, // no withdrawals - None, // no requests - &chain_spec, - ); - - // Verify ommers and transactions roots - assert_eq!(header.ommers_hash, proofs::calculate_ommers_root(&ommers)); - assert_eq!(header.transactions_root, proofs::calculate_transaction_root(&transactions)); - } - - // Test base fee calculation from the parent block - #[test] - fn test_base_fee_calculation() { - let mut storage = StorageInner::default(); - let chain_spec = ChainSpec::default(); - let timestamp = 1_600_000_000; - - // Set up the parent header with base fee - let base_fee = Some(100); - let parent_header = Header { base_fee_per_gas: base_fee, ..Default::default() }; - storage.headers.insert(storage.best_block, parent_header); - - // Build header template - let header = storage.build_header_template( - timestamp, - &[], // no transactions - &[], // no ommers - None, // no withdrawals - None, // no requests - &chain_spec, - ); - - // Verify base fee is correctly propagated - assert_eq!(header.base_fee_per_gas, base_fee); - } - - // Test blob gas and excess blob gas calculation when Cancun is active - #[test] - fn test_blob_gas_calculation_cancun() { - let storage = StorageInner::default(); - let chain_spec = ChainSpec { - hardforks: ChainHardforks::new(vec![( - EthereumHardfork::Cancun.boxed(), - ForkCondition::Timestamp(25), - )]), - ..Default::default() - }; - let timestamp = 26; - - // Set up a transaction with blob gas - let blob_tx = TransactionSigned { - transaction: Transaction::Eip4844(Default::default()), - ..Default::default() - }; - let transactions = vec![blob_tx]; - - // Build header template - let header = storage.build_header_template( - timestamp, - &transactions, - &[], // no ommers - None, // no withdrawals - None, // no requests - &chain_spec, - ); - - // Verify that the header has the correct fields including blob gas - assert_eq!( - header, - Header { - parent_hash: B256::ZERO, - ommers_hash: proofs::calculate_ommers_root(&[]), - transactions_root: proofs::calculate_transaction_root(&transactions), - withdrawals_root: None, - difficulty: U256::from(2), - number: 1, - gas_limit: chain_spec.max_gas_limit, - timestamp, - base_fee_per_gas: None, - blob_gas_used: Some(0), - requests_hash: None, - 
excess_blob_gas: Some(0), - ..Default::default() - } - ); - } -} diff --git a/crates/consensus/auto-seal/src/mode.rs b/crates/consensus/auto-seal/src/mode.rs deleted file mode 100644 index 82750c8e4..000000000 --- a/crates/consensus/auto-seal/src/mode.rs +++ /dev/null @@ -1,166 +0,0 @@ -//! The mode the auto seal miner is operating in. - -use alloy_primitives::TxHash; -use futures_util::{stream::Fuse, StreamExt}; -use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; -use std::{ - fmt, - pin::Pin, - sync::Arc, - task::{Context, Poll}, - time::Duration, -}; -use tokio::{sync::mpsc::Receiver, time::Interval}; -use tokio_stream::{wrappers::ReceiverStream, Stream}; - -/// Mode of operations for the `Miner` -#[derive(Debug)] -pub enum MiningMode { - /// A miner that does nothing - None, - /// A miner that listens for new transactions that are ready. - /// - /// Either one transaction will be mined per block, or any number of transactions will be - /// allowed - Auto(ReadyTransactionMiner), - /// A miner that constructs a new block every `interval` tick - FixedBlockTime(FixedBlockTimeMiner), -} - -// === impl MiningMode === - -impl MiningMode { - /// Creates a new instant mining mode that listens for new transactions and tries to build - /// non-empty blocks as soon as transactions arrive. - pub fn instant(max_transactions: usize, listener: Receiver) -> Self { - Self::Auto(ReadyTransactionMiner { - max_transactions, - has_pending_txs: None, - rx: ReceiverStream::new(listener).fuse(), - }) - } - - /// Creates a new interval miner that builds a block ever `duration`. - pub fn interval(duration: Duration) -> Self { - Self::FixedBlockTime(FixedBlockTimeMiner::new(duration)) - } - - /// polls the Pool and returns those transactions that should be put in a block, if any. - pub(crate) fn poll( - &mut self, - pool: &Pool, - cx: &mut Context<'_>, - ) -> Poll::Transaction>>>> - where - Pool: TransactionPool, - { - match self { - Self::None => Poll::Pending, - Self::Auto(miner) => miner.poll(pool, cx), - Self::FixedBlockTime(miner) => miner.poll(pool, cx), - } - } -} - -impl fmt::Display for MiningMode { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let kind = match self { - Self::None => "None", - Self::Auto(_) => "Auto", - Self::FixedBlockTime(_) => "FixedBlockTime", - }; - write!(f, "{kind}") - } -} - -/// A miner that's supposed to create a new block every `interval`, mining all transactions that are -/// ready at that time. 
-/// -/// The default blocktime is set to 6 seconds -#[derive(Debug)] -pub struct FixedBlockTimeMiner { - /// The interval this fixed block time miner operates with - interval: Interval, -} - -// === impl FixedBlockTimeMiner === - -impl FixedBlockTimeMiner { - /// Creates a new instance with an interval of `duration` - pub(crate) fn new(duration: Duration) -> Self { - let start = tokio::time::Instant::now() + duration; - Self { interval: tokio::time::interval_at(start, duration) } - } - - fn poll( - &mut self, - pool: &Pool, - cx: &mut Context<'_>, - ) -> Poll::Transaction>>>> - where - Pool: TransactionPool, - { - if self.interval.poll_tick(cx).is_ready() { - // drain the pool - return Poll::Ready(pool.best_transactions().collect()) - } - Poll::Pending - } -} - -impl Default for FixedBlockTimeMiner { - fn default() -> Self { - Self::new(Duration::from_secs(6)) - } -} - -/// A miner that Listens for new ready transactions -pub struct ReadyTransactionMiner { - /// how many transactions to mine per block - max_transactions: usize, - /// stores whether there are pending transactions (if known) - has_pending_txs: Option, - /// Receives hashes of transactions that are ready - rx: Fuse>, -} - -// === impl ReadyTransactionMiner === - -impl ReadyTransactionMiner { - fn poll( - &mut self, - pool: &Pool, - cx: &mut Context<'_>, - ) -> Poll::Transaction>>>> - where - Pool: TransactionPool, - { - // drain the notification stream - while let Poll::Ready(Some(_hash)) = Pin::new(&mut self.rx).poll_next(cx) { - self.has_pending_txs = Some(true); - } - - if self.has_pending_txs == Some(false) { - return Poll::Pending - } - - let transactions = pool.best_transactions().take(self.max_transactions).collect::>(); - - // there are pending transactions if we didn't drain the pool - self.has_pending_txs = Some(transactions.len() >= self.max_transactions); - - if transactions.is_empty() { - return Poll::Pending - } - - Poll::Ready(transactions) - } -} - -impl fmt::Debug for ReadyTransactionMiner { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ReadyTransactionMiner") - .field("max_transactions", &self.max_transactions) - .finish_non_exhaustive() - } -} diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs deleted file mode 100644 index 75ddda908..000000000 --- a/crates/consensus/auto-seal/src/task.rs +++ /dev/null @@ -1,221 +0,0 @@ -use crate::{mode::MiningMode, Storage}; -use alloy_rpc_types_engine::ForkchoiceState; -use futures_util::{future::BoxFuture, FutureExt}; -use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; -use reth_evm::execute::BlockExecutorProvider; -use reth_provider::{CanonChainTracker, StateProviderFactory}; -use reth_stages_api::PipelineEvent; -use reth_tokio_util::EventStream; -use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; -use std::{ - collections::VecDeque, - future::Future, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; -use tokio::sync::{mpsc::UnboundedSender, oneshot}; -use tracing::{debug, error, warn}; - -/// A Future that listens for new ready transactions and puts new blocks into storage -pub struct MiningTask { - /// The configured chain spec - chain_spec: Arc, - /// The client used to interact with the state - client: Client, - /// The active miner - miner: MiningMode, - /// Single active future that inserts a new block into `storage` - 
insert_task: Option>>>, - /// Shared storage to insert new blocks - storage: Storage, - /// Pool where transactions are stored - pool: Pool, - /// backlog of sets of transactions ready to be mined - queued: VecDeque::Transaction>>>>, - // TODO: ideally this would just be a sender of hashes - to_engine: UnboundedSender>, - /// The pipeline events to listen on - pipe_line_events: Option>, - /// The type used for block execution - block_executor: Executor, -} - -// === impl MiningTask === - -impl - MiningTask -{ - /// Creates a new instance of the task - #[allow(clippy::too_many_arguments)] - pub(crate) fn new( - chain_spec: Arc, - miner: MiningMode, - to_engine: UnboundedSender>, - storage: Storage, - client: Client, - pool: Pool, - block_executor: Executor, - ) -> Self { - Self { - chain_spec, - client, - miner, - insert_task: None, - storage, - pool, - to_engine, - queued: Default::default(), - pipe_line_events: None, - block_executor, - } - } - - /// Sets the pipeline events to listen on. - pub fn set_pipeline_events(&mut self, events: EventStream) { - self.pipe_line_events = Some(events); - } -} - -impl Future - for MiningTask -where - Client: StateProviderFactory + CanonChainTracker + Clone + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, - Engine: EngineTypes, - Executor: BlockExecutorProvider, - ChainSpec: EthChainSpec + EthereumHardforks + 'static, -{ - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.get_mut(); - - // this drives block production and - loop { - if let Poll::Ready(transactions) = this.miner.poll(&this.pool, cx) { - // miner returned a set of transaction that we feed to the producer - this.queued.push_back(transactions); - } - - if this.insert_task.is_none() { - if this.queued.is_empty() { - // nothing to insert - break - } - - // ready to queue in new insert task - let storage = this.storage.clone(); - let transactions = this.queued.pop_front().expect("not empty"); - - let to_engine = this.to_engine.clone(); - let client = this.client.clone(); - let chain_spec = Arc::clone(&this.chain_spec); - let events = this.pipe_line_events.take(); - let executor = this.block_executor.clone(); - - // Create the mining future that creates a block, notifies the engine that drives - // the pipeline - this.insert_task = Some(Box::pin(async move { - let mut storage = storage.write().await; - - let transactions: Vec<_> = transactions - .into_iter() - .map(|tx| { - let recovered = tx.to_recovered_transaction(); - recovered.into_signed() - }) - .collect(); - let ommers = vec![]; - - match storage.build_and_execute( - transactions.clone(), - ommers.clone(), - &client, - chain_spec, - &executor, - ) { - Ok((new_header, _bundle_state)) => { - let state = ForkchoiceState { - head_block_hash: new_header.hash(), - finalized_block_hash: new_header.hash(), - safe_block_hash: new_header.hash(), - }; - drop(storage); - - // TODO: make this a future - // await the fcu call rx for SYNCING, then wait for a VALID response - loop { - // send the new update to the engine, this will trigger the engine - // to download and execute the block we just inserted - let (tx, rx) = oneshot::channel(); - let _ = to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { - state, - payload_attrs: None, - tx, - version: EngineApiMessageVersion::default(), - }); - debug!(target: "consensus::auto", ?state, "Sent fork choice update"); - - match rx.await.unwrap() { - Ok(fcu_response) => { - match fcu_response.forkchoice_status() { - ForkchoiceStatus::Valid 
=> break, - ForkchoiceStatus::Invalid => { - error!(target: "consensus::auto", ?fcu_response, "Forkchoice update returned invalid response"); - return None - } - ForkchoiceStatus::Syncing => { - debug!(target: "consensus::auto", ?fcu_response, "Forkchoice update returned SYNCING, waiting for VALID"); - // wait for the next fork choice update - continue - } - } - } - Err(err) => { - error!(target: "consensus::auto", %err, "Autoseal fork choice update failed"); - return None - } - } - } - - // update canon chain for rpc - client.set_canonical_head(new_header.clone()); - client.set_safe(new_header.clone()); - client.set_finalized(new_header.clone()); - } - Err(err) => { - warn!(target: "consensus::auto", %err, "failed to execute block") - } - } - - events - })); - } - - if let Some(mut fut) = this.insert_task.take() { - match fut.poll_unpin(cx) { - Poll::Ready(events) => { - this.pipe_line_events = events; - } - Poll::Pending => { - this.insert_task = Some(fut); - break - } - } - } - } - - Poll::Pending - } -} - -impl - std::fmt::Debug for MiningTask -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("MiningTask").finish_non_exhaustive() - } -} diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 49663ffc2..69bbeeb5b 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -24,7 +24,6 @@ reth-network.workspace = true reth-evm.workspace = true reth-evm-ethereum.workspace = true reth-consensus.workspace = true -reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true reth-rpc.workspace = true reth-node-api.workspace = true diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 17a952a58..95542411d 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -2,7 +2,6 @@ use std::sync::Arc; -use reth_auto_seal_consensus::AutoSealConsensus; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; @@ -335,11 +334,7 @@ where type Consensus = Arc; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { - if ctx.is_dev() { - Ok(Arc::new(AutoSealConsensus::new(ctx.chain_spec()))) - } else { - Ok(Arc::new(EthBeaconConsensus::new(ctx.chain_spec()))) - } + Ok(Arc::new(EthBeaconConsensus::new(ctx.chain_spec()))) } } diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index f0fcaf645..b6d0ffcfa 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -1,12 +1,10 @@ use std::sync::Arc; -use crate::utils::eth_payload_attributes; use alloy_genesis::Genesis; use alloy_primitives::{b256, hex}; use futures::StreamExt; use reth::{args::DevArgs, rpc::api::eth::helpers::EthTransactions}; use reth_chainspec::ChainSpec; -use reth_e2e_test_utils::setup; use reth_node_api::FullNodeComponents; use reth_node_builder::{ rpc::RethRpcAddOns, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle, @@ -17,16 +15,6 @@ use reth_tasks::TaskManager; #[tokio::test] async fn can_run_dev_node() -> eyre::Result<()> { - reth_tracing::init_test_tracing(); - let (mut nodes, _tasks, _) = - setup::(1, custom_chain(), true, eth_payload_attributes).await?; - - assert_chain_advances(nodes.pop().unwrap().inner).await; - Ok(()) -} - -#[tokio::test] -async fn can_run_dev_node_new_engine() -> eyre::Result<()> { reth_tracing::init_test_tracing(); let 
tasks = TaskManager::current();
     let exec = tasks.executor();
diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml
index 86f755cb9..4ef2b0728 100644
--- a/crates/node/builder/Cargo.toml
+++ b/crates/node/builder/Cargo.toml
@@ -13,7 +13,6 @@ workspace = true
 
 [dependencies]
 ## reth
-reth-auto-seal-consensus.workspace = true
 reth-beacon-consensus.workspace = true
 reth-blockchain-tree.workspace = true
 reth-chain-state.workspace = true
diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs
index ac2339fa6..856f86c6f 100644
--- a/crates/node/builder/src/launch/common.rs
+++ b/crates/node/builder/src/launch/common.rs
@@ -5,7 +5,6 @@ use std::{sync::Arc, thread::available_parallelism};
 use alloy_primitives::{BlockNumber, B256};
 use eyre::{Context, OptionExt};
 use rayon::ThreadPoolBuilder;
-use reth_auto_seal_consensus::MiningMode;
 use reth_beacon_consensus::EthBeaconConsensus;
 use reth_blockchain_tree::{
     BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals,
@@ -16,6 +15,7 @@ use reth_consensus::Consensus;
 use reth_db_api::database::Database;
 use reth_db_common::init::{init_genesis, InitDatabaseError};
 use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader};
+use reth_engine_local::MiningMode;
 use reth_engine_tree::tree::{InvalidBlockHook, InvalidBlockHooks, NoopInvalidBlockHook};
 use reth_evm::noop::NoopBlockExecutorProvider;
 use reth_fs_util as fs;
@@ -52,8 +52,9 @@ use reth_stages::{sets::DefaultStages, MetricEvent, PipelineBuilder, PipelineTar
 use reth_static_file::StaticFileProducer;
 use reth_tasks::TaskExecutor;
 use reth_tracing::tracing::{debug, error, info, warn};
+use reth_transaction_pool::TransactionPool;
 use tokio::sync::{
-    mpsc::{unbounded_channel, Receiver, UnboundedSender},
+    mpsc::{unbounded_channel, UnboundedSender},
     oneshot, watch,
 };
 
@@ -386,13 +387,11 @@ impl LaunchContextWith
-    pub fn dev_mining_mode(&self, pending_transactions_listener: Receiver<TxHash>) -> MiningMode {
+    pub fn dev_mining_mode(&self, pool: impl TransactionPool) -> MiningMode {
         if let Some(interval) = self.node_config().dev.block_time {
             MiningMode::interval(interval)
-        } else if let Some(max_transactions) = self.node_config().dev.block_max_transactions {
-            MiningMode::instant(max_transactions, pending_transactions_listener)
         } else {
-            MiningMode::instant(1, pending_transactions_listener)
+            MiningMode::instant(pool)
         }
     }
 }
diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs
index 85401b8b9..65433176b 100644
--- a/crates/node/builder/src/launch/engine.rs
+++ b/crates/node/builder/src/launch/engine.rs
@@ -8,7 +8,7 @@ use reth_beacon_consensus::{
 use reth_blockchain_tree::BlockchainTreeConfig;
 use reth_chainspec::EthChainSpec;
 use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider};
-use reth_engine_local::{LocalEngineService, LocalPayloadAttributesBuilder, MiningMode};
+use reth_engine_local::{LocalEngineService, LocalPayloadAttributesBuilder};
 use reth_engine_service::service::{ChainEvent, EngineService};
 use reth_engine_tree::{
     engine::{EngineApiRequest, EngineRequestHandler},
@@ -208,11 +208,6 @@ where
         info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized");
 
         let mut engine_service = if ctx.is_dev() {
-            let mining_mode = if let Some(block_time) = ctx.node_config().dev.block_time {
-                MiningMode::interval(block_time)
-            } else {
-                MiningMode::instant(ctx.components().pool().clone())
-            };
             let eth_service = LocalEngineService::new(
                 ctx.consensus(),
                 ctx.components().block_executor().clone(),
@@ -225,7 +220,7 @@ where
                 ctx.sync_metrics_tx(),
                 consensus_engine_tx.clone(),
                 Box::pin(consensus_engine_stream),
-                mining_mode,
+                ctx.dev_mining_mode(ctx.components().pool()),
                 LocalPayloadAttributesBuilder::new(ctx.chain_spec()),
             );
 
diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs
index a623a7a9f..4f9e850c9 100644
--- a/crates/node/builder/src/launch/mod.rs
+++ b/crates/node/builder/src/launch/mod.rs
@@ -11,7 +11,6 @@ pub use exex::ExExLauncher;
 
 use std::{future::Future, sync::Arc};
 
-use alloy_primitives::utils::format_ether;
 use futures::{future::Either, stream, stream_select, StreamExt};
 use reth_beacon_consensus::{
     hooks::{EngineHooks, PruneHook, StaticFileHook},
@@ -33,7 +32,6 @@ use reth_provider::providers::BlockchainProvider;
 use reth_rpc::eth::RpcNodeCore;
 use reth_tasks::TaskExecutor;
 use reth_tracing::tracing::{debug, info};
-use reth_transaction_pool::TransactionPool;
 use tokio::sync::{mpsc::unbounded_channel, oneshot};
 use tokio_stream::wrappers::UnboundedReceiverStream;
 
@@ -210,47 +208,7 @@ where
         let pipeline_exex_handle =
             exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty);
         let (pipeline, client) = if ctx.is_dev() {
-            info!(target: "reth::cli", "Starting Reth in dev mode");
-
-            for (idx, (address, alloc)) in ctx.chain_spec().genesis().alloc.iter().enumerate() {
-                info!(target: "reth::cli", "Allocated Genesis Account: {:02}. {} ({} ETH)", idx, address.to_string(), format_ether(alloc.balance));
-            }
-
-            // install auto-seal
-            let mining_mode =
-                ctx.dev_mining_mode(ctx.components().pool().pending_transactions_listener());
-            info!(target: "reth::cli", mode=%mining_mode, "configuring dev mining mode");
-
-            let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new(
-                ctx.chain_spec(),
-                ctx.blockchain_db().clone(),
-                ctx.components().pool().clone(),
-                consensus_engine_tx.clone(),
-                mining_mode,
-                ctx.components().block_executor().clone(),
-            )
-            .build();
-
-            let pipeline = crate::setup::build_networked_pipeline(
-                &ctx.toml_config().stages,
-                client.clone(),
-                ctx.consensus(),
-                ctx.provider_factory().clone(),
-                ctx.task_executor(),
-                ctx.sync_metrics_tx(),
-                ctx.prune_config(),
-                max_block,
-                static_file_producer,
-                ctx.components().block_executor().clone(),
-                pipeline_exex_handle,
-            )?;
-
-            let pipeline_events = pipeline.events();
-            task.set_pipeline_events(pipeline_events);
-            debug!(target: "reth::cli", "Spawning auto mine task");
-            ctx.task_executor().spawn(Box::pin(task));
-
-            (pipeline, Either::Left(client))
+            eyre::bail!("Dev mode is not supported for legacy engine")
         } else {
             let pipeline = crate::setup::build_networked_pipeline(
                 &ctx.toml_config().stages,
@@ -266,7 +224,7 @@ where
                 pipeline_exex_handle,
             )?;
 
-            (pipeline, Either::Right(network_client.clone()))
+            (pipeline, network_client.clone())
         };
 
         let pipeline_events = pipeline.events();
diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml
index deabbac52..7674cbd37 100644
--- a/crates/optimism/node/Cargo.toml
+++ b/crates/optimism/node/Cargo.toml
@@ -16,7 +16,6 @@ reth-chainspec.workspace = true
 reth-engine-local.workspace = true
 reth-primitives.workspace = true
 reth-payload-builder.workspace = true
-reth-auto-seal-consensus.workspace = true
 reth-basic-payload-builder.workspace = true
 reth-consensus.workspace = true
 reth-node-api.workspace = true
@@ -76,7 +75,6 @@ optimism = [
     "reth-optimism-payload-builder/optimism",
     "reth-beacon-consensus/optimism",
     "revm/optimism",
"reth-auto-seal-consensus/optimism", "reth-optimism-rpc/optimism", "reth-engine-local/optimism", "reth-optimism-consensus/optimism", diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index e39fdfc27..a09dfbaa5 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -467,11 +467,7 @@ where type Consensus = Arc; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { - if ctx.is_dev() { - Ok(Arc::new(reth_auto_seal_consensus::AutoSealConsensus::new(ctx.chain_spec()))) - } else { - Ok(Arc::new(OpBeaconConsensus::new(ctx.chain_spec()))) - } + Ok(Arc::new(OpBeaconConsensus::new(ctx.chain_spec()))) } } diff --git a/docs/repo/layout.md b/docs/repo/layout.md index 6ed91e796..f78abe961 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -82,7 +82,6 @@ The networking component mainly lives in [`net/network`](../../crates/net/networ Different consensus mechanisms. - [`consensus/common`](../../crates/consensus/common): Common consensus functions and traits (e.g. fee calculation) -- [`consensus/auto-seal`](../../crates/consensus/auto-seal): A consensus mechanism that auto-seals blocks for local development (also commonly known as "auto-mine") - [`consensus/beacon`](../../crates/consensus/beacon): Consensus mechanism that handles messages from a beacon node ("eth2") ### Execution