Mirror of https://github.com/hl-archive-node/nanoreth.git
feat: add Consensus to ExecutionStage (#14447)
Cargo.lock (generated): 4 changes
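This change threads a consensus implementation into ExecutionStage and reworks the CLI commands to take an (executor, consensus) pair, bundled behind a new CliNodeComponents helper trait, instead of an executor factory alone. Below is a minimal sketch of the Ethereum wiring, mirroring the CLI closure in the diff that follows; the surrounding fn main is only a placeholder scope, and the imports are assumed to come from the reth workspace crates touched by this commit.

use std::sync::Arc;

use reth_chainspec::ChainSpec;
use reth_node_ethereum::{consensus::EthBeaconConsensus, EthExecutorProvider};

fn main() {
    // The (executor, consensus) tuple satisfies the `CliNodeComponents` trait
    // introduced in this change; commands receive a closure that builds it from
    // the chain spec, e.g. `command.execute::<EthereumNode, _, _>(components)`.
    let components = |spec: Arc<ChainSpec>| {
        (EthExecutorProvider::ethereum(spec.clone()), EthBeaconConsensus::new(spec))
    };
    let _ = components;
}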
@@ -2757,6 +2757,7 @@ dependencies = [
  "reth-chainspec",
  "reth-db",
  "reth-db-api",
+ "reth-ethereum-consensus",
  "reth-evm-ethereum",
  "reth-primitives",
  "reth-provider",
@@ -6766,7 +6767,6 @@ dependencies = [
  "reth-ecies",
  "reth-eth-wire",
  "reth-ethereum-cli",
- "reth-ethereum-consensus",
  "reth-evm",
  "reth-exex",
  "reth-fs-util",
@@ -8406,6 +8406,7 @@ dependencies = [
  "reth-node-events",
  "reth-node-metrics",
  "reth-optimism-chainspec",
+ "reth-optimism-consensus",
  "reth-optimism-evm",
  "reth-optimism-node",
  "reth-optimism-primitives",
@@ -9298,6 +9299,7 @@ dependencies = [
  "reth-db",
  "reth-db-api",
  "reth-downloaders",
+ "reth-ethereum-consensus",
  "reth-etl",
  "reth-evm",
  "reth-evm-ethereum",
@@ -18,7 +18,7 @@ use reth_db::DatabaseEnv;
 use reth_ethereum_cli::chainspec::EthereumChainSpecParser;
 use reth_network::EthNetworkPrimitives;
 use reth_node_builder::{NodeBuilder, WithLaunchContext};
-use reth_node_ethereum::{EthExecutorProvider, EthereumNode};
+use reth_node_ethereum::{consensus::EthBeaconConsensus, EthExecutorProvider, EthereumNode};
 use reth_node_metrics::recorder::install_prometheus_recorder;
 use reth_tracing::FileWorkerGuard;
 use std::{ffi::OsString, fmt, future::Future, sync::Arc};
@@ -152,6 +152,9 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>, Ext: clap::Args + fmt::Debug> Cl
        let _ = install_prometheus_recorder();

        let runner = CliRunner::default();
+        let components = |spec: Arc<C::ChainSpec>| {
+            (EthExecutorProvider::ethereum(spec.clone()), EthBeaconConsensus::new(spec))
+        };
        match self.command {
            Commands::Node(command) => {
                runner.run_command_until_exit(|ctx| command.execute(ctx, launcher))
@@ -162,18 +165,15 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>, Ext: clap::Args + fmt::Debug> Cl
            Commands::InitState(command) => {
                runner.run_blocking_until_ctrl_c(command.execute::<EthereumNode>())
            }
-            Commands::Import(command) => runner.run_blocking_until_ctrl_c(
-                command.execute::<EthereumNode, _, _>(EthExecutorProvider::ethereum),
-            ),
+            Commands::Import(command) => {
+                runner.run_blocking_until_ctrl_c(command.execute::<EthereumNode, _, _>(components))
+            }
            Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()),
            Commands::Db(command) => {
                runner.run_blocking_until_ctrl_c(command.execute::<EthereumNode>())
            }
            Commands::Stage(command) => runner.run_command_until_exit(|ctx| {
-                command.execute::<EthereumNode, _, _, EthNetworkPrimitives>(
-                    ctx,
-                    EthExecutorProvider::ethereum,
-                )
+                command.execute::<EthereumNode, _, _, EthNetworkPrimitives>(ctx, components)
            }),
            Commands::P2P(command) => {
                runner.run_until_ctrl_c(command.execute::<EthNetworkPrimitives>())
@@ -1,6 +1,6 @@
 //! Command for debugging execution.

-use crate::{api::BlockTy, args::NetworkArgs, utils::get_single_header};
+use crate::{args::NetworkArgs, utils::get_single_header};
 use alloy_eips::BlockHashOrNumber;
 use alloy_primitives::{BlockNumber, B256};
 use clap::Parser;
@@ -11,7 +11,7 @@ use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, Environ
 use reth_cli_runner::CliContext;
 use reth_cli_util::get_secret_key;
 use reth_config::Config;
-use reth_consensus::Consensus;
+use reth_consensus::FullConsensus;
 use reth_db::DatabaseEnv;
 use reth_downloaders::{
     bodies::bodies::BodiesDownloaderBuilder,
@@ -64,7 +64,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
        &self,
        config: &Config,
        client: Client,
-        consensus: Arc<dyn Consensus<BlockTy<N>, Error = ConsensusError>>,
+        consensus: Arc<dyn FullConsensus<N::Primitives, Error = ConsensusError>>,
        provider_factory: ProviderFactory<N>,
        task_executor: &TaskExecutor,
        static_file_producer: StaticFileProducer<ProviderFactory<N>>,
@@ -79,7 +79,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
            .into_task_with(task_executor);

        let body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies)
-            .build(client, Arc::clone(&consensus), provider_factory.clone())
+            .build(client, consensus.clone().as_consensus(), provider_factory.clone())
            .into_task_with(task_executor);

        let stage_conf = &config.stages;
@@ -94,7 +94,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
                DefaultStages::new(
                    provider_factory.clone(),
                    tip_rx,
-                    Arc::clone(&consensus),
+                    consensus.clone(),
                    header_downloader,
                    body_downloader,
                    executor.clone(),
@@ -103,6 +103,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
                )
                .set(ExecutionStage::new(
                    executor,
+                    consensus.clone(),
                    ExecutionStageThresholds {
                        max_blocks: None,
                        max_changes: None,
@@ -171,7 +172,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
        let Environment { provider_factory, config, data_dir } =
            self.env.init::<N>(AccessRights::RW)?;

-        let consensus: Arc<dyn Consensus<BlockTy<N>, Error = ConsensusError>> =
+        let consensus: Arc<dyn FullConsensus<N::Primitives, Error = ConsensusError>> =
            Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec()));

        // Configure and build network
@@ -195,7 +196,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
        let mut pipeline = self.build_pipeline(
            &config,
            fetch_client.clone(),
-            Arc::clone(&consensus),
+            consensus.clone(),
            provider_factory.clone(),
            &ctx.task_executor,
            static_file_producer,
@@ -1,5 +1,5 @@
 //! Command for debugging merkle tree calculation.
-use crate::{args::NetworkArgs, utils::get_single_header};
+use crate::{args::NetworkArgs, providers::ExecutionOutcome, utils::get_single_header};
 use alloy_consensus::BlockHeader;
 use alloy_eips::BlockHashOrNumber;
 use backon::{ConstantBuilder, Retryable};
@@ -14,7 +14,7 @@ use reth_consensus::{Consensus, ConsensusError};
 use reth_db::tables;
 use reth_db_api::{cursor::DbCursorRO, transaction::DbTx};
 use reth_ethereum_primitives::EthPrimitives;
-use reth_evm::execute::{BatchExecutor, BlockExecutorProvider};
+use reth_evm::execute::{BlockExecutorProvider, Executor};
 use reth_network::{BlockDownloaderProvider, NetworkHandle};
 use reth_network_api::NetworkInfo;
 use reth_network_p2p::full_block::FullBlockClient;
@@ -161,14 +161,12 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
            provider_rw.insert_block(sealed_block.clone(), StorageLocation::Database)?;

            td += sealed_block.difficulty();
-            let mut executor = executor_provider.batch_executor(StateProviderDatabase::new(
-                LatestStateProviderRef::new(&provider_rw),
-            ));
-            executor.execute_and_verify_one(&sealed_block)?;
-            let execution_outcome = executor.finalize();
+            let executor = executor_provider
+                .executor(StateProviderDatabase::new(LatestStateProviderRef::new(&provider_rw)));
+            let output = executor.execute(&sealed_block)?;

            provider_rw.write_state(
-                &execution_outcome,
+                &ExecutionOutcome::single(block_number, output),
                OriginalValuesKnown::Yes,
                StorageLocation::Database,
            )?;
@@ -11,7 +11,6 @@ repository.workspace = true

 [dependencies]
 # reth
-reth-ethereum-consensus.workspace = true
 reth-chainspec.workspace = true
 reth-cli.workspace = true
 reth-ethereum-cli.workspace = true
@@ -5,11 +5,11 @@ use clap::Parser;
 use reth_chainspec::EthChainSpec;
 use reth_cli::chainspec::ChainSpecParser;
 use reth_config::{config::EtlConfig, Config};
-use reth_consensus::noop::NoopConsensus;
+use reth_consensus::{noop::NoopConsensus, ConsensusError, FullConsensus};
 use reth_db::{init_db, open_db_read_only, DatabaseEnv};
 use reth_db_common::init::init_genesis;
 use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader};
-use reth_evm::noop::NoopBlockExecutorProvider;
+use reth_evm::{execute::BlockExecutorProvider, noop::NoopBlockExecutorProvider};
 use reth_node_builder::{NodeTypesWithDBAdapter, NodeTypesWithEngine};
 use reth_node_core::{
     args::{DatabaseArgs, DatadirArgs},
@@ -199,3 +199,33 @@ impl AccessRights {
 /// [`NodeTypes`](reth_node_builder::NodeTypes) in CLI.
 pub trait CliNodeTypes: NodeTypesWithEngine + NodeTypesForProvider {}
 impl<N> CliNodeTypes for N where N: NodeTypesWithEngine + NodeTypesForProvider {}
+
+/// Helper trait aggregating components required for the CLI.
+pub trait CliNodeComponents<N: CliNodeTypes> {
+    /// Block executor.
+    type Executor: BlockExecutorProvider<Primitives = N::Primitives>;
+    /// Consensus implementation.
+    type Consensus: FullConsensus<N::Primitives, Error = ConsensusError> + Clone + 'static;
+
+    /// Returns the block executor.
+    fn executor(&self) -> &Self::Executor;
+    /// Returns the consensus implementation.
+    fn consensus(&self) -> &Self::Consensus;
+}
+
+impl<N: CliNodeTypes, E, C> CliNodeComponents<N> for (E, C)
+where
+    E: BlockExecutorProvider<Primitives = N::Primitives>,
+    C: FullConsensus<N::Primitives, Error = ConsensusError> + Clone + 'static,
+{
+    type Executor = E;
+    type Consensus = C;
+
+    fn executor(&self) -> &Self::Executor {
+        &self.0
+    }
+
+    fn consensus(&self) -> &Self::Consensus {
+        &self.1
+    }
+}
@@ -1,12 +1,12 @@
 //! Command that initializes the node by importing a chain from a file.
-use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
+use crate::common::{AccessRights, CliNodeComponents, CliNodeTypes, Environment, EnvironmentArgs};
 use alloy_primitives::B256;
 use clap::Parser;
 use futures::{Stream, StreamExt};
 use reth_chainspec::{EthChainSpec, EthereumHardforks};
 use reth_cli::chainspec::ChainSpecParser;
 use reth_config::Config;
-use reth_consensus::{Consensus, ConsensusError};
+use reth_consensus::{ConsensusError, FullConsensus};
 use reth_db::tables;
 use reth_db_api::transaction::DbTx;
 use reth_downloaders::{
@@ -14,7 +14,6 @@ use reth_downloaders::{
     file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE},
     headers::reverse_headers::ReverseHeadersDownloaderBuilder,
 };
-use reth_ethereum_consensus::EthBeaconConsensus;
 use reth_evm::execute::BlockExecutorProvider;
 use reth_network_p2p::{
     bodies::downloader::BodyDownloader,
@@ -58,11 +57,11 @@ pub struct ImportCommand<C: ChainSpecParser> {

 impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> ImportCommand<C> {
     /// Execute `import` command
-    pub async fn execute<N, E, F>(self, executor: F) -> eyre::Result<()>
+    pub async fn execute<N, Comp, F>(self, components: F) -> eyre::Result<()>
     where
         N: CliNodeTypes<ChainSpec = C::ChainSpec>,
-        E: BlockExecutorProvider<Primitives = N::Primitives>,
-        F: FnOnce(Arc<N::ChainSpec>) -> E,
+        Comp: CliNodeComponents<N>,
+        F: FnOnce(Arc<N::ChainSpec>) -> Comp,
     {
         info!(target: "reth::cli", "reth {} starting", SHORT_VERSION);

@@ -77,8 +76,9 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> ImportComm

         let Environment { provider_factory, config, .. } = self.env.init::<N>(AccessRights::RW)?;

-        let executor = executor(provider_factory.chain_spec());
-        let consensus = Arc::new(EthBeaconConsensus::new(self.env.chain.clone()));
+        let components = components(provider_factory.chain_spec());
+        let executor = components.executor().clone();
+        let consensus = Arc::new(components.consensus().clone());
         info!(target: "reth::cli", "Consensus engine initialized");

         // open file
@@ -179,7 +179,7 @@ pub fn build_import_pipeline<N, C, E>(
 ) -> eyre::Result<(Pipeline<N>, impl Stream<Item = NodeEvent<N::Primitives>>)>
 where
     N: ProviderNodeTypes + CliNodeTypes,
-    C: Consensus<BlockTy<N>, Error = ConsensusError> + 'static,
+    C: FullConsensus<N::Primitives, Error = ConsensusError> + 'static,
     E: BlockExecutorProvider<Primitives = N::Primitives>,
 {
     if !file_client.has_canonical_blocks() {
@@ -1,6 +1,7 @@
 use std::sync::Arc;

 use super::setup;
+use reth_consensus::{noop::NoopConsensus, ConsensusError, FullConsensus};
 use reth_db::{tables, DatabaseEnv};
 use reth_db_api::{
     cursor::DbCursorRO, database::Database, table::TableImporter, transaction::DbTx,
@@ -16,17 +17,19 @@ use reth_provider::{
 use reth_stages::{stages::ExecutionStage, Stage, StageCheckpoint, UnwindInput};
 use tracing::info;

-pub(crate) async fn dump_execution_stage<N, E>(
+pub(crate) async fn dump_execution_stage<N, E, C>(
     db_tool: &DbTool<N>,
     from: u64,
     to: u64,
     output_datadir: ChainPath<DataDirPath>,
     should_run: bool,
     executor: E,
+    consensus: C,
 ) -> eyre::Result<()>
 where
     N: ProviderNodeTypes<DB = Arc<DatabaseEnv>>,
     E: BlockExecutorProvider<Primitives = N::Primitives>,
+    C: FullConsensus<E::Primitives, Error = ConsensusError> + 'static,
 {
     let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?;

@@ -44,6 +47,7 @@ where
            to,
            from,
            executor,
+            consensus,
        )?;
    }

@@ -139,8 +143,10 @@ fn unwind_and_copy<N: ProviderNodeTypes>(
 ) -> eyre::Result<()> {
    let provider = db_tool.provider_factory.database_provider_rw()?;

-    let mut exec_stage =
-        ExecutionStage::new_with_executor(NoopBlockExecutorProvider::<N::Primitives>::default());
+    let mut exec_stage = ExecutionStage::new_with_executor(
+        NoopBlockExecutorProvider::<N::Primitives>::default(),
+        NoopConsensus::arc(),
+    );

    exec_stage.unwind(
        &provider,
@@ -162,19 +168,21 @@ fn unwind_and_copy<N: ProviderNodeTypes>(
 }

 /// Try to re-execute the stage without committing
-fn dry_run<N, E>(
+fn dry_run<N, E, C>(
     output_provider_factory: ProviderFactory<N>,
     to: u64,
     from: u64,
     executor: E,
+    consensus: C,
 ) -> eyre::Result<()>
 where
     N: ProviderNodeTypes,
     E: BlockExecutorProvider<Primitives = N::Primitives>,
+    C: FullConsensus<E::Primitives, Error = ConsensusError> + 'static,
 {
    info!(target: "reth::cli", "Executing stage. [dry-run]");

-    let mut exec_stage = ExecutionStage::new_with_executor(executor);
+    let mut exec_stage = ExecutionStage::new_with_executor(executor, Arc::new(consensus));

    let input =
        reth_stages::ExecInput { target: Some(to), checkpoint: Some(StageCheckpoint::new(from)) };
|
|||||||
use alloy_primitives::BlockNumber;
|
use alloy_primitives::BlockNumber;
|
||||||
use eyre::Result;
|
use eyre::Result;
|
||||||
use reth_config::config::EtlConfig;
|
use reth_config::config::EtlConfig;
|
||||||
|
use reth_consensus::noop::NoopConsensus;
|
||||||
use reth_db::{tables, DatabaseEnv};
|
use reth_db::{tables, DatabaseEnv};
|
||||||
use reth_db_api::{database::Database, table::TableImporter};
|
use reth_db_api::{database::Database, table::TableImporter};
|
||||||
use reth_db_common::DbTool;
|
use reth_db_common::DbTool;
|
||||||
@ -96,6 +97,7 @@ fn unwind_and_copy<N: ProviderNodeTypes>(
|
|||||||
// Bring Plainstate to TO (hashing stage execution requires it)
|
// Bring Plainstate to TO (hashing stage execution requires it)
|
||||||
let mut exec_stage = ExecutionStage::new(
|
let mut exec_stage = ExecutionStage::new(
|
||||||
NoopBlockExecutorProvider::<N::Primitives>::default(), // Not necessary for unwinding.
|
NoopBlockExecutorProvider::<N::Primitives>::default(), // Not necessary for unwinding.
|
||||||
|
NoopConsensus::arc(),
|
||||||
ExecutionStageThresholds {
|
ExecutionStageThresholds {
|
||||||
max_blocks: Some(u64::MAX),
|
max_blocks: Some(u64::MAX),
|
||||||
max_changes: None,
|
max_changes: None,
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
 //! Database debugging tool
-use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
+use crate::common::{AccessRights, CliNodeComponents, CliNodeTypes, Environment, EnvironmentArgs};
 use clap::Parser;
 use reth_chainspec::{EthChainSpec, EthereumHardforks};
 use reth_cli::chainspec::ChainSpecParser;
@@ -9,7 +9,6 @@ use reth_db_api::{
     transaction::DbTx,
 };
 use reth_db_common::DbTool;
-use reth_evm::execute::BlockExecutorProvider;
 use reth_node_builder::NodeTypesWithDB;
 use reth_node_core::{
     args::DatadirArgs,
@@ -80,29 +79,31 @@ macro_rules! handle_stage {
        $stage_fn($tool, *from, *to, output_datadir, *dry_run).await?
    }};

-    ($stage_fn:ident, $tool:expr, $command:expr, $executor:expr) => {{
+    ($stage_fn:ident, $tool:expr, $command:expr, $executor:expr, $consensus:expr) => {{
        let StageCommand { output_datadir, from, to, dry_run, .. } = $command;
        let output_datadir =
            output_datadir.with_chain($tool.chain().chain(), DatadirArgs::default());
-        $stage_fn($tool, *from, *to, output_datadir, *dry_run, $executor).await?
+        $stage_fn($tool, *from, *to, output_datadir, *dry_run, $executor, $consensus).await?
    }};
 }

 impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C> {
     /// Execute `dump-stage` command
-    pub async fn execute<N, E, F>(self, executor: F) -> eyre::Result<()>
+    pub async fn execute<N, Comp, F>(self, components: F) -> eyre::Result<()>
     where
         N: CliNodeTypes<ChainSpec = C::ChainSpec>,
-        E: BlockExecutorProvider<Primitives = N::Primitives>,
-        F: FnOnce(Arc<C::ChainSpec>) -> E,
+        Comp: CliNodeComponents<N>,
+        F: FnOnce(Arc<C::ChainSpec>) -> Comp,
     {
        let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RO)?;
        let tool = DbTool::new(provider_factory)?;

        match &self.command {
            Stages::Execution(cmd) => {
-                let executor = executor(tool.chain());
-                handle_stage!(dump_execution_stage, &tool, cmd, executor)
+                let components = components(tool.chain());
+                let executor = components.executor().clone();
+                let consensus = components.consensus().clone();
+                handle_stage!(dump_execution_stage, &tool, cmd, executor, consensus)
            }
            Stages::StorageHashing(cmd) => handle_stage!(dump_hashing_storage_stage, &tool, cmd),
            Stages::AccountHashing(cmd) => handle_stage!(dump_hashing_account_stage, &tool, cmd),
@@ -2,13 +2,12 @@

 use std::sync::Arc;

-use crate::common::CliNodeTypes;
+use crate::common::{CliNodeComponents, CliNodeTypes};
 use clap::{Parser, Subcommand};
 use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
 use reth_cli::chainspec::ChainSpecParser;
 use reth_cli_runner::CliContext;
 use reth_eth_wire::NetPrimitivesFor;
-use reth_evm::execute::BlockExecutorProvider;

 pub mod drop;
 pub mod dump;
@@ -42,17 +41,17 @@ pub enum Subcommands<C: ChainSpecParser> {

 impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>> Command<C> {
     /// Execute `stage` command
-    pub async fn execute<N, E, F, P>(self, ctx: CliContext, executor: F) -> eyre::Result<()>
+    pub async fn execute<N, Comp, F, P>(self, ctx: CliContext, components: F) -> eyre::Result<()>
     where
         N: CliNodeTypes<ChainSpec = C::ChainSpec>,
-        E: BlockExecutorProvider<Primitives = N::Primitives>,
-        F: FnOnce(Arc<C::ChainSpec>) -> E,
+        Comp: CliNodeComponents<N>,
+        F: FnOnce(Arc<C::ChainSpec>) -> Comp,
         P: NetPrimitivesFor<N::Primitives>,
     {
        match self.command {
-            Subcommands::Run(command) => command.execute::<N, _, _, P>(ctx, executor).await,
+            Subcommands::Run(command) => command.execute::<N, _, _, P>(ctx, components).await,
            Subcommands::Drop(command) => command.execute::<N>().await,
-            Subcommands::Dump(command) => command.execute::<N, _, _>(executor).await,
+            Subcommands::Dump(command) => command.execute::<N, _, _>(components).await,
            Subcommands::Unwind(command) => command.execute::<N>().await,
        }
    }
@@ -2,7 +2,7 @@
 //!
 //! Stage debugging tool

-use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
+use crate::common::{AccessRights, CliNodeComponents, CliNodeTypes, Environment, EnvironmentArgs};
 use alloy_eips::BlockHashOrNumber;
 use alloy_primitives::Sealable;
 use clap::Parser;
@@ -17,8 +17,6 @@ use reth_downloaders::{
     headers::reverse_headers::ReverseHeadersDownloaderBuilder,
 };
 use reth_eth_wire::NetPrimitivesFor;
-use reth_ethereum_consensus::EthBeaconConsensus;
-use reth_evm::execute::BlockExecutorProvider;
 use reth_exex::ExExManagerHandle;
 use reth_network::BlockDownloaderProvider;
 use reth_network_p2p::HeadersClient;
@@ -105,11 +103,11 @@ pub struct Command<C: ChainSpecParser> {

 impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>> Command<C> {
     /// Execute `stage` command
-    pub async fn execute<N, E, F, P>(self, ctx: CliContext, executor: F) -> eyre::Result<()>
+    pub async fn execute<N, Comp, F, P>(self, ctx: CliContext, components: F) -> eyre::Result<()>
     where
         N: CliNodeTypes<ChainSpec = C::ChainSpec>,
-        E: BlockExecutorProvider<Primitives = N::Primitives>,
-        F: FnOnce(Arc<C::ChainSpec>) -> E,
+        Comp: CliNodeComponents<N>,
+        F: FnOnce(Arc<C::ChainSpec>) -> Comp,
         P: NetPrimitivesFor<N::Primitives>,
     {
        // Raise the fd limit of the process.
@@ -120,6 +118,7 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
            self.env.init::<N>(AccessRights::RW)?;

        let mut provider_rw = provider_factory.database_provider_rw()?;
+        let components = components(provider_factory.chain_spec());

        if let Some(listen_addr) = self.metrics {
            info!(target: "reth::cli", "Starting metrics endpoint at {}", listen_addr);
@@ -162,8 +161,7 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
        let (mut exec_stage, mut unwind_stage): (Box<dyn Stage<_>>, Option<Box<dyn Stage<_>>>) =
            match self.stage {
                StageEnum::Headers => {
-                    let consensus =
-                        Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec()));
+                    let consensus = Arc::new(components.consensus().clone());

                    let network_secret_path = self
                        .network
@@ -215,8 +213,7 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
                    )
                }
                StageEnum::Bodies => {
-                    let consensus =
-                        Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec()));
+                    let consensus = Arc::new(components.consensus().clone());

                    let mut config = config;
                    config.peers.trusted_nodes_only = self.network.trusted_only;
@@ -267,7 +264,8 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
                ),
                StageEnum::Execution => (
                    Box::new(ExecutionStage::new(
-                        executor(provider_factory.chain_spec()),
+                        components.executor().clone(),
+                        Arc::new(components.consensus().clone()),
                        ExecutionStageThresholds {
                            max_blocks: Some(batch_size),
                            max_changes: None,
@@ -115,7 +115,12 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C>

        let builder = if self.offline {
            Pipeline::<N>::builder().add_stages(
-                OfflineStages::new(executor, config.stages, prune_modes.clone())
+                OfflineStages::new(
+                    executor,
+                    NoopConsensus::arc(),
+                    config.stages,
+                    prune_modes.clone(),
+                )
                    .builder()
                    .disable(reth_stages::StageId::SenderRecovery),
            )
@@ -133,6 +138,7 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C>
                )
                .set(ExecutionStage::new(
                    executor,
+                    Arc::new(NoopConsensus::default()),
                    ExecutionStageThresholds {
                        max_blocks: None,
                        max_changes: None,
@@ -21,7 +21,7 @@ use reth_consensus_common::validation::{
     validate_against_parent_timestamp, validate_block_pre_execution, validate_body_against_header,
     validate_header_base_fee, validate_header_extra_data, validate_header_gas,
 };
-use reth_primitives::{NodePrimitives, Receipt, RecoveredBlock, SealedBlock, SealedHeader};
+use reth_primitives::{NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader};
 use reth_primitives_traits::{
     constants::{GAS_LIMIT_BOUND_DIVISOR, MINIMUM_GAS_LIMIT},
     Block, BlockHeader,
@@ -99,7 +99,7 @@ impl<ChainSpec: EthChainSpec + EthereumHardforks> EthBeaconConsensus<ChainSpec>
 impl<ChainSpec, N> FullConsensus<N> for EthBeaconConsensus<ChainSpec>
 where
     ChainSpec: Send + Sync + EthChainSpec + EthereumHardforks + Debug,
-    N: NodePrimitives<Receipt = Receipt>,
+    N: NodePrimitives,
 {
     fn validate_block_post_execution(
         &self,
@@ -3,26 +3,27 @@ use alloy_eips::eip7685::Requests;
 use alloy_primitives::{Bloom, B256};
 use reth_chainspec::EthereumHardforks;
 use reth_consensus::ConsensusError;
-use reth_primitives::{gas_spent_by_transactions, GotExpected, Receipt, RecoveredBlock};
-use reth_primitives_traits::Block;
+use reth_primitives::{gas_spent_by_transactions, GotExpected, RecoveredBlock};
+use reth_primitives_traits::{Block, Receipt};

 /// Validate a block with regard to execution results:
 ///
 /// - Compares the receipts root in the block header to the block body
 /// - Compares the gas used in the block header to the actual gas usage after execution
-pub fn validate_block_post_execution<B, ChainSpec>(
+pub fn validate_block_post_execution<B, R, ChainSpec>(
     block: &RecoveredBlock<B>,
     chain_spec: &ChainSpec,
-    receipts: &[Receipt],
+    receipts: &[R],
     requests: &Requests,
 ) -> Result<(), ConsensusError>
 where
     B: Block,
+    R: Receipt,
     ChainSpec: EthereumHardforks,
 {
    // Check if gas used matches the value set in header.
    let cumulative_gas_used =
-        receipts.last().map(|receipt| receipt.cumulative_gas_used).unwrap_or(0);
+        receipts.last().map(|receipt| receipt.cumulative_gas_used()).unwrap_or(0);
    if block.header().gas_used() != cumulative_gas_used {
        return Err(ConsensusError::BlockGasUsed {
            gas: GotExpected { got: cumulative_gas_used, expected: block.header().gas_used() },
@@ -61,13 +62,13 @@ where

 /// Calculate the receipts root, and compare it against against the expected receipts root and logs
 /// bloom.
-fn verify_receipts(
+fn verify_receipts<R: Receipt>(
     expected_receipts_root: B256,
     expected_logs_bloom: Bloom,
-    receipts: &[Receipt],
+    receipts: &[R],
 ) -> Result<(), ConsensusError> {
    // Calculate receipts root.
-    let receipts_with_bloom = receipts.iter().map(Receipt::with_bloom_ref).collect::<Vec<_>>();
+    let receipts_with_bloom = receipts.iter().map(TxReceipt::with_bloom_ref).collect::<Vec<_>>();
    let receipts_root = calculate_receipt_root(&receipts_with_bloom);

    // Calculate header logs bloom.
@@ -109,6 +110,7 @@ fn compare_receipts_root_and_logs_bloom(
 #[cfg(test)]
 mod tests {
     use alloy_primitives::hex;
+    use reth_primitives::Receipt;

     use super::*;

@@ -291,10 +291,8 @@ mod tests {
     };
     use alloy_primitives::{b256, fixed_bytes, keccak256, Bytes, TxKind, B256, U256};
     use reth_chainspec::{ChainSpecBuilder, ForkCondition};
-    use reth_evm::execute::{
-        BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider, Executor,
-    };
-    use reth_execution_types::BlockExecutionOutput;
+    use reth_evm::execute::{BasicBlockExecutorProvider, BlockExecutorProvider, Executor};
+    use reth_execution_types::BlockExecutionResult;
     use reth_primitives::{Account, Block, BlockBody, Transaction};
     use reth_primitives_traits::{crypto::secp256k1::public_key_to_address, Block as _};
     use reth_revm::{
@@ -368,11 +366,11 @@ mod tests {

        let provider = executor_provider(chain_spec);

-        let mut executor = provider.batch_executor(StateProviderDatabase::new(&db));
+        let mut executor = provider.executor(StateProviderDatabase::new(&db));

        // attempt to execute a block without parent beacon block root, expect err
        let err = executor
-            .execute_and_verify_one(&RecoveredBlock::new_unhashed(
+            .execute_one(&RecoveredBlock::new_unhashed(
                Block {
                    header: header.clone(),
                    body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None },
@@ -393,7 +391,7 @@ mod tests {

        // Now execute a block with the fixed header, ensure that it does not fail
        executor
-            .execute_and_verify_one(&RecoveredBlock::new_unhashed(
+            .execute_one(&RecoveredBlock::new_unhashed(
                Block {
                    header: header.clone(),
                    body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None },
@@ -452,8 +450,8 @@ mod tests {

        // attempt to execute an empty block with parent beacon block root, this should not fail
        provider
-            .batch_executor(StateProviderDatabase::new(&db))
-            .execute_and_verify_one(&RecoveredBlock::new_unhashed(
+            .executor(StateProviderDatabase::new(&db))
+            .execute_one(&RecoveredBlock::new_unhashed(
                Block {
                    header,
                    body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None },
@@ -493,11 +491,11 @@ mod tests {
            ..Header::default()
        };

-        let mut executor = provider.batch_executor(StateProviderDatabase::new(&db));
+        let mut executor = provider.executor(StateProviderDatabase::new(&db));

        // attempt to execute an empty block with parent beacon block root, this should not fail
        executor
-            .execute_and_verify_one(&RecoveredBlock::new_unhashed(
+            .execute_one(&RecoveredBlock::new_unhashed(
                Block {
                    header,
                    body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None },
@@ -528,12 +526,12 @@ mod tests {

        let mut header = chain_spec.genesis_header().clone();
        let provider = executor_provider(chain_spec);
-        let mut executor = provider.batch_executor(StateProviderDatabase::new(&db));
+        let mut executor = provider.executor(StateProviderDatabase::new(&db));

        // attempt to execute the genesis block with non-zero parent beacon block root, expect err
        header.parent_beacon_block_root = Some(B256::with_last_byte(0x69));
        let _err = executor
-            .execute_and_verify_one(&RecoveredBlock::new_unhashed(
+            .execute_one(&RecoveredBlock::new_unhashed(
                Block { header: header.clone(), body: Default::default() },
                vec![],
            ))
@@ -548,7 +546,7 @@ mod tests {
        // now try to process the genesis block again, this time ensuring that a system contract
        // call does not occur
        executor
-            .execute_and_verify_one(&RecoveredBlock::new_unhashed(
+            .execute_one(&RecoveredBlock::new_unhashed(
                Block { header, body: Default::default() },
                vec![],
            ))
@@ -592,11 +590,11 @@ mod tests {
        let provider = executor_provider(chain_spec);

        // execute header
-        let mut executor = provider.batch_executor(StateProviderDatabase::new(&db));
+        let mut executor = provider.executor(StateProviderDatabase::new(&db));

        // Now execute a block with the fixed header, ensure that it does not fail
        executor
-            .execute_and_verify_one(&RecoveredBlock::new_unhashed(
+            .execute_one(&RecoveredBlock::new_unhashed(
                Block { header: header.clone(), body: Default::default() },
                vec![],
            ))
@@ -659,14 +657,14 @@ mod tests {
        );

        let provider = executor_provider(chain_spec);
-        let mut executor = provider.batch_executor(StateProviderDatabase::new(&db));
+        let mut executor = provider.executor(StateProviderDatabase::new(&db));

        // construct the header for block one
        let header = Header { timestamp: 1, number: 1, ..Header::default() };

        // attempt to execute an empty block, this should not fail
        executor
-            .execute_and_verify_one(&RecoveredBlock::new_unhashed(
+            .execute_one(&RecoveredBlock::new_unhashed(
                Block { header, body: Default::default() },
                vec![],
            ))
@@ -700,11 +698,11 @@ mod tests {

        let header = chain_spec.genesis_header().clone();
        let provider = executor_provider(chain_spec);
-        let mut executor = provider.batch_executor(StateProviderDatabase::new(&db));
+        let mut executor = provider.executor(StateProviderDatabase::new(&db));

        // attempt to execute genesis block, this should not fail
        executor
-            .execute_and_verify_one(&RecoveredBlock::new_unhashed(
+            .execute_one(&RecoveredBlock::new_unhashed(
                Block { header, body: Default::default() },
                vec![],
            ))
@@ -747,11 +745,11 @@ mod tests {
            ..Header::default()
        };
        let provider = executor_provider(chain_spec);
-        let mut executor = provider.batch_executor(StateProviderDatabase::new(&db));
+        let mut executor = provider.executor(StateProviderDatabase::new(&db));

        // attempt to execute the fork activation block, this should not fail
        executor
-            .execute_and_verify_one(&RecoveredBlock::new_unhashed(
+            .execute_one(&RecoveredBlock::new_unhashed(
                Block { header, body: Default::default() },
                vec![],
            ))
@@ -791,7 +789,7 @@ mod tests {
        );

        let provider = executor_provider(chain_spec);
-        let mut executor = provider.batch_executor(StateProviderDatabase::new(&db));
+        let mut executor = provider.executor(StateProviderDatabase::new(&db));

        let header = Header {
            parent_hash: B256::random(),
@@ -805,7 +803,7 @@ mod tests {

        // attempt to execute the fork activation block, this should not fail
        executor
-            .execute_and_verify_one(&RecoveredBlock::new_unhashed(
+            .execute_one(&RecoveredBlock::new_unhashed(
                Block { header, body: Default::default() },
                vec![],
            ))
@@ -834,11 +832,11 @@ mod tests {
        let header_hash = header.hash_slow();

        let provider = executor_provider(chain_spec);
-        let mut executor = provider.batch_executor(StateProviderDatabase::new(&db));
+        let mut executor = provider.executor(StateProviderDatabase::new(&db));

        // attempt to execute the genesis block, this should not fail
        executor
-            .execute_and_verify_one(&RecoveredBlock::new_unhashed(
+            .execute_one(&RecoveredBlock::new_unhashed(
                Block { header, body: Default::default() },
                vec![],
            ))
@@ -869,7 +867,7 @@ mod tests {
        let header_hash = header.hash_slow();

        executor
-            .execute_and_verify_one(&RecoveredBlock::new_unhashed(
+            .execute_one(&RecoveredBlock::new_unhashed(
                Block { header, body: Default::default() },
                vec![],
            ))
@@ -903,7 +901,7 @@ mod tests {
        };

        executor
-            .execute_and_verify_one(&RecoveredBlock::new_unhashed(
+            .execute_one(&RecoveredBlock::new_unhashed(
                Block { header, body: Default::default() },
                vec![],
            ))
@@ -984,10 +982,10 @@ mod tests {

        let provider = executor_provider(chain_spec);

-        let executor = provider.executor(StateProviderDatabase::new(&db));
+        let mut executor = provider.executor(StateProviderDatabase::new(&db));

-        let BlockExecutionOutput { receipts, requests, .. } = executor
-            .execute(
+        let BlockExecutionResult { receipts, requests, .. } = executor
+            .execute_one(
                &Block { header, body: BlockBody { transactions: vec![tx], ..Default::default() } }
                    .try_into_recovered()
                    .unwrap(),
@@ -1060,10 +1058,10 @@ mod tests {
        );

        // Create an executor from the state provider
-        let executor = executor_provider(chain_spec).executor(StateProviderDatabase::new(&db));
+        let mut executor = executor_provider(chain_spec).executor(StateProviderDatabase::new(&db));

        // Execute the block and capture the result
-        let exec_result = executor.execute(
+        let exec_result = executor.execute_one(
            &Block { header, body: BlockBody { transactions: vec![tx], ..Default::default() } }
                .try_into_recovered()
                .unwrap(),
@ -1,4 +1,4 @@
|
|||||||
use crate::BlockExecutionOutput;
|
use crate::{BlockExecutionOutput, BlockExecutionResult};
|
||||||
use alloc::{vec, vec::Vec};
|
use alloc::{vec, vec::Vec};
|
||||||
use alloy_eips::eip7685::Requests;
|
use alloy_eips::eip7685::Requests;
|
||||||
use alloy_primitives::{logs_bloom, map::HashMap, Address, BlockNumber, Bloom, Log, B256, U256};
|
use alloy_primitives::{logs_bloom, map::HashMap, Address, BlockNumber, Bloom, Log, B256, U256};
|
||||||
@ -128,6 +128,30 @@ impl<T> ExecutionOutcome<T> {
|
|||||||
Self { bundle, receipts, first_block, requests }
|
Self { bundle, receipts, first_block, requests }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Creates a new `ExecutionOutcome` from a single block execution result.
|
||||||
|
pub fn single(block_number: u64, result: BlockExecutionOutput<T>) -> Self {
|
||||||
|
Self {
|
||||||
|
bundle: result.state,
|
||||||
|
receipts: vec![result.receipts],
|
||||||
|
first_block: block_number,
|
||||||
|
requests: vec![result.requests],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates a new `ExecutionOutcome` from multiple [`BlockExecutionResult`]s.
|
||||||
|
pub fn from_blocks(
|
||||||
|
first_block: u64,
|
||||||
|
bundle: BundleState,
|
||||||
|
results: Vec<BlockExecutionResult<T>>,
|
||||||
|
) -> Self {
|
||||||
|
let mut value = Self { bundle, first_block, receipts: Vec::new(), requests: Vec::new() };
|
||||||
|
for result in results {
|
||||||
|
value.receipts.push(result.receipts);
|
||||||
|
value.requests.push(result.requests);
|
||||||
|
}
|
||||||
|
value
|
||||||
|
}
|
||||||
|
|
||||||
/// Return revm bundle state.
|
/// Return revm bundle state.
|
||||||
pub const fn state(&self) -> &BundleState {
|
pub const fn state(&self) -> &BundleState {
|
||||||
&self.bundle
|
&self.bundle
|
||||||
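The two constructors added above are the new bridge from executor output to an `ExecutionOutcome`. A minimal usage sketch — `block_number`, `first_block`, `executor`, `block`, and `results` are placeholder bindings, not part of this change, and the two calls illustrate alternative paths rather than one program:

```rust
// Single block: wrap the BlockExecutionOutput returned by `Executor::execute`.
let outcome = ExecutionOutcome::single(block_number, executor.execute(&block)?);

// Several blocks: keep each BlockExecutionResult from `execute_one` and pair them
// with the bundle state taken from the executor once the loop is done.
let outcome = ExecutionOutcome::from_blocks(
    first_block,
    executor.into_state().take_bundle(),
    results, // Vec<BlockExecutionResult<_>>
);
```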
@@ -65,6 +65,30 @@ pub trait Executor<DB: Database>: Sized {
         Ok(BlockExecutionOutput { state: state.take_bundle(), receipts, requests, gas_used })
     }
 
+    /// Executes multiple inputs in the batch, and returns an aggregated [`ExecutionOutcome`].
+    fn execute_batch<'a, I>(
+        mut self,
+        blocks: I,
+    ) -> Result<ExecutionOutcome<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error>
+    where
+        I: IntoIterator<Item = &'a RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>>,
+    {
+        let mut results = Vec::new();
+        let mut first_block = None;
+        for block in blocks {
+            if first_block.is_none() {
+                first_block = Some(block.header().number());
+            }
+            results.push(self.execute_one(block)?);
+        }
+
+        Ok(ExecutionOutcome::from_blocks(
+            first_block.unwrap_or_default(),
+            self.into_state().take_bundle(),
+            results,
+        ))
+    }
+
     /// Executes the EVM with the given input and accepts a state closure that is invoked with
     /// the EVM state after execution.
     fn execute_with_state_closure<F>(
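`execute_batch` ships with the default body above, so any `Executor` implementation gets batching for free. A hedged sketch of a call site, mirroring the backfill test further down in this diff (`provider`, `db`, `block1`, and `block2` are placeholders):

```rust
let executor = provider.executor(StateProviderDatabase::new(&db));
// Consumes the executor; per-block receipts and requests are collected and the
// final bundle state is taken via `into_state().take_bundle()` internally.
let outcome = executor.execute_batch(vec![&block1, &block2])?;
```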
@@ -377,13 +401,10 @@ where
         F: OnStateHook + 'static,
     {
         self.strategy.with_state_hook(Some(Box::new(state_hook)));
+        let result = self.execute_one(block);
+        self.strategy.with_state_hook(None);
 
-        self.strategy.apply_pre_execution_changes(block)?;
-        let ExecuteOutput { receipts, gas_used } = self.strategy.execute_transactions(block)?;
-        let requests = self.strategy.apply_post_execution_changes(block, &receipts)?;
-        self.strategy.state_mut().merge_transitions(BundleRetention::Reverts);
-
-        Ok(BlockExecutionResult { receipts, requests, gas_used })
+        result
     }
 
     fn into_state(self) -> State<DB> {
@@ -7,13 +7,14 @@ use std::{
 use alloy_consensus::BlockHeader;
 use alloy_primitives::BlockNumber;
 use reth_evm::execute::{
-    BatchExecutor, BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor,
+    BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor,
 };
 use reth_node_api::{Block as _, BlockBody as _, NodePrimitives};
 use reth_primitives::{Receipt, RecoveredBlock};
 use reth_primitives_traits::{format_gas_throughput, SignedTransaction};
 use reth_provider::{
-    BlockReader, Chain, HeaderProvider, ProviderError, StateProviderFactory, TransactionVariant,
+    BlockReader, Chain, ExecutionOutcome, HeaderProvider, ProviderError, StateProviderFactory,
+    TransactionVariant,
 };
 use reth_prune_types::PruneModes;
 use reth_revm::database::StateProviderDatabase;
@@ -75,7 +76,7 @@ where
             "Executing block range"
         );
 
-        let mut executor = self.executor.batch_executor(StateProviderDatabase::new(
+        let mut executor = self.executor.executor(StateProviderDatabase::new(
             self.provider.history_by_block_number(self.range.start().saturating_sub(1))?,
         ));
 
@@ -85,6 +86,7 @@ where
         let batch_start = Instant::now();
 
         let mut blocks = Vec::new();
+        let mut results = Vec::new();
        for block_number in self.range.clone() {
             // Fetch the block
             let fetch_block_start = Instant::now();
@@ -110,19 +112,17 @@ where
             let (header, body) = block.split_sealed_header_body();
             let block = P::Block::new_sealed(header, body).with_senders(senders);
-
-            executor.execute_and_verify_one(&block)?;
+            results.push(executor.execute_one(&block)?);
             execution_duration += execute_start.elapsed();
 
             // TODO(alexey): report gas metrics using `block.header.gas_used`
 
             // Seal the block back and save it
             blocks.push(block);
 
             // Check if we should commit now
-            let bundle_size_hint = executor.size_hint().unwrap_or_default() as u64;
             if self.thresholds.is_end_of_batch(
                 block_number - *self.range.start(),
-                bundle_size_hint,
+                executor.size_hint() as u64,
                 cumulative_gas,
                 batch_start.elapsed(),
             ) {
@@ -130,6 +130,7 @@ where
             }
         }
 
+        let first_block_number = blocks.first().expect("blocks should not be empty").number();
         let last_block_number = blocks.last().expect("blocks should not be empty").number();
         debug!(
             target: "exex::backfill",
@@ -141,7 +142,12 @@ where
         );
         self.range = last_block_number + 1..=*self.range.end();
 
-        let chain = Chain::new(blocks, executor.finalize(), None);
+        let outcome = ExecutionOutcome::from_blocks(
+            first_block_number,
+            executor.into_state().take_bundle(),
+            results,
+        );
+        let chain = Chain::new(blocks, outcome, None);
         Ok(chain)
     }
 }
@@ -4,7 +4,7 @@ use alloy_consensus::{constants::ETH_TO_WEI, BlockHeader, Header, TxEip2930};
 use alloy_genesis::{Genesis, GenesisAccount};
 use alloy_primitives::{b256, Address, TxKind, U256};
 use reth_chainspec::{ChainSpec, ChainSpecBuilder, EthereumHardfork, MAINNET, MIN_TRANSACTION_GAS};
-use reth_evm::execute::{BatchExecutor, BlockExecutionOutput, BlockExecutorProvider, Executor};
+use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor};
 use reth_evm_ethereum::execute::EthExecutorProvider;
 use reth_node_api::FullNodePrimitives;
 use reth_primitives::{Block, BlockBody, Receipt, RecoveredBlock, Transaction};
@@ -195,9 +195,9 @@ where
     let provider = provider_factory.provider()?;
 
     let executor = EthExecutorProvider::ethereum(chain_spec)
-        .batch_executor(StateProviderDatabase::new(LatestStateProviderRef::new(&provider)));
+        .executor(StateProviderDatabase::new(LatestStateProviderRef::new(&provider)));
 
-    let mut execution_outcome = executor.execute_and_verify_batch(vec![&block1, &block2])?;
+    let mut execution_outcome = executor.execute_batch(vec![&block1, &block2])?;
     execution_outcome.state_mut().reverts.sort();
 
     // Commit the block's execution outcome to the database
@@ -5,7 +5,7 @@ use std::sync::Arc;
 use crate::BlockTy;
 use alloy_primitives::{BlockNumber, B256};
 use reth_config::{config::StageConfig, PruneConfig};
-use reth_consensus::{Consensus, ConsensusError};
+use reth_consensus::{ConsensusError, FullConsensus};
 use reth_downloaders::{
     bodies::bodies::BodiesDownloaderBuilder,
     headers::reverse_headers::ReverseHeadersDownloaderBuilder,
@@ -28,7 +28,7 @@ use tokio::sync::watch;
 pub fn build_networked_pipeline<N, Client, Executor>(
     config: &StageConfig,
     client: Client,
-    consensus: Arc<dyn Consensus<BlockTy<N>, Error = ConsensusError>>,
+    consensus: Arc<dyn FullConsensus<N::Primitives, Error = ConsensusError>>,
     provider_factory: ProviderFactory<N>,
     task_executor: &TaskExecutor,
     metrics_tx: reth_stages::MetricEventsSender,
@@ -49,7 +49,7 @@ where
         .into_task_with(task_executor);
 
     let body_downloader = BodiesDownloaderBuilder::new(config.bodies)
-        .build(client, Arc::clone(&consensus), provider_factory.clone())
+        .build(client, consensus.clone().as_consensus(), provider_factory.clone())
         .into_task_with(task_executor);
 
     let pipeline = build_pipeline(
@@ -76,7 +76,7 @@ pub fn build_pipeline<N, H, B, Executor>(
     stage_config: &StageConfig,
     header_downloader: H,
     body_downloader: B,
-    consensus: Arc<dyn Consensus<BlockTy<N>, Error = ConsensusError>>,
+    consensus: Arc<dyn FullConsensus<N::Primitives, Error = ConsensusError>>,
     max_block: Option<u64>,
     metrics_tx: reth_stages::MetricEventsSender,
     prune_config: Option<PruneConfig>,
@@ -117,6 +117,7 @@ where
             )
             .set(ExecutionStage::new(
                 executor,
+                consensus,
                 stage_config.execution.into(),
                 stage_config.execution_external_clean_threshold(),
                 exex_manager_handle,
@@ -34,6 +34,7 @@ reth-node-metrics.workspace = true
 ## optimism
 reth-optimism-primitives.workspace = true
 reth-optimism-chainspec.workspace = true
+reth-optimism-consensus.workspace = true
 
 reth-chainspec.workspace = true
 reth-node-events.workspace = true
@@ -84,6 +85,7 @@ optimism = [
     "reth-db-api/optimism",
     "reth-optimism-primitives/optimism",
     "reth-downloaders/optimism",
+    "reth-optimism-consensus/optimism",
 ]
 asm-keccak = [
     "alloy-primitives/asm-keccak",
@@ -51,6 +51,7 @@ use reth_node_core::{
     args::LogArgs,
     version::{LONG_VERSION, SHORT_VERSION},
 };
+use reth_optimism_consensus::OpBeaconConsensus;
 use reth_optimism_evm::OpExecutorProvider;
 use reth_optimism_node::{OpNetworkPrimitives, OpNode};
 use reth_tracing::FileWorkerGuard;
@@ -169,8 +170,9 @@ where
             Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()),
             Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::<OpNode>()),
             Commands::Stage(command) => runner.run_command_until_exit(|ctx| {
-                command
-                    .execute::<OpNode, _, _, OpNetworkPrimitives>(ctx, OpExecutorProvider::optimism)
+                command.execute::<OpNode, _, _, OpNetworkPrimitives>(ctx, |spec| {
+                    (OpExecutorProvider::optimism(spec.clone()), OpBeaconConsensus::new(spec))
+                })
             }),
             Commands::P2P(command) => {
                 runner.run_until_ctrl_c(command.execute::<OpNetworkPrimitives>())
@@ -329,7 +329,7 @@ mod tests {
     };
     use op_alloy_consensus::{OpTypedTransaction, TxDeposit};
     use reth_chainspec::MIN_TRANSACTION_GAS;
-    use reth_evm::execute::{BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider};
+    use reth_evm::execute::{BasicBlockExecutorProvider, BlockExecutorProvider, Executor};
     use reth_optimism_chainspec::OpChainSpecBuilder;
     use reth_optimism_primitives::{OpReceipt, OpTransactionSigned};
     use reth_primitives_traits::Account;
@@ -416,7 +416,7 @@ mod tests {
         );
 
         let provider = executor_provider(chain_spec);
-        let mut executor = provider.batch_executor(StateProviderDatabase::new(&db));
+        let mut executor = provider.executor(StateProviderDatabase::new(&db));
 
         // make sure the L1 block contract state is preloaded.
         executor.with_state_mut(|state| {
@@ -424,8 +424,8 @@ mod tests {
         });
 
         // Attempt to execute a block with one deposit and one non-deposit transaction
-        executor
-            .execute_and_verify_one(&RecoveredBlock::new_unhashed(
+        let output = executor
+            .execute(&RecoveredBlock::new_unhashed(
                 Block {
                     header,
                     body: BlockBody { transactions: vec![tx, tx_deposit], ..Default::default() },
@@ -434,9 +434,9 @@ mod tests {
             ))
             .unwrap();
 
-        let receipts = executor.receipts();
-        let tx_receipt = &receipts[0][0];
-        let deposit_receipt = &receipts[0][1];
+        let receipts = output.receipts;
+        let tx_receipt = &receipts[0];
+        let deposit_receipt = &receipts[1];
 
         assert!(!matches!(tx_receipt, OpReceipt::Deposit(_)));
         // deposit_nonce is present only in deposit transactions
@@ -492,7 +492,7 @@ mod tests {
         );
 
         let provider = executor_provider(chain_spec);
-        let mut executor = provider.batch_executor(StateProviderDatabase::new(&db));
+        let mut executor = provider.executor(StateProviderDatabase::new(&db));
 
         // make sure the L1 block contract state is preloaded.
         executor.with_state_mut(|state| {
@@ -500,8 +500,8 @@ mod tests {
         });
 
         // attempt to execute an empty block with parent beacon block root, this should not fail
-        executor
-            .execute_and_verify_one(&RecoveredBlock::new_unhashed(
+        let output = executor
+            .execute(&RecoveredBlock::new_unhashed(
                 Block {
                     header,
                     body: BlockBody { transactions: vec![tx, tx_deposit], ..Default::default() },
@@ -510,9 +510,9 @@ mod tests {
             ))
             .expect("Executing a block while canyon is active should not fail");
 
-        let receipts = executor.receipts();
-        let tx_receipt = &receipts[0][0];
-        let deposit_receipt = &receipts[0][1];
+        let receipts = output.receipts;
+        let tx_receipt = &receipts[0];
+        let deposit_receipt = &receipts[1];
 
         // deposit_receipt_version is set to 1 for post canyon deposit transactions
         assert!(!matches!(tx_receipt, OpReceipt::Deposit(_)));
@@ -66,6 +66,7 @@ reth-chainspec.workspace = true
 reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] }
 reth-db = { workspace = true, features = ["test-utils", "mdbx"] }
 reth-evm-ethereum.workspace = true
+reth-ethereum-consensus.workspace = true
 reth-execution-errors.workspace = true
 reth-consensus = { workspace = true, features = ["test-utils"] }
 reth-network-p2p = { workspace = true, features = ["test-utils"] }
@@ -32,9 +32,10 @@
 //! # use reth_config::config::StageConfig;
 //! # use reth_consensus::{Consensus, ConsensusError};
 //! # use reth_consensus::test_utils::TestConsensus;
+//! # use reth_consensus::FullConsensus;
 //! #
 //! # let chain_spec = MAINNET.clone();
-//! # let consensus: Arc<dyn Consensus<reth_primitives::Block, Error = ConsensusError>> = Arc::new(TestConsensus::default());
+//! # let consensus: Arc<dyn FullConsensus<reth_primitives::EthPrimitives, Error = ConsensusError>> = Arc::new(TestConsensus::default());
 //! # let headers_downloader = ReverseHeadersDownloaderBuilder::default().build(
 //! #     Arc::new(TestHeadersClient::default()),
 //! #     consensus.clone().as_header_validator()
@@ -42,7 +43,7 @@
 //! # let provider_factory = create_test_provider_factory();
 //! # let bodies_downloader = BodiesDownloaderBuilder::default().build(
 //! #     Arc::new(TestBodiesClient { responder: |_| Ok((PeerId::ZERO, vec![]).into()) }),
-//! #     consensus.clone(),
+//! #     consensus.clone().as_consensus(),
 //! #     provider_factory.clone()
 //! # );
 //! # let (tip_tx, tip_rx) = watch::channel(B256::default());
@@ -21,15 +21,17 @@
 //! # use reth_config::config::StageConfig;
 //! # use reth_evm::execute::BlockExecutorProvider;
 //! # use reth_primitives::EthPrimitives;
+//! # use std::sync::Arc;
+//! # use reth_consensus::{FullConsensus, ConsensusError};
 //!
-//! # fn create(exec: impl BlockExecutorProvider<Primitives = EthPrimitives>) {
+//! # fn create(exec: impl BlockExecutorProvider<Primitives = EthPrimitives>, consensus: impl FullConsensus<EthPrimitives, Error = ConsensusError> + 'static) {
 //!
 //! let provider_factory = create_test_provider_factory();
 //! let static_file_producer =
 //!     StaticFileProducer::new(provider_factory.clone(), PruneModes::default());
 //! // Build a pipeline with all offline stages.
 //! let pipeline = Pipeline::<MockNodeTypesWithDB>::builder()
-//!     .add_stages(OfflineStages::new(exec, StageConfig::default(), PruneModes::default()))
+//!     .add_stages(OfflineStages::new(exec, Arc::new(consensus), StageConfig::default(), PruneModes::default()))
 //!     .build(provider_factory, static_file_producer);
 //!
 //! # }
@@ -44,9 +46,10 @@ use crate::{
 };
 use alloy_primitives::B256;
 use reth_config::config::StageConfig;
-use reth_consensus::{Consensus, ConsensusError};
+use reth_consensus::{Consensus, ConsensusError, FullConsensus};
 use reth_evm::execute::BlockExecutorProvider;
 use reth_network_p2p::{bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader};
+use reth_primitives::NodePrimitives;
 use reth_primitives_traits::Block;
 use reth_provider::HeaderSyncGapProvider;
 use reth_prune_types::PruneModes;
@@ -78,15 +81,18 @@ use tokio::sync::watch;
 /// - [`PruneStage`] (execute)
 /// - [`FinishStage`]
 #[derive(Debug)]
-pub struct DefaultStages<Provider, H, B, EF>
+pub struct DefaultStages<Provider, H, B, E>
 where
     H: HeaderDownloader,
     B: BodyDownloader,
+    E: BlockExecutorProvider,
 {
     /// Configuration for the online stages
     online: OnlineStages<Provider, H, B>,
     /// Executor factory needs for execution stage
-    executor_factory: EF,
+    executor_provider: E,
+    /// Consensus instance
+    consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
     /// Configuration for each stage in the pipeline
     stages_config: StageConfig,
     /// Prune configuration for every segment that can be pruned
@@ -97,32 +103,31 @@ impl<Provider, H, B, E> DefaultStages<Provider, H, B, E>
 where
     H: HeaderDownloader,
     B: BodyDownloader,
+    E: BlockExecutorProvider<Primitives: NodePrimitives<BlockHeader = H::Header, Block = B::Block>>,
 {
     /// Create a new set of default stages with default values.
     #[allow(clippy::too_many_arguments)]
     pub fn new(
         provider: Provider,
         tip: watch::Receiver<B256>,
-        consensus: Arc<dyn Consensus<B::Block, Error = ConsensusError>>,
+        consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
         header_downloader: H,
         body_downloader: B,
-        executor_factory: E,
+        executor_provider: E,
         stages_config: StageConfig,
         prune_modes: PruneModes,
-    ) -> Self
-    where
-        E: BlockExecutorProvider,
-    {
+    ) -> Self {
         Self {
             online: OnlineStages::new(
                 provider,
                 tip,
-                consensus,
+                consensus.clone().as_consensus(),
                 header_downloader,
                 body_downloader,
                 stages_config.clone(),
             ),
-            executor_factory,
+            executor_provider,
+            consensus,
             stages_config,
             prune_modes,
         }
@@ -138,7 +143,8 @@ where
     /// Appends the default offline stages and default finish stage to the given builder.
     pub fn add_offline_stages<Provider>(
         default_offline: StageSetBuilder<Provider>,
-        executor_factory: E,
+        executor_provider: E,
+        consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
         stages_config: StageConfig,
         prune_modes: PruneModes,
     ) -> StageSetBuilder<Provider>
@@ -147,7 +153,7 @@ where
 {
         StageSetBuilder::default()
             .add_set(default_offline)
-            .add_set(OfflineStages::new(executor_factory, stages_config, prune_modes))
+            .add_set(OfflineStages::new(executor_provider, consensus, stages_config, prune_modes))
             .add_stage(FinishStage)
     }
 }
@@ -164,7 +170,8 @@ where
     fn builder(self) -> StageSetBuilder<Provider> {
         Self::add_offline_stages(
             self.online.builder(),
-            self.executor_factory,
+            self.executor_provider,
+            self.consensus,
             self.stages_config.clone(),
             self.prune_modes,
         )
@@ -286,25 +293,28 @@ where
 /// - [`HashingStages`]
 /// - [`HistoryIndexingStages`]
 /// - [`PruneStage`]
-#[derive(Debug, Default)]
+#[derive(Debug)]
 #[non_exhaustive]
-pub struct OfflineStages<EF> {
+pub struct OfflineStages<E: BlockExecutorProvider> {
     /// Executor factory needs for execution stage
-    executor_factory: EF,
+    executor_provider: E,
+    /// Consensus instance for validating blocks.
+    consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
     /// Configuration for each stage in the pipeline
     stages_config: StageConfig,
     /// Prune configuration for every segment that can be pruned
     prune_modes: PruneModes,
 }
 
-impl<EF> OfflineStages<EF> {
+impl<E: BlockExecutorProvider> OfflineStages<E> {
     /// Create a new set of offline stages with default values.
     pub const fn new(
-        executor_factory: EF,
+        executor_provider: E,
+        consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
         stages_config: StageConfig,
         prune_modes: PruneModes,
     ) -> Self {
-        Self { executor_factory, stages_config, prune_modes }
+        Self { executor_provider, consensus, stages_config, prune_modes }
     }
 }
 
@@ -318,7 +328,7 @@ where
     PruneStage: Stage<Provider>,
 {
     fn builder(self) -> StageSetBuilder<Provider> {
-        ExecutionStages::new(self.executor_factory, self.stages_config.clone())
+        ExecutionStages::new(self.executor_provider, self.consensus, self.stages_config.clone())
             .builder()
             // If sender recovery prune mode is set, add the prune sender recovery stage.
             .add_stage_opt(self.prune_modes.sender_recovery.map(|prune_mode| {
@@ -341,17 +351,23 @@ where
 /// A set containing all stages that are required to execute pre-existing block data.
 #[derive(Debug)]
 #[non_exhaustive]
-pub struct ExecutionStages<E> {
+pub struct ExecutionStages<E: BlockExecutorProvider> {
     /// Executor factory that will create executors.
-    executor_factory: E,
+    executor_provider: E,
+    /// Consensus instance for validating blocks.
+    consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
     /// Configuration for each stage in the pipeline
     stages_config: StageConfig,
 }
 
-impl<E> ExecutionStages<E> {
+impl<E: BlockExecutorProvider> ExecutionStages<E> {
     /// Create a new set of execution stages with default values.
-    pub const fn new(executor_factory: E, stages_config: StageConfig) -> Self {
-        Self { executor_factory, stages_config }
+    pub const fn new(
+        executor_provider: E,
+        consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
+        stages_config: StageConfig,
+    ) -> Self {
+        Self { executor_provider, consensus, stages_config }
     }
 }
 
@@ -365,7 +381,8 @@ where
         StageSetBuilder::default()
             .add_stage(SenderRecoveryStage::new(self.stages_config.sender_recovery))
             .add_stage(ExecutionStage::from_config(
-                self.executor_factory,
+                self.executor_provider,
+                self.consensus,
                 self.stages_config.execution,
                 self.stages_config.execution_external_clean_threshold(),
             ))
@@ -4,9 +4,10 @@ use alloy_eips::{eip1898::BlockWithParent, NumHash};
 use alloy_primitives::BlockNumber;
 use num_traits::Zero;
 use reth_config::config::ExecutionConfig;
+use reth_consensus::{ConsensusError, FullConsensus, PostExecutionInput};
 use reth_db::{static_file::HeaderMask, tables};
 use reth_evm::{
-    execute::{BatchExecutor, BlockExecutorProvider},
+    execute::{BlockExecutorProvider, Executor},
     metrics::ExecutorMetrics,
 };
 use reth_execution_types::Chain;
@@ -15,9 +16,9 @@ use reth_primitives::StaticFileSegment;
 use reth_primitives_traits::{format_gas_throughput, Block, BlockBody, NodePrimitives};
 use reth_provider::{
     providers::{StaticFileProvider, StaticFileWriter},
-    BlockHashReader, BlockReader, DBProvider, HeaderProvider, LatestStateProviderRef,
-    OriginalValuesKnown, ProviderError, StateCommitmentProvider, StateWriter,
-    StaticFileProviderFactory, StatsReader, StorageLocation, TransactionVariant,
+    BlockHashReader, BlockReader, DBProvider, ExecutionOutcome, HeaderProvider,
+    LatestStateProviderRef, OriginalValuesKnown, ProviderError, StateCommitmentProvider,
+    StateWriter, StaticFileProviderFactory, StatsReader, StorageLocation, TransactionVariant,
 };
 use reth_revm::database::StateProviderDatabase;
 use reth_stages_api::{
@@ -72,6 +73,9 @@ where
 {
     /// The stage's internal block executor
     executor_provider: E,
+    /// The consensus instance for validating blocks.
+    consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
+    /// The consensu
     /// The commit thresholds of the execution stage.
     thresholds: ExecutionStageThresholds,
     /// The highest threshold (in number of blocks) for switching between incremental
@@ -100,6 +104,7 @@ where
     /// Create new execution stage with specified config.
     pub fn new(
         executor_provider: E,
+        consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
         thresholds: ExecutionStageThresholds,
         external_clean_threshold: u64,
         exex_manager_handle: ExExManagerHandle<E::Primitives>,
@@ -107,6 +112,7 @@ where
         Self {
             external_clean_threshold,
             executor_provider,
+            consensus,
             thresholds,
             post_execute_commit_input: None,
             post_unwind_commit_input: None,
@@ -118,9 +124,13 @@ where
     /// Create an execution stage with the provided executor.
     ///
     /// The commit threshold will be set to [`MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD`].
-    pub fn new_with_executor(executor_provider: E) -> Self {
+    pub fn new_with_executor(
+        executor_provider: E,
+        consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
+    ) -> Self {
         Self::new(
             executor_provider,
+            consensus,
             ExecutionStageThresholds::default(),
             MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD,
             ExExManagerHandle::empty(),
@@ -130,11 +140,13 @@ where
     /// Create new instance of [`ExecutionStage`] from configuration.
     pub fn from_config(
        executor_provider: E,
+        consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
         config: ExecutionConfig,
         external_clean_threshold: u64,
     ) -> Self {
         Self::new(
             executor_provider,
+            consensus,
             config.into(),
             external_clean_threshold,
             ExExManagerHandle::empty(),
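All three constructors now thread a `FullConsensus` instance through to the stage. A hedged sketch of the new call shape on Ethereum, with `executor_provider` and `chain_spec` as placeholder bindings (the test fixture later in this diff does essentially the same thing):

```rust
let stage = ExecutionStage::new(
    executor_provider,
    // Consensus used for post-execution validation of each block.
    Arc::new(EthBeaconConsensus::new(chain_spec)),
    ExecutionStageThresholds::default(),
    MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD,
    ExExManagerHandle::empty(),
);
```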
@@ -283,7 +295,7 @@ where
         self.ensure_consistency(provider, input.checkpoint().block_number, None)?;
 
         let db = StateProviderDatabase(LatestStateProviderRef::new(provider));
-        let mut executor = self.executor_provider.batch_executor(db);
+        let mut executor = self.executor_provider.executor(db);
 
         // Progress tracking
         let mut stage_progress = start_block;
@@ -310,6 +322,7 @@ where
         let batch_start = Instant::now();
 
         let mut blocks = Vec::new();
+        let mut results = Vec::new();
         for block_number in start_block..=max_block {
             // Fetch the block
             let fetch_block_start = Instant::now();
@@ -329,8 +342,8 @@ where
             // Execute the block
             let execute_start = Instant::now();
 
-            self.metrics.metered_one(&block, |input| {
-                executor.execute_and_verify_one(input).map_err(|error| {
+            let result = self.metrics.metered_one(&block, |input| {
+                executor.execute_one(input).map_err(|error| {
                     let header = block.header();
                     StageError::Block {
                         block: Box::new(BlockWithParent::new(
@@ -342,6 +355,20 @@ where
                 })
             })?;
 
+            if let Err(err) = self.consensus.validate_block_post_execution(
+                &block,
+                PostExecutionInput::new(&result.receipts, &result.requests),
+            ) {
+                return Err(StageError::Block {
+                    block: Box::new(BlockWithParent::new(
+                        block.header().parent_hash(),
+                        NumHash::new(block.header().number(), block.hash_slow()),
+                    )),
+                    error: BlockErrorKind::Validation(err),
+                })
+            }
+            results.push(result);
+
             execution_duration += execute_start.elapsed();
 
             // Log execution throughput
@@ -369,10 +396,9 @@ where
             }
 
             // Check if we should commit now
-            let bundle_size_hint = executor.size_hint().unwrap_or_default() as u64;
             if self.thresholds.is_end_of_batch(
                 block_number - start_block,
-                bundle_size_hint,
+                executor.size_hint() as u64,
                 cumulative_gas,
                 batch_start.elapsed(),
             ) {
@@ -382,7 +408,11 @@ where
 
         // prepare execution output for writing
         let time = Instant::now();
-        let mut state = executor.finalize();
+        let mut state = ExecutionOutcome::from_blocks(
+            start_block,
+            executor.into_state().take_bundle(),
+            results,
+        );
         let write_preparation_duration = time.elapsed();
 
         // log the gas per second for the range we just executed
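Condensed, the per-block loop introduced above now executes the block and then lets consensus validate the outputs before the result is buffered for the batched write. A sketch with the metrics wrapper and error mapping stripped out (`executor`, `consensus`, `block`, and `results` stand in for the surrounding locals):

```rust
// Execute the block and collect receipts, requests and gas used.
let result = executor.execute_one(&block)?;
// Post-execution consensus checks over the produced receipts and requests.
consensus.validate_block_post_execution(
    &block,
    PostExecutionInput::new(&result.receipts, &result.requests),
)?;
// Buffer the result; the batch is folded into an ExecutionOutcome at commit time.
results.push(result);
```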
@@ -649,6 +679,7 @@ mod tests {
     use reth_chainspec::ChainSpecBuilder;
     use reth_db::transaction::DbTx;
     use reth_db_api::{models::AccountBeforeTx, transaction::DbTxMut};
+    use reth_ethereum_consensus::EthBeaconConsensus;
     use reth_evm::execute::BasicBlockExecutorProvider;
     use reth_evm_ethereum::execute::EthExecutionStrategyFactory;
     use reth_primitives::{Account, Bytecode, SealedBlock, StorageEntry};
@@ -666,8 +697,12 @@ mod tests {
             ChainSpecBuilder::mainnet().berlin_activated().build(),
         ));
         let executor_provider = BasicBlockExecutorProvider::new(strategy_factory);
+        let consensus = Arc::new(EthBeaconConsensus::new(Arc::new(
+            ChainSpecBuilder::mainnet().berlin_activated().build(),
+        )));
         ExecutionStage::new(
             executor_provider,
+            consensus,
             ExecutionStageThresholds {
                 max_blocks: Some(100),
                 max_changes: None,
@@ -57,6 +57,7 @@ mod tests {
         table::Table,
         transaction::{DbTx, DbTxMut},
     };
+    use reth_ethereum_consensus::EthBeaconConsensus;
     use reth_evm_ethereum::execute::EthExecutorProvider;
     use reth_exex::ExExManagerHandle;
     use reth_primitives::{Account, Bytecode, SealedBlock, StaticFileSegment};
@@ -152,6 +153,9 @@ mod tests {
             EthExecutorProvider::ethereum(Arc::new(
                 ChainSpecBuilder::mainnet().berlin_activated().build(),
             )),
+            Arc::new(EthBeaconConsensus::new(Arc::new(
+                ChainSpecBuilder::mainnet().berlin_activated().build(),
+            ))),
             ExecutionStageThresholds {
                 max_blocks: Some(100),
                 max_changes: None,
@@ -27,6 +27,7 @@ reth-db-api.workspace = true
 reth-provider = { workspace = true, features = ["test-utils"] }
 reth-stages.workspace = true
 reth-evm-ethereum.workspace = true
+reth-ethereum-consensus.workspace = true
 reth-revm = { workspace = true, features = ["std"] }
 
 revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] }
@@ -7,6 +7,7 @@ use crate::{
 use alloy_rlp::Decodable;
 use rayon::iter::{ParallelBridge, ParallelIterator};
 use reth_chainspec::ChainSpec;
+use reth_ethereum_consensus::EthBeaconConsensus;
 use reth_primitives::{BlockBody, SealedBlock, StaticFileSegment};
 use reth_provider::{
     providers::StaticFileWriter, test_utils::create_test_provider_factory_with_chain_spec,
@@ -126,7 +127,8 @@ impl Case for BlockchainTestCase {
                 // Execute the execution stage using the EVM processor factory for the test case
                 // network.
                 let _ = ExecutionStage::new_with_executor(
-                    reth_evm_ethereum::execute::EthExecutorProvider::ethereum(chain_spec),
+                    reth_evm_ethereum::execute::EthExecutorProvider::ethereum(chain_spec.clone()),
+                    Arc::new(EthBeaconConsensus::new(chain_spec)),
                 )
                 .execute(
                     &provider,