feat: integrate builder (#6611)

Matthias Seitz
2024-02-29 17:50:04 +01:00
committed by GitHub
parent 7d36206dfe
commit c5955f1305
73 changed files with 2201 additions and 3022 deletions

Cargo.lock (generated)

@ -20,6 +20,7 @@ dependencies = [
"eyre",
"jsonrpsee",
"reth",
"reth-node-ethereum",
"reth-transaction-pool",
"tokio",
]
@ -798,12 +799,10 @@ name = "beacon-api-sse"
version = "0.0.0"
dependencies = [
"clap",
"eyre",
"futures-util",
"mev-share-sse",
"reth",
"serde",
"serde_json",
"reth-node-ethereum",
"tokio",
"tracing",
]
@ -1391,9 +1390,8 @@ checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce"
name = "cli-extension-event-hooks"
version = "0.0.0"
dependencies = [
"clap",
"eyre",
"reth",
"reth-node-ethereum",
]
[[package]]
@ -1816,14 +1814,43 @@ dependencies = [
"syn 2.0.50",
]
[[package]]
name = "custom-dev-node"
version = "0.0.0"
dependencies = [
"eyre",
"futures-util",
"reth",
"reth-node-core",
"reth-node-ethereum",
"reth-primitives",
"serde_json",
"tokio",
]
[[package]]
name = "custom-evm"
version = "0.0.0"
dependencies = [
"alloy-chains",
"eyre",
"reth",
"reth-node-api",
"reth-node-core",
"reth-node-ethereum",
"reth-primitives",
"reth-tracing",
"tokio",
]
[[package]]
name = "custom-inspector"
version = "0.0.0"
dependencies = [
"clap",
"eyre",
"futures-util",
"reth",
"reth-node-ethereum",
]
[[package]]
@ -1832,32 +1859,45 @@ version = "0.0.0"
dependencies = [
"alloy-chains",
"eyre",
"jsonrpsee",
"reth",
"reth-basic-payload-builder",
"reth-ethereum-payload-builder",
"reth-node-api",
"reth-node-core",
"reth-node-ethereum",
"reth-payload-builder",
"reth-primitives",
"reth-rpc-api",
"reth-rpc-types",
"reth-tracing",
"serde",
"thiserror",
"tokio",
]
[[package]]
name = "custom-node-components"
version = "0.0.0"
dependencies = [
"eyre",
"reth",
"reth-node-ethereum",
"reth-tracing",
"reth-transaction-pool",
]
[[package]]
name = "custom-payload-builder"
version = "0.0.0"
dependencies = [
"clap",
"eyre",
"futures-util",
"reth",
"reth-basic-payload-builder",
"reth-ethereum-payload-builder",
"reth-node-api",
"reth-node-ethereum",
"reth-payload-builder",
"reth-primitives",
"tokio",
"tracing",
]
@ -5739,6 +5779,7 @@ dependencies = [
"reth-network-api",
"reth-nippy-jar",
"reth-node-api",
"reth-node-builder",
"reth-node-core",
"reth-node-ethereum",
"reth-node-optimism",
@ -5774,11 +5815,7 @@ dependencies = [
name = "reth-auto-seal-consensus"
version = "0.1.0-alpha.21"
dependencies = [
"clap",
"eyre",
"futures-util",
"jsonrpsee",
"reth",
"reth-beacon-consensus",
"reth-interfaces",
"reth-node-api",
@ -5787,8 +5824,6 @@ dependencies = [
"reth-revm",
"reth-stages",
"reth-transaction-pool",
"serde_json",
"tempfile",
"tokio",
"tokio-stream",
"tracing",
@ -6472,6 +6507,7 @@ dependencies = [
name = "reth-node-ethereum"
version = "0.1.0-alpha.21"
dependencies = [
"async-trait",
"eyre",
"reth-basic-payload-builder",
"reth-db",
@ -6493,6 +6529,7 @@ dependencies = [
name = "reth-node-optimism"
version = "0.1.0-alpha.21"
dependencies = [
"clap",
"eyre",
"reth-basic-payload-builder",
"reth-db",
@ -8600,11 +8637,9 @@ name = "trace-transaction-cli"
version = "0.0.0"
dependencies = [
"clap",
"eyre",
"futures-util",
"jsonrpsee",
"reth",
"tokio",
"reth-node-ethereum",
]
[[package]]


@ -60,7 +60,10 @@ members = [
"examples/additional-rpc-namespace-in-cli/",
"examples/beacon-api-sse/",
"examples/cli-extension-event-hooks/",
"examples/custom-evm/",
"examples/custom-node/",
"examples/custom-node-components/",
"examples/custom-dev-node/",
"examples/custom-payload-builder/",
"examples/manual-p2p/",
"examples/rpc-db/",


@ -53,6 +53,7 @@ reth-node-optimism = { workspace = true, optional = true, features = [
"optimism",
] }
reth-node-core.workspace = true
reth-node-builder.workspace = true
# crypto
alloy-rlp.workspace = true
@ -137,8 +138,6 @@ optimism = [
"reth-blockchain-tree/optimism",
"reth-payload-builder/optimism",
"reth-optimism-payload-builder/optimism",
"reth-ethereum-payload-builder/optimism",
"reth-node-ethereum/optimism",
"dep:reth-node-optimism",
"reth-node-core/optimism",
]


@ -1,849 +0,0 @@
//! Contains types and methods that can be used to launch a node based on a [NodeConfig].
use crate::commands::debug_cmd::engine_api_store::EngineApiStore;
use eyre::Context;
use fdlimit::raise_fd_limit;
use futures::{future::Either, stream, stream_select, StreamExt};
use reth_auto_seal_consensus::AutoSealBuilder;
use reth_beacon_consensus::{
hooks::{EngineHooks, PruneHook, StaticFileHook},
BeaconConsensusEngine, MIN_BLOCKS_FOR_PIPELINE_RUN,
};
use reth_blockchain_tree::{config::BlockchainTreeConfig, ShareableBlockchainTree};
use reth_config::Config;
use reth_db::{
database::Database,
database_metrics::{DatabaseMetadata, DatabaseMetrics},
};
use reth_interfaces::p2p::either::EitherDownloader;
use reth_network::NetworkEvents;
use reth_network_api::{NetworkInfo, PeersInfo};
use reth_node_core::{
cli::{
components::{RethNodeComponentsImpl, RethRpcServerHandles},
config::RethRpcConfig,
db_type::DatabaseInstance,
ext::{DefaultRethNodeCommandConfig, RethCliExt, RethNodeCommandConfig},
},
dirs::{ChainPath, DataDirPath},
events::cl::ConsensusLayerHealthEvents,
exit::NodeExitFuture,
init::init_genesis,
version::SHORT_VERSION,
};
#[cfg(not(feature = "optimism"))]
use reth_node_ethereum::{EthEngineTypes, EthEvmConfig};
#[cfg(feature = "optimism")]
use reth_node_optimism::{OptimismEngineTypes, OptimismEvmConfig};
use reth_payload_builder::PayloadBuilderHandle;
use reth_primitives::format_ether;
use reth_provider::{providers::BlockchainProvider, ProviderFactory};
use reth_prune::PrunerBuilder;
use reth_rpc_engine_api::EngineApi;
use reth_static_file::StaticFileProducer;
use reth_tasks::{TaskExecutor, TaskManager};
use reth_transaction_pool::TransactionPool;
use std::{path::PathBuf, sync::Arc};
use tokio::sync::{mpsc::unbounded_channel, oneshot};
use tracing::*;
/// Re-export `NodeConfig` from `reth_node_core`.
pub use reth_node_core::node_config::NodeConfig;
/// Launches the node, also adding any RPC extensions passed.
///
/// # Example
/// ```rust
/// # use reth_tasks::{TaskManager, TaskSpawner};
/// # use reth_node_core::node_config::NodeConfig;
/// # use reth_node_core::cli::{
/// # ext::DefaultRethNodeCommandConfig,
/// # };
/// # use tokio::runtime::Handle;
/// # use reth::builder::launch_from_config;
///
/// async fn t() {
/// let handle = Handle::current();
/// let manager = TaskManager::new(handle);
/// let executor = manager.executor();
/// let builder = NodeConfig::default();
/// let ext = DefaultRethNodeCommandConfig::default();
/// let handle = launch_from_config::<()>(builder, ext, executor).await.unwrap();
/// }
/// ```
pub async fn launch_from_config<E: RethCliExt>(
mut config: NodeConfig,
ext: E::Node,
executor: TaskExecutor,
) -> eyre::Result<NodeHandle> {
info!(target: "reth::cli", "reth {} starting", SHORT_VERSION);
// Register the prometheus recorder before creating the database,
// because database init needs it to register metrics.
config.install_prometheus_recorder()?;
let database = std::mem::take(&mut config.database);
let db_instance = database.init_db(config.db.log_level, config.chain.chain)?;
info!(target: "reth::cli", "Database opened");
match db_instance {
DatabaseInstance::Real { db, data_dir } => {
let builder = NodeBuilderWithDatabase { config, db, data_dir };
builder.launch::<E>(ext, executor).await
}
DatabaseInstance::Test { db, data_dir } => {
let builder = NodeBuilderWithDatabase { config, db, data_dir };
builder.launch::<E>(ext, executor).await
}
}
}
/// A version of the [NodeConfig] that has an installed database. This is used to construct the
/// [NodeHandle].
///
/// This also contains a path to a data dir that cannot be changed.
#[derive(Debug)]
pub struct NodeBuilderWithDatabase<DB> {
/// The node config
pub config: NodeConfig,
/// The database
pub db: Arc<DB>,
/// The data dir
pub data_dir: ChainPath<DataDirPath>,
}
impl<DB: Database + DatabaseMetrics + DatabaseMetadata + 'static> NodeBuilderWithDatabase<DB> {
/// Launch the node with the given extensions and executor
pub async fn launch<E: RethCliExt>(
mut self,
mut ext: E::Node,
executor: TaskExecutor,
) -> eyre::Result<NodeHandle> {
// Raise the fd limit of the process.
// Does not do anything on windows.
raise_fd_limit()?;
// get config
let config = self.load_config()?;
let prometheus_handle = self.config.install_prometheus_recorder()?;
let provider_factory = ProviderFactory::new(
Arc::clone(&self.db),
Arc::clone(&self.config.chain),
self.data_dir.static_files_path(),
)?
.with_static_files_metrics();
self.config.start_metrics_endpoint(prometheus_handle, Arc::clone(&self.db)).await?;
debug!(target: "reth::cli", chain=%self.config.chain.chain, genesis=?self.config.chain.genesis_hash(), "Initializing genesis");
let genesis_hash = init_genesis(provider_factory.clone())?;
info!(target: "reth::cli", "{}", self.config.chain.display_hardforks());
let consensus = self.config.consensus();
debug!(target: "reth::cli", "Spawning stages metrics listener task");
let (sync_metrics_tx, sync_metrics_rx) = unbounded_channel();
let sync_metrics_listener = reth_stages::MetricsListener::new(sync_metrics_rx);
executor.spawn_critical("stages metrics listener task", sync_metrics_listener);
let prune_config = self
.config
.pruning
.prune_config(Arc::clone(&self.config.chain))?
.or(config.prune.clone());
// TODO: stateful node builder should be able to remove cfgs here
#[cfg(feature = "optimism")]
let evm_config = OptimismEvmConfig::default();
// The default EVM config is the Ethereum EVM config.
#[cfg(not(feature = "optimism"))]
let evm_config = EthEvmConfig::default();
// configure blockchain tree
let tree_config = BlockchainTreeConfig::default();
let tree = self.config.build_blockchain_tree(
provider_factory.clone(),
consensus.clone(),
prune_config.clone(),
sync_metrics_tx.clone(),
tree_config,
evm_config,
)?;
let canon_state_notification_sender = tree.canon_state_notification_sender();
let blockchain_tree = ShareableBlockchainTree::new(tree);
debug!(target: "reth::cli", "configured blockchain tree");
// fetch the head block from the database
let head = self
.config
.lookup_head(provider_factory.clone())
.wrap_err("the head block is missing")?;
// setup the blockchain provider
let blockchain_db =
BlockchainProvider::new(provider_factory.clone(), blockchain_tree.clone())?;
// build transaction pool
let transaction_pool =
self.config.build_and_spawn_txpool(&blockchain_db, head, &executor, &self.data_dir)?;
// build network
let mut network_builder = self
.config
.build_network(
&config,
provider_factory.clone(),
executor.clone(),
head,
&self.data_dir,
)
.await?;
let components = RethNodeComponentsImpl::new(
blockchain_db.clone(),
transaction_pool.clone(),
network_builder.handle(),
executor.clone(),
blockchain_db.clone(),
evm_config,
);
// allow network modifications
ext.configure_network(network_builder.network_mut(), &components)?;
// launch network
let network = self.config.start_network(
network_builder,
&executor,
transaction_pool.clone(),
provider_factory.clone(),
&self.data_dir,
);
info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), enode = %network.local_node_record(), "Connected to P2P network");
debug!(target: "reth::cli", peer_id = ?network.peer_id(), "Full peer ID");
let network_client = network.fetch_client().await?;
ext.on_components_initialized(&components)?;
debug!(target: "reth::cli", "Spawning payload builder service");
// TODO: stateful node builder should handle this in with_payload_builder
// Optimism's payload builder is implemented on the OptimismPayloadBuilder type.
#[cfg(feature = "optimism")]
let payload_builder = reth_optimism_payload_builder::OptimismPayloadBuilder::default()
.set_compute_pending_block(self.config.builder.compute_pending_block);
#[cfg(feature = "optimism")]
let payload_builder: PayloadBuilderHandle<OptimismEngineTypes> =
ext.spawn_payload_builder_service(&self.config.builder, &components, payload_builder)?;
// The default payload builder is the Ethereum payload builder.
#[cfg(not(feature = "optimism"))]
let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::default();
#[cfg(not(feature = "optimism"))]
let payload_builder: PayloadBuilderHandle<EthEngineTypes> =
ext.spawn_payload_builder_service(&self.config.builder, &components, payload_builder)?;
let (consensus_engine_tx, mut consensus_engine_rx) = unbounded_channel();
if let Some(store_path) = self.config.debug.engine_api_store.clone() {
let (engine_intercept_tx, engine_intercept_rx) = unbounded_channel();
let engine_api_store = EngineApiStore::new(store_path);
executor.spawn_critical(
"engine api interceptor",
engine_api_store.intercept(consensus_engine_rx, engine_intercept_tx),
);
consensus_engine_rx = engine_intercept_rx;
};
let max_block = self.config.max_block(&network_client, provider_factory.clone()).await?;
let mut hooks = EngineHooks::new();
let mut static_file_producer = StaticFileProducer::new(
provider_factory.clone(),
provider_factory.static_file_provider(),
prune_config.clone().unwrap_or_default().segments,
);
let static_file_producer_events = static_file_producer.events();
hooks.add(StaticFileHook::new(static_file_producer.clone(), Box::new(executor.clone())));
info!(target: "reth::cli", "StaticFileProducer initialized");
// Configure the pipeline
let (mut pipeline, client) = if self.config.dev.dev {
info!(target: "reth::cli", "Starting Reth in dev mode");
for (idx, (address, alloc)) in self.config.chain.genesis.alloc.iter().enumerate() {
info!(target: "reth::cli", "Allocated Genesis Account: {:02}. {} ({} ETH)", idx, address.to_string(), format_ether(alloc.balance));
}
let mining_mode =
self.config.mining_mode(transaction_pool.pending_transactions_listener());
let (_, client, mut task) = AutoSealBuilder::new(
Arc::clone(&self.config.chain),
blockchain_db.clone(),
transaction_pool.clone(),
consensus_engine_tx.clone(),
canon_state_notification_sender,
mining_mode,
evm_config,
)
.build();
let mut pipeline = self
.config
.build_networked_pipeline(
&config.stages,
client.clone(),
Arc::clone(&consensus),
provider_factory.clone(),
&executor,
sync_metrics_tx,
prune_config.clone(),
max_block,
static_file_producer,
evm_config,
)
.await?;
let pipeline_events = pipeline.events();
task.set_pipeline_events(pipeline_events);
debug!(target: "reth::cli", "Spawning auto mine task");
executor.spawn(Box::pin(task));
(pipeline, EitherDownloader::Left(client))
} else {
let pipeline = self
.config
.build_networked_pipeline(
&config.stages,
network_client.clone(),
Arc::clone(&consensus),
provider_factory.clone(),
&executor.clone(),
sync_metrics_tx,
prune_config.clone(),
max_block,
static_file_producer,
evm_config,
)
.await?;
(pipeline, EitherDownloader::Right(network_client))
};
let pipeline_events = pipeline.events();
let initial_target = self.config.initial_pipeline_target(genesis_hash);
let prune_config = prune_config.unwrap_or_default();
let mut pruner = PrunerBuilder::new(prune_config.clone())
.max_reorg_depth(tree_config.max_reorg_depth() as usize)
.prune_delete_limit(self.config.chain.prune_delete_limit)
.build(provider_factory.clone());
let pruner_events = pruner.events();
hooks.add(PruneHook::new(pruner, Box::new(executor.clone())));
info!(target: "reth::cli", ?prune_config, "Pruner initialized");
// Configure the consensus engine
let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel(
client,
pipeline,
blockchain_db.clone(),
Box::new(executor.clone()),
Box::new(network.clone()),
max_block,
self.config.debug.continuous,
payload_builder.clone(),
initial_target,
MIN_BLOCKS_FOR_PIPELINE_RUN,
consensus_engine_tx,
consensus_engine_rx,
hooks,
)?;
info!(target: "reth::cli", "Consensus engine initialized");
let events = stream_select!(
network.event_listener().map(Into::into),
beacon_engine_handle.event_listener().map(Into::into),
pipeline_events.map(Into::into),
if self.config.debug.tip.is_none() && !self.config.dev.dev {
Either::Left(
ConsensusLayerHealthEvents::new(Box::new(blockchain_db.clone()))
.map(Into::into),
)
} else {
Either::Right(stream::empty())
},
pruner_events.map(Into::into),
static_file_producer_events.map(Into::into),
);
executor.spawn_critical(
"events task",
reth_node_core::events::node::handle_events(
Some(network.clone()),
Some(head.number),
events,
self.db.clone(),
),
);
let engine_api = EngineApi::new(
blockchain_db.clone(),
self.config.chain.clone(),
beacon_engine_handle,
payload_builder.into(),
Box::new(executor.clone()),
);
info!(target: "reth::cli", "Engine API handler initialized");
// extract the jwt secret from the args if possible
let default_jwt_path = self.data_dir.jwt_path();
let jwt_secret = self.config.rpc.auth_jwt_secret(default_jwt_path)?;
// adjust rpc port numbers based on instance number
self.config.adjust_instance_ports();
// Start RPC servers
let rpc_server_handles =
self.config.rpc.start_servers(&components, engine_api, jwt_secret, &mut ext).await?;
// Run consensus engine to completion
let (tx, rx) = oneshot::channel();
info!(target: "reth::cli", "Starting consensus engine");
executor.spawn_critical_blocking("consensus engine", async move {
let res = beacon_consensus_engine.await;
let _ = tx.send(res);
});
ext.on_node_started(&components)?;
// If `enable_genesis_walkback` is set to true, the rollup client will need to
// perform the derivation pipeline from genesis, validating the data dir.
// When set to false, set the finalized, safe, and unsafe head block hashes
// on the rollup client using a fork choice update. This prevents the rollup
// client from performing the derivation pipeline from genesis, and instead
// starts syncing from the current tip in the DB.
#[cfg(feature = "optimism")]
if self.config.chain.is_optimism() && !self.config.rollup.enable_genesis_walkback {
let client = rpc_server_handles.auth.http_client();
reth_rpc_api::EngineApiClient::<OptimismEngineTypes>::fork_choice_updated_v2(
&client,
reth_rpc_types::engine::ForkchoiceState {
head_block_hash: head.hash,
safe_block_hash: head.hash,
finalized_block_hash: head.hash,
},
None,
)
.await?;
}
// construct node handle and return
let node_handle = NodeHandle {
rpc_server_handles,
node_exit_future: NodeExitFuture::new(rx, self.config.debug.terminate),
};
Ok(node_handle)
}
/// Returns the path to the config file.
fn config_path(&self) -> PathBuf {
self.config.config.clone().unwrap_or_else(|| self.data_dir.config_path())
}
/// Loads the reth config with the given datadir root
fn load_config(&self) -> eyre::Result<Config> {
let config_path = self.config_path();
let mut config = confy::load_path::<Config>(&config_path)
.wrap_err_with(|| format!("Could not load config file {:?}", config_path))?;
info!(target: "reth::cli", path = ?config_path, "Configuration loaded");
// Update the config with the command line arguments
config.peers.connect_trusted_nodes_only = self.config.network.trusted_only;
if !self.config.network.trusted_peers.is_empty() {
info!(target: "reth::cli", "Adding trusted nodes");
self.config.network.trusted_peers.iter().for_each(|peer| {
config.peers.trusted_nodes.insert(*peer);
});
}
Ok(config)
}
}
/// The [NodeHandle] contains the [RethRpcServerHandles] returned by the reth initialization
/// process, as well as a method for waiting for the node exit.
#[derive(Debug)]
pub struct NodeHandle {
/// The handles to the RPC servers
rpc_server_handles: RethRpcServerHandles,
/// A future that resolves when the node exits
/// See [`NodeExitFuture`]
node_exit_future: NodeExitFuture,
}
impl NodeHandle {
/// Returns the [RethRpcServerHandles] for this node.
pub fn rpc_server_handles(&self) -> &RethRpcServerHandles {
&self.rpc_server_handles
}
/// Waits for the node to exit, if it was configured to exit.
pub async fn wait_for_node_exit(self) -> eyre::Result<()> {
self.node_exit_future.await
}
}
/// A simple function to launch a node with the specified [NodeConfig], spawning tasks on the
/// [TaskExecutor] constructed from [TaskManager::current].
///
/// # Example
/// ```
/// # use reth_node_core::{
/// # node_config::NodeConfig,
/// # args::RpcServerArgs,
/// # };
/// # use reth::builder::spawn_node;
/// async fn t() {
/// // Create a node builder with an http rpc server enabled
/// let rpc_args = RpcServerArgs::default().with_http();
///
/// let builder = NodeConfig::test().with_rpc(rpc_args);
///
/// // Spawn the builder, returning a handle to the node
/// let (_handle, _manager) = spawn_node(builder).await.unwrap();
/// }
/// ```
pub async fn spawn_node(config: NodeConfig) -> eyre::Result<(NodeHandle, TaskManager)> {
let task_manager = TaskManager::current();
let ext = DefaultRethNodeCommandConfig::default();
Ok((launch_from_config::<()>(config, ext, task_manager.executor()).await?, task_manager))
}
#[cfg(test)]
mod tests {
use super::*;
use reth_node_core::args::RpcServerArgs;
use reth_primitives::U256;
use reth_rpc_api::EthApiClient;
#[tokio::test]
async fn block_number_node_config_test() {
// this launches a test node with http
let rpc_args = RpcServerArgs::default().with_http();
let (handle, _manager) = spawn_node(NodeConfig::test().with_rpc(rpc_args)).await.unwrap();
// call a function on the node
let client = handle.rpc_server_handles().rpc.http_client().unwrap();
let block_number = client.block_number().await.unwrap();
// it should be zero, since this is an ephemeral test node
assert_eq!(block_number, U256::ZERO);
}
#[tokio::test]
async fn rpc_handles_none_without_http() {
// this launches a test node _without_ http
let (handle, _manager) = spawn_node(NodeConfig::test()).await.unwrap();
// ensure that the `http_client` is none
let maybe_client = handle.rpc_server_handles().rpc.http_client();
assert!(maybe_client.is_none());
}
#[tokio::test]
async fn launch_multiple_nodes() {
// spawn_node takes roughly 1 second per node, so this test takes ~4 seconds
let num_nodes = 4;
// contains handles and managers
let mut handles = Vec::new();
for _ in 0..num_nodes {
let handle = spawn_node(NodeConfig::test()).await.unwrap();
handles.push(handle);
}
}
#[cfg(feature = "optimism")]
#[tokio::test]
async fn optimism_pre_canyon_no_withdrawals_valid() {
reth_tracing::init_test_tracing();
use alloy_chains::Chain;
use jsonrpsee::http_client::HttpClient;
use reth_primitives::{ChainSpec, Genesis};
use reth_rpc_api::EngineApiClient;
use reth_rpc_types::engine::{
ForkchoiceState, OptimismPayloadAttributes, PayloadAttributes,
};
// this launches a test node with http
let rpc_args = RpcServerArgs::default().with_http();
// create an optimism chain spec with regolith activated (canyon not active)
let spec = ChainSpec::builder()
.chain(Chain::optimism_mainnet())
.genesis(Genesis::default())
.regolith_activated()
.build();
let genesis_hash = spec.genesis_hash();
// create node config
let node_config = NodeConfig::test().with_rpc(rpc_args).with_chain(spec);
let (handle, _manager) = spawn_node(node_config).await.unwrap();
// call a function on the node
let client = handle.rpc_server_handles().auth.http_client();
let block_number = client.block_number().await.unwrap();
// it should be zero, since this is an ephemeral test node
assert_eq!(block_number, U256::ZERO);
// call the engine_forkchoiceUpdated function with payload attributes
let forkchoice_state = ForkchoiceState {
head_block_hash: genesis_hash,
safe_block_hash: genesis_hash,
finalized_block_hash: genesis_hash,
};
let payload_attributes = OptimismPayloadAttributes {
payload_attributes: PayloadAttributes {
timestamp: 1,
prev_randao: Default::default(),
suggested_fee_recipient: Default::default(),
// canyon is _not_ active, so omitting withdrawals is valid here
withdrawals: None,
parent_beacon_block_root: None,
},
no_tx_pool: None,
gas_limit: Some(1),
transactions: None,
};
// call the engine_forkchoiceUpdated function with payload attributes
let res = <HttpClient as EngineApiClient<OptimismEngineTypes>>::fork_choice_updated_v2(
&client,
forkchoice_state,
Some(payload_attributes),
)
.await;
res.expect("pre-canyon engine call without withdrawals should succeed");
}
#[cfg(feature = "optimism")]
#[tokio::test]
async fn optimism_pre_canyon_withdrawals_invalid() {
reth_tracing::init_test_tracing();
use alloy_chains::Chain;
use assert_matches::assert_matches;
use jsonrpsee::{core::Error, http_client::HttpClient, types::error::INVALID_PARAMS_CODE};
use reth_primitives::{ChainSpec, Genesis};
use reth_rpc_api::EngineApiClient;
use reth_rpc_types::engine::{
ForkchoiceState, OptimismPayloadAttributes, PayloadAttributes,
};
// this launches a test node with http
let rpc_args = RpcServerArgs::default().with_http();
// create an optimism chain spec with regolith activated (canyon not active)
let spec = ChainSpec::builder()
.chain(Chain::optimism_mainnet())
.genesis(Genesis::default())
.regolith_activated()
.build();
let genesis_hash = spec.genesis_hash();
// create node config
let node_config = NodeConfig::test().with_rpc(rpc_args).with_chain(spec);
let (handle, _manager) = spawn_node(node_config).await.unwrap();
// call a function on the node
let client = handle.rpc_server_handles().auth.http_client();
let block_number = client.block_number().await.unwrap();
// it should be zero, since this is an ephemeral test node
assert_eq!(block_number, U256::ZERO);
// call the engine_forkchoiceUpdated function with payload attributes
let forkchoice_state = ForkchoiceState {
head_block_hash: genesis_hash,
safe_block_hash: genesis_hash,
finalized_block_hash: genesis_hash,
};
let payload_attributes = OptimismPayloadAttributes {
payload_attributes: PayloadAttributes {
timestamp: 1,
prev_randao: Default::default(),
suggested_fee_recipient: Default::default(),
// canyon is _not_ active, so providing withdrawals should cause the engine call to fail
withdrawals: Some(vec![]),
parent_beacon_block_root: None,
},
no_tx_pool: None,
gas_limit: Some(1),
transactions: None,
};
// call the engine_forkchoiceUpdated function with payload attributes
let res = <HttpClient as EngineApiClient<OptimismEngineTypes>>::fork_choice_updated_v2(
&client,
forkchoice_state,
Some(payload_attributes),
)
.await;
let err = res.expect_err("pre-canyon engine call with withdrawals should fail");
assert_matches!(err, Error::Call(ref object) if object.code() == INVALID_PARAMS_CODE);
}
#[cfg(feature = "optimism")]
#[tokio::test]
async fn optimism_post_canyon_no_withdrawals_invalid() {
reth_tracing::init_test_tracing();
use alloy_chains::Chain;
use assert_matches::assert_matches;
use jsonrpsee::{core::Error, http_client::HttpClient, types::error::INVALID_PARAMS_CODE};
use reth_primitives::{ChainSpec, Genesis};
use reth_rpc_api::EngineApiClient;
use reth_rpc_types::engine::{
ForkchoiceState, OptimismPayloadAttributes, PayloadAttributes,
};
// this launches a test node with http
let rpc_args = RpcServerArgs::default().with_http();
// create an optimism chain spec with canyon activated at genesis
let spec = ChainSpec::builder()
.chain(Chain::optimism_mainnet())
.genesis(Genesis::default())
.canyon_activated()
.build();
let genesis_hash = spec.genesis_hash();
// create node config
let node_config = NodeConfig::test().with_rpc(rpc_args).with_chain(spec);
let (handle, _manager) = spawn_node(node_config).await.unwrap();
// call a function on the node
let client = handle.rpc_server_handles().auth.http_client();
let block_number = client.block_number().await.unwrap();
// it should be zero, since this is an ephemeral test node
assert_eq!(block_number, U256::ZERO);
// call the engine_forkchoiceUpdated function with payload attributes
let forkchoice_state = ForkchoiceState {
head_block_hash: genesis_hash,
safe_block_hash: genesis_hash,
finalized_block_hash: genesis_hash,
};
let payload_attributes = OptimismPayloadAttributes {
payload_attributes: PayloadAttributes {
timestamp: 1,
prev_randao: Default::default(),
suggested_fee_recipient: Default::default(),
// canyon _is_ active, so omitting withdrawals should cause the engine call to fail
withdrawals: None,
parent_beacon_block_root: None,
},
no_tx_pool: None,
gas_limit: Some(1),
transactions: None,
};
// call the engine_forkchoiceUpdated function with payload attributes
let res = <HttpClient as EngineApiClient<OptimismEngineTypes>>::fork_choice_updated_v2(
&client,
forkchoice_state,
Some(payload_attributes),
)
.await;
let err = res.expect_err("post-canyon engine call with no withdrawals should fail");
assert_matches!(err, Error::Call(ref object) if object.code() == INVALID_PARAMS_CODE);
}
#[cfg(feature = "optimism")]
#[tokio::test]
async fn optimism_post_canyon_withdrawals_valid() {
reth_tracing::init_test_tracing();
use alloy_chains::Chain;
use jsonrpsee::http_client::HttpClient;
use reth_primitives::{ChainSpec, Genesis};
use reth_rpc_api::EngineApiClient;
use reth_rpc_types::engine::{
ForkchoiceState, OptimismPayloadAttributes, PayloadAttributes,
};
// this launches a test node with http
let rpc_args = RpcServerArgs::default().with_http();
// create an optimism chain spec with canyon activated at genesis
let spec = ChainSpec::builder()
.chain(Chain::optimism_mainnet())
.genesis(Genesis::default())
.canyon_activated()
.build();
let genesis_hash = spec.genesis_hash();
// create node config
let node_config = NodeConfig::test().with_rpc(rpc_args).with_chain(spec);
let (handle, _manager) = spawn_node(node_config).await.unwrap();
// call a function on the node
let client = handle.rpc_server_handles().auth.http_client();
let block_number = client.block_number().await.unwrap();
// it should be zero, since this is an ephemeral test node
assert_eq!(block_number, U256::ZERO);
// call the engine_forkchoiceUpdated function with payload attributes
let forkchoice_state = ForkchoiceState {
head_block_hash: genesis_hash,
safe_block_hash: genesis_hash,
finalized_block_hash: genesis_hash,
};
let payload_attributes = OptimismPayloadAttributes {
payload_attributes: PayloadAttributes {
timestamp: 1,
prev_randao: Default::default(),
suggested_fee_recipient: Default::default(),
// canyon _is_ active, so providing (empty) withdrawals is valid here
withdrawals: Some(vec![]),
parent_beacon_block_root: None,
},
no_tx_pool: None,
gas_limit: Some(1),
transactions: None,
};
// call the engine_forkchoiceUpdated function with payload attributes
let res = <HttpClient as EngineApiClient<OptimismEngineTypes>>::fork_choice_updated_v2(
&client,
forkchoice_state,
Some(payload_attributes),
)
.await;
res.expect("post-canyon engine call with withdrawals should succeed");
}
}


@ -5,18 +5,19 @@ use crate::{
utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
LogArgs,
},
cli::ext::RethCliExt,
commands::{
config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, node, p2p, recover, stage,
test_vectors,
config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, node, node::NoArgs, p2p,
recover, stage, test_vectors,
},
core::cli::runner::CliRunner,
version::{LONG_VERSION, SHORT_VERSION},
};
use clap::{value_parser, Parser, Subcommand};
use reth_db::DatabaseEnv;
use reth_node_builder::{InitState, WithLaunchContext};
use reth_primitives::ChainSpec;
use reth_tracing::FileWorkerGuard;
use std::sync::Arc;
use std::{ffi::OsString, fmt, future::Future, sync::Arc};
/// Re-export of the `reth_node_core` types specifically in the `cli` module.
///
@ -30,7 +31,7 @@ pub use crate::core::cli::*;
/// This is the entrypoint to the executable.
#[derive(Debug, Parser)]
#[command(author, version = SHORT_VERSION, long_version = LONG_VERSION, about = "Reth", long_about = None)]
pub struct Cli<Ext: RethCliExt = ()> {
pub struct Cli<Ext: clap::Args + fmt::Debug = NoArgs> {
/// The command to run
#[command(subcommand)]
command: Commands<Ext>,
@ -68,9 +69,70 @@ pub struct Cli<Ext: RethCliExt = ()> {
logs: LogArgs,
}
impl<Ext: RethCliExt> Cli<Ext> {
impl Cli {
/// Parses only the default CLI arguments
pub fn parse_args() -> Self {
Self::parse()
}
/// Parses only the default CLI arguments from the given iterator
pub fn try_parse_args_from<I, T>(itr: I) -> Result<Self, clap::error::Error>
where
I: IntoIterator<Item = T>,
T: Into<OsString> + Clone,
{
Cli::try_parse_from(itr)
}
}
impl<Ext: clap::Args + fmt::Debug> Cli<Ext> {
/// Execute the configured cli command.
pub fn run(mut self) -> eyre::Result<()> {
///
/// This accepts a closure that is used to launch the node via the
/// [NodeCommand](node::NodeCommand).
///
/// # Example
///
/// ```no_run
/// use reth::cli::Cli;
/// use reth_node_ethereum::EthereumNode;
///
/// Cli::parse_args()
/// .run(|builder, _| async move {
/// let handle = builder.launch_node(EthereumNode::default()).await?;
///
/// handle.wait_for_node_exit().await
/// })
/// .unwrap();
/// ```
///
/// # Example
///
/// Parse additional CLI arguments for the node command and use them to configure the node.
///
/// ```no_run
/// use clap::Parser;
/// use reth::cli::Cli;
///
/// #[derive(Debug, Parser)]
/// pub struct MyArgs {
/// pub enable: bool,
/// }
///
/// Cli::parse()
/// .run(|builder, my_args: MyArgs| async move {
/// // launch the node
///
/// Ok(())
/// })
/// .unwrap();
/// ```
pub fn run<L, Fut>(mut self, launcher: L) -> eyre::Result<()>
where
L: FnOnce(WithLaunchContext<Arc<DatabaseEnv>, InitState>, Ext) -> Fut,
Fut: Future<Output = eyre::Result<()>>,
{
// add network name to logs dir
self.logs.log_file_directory =
self.logs.log_file_directory.join(self.chain.chain.to_string());
@ -79,7 +141,9 @@ impl<Ext: RethCliExt> Cli<Ext> {
let runner = CliRunner::default();
match self.command {
Commands::Node(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)),
Commands::Node(command) => {
runner.run_command_until_exit(|ctx| command.execute(ctx, launcher))
}
Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()),
Commands::Import(command) => runner.run_blocking_until_ctrl_c(command.execute()),
Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()),
@ -101,26 +165,11 @@ impl<Ext: RethCliExt> Cli<Ext> {
let guard = self.logs.init_tracing()?;
Ok(guard)
}
/// Configures the given node extension.
pub fn with_node_extension<C>(mut self, conf: C) -> Self
where
C: Into<Ext::Node>,
{
self.command.set_node_extension(conf.into());
self
}
}
/// Convenience function for parsing CLI options, set up logging and run the chosen command.
#[inline]
pub fn run() -> eyre::Result<()> {
Cli::<()>::parse().run()
}
/// Commands to be executed
#[derive(Debug, Subcommand)]
pub enum Commands<Ext: RethCliExt = ()> {
pub enum Commands<Ext: clap::Args + fmt::Debug = NoArgs> {
/// Start the node
#[command(name = "node")]
Node(node::NodeCommand<Ext>),
@ -155,17 +204,6 @@ pub enum Commands<Ext: RethCliExt = ()> {
Recover(recover::Command),
}
impl<Ext: RethCliExt> Commands<Ext> {
/// Sets the node extension if it is the [NodeCommand](node::NodeCommand).
///
/// This is a noop if the command is not the [NodeCommand](node::NodeCommand).
pub fn set_node_extension(&mut self, ext: Ext::Node) {
if let Commands::Node(command) = self {
command.ext = ext
}
}
}
#[cfg(test)]
mod tests {
use super::*;
@ -174,7 +212,7 @@ mod tests {
#[test]
fn parse_color_mode() {
let reth = Cli::<()>::try_parse_from(["reth", "node", "--color", "always"]).unwrap();
let reth = Cli::try_parse_args_from(["reth", "node", "--color", "always"]).unwrap();
assert_eq!(reth.logs.color, ColorMode::Always);
}
@ -183,9 +221,9 @@ mod tests {
/// runtime
#[test]
fn test_parse_help_all_subcommands() {
let reth = Cli::<()>::command();
let reth = Cli::<NoArgs>::command();
for sub_command in reth.get_subcommands() {
let err = Cli::<()>::try_parse_from(["reth", sub_command.get_name(), "--help"])
let err = Cli::try_parse_args_from(["reth", sub_command.get_name(), "--help"])
.err()
.unwrap_or_else(|| {
panic!("Failed to parse help message {}", sub_command.get_name())
@ -201,7 +239,7 @@ mod tests {
/// name
#[test]
fn parse_logs_path() {
let mut reth = Cli::<()>::try_parse_from(["reth", "node"]).unwrap();
let mut reth = Cli::try_parse_args_from(["reth", "node"]).unwrap();
reth.logs.log_file_directory =
reth.logs.log_file_directory.join(reth.chain.chain.to_string());
let log_dir = reth.logs.log_file_directory;
@ -211,7 +249,7 @@ mod tests {
let mut iter = SUPPORTED_CHAINS.iter();
iter.next();
for chain in iter {
let mut reth = Cli::<()>::try_parse_from(["reth", "node", "--chain", chain]).unwrap();
let mut reth = Cli::try_parse_args_from(["reth", "node", "--chain", chain]).unwrap();
reth.logs.log_file_directory =
reth.logs.log_file_directory.join(reth.chain.chain.to_string());
let log_dir = reth.logs.log_file_directory;
@ -220,21 +258,12 @@ mod tests {
}
}
#[test]
fn override_trusted_setup_file() {
// We already have a test that asserts that this has been initialized,
// so we cheat a little bit and check that loading a random file errors.
let reth = Cli::<()>::try_parse_from(["reth", "node", "--trusted-setup-file", "README.md"])
.unwrap();
assert!(reth.run().is_err());
}
#[test]
fn parse_env_filter_directives() {
let temp_dir = tempfile::tempdir().unwrap();
std::env::set_var("RUST_LOG", "info,evm=debug");
let reth = Cli::<()>::try_parse_from([
let reth = Cli::try_parse_args_from([
"reth",
"init",
"--datadir",
@ -243,6 +272,6 @@ mod tests {
"debug,net=trace",
])
.unwrap();
assert!(reth.run().is_ok());
assert!(reth.run(|_, _| async move { Ok(()) }).is_ok());
}
}


@ -2,9 +2,7 @@
use crate::core::cli::runner::CliContext;
use clap::{Parser, Subcommand};
mod build_block;
pub mod engine_api_store;
mod execution;
mod in_memory_merkle;
mod merkle;


@ -4,7 +4,6 @@ use crate::{
utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
DatabaseArgs, NetworkArgs,
},
commands::debug_cmd::engine_api_store::{EngineApiStore, StoredEngineApiMessage},
core::cli::runner::CliContext,
dirs::{DataDirPath, MaybePlatformPath},
};
@ -20,6 +19,7 @@ use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv};
use reth_interfaces::consensus::Consensus;
use reth_network::NetworkHandle;
use reth_network_api::NetworkInfo;
use reth_node_core::engine_api_store::{EngineApiStore, StoredEngineApiMessage};
#[cfg(not(feature = "optimism"))]
use reth_node_ethereum::{EthEngineTypes, EthEvmConfig};
#[cfg(feature = "optimism")]


@ -1,6 +1,4 @@
//! Main node command
//!
//! Starts the client
//! Main node command for launching a node
use crate::{
args::{
@ -8,21 +6,19 @@ use crate::{
DatabaseArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs, PruningArgs,
RpcServerArgs, TxPoolArgs,
},
builder::{launch_from_config, NodeConfig},
cli::{db_type::DatabaseBuilder, ext::RethCliExt},
core::cli::runner::CliContext,
dirs::{DataDirPath, MaybePlatformPath},
};
use clap::{value_parser, Parser};
use reth_auto_seal_consensus::AutoSealConsensus;
use reth_beacon_consensus::BeaconConsensus;
use reth_interfaces::consensus::Consensus;
use clap::{value_parser, Args, Parser};
use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv};
use reth_node_builder::{InitState, NodeBuilder, WithLaunchContext};
use reth_node_core::node_config::NodeConfig;
use reth_primitives::ChainSpec;
use std::{net::SocketAddr, path::PathBuf, sync::Arc};
use std::{ffi::OsString, fmt, future::Future, net::SocketAddr, path::PathBuf, sync::Arc};
/// Start the node
#[derive(Debug, Parser)]
pub struct NodeCommand<Ext: RethCliExt = ()> {
pub struct NodeCommand<Ext: clap::Args + fmt::Debug = NoArgs> {
/// The path to the data dir for all reth files and subdirectories.
///
/// Defaults to the OS-specific data directory:
@ -116,63 +112,37 @@ pub struct NodeCommand<Ext: RethCliExt = ()> {
#[command(flatten)]
pub pruning: PruningArgs,
/// Rollup related arguments
#[cfg(feature = "optimism")]
#[command(flatten)]
pub rollup: crate::args::RollupArgs,
/// Additional cli arguments
#[command(flatten, next_help_heading = "Extension")]
pub ext: Ext::Node,
pub ext: Ext,
}
impl<Ext: RethCliExt> NodeCommand<Ext> {
/// Replaces the extension of the node command
pub fn with_ext<E: RethCliExt>(self, ext: E::Node) -> NodeCommand<E> {
let Self {
datadir,
config,
chain,
metrics,
trusted_setup_file,
instance,
with_unused_ports,
network,
rpc,
txpool,
builder,
debug,
db,
dev,
pruning,
#[cfg(feature = "optimism")]
rollup,
..
} = self;
NodeCommand {
datadir,
config,
chain,
metrics,
instance,
with_unused_ports,
trusted_setup_file,
network,
rpc,
txpool,
builder,
debug,
db,
dev,
pruning,
#[cfg(feature = "optimism")]
rollup,
ext,
}
impl NodeCommand {
/// Parses only the default CLI arguments
pub fn parse_args() -> Self {
Self::parse()
}
/// Execute `node` command
pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> {
/// Parses only the default [NodeCommand] arguments from the given iterator
pub fn try_parse_args_from<I, T>(itr: I) -> Result<Self, clap::error::Error>
where
I: IntoIterator<Item = T>,
T: Into<OsString> + Clone,
{
Self::try_parse_from(itr)
}
}
impl<Ext: clap::Args + fmt::Debug> NodeCommand<Ext> {
/// Launches the node
///
/// This transforms the node command into a node config and launches the node using the given
/// closure.
pub async fn execute<L, Fut>(self, ctx: CliContext, launcher: L) -> eyre::Result<()>
where
L: FnOnce(WithLaunchContext<Arc<DatabaseEnv>, InitState>, Ext) -> Fut,
Fut: Future<Output = eyre::Result<()>>,
{
let Self {
datadir,
config,
@ -189,17 +159,11 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
db,
dev,
pruning,
#[cfg(feature = "optimism")]
rollup,
ext,
} = self;
// set up real database
let database = DatabaseBuilder::Real(datadir);
// set up node config
let mut node_config = NodeConfig {
database,
config,
chain,
metrics,
@ -213,35 +177,38 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
db,
dev,
pruning,
#[cfg(feature = "optimism")]
rollup,
};
// Register the prometheus recorder before creating the database,
// because database init needs it to register metrics.
let _ = node_config.install_prometheus_recorder()?;
let data_dir = datadir.unwrap_or_chain_default(node_config.chain.chain);
let db_path = data_dir.db_path();
tracing::info!(target: "reth::cli", path = ?db_path, "Opening database");
let database = Arc::new(
init_db(db_path.clone(), DatabaseArguments::default().log_level(db.log_level))?
.with_metrics(),
);
if with_unused_ports {
node_config = node_config.with_unused_ports();
}
let executor = ctx.task_executor;
let builder = NodeBuilder::new(node_config)
.with_database(database)
.with_launch_context(ctx.task_executor, data_dir);
// launch the node
let handle = launch_from_config::<Ext>(node_config, ext, executor).await?;
handle.wait_for_node_exit().await
}
/// Returns the [Consensus] instance to use.
///
/// By default this will be a [BeaconConsensus] instance, but if the `--dev` flag is set, it
/// will be an [AutoSealConsensus] instance.
pub fn consensus(&self) -> Arc<dyn Consensus> {
if self.dev.dev {
Arc::new(AutoSealConsensus::new(Arc::clone(&self.chain)))
} else {
Arc::new(BeaconConsensus::new(Arc::clone(&self.chain)))
}
launcher(builder, ext).await
}
}
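The new `execute` signature above leaves launching to a caller-supplied closure. Below is a minimal, hedged sketch of driving it directly, assuming the `CliRunner`, `NodeCommand::parse_args`, and `EthereumNode` items that appear elsewhere in this changeset; it is illustrative only and not part of the diff.

```rust
// Illustrative sketch only: run the node command with a launcher closure, mirroring
// how `Cli::run` dispatches `Commands::Node` earlier in this changeset. All items used
// here (`NodeCommand`, `CliRunner`, `EthereumNode`) appear elsewhere in the diff.
use reth::{commands::node::NodeCommand, core::cli::runner::CliRunner};
use reth_node_ethereum::EthereumNode;

fn main() -> eyre::Result<()> {
    // parse only the built-in node arguments (Ext defaults to NoArgs)
    let command = NodeCommand::parse_args();
    let runner = CliRunner::default();
    runner.run_command_until_exit(|ctx| {
        command.execute(ctx, |builder, _| async move {
            // launch the preset Ethereum node and wait for it to exit
            let handle = builder.launch_node(EthereumNode::default()).await?;
            handle.wait_for_node_exit().await
        })
    })
}
```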
/// No additional arguments
#[derive(Debug, Clone, Copy, Default, Args)]
#[non_exhaustive]
pub struct NoArgs;
#[cfg(test)]
mod tests {
use super::*;
@ -253,14 +220,14 @@ mod tests {
#[test]
fn parse_help_node_command() {
let err = NodeCommand::<()>::try_parse_from(["reth", "--help"]).unwrap_err();
let err = NodeCommand::try_parse_args_from(["reth", "--help"]).unwrap_err();
assert_eq!(err.kind(), clap::error::ErrorKind::DisplayHelp);
}
#[test]
fn parse_common_node_command_chain_args() {
for chain in SUPPORTED_CHAINS {
let args: NodeCommand = NodeCommand::<()>::parse_from(["reth", "--chain", chain]);
let args: NodeCommand = NodeCommand::<NoArgs>::parse_from(["reth", "--chain", chain]);
assert_eq!(args.chain.chain, chain.parse::<reth_primitives::Chain>().unwrap());
}
}
@ -268,13 +235,13 @@ mod tests {
#[test]
fn parse_discovery_addr() {
let cmd =
NodeCommand::<()>::try_parse_from(["reth", "--discovery.addr", "127.0.0.1"]).unwrap();
NodeCommand::try_parse_args_from(["reth", "--discovery.addr", "127.0.0.1"]).unwrap();
assert_eq!(cmd.network.discovery.addr, Ipv4Addr::LOCALHOST);
}
#[test]
fn parse_addr() {
let cmd = NodeCommand::<()>::try_parse_from([
let cmd = NodeCommand::try_parse_args_from([
"reth",
"--discovery.addr",
"127.0.0.1",
@ -288,14 +255,14 @@ mod tests {
#[test]
fn parse_discovery_port() {
let cmd = NodeCommand::<()>::try_parse_from(["reth", "--discovery.port", "300"]).unwrap();
let cmd = NodeCommand::try_parse_args_from(["reth", "--discovery.port", "300"]).unwrap();
assert_eq!(cmd.network.discovery.port, 300);
}
#[test]
fn parse_port() {
let cmd =
NodeCommand::<()>::try_parse_from(["reth", "--discovery.port", "300", "--port", "99"])
NodeCommand::try_parse_args_from(["reth", "--discovery.port", "300", "--port", "99"])
.unwrap();
assert_eq!(cmd.network.discovery.port, 300);
assert_eq!(cmd.network.port, 99);
@ -303,27 +270,27 @@ mod tests {
#[test]
fn parse_metrics_port() {
let cmd = NodeCommand::<()>::try_parse_from(["reth", "--metrics", "9001"]).unwrap();
let cmd = NodeCommand::try_parse_args_from(["reth", "--metrics", "9001"]).unwrap();
assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)));
let cmd = NodeCommand::<()>::try_parse_from(["reth", "--metrics", ":9001"]).unwrap();
let cmd = NodeCommand::try_parse_args_from(["reth", "--metrics", ":9001"]).unwrap();
assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)));
let cmd =
NodeCommand::<()>::try_parse_from(["reth", "--metrics", "localhost:9001"]).unwrap();
NodeCommand::try_parse_args_from(["reth", "--metrics", "localhost:9001"]).unwrap();
assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)));
}
#[test]
fn parse_config_path() {
let cmd = NodeCommand::<()>::try_parse_from(["reth", "--config", "my/path/to/reth.toml"])
.unwrap();
let cmd =
NodeCommand::try_parse_args_from(["reth", "--config", "my/path/to/reth.toml"]).unwrap();
// always store reth.toml in the data dir, not the chain specific data dir
let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain);
let config_path = cmd.config.unwrap_or(data_dir.config_path());
assert_eq!(config_path, Path::new("my/path/to/reth.toml"));
let cmd = NodeCommand::<()>::try_parse_from(["reth"]).unwrap();
let cmd = NodeCommand::try_parse_args_from(["reth"]).unwrap();
// always store reth.toml in the data dir, not the chain specific data dir
let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain);
@ -334,14 +301,14 @@ mod tests {
#[test]
fn parse_db_path() {
let cmd = NodeCommand::<()>::try_parse_from(["reth"]).unwrap();
let cmd = NodeCommand::try_parse_args_from(["reth"]).unwrap();
let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain);
let db_path = data_dir.db_path();
let end = format!("reth/{}/db", SUPPORTED_CHAINS[0]);
assert!(db_path.ends_with(end), "{:?}", cmd.config);
let cmd =
NodeCommand::<()>::try_parse_from(["reth", "--datadir", "my/custom/path"]).unwrap();
NodeCommand::try_parse_args_from(["reth", "--datadir", "my/custom/path"]).unwrap();
let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain);
let db_path = data_dir.db_path();
assert_eq!(db_path, Path::new("my/custom/path/db"));
@ -350,7 +317,7 @@ mod tests {
#[test]
#[cfg(not(feature = "optimism"))] // dev mode not yet supported in op-reth
fn parse_dev() {
let cmd = NodeCommand::<()>::parse_from(["reth", "--dev"]);
let cmd = NodeCommand::<NoArgs>::parse_from(["reth", "--dev"]);
let chain = reth_primitives::DEV.clone();
assert_eq!(cmd.chain.chain, chain.chain);
assert_eq!(cmd.chain.genesis_hash, chain.genesis_hash);
@ -368,7 +335,7 @@ mod tests {
#[test]
fn parse_instance() {
let mut cmd = NodeCommand::<()>::parse_from(["reth"]);
let mut cmd = NodeCommand::<NoArgs>::parse_from(["reth"]);
cmd.rpc.adjust_instance_ports(cmd.instance);
cmd.network.port = DEFAULT_DISCOVERY_PORT + cmd.instance - 1;
// check rpc port numbers
@ -378,7 +345,7 @@ mod tests {
// check network listening port number
assert_eq!(cmd.network.port, 30303);
let mut cmd = NodeCommand::<()>::parse_from(["reth", "--instance", "2"]);
let mut cmd = NodeCommand::<NoArgs>::parse_from(["reth", "--instance", "2"]);
cmd.rpc.adjust_instance_ports(cmd.instance);
cmd.network.port = DEFAULT_DISCOVERY_PORT + cmd.instance - 1;
// check rpc port numbers
@ -388,7 +355,7 @@ mod tests {
// check network listening port number
assert_eq!(cmd.network.port, 30304);
let mut cmd = NodeCommand::<()>::parse_from(["reth", "--instance", "3"]);
let mut cmd = NodeCommand::<NoArgs>::parse_from(["reth", "--instance", "3"]);
cmd.rpc.adjust_instance_ports(cmd.instance);
cmd.network.port = DEFAULT_DISCOVERY_PORT + cmd.instance - 1;
// check rpc port numbers
@ -401,21 +368,21 @@ mod tests {
#[test]
fn parse_with_unused_ports() {
let cmd = NodeCommand::<()>::parse_from(["reth", "--with-unused-ports"]);
let cmd = NodeCommand::<NoArgs>::parse_from(["reth", "--with-unused-ports"]);
assert!(cmd.with_unused_ports);
}
#[test]
fn with_unused_ports_conflicts_with_instance() {
let err =
NodeCommand::<()>::try_parse_from(["reth", "--with-unused-ports", "--instance", "2"])
NodeCommand::try_parse_args_from(["reth", "--with-unused-ports", "--instance", "2"])
.unwrap_err();
assert_eq!(err.kind(), clap::error::ErrorKind::ArgumentConflict);
}
#[test]
fn with_unused_ports_check_zero() {
let mut cmd = NodeCommand::<()>::parse_from(["reth"]);
let mut cmd = NodeCommand::<NoArgs>::parse_from(["reth"]);
cmd.rpc = cmd.rpc.with_unused_ports();
cmd.network = cmd.network.with_unused_ports();


@ -26,7 +26,6 @@
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub mod builder;
pub mod cli;
pub mod commands;
pub mod utils;
@ -62,6 +61,11 @@ pub mod version {
pub use reth_node_core::version::*;
}
/// Re-exported from `reth_node_builder`
pub mod builder {
pub use reth_node_builder::*;
}
/// Re-exported from `reth_node_core`, also to prevent a breaking change. See the comment on
/// the `reth_node_core::args` re-export for more details.
pub mod dirs {


@ -10,12 +10,18 @@ compile_error!("Cannot build the `reth` binary with the `optimism` feature flag
#[cfg(not(feature = "optimism"))]
fn main() {
use reth::cli::Cli;
use reth_node_ethereum::EthereumNode;
// Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided.
if std::env::var("RUST_BACKTRACE").is_err() {
std::env::set_var("RUST_BACKTRACE", "1");
}
if let Err(err) = reth::cli::run() {
if let Err(err) = Cli::parse_args().run(|builder, _| async {
let handle = builder.launch_node(EthereumNode::default()).await?;
handle.node_exit_future.await
}) {
eprintln!("Error: {err:?}");
std::process::exit(1);
}


@ -1,4 +1,11 @@
#![allow(missing_docs, rustdoc::missing_crate_level_docs)]
use clap::Parser;
use reth::cli::Cli;
use reth_node_builder::NodeHandle;
use reth_node_optimism::{args::RollupArgs, OptimismEngineTypes, OptimismNode};
use reth_provider::BlockReaderIdExt;
// We use jemalloc for performance reasons
#[cfg(all(feature = "jemalloc", unix))]
#[global_allocator]
@ -14,7 +21,34 @@ fn main() {
std::env::set_var("RUST_BACKTRACE", "1");
}
if let Err(err) = reth::cli::run() {
if let Err(err) = Cli::<RollupArgs>::parse().run(|builder, rollup_args| async move {
let NodeHandle { node, node_exit_future } =
builder.launch_node(OptimismNode::new(rollup_args.clone())).await?;
// If `enable_genesis_walkback` is set to true, the rollup client will need to
// perform the derivation pipeline from genesis, validating the data dir.
// When set to false, set the finalized, safe, and unsafe head block hashes
// on the rollup client using a fork choice update. This prevents the rollup
// client from performing the derivation pipeline from genesis, and instead
// starts syncing from the current tip in the DB.
if node.chain_spec().is_optimism() && !rollup_args.enable_genesis_walkback {
let client = node.rpc_server_handles.auth.http_client();
if let Ok(Some(head)) = node.provider.latest_header() {
reth_rpc_api::EngineApiClient::<OptimismEngineTypes>::fork_choice_updated_v2(
&client,
reth_rpc_types::engine::ForkchoiceState {
head_block_hash: head.hash(),
safe_block_hash: head.hash(),
finalized_block_hash: head.hash(),
},
None,
)
.await?;
}
}
node_exit_future.await
}) {
eprintln!("Error: {err:?}");
std::process::exit(1);
}


@ -28,15 +28,6 @@ tokio = { workspace = true, features = ["sync", "time"] }
tokio-stream.workspace = true
tracing.workspace = true
[dev-dependencies]
reth-interfaces = { workspace = true, features = ["test-utils"] }
reth.workspace = true
tempfile.workspace = true
clap.workspace = true
jsonrpsee.workspace = true
eyre.workspace = true
serde_json.workspace = true
[features]
# Included solely to ignore certain tests.
optimism = []


@ -13,6 +13,7 @@
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![warn(unused_crate_dependencies)]
use reth_beacon_consensus::BeaconEngineMessage;
use reth_interfaces::{


@ -1,125 +0,0 @@
//! auto-mine consensus integration test
use clap::Parser;
use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params};
use reth::{
cli::{
components::RethNodeComponents,
ext::{NoArgs, NoArgsCliExt, RethNodeCommandConfig},
},
commands::node::NodeCommand,
core::cli::runner::CliRunner,
tasks::TaskSpawner,
};
use reth_primitives::{hex, revm_primitives::FixedBytes, ChainSpec, Genesis};
use reth_provider::CanonStateSubscriptions;
use reth_transaction_pool::TransactionPool;
use std::{sync::Arc, time::Duration};
use tokio::time::timeout;
#[derive(Debug)]
struct AutoMineConfig;
impl RethNodeCommandConfig for AutoMineConfig {
fn on_node_started<Reth: RethNodeComponents>(&mut self, components: &Reth) -> eyre::Result<()> {
let pool = components.pool();
let mut canon_events = components.events().subscribe_to_canonical_state();
components.task_executor().spawn_critical_blocking("rpc request", Box::pin(async move {
// submit tx through rpc
let raw_tx = "0x02f876820a28808477359400847735940082520894ab0840c0e43688012c1adb0f5e3fc665188f83d28a029d394a5d630544000080c080a0a044076b7e67b5deecc63f61a8d7913fab86ca365b344b5759d1fe3563b4c39ea019eab979dd000da04dfc72bb0377c092d30fd9e1cab5ae487de49586cc8b0090";
let client = HttpClientBuilder::default().build("http://127.0.0.1:8545").expect("http client should bind to default rpc port");
let response: String = client.request("eth_sendRawTransaction", rpc_params![raw_tx]).await.expect("client request should be valid");
let expected = "0xb1c6512f4fc202c04355fbda66755e0e344b152e633010e8fd75ecec09b63398";
assert_eq!(&response, expected);
// more than enough time for the next block
let duration = Duration::from_secs(15);
// wait for canon event or timeout
let update = timeout(duration, canon_events.recv())
.await
.expect("canon state should change before timeout")
.expect("canon events stream is still open");
let new_tip = update.tip();
let expected_tx_root: FixedBytes<32> = hex!("c79b5383458e63fb20c6a49d9ec7917195a59003a2af4b28a01d7c6fbbcd7e35").into();
assert_eq!(new_tip.transactions_root, expected_tx_root);
assert_eq!(new_tip.number, 1);
assert!(pool.pending_transactions().is_empty());
}));
Ok(())
}
}
/// This test is disabled for the `optimism` feature flag due to an incompatible feature set.
/// L1 info transactions, which are required for `op-reth` to process transactions, are not
/// included automatically.
#[test]
#[cfg_attr(feature = "optimism", ignore)]
pub(crate) fn test_auto_mine() {
// create temp path for test
let temp_path = tempfile::TempDir::new().expect("tempdir is okay").into_path();
let datadir = temp_path.to_str().expect("temp path is okay");
let no_args = NoArgs::with(AutoMineConfig);
let chain = custom_chain();
let mut command = NodeCommand::<NoArgsCliExt<AutoMineConfig>>::parse_from([
"reth",
"--dev",
"--datadir",
datadir,
"--debug.max-block",
"1",
"--debug.terminate",
])
.with_ext::<NoArgsCliExt<AutoMineConfig>>(no_args);
// use custom chain spec
command.chain = chain;
let runner = CliRunner::default();
let node_command = runner.run_command_until_exit(|ctx| command.execute(ctx));
assert!(node_command.is_ok())
}
fn custom_chain() -> Arc<ChainSpec> {
let custom_genesis = r#"
{
"nonce": "0x42",
"timestamp": "0x0",
"extraData": "0x5343",
"gasLimit": "0x1388",
"difficulty": "0x400000000",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"coinbase": "0x0000000000000000000000000000000000000000",
"alloc": {
"0x6Be02d1d3665660d22FF9624b7BE0551ee1Ac91b": {
"balance": "0x4a47e3c12448f4ad000000"
}
},
"number": "0x0",
"gasUsed": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"config": {
"ethash": {},
"chainId": 2600,
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"berlinBlock": 0,
"londonBlock": 0,
"terminalTotalDifficulty": 0,
"terminalTotalDifficultyPassed": true,
"shanghaiTime": 0
}
}
"#;
let genesis: Genesis = serde_json::from_str(custom_genesis).unwrap();
Arc::new(genesis.into())
}

View File

@ -1,4 +0,0 @@
//! auto-mine consensus tests
mod auto_mine;
async fn main() {}

View File

@ -4,32 +4,33 @@
use crate::{
components::{
FullNodeComponents, FullNodeComponentsAdapter, NodeComponents, NodeComponentsBuilder,
ComponentsBuilder, FullNodeComponents, FullNodeComponentsAdapter, NodeComponents,
NodeComponentsBuilder, PoolBuilder,
},
hooks::NodeHooks,
node::{FullNode, FullNodeTypes, FullNodeTypesAdapter, NodeTypes},
rpc::{RethRpcServerHandles, RpcContext, RpcHooks},
NodeHandle,
Node, NodeHandle,
};
use eyre::Context;
use futures::{future::Either, stream, stream_select, StreamExt};
use reth_beacon_consensus::{
hooks::{EngineHooks, PruneHook},
hooks::{EngineHooks, PruneHook, StaticFileHook},
BeaconConsensusEngine,
};
use reth_blockchain_tree::{BlockchainTreeConfig, ShareableBlockchainTree};
use reth_db::{
database::Database,
database_metrics::{DatabaseMetadata, DatabaseMetrics},
test_utils::{create_test_rw_db, TempDatabase},
DatabaseEnv,
};
use reth_interfaces::p2p::either::EitherDownloader;
use reth_network::{
transactions::{TransactionFetcherConfig, TransactionsManagerConfig},
NetworkBuilder, NetworkEvents, NetworkHandle,
};
use reth_network::{NetworkBuilder, NetworkConfig, NetworkEvents, NetworkHandle};
use reth_node_core::{
cli::config::{PayloadBuilderConfig, RethRpcConfig, RethTransactionPoolConfig},
dirs::{ChainPath, DataDirPath},
dirs::{ChainPath, DataDirPath, MaybePlatformPath},
engine_api_store::EngineApiStore,
events::cl::ConsensusLayerHealthEvents,
exit::NodeExitFuture,
init::init_genesis,
@ -42,13 +43,14 @@ use reth_primitives::{
format_ether, ChainSpec,
};
use reth_provider::{providers::BlockchainProvider, ChainSpecProvider, ProviderFactory};
use reth_prune::{PrunerBuilder, PrunerEvent};
use reth_prune::PrunerBuilder;
use reth_revm::EvmProcessorFactory;
use reth_rpc_engine_api::EngineApi;
use reth_static_file::StaticFileProducer;
use reth_tasks::TaskExecutor;
use reth_tracing::tracing::{debug, info};
use reth_transaction_pool::{PoolConfig, TransactionPool};
use std::sync::Arc;
use std::{str::FromStr, sync::Arc};
use tokio::sync::{mpsc::unbounded_channel, oneshot};
/// The builtin provider type of the reth node.
@ -56,6 +58,9 @@ use tokio::sync::{mpsc::unbounded_channel, oneshot};
type RethFullProviderType<DB, Evm> =
BlockchainProvider<DB, ShareableBlockchainTree<DB, EvmProcessorFactory<Evm>>>;
type RethFullAdapter<DB, N> =
FullNodeTypesAdapter<N, DB, RethFullProviderType<DB, <N as NodeTypes>::Evm>>;
/// Declaratively construct a node.
///
/// [`NodeBuilder`] provides a [builder-like interface][builder] for composing
@ -69,11 +74,19 @@ type RethFullProviderType<DB, Evm> =
/// [ConfigureEvm](reth_node_api::evm::ConfigureEvm), the database [Database] and finally all the
/// components of the node that are downstream of those types, these include:
///
/// - The transaction pool: [PoolBuilder](crate::components::PoolBuilder)
/// - The transaction pool: [PoolBuilder]
/// - The network: [NetworkBuilder](crate::components::NetworkBuilder)
/// - The payload builder: [PayloadBuilder](crate::components::PayloadServiceBuilder)
///
/// Finally, the node is ready to launch [NodeBuilder::launch]
/// Once all the components are configured, the node is ready to be launched.
///
/// On launch the builder returns a fully type aware [NodeHandle] that has access to all the
/// configured components and can interact with the node.
///
/// There are convenience functions for networks that come with a preset of types and components via
/// the [Node] trait; see `reth_node_ethereum::EthereumNode` or `reth_node_optimism::OptimismNode`.
///
/// The [NodeBuilder::node] function configures the node's types and components in one step.
///
/// [builder]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html
pub struct NodeBuilder<DB, State> {
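For orientation, here is a minimal sketch of driving this builder end to end. The `NodeBuilder::new` constructor, the `reth_node_builder`/`reth_node_ethereum` import paths and `EthereumNode: Default` are assumptions not confirmed by this diff; the method calls and the database bounds mirror what the hunks below introduce.

```rust
use reth_db::{
    database::Database,
    database_metrics::{DatabaseMetadata, DatabaseMetrics},
};
use reth_node_builder::NodeBuilder; // assumed import path
use reth_node_core::{
    dirs::{ChainPath, DataDirPath},
    node_config::NodeConfig,
};
use reth_node_ethereum::EthereumNode; // preset referenced in the docs above
use reth_tasks::TaskExecutor;
use reth_tracing::tracing::info;

/// A minimal sketch: configure the database, the launch context, a preset node
/// and a hook, then launch.
async fn launch_preset_node<DB>(
    config: NodeConfig,
    db: DB,
    executor: TaskExecutor,
    data_dir: ChainPath<DataDirPath>,
) -> eyre::Result<()>
where
    // bounds taken from the launch impls in this diff
    DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static,
{
    let _handle = NodeBuilder::new(config) // constructor name assumed
        .with_database(db)
        .with_launch_context(executor, data_dir)
        // preset types + components in one step via the `Node` trait
        .node(EthereumNode::default())
        .on_node_started(|node| {
            info!(chain = %node.chain_spec().chain, "node started");
            Ok(())
        })
        .launch()
        .await?;

    // `_handle.node` exposes the running components; awaiting node exit is
    // omitted from this sketch.
    Ok(())
}
```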
@ -122,15 +135,40 @@ impl NodeBuilder<(), InitState> {
}
impl<DB> NodeBuilder<DB, InitState> {
/// Configures the additional external context, e.g. additional context captured via CLI args.
/// Configures the underlying database that the node will use.
pub fn with_database<D>(self, database: D) -> NodeBuilder<D, InitState> {
NodeBuilder { config: self.config, state: self.state, database }
}
/// Preconfigure the builder with the context to launch the node.
///
/// This provides the task executor and the data directory for the node.
pub fn with_launch_context(
self,
task_executor: TaskExecutor,
data_dir: ChainPath<DataDirPath>,
) -> WithLaunchContext<DB, InitState> {
WithLaunchContext { builder: self, task_executor, data_dir }
}
/// Creates an _ephemeral_ preconfigured node for testing purposes.
pub fn testing_node(
self,
task_executor: TaskExecutor,
) -> WithLaunchContext<Arc<TempDatabase<DatabaseEnv>>, InitState> {
let db = create_test_rw_db();
let db_path_str = db.path().to_str().expect("Path is not valid unicode");
let path =
MaybePlatformPath::<DataDirPath>::from_str(db_path_str).expect("Path is not valid");
let data_dir = path.unwrap_or_chain_default(self.config.chain.chain);
WithLaunchContext { builder: self.with_database(db), task_executor, data_dir }
}
}
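A sketch of the ephemeral path: `testing_node` wires up the `TempDatabase`-backed db and derives the data dir from its temp path, so only a config and a task executor are needed. Constructor and preset names are again assumptions.

```rust
use reth_node_builder::NodeBuilder; // assumed import path
use reth_node_core::node_config::NodeConfig;
use reth_node_ethereum::EthereumNode; // assumed import path
use reth_tasks::TaskExecutor;

/// Spawns a throwaway node backed by a temporary database.
async fn spawn_test_node(config: NodeConfig, executor: TaskExecutor) -> eyre::Result<()> {
    let _handle = NodeBuilder::new(config)
        .testing_node(executor)
        .node(EthereumNode::default())
        .launch()
        .await?;
    Ok(())
}
```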
impl<DB> NodeBuilder<DB, InitState>
where
DB: Database + Clone + 'static,
DB: Database + Unpin + Clone + 'static,
{
/// Configures the types of the node.
pub fn with_types<T>(self, types: T) -> NodeBuilder<DB, TypesState<T, DB>>
@ -143,6 +181,43 @@ where
database: self.database,
}
}
/// Preconfigures the node with a specific node implementation.
///
/// This is a convenience method that sets the node's types and components in one call.
pub fn node<N>(
self,
node: N,
) -> NodeBuilder<
DB,
ComponentsState<
N,
ComponentsBuilder<
RethFullAdapter<DB, N>,
N::PoolBuilder,
N::PayloadBuilder,
N::NetworkBuilder,
>,
FullNodeComponentsAdapter<
RethFullAdapter<DB, N>,
<N::PoolBuilder as PoolBuilder<RethFullAdapter<DB, N>>>::Pool,
>,
>,
>
where
N: Node<FullNodeTypesAdapter<N, DB, RethFullProviderType<DB, <N as NodeTypes>::Evm>>>,
N::PoolBuilder: PoolBuilder<RethFullAdapter<DB, N>>,
N::NetworkBuilder: crate::components::NetworkBuilder<
RethFullAdapter<DB, N>,
<N::PoolBuilder as PoolBuilder<RethFullAdapter<DB, N>>>::Pool,
>,
N::PayloadBuilder: crate::components::PayloadServiceBuilder<
RethFullAdapter<DB, N>,
<N::PoolBuilder as PoolBuilder<RethFullAdapter<DB, N>>>::Pool,
>,
{
self.with_types(node.clone()).with_components(node.components())
}
}
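To make the convenience explicit, a sketch of the two equivalent spellings, using the test database helper from `reth_db`; the `test-utils` feature gate and the names outside this diff are assumptions.

```rust
use reth_db::test_utils::create_test_rw_db; // assumes the `test-utils` feature
use reth_node_builder::NodeBuilder; // assumed import path
use reth_node_core::node_config::NodeConfig;
use reth_node_ethereum::EthereumNode; // assumed import path

fn equivalent_setups(config: NodeConfig) {
    let db = create_test_rw_db();
    let node = EthereumNode::default();

    // one step: preset types + components
    let _a = NodeBuilder::new(config.clone()).with_database(db.clone()).node(node.clone());

    // spelled out: what `node()` does internally
    let _b = NodeBuilder::new(config)
        .with_database(db)
        .with_types(node.clone())
        .with_components(node.components());
}
```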
impl<DB, Types> NodeBuilder<DB, TypesState<Types, DB>>
@ -216,40 +291,6 @@ where
}
}
/// Resets the setup process to the components stage.
///
/// CAUTION: All previously configured hooks will be lost.
pub fn fuse_components<C>(
self,
components_builder: C,
) -> NodeBuilder<
DB,
ComponentsState<
Types,
C,
FullNodeComponentsAdapter<
FullNodeTypesAdapter<Types, DB, RethFullProviderType<DB, Types::Evm>>,
C::Pool,
>,
>,
>
where
C: NodeComponentsBuilder<
FullNodeTypesAdapter<Types, DB, RethFullProviderType<DB, Types::Evm>>,
>,
{
NodeBuilder {
config: self.config,
database: self.database,
state: ComponentsState {
types: self.state.types,
components_builder,
hooks: NodeHooks::new(),
rpc: RpcHooks::new(),
},
}
}
/// Sets the hook that is run once the node's components are initialized.
pub fn on_component_initialized<F>(mut self, hook: F) -> Self
where
@ -259,6 +300,7 @@ where
Components::Pool,
>,
) -> eyre::Result<()>
+ Send
+ 'static,
{
self.state.hooks.set_on_component_initialized(hook);
@ -276,6 +318,7 @@ where
>,
>,
) -> eyre::Result<()>
+ Send
+ 'static,
{
self.state.hooks.set_on_node_started(hook);
@ -295,6 +338,7 @@ where
>,
RethRpcServerHandles,
) -> eyre::Result<()>
+ Send
+ 'static,
{
self.state.rpc.set_on_rpc_started(hook);
@ -313,6 +357,7 @@ where
>,
>,
) -> eyre::Result<()>
+ Send
+ 'static,
{
self.state.rpc.set_extend_rpc_modules(hook);
@ -320,6 +365,11 @@ where
}
/// Launches the node and returns a handle to it.
///
/// This bootstraps the node internals, creates all the components with the provided
/// [NodeComponentsBuilder] and launches the node.
///
/// Returns a [NodeHandle] that can be used to interact with the node.
pub async fn launch(
self,
executor: TaskExecutor,
@ -332,7 +382,7 @@ where
>,
>,
> {
// get config
// get config from file
let reth_config = self.load_config(&data_dir)?;
let Self {
@ -354,20 +404,14 @@ where
database.clone(),
Arc::clone(&config.chain),
data_dir.static_files_path(),
)?;
// configure static_file_producer
let static_file_producer = reth_static_file::StaticFileProducer::new(
provider_factory.clone(),
provider_factory.static_file_provider(),
config.prune_config()?.unwrap_or_default().segments,
);
)?
.with_static_files_metrics();
debug!(target: "reth::cli", chain=%config.chain.chain, genesis=?config.chain.genesis_hash(), "Initializing genesis");
let genesis_hash = init_genesis(provider_factory.clone())?;
info!(target: "reth::cli", "{}", config.chain.display_hardforks());
info!(target: "reth::cli", "{}",config.chain.display_hardforks());
let consensus = config.consensus();
@ -412,7 +456,7 @@ where
debug!(target: "reth::cli", "creating components");
let NodeComponents { transaction_pool, network, payload_builder } =
components_builder.build_components(&ctx)?;
components_builder.build_components(&ctx).await?;
let BuilderContext {
provider: blockchain_db,
@ -438,8 +482,30 @@ where
// create pipeline
let network_client = network.fetch_client().await?;
let (consensus_engine_tx, consensus_engine_rx) = unbounded_channel();
let (consensus_engine_tx, mut consensus_engine_rx) = unbounded_channel();
if let Some(store_path) = config.debug.engine_api_store.clone() {
debug!(target: "reth::cli", "spawning engine API store");
let (engine_intercept_tx, engine_intercept_rx) = unbounded_channel();
let engine_api_store = EngineApiStore::new(store_path);
executor.spawn_critical(
"engine api interceptor",
engine_api_store.intercept(consensus_engine_rx, engine_intercept_tx),
);
consensus_engine_rx = engine_intercept_rx;
};
let max_block = config.max_block(&network_client, provider_factory.clone()).await?;
let mut hooks = EngineHooks::new();
let mut static_file_producer = StaticFileProducer::new(
provider_factory.clone(),
provider_factory.static_file_provider(),
prune_config.clone().unwrap_or_default().segments,
);
let static_file_producer_events = static_file_producer.events();
hooks.add(StaticFileHook::new(static_file_producer.clone(), Box::new(executor.clone())));
info!(target: "reth::cli", "StaticFileProducer initialized");
// Configure the pipeline
let (mut pipeline, client) = if config.dev.dev {
@ -504,22 +570,16 @@ where
let pipeline_events = pipeline.events();
let initial_target = config.initial_pipeline_target(genesis_hash);
let mut hooks = EngineHooks::new();
let pruner_events = if let Some(prune_config) = prune_config {
let mut pruner = PrunerBuilder::new(prune_config.clone())
.max_reorg_depth(tree_config.max_reorg_depth() as usize)
.prune_delete_limit(config.chain.prune_delete_limit)
.build(provider_factory);
let prune_config = prune_config.unwrap_or_default();
let mut pruner = PrunerBuilder::new(prune_config.clone())
.max_reorg_depth(tree_config.max_reorg_depth() as usize)
.prune_delete_limit(config.chain.prune_delete_limit)
.build(provider_factory.clone());
let events = pruner.events();
hooks.add(PruneHook::new(pruner, Box::new(executor.clone())));
info!(target: "reth::cli", ?prune_config, "Pruner initialized");
Either::Left(events)
} else {
Either::Right(stream::empty::<PrunerEvent>())
};
let pruner_events = pruner.events();
hooks.add(PruneHook::new(pruner, Box::new(executor.clone())));
info!(target: "reth::cli", ?prune_config, "Pruner initialized");
// Configure the consensus engine
let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel(
@ -551,7 +611,8 @@ where
} else {
Either::Right(stream::empty())
},
pruner_events.map(Into::into)
pruner_events.map(Into::into),
static_file_producer_events.map(Into::into)
);
executor.spawn_critical(
"events task",
@ -613,7 +674,7 @@ where
network,
provider,
payload_builder,
executor,
task_executor: executor,
rpc_server_handles,
rpc_registry,
config,
@ -638,6 +699,283 @@ where
}
}
/// A [NodeBuilder] with its launch context already configured.
///
/// This exposes the same methods as [NodeBuilder] but with the launch context already configured;
/// see [WithLaunchContext::launch].
pub struct WithLaunchContext<DB, State> {
builder: NodeBuilder<DB, State>,
task_executor: TaskExecutor,
data_dir: ChainPath<DataDirPath>,
}
impl<DB, State> WithLaunchContext<DB, State> {
/// Returns a reference to the node builder's config.
pub fn config(&self) -> &NodeConfig {
self.builder.config()
}
/// Returns a reference to the task executor.
pub fn task_executor(&self) -> &TaskExecutor {
&self.task_executor
}
/// Returns a reference to the data directory.
pub fn data_dir(&self) -> &ChainPath<DataDirPath> {
&self.data_dir
}
}
impl<DB> WithLaunchContext<DB, InitState>
where
DB: Database + Clone + Unpin + 'static,
{
/// Configures the types of the node.
pub fn with_types<T>(self, types: T) -> WithLaunchContext<DB, TypesState<T, DB>>
where
T: NodeTypes,
{
WithLaunchContext {
builder: self.builder.with_types(types),
task_executor: self.task_executor,
data_dir: self.data_dir,
}
}
/// Preconfigures the node with a specific node implementation.
pub fn node<N>(
self,
node: N,
) -> WithLaunchContext<
DB,
ComponentsState<
N,
ComponentsBuilder<
RethFullAdapter<DB, N>,
N::PoolBuilder,
N::PayloadBuilder,
N::NetworkBuilder,
>,
FullNodeComponentsAdapter<
RethFullAdapter<DB, N>,
<N::PoolBuilder as PoolBuilder<RethFullAdapter<DB, N>>>::Pool,
>,
>,
>
where
N: Node<FullNodeTypesAdapter<N, DB, RethFullProviderType<DB, <N as NodeTypes>::Evm>>>,
N::PoolBuilder: PoolBuilder<RethFullAdapter<DB, N>>,
N::NetworkBuilder: crate::components::NetworkBuilder<
RethFullAdapter<DB, N>,
<N::PoolBuilder as PoolBuilder<RethFullAdapter<DB, N>>>::Pool,
>,
N::PayloadBuilder: crate::components::PayloadServiceBuilder<
RethFullAdapter<DB, N>,
<N::PoolBuilder as PoolBuilder<RethFullAdapter<DB, N>>>::Pool,
>,
{
self.with_types(node.clone()).with_components(node.components())
}
}
impl<DB> WithLaunchContext<DB, InitState>
where
DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static,
{
/// Launches a preconfigured [Node]
///
/// This bootstraps the node internals, creates all the components with the given [Node] type
/// and launches the node.
///
/// Returns a [NodeHandle] that can be used to interact with the node.
pub async fn launch_node<N>(
self,
node: N,
) -> eyre::Result<
NodeHandle<
FullNodeComponentsAdapter<
RethFullAdapter<DB, N>,
<N::PoolBuilder as PoolBuilder<RethFullAdapter<DB, N>>>::Pool,
>,
>,
>
where
N: Node<FullNodeTypesAdapter<N, DB, RethFullProviderType<DB, <N as NodeTypes>::Evm>>>,
N::PoolBuilder: PoolBuilder<RethFullAdapter<DB, N>>,
N::NetworkBuilder: crate::components::NetworkBuilder<
RethFullAdapter<DB, N>,
<N::PoolBuilder as PoolBuilder<RethFullAdapter<DB, N>>>::Pool,
>,
N::PayloadBuilder: crate::components::PayloadServiceBuilder<
RethFullAdapter<DB, N>,
<N::PoolBuilder as PoolBuilder<RethFullAdapter<DB, N>>>::Pool,
>,
{
self.node(node).launch().await
}
}
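For completeness, `launch_node` collapses the `.node(..).launch()` pair from the earlier sketches into one call. Same assumed imports and bounds as before.

```rust
use reth_db::{
    database::Database,
    database_metrics::{DatabaseMetadata, DatabaseMetrics},
};
use reth_node_builder::NodeBuilder; // assumed import path
use reth_node_core::{
    dirs::{ChainPath, DataDirPath},
    node_config::NodeConfig,
};
use reth_node_ethereum::EthereumNode; // assumed import path
use reth_tasks::TaskExecutor;

async fn launch_preconfigured<DB>(
    config: NodeConfig,
    db: DB,
    executor: TaskExecutor,
    data_dir: ChainPath<DataDirPath>,
) -> eyre::Result<()>
where
    DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static,
{
    // shorthand for `.node(EthereumNode::default()).launch()`
    let _handle = NodeBuilder::new(config)
        .with_database(db)
        .with_launch_context(executor, data_dir)
        .launch_node(EthereumNode::default())
        .await?;
    Ok(())
}
```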
impl<DB, Types> WithLaunchContext<DB, TypesState<Types, DB>>
where
Types: NodeTypes,
DB: Database + Clone + Unpin + 'static,
{
/// Configures the node's components.
///
/// The given components builder is used to create the components of the node when it is
/// launched.
pub fn with_components<Components>(
self,
components_builder: Components,
) -> WithLaunchContext<
DB,
ComponentsState<
Types,
Components,
FullNodeComponentsAdapter<
FullNodeTypesAdapter<Types, DB, RethFullProviderType<DB, Types::Evm>>,
Components::Pool,
>,
>,
>
where
Components: NodeComponentsBuilder<
FullNodeTypesAdapter<Types, DB, RethFullProviderType<DB, Types::Evm>>,
>,
{
WithLaunchContext {
builder: self.builder.with_components(components_builder),
task_executor: self.task_executor,
data_dir: self.data_dir,
}
}
}
impl<DB, Types, Components>
WithLaunchContext<
DB,
ComponentsState<
Types,
Components,
FullNodeComponentsAdapter<
FullNodeTypesAdapter<Types, DB, RethFullProviderType<DB, Types::Evm>>,
Components::Pool,
>,
>,
>
where
DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static,
Types: NodeTypes,
Components: NodeComponentsBuilder<
FullNodeTypesAdapter<Types, DB, RethFullProviderType<DB, Types::Evm>>,
>,
{
/// Apply a function to the components builder.
pub fn map_components(self, f: impl FnOnce(Components) -> Components) -> Self {
Self {
builder: self.builder.map_components(f),
task_executor: self.task_executor,
data_dir: self.data_dir,
}
}
/// Sets the hook that is run once the node's components are initialized.
pub fn on_component_initialized<F>(mut self, hook: F) -> Self
where
F: Fn(
FullNodeComponentsAdapter<
FullNodeTypesAdapter<Types, DB, RethFullProviderType<DB, Types::Evm>>,
Components::Pool,
>,
) -> eyre::Result<()>
+ Send
+ 'static,
{
self.builder.state.hooks.set_on_component_initialized(hook);
self
}
/// Sets the hook that is run once the node has started.
pub fn on_node_started<F>(mut self, hook: F) -> Self
where
F: Fn(
FullNode<
FullNodeComponentsAdapter<
FullNodeTypesAdapter<Types, DB, RethFullProviderType<DB, Types::Evm>>,
Components::Pool,
>,
>,
) -> eyre::Result<()>
+ Send
+ 'static,
{
self.builder.state.hooks.set_on_node_started(hook);
self
}
/// Sets the hook that is run once the rpc server is started.
pub fn on_rpc_started<F>(mut self, hook: F) -> Self
where
F: Fn(
RpcContext<
'_,
FullNodeComponentsAdapter<
FullNodeTypesAdapter<Types, DB, RethFullProviderType<DB, Types::Evm>>,
Components::Pool,
>,
>,
RethRpcServerHandles,
) -> eyre::Result<()>
+ Send
+ 'static,
{
self.builder.state.rpc.set_on_rpc_started(hook);
self
}
/// Sets the hook that is run to configure the rpc modules.
pub fn extend_rpc_modules<F>(mut self, hook: F) -> Self
where
F: Fn(
RpcContext<
'_,
FullNodeComponentsAdapter<
FullNodeTypesAdapter<Types, DB, RethFullProviderType<DB, Types::Evm>>,
Components::Pool,
>,
>,
) -> eyre::Result<()>
+ Send
+ 'static,
{
self.builder.state.rpc.set_extend_rpc_modules(hook);
self
}
/// Launches the node and returns a handle to it.
pub async fn launch(
self,
) -> eyre::Result<
NodeHandle<
FullNodeComponentsAdapter<
FullNodeTypesAdapter<Types, DB, RethFullProviderType<DB, Types::Evm>>,
Components::Pool,
>,
>,
> {
let Self { builder, task_executor, data_dir } = self;
builder.launch(task_executor, data_dir).await
}
/// Check that the builder can be launched
///
/// This is useful when writing tests to ensure that the builder is configured correctly.
pub fn check_launch(self) -> Self {
self
}
}
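A sketch of how `check_launch` might be used in a test: nothing is started, the call only confirms that the configured generics compose. Names outside this diff are assumptions.

```rust
use reth_node_builder::NodeBuilder; // assumed import path
use reth_node_core::node_config::NodeConfig;
use reth_node_ethereum::EthereumNode; // assumed import path
use reth_tasks::TaskExecutor;

/// Type-checks a full builder configuration without launching anything.
fn assert_builder_composes(config: NodeConfig, executor: TaskExecutor) {
    let _builder = NodeBuilder::new(config)
        .testing_node(executor)
        .node(EthereumNode::default())
        .check_launch();
}
```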
/// Captures the necessary context for building the components of the node.
#[derive(Debug)]
pub struct BuilderContext<Node: FullNodeTypes> {
@ -712,6 +1050,17 @@ impl<Node: FullNodeTypes> BuilderContext<Node> {
self.config.builder.clone()
}
/// Returns the default network config for the node.
pub fn network_config(&self) -> eyre::Result<NetworkConfig<Node::Provider>> {
self.config.network_config(
&self.reth_config,
self.provider.clone(),
self.executor.clone(),
self.head,
self.data_dir(),
)
}
/// Creates the [NetworkBuilder] for the node.
pub async fn network_builder(&self) -> eyre::Result<NetworkBuilder<Node::Provider, (), ()>> {
self.config
@ -725,11 +1074,8 @@ impl<Node: FullNodeTypes> BuilderContext<Node> {
.await
}
/// Creates the [NetworkBuilder] for the node and blocks until it is ready.
pub fn network_builder_blocking(&self) -> eyre::Result<NetworkBuilder<Node::Provider, (), ()>> {
self.executor.block_on(self.network_builder())
}
/// Convenience function to start the network.
///
/// Spawns the configured network and associated tasks and returns the [NetworkHandle] connected
/// to that network.
pub fn start_network<Pool>(
@ -741,17 +1087,7 @@ impl<Node: FullNodeTypes> BuilderContext<Node> {
Pool: TransactionPool + Unpin + 'static,
{
let (handle, network, txpool, eth) = builder
.transactions(
pool,
TransactionsManagerConfig {
transaction_fetcher_config: TransactionFetcherConfig::new(
self.config.network.soft_limit_byte_size_pooled_transactions_response,
self.config
.network
.soft_limit_byte_size_pooled_transactions_response_on_pack_request,
),
},
)
.transactions(pool, Default::default())
.request_handler(self.provider().clone())
.split_with_handle();
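These context helpers are what a custom network component would typically lean on. Below is a sketch of a custom implementation of the components-level `NetworkBuilder` trait (defined later in this diff) built entirely from the two helpers above; the struct name is hypothetical and the `reth_node_builder` import paths are assumptions.

```rust
use reth_network::NetworkHandle;
use reth_node_builder::{components::NetworkBuilder, BuilderContext, FullNodeTypes}; // assumed paths
use reth_transaction_pool::TransactionPool;

/// Hypothetical stand-in for a custom network component.
#[derive(Debug, Default, Clone)]
struct MyNetworkBuilder;

impl<Node, Pool> NetworkBuilder<Node, Pool> for MyNetworkBuilder
where
    Node: FullNodeTypes,
    Pool: TransactionPool + Unpin + 'static,
{
    async fn build_network(
        self,
        ctx: &BuilderContext<Node>,
        pool: Pool,
    ) -> eyre::Result<NetworkHandle> {
        // derive the default network config from the node config, then spawn
        // the network together with its transaction and request handling tasks
        let network = ctx.network_builder().await?;
        let handle = ctx.start_network(network, pool);
        Ok(handle)
    }
}
```

A sketch of plugging this into a node follows the `ComponentsBuilder` section below.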

View File

@ -87,6 +87,9 @@ where
Node: FullNodeTypes,
{
/// Configures the pool builder.
///
/// This accepts a [PoolBuilder] instance that will be used to create the node's transaction
/// pool.
pub fn pool<PB>(self, pool_builder: PB) -> ComponentsBuilder<Node, PB, PayloadB, NetworkB>
where
PB: PoolBuilder<Node>,
@ -102,6 +105,9 @@ where
PoolB: PoolBuilder<Node>,
{
/// Configures the network builder.
///
/// This accepts a [NetworkBuilder] instance that will be used to create the node's network
/// stack.
pub fn network<NB>(self, network_builder: NB) -> ComponentsBuilder<Node, PoolB, PayloadB, NB>
where
NB: NetworkBuilder<Node, PoolB::Pool>,
@ -111,6 +117,9 @@ where
}
/// Configures the payload builder.
///
/// This accepts a [PayloadServiceBuilder] instance that will be used to create the node's
/// payload builder service.
pub fn payload<PB>(self, payload_builder: PB) -> ComponentsBuilder<Node, PoolB, PB, NetworkB>
where
PB: PayloadServiceBuilder<Node, PoolB::Pool>,
@ -130,15 +139,15 @@ where
{
type Pool = PoolB::Pool;
fn build_components(
async fn build_components(
self,
context: &BuilderContext<Node>,
) -> eyre::Result<NodeComponents<Node, Self::Pool>> {
let Self { pool_builder, payload_builder, network_builder, _marker } = self;
let pool = pool_builder.build_pool(context)?;
let network = network_builder.build_network(context, pool.clone())?;
let payload_builder = payload_builder.spawn_payload_service(context, pool.clone())?;
let pool = pool_builder.build_pool(context).await?;
let network = network_builder.build_network(context, pool.clone()).await?;
let payload_builder = payload_builder.spawn_payload_service(context, pool.clone()).await?;
Ok(NodeComponents { transaction_pool: pool, network, payload_builder })
}
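Reading the bounds above: the pool builder must be configured before the payload and network builders, and each setter simply swaps the corresponding type parameter. As a sketch, this is how a preset's components could be reused with only the network stack replaced, using the hypothetical `MyNetworkBuilder` from the earlier sketch; the import paths are assumptions.

```rust
use reth_db::{test_utils::TempDatabase, DatabaseEnv};
use reth_node_builder::{InitState, WithLaunchContext}; // assumed paths
use reth_node_ethereum::EthereumNode; // assumed path
use std::sync::Arc;

/// Keeps the preset's pool and payload builders but swaps the network builder.
/// `MyNetworkBuilder` is the hypothetical type from the previous sketch; the
/// concrete `DB` is the testing database produced by `testing_node`.
fn customize_network(builder: WithLaunchContext<Arc<TempDatabase<DatabaseEnv>>, InitState>) {
    let node = EthereumNode::default();
    let _customized = builder
        .with_types(node.clone())
        // `.network(..)` replaces the network builder while keeping the rest
        .with_components(node.components().network(MyNetworkBuilder::default()));
}
```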

View File

@ -3,20 +3,30 @@
use crate::{node::FullNodeTypes, BuilderContext};
use reth_network::NetworkHandle;
use reth_transaction_pool::TransactionPool;
use std::future::Future;
/// A type that knows how to build the network implementation.
pub trait NetworkBuilder<Node: FullNodeTypes, Pool: TransactionPool> {
pub trait NetworkBuilder<Node: FullNodeTypes, Pool: TransactionPool>: Send {
/// Launches the network implementation and returns the handle to it.
fn build_network(self, ctx: &BuilderContext<Node>, pool: Pool) -> eyre::Result<NetworkHandle>;
fn build_network(
self,
ctx: &BuilderContext<Node>,
pool: Pool,
) -> impl Future<Output = eyre::Result<NetworkHandle>> + Send;
}
impl<Node, F, Pool> NetworkBuilder<Node, Pool> for F
impl<Node, F, Fut, Pool> NetworkBuilder<Node, Pool> for F
where
Node: FullNodeTypes,
Pool: TransactionPool,
F: FnOnce(&BuilderContext<Node>, Pool) -> eyre::Result<NetworkHandle>,
F: Fn(&BuilderContext<Node>, Pool) -> Fut + Send,
Fut: Future<Output = eyre::Result<NetworkHandle>> + Send,
{
fn build_network(self, ctx: &BuilderContext<Node>, pool: Pool) -> eyre::Result<NetworkHandle> {
fn build_network(
self,
ctx: &BuilderContext<Node>,
pool: Pool,
) -> impl Future<Output = eyre::Result<NetworkHandle>> + Send {
self(ctx, pool)
}
}

View File

@ -3,28 +3,32 @@
use crate::{node::FullNodeTypes, BuilderContext};
use reth_payload_builder::PayloadBuilderHandle;
use reth_transaction_pool::TransactionPool;
use std::future::Future;
/// A type that knows how to spawn the payload service.
pub trait PayloadServiceBuilder<Node: FullNodeTypes, Pool: TransactionPool> {
pub trait PayloadServiceBuilder<Node: FullNodeTypes, Pool: TransactionPool>: Send {
/// Spawns the payload service and returns the handle to it.
///
/// The [BuilderContext] is provided to give access to the node's configuration.
fn spawn_payload_service(
self,
ctx: &BuilderContext<Node>,
pool: Pool,
) -> eyre::Result<PayloadBuilderHandle<Node::Engine>>;
) -> impl Future<Output = eyre::Result<PayloadBuilderHandle<Node::Engine>>> + Send;
}
impl<Node, F, Pool> PayloadServiceBuilder<Node, Pool> for F
impl<Node, F, Fut, Pool> PayloadServiceBuilder<Node, Pool> for F
where
Node: FullNodeTypes,
Pool: TransactionPool,
F: FnOnce(&BuilderContext<Node>, Pool) -> eyre::Result<PayloadBuilderHandle<Node::Engine>>,
F: Fn(&BuilderContext<Node>, Pool) -> Fut + Send,
Fut: Future<Output = eyre::Result<PayloadBuilderHandle<Node::Engine>>> + Send,
{
fn spawn_payload_service(
self,
ctx: &BuilderContext<Node>,
pool: Pool,
) -> eyre::Result<PayloadBuilderHandle<Node::Engine>> {
) -> impl Future<Output = eyre::Result<PayloadBuilderHandle<Node::Engine>>> + Send {
self(ctx, pool)
}
}

View File

@ -1,25 +1,33 @@
//! Pool component for the node builder.
use crate::{node::FullNodeTypes, BuilderContext};
use reth_transaction_pool::TransactionPool;
use std::future::Future;
/// A type that knows how to build the transaction pool.
pub trait PoolBuilder<Node: FullNodeTypes> {
pub trait PoolBuilder<Node: FullNodeTypes>: Send {
/// The transaction pool to build.
type Pool: TransactionPool + Unpin + 'static;
/// Creates the transaction pool.
fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool>;
fn build_pool(
self,
ctx: &BuilderContext<Node>,
) -> impl Future<Output = eyre::Result<Self::Pool>> + Send;
}
impl<Node, F, Pool> PoolBuilder<Node> for F
impl<Node, F, Fut, Pool> PoolBuilder<Node> for F
where
Node: FullNodeTypes,
Pool: TransactionPool + Unpin + 'static,
F: FnOnce(&BuilderContext<Node>) -> eyre::Result<Pool>,
F: FnOnce(&BuilderContext<Node>) -> Fut + Send,
Fut: Future<Output = eyre::Result<Pool>> + Send,
{
type Pool = Pool;
fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> {
fn build_pool(
self,
ctx: &BuilderContext<Node>,
) -> impl Future<Output = eyre::Result<Self::Pool>> {
self(ctx)
}
}

View File

@ -125,13 +125,14 @@ pub trait NodeComponentsBuilder<Node: FullNodeTypes> {
fn build_components(
self,
context: &BuilderContext<Node>,
) -> eyre::Result<NodeComponents<Node, Self::Pool>>;
) -> impl std::future::Future<Output = eyre::Result<NodeComponents<Node, Self::Pool>>> + Send;
}
impl<Node, F, Pool> NodeComponentsBuilder<Node> for F
impl<Node, F, Fut, Pool> NodeComponentsBuilder<Node> for F
where
Node: FullNodeTypes,
F: FnOnce(&BuilderContext<Node>) -> eyre::Result<NodeComponents<Node, Pool>>,
F: FnOnce(&BuilderContext<Node>) -> Fut + Send,
Fut: std::future::Future<Output = eyre::Result<NodeComponents<Node, Pool>>> + Send,
Pool: TransactionPool + Unpin + 'static,
{
type Pool = Pool;
@ -139,7 +140,8 @@ where
fn build_components(
self,
ctx: &BuilderContext<Node>,
) -> eyre::Result<NodeComponents<Node, Pool>> {
) -> impl std::future::Future<Output = eyre::Result<NodeComponents<Node, Self::Pool>>> + Send
{
self(ctx)
}
}

View File

@ -3,6 +3,7 @@ use reth_node_core::exit::NodeExitFuture;
use std::fmt;
/// A Handle to the launched node.
#[must_use = "Needs to await the node exit future"]
pub struct NodeHandle<Node: FullNodeComponents> {
/// All node components.
pub node: FullNode<Node>,

View File

@ -72,7 +72,7 @@ impl<Node: FullNodeComponents> fmt::Debug for NodeHooks<Node> {
}
/// A helper trait for the event hook that is run once the node is initialized.
pub trait OnComponentInitializedHook<Node> {
pub trait OnComponentInitializedHook<Node>: Send {
/// Consumes the event hook and runs it.
///
/// If this returns an error, the node launch will be aborted.
@ -81,7 +81,7 @@ pub trait OnComponentInitializedHook<Node> {
impl<Node, F> OnComponentInitializedHook<Node> for F
where
F: Fn(Node) -> eyre::Result<()>,
F: Fn(Node) -> eyre::Result<()> + Send,
{
fn on_event(&self, node: Node) -> eyre::Result<()> {
self(node)
@ -89,7 +89,7 @@ where
}
/// A helper trait that is run once the node is started.
pub trait OnNodeStartedHook<Node: FullNodeComponents> {
pub trait OnNodeStartedHook<Node: FullNodeComponents>: Send {
/// Consumes the event hook and runs it.
///
/// If this returns an error, the node launch will be aborted.
@ -99,7 +99,7 @@ pub trait OnNodeStartedHook<Node: FullNodeComponents> {
impl<Node, F> OnNodeStartedHook<Node> for F
where
Node: FullNodeComponents,
F: Fn(FullNode<Node>) -> eyre::Result<()>,
F: Fn(FullNode<Node>) -> eyre::Result<()> + Send,
{
fn on_event(&self, node: FullNode<Node>) -> eyre::Result<()> {
self(node)

View File

@ -13,22 +13,24 @@ pub mod hooks;
/// Support for configuring the higher level node types.
pub mod node;
pub use node::*;
/// Support for configuring the components of a node.
pub mod components;
mod builder;
pub use builder::*;
mod handle;
pub mod rpc;
pub use handle::NodeHandle;
pub mod provider;
pub use builder::*;
pub use handle::NodeHandle;
pub mod rpc;
/// Re-export the core configuration traits.
pub use reth_node_core::cli::config::{
PayloadBuilderConfig, RethNetworkConfig, RethRpcConfig, RethTransactionPoolConfig,
};
// re-export the core config for convenience
pub use reth_node_core::node_config::NodeConfig;

View File

@ -1,18 +1,38 @@
use crate::{
components::FullNodeComponents,
components::{ComponentsBuilder, FullNodeComponents},
provider::FullProvider,
rpc::{RethRpcServerHandles, RpcRegistry},
};
use reth_db::database::Database;
use reth_network::NetworkHandle;
use reth_node_api::{evm::ConfigureEvm, primitives::NodePrimitives, EngineTypes};
use reth_node_core::{
cli::components::FullProvider,
dirs::{ChainPath, DataDirPath},
node_config::NodeConfig,
rpc::builder::{auth::AuthServerHandle, RpcServerHandle},
};
use reth_payload_builder::PayloadBuilderHandle;
use reth_primitives::ChainSpec;
use reth_provider::ChainSpecProvider;
use reth_tasks::TaskExecutor;
use std::marker::PhantomData;
use std::{marker::PhantomData, sync::Arc};
/// A [Node] is a [NodeTypes] that comes with preconfigured components.
///
/// This can be used to configure the builder with a preset of components.
pub trait Node<N>: NodeTypes + Clone {
/// The type that builds the node's pool.
type PoolBuilder;
/// The type that builds the node's network.
type NetworkBuilder;
/// The type that builds the node's payload service.
type PayloadBuilder;
/// Returns the [ComponentsBuilder] for the node.
fn components(
self,
) -> ComponentsBuilder<N, Self::PoolBuilder, Self::PayloadBuilder, Self::NetworkBuilder>;
}
/// The type that configures stateless node types, the node's primitive types.
pub trait NodeTypes: Send + Sync + 'static {
@ -76,20 +96,47 @@ where
}
/// The launched node with all components including RPC handlers.
///
/// This can be used to interact with the launched node.
#[derive(Debug)]
pub struct FullNode<Node: FullNodeComponents> {
pub(crate) evm_config: Node::Evm,
pub(crate) pool: Node::Pool,
pub(crate) network: NetworkHandle,
pub(crate) provider: Node::Provider,
pub(crate) payload_builder: PayloadBuilderHandle<Node::Engine>,
pub(crate) executor: TaskExecutor,
pub(crate) rpc_server_handles: RethRpcServerHandles,
pub(crate) rpc_registry: RpcRegistry<Node>,
/// The evm configuration.
pub evm_config: Node::Evm,
/// The node's transaction pool.
pub pool: Node::Pool,
/// Handle to the node's network.
pub network: NetworkHandle,
/// Provider to interact with the node's database
pub provider: Node::Provider,
/// Handle to the node's payload builder service.
pub payload_builder: PayloadBuilderHandle<Node::Engine>,
/// Task executor for the node.
pub task_executor: TaskExecutor,
/// Handles to the node's rpc servers
pub rpc_server_handles: RethRpcServerHandles,
/// The configured rpc namespaces
pub rpc_registry: RpcRegistry<Node>,
/// The initial node config.
pub(crate) config: NodeConfig,
pub config: NodeConfig,
/// The data dir of the node.
pub(crate) data_dir: ChainPath<DataDirPath>,
pub data_dir: ChainPath<DataDirPath>,
}
impl<Node: FullNodeComponents> FullNode<Node> {
/// Returns the [ChainSpec] of the node.
pub fn chain_spec(&self) -> Arc<ChainSpec> {
self.provider.chain_spec()
}
/// Returns the [RpcServerHandle] to the started rpc server.
pub fn rpc_server_handle(&self) -> &RpcServerHandle {
&self.rpc_server_handles.rpc
}
/// Returns the [AuthServerHandle] to the started authenticated engine API server.
pub fn auth_server_handle(&self) -> &AuthServerHandle {
&self.rpc_server_handles.auth
}
}
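The new accessors make the launched node directly usable from calling code; a sketch follows. The import paths are assumptions, while the `node` field on [NodeHandle] is shown earlier in this diff.

```rust
use reth_node_builder::{components::FullNodeComponents, NodeHandle}; // assumed paths

/// Inspects a running node through the handle returned by `launch()`.
fn inspect_running_node<Node: FullNodeComponents>(handle: &NodeHandle<Node>) {
    let node = &handle.node;
    // the chain spec is resolved through the provider
    println!("chain: {}", node.chain_spec().chain);
    // handles to the regular and the authenticated (engine API) rpc servers
    let _rpc = node.rpc_server_handle();
    let _auth = node.auth_server_handle();
}
```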
impl<Node: FullNodeComponents> Clone for FullNode<Node> {
@ -100,7 +147,7 @@ impl<Node: FullNodeComponents> Clone for FullNode<Node> {
network: self.network.clone(),
provider: self.provider.clone(),
payload_builder: self.payload_builder.clone(),
executor: self.executor.clone(),
task_executor: self.task_executor.clone(),
rpc_server_handles: self.rpc_server_handles.clone(),
rpc_registry: self.rpc_registry.clone(),
config: self.config.clone(),

View File

@ -14,6 +14,7 @@ use reth_node_core::{
},
},
};
use reth_payload_builder::PayloadBuilderHandle;
use reth_rpc::JwtSecret;
use reth_tasks::TaskExecutor;
use reth_tracing::tracing::{debug, info};
@ -94,7 +95,7 @@ impl<Node: FullNodeComponents> fmt::Debug for RpcHooks<Node> {
}
/// Event hook that is called once the rpc server is started.
pub trait OnRpcStarted<Node: FullNodeComponents> {
pub trait OnRpcStarted<Node: FullNodeComponents>: Send {
/// The hook that is called once the rpc server is started.
fn on_rpc_started(
&self,
@ -105,7 +106,7 @@ pub trait OnRpcStarted<Node: FullNodeComponents> {
impl<Node, F> OnRpcStarted<Node> for F
where
F: Fn(RpcContext<'_, Node>, RethRpcServerHandles) -> eyre::Result<()>,
F: Fn(RpcContext<'_, Node>, RethRpcServerHandles) -> eyre::Result<()> + Send,
Node: FullNodeComponents,
{
fn on_rpc_started(
@ -124,14 +125,14 @@ impl<Node: FullNodeComponents> OnRpcStarted<Node> for () {
}
/// Event hook that is called when the rpc server is started.
pub trait ExtendRpcModules<Node: FullNodeComponents> {
pub trait ExtendRpcModules<Node: FullNodeComponents>: Send {
/// The hook that is called once the rpc server is started.
fn extend_rpc_modules(&self, ctx: RpcContext<'_, Node>) -> eyre::Result<()>;
}
impl<Node, F> ExtendRpcModules<Node> for F
where
F: Fn(RpcContext<'_, Node>) -> eyre::Result<()>,
F: Fn(RpcContext<'_, Node>) -> eyre::Result<()> + Send,
Node: FullNodeComponents,
{
fn extend_rpc_modules(&self, ctx: RpcContext<'_, Node>) -> eyre::Result<()> {
@ -223,6 +224,26 @@ impl<'a, Node: FullNodeComponents> RpcContext<'a, Node> {
pub fn node(&self) -> &Node {
&self.node
}
/// Returns the transaction pool instance.
pub fn pool(&self) -> &Node::Pool {
self.node.pool()
}
/// Returns provider to interact with the node.
pub fn provider(&self) -> &Node::Provider {
self.node.provider()
}
/// Returns the handle to the network
pub fn network(&self) -> &NetworkHandle {
self.node.network()
}
/// Returns the handle to the payload builder service
pub fn payload_builder(&self) -> &PayloadBuilderHandle<Node::Engine> {
self.node.payload_builder()
}
}
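A sketch of how the rpc hooks and these accessors fit together on the builder; constructor and preset names are assumptions, as in the earlier sketches.

```rust
use reth_node_builder::NodeBuilder; // assumed import path
use reth_node_core::node_config::NodeConfig;
use reth_node_ethereum::EthereumNode; // assumed import path
use reth_tasks::TaskExecutor;
use reth_tracing::tracing::info;

async fn launch_with_rpc_hooks(config: NodeConfig, executor: TaskExecutor) -> eyre::Result<()> {
    let _handle = NodeBuilder::new(config)
        .testing_node(executor)
        .node(EthereumNode::default())
        .extend_rpc_modules(|ctx| {
            // custom namespaces would be merged here; the context also exposes
            // the node's components directly via the new accessors
            let _pool = ctx.pool();
            let _provider = ctx.provider();
            Ok(())
        })
        .on_rpc_started(|_ctx, handles| {
            info!(addr = ?handles.rpc.http_local_addr(), "rpc server started");
            Ok(())
        })
        .launch()
        .await?;
    Ok(())
}
```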
/// Launch the rpc servers.

View File

@ -51,12 +51,6 @@ pub use dev_args::DevArgs;
mod pruning_args;
pub use pruning_args::PruningArgs;
/// RollupArgs for configuring the op-reth rollup
#[cfg(feature = "optimism")]
mod rollup_args;
#[cfg(feature = "optimism")]
pub use rollup_args::RollupArgs;
pub mod utils;
pub mod types;

View File

@ -34,18 +34,6 @@ pub struct PayloadBuilderArgs {
/// Maximum number of tasks to spawn for building a payload.
#[arg(long = "builder.max-tasks", default_value = "3", value_parser = RangedU64ValueParser::<usize>::new().range(1..))]
pub max_payload_tasks: usize,
/// By default the pending block equals the latest block
/// to save resources and not leak txs from the tx-pool;
/// this flag enables computing the pending block
/// from the tx-pool instead.
///
/// If `compute_pending_block` is not enabled, the payload builder
/// will use the payload attributes from the latest block. Note
/// that this flag is not yet functional.
#[cfg(feature = "optimism")]
#[arg(long = "rollup.compute-pending-block")]
pub compute_pending_block: bool,
}
impl Default for PayloadBuilderArgs {
@ -56,8 +44,6 @@ impl Default for PayloadBuilderArgs {
interval: Duration::from_secs(1),
deadline: SLOT_DURATION,
max_payload_tasks: 3,
#[cfg(feature = "optimism")]
compute_pending_block: false,
}
}
}
@ -82,11 +68,6 @@ impl PayloadBuilderConfig for PayloadBuilderArgs {
fn max_payload_tasks(&self) -> usize {
self.max_payload_tasks
}
#[cfg(feature = "optimism")]
fn compute_pending_block(&self) -> bool {
self.compute_pending_block
}
}
#[derive(Clone, Debug, Default)]

View File

@ -5,18 +5,13 @@ use crate::{
types::{MaxU32, ZeroAsNoneU64},
GasPriceOracleArgs, RpcStateCacheArgs,
},
cli::{
components::{RethNodeComponents, RethRpcComponents, RethRpcServerHandles},
config::RethRpcConfig,
ext::RethNodeCommandConfig,
},
cli::config::RethRpcConfig,
utils::get_or_create_jwt_secret_from_path,
};
use clap::{
builder::{PossibleValue, RangedU64ValueParser, TypedValueParser},
Arg, Args, Command,
};
use futures::TryFutureExt;
use rand::Rng;
use reth_network_api::{NetworkInfo, Peers};
use reth_node_api::{ConfigureEvmEnv, EngineTypes};
@ -32,10 +27,10 @@ use reth_rpc_builder::{
auth::{AuthServerConfig, AuthServerHandle},
constants,
error::RpcError,
EthConfig, IpcServerBuilder, RethRpcModule, RpcModuleBuilder, RpcModuleConfig,
RpcModuleSelection, RpcServerConfig, RpcServerHandle, ServerBuilder, TransportRpcModuleConfig,
EthConfig, IpcServerBuilder, RethRpcModule, RpcModuleConfig, RpcModuleSelection,
RpcServerConfig, RpcServerHandle, ServerBuilder, TransportRpcModuleConfig,
};
use reth_rpc_engine_api::{EngineApi, EngineApiServer};
use reth_rpc_engine_api::EngineApi;
use reth_tasks::TaskSpawner;
use reth_transaction_pool::TransactionPool;
use std::{
@ -43,7 +38,7 @@ use std::{
net::{IpAddr, Ipv4Addr, SocketAddr},
path::PathBuf,
};
use tracing::{debug, info};
use tracing::debug;
/// Default max number of subscriptions per connection.
pub(crate) const RPC_DEFAULT_MAX_SUBS_PER_CONN: u32 = 1024;
@ -268,81 +263,6 @@ impl RpcServerArgs {
self
}
/// Configures and launches _all_ servers.
///
/// Returns the handles for the launched regular RPC server(s) (if any) and the server handle
/// for the auth server that handles the `engine_` API that's accessed by the consensus
/// layer.
pub async fn start_servers<Reth, Engine, Conf, EngineT>(
&self,
components: &Reth,
engine_api: Engine,
jwt_secret: JwtSecret,
conf: &mut Conf,
) -> eyre::Result<RethRpcServerHandles>
where
EngineT: EngineTypes + 'static,
Engine: EngineApiServer<EngineT>,
Reth: RethNodeComponents,
Conf: RethNodeCommandConfig,
{
let auth_config = self.auth_server_config(jwt_secret)?;
let module_config = self.transport_rpc_module_config();
debug!(target: "reth::cli", http=?module_config.http(), ws=?module_config.ws(), "Using RPC module config");
let (mut modules, mut auth_module, mut registry) = RpcModuleBuilder::default()
.with_provider(components.provider())
.with_pool(components.pool())
.with_network(components.network())
.with_events(components.events())
.with_executor(components.task_executor())
.with_evm_config(components.evm_config())
.build_with_auth_server(module_config, engine_api);
let rpc_components = RethRpcComponents {
registry: &mut registry,
modules: &mut modules,
auth_module: &mut auth_module,
};
// apply configured customization
conf.extend_rpc_modules(self, components, rpc_components)?;
let server_config = self.rpc_server_config();
let launch_rpc = modules.clone().start_server(server_config).map_ok(|handle| {
if let Some(url) = handle.ipc_endpoint() {
info!(target: "reth::cli", url=%url, "RPC IPC server started");
}
if let Some(addr) = handle.http_local_addr() {
info!(target: "reth::cli", url=%addr, "RPC HTTP server started");
}
if let Some(addr) = handle.ws_local_addr() {
info!(target: "reth::cli", url=%addr, "RPC WS server started");
}
handle
});
let launch_auth = auth_module.clone().start_server(auth_config).map_ok(|handle| {
let addr = handle.local_addr();
info!(target: "reth::cli", url=%addr, "RPC auth server started");
handle
});
// launch servers concurrently
let (rpc, auth) = futures::future::try_join(launch_rpc, launch_auth).await?;
let handles = RethRpcServerHandles { rpc, auth };
// call hook
let rpc_components = RethRpcComponents {
registry: &mut registry,
modules: &mut modules,
auth_module: &mut auth_module,
};
conf.on_rpc_server_started(self, components, rpc_components, handles.clone())?;
Ok(handles)
}
/// Convenience function for starting a rpc server with configs which extracted from cli args.
pub async fn start_rpc_server<Provider, Pool, Network, Tasks, Events, EvmConfig>(
&self,

View File

@ -1,230 +0,0 @@
//! Components that are used by the node command.
use reth_db::database::Database;
use reth_network::{NetworkEvents, NetworkProtocols};
use reth_network_api::{NetworkInfo, Peers};
use reth_node_api::ConfigureEvmEnv;
use reth_primitives::ChainSpec;
use reth_provider::{
AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader,
DatabaseProviderFactory, EvmEnvProvider, StateProviderFactory,
};
use reth_rpc_builder::{
auth::{AuthRpcModule, AuthServerHandle},
RethModuleRegistry, RpcServerHandle, TransportRpcModules,
};
use reth_tasks::TaskSpawner;
use reth_transaction_pool::TransactionPool;
use std::{marker::PhantomData, sync::Arc};
/// Helper trait to unify all provider traits for simplicity.
pub trait FullProvider<DB: Database>:
DatabaseProviderFactory<DB>
+ BlockReaderIdExt
+ AccountReader
+ StateProviderFactory
+ EvmEnvProvider
+ ChainSpecProvider
+ ChangeSetReader
+ CanonStateSubscriptions
+ Clone
+ Unpin
+ 'static
{
}
impl<T, DB: Database> FullProvider<DB> for T where
T: DatabaseProviderFactory<DB>
+ BlockReaderIdExt
+ AccountReader
+ StateProviderFactory
+ EvmEnvProvider
+ ChainSpecProvider
+ ChangeSetReader
+ CanonStateSubscriptions
+ Clone
+ Unpin
+ 'static
{
}
/// The trait that is implemented for the Node command.
pub trait RethNodeComponents: Clone + Send + Sync + 'static {
/// Underlying database type.
type DB: Database + Clone + Unpin + 'static;
/// The Provider type that is provided by the node itself
type Provider: FullProvider<Self::DB>;
/// The transaction pool type
type Pool: TransactionPool + Clone + Unpin + 'static;
/// The network type used to communicate with p2p.
type Network: NetworkInfo + Peers + NetworkProtocols + NetworkEvents + Clone + Unpin + 'static;
/// The events type used to create subscriptions.
type Events: CanonStateSubscriptions + Clone + 'static;
/// The type that is used to spawn tasks.
type Tasks: TaskSpawner + Clone + Unpin + 'static;
/// The type that defines how to configure the EVM before execution.
type EvmConfig: ConfigureEvmEnv + 'static;
/// Returns the instance of the provider
fn provider(&self) -> Self::Provider;
/// Returns the instance of the task executor.
fn task_executor(&self) -> Self::Tasks;
/// Returns the instance of the transaction pool.
fn pool(&self) -> Self::Pool;
/// Returns the instance of the network API.
fn network(&self) -> Self::Network;
/// Returns the instance of the events subscription handler.
fn events(&self) -> Self::Events;
/// Returns the instance of the EVM config.
fn evm_config(&self) -> Self::EvmConfig;
/// Helper function to return the chain spec.
fn chain_spec(&self) -> Arc<ChainSpec> {
self.provider().chain_spec()
}
}
/// Helper container to encapsulate [RethModuleRegistry], [TransportRpcModules] and [AuthRpcModule].
///
/// This can be used to access installed modules, or create commonly used handlers like
/// [reth_rpc::EthApi], and ultimately merge additional rpc handler into the configured transport
/// modules [TransportRpcModules] as well as configured authenticated methods [AuthRpcModule].
#[derive(Debug)]
#[allow(clippy::type_complexity)]
pub struct RethRpcComponents<'a, Reth: RethNodeComponents> {
/// A helper type that holds instances of the configured modules.
///
/// This provides easy access to rpc handlers, such as [RethModuleRegistry::eth_api].
pub registry: &'a mut RethModuleRegistry<
Reth::Provider,
Reth::Pool,
Reth::Network,
Reth::Tasks,
Reth::Events,
Reth::EvmConfig,
>,
/// Holds installed modules per transport type.
///
/// This can be used to merge additional modules into the configured transports (http, ipc,
/// ws). See [TransportRpcModules::merge_configured]
pub modules: &'a mut TransportRpcModules,
/// Holds jwt authenticated rpc module.
///
/// This can be used to merge additional modules into the configured authenticated methods
pub auth_module: &'a mut AuthRpcModule,
}
/// A Generic implementation of the RethNodeComponents trait.
///
/// Represents components required for the Reth node.
#[derive(Clone, Debug)]
pub struct RethNodeComponentsImpl<DB, Provider, Pool, Network, Events, Tasks, EvmConfig> {
/// Represents underlying database type.
__phantom: PhantomData<DB>,
/// Represents the provider instance.
pub provider: Provider,
/// Represents the transaction pool instance.
pub pool: Pool,
/// Represents the network instance used for communication.
pub network: Network,
/// Represents the task executor instance.
pub task_executor: Tasks,
/// Represents the events subscription handler instance.
pub events: Events,
/// Represents the type that is used to configure the EVM before execution.
pub evm_config: EvmConfig,
}
impl<DB, Provider, Pool, Network, Events, Tasks, EvmConfig>
RethNodeComponentsImpl<DB, Provider, Pool, Network, Events, Tasks, EvmConfig>
{
/// Create new instance of the node components.
pub fn new(
provider: Provider,
pool: Pool,
network: Network,
task_executor: Tasks,
events: Events,
evm_config: EvmConfig,
) -> Self {
Self {
provider,
pool,
network,
task_executor,
events,
evm_config,
__phantom: std::marker::PhantomData,
}
}
}
impl<DB, Provider, Pool, Network, Events, Tasks, EvmConfig> RethNodeComponents
for RethNodeComponentsImpl<DB, Provider, Pool, Network, Events, Tasks, EvmConfig>
where
DB: Database + Clone + Unpin + 'static,
Provider: FullProvider<DB> + Clone + 'static,
Tasks: TaskSpawner + Clone + Unpin + 'static,
Pool: TransactionPool + Clone + Unpin + 'static,
Network: NetworkInfo + Peers + NetworkProtocols + NetworkEvents + Clone + Unpin + 'static,
Events: CanonStateSubscriptions + Clone + 'static,
EvmConfig: ConfigureEvmEnv + 'static,
{
type DB = DB;
type Provider = Provider;
type Pool = Pool;
type Network = Network;
type Events = Events;
type Tasks = Tasks;
type EvmConfig = EvmConfig;
fn provider(&self) -> Self::Provider {
self.provider.clone()
}
fn task_executor(&self) -> Self::Tasks {
self.task_executor.clone()
}
fn pool(&self) -> Self::Pool {
self.pool.clone()
}
fn network(&self) -> Self::Network {
self.network.clone()
}
fn events(&self) -> Self::Events {
self.events.clone()
}
fn evm_config(&self) -> Self::EvmConfig {
self.evm_config.clone()
}
}
/// Contains the handles to the spawned RPC servers.
///
/// This can be used to access the endpoints of the servers.
///
/// # Example
///
/// ```rust
/// use reth_node_core::{cli::components::RethRpcServerHandles, rpc::api::EthApiClient};
/// # async fn t(handles: RethRpcServerHandles) {
/// let client = handles.rpc.http_client().expect("http server not started");
/// let block_number = client.block_number().await.unwrap();
/// # }
/// ```
#[derive(Debug, Clone)]
pub struct RethRpcServerHandles {
/// The regular RPC server handle.
pub rpc: RpcServerHandle,
/// The handle to the auth server (engine API)
pub auth: AuthServerHandle,
}

View File

@ -105,10 +105,6 @@ pub trait PayloadBuilderConfig {
/// Maximum number of tasks to spawn for building a payload.
fn max_payload_tasks(&self) -> usize;
/// Returns whether or not to construct the pending block.
#[cfg(feature = "optimism")]
fn compute_pending_block(&self) -> bool;
}
/// A trait that represents the configured network and can be used to apply additional configuration

View File

@ -1,125 +0,0 @@
//! A real or test database type
use crate::dirs::{ChainPath, DataDirPath, MaybePlatformPath};
use alloy_chains::Chain;
use reth_db::{
init_db,
mdbx::DatabaseArguments,
test_utils::{create_test_rw_db, TempDatabase},
DatabaseEnv,
};
use reth_interfaces::db::LogLevel;
use std::{str::FromStr, sync::Arc};
/// A type that represents either a _real_ (represented by a path), or _test_ database, which will
/// use a [TempDatabase].
#[derive(Debug, Clone)]
pub enum DatabaseBuilder {
/// The real database type, with a specified data dir
Real(MaybePlatformPath<DataDirPath>),
/// The test database type
Test,
}
impl DatabaseBuilder {
/// Creates a _test_ database
pub fn test() -> Self {
Self::Test
}
/// Initializes and returns the [DatabaseInstance] depending on the current database type.
///
/// If the [DatabaseBuilder] is test, then the [ChainPath] constructed will be derived from the
/// db path of the [TempDatabase] and the given chain. The [LogLevel] will not be used.
///
/// If the [DatabaseBuilder] is real, then the db will be initialized using the given log level
/// and the [ChainPath] will be derived from the given path and chain. This database path is
/// then passed into [init_db].
pub fn init_db(
self,
log_level: Option<LogLevel>,
chain: Chain,
) -> eyre::Result<DatabaseInstance> {
match self {
DatabaseBuilder::Test => {
let db = create_test_rw_db();
let db_path_str = db.path().to_str().expect("Path is not valid unicode");
let path = MaybePlatformPath::<DataDirPath>::from_str(db_path_str)
.expect("Path is not valid");
let data_dir = path.unwrap_or_chain_default(chain);
Ok(DatabaseInstance::Test { db, data_dir })
}
DatabaseBuilder::Real(path) => {
let data_dir = path.unwrap_or_chain_default(chain);
let db_path = data_dir.db_path();
tracing::info!(target: "reth::cli", path = ?db_path, "Opening database");
let db = Arc::new(
init_db(db_path.clone(), DatabaseArguments::default().log_level(log_level))?
.with_metrics(),
);
Ok(DatabaseInstance::Real { db, data_dir })
}
}
}
}
/// The [Default] implementation for [DatabaseBuilder] uses the _real_ variant, using the default
/// value for the inner [MaybePlatformPath].
impl Default for DatabaseBuilder {
fn default() -> Self {
Self::Real(MaybePlatformPath::<DataDirPath>::default())
}
}
/// A constructed database type, with a [ChainPath].
#[derive(Debug, Clone)]
pub enum DatabaseInstance {
/// The test database
Test {
/// The database
db: Arc<TempDatabase<DatabaseEnv>>,
/// The data dir
data_dir: ChainPath<DataDirPath>,
},
/// The real database
Real {
/// The database
db: Arc<DatabaseEnv>,
/// The data dir
data_dir: ChainPath<DataDirPath>,
},
}
impl DatabaseInstance {
/// Returns the data dir for this database instance
pub fn data_dir(&self) -> &ChainPath<DataDirPath> {
match self {
Self::Test { data_dir, .. } => data_dir,
Self::Real { data_dir, .. } => data_dir,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_database_db_dir() {
// create temp dir to test that the db path is correct
let tempdir = tempfile::tempdir().unwrap();
let expected_datadir_path = tempdir.path().to_path_buf();
let expected_db_path = tempdir.path().join("db");
let datadir_path = MaybePlatformPath::<DataDirPath>::from(tempdir.path().to_path_buf());
let db = DatabaseBuilder::Real(datadir_path);
let db = db.init_db(None, Chain::mainnet()).unwrap();
// ensure that the datadir path is correct
assert_eq!(db.data_dir().data_dir_path(), expected_datadir_path);
// ensure that the db path is correct
assert_eq!(db.data_dir().db_path(), expected_db_path);
}
}

View File

@ -1,360 +0,0 @@
//! Support for integrating customizations into the CLI.
use crate::cli::{
components::{RethNodeComponents, RethRpcComponents, RethRpcServerHandles},
config::{PayloadBuilderConfig, RethNetworkConfig, RethRpcConfig},
};
use clap::Args;
use reth_basic_payload_builder::{
BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig, PayloadBuilder,
};
use reth_node_api::EngineTypes;
use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService};
use reth_provider::CanonStateSubscriptions;
use reth_tasks::TaskSpawner;
use std::{fmt, marker::PhantomData};
/// A trait that allows for extending parts of the CLI with additional functionality.
///
/// This is intended as a way to _extend_ the node command. For example, to register
/// additional RPC namespaces.
pub trait RethCliExt {
/// Provides additional configuration for the node CLI command.
///
/// This supports additional CLI arguments that can be used to modify the node configuration.
///
/// If no additional CLI arguments are required, the [NoArgs] wrapper type can be used.
type Node: RethNodeCommandExt;
}
/// The default CLI extension.
impl RethCliExt for () {
type Node = DefaultRethNodeCommandConfig;
}
/// A trait that allows for extending and customizing parts of the reth node command.
///
/// The functions are invoked during the initialization of the node command in the following order:
///
/// 1. [configure_network](RethNodeCommandConfig::configure_network)
/// 2. [on_components_initialized](RethNodeCommandConfig::on_components_initialized)
/// 3. [spawn_payload_builder_service](RethNodeCommandConfig::spawn_payload_builder_service)
/// 4. [extend_rpc_modules](RethNodeCommandConfig::extend_rpc_modules)
/// 5. [on_rpc_server_started](RethNodeCommandConfig::on_rpc_server_started)
/// 6. [on_node_started](RethNodeCommandConfig::on_node_started)
pub trait RethNodeCommandConfig: fmt::Debug {
/// Invoked with the network configuration before the network is configured.
///
/// This allows additional configuration of the network before it is launched.
fn configure_network<Conf, Reth>(
&mut self,
config: &mut Conf,
components: &Reth,
) -> eyre::Result<()>
where
Conf: RethNetworkConfig,
Reth: RethNodeComponents,
{
let _ = config;
let _ = components;
Ok(())
}
/// Event hook called once all components have been initialized.
///
/// This is called as soon as the node components have been initialized.
fn on_components_initialized<Reth: RethNodeComponents>(
&mut self,
components: &Reth,
) -> eyre::Result<()> {
let _ = components;
Ok(())
}
/// Event hook called once the node has been launched.
///
/// This is called last after the node has been launched.
fn on_node_started<Reth: RethNodeComponents>(&mut self, components: &Reth) -> eyre::Result<()> {
let _ = components;
Ok(())
}
/// Event hook called once the rpc servers has been started.
///
/// This is called after the rpc server has been started.
fn on_rpc_server_started<Conf, Reth>(
&mut self,
config: &Conf,
components: &Reth,
rpc_components: RethRpcComponents<'_, Reth>,
handles: RethRpcServerHandles,
) -> eyre::Result<()>
where
Conf: RethRpcConfig,
Reth: RethNodeComponents,
{
let _ = config;
let _ = components;
let _ = rpc_components;
let _ = handles;
Ok(())
}
/// Allows for registering additional RPC modules for the transports.
///
/// This is expected to call the merge functions of [reth_rpc_builder::TransportRpcModules], for
/// example [reth_rpc_builder::TransportRpcModules::merge_configured].
///
/// This is called before the rpc server will be started [Self::on_rpc_server_started].
fn extend_rpc_modules<Conf, Reth>(
&mut self,
config: &Conf,
components: &Reth,
rpc_components: RethRpcComponents<'_, Reth>,
) -> eyre::Result<()>
where
Conf: RethRpcConfig,
Reth: RethNodeComponents,
{
let _ = config;
let _ = components;
let _ = rpc_components;
Ok(())
}
/// Configures the [PayloadBuilderService] for the node, spawns it and returns the
/// [PayloadBuilderHandle].
///
/// By default this spawns a [BasicPayloadJobGenerator] with the default configuration
/// [BasicPayloadJobGeneratorConfig].
fn spawn_payload_builder_service<Conf, Reth, Builder, Engine>(
&mut self,
conf: &Conf,
components: &Reth,
payload_builder: Builder,
) -> eyre::Result<PayloadBuilderHandle<Engine>>
where
Conf: PayloadBuilderConfig,
Reth: RethNodeComponents,
Engine: EngineTypes + 'static,
Builder: PayloadBuilder<
Reth::Pool,
Reth::Provider,
Attributes = Engine::PayloadBuilderAttributes,
BuiltPayload = Engine::BuiltPayload,
> + Unpin
+ 'static,
{
let payload_job_config = BasicPayloadJobGeneratorConfig::default()
.interval(conf.interval())
.deadline(conf.deadline())
.max_payload_tasks(conf.max_payload_tasks())
.extradata(conf.extradata_rlp_bytes())
.max_gas_limit(conf.max_gas_limit());
// no extradata for optimism
#[cfg(feature = "optimism")]
let payload_job_config = payload_job_config.extradata(Default::default());
let payload_generator = BasicPayloadJobGenerator::with_builder(
components.provider(),
components.pool(),
components.task_executor(),
payload_job_config,
components.chain_spec(),
payload_builder,
);
let (payload_service, payload_builder) = PayloadBuilderService::new(
payload_generator,
components.events().canonical_state_stream(),
);
components
.task_executor()
.spawn_critical("payload builder service", Box::pin(payload_service));
Ok(payload_builder)
}
}
/// A trait that allows for extending parts of the CLI with additional functionality.
pub trait RethNodeCommandExt: RethNodeCommandConfig + fmt::Debug + clap::Args {}
// blanket impl for all types that implement the required traits.
impl<T> RethNodeCommandExt for T where T: RethNodeCommandConfig + fmt::Debug + clap::Args {}
/// The default configuration for the reth node command.
///
/// This is a convenience type for [NoArgs<()>].
#[derive(Debug, Clone, Copy, Default, Args)]
#[non_exhaustive]
pub struct DefaultRethNodeCommandConfig;
impl RethNodeCommandConfig for DefaultRethNodeCommandConfig {}
impl RethNodeCommandConfig for () {}
/// A helper type for [RethCliExt] extensions that don't require any additional clap arguments.
#[derive(Debug, Clone, Copy)]
pub struct NoArgsCliExt<Conf>(PhantomData<Conf>);
impl<Conf: RethNodeCommandConfig> RethCliExt for NoArgsCliExt<Conf> {
type Node = NoArgs<Conf>;
}
/// A helper struct that allows for wrapping a [RethNodeCommandConfig] value without providing
/// additional CLI arguments.
///
/// Note: This type must be filled with a [RethNodeCommandConfig] manually before executing
/// the reth node command.
#[derive(Debug, Clone, Copy, Default, Args)]
pub struct NoArgs<T = ()> {
#[arg(skip)]
inner: Option<T>,
}
impl<T> NoArgs<T> {
/// Creates a new instance of the wrapper type.
pub fn with(inner: T) -> Self {
Self { inner: Some(inner) }
}
/// Sets the inner value.
pub fn set(&mut self, inner: T) {
self.inner = Some(inner)
}
/// Replaces the configured value with `inner`, returning a wrapper around the new value.
pub fn map<U>(self, inner: U) -> NoArgs<U> {
NoArgs::with(inner)
}
/// Returns the inner value if it exists.
pub fn inner(&self) -> Option<&T> {
self.inner.as_ref()
}
/// Returns a mutable reference to the inner value if it exists.
pub fn inner_mut(&mut self) -> Option<&mut T> {
self.inner.as_mut()
}
/// Consumes the wrapper and returns the inner value if it exists.
pub fn into_inner(self) -> Option<T> {
self.inner
}
}
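A quick usage sketch of the wrapper, using the no-op `()` config that implements [RethNodeCommandConfig] above:
let mut args = NoArgs::with(());
assert!(args.inner().is_some());
// replace the value in place
args.set(());
// swap the wrapped value (and possibly its type)
let args = args.map(());
assert!(args.into_inner().is_some());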
impl<T: RethNodeCommandConfig> RethNodeCommandConfig for NoArgs<T> {
fn configure_network<Conf, Reth>(
&mut self,
config: &mut Conf,
components: &Reth,
) -> eyre::Result<()>
where
Conf: RethNetworkConfig,
Reth: RethNodeComponents,
{
if let Some(conf) = self.inner_mut() {
conf.configure_network(config, components)
} else {
Ok(())
}
}
fn on_components_initialized<Reth: RethNodeComponents>(
&mut self,
components: &Reth,
) -> eyre::Result<()> {
if let Some(conf) = self.inner_mut() {
conf.on_components_initialized(components)
} else {
Ok(())
}
}
fn on_node_started<Reth: RethNodeComponents>(&mut self, components: &Reth) -> eyre::Result<()> {
if let Some(conf) = self.inner_mut() {
conf.on_node_started(components)
} else {
Ok(())
}
}
fn on_rpc_server_started<Conf, Reth>(
&mut self,
config: &Conf,
components: &Reth,
rpc_components: RethRpcComponents<'_, Reth>,
handles: RethRpcServerHandles,
) -> eyre::Result<()>
where
Conf: RethRpcConfig,
Reth: RethNodeComponents,
{
if let Some(conf) = self.inner_mut() {
conf.on_rpc_server_started(config, components, rpc_components, handles)
} else {
Ok(())
}
}
fn extend_rpc_modules<Conf, Reth>(
&mut self,
config: &Conf,
components: &Reth,
rpc_components: RethRpcComponents<'_, Reth>,
) -> eyre::Result<()>
where
Conf: RethRpcConfig,
Reth: RethNodeComponents,
{
if let Some(conf) = self.inner_mut() {
conf.extend_rpc_modules(config, components, rpc_components)
} else {
Ok(())
}
}
fn spawn_payload_builder_service<Conf, Reth, Builder, Engine>(
&mut self,
conf: &Conf,
components: &Reth,
payload_builder: Builder,
) -> eyre::Result<PayloadBuilderHandle<Engine>>
where
Conf: PayloadBuilderConfig,
Reth: RethNodeComponents,
Engine: EngineTypes + 'static,
Builder: PayloadBuilder<
Reth::Pool,
Reth::Provider,
Attributes = Engine::PayloadBuilderAttributes,
BuiltPayload = Engine::BuiltPayload,
> + Unpin
+ 'static,
{
self.inner_mut()
.ok_or_else(|| eyre::eyre!("config value must be set"))?
.spawn_payload_builder_service(conf, components, payload_builder)
}
}
impl<T> From<T> for NoArgs<T> {
fn from(value: T) -> Self {
Self::with(value)
}
}
#[cfg(test)]
mod tests {
use super::*;
fn assert_ext<T: RethNodeCommandExt>() {}
#[test]
fn ensure_ext() {
assert_ext::<DefaultRethNodeCommandConfig>();
assert_ext::<NoArgs<()>>();
}
}

View File

@ -1,7 +1,4 @@
//! Types for the CLI.
//! Additional CLI configuration support.
pub mod components;
pub mod config;
pub mod db_type;
pub mod ext;
pub mod runner;

View File

@ -10,6 +10,7 @@
pub mod args;
pub mod cli;
pub mod dirs;
pub mod engine_api_store;
pub mod events;
pub mod exit;
pub mod init;

View File

@ -5,8 +5,8 @@ use crate::{
get_secret_key, DatabaseArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs,
PruningArgs, RpcServerArgs, TxPoolArgs,
},
cli::{config::RethTransactionPoolConfig, db_type::DatabaseBuilder},
dirs::{ChainPath, DataDirPath, MaybePlatformPath},
cli::config::RethTransactionPoolConfig,
dirs::{ChainPath, DataDirPath},
metrics::prometheus_exporter,
utils::{get_single_header, write_peers_to_file},
};
@ -142,9 +142,6 @@ pub static PROMETHEUS_RECORDER_HANDLE: Lazy<PrometheusHandle> =
/// ```
#[derive(Debug, Clone)]
pub struct NodeConfig {
/// The test database
pub database: DatabaseBuilder,
/// The path to the configuration file to use.
pub config: Option<PathBuf>,
@ -199,17 +196,12 @@ pub struct NodeConfig {
/// All pruning related arguments
pub pruning: PruningArgs,
/// Rollup related arguments
#[cfg(feature = "optimism")]
pub rollup: crate::args::RollupArgs,
}
impl NodeConfig {
/// Creates a testing [NodeConfig], causing the database to be launched ephemerally.
pub fn test() -> Self {
let mut test = Self {
database: DatabaseBuilder::test(),
config: None,
chain: MAINNET.clone(),
metrics: None,
@ -223,8 +215,6 @@ impl NodeConfig {
db: DatabaseArgs::default(),
dev: DevArgs::default(),
pruning: PruningArgs::default(),
#[cfg(feature = "optimism")]
rollup: crate::args::RollupArgs::default(),
};
// set all ports to zero by default for test instances
@ -232,9 +222,9 @@ impl NodeConfig {
test
}
/// Set the datadir for the node
pub fn with_datadir(mut self, datadir: MaybePlatformPath<DataDirPath>) -> Self {
self.database = DatabaseBuilder::Real(datadir);
/// Sets --dev mode for the node
pub const fn dev(mut self) -> Self {
self.dev.dev = true;
self
}
@ -316,13 +306,6 @@ impl NodeConfig {
self
}
/// Set the rollup args for the node
#[cfg(feature = "optimism")]
pub fn with_rollup(mut self, rollup: crate::args::RollupArgs) -> Self {
self.rollup = rollup;
self
}
/// Get the network secret from the given data dir
pub fn network_secret(&self, data_dir: &ChainPath<DataDirPath>) -> eyre::Result<SecretKey> {
let network_secret_path =
@ -392,6 +375,28 @@ impl NodeConfig {
}
}
/// Create the [NetworkConfig] for the node
pub fn network_config<C>(
&self,
config: &Config,
client: C,
executor: TaskExecutor,
head: Head,
data_dir: &ChainPath<DataDirPath>,
) -> eyre::Result<NetworkConfig<C>> {
info!(target: "reth::cli", "Connecting to P2P network");
let secret_key = self.network_secret(data_dir)?;
let default_peers_path = data_dir.known_peers_path();
Ok(self.load_network_config(
config,
client,
executor.clone(),
head,
secret_key,
default_peers_path.clone(),
))
}
/// Create the [NetworkBuilder].
///
/// This only configures it and does not spawn it.
@ -406,18 +411,7 @@ impl NodeConfig {
where
C: BlockNumReader,
{
info!(target: "reth::cli", "Connecting to P2P network");
let secret_key = self.network_secret(data_dir)?;
let default_peers_path = data_dir.known_peers_path();
let network_config = self.load_network_config(
config,
client,
executor.clone(),
head,
secret_key,
default_peers_path.clone(),
);
let network_config = self.network_config(config, client, executor, head, data_dir)?;
let builder = NetworkManager::builder(network_config).await?;
Ok(builder)
}
@ -773,14 +767,6 @@ impl NodeConfig {
self.network.port + self.instance - 1,
)));
// When `sequencer_endpoint` is configured, the node will forward all transactions to a
// Sequencer node for execution and inclusion on L1, and disable its own txpool
// gossip to prevent other parties in the network from learning about them.
#[cfg(feature = "optimism")]
let cfg_builder = cfg_builder
.sequencer_endpoint(self.rollup.sequencer_http.clone())
.disable_tx_gossip(self.rollup.disable_txpool_gossip);
cfg_builder.build(client)
}
@ -914,7 +900,6 @@ impl NodeConfig {
impl Default for NodeConfig {
fn default() -> Self {
Self {
database: DatabaseBuilder::default(),
config: None,
chain: MAINNET.clone(),
metrics: None,
@ -928,8 +913,6 @@ impl Default for NodeConfig {
db: DatabaseArgs::default(),
dev: DevArgs::default(),
pruning: PruningArgs::default(),
#[cfg(feature = "optimism")]
rollup: crate::args::RollupArgs::default(),
}
}
}

View File

@ -25,14 +25,12 @@ reth-transaction-pool.workspace = true
reth-network.workspace = true
revm.workspace = true
# async
async-trait.workspace = true
# misc
eyre.workspace = true
serde.workspace = true
[dev-dependencies]
reth-db.workspace = true
[features]
# This is a workaround for the reth-cli crate to allow this as a mandatory dependency without breaking the build even if unused.
# This makes managing features and testing the workspace easier because clippy always builds all members if --workspace is provided
optimism = []
reth-db.workspace = true

View File

@ -16,6 +16,5 @@ pub use engine::EthEngineTypes;
/// [ConfigureEvmEnv](reth_node_api::ConfigureEvmEnv) trait.
pub mod evm;
pub use evm::EthEvmConfig;
#[cfg(not(feature = "optimism"))]
pub mod node;
pub use node::EthereumNode;

View File

@ -5,7 +5,7 @@ use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGenera
use reth_network::NetworkHandle;
use reth_node_builder::{
components::{ComponentsBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder},
node::{FullNodeTypes, NodeTypes},
node::{FullNodeTypes, Node, NodeTypes},
BuilderContext, PayloadBuilderConfig,
};
use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService};
@ -20,12 +20,11 @@ use reth_transaction_pool::{
#[derive(Debug, Default, Clone, Copy)]
#[non_exhaustive]
pub struct EthereumNode;
// TODO make this stateful with evm config
impl EthereumNode {
/// Returns a [ComponentsBuilder] configured for a regular Ethereum node.
pub fn components<Node>(
) -> ComponentsBuilder<Node, EthereumPoolBuilder, EthereumPayloadBuilder, EthereumNetwork>
) -> ComponentsBuilder<Node, EthereumPoolBuilder, EthereumPayloadBuilder, EthereumNetworkBuilder>
where
Node: FullNodeTypes<Engine = EthEngineTypes>,
{
@ -33,7 +32,7 @@ impl EthereumNode {
.node_types::<Node>()
.pool(EthereumPoolBuilder::default())
.payload(EthereumPayloadBuilder::default())
.network(EthereumNetwork::default())
.network(EthereumNetworkBuilder::default())
}
}
@ -43,7 +42,26 @@ impl NodeTypes for EthereumNode {
type Evm = EthEvmConfig;
fn evm_config(&self) -> Self::Evm {
todo!()
EthEvmConfig::default()
}
}
impl<N> Node<N> for EthereumNode
where
N: FullNodeTypes<Engine = EthEngineTypes>,
{
type PoolBuilder = EthereumPoolBuilder;
type NetworkBuilder = EthereumNetworkBuilder;
type PayloadBuilder = EthereumPayloadBuilder;
fn components(
self,
) -> ComponentsBuilder<N, Self::PoolBuilder, Self::PayloadBuilder, Self::NetworkBuilder> {
ComponentsBuilder::default()
.node_types::<N>()
.pool(EthereumPoolBuilder::default())
.payload(EthereumPayloadBuilder::default())
.network(EthereumNetworkBuilder::default())
}
}
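With this [Node] impl in place, an Ethereum node can be wired through the builder in the same way as the optimism test further down; a sketch, assuming `config` and `db` already exist in scope:
let _builder = NodeBuilder::new(config)
    .with_database(db)
    .with_types(EthereumNode::default())
    .with_components(EthereumNode::components());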
@ -63,7 +81,7 @@ where
{
type Pool = EthTransactionPool<Node::Provider, DiskFileBlobStore>;
fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> {
async fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> {
let data_dir = ctx.data_dir();
let blob_store = DiskFileBlobStore::open(data_dir.blobstore_path(), Default::default())?;
let validator = TransactionValidationTaskExecutor::eth_builder(ctx.chain_spec())
@ -128,7 +146,7 @@ where
Node: FullNodeTypes<Engine = EthEngineTypes>,
Pool: TransactionPool + Unpin + 'static,
{
fn spawn_payload_service(
async fn spawn_payload_service(
self,
ctx: &BuilderContext<Node>,
pool: Pool,
@ -162,17 +180,21 @@ where
/// A basic ethereum network builder.
#[derive(Debug, Default, Clone, Copy)]
pub struct EthereumNetwork {
pub struct EthereumNetworkBuilder {
// TODO add closure to modify network
}
impl<Node, Pool> NetworkBuilder<Node, Pool> for EthereumNetwork
impl<Node, Pool> NetworkBuilder<Node, Pool> for EthereumNetworkBuilder
where
Node: FullNodeTypes,
Pool: TransactionPool + Unpin + 'static,
{
fn build_network(self, ctx: &BuilderContext<Node>, pool: Pool) -> eyre::Result<NetworkHandle> {
let network = ctx.network_builder_blocking()?;
async fn build_network(
self,
ctx: &BuilderContext<Node>,
pool: Pool,
) -> eyre::Result<NetworkHandle> {
let network = ctx.network_builder().await?;
let handle = ctx.start_network(network, pool);
Ok(handle)

View File

@ -1,4 +1,3 @@
#[cfg(not(feature = "optimism"))]
mod builder;
fn main() {}

View File

@ -27,7 +27,8 @@ reth-transaction-pool.workspace = true
reth-network.workspace = true
revm.workspace = true
# io
# misc
clap.workspace = true
serde.workspace = true
eyre.workspace = true

View File

@ -1,4 +1,6 @@
//! clap [Args](clap::Args) for op-reth rollup configuration
//! Additional Node command arguments.
//! clap [Args](clap::Args) for optimism rollup configuration
/// Parameters for rollup configuration
#[derive(Debug, Clone, Default, PartialEq, Eq, clap::Args)]
@ -16,6 +18,17 @@ pub struct RollupArgs {
/// prior to beginning normal syncing.
#[arg(long = "rollup.enable-genesis-walkback")]
pub enable_genesis_walkback: bool,
/// By default the pending block equals the latest block
/// to save resources and not leak txs from the tx-pool;
/// this flag enables computing the pending block
/// from the tx-pool instead.
///
/// If `compute_pending_block` is not enabled, the payload builder
/// will use the payload attributes from the latest block. Note
/// that this flag is not yet functional.
#[arg(long = "rollup.compute-pending-block")]
pub compute_pending_block: bool,
}
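A sketch of how the new flag surfaces through clap; the `Cmd` wrapper and the test below are hypothetical:
#[derive(Debug, clap::Parser)]
struct Cmd {
    #[command(flatten)]
    rollup: RollupArgs,
}
#[test]
fn parses_compute_pending_block() {
    use clap::Parser;
    // the flag defaults to false and is enabled only when explicitly passed
    let args = Cmd::parse_from(["op-reth", "--rollup.compute-pending-block"]).rollup;
    assert!(args.compute_pending_block);
    assert!(!args.disable_txpool_gossip);
}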
#[cfg(test)]

View File

@ -19,5 +19,6 @@ pub use engine::OptimismEngineTypes;
pub mod evm;
pub use evm::OptimismEvmConfig;
pub mod args;
pub mod node;
pub use node::OptimismNode;

View File

@ -1,12 +1,12 @@
//! Optimism Node types config.
use crate::{OptimismEngineTypes, OptimismEvmConfig};
use crate::{args::RollupArgs, OptimismEngineTypes, OptimismEvmConfig};
use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig};
use reth_network::NetworkHandle;
use reth_network::{NetworkHandle, NetworkManager};
use reth_node_builder::{
components::{ComponentsBuilder, NetworkBuilder, PayloadServiceBuilder, PoolBuilder},
node::{FullNodeTypes, NodeTypes},
BuilderContext, PayloadBuilderConfig,
BuilderContext, Node, PayloadBuilderConfig,
};
use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService};
use reth_provider::CanonStateSubscriptions;
@ -17,23 +17,48 @@ use reth_transaction_pool::{
};
/// Type configuration for a regular Optimism node.
#[derive(Debug, Default, Clone, Copy)]
#[derive(Debug, Default, Clone)]
#[non_exhaustive]
pub struct OptimismNode;
// TODO make this stateful with evm config
pub struct OptimismNode {
/// Additional Optimism args
pub args: RollupArgs,
}
impl OptimismNode {
/// Returns a [`ComponentsBuilder`] configured for a regular Ethereum node.
/// Creates a new instance of the Optimism node type.
pub const fn new(args: RollupArgs) -> Self {
Self { args }
}
/// Returns the components for the given [RollupArgs].
pub fn components<Node>(
) -> ComponentsBuilder<Node, OptimismPoolBuilder, OptimismPayloadBuilder, OptimismNetwork>
args: RollupArgs,
) -> ComponentsBuilder<Node, OptimismPoolBuilder, OptimismPayloadBuilder, OptimismNetworkBuilder>
where
Node: FullNodeTypes<Engine = OptimismEngineTypes>,
{
let RollupArgs { sequencer_http, disable_txpool_gossip, compute_pending_block, .. } = args;
ComponentsBuilder::default()
.node_types::<Node>()
.pool(OptimismPoolBuilder::default())
.payload(OptimismPayloadBuilder::default())
.network(OptimismNetwork)
.payload(OptimismPayloadBuilder::new(compute_pending_block))
.network(OptimismNetworkBuilder { sequencer_http, disable_txpool_gossip })
}
}
impl<N> Node<N> for OptimismNode
where
N: FullNodeTypes<Engine = OptimismEngineTypes>,
{
type PoolBuilder = OptimismPoolBuilder;
type NetworkBuilder = OptimismNetworkBuilder;
type PayloadBuilder = OptimismPayloadBuilder;
fn components(
self,
) -> ComponentsBuilder<N, Self::PoolBuilder, Self::PayloadBuilder, Self::NetworkBuilder> {
let Self { args } = self;
Self::components(args)
}
}
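Sketch of threading parsed rollup args into the builder, mirroring the test further down (`config`, `db`, and `rollup_args` are assumed to exist in scope):
let _builder = NodeBuilder::new(config)
    .with_database(db)
    .with_types(OptimismNode::new(rollup_args.clone()))
    .with_components(OptimismNode::components(rollup_args));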
@ -43,7 +68,7 @@ impl NodeTypes for OptimismNode {
type Evm = OptimismEvmConfig;
fn evm_config(&self) -> Self::Evm {
todo!()
OptimismEvmConfig::default()
}
}
@ -53,9 +78,7 @@ impl NodeTypes for OptimismNode {
/// config.
#[derive(Debug, Default, Clone, Copy)]
#[non_exhaustive]
pub struct OptimismPoolBuilder {
// TODO add options for txpool args
}
pub struct OptimismPoolBuilder;
impl<Node> PoolBuilder<Node> for OptimismPoolBuilder
where
@ -63,7 +86,7 @@ where
{
type Pool = EthTransactionPool<Node::Provider, DiskFileBlobStore>;
fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> {
async fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> {
let data_dir = ctx.data_dir();
let blob_store = DiskFileBlobStore::open(data_dir.blobstore_path(), Default::default())?;
let validator = TransactionValidationTaskExecutor::eth_builder(ctx.chain_spec())
@ -118,29 +141,47 @@ where
}
}
/// A basic optimism payload service.
/// A basic optimism payload service builder
#[derive(Debug, Default, Clone)]
#[non_exhaustive]
pub struct OptimismPayloadBuilder;
pub struct OptimismPayloadBuilder {
/// By default the pending block equals the latest block
/// to save resources and not leak txs from the tx-pool;
/// this flag enables computing the pending block
/// from the tx-pool instead.
///
/// If `compute_pending_block` is not enabled, the payload builder
/// will use the payload attributes from the latest block. Note
/// that this flag is not yet functional.
pub compute_pending_block: bool,
}
impl OptimismPayloadBuilder {
/// Create a new instance with the given `compute_pending_block` flag.
pub const fn new(compute_pending_block: bool) -> Self {
Self { compute_pending_block }
}
}
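For example, a setup that opts into pending-block computation would construct the component as:
let payload_builder = OptimismPayloadBuilder::new(true);
assert!(payload_builder.compute_pending_block);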
impl<Node, Pool> PayloadServiceBuilder<Node, Pool> for OptimismPayloadBuilder
where
Node: FullNodeTypes<Engine = OptimismEngineTypes>,
Pool: TransactionPool + Unpin + 'static,
{
fn spawn_payload_service(
async fn spawn_payload_service(
self,
ctx: &BuilderContext<Node>,
pool: Pool,
) -> eyre::Result<PayloadBuilderHandle<Node::Engine>> {
let payload_builder = reth_optimism_payload_builder::OptimismPayloadBuilder::default();
let payload_builder = reth_optimism_payload_builder::OptimismPayloadBuilder::default()
.set_compute_pending_block(self.compute_pending_block);
let conf = ctx.payload_builder_config();
let payload_job_config = BasicPayloadJobGeneratorConfig::default()
.interval(conf.interval())
.deadline(conf.deadline())
.max_payload_tasks(conf.max_payload_tasks())
.extradata(conf.extradata_rlp_bytes())
// no extradata for OP
.extradata(Default::default())
.max_gas_limit(conf.max_gas_limit());
let payload_generator = BasicPayloadJobGenerator::with_builder(
@ -160,17 +201,36 @@ where
}
}
/// A basic ethereum payload service.
#[derive(Debug, Default, Clone, Copy)]
pub struct OptimismNetwork;
/// A basic optimism network builder.
#[derive(Debug, Default, Clone)]
pub struct OptimismNetworkBuilder {
/// HTTP endpoint for the sequencer mempool
pub sequencer_http: Option<String>,
/// Disable transaction pool gossip
pub disable_txpool_gossip: bool,
}
impl<Node, Pool> NetworkBuilder<Node, Pool> for OptimismNetwork
impl<Node, Pool> NetworkBuilder<Node, Pool> for OptimismNetworkBuilder
where
Node: FullNodeTypes,
Pool: TransactionPool + Unpin + 'static,
{
fn build_network(self, ctx: &BuilderContext<Node>, pool: Pool) -> eyre::Result<NetworkHandle> {
let network = ctx.network_builder_blocking()?;
async fn build_network(
self,
ctx: &BuilderContext<Node>,
pool: Pool,
) -> eyre::Result<NetworkHandle> {
let Self { sequencer_http, disable_txpool_gossip } = self;
let mut network_config = ctx.network_config()?;
// When `sequencer_endpoint` is configured, the node will forward all transactions to a
// Sequencer node for execution and inclusion on L1, and disable its own txpool
// gossip to prevent other parties in the network from learning about them.
network_config.tx_gossip_disabled = disable_txpool_gossip;
network_config.optimism_network_config.sequencer_endpoint = sequencer_http;
let network = NetworkManager::builder(network_config).await?;
let handle = ctx.start_network(network, pool);
Ok(handle)

View File

@ -12,7 +12,7 @@ fn test_basic_setup() {
let _builder = NodeBuilder::new(config)
.with_database(db)
.with_types(OptimismNode::default())
.with_components(OptimismNode::components())
.with_components(OptimismNode::components(Default::default()))
.on_component_initialized(move |ctx| {
let _provider = ctx.provider();
Ok(())

View File

@ -24,9 +24,4 @@ reth-basic-payload-builder.workspace = true
revm.workspace = true
# misc
tracing.workspace = true
[features]
# This is a workaround for the reth-cli crate to allow this as a mandatory dependency without breaking the build even if unused.
# This makes managing features and testing the workspace easier because clippy always builds all members if --workspace is provided
optimism = []
tracing.workspace = true

View File

@ -7,90 +7,84 @@
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#[cfg(not(feature = "optimism"))]
pub use builder::*;
use reth_basic_payload_builder::{
commit_withdrawals, is_better_payload, pre_block_beacon_root_contract_call, BuildArguments,
BuildOutcome, PayloadBuilder, PayloadConfig, WithdrawalsOutcome,
};
use reth_payload_builder::{
error::PayloadBuilderError, EthBuiltPayload, EthPayloadBuilderAttributes,
};
use reth_primitives::{
constants::{
eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE, EMPTY_RECEIPTS, EMPTY_TRANSACTIONS,
},
eip4844::calculate_excess_blob_gas,
proofs,
revm::env::tx_env_with_recovered,
Block, Header, IntoRecoveredTransaction, Receipt, Receipts, EMPTY_OMMER_ROOT_HASH, U256,
};
use reth_provider::{BundleStateWithReceipts, StateProviderFactory};
use reth_revm::database::StateProviderDatabase;
use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool};
use revm::{
db::states::bundle_state::BundleRetention,
primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState},
DatabaseCommit, State,
};
use tracing::{debug, trace, warn};
#[cfg(not(feature = "optimism"))]
mod builder {
use reth_basic_payload_builder::{
commit_withdrawals, is_better_payload, pre_block_beacon_root_contract_call, BuildArguments,
BuildOutcome, PayloadBuilder, PayloadConfig, WithdrawalsOutcome,
};
use reth_payload_builder::{
error::PayloadBuilderError, EthBuiltPayload, EthPayloadBuilderAttributes,
};
use reth_primitives::{
constants::{
eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE, EMPTY_RECEIPTS, EMPTY_TRANSACTIONS,
},
eip4844::calculate_excess_blob_gas,
proofs,
revm::env::tx_env_with_recovered,
Block, Header, IntoRecoveredTransaction, Receipt, Receipts, EMPTY_OMMER_ROOT_HASH, U256,
};
use reth_provider::{BundleStateWithReceipts, StateProviderFactory};
use reth_revm::database::StateProviderDatabase;
use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool};
use revm::{
db::states::bundle_state::BundleRetention,
primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState},
DatabaseCommit, State,
};
use tracing::{debug, trace, warn};
/// Ethereum payload builder
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
#[non_exhaustive]
pub struct EthereumPayloadBuilder;
/// Ethereum payload builder
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
#[non_exhaustive]
pub struct EthereumPayloadBuilder;
// Default implementation of [PayloadBuilder] for unit type
impl<Pool, Client> PayloadBuilder<Pool, Client> for EthereumPayloadBuilder
where
Client: StateProviderFactory,
Pool: TransactionPool,
{
type Attributes = EthPayloadBuilderAttributes;
type BuiltPayload = EthBuiltPayload;
// Default implementation of [PayloadBuilder] for unit type
impl<Pool, Client> PayloadBuilder<Pool, Client> for EthereumPayloadBuilder
where
Client: StateProviderFactory,
Pool: TransactionPool,
{
type Attributes = EthPayloadBuilderAttributes;
type BuiltPayload = EthBuiltPayload;
fn try_build(
&self,
args: BuildArguments<Pool, Client, EthPayloadBuilderAttributes, EthBuiltPayload>,
) -> Result<BuildOutcome<EthBuiltPayload>, PayloadBuilderError> {
default_ethereum_payload_builder(args)
}
fn try_build(
&self,
args: BuildArguments<Pool, Client, EthPayloadBuilderAttributes, EthBuiltPayload>,
) -> Result<BuildOutcome<EthBuiltPayload>, PayloadBuilderError> {
default_ethereum_payload_builder(args)
}
fn build_empty_payload(
client: &Client,
config: PayloadConfig<Self::Attributes>,
) -> Result<EthBuiltPayload, PayloadBuilderError> {
let extra_data = config.extra_data();
let PayloadConfig {
initialized_block_env,
parent_block,
attributes,
chain_spec,
initialized_cfg,
..
} = config;
fn build_empty_payload(
client: &Client,
config: PayloadConfig<Self::Attributes>,
) -> Result<EthBuiltPayload, PayloadBuilderError> {
let extra_data = config.extra_data();
let PayloadConfig {
initialized_block_env,
parent_block,
attributes,
chain_spec,
initialized_cfg,
..
} = config;
debug!(target: "payload_builder", parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building empty payload");
debug!(target: "payload_builder", parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building empty payload");
let state = client.state_by_block_hash(parent_block.hash()).map_err(|err| {
let state = client.state_by_block_hash(parent_block.hash()).map_err(|err| {
warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to get state for empty payload");
err
})?;
let mut db = State::builder()
.with_database_boxed(Box::new(StateProviderDatabase::new(&state)))
.with_bundle_update()
.build();
let mut db = State::builder()
.with_database_boxed(Box::new(StateProviderDatabase::new(&state)))
.with_bundle_update()
.build();
let base_fee = initialized_block_env.basefee.to::<u64>();
let block_number = initialized_block_env.number.to::<u64>();
let block_gas_limit: u64 =
initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX);
let base_fee = initialized_block_env.basefee.to::<u64>();
let block_number = initialized_block_env.number.to::<u64>();
let block_gas_limit: u64 = initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX);
// apply eip-4788 pre block contract call
pre_block_beacon_root_contract_call(
// apply eip-4788 pre block contract call
pre_block_beacon_root_contract_call(
&mut db,
&chain_spec,
block_number,
@ -102,277 +96,28 @@ mod builder {
err
})?;
let WithdrawalsOutcome { withdrawals_root, withdrawals } =
let WithdrawalsOutcome { withdrawals_root, withdrawals } =
commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, attributes.withdrawals.clone()).map_err(|err| {
warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to commit withdrawals for empty payload");
err
})?;
// merge all transitions into bundle state, this would apply the withdrawal balance
// changes and 4788 contract call
db.merge_transitions(BundleRetention::PlainState);
// merge all transitions into bundle state, this would apply the withdrawal balance
// changes and 4788 contract call
db.merge_transitions(BundleRetention::PlainState);
// calculate the state root
let bundle_state =
BundleStateWithReceipts::new(db.take_bundle(), Receipts::new(), block_number);
let state_root = state.state_root(&bundle_state).map_err(|err| {
// calculate the state root
let bundle_state =
BundleStateWithReceipts::new(db.take_bundle(), Receipts::new(), block_number);
let state_root = state.state_root(&bundle_state).map_err(|err| {
warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to calculate state root for empty payload");
err
})?;
let mut excess_blob_gas = None;
let mut blob_gas_used = None;
if chain_spec.is_cancun_active_at_timestamp(attributes.timestamp) {
excess_blob_gas = if chain_spec
.is_cancun_active_at_timestamp(parent_block.timestamp)
{
let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default();
let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default();
Some(calculate_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used))
} else {
// for the first post-fork block, both parent.blob_gas_used and
// parent.excess_blob_gas are evaluated as 0
Some(calculate_excess_blob_gas(0, 0))
};
blob_gas_used = Some(0);
}
let header = Header {
parent_hash: parent_block.hash(),
ommers_hash: EMPTY_OMMER_ROOT_HASH,
beneficiary: initialized_block_env.coinbase,
state_root,
transactions_root: EMPTY_TRANSACTIONS,
withdrawals_root,
receipts_root: EMPTY_RECEIPTS,
logs_bloom: Default::default(),
timestamp: attributes.timestamp,
mix_hash: attributes.prev_randao,
nonce: BEACON_NONCE,
base_fee_per_gas: Some(base_fee),
number: parent_block.number + 1,
gas_limit: block_gas_limit,
difficulty: U256::ZERO,
gas_used: 0,
extra_data,
blob_gas_used,
excess_blob_gas,
parent_beacon_block_root: attributes.parent_beacon_block_root,
};
let block = Block { header, body: vec![], ommers: vec![], withdrawals };
let sealed_block = block.seal_slow();
Ok(EthBuiltPayload::new(attributes.payload_id(), sealed_block, U256::ZERO))
}
}
/// Constructs an Ethereum transaction payload using the best transactions from the pool.
///
/// Given build arguments including an Ethereum client, transaction pool,
/// and configuration, this function creates a transaction payload. Returns
/// a result indicating success with the payload or an error in case of failure.
#[inline]
pub fn default_ethereum_payload_builder<Pool, Client>(
args: BuildArguments<Pool, Client, EthPayloadBuilderAttributes, EthBuiltPayload>,
) -> Result<BuildOutcome<EthBuiltPayload>, PayloadBuilderError>
where
Client: StateProviderFactory,
Pool: TransactionPool,
{
let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args;
let state_provider = client.state_by_block_hash(config.parent_block.hash())?;
let state = StateProviderDatabase::new(&state_provider);
let mut db = State::builder()
.with_database_ref(cached_reads.as_db(&state))
.with_bundle_update()
.build();
let extra_data = config.extra_data();
let PayloadConfig {
initialized_block_env,
initialized_cfg,
parent_block,
attributes,
chain_spec,
..
} = config;
debug!(target: "payload_builder", id=%attributes.id, parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building new payload");
let mut cumulative_gas_used = 0;
let mut sum_blob_gas_used = 0;
let block_gas_limit: u64 = initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX);
let base_fee = initialized_block_env.basefee.to::<u64>();
let mut executed_txs = Vec::new();
let mut best_txs = pool.best_transactions_with_attributes(BestTransactionsAttributes::new(
base_fee,
initialized_block_env.get_blob_gasprice().map(|gasprice| gasprice as u64),
));
let mut total_fees = U256::ZERO;
let block_number = initialized_block_env.number.to::<u64>();
// apply eip-4788 pre block contract call
pre_block_beacon_root_contract_call(
&mut db,
&chain_spec,
block_number,
&initialized_cfg,
&initialized_block_env,
&attributes,
)?;
let mut receipts = Vec::new();
while let Some(pool_tx) = best_txs.next() {
// ensure we still have capacity for this transaction
if cumulative_gas_used + pool_tx.gas_limit() > block_gas_limit {
// we can't fit this transaction into the block, so we need to mark it as invalid
// which also removes all dependent transactions from the iterator before we can
// continue
best_txs.mark_invalid(&pool_tx);
continue
}
// check if the job was cancelled, if so we can exit early
if cancel.is_cancelled() {
return Ok(BuildOutcome::Cancelled)
}
// convert tx to a signed transaction
let tx = pool_tx.to_recovered_transaction();
// There's only a limited amount of blob space available per block, so we need to check if
// the EIP-4844 transaction can still fit in the block
if let Some(blob_tx) = tx.transaction.as_eip4844() {
let tx_blob_gas = blob_tx.blob_gas();
if sum_blob_gas_used + tx_blob_gas > MAX_DATA_GAS_PER_BLOCK {
// we can't fit this _blob_ transaction into the block, so we mark it as
// invalid, which removes its dependent transactions from
// the iterator. This is similar to the gas limit condition
// for regular transactions above.
trace!(target: "payload_builder", tx=?tx.hash, ?sum_blob_gas_used, ?tx_blob_gas, "skipping blob transaction because it would exceed the max data gas per block");
best_txs.mark_invalid(&pool_tx);
continue
}
}
// Configure the environment for the block.
let mut evm = revm::Evm::builder()
.with_db(&mut db)
.with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env(
initialized_cfg.clone(),
initialized_block_env.clone(),
tx_env_with_recovered(&tx),
))
.build();
let ResultAndState { result, state } = match evm.transact() {
Ok(res) => res,
Err(err) => {
match err {
EVMError::Transaction(err) => {
if matches!(err, InvalidTransaction::NonceTooLow { .. }) {
// if the nonce is too low, we can skip this transaction
trace!(target: "payload_builder", %err, ?tx, "skipping nonce too low transaction");
} else {
// if the transaction is invalid, we can skip it and all of its
// descendants
trace!(target: "payload_builder", %err, ?tx, "skipping invalid transaction and its descendants");
best_txs.mark_invalid(&pool_tx);
}
continue
}
err => {
// this is an error that we should treat as fatal for this attempt
return Err(PayloadBuilderError::EvmExecutionError(err))
}
}
}
};
// drop evm so db is released.
drop(evm);
// commit changes
db.commit(state);
// add to the total blob gas used if the transaction successfully executed
if let Some(blob_tx) = tx.transaction.as_eip4844() {
let tx_blob_gas = blob_tx.blob_gas();
sum_blob_gas_used += tx_blob_gas;
// if we've reached the max data gas per block, we can skip blob txs entirely
if sum_blob_gas_used == MAX_DATA_GAS_PER_BLOCK {
best_txs.skip_blobs();
}
}
let gas_used = result.gas_used();
// add gas used by the transaction to cumulative gas used, before creating the receipt
cumulative_gas_used += gas_used;
// Push transaction changeset and calculate header bloom filter for receipt.
receipts.push(Some(Receipt {
tx_type: tx.tx_type(),
success: result.is_success(),
cumulative_gas_used,
logs: result.logs().into_iter().map(Into::into).collect(),
}));
// add the transaction's fee to the total fees
let miner_fee = tx
.effective_tip_per_gas(Some(base_fee))
.expect("fee is always valid; execution succeeded");
total_fees += U256::from(miner_fee) * U256::from(gas_used);
// append transaction to the list of executed transactions
executed_txs.push(tx.into_signed());
}
// check if we have a better block
if !is_better_payload(best_payload.as_ref(), total_fees) {
// can skip building the block
return Ok(BuildOutcome::Aborted { fees: total_fees, cached_reads })
}
let WithdrawalsOutcome { withdrawals_root, withdrawals } =
commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, attributes.withdrawals)?;
// merge all transitions into bundle state, this would apply the withdrawal balance changes
// and 4788 contract call
db.merge_transitions(BundleRetention::PlainState);
let bundle = BundleStateWithReceipts::new(
db.take_bundle(),
Receipts::from_vec(vec![receipts]),
block_number,
);
let receipts_root = bundle.receipts_root_slow(block_number).expect("Number is in range");
let logs_bloom = bundle.block_logs_bloom(block_number).expect("Number is in range");
// calculate the state root
let state_root = state_provider.state_root(&bundle)?;
// create the block header
let transactions_root = proofs::calculate_transaction_root(&executed_txs);
// initialize empty blob sidecars at first. If cancun is active, these will be populated
// below with the sidecars of the executed blob transactions.
let mut blob_sidecars = Vec::new();
let mut excess_blob_gas = None;
let mut blob_gas_used = None;
// only determine cancun fields when active
if chain_spec.is_cancun_active_at_timestamp(attributes.timestamp) {
// grab the blob sidecars from the executed txs
blob_sidecars = pool.get_all_blobs_exact(
executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash).collect(),
)?;
excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_block.timestamp) {
let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default();
let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default();
@ -383,7 +128,7 @@ mod builder {
Some(calculate_excess_blob_gas(0, 0))
};
blob_gas_used = Some(sum_blob_gas_used);
blob_gas_used = Some(0);
}
let header = Header {
@ -391,10 +136,10 @@ mod builder {
ommers_hash: EMPTY_OMMER_ROOT_HASH,
beneficiary: initialized_block_env.coinbase,
state_root,
transactions_root,
receipts_root,
transactions_root: EMPTY_TRANSACTIONS,
withdrawals_root,
logs_bloom,
receipts_root: EMPTY_RECEIPTS,
logs_bloom: Default::default(),
timestamp: attributes.timestamp,
mix_hash: attributes.prev_randao,
nonce: BEACON_NONCE,
@ -402,24 +147,270 @@ mod builder {
number: parent_block.number + 1,
gas_limit: block_gas_limit,
difficulty: U256::ZERO,
gas_used: cumulative_gas_used,
gas_used: 0,
extra_data,
parent_beacon_block_root: attributes.parent_beacon_block_root,
blob_gas_used,
excess_blob_gas,
parent_beacon_block_root: attributes.parent_beacon_block_root,
};
// seal the block
let block = Block { header, body: executed_txs, ommers: vec![], withdrawals };
let block = Block { header, body: vec![], ommers: vec![], withdrawals };
let sealed_block = block.seal_slow();
debug!(target: "payload_builder", ?sealed_block, "sealed built block");
let mut payload = EthBuiltPayload::new(attributes.id, sealed_block, total_fees);
// extend the payload with the blob sidecars from the executed txs
payload.extend_sidecars(blob_sidecars);
Ok(BuildOutcome::Better { payload, cached_reads })
Ok(EthBuiltPayload::new(attributes.payload_id(), sealed_block, U256::ZERO))
}
}
/// Constructs an Ethereum transaction payload using the best transactions from the pool.
///
/// Given build arguments including an Ethereum client, transaction pool,
/// and configuration, this function creates a transaction payload. Returns
/// a result indicating success with the payload or an error in case of failure.
#[inline]
pub fn default_ethereum_payload_builder<Pool, Client>(
args: BuildArguments<Pool, Client, EthPayloadBuilderAttributes, EthBuiltPayload>,
) -> Result<BuildOutcome<EthBuiltPayload>, PayloadBuilderError>
where
Client: StateProviderFactory,
Pool: TransactionPool,
{
let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args;
let state_provider = client.state_by_block_hash(config.parent_block.hash())?;
let state = StateProviderDatabase::new(&state_provider);
let mut db =
State::builder().with_database_ref(cached_reads.as_db(&state)).with_bundle_update().build();
let extra_data = config.extra_data();
let PayloadConfig {
initialized_block_env,
initialized_cfg,
parent_block,
attributes,
chain_spec,
..
} = config;
debug!(target: "payload_builder", id=%attributes.id, parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building new payload");
let mut cumulative_gas_used = 0;
let mut sum_blob_gas_used = 0;
let block_gas_limit: u64 = initialized_block_env.gas_limit.try_into().unwrap_or(u64::MAX);
let base_fee = initialized_block_env.basefee.to::<u64>();
let mut executed_txs = Vec::new();
let mut best_txs = pool.best_transactions_with_attributes(BestTransactionsAttributes::new(
base_fee,
initialized_block_env.get_blob_gasprice().map(|gasprice| gasprice as u64),
));
let mut total_fees = U256::ZERO;
let block_number = initialized_block_env.number.to::<u64>();
// apply eip-4788 pre block contract call
pre_block_beacon_root_contract_call(
&mut db,
&chain_spec,
block_number,
&initialized_cfg,
&initialized_block_env,
&attributes,
)?;
let mut receipts = Vec::new();
while let Some(pool_tx) = best_txs.next() {
// ensure we still have capacity for this transaction
if cumulative_gas_used + pool_tx.gas_limit() > block_gas_limit {
// we can't fit this transaction into the block, so we need to mark it as invalid
// which also removes all dependent transactions from the iterator before we can
// continue
best_txs.mark_invalid(&pool_tx);
continue
}
// check if the job was cancelled, if so we can exit early
if cancel.is_cancelled() {
return Ok(BuildOutcome::Cancelled)
}
// convert tx to a signed transaction
let tx = pool_tx.to_recovered_transaction();
// There's only a limited amount of blob space available per block, so we need to check if
// the EIP-4844 transaction can still fit in the block
if let Some(blob_tx) = tx.transaction.as_eip4844() {
let tx_blob_gas = blob_tx.blob_gas();
if sum_blob_gas_used + tx_blob_gas > MAX_DATA_GAS_PER_BLOCK {
// we can't fit this _blob_ transaction into the block, so we mark it as
// invalid, which removes its dependent transactions from
// the iterator. This is similar to the gas limit condition
// for regular transactions above.
trace!(target: "payload_builder", tx=?tx.hash, ?sum_blob_gas_used, ?tx_blob_gas, "skipping blob transaction because it would exceed the max data gas per block");
best_txs.mark_invalid(&pool_tx);
continue
}
}
// Configure the environment for the block.
let mut evm = revm::Evm::builder()
.with_db(&mut db)
.with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env(
initialized_cfg.clone(),
initialized_block_env.clone(),
tx_env_with_recovered(&tx),
))
.build();
let ResultAndState { result, state } = match evm.transact() {
Ok(res) => res,
Err(err) => {
match err {
EVMError::Transaction(err) => {
if matches!(err, InvalidTransaction::NonceTooLow { .. }) {
// if the nonce is too low, we can skip this transaction
trace!(target: "payload_builder", %err, ?tx, "skipping nonce too low transaction");
} else {
// if the transaction is invalid, we can skip it and all of its
// descendants
trace!(target: "payload_builder", %err, ?tx, "skipping invalid transaction and its descendants");
best_txs.mark_invalid(&pool_tx);
}
continue
}
err => {
// this is an error that we should treat as fatal for this attempt
return Err(PayloadBuilderError::EvmExecutionError(err))
}
}
}
};
// drop evm so db is released.
drop(evm);
// commit changes
db.commit(state);
// add to the total blob gas used if the transaction successfully executed
if let Some(blob_tx) = tx.transaction.as_eip4844() {
let tx_blob_gas = blob_tx.blob_gas();
sum_blob_gas_used += tx_blob_gas;
// if we've reached the max data gas per block, we can skip blob txs entirely
if sum_blob_gas_used == MAX_DATA_GAS_PER_BLOCK {
best_txs.skip_blobs();
}
}
let gas_used = result.gas_used();
// add gas used by the transaction to cumulative gas used, before creating the receipt
cumulative_gas_used += gas_used;
// Push transaction changeset and calculate header bloom filter for receipt.
#[allow(clippy::needless_update)] // side-effect of optimism fields
receipts.push(Some(Receipt {
tx_type: tx.tx_type(),
success: result.is_success(),
cumulative_gas_used,
logs: result.logs().into_iter().map(Into::into).collect(),
..Default::default()
}));
// add the transaction's fee to the total fees
let miner_fee = tx
.effective_tip_per_gas(Some(base_fee))
.expect("fee is always valid; execution succeeded");
total_fees += U256::from(miner_fee) * U256::from(gas_used);
// append transaction to the list of executed transactions
executed_txs.push(tx.into_signed());
}
// check if we have a better block
if !is_better_payload(best_payload.as_ref(), total_fees) {
// can skip building the block
return Ok(BuildOutcome::Aborted { fees: total_fees, cached_reads })
}
let WithdrawalsOutcome { withdrawals_root, withdrawals } =
commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, attributes.withdrawals)?;
// merge all transitions into bundle state, this would apply the withdrawal balance changes
// and 4788 contract call
db.merge_transitions(BundleRetention::PlainState);
let bundle = BundleStateWithReceipts::new(
db.take_bundle(),
Receipts::from_vec(vec![receipts]),
block_number,
);
let receipts_root = bundle.receipts_root_slow(block_number).expect("Number is in range");
let logs_bloom = bundle.block_logs_bloom(block_number).expect("Number is in range");
// calculate the state root
let state_root = state_provider.state_root(&bundle)?;
// create the block header
let transactions_root = proofs::calculate_transaction_root(&executed_txs);
// initialize empty blob sidecars at first. If cancun is active, these will be populated
// below with the sidecars of the executed blob transactions.
let mut blob_sidecars = Vec::new();
let mut excess_blob_gas = None;
let mut blob_gas_used = None;
// only determine cancun fields when active
if chain_spec.is_cancun_active_at_timestamp(attributes.timestamp) {
// grab the blob sidecars from the executed txs
blob_sidecars = pool.get_all_blobs_exact(
executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash).collect(),
)?;
excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_block.timestamp) {
let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default();
let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default();
Some(calculate_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used))
} else {
// for the first post-fork block, both parent.blob_gas_used and
// parent.excess_blob_gas are evaluated as 0
Some(calculate_excess_blob_gas(0, 0))
};
blob_gas_used = Some(sum_blob_gas_used);
}
let header = Header {
parent_hash: parent_block.hash(),
ommers_hash: EMPTY_OMMER_ROOT_HASH,
beneficiary: initialized_block_env.coinbase,
state_root,
transactions_root,
receipts_root,
withdrawals_root,
logs_bloom,
timestamp: attributes.timestamp,
mix_hash: attributes.prev_randao,
nonce: BEACON_NONCE,
base_fee_per_gas: Some(base_fee),
number: parent_block.number + 1,
gas_limit: block_gas_limit,
difficulty: U256::ZERO,
gas_used: cumulative_gas_used,
extra_data,
parent_beacon_block_root: attributes.parent_beacon_block_root,
blob_gas_used,
excess_blob_gas,
};
// seal the block
let block = Block { header, body: executed_txs, ommers: vec![], withdrawals };
let sealed_block = block.seal_slow();
debug!(target: "payload_builder", ?sealed_block, "sealed built block");
let mut payload = EthBuiltPayload::new(attributes.id, sealed_block, total_fees);
// extend the payload with the blob sidecars from the executed txs
payload.extend_sidecars(blob_sidecars);
Ok(BuildOutcome::Better { payload, cached_reads })
}

View File

@ -503,7 +503,7 @@ mod builder {
block_number,
);
let receipts_root = bundle
.receipts_root_slow(
.optimism_receipts_root_slow(
block_number,
chain_spec.as_ref(),
attributes.payload_attributes.timestamp,

View File

@ -1,7 +1,3 @@
#[cfg(not(feature = "optimism"))]
use crate::proofs::calculate_receipt_root_ref;
#[cfg(feature = "optimism")]
use crate::proofs::calculate_receipt_root_ref_optimism;
use crate::{
compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR},
logs_bloom, Bloom, Log, PruneSegmentError, TxType, B256,
@ -97,22 +93,21 @@ impl Receipts {
}
/// Retrieves the receipt root for all recorded receipts from index.
#[cfg(not(feature = "optimism"))]
pub fn root_slow(&self, index: usize) -> Option<B256> {
Some(calculate_receipt_root_ref(
Some(crate::proofs::calculate_receipt_root_ref(
&self.receipt_vec[index].iter().map(Option::as_ref).collect::<Option<Vec<_>>>()?,
))
}
/// Retrieves the receipt root for all recorded receipts from index.
#[cfg(feature = "optimism")]
pub fn root_slow(
pub fn optimism_root_slow(
&self,
index: usize,
chain_spec: &crate::ChainSpec,
timestamp: u64,
) -> Option<B256> {
Some(calculate_receipt_root_ref_optimism(
Some(crate::proofs::calculate_receipt_root_ref_optimism(
&self.receipt_vec[index].iter().map(Option::as_ref).collect::<Option<Vec<_>>>()?,
chain_spec,
timestamp,

View File

@ -215,15 +215,18 @@ impl PendingBlockEnv {
block_number,
);
#[cfg(feature = "optimism")]
let receipts_root = bundle
.receipts_root_slow(
.optimism_receipts_root_slow(
block_number,
#[cfg(feature = "optimism")]
chain_spec.as_ref(),
#[cfg(feature = "optimism")]
block_env.timestamp.to::<u64>(),
)
.expect("Block is present");
#[cfg(not(feature = "optimism"))]
let receipts_root = bundle.receipts_root_slow(block_number).expect("Block is present");
let logs_bloom = bundle.block_logs_bloom(block_number).expect("Block is present");
// calculate the state root

View File

@ -163,8 +163,11 @@ impl BundleStateWithReceipts {
/// Returns the receipt root for all recorded receipts.
/// Note: this function calculates Bloom filters for every receipt and creates merkle trees
/// of receipts. This is an expensive operation.
#[cfg(not(feature = "optimism"))]
#[allow(unused_variables)]
pub fn receipts_root_slow(&self, block_number: BlockNumber) -> Option<B256> {
#[cfg(feature = "optimism")]
panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead.");
#[cfg(not(feature = "optimism"))]
self.receipts.root_slow(self.block_number_to_index(block_number)?)
}
@ -172,13 +175,17 @@ impl BundleStateWithReceipts {
/// Note: this function calculates Bloom filters for every receipt and creates merkle trees
/// of receipts. This is an expensive operation.
#[cfg(feature = "optimism")]
pub fn receipts_root_slow(
pub fn optimism_receipts_root_slow(
&self,
block_number: BlockNumber,
chain_spec: &reth_primitives::ChainSpec,
timestamp: u64,
) -> Option<B256> {
self.receipts.root_slow(self.block_number_to_index(block_number)?, chain_spec, timestamp)
self.receipts.optimism_root_slow(
self.block_number_to_index(block_number)?,
chain_spec,
timestamp,
)
}
/// Returns reference to receipts.

View File

@ -298,12 +298,6 @@ impl TaskExecutor {
&self.on_shutdown
}
/// Runs a future to completion on this Handle's associated Runtime.
#[track_caller]
pub fn block_on<F: Future>(&self, future: F) -> F::Output {
self.handle.block_on(future)
}
/// Spawns a future on the tokio runtime depending on the [TaskKind]
fn spawn_on_rt<F>(&self, fut: F, task_kind: TaskKind) -> JoinHandle<()>
where

View File

@ -8,6 +8,7 @@ license.workspace = true
[dependencies]
reth.workspace = true
reth-transaction-pool.workspace = true
reth-node-ethereum.workspace = true
clap = { workspace = true, features = ["derive"] }
jsonrpsee = { workspace = true, features = ["server", "macros"] }

View File

@ -14,24 +14,38 @@
use clap::Parser;
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
use reth::cli::{
components::{RethNodeComponents, RethRpcComponents},
config::RethRpcConfig,
ext::{RethCliExt, RethNodeCommandConfig},
Cli,
};
use reth::cli::Cli;
use reth_node_ethereum::EthereumNode;
use reth_transaction_pool::TransactionPool;
fn main() {
Cli::<MyRethCliExt>::parse().run().unwrap();
}
Cli::<RethCliTxpoolExt>::parse()
.run(|builder, args| async move {
let handle = builder
.node(EthereumNode::default())
.extend_rpc_modules(move |ctx| {
if !args.enable_ext {
return Ok(())
}
/// The type that tells the reth CLI what extensions to use
struct MyRethCliExt;
// here we get the configured pool.
let pool = ctx.pool().clone();
impl RethCliExt for MyRethCliExt {
/// This tells the reth CLI to install the `txpool` rpc namespace via `RethCliTxpoolExt`
type Node = RethCliTxpoolExt;
let ext = TxpoolExt { pool };
// now we merge our extension namespace into all configured transports
ctx.modules.merge_configured(ext.into_rpc())?;
println!("txpool extension enabled");
Ok(())
})
.launch()
.await?;
handle.wait_for_node_exit().await
})
.unwrap();
}
/// Our custom cli args extension that adds one flag to reth default CLI.
@ -42,34 +56,6 @@ struct RethCliTxpoolExt {
pub enable_ext: bool,
}
impl RethNodeCommandConfig for RethCliTxpoolExt {
// This is the entrypoint for the CLI to extend the RPC server with custom rpc namespaces.
fn extend_rpc_modules<Conf, Reth>(
&mut self,
_config: &Conf,
_components: &Reth,
rpc_components: RethRpcComponents<'_, Reth>,
) -> eyre::Result<()>
where
Conf: RethRpcConfig,
Reth: RethNodeComponents,
{
if !self.enable_ext {
return Ok(())
}
// here we get the configured pool type from the CLI.
let pool = rpc_components.registry.pool().clone();
let ext = TxpoolExt { pool };
// now we merge our extension namespace into all configured transports
rpc_components.modules.merge_configured(ext.into_rpc())?;
println!("txpool extension enabled");
Ok(())
}
}
/// trait interface for a custom rpc namespace: `txpool`
///
/// This defines an additional namespace where all methods are configured as trait functions.

View File

@ -7,10 +7,9 @@ license.workspace = true
[dependencies]
reth.workspace = true
eyre.workspace = true
reth-node-ethereum.workspace = true
clap.workspace = true
serde.workspace = true
serde_json.workspace = true
tracing.workspace = true
futures-util.workspace = true
tokio = { workspace = true, features = ["time"] }

View File

@ -15,33 +15,26 @@
//!
//! See lighthouse beacon Node API: <https://lighthouse-book.sigmaprime.io/api-bn.html#beacon-node-api>
#![warn(unused_crate_dependencies)]
use clap::Parser;
use futures_util::stream::StreamExt;
use mev_share_sse::{client::EventStream, EventClient};
use reth::{
cli::{
components::RethNodeComponents,
ext::{RethCliExt, RethNodeCommandConfig},
Cli,
},
rpc::types::beacon::events::PayloadAttributesEvent,
tasks::TaskSpawner,
};
use reth::{cli::Cli, rpc::types::beacon::events::PayloadAttributesEvent};
use reth_node_ethereum::EthereumNode;
use std::net::{IpAddr, Ipv4Addr};
use tracing::{info, warn};
fn main() {
Cli::<BeaconEventsExt>::parse().run().unwrap();
}
Cli::<BeaconEventsConfig>::parse()
.run(|builder, args| async move {
let handle = builder.node(EthereumNode::default()).launch().await?;
/// The type that tells the reth CLI what extensions to use
#[derive(Debug, Default)]
#[non_exhaustive]
struct BeaconEventsExt;
handle.node.task_executor.spawn(Box::pin(args.run()));
impl RethCliExt for BeaconEventsExt {
/// This tells the reth CLI to install additional CLI arguments
type Node = BeaconEventsConfig;
handle.wait_for_node_exit().await
})
.unwrap();
}
/// Our custom cli args extension that adds one flag to reth default CLI.
@ -95,13 +88,6 @@ impl BeaconEventsConfig {
}
}
impl RethNodeCommandConfig for BeaconEventsConfig {
fn on_node_started<Reth: RethNodeComponents>(&mut self, components: &Reth) -> eyre::Result<()> {
components.task_executor().spawn(Box::pin(self.clone().run()));
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;

View File

@ -7,5 +7,4 @@ license.workspace = true
[dependencies]
reth.workspace = true
clap.workspace = true
eyre.workspace = true
reth-node-ethereum.workspace = true

View File

@ -15,38 +15,30 @@
//! > "Node started"
//! once the node has been started.
use clap::Parser;
use reth::cli::{
components::RethNodeComponents,
ext::{NoArgsCliExt, RethNodeCommandConfig},
Cli,
};
use reth::cli::Cli;
use reth_node_ethereum::EthereumNode;
fn main() {
Cli::<NoArgsCliExt<MyRethConfig>>::parse()
.with_node_extension(MyRethConfig::default())
.run()
Cli::parse_args()
.run(|builder, _| async move {
let handle = builder
.node(EthereumNode::default())
.on_node_started(|_ctx| {
println!("Node started");
Ok(())
})
.on_rpc_started(|_ctx, _handles| {
println!("RPC started");
Ok(())
})
.on_component_initialized(|_ctx| {
println!("All components initialized");
Ok(())
})
.launch()
.await?;
handle.wait_for_node_exit().await
})
.unwrap();
}
/// Our custom cli args extension that adds one flag to reth default CLI.
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
struct MyRethConfig;
impl RethNodeCommandConfig for MyRethConfig {
fn on_components_initialized<Reth: RethNodeComponents>(
&mut self,
_components: &Reth,
) -> eyre::Result<()> {
println!("All components initialized");
Ok(())
}
fn on_node_started<Reth: RethNodeComponents>(
&mut self,
_components: &Reth,
) -> eyre::Result<()> {
println!("Node started");
Ok(())
}
}

View File

@ -0,0 +1,18 @@
[package]
name = "custom-dev-node"
version = "0.0.0"
publish = false
edition.workspace = true
license.workspace = true
[dependencies]
reth.workspace = true
reth-node-core.workspace = true
reth-primitives.workspace = true
reth-node-ethereum.workspace = true
futures-util.workspace = true
eyre.workspace = true
tokio.workspace = true
serde_json.workspace = true

View File

@ -0,0 +1,96 @@
//! This example shows how to run a custom dev node programmatically and submit a transaction
//! through rpc.
#![warn(unused_crate_dependencies)]
use futures_util::StreamExt;
use reth::{
builder::{NodeBuilder, NodeHandle},
providers::CanonStateSubscriptions,
rpc::eth::EthTransactions,
tasks::TaskManager,
};
use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig};
use reth_node_ethereum::EthereumNode;
use reth_primitives::{b256, hex, ChainSpec, Genesis};
use std::sync::Arc;
#[tokio::main]
async fn main() -> eyre::Result<()> {
let tasks = TaskManager::current();
// create node config
let node_config = NodeConfig::test()
.dev()
.with_rpc(RpcServerArgs::default().with_http())
.with_chain(custom_chain());
let NodeHandle { mut node, node_exit_future } = NodeBuilder::new(node_config)
.testing_node(tasks.executor())
.node(EthereumNode::default())
.launch()
.await?;
let mut notifications = node.provider.canonical_state_stream();
// submit tx through rpc
let raw_tx = hex!("02f876820a28808477359400847735940082520894ab0840c0e43688012c1adb0f5e3fc665188f83d28a029d394a5d630544000080c080a0a044076b7e67b5deecc63f61a8d7913fab86ca365b344b5759d1fe3563b4c39ea019eab979dd000da04dfc72bb0377c092d30fd9e1cab5ae487de49586cc8b0090");
let eth_api = node.rpc_registry.eth_api();
let hash = eth_api.send_raw_transaction(raw_tx.into()).await?;
let expected = b256!("b1c6512f4fc202c04355fbda66755e0e344b152e633010e8fd75ecec09b63398");
assert_eq!(hash, expected);
println!("submitted transaction: {hash}");
let head = notifications.next().await.unwrap();
let tx = head.tip().transactions().next().unwrap();
assert_eq!(tx.hash(), hash);
println!("mined transaction: {hash}");
node_exit_future.await
}
fn custom_chain() -> Arc<ChainSpec> {
let custom_genesis = r#"
{
"nonce": "0x42",
"timestamp": "0x0",
"extraData": "0x5343",
"gasLimit": "0x1388",
"difficulty": "0x400000000",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"coinbase": "0x0000000000000000000000000000000000000000",
"alloc": {
"0x6Be02d1d3665660d22FF9624b7BE0551ee1Ac91b": {
"balance": "0x4a47e3c12448f4ad000000"
}
},
"number": "0x0",
"gasUsed": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"config": {
"ethash": {},
"chainId": 2600,
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"berlinBlock": 0,
"londonBlock": 0,
"terminalTotalDifficulty": 0,
"terminalTotalDifficultyPassed": true,
"shanghaiTime": 0
}
}
"#;
let genesis: Genesis = serde_json::from_str(custom_genesis).unwrap();
Arc::new(genesis.into())
}
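// A minimal alternative sketch (not part of this example as written): the dev chain spec could
// also be assembled with `ChainSpec::builder()`, as the custom-evm and custom-node examples do,
// instead of parsing a genesis JSON blob. This assumes `alloy_chains::Chain` is available, which
// this crate does not currently depend on, and it drops the prefunded `alloc` account.
fn custom_chain_via_builder() -> Arc<ChainSpec> {
    Arc::new(
        ChainSpec::builder()
            // keep the same chain id as the JSON genesis above
            .chain(alloy_chains::Chain::from_id(2600))
            .genesis(Genesis::default())
            .london_activated()
            .paris_activated()
            .shanghai_activated()
            .build(),
    )
}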

View File

@ -0,0 +1,18 @@
[package]
name = "custom-evm"
version = "0.0.0"
publish = false
edition.workspace = true
license.workspace = true
[dependencies]
reth.workspace = true
reth-node-api.workspace = true
reth-node-core.workspace = true
reth-primitives.workspace = true
reth-node-ethereum.workspace = true
reth-tracing.workspace = true
alloy-chains.workspace = true
eyre.workspace = true
tokio.workspace = true

View File

@ -0,0 +1,147 @@
//! This example shows how to implement a node with a custom EVM
#![warn(unused_crate_dependencies)]
use alloy_chains::Chain;
use reth::{
builder::{node::NodeTypes, NodeBuilder},
primitives::{
address,
revm_primitives::{CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv},
Address, Bytes, U256,
},
revm::{
handler::register::EvmHandler,
precompile::{Precompile, PrecompileSpecId, Precompiles},
Database, Evm, EvmBuilder,
},
tasks::TaskManager,
};
use reth_node_api::{ConfigureEvm, ConfigureEvmEnv};
use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig};
use reth_node_ethereum::{EthEngineTypes, EthEvmConfig, EthereumNode};
use reth_primitives::{ChainSpec, Genesis, Header, Transaction};
use reth_tracing::{RethTracer, Tracer};
use std::sync::Arc;
/// Custom EVM configuration
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
pub struct MyEvmConfig;
impl MyEvmConfig {
/// Sets the precompiles for the EVM handler
///
/// This will be invoked when the EVM is created via [ConfigureEvm::evm] or
/// [ConfigureEvm::evm_with_inspector]
///
/// This will use the default mainnet precompiles and add additional precompiles.
pub fn set_precompiles<EXT, DB>(handler: &mut EvmHandler<EXT, DB>)
where
DB: Database,
{
// first we need the evm spec id, which determines the precompiles
let spec_id = handler.cfg.spec_id;
// install the precompiles
handler.pre_execution.load_precompiles = Arc::new(move || {
let mut precompiles = Precompiles::new(PrecompileSpecId::from_spec_id(spec_id)).clone();
precompiles.inner.insert(
address!("0000000000000000000000000000000000000999"),
Precompile::Env(Self::my_precompile),
);
precompiles
});
}
/// A custom precompile that does nothing
fn my_precompile(_data: &Bytes, _gas: u64, _env: &Env) -> PrecompileResult {
Ok((0, Bytes::new()))
}
}
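// Illustrative sketch only (not part of this example): a precompile that actually inspects its
// input, using the same `(gas_used, output)` success shape as `my_precompile` above. The cost
// numbers are arbitrary example values and this function is not wired into `set_precompiles`.
fn my_echo_precompile(data: &Bytes, gas_limit: u64, _env: &Env) -> PrecompileResult {
    // flat base cost plus a small cost per 32-byte word of input (example values)
    let gas_used = 15 + 3 * ((data.len() as u64 + 31) / 32);
    if gas_used > gas_limit {
        // a real precompile would return an out-of-gas error here; the sketch just echoes nothing
        return Ok((gas_limit, Bytes::new()));
    }
    // echo the calldata back to the caller
    Ok((gas_used, data.clone()))
}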
impl ConfigureEvmEnv for MyEvmConfig {
type TxMeta = ();
fn fill_tx_env<T>(tx_env: &mut TxEnv, transaction: T, sender: Address, meta: Self::TxMeta)
where
T: AsRef<Transaction>,
{
EthEvmConfig::fill_tx_env(tx_env, transaction, sender, meta)
}
fn fill_cfg_env(
cfg_env: &mut CfgEnvWithHandlerCfg,
chain_spec: &ChainSpec,
header: &Header,
total_difficulty: U256,
) {
EthEvmConfig::fill_cfg_env(cfg_env, chain_spec, header, total_difficulty)
}
}
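// Both env-filling hooks above delegate to the stock `EthEvmConfig`; the only behavioural
// difference introduced by `MyEvmConfig` is the extra precompile installed via
// `set_precompiles` when the EVM is built below.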
impl ConfigureEvm for MyEvmConfig {
fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, (), DB> {
EvmBuilder::default()
.with_db(db)
// add additional precompiles
.append_handler_register(MyEvmConfig::set_precompiles)
.build()
}
fn evm_with_inspector<'a, DB: Database + 'a, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> {
EvmBuilder::default()
.with_db(db)
.with_external_context(inspector)
// add additional precompiles
.append_handler_register(MyEvmConfig::set_precompiles)
.build()
}
}
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
struct MyCustomNode;
/// Configure the node types
impl NodeTypes for MyCustomNode {
type Primitives = ();
type Engine = EthEngineTypes;
type Evm = MyEvmConfig;
fn evm_config(&self) -> Self::Evm {
Self::Evm::default()
}
}
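// In `main` below, `with_types(MyCustomNode::default())` supplies the custom `Evm = MyEvmConfig`
// through `NodeTypes::evm_config`, while `with_components(EthereumNode::components())` keeps the
// stock Ethereum pool, payload-builder and network components, so only EVM execution changes.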
#[tokio::main]
async fn main() -> eyre::Result<()> {
let _guard = RethTracer::new().init()?;
let tasks = TaskManager::current();
// create a mainnet chain spec with hardforks up to cancun activated
let spec = ChainSpec::builder()
.chain(Chain::mainnet())
.genesis(Genesis::default())
.london_activated()
.paris_activated()
.shanghai_activated()
.cancun_activated()
.build();
let node_config =
NodeConfig::test().with_rpc(RpcServerArgs::default().with_http()).with_chain(spec);
let handle = NodeBuilder::new(node_config)
.testing_node(tasks.executor())
.with_types(MyCustomNode::default())
.with_components(EthereumNode::components())
.launch()
.await
.unwrap();
println!("Node started");
handle.node_exit_future.await
}

View File

@ -7,6 +7,6 @@ license.workspace = true
[dependencies]
reth.workspace = true
reth-node-ethereum.workspace = true
clap = { workspace = true, features = ["derive"] }
futures-util.workspace = true
eyre.workspace = true
futures-util.workspace = true

View File

@ -11,15 +11,11 @@
#![warn(unused_crate_dependencies)]
use clap::Parser;
use futures_util::stream::StreamExt;
use futures_util::StreamExt;
use reth::{
cli::{
components::{RethNodeComponents, RethRpcComponents, RethRpcServerHandles},
config::RethRpcConfig,
ext::{RethCliExt, RethNodeCommandConfig},
Cli,
},
primitives::{Address, BlockId, IntoRecoveredTransaction},
builder::NodeHandle,
cli::Cli,
primitives::{Address, BlockNumberOrTag, IntoRecoveredTransaction},
revm::{
inspector_handle_register,
interpreter::{Interpreter, OpCode},
@ -29,21 +25,79 @@ use reth::{
compat::transaction::transaction_to_call_request,
eth::{revm_utils::EvmOverrides, EthTransactions},
},
tasks::TaskSpawner,
transaction_pool::TransactionPool,
};
use reth_node_ethereum::node::EthereumNode;
use std::collections::HashSet;
fn main() {
Cli::<MyRethCliExt>::parse().run().unwrap();
}
Cli::<RethCliTxpoolExt>::parse()
.run(|builder, args| async move {
// launch the node
let NodeHandle { mut node, node_exit_future } =
builder.node(EthereumNode::default()).launch().await?;
/// The type that tells the reth CLI what extensions to use
struct MyRethCliExt;
let recipients = args.recipients.iter().copied().collect::<HashSet<_>>();
impl RethCliExt for MyRethCliExt {
/// This tells the reth CLI to trace addresses via `RethCliTxpoolExt`
type Node = RethCliTxpoolExt;
// create a new subscription to pending transactions
let mut pending_transactions = node.pool.new_pending_pool_transactions_listener();
// get an instance of the `trace_` API handler
let eth_api = node.rpc_registry.eth_api();
println!("Spawning trace task!");
// Spawn an async block to listen for transactions.
node.task_executor.spawn(Box::pin(async move {
// Waiting for new transactions
while let Some(event) = pending_transactions.next().await {
let tx = event.transaction;
println!("Transaction received: {tx:?}");
if recipients.is_empty() {
// convert the pool transaction
let call_request =
transaction_to_call_request(tx.to_recovered_transaction());
let result = eth_api
.spawn_with_call_at(
call_request,
BlockNumberOrTag::Latest.into(),
EvmOverrides::default(),
move |db, env| {
let mut dummy_inspector = DummyInspector::default();
{
// configure the evm with the custom inspector
let mut evm = Evm::builder()
.with_db(db)
.with_external_context(&mut dummy_inspector)
.with_env_with_handler_cfg(env)
.append_handler_register(inspector_handle_register)
.build();
// execute the transaction on a blocking task and await the
// inspector result
let _ = evm.transact()?;
}
Ok(dummy_inspector)
},
)
.await;
if let Ok(ret_val) = result {
let hash = tx.hash();
println!(
"Inspector result for transaction {}: \n {}",
hash,
ret_val.ret_val.join("\n")
);
}
}
}
}));
node_exit_future.await
})
.unwrap();
}
/// Our custom cli args extension that adds one flag to the reth default CLI.
@ -74,76 +128,3 @@ where
}
}
}
impl RethNodeCommandConfig for RethCliTxpoolExt {
/// Sets up a subscription to listen for new pending transactions and traces them.
/// If the transaction is from one of the specified recipients, it will be traced.
/// If no recipients are specified, all transactions will be traced.
fn on_rpc_server_started<Conf, Reth>(
&mut self,
_config: &Conf,
components: &Reth,
rpc_components: RethRpcComponents<'_, Reth>,
_handles: RethRpcServerHandles,
) -> eyre::Result<()>
where
Conf: RethRpcConfig,
Reth: RethNodeComponents,
{
let recipients = self.recipients.iter().copied().collect::<HashSet<_>>();
// create a new subscription to pending transactions
let mut pending_transactions = components.pool().new_pending_pool_transactions_listener();
let eth_api = rpc_components.registry.eth_api();
println!("Spawning trace task!");
// Spawn an async block to listen for transactions.
components.task_executor().spawn(Box::pin(async move {
// Waiting for new transactions
while let Some(event) = pending_transactions.next().await {
let tx = event.transaction;
println!("Transaction received: {tx:?}");
if recipients.is_empty() {
// convert the pool transaction
let call_request = transaction_to_call_request(tx.to_recovered_transaction());
let result = eth_api
.spawn_with_call_at(
call_request,
BlockId::default(),
EvmOverrides::default(),
move |db, env| {
let mut dummy_inspector = DummyInspector::default();
{
// configure the evm with the custom inspector
let mut evm = Evm::builder()
.with_db(db)
.with_external_context(&mut dummy_inspector)
.with_env_with_handler_cfg(env)
.append_handler_register(inspector_handle_register)
.build();
// execute the transaction on a blocking task and await the
// inspector result
let _ = evm.transact()?;
}
Ok(dummy_inspector)
},
)
.await;
if let Ok(ret_val) = result {
let hash = tx.hash();
println!(
"Inspector result for transaction {}: \n {}",
hash,
ret_val.ret_val.join("\n")
);
}
}
}
}));
Ok(())
}
}

View File

@ -0,0 +1,16 @@
[package]
name = "custom-node-components"
version = "0.0.0"
publish = false
edition.workspace = true
license.workspace = true
[dependencies]
reth.workspace = true
reth-node-ethereum.workspace = true
reth-transaction-pool.workspace = true
reth-tracing.workspace = true
eyre.workspace = true

View File

@ -0,0 +1,104 @@
//! This example shows how to configure custom components for a reth node.
#![warn(unused_crate_dependencies)]
use reth::{
builder::{components::PoolBuilder, BuilderContext, FullNodeTypes},
cli::Cli,
providers::CanonStateSubscriptions,
transaction_pool::{
blobstore::InMemoryBlobStore, EthTransactionPool, TransactionValidationTaskExecutor,
},
};
use reth_node_ethereum::EthereumNode;
use reth_tracing::tracing::{debug, info};
use reth_transaction_pool::PoolConfig;
fn main() {
Cli::parse_args()
.run(|builder, _| async move {
let handle = builder
// use the default ethereum node types
.with_types(EthereumNode::default())
// Configure the components of the node
// use default ethereum components but use our custom pool
.with_components(EthereumNode::components().pool(CustomPoolBuilder::default()))
.launch()
.await?;
handle.wait_for_node_exit().await
})
.unwrap();
}
/// A custom pool builder
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct CustomPoolBuilder {
/// Use custom pool config
pool_config: PoolConfig,
}
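// A small illustrative addition (not part of this example as written): because the builder owns
// its `PoolConfig`, a constructor like this would let callers pass a tuned configuration before
// handing the builder to `.pool(...)` in `main` above.
impl CustomPoolBuilder {
    /// Creates a pool builder that uses the given pool configuration instead of the default.
    pub fn new(pool_config: PoolConfig) -> Self {
        Self { pool_config }
    }
}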
/// Implement the `PoolBuilder` trait for the custom pool builder
///
/// This will be used to build the transaction pool and its maintenance tasks during launch.
impl<Node> PoolBuilder<Node> for CustomPoolBuilder
where
Node: FullNodeTypes,
{
type Pool = EthTransactionPool<Node::Provider, InMemoryBlobStore>;
async fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> {
let data_dir = ctx.data_dir();
let blob_store = InMemoryBlobStore::default();
let validator = TransactionValidationTaskExecutor::eth_builder(ctx.chain_spec())
.with_head_timestamp(ctx.head().timestamp)
.kzg_settings(ctx.kzg_settings()?)
.with_additional_tasks(5)
.build_with_tasks(
ctx.provider().clone(),
ctx.task_executor().clone(),
blob_store.clone(),
);
let transaction_pool =
reth_transaction_pool::Pool::eth_pool(validator, blob_store, self.pool_config);
info!(target: "reth::cli", "Transaction pool initialized");
let transactions_path = data_dir.txpool_transactions_path();
// spawn txpool maintenance task
{
let pool = transaction_pool.clone();
let chain_events = ctx.provider().canonical_state_stream();
let client = ctx.provider().clone();
let transactions_backup_config =
reth_transaction_pool::maintain::LocalTransactionBackupConfig::with_local_txs_backup(transactions_path);
ctx.task_executor().spawn_critical_with_graceful_shutdown_signal(
"local transactions backup task",
|shutdown| {
reth_transaction_pool::maintain::backup_local_transactions_task(
shutdown,
pool.clone(),
transactions_backup_config,
)
},
);
// spawn the maintenance task
ctx.task_executor().spawn_critical(
"txpool maintenance task",
reth_transaction_pool::maintain::maintain_transaction_pool_future(
client,
pool,
chain_events,
ctx.task_executor().clone(),
Default::default(),
),
);
debug!(target: "reth::cli", "Spawned txpool maintenance task");
}
Ok(transaction_pool)
}
}

View File

@ -5,19 +5,19 @@ publish = false
edition.workspace = true
license.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
reth.workspace = true
reth-rpc-api.workspace = true
reth-rpc-types.workspace = true
reth-node-api.workspace = true
reth-node-core.workspace = true
reth-primitives.workspace = true
reth-payload-builder.workspace = true
reth-basic-payload-builder.workspace = true
reth-ethereum-payload-builder.workspace = true
reth-node-ethereum.workspace = true
reth-tracing.workspace = true
alloy-chains.workspace = true
jsonrpsee.workspace = true
eyre.workspace = true
tokio.workspace = true
thiserror.workspace = true

View File

@ -15,25 +15,43 @@
//! Once traits are implemented and custom types are defined, the [EngineTypes] trait can be
//! implemented:
#![warn(unused_crate_dependencies)]
use alloy_chains::Chain;
use jsonrpsee::http_client::HttpClient;
use reth::builder::spawn_node;
use reth::{
builder::{
components::{ComponentsBuilder, PayloadServiceBuilder},
node::NodeTypes,
BuilderContext, FullNodeTypes, Node, NodeBuilder, PayloadBuilderConfig,
},
primitives::revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg},
providers::{CanonStateSubscriptions, StateProviderFactory},
tasks::TaskManager,
transaction_pool::TransactionPool,
};
use reth_basic_payload_builder::{
BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig, BuildArguments, BuildOutcome,
PayloadBuilder, PayloadConfig,
};
use reth_node_api::{
validate_version_specific_fields, AttributesValidationError, EngineApiMessageVersion,
EngineTypes, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes,
};
use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig};
use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes};
use reth_primitives::{
revm::config::revm_spec_by_timestamp_after_merge,
revm_primitives::{BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId},
Address, ChainSpec, Genesis, Header, Withdrawals, B256, U256,
use reth_node_ethereum::{
node::{EthereumNetworkBuilder, EthereumPoolBuilder},
EthEvmConfig,
};
use reth_rpc_api::{EngineApiClient, EthApiClient};
use reth_payload_builder::{
error::PayloadBuilderError, EthBuiltPayload, EthPayloadBuilderAttributes, PayloadBuilderHandle,
PayloadBuilderService,
};
use reth_primitives::{Address, ChainSpec, Genesis, Header, Withdrawals, B256};
use reth_rpc_types::{
engine::{ForkchoiceState, PayloadAttributes as EthPayloadAttributes, PayloadId},
engine::{PayloadAttributes as EthPayloadAttributes, PayloadId},
withdrawal::Withdrawal,
};
use reth_tracing::{RethTracer, Tracer};
use serde::{Deserialize, Serialize};
use std::convert::Infallible;
use thiserror::Error;
@ -84,7 +102,7 @@ impl PayloadAttributes for CustomPayloadAttributes {
}
}
/// Newtype around the payload builder attributes type
/// New type around the payload builder attributes type
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CustomPayloadBuilderAttributes(EthPayloadBuilderAttributes);
@ -123,50 +141,13 @@ impl PayloadBuilderAttributes for CustomPayloadBuilderAttributes {
fn withdrawals(&self) -> &Withdrawals {
&self.0.withdrawals
}
fn cfg_and_block_env(
&self,
chain_spec: &ChainSpec,
parent: &Header,
) -> (CfgEnvWithHandlerCfg, BlockEnv) {
// configure evm env based on parent block
let mut cfg = CfgEnv::default();
cfg.chain_id = chain_spec.chain().id();
// ensure we're not missing any timestamp based hardforks
let spec_id = revm_spec_by_timestamp_after_merge(chain_spec, self.timestamp());
// if the parent block did not have excess blob gas (i.e. it was pre-cancun), but it is
// cancun now, we need to set the excess blob gas to the default value
let blob_excess_gas_and_price = parent
.next_block_excess_blob_gas()
.or_else(|| {
if spec_id == SpecId::CANCUN {
// default excess blob gas is zero
Some(0)
} else {
None
}
})
.map(BlobExcessGasAndPrice::new);
let block_env = BlockEnv {
number: U256::from(parent.number + 1),
coinbase: self.suggested_fee_recipient(),
timestamp: U256::from(self.timestamp()),
difficulty: U256::ZERO,
prevrandao: Some(self.prev_randao()),
gas_limit: U256::from(parent.gas_limit),
// calculate basefee based on parent block's gas usage
basefee: U256::from(
parent
.next_block_base_fee(chain_spec.base_fee_params(self.timestamp()))
.unwrap_or_default(),
),
// calculate excess gas based on parent block's blob gas usage
blob_excess_gas_and_price,
};
(CfgEnvWithHandlerCfg::new_with_spec_id(cfg, spec_id), block_env)
self.0.cfg_and_block_env(chain_spec, parent)
}
}
@ -190,10 +171,156 @@ impl EngineTypes for CustomEngineTypes {
}
}
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
struct MyCustomNode;
/// Configure the node types
impl NodeTypes for MyCustomNode {
type Primitives = ();
// use the custom engine types
type Engine = CustomEngineTypes;
// use the default ethereum EVM config
type Evm = EthEvmConfig;
fn evm_config(&self) -> Self::Evm {
Self::Evm::default()
}
}
/// Implement the Node trait for the custom node
///
/// This provides a preset configuration for the node
impl<N> Node<N> for MyCustomNode
where
N: FullNodeTypes<Engine = CustomEngineTypes>,
{
type PoolBuilder = EthereumPoolBuilder;
type NetworkBuilder = EthereumNetworkBuilder;
type PayloadBuilder = CustomPayloadServiceBuilder;
fn components(
self,
) -> ComponentsBuilder<N, Self::PoolBuilder, Self::PayloadBuilder, Self::NetworkBuilder> {
ComponentsBuilder::default()
.node_types::<N>()
.pool(EthereumPoolBuilder::default())
.payload(CustomPayloadServiceBuilder::default())
.network(EthereumNetworkBuilder::default())
}
}
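// With the `Node` preset above, `main` below can hand the whole configuration to the builder in a
// single call (`launch_node(MyCustomNode::default())`) instead of spelling out `with_types` and
// `with_components` separately.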
/// A custom payload service builder that supports the custom engine types
#[derive(Debug, Default, Clone)]
#[non_exhaustive]
pub struct CustomPayloadServiceBuilder;
impl<Node, Pool> PayloadServiceBuilder<Node, Pool> for CustomPayloadServiceBuilder
where
Node: FullNodeTypes<Engine = CustomEngineTypes>,
Pool: TransactionPool + Unpin + 'static,
{
async fn spawn_payload_service(
self,
ctx: &BuilderContext<Node>,
pool: Pool,
) -> eyre::Result<PayloadBuilderHandle<Node::Engine>> {
let payload_builder = CustomPayloadBuilder::default();
let conf = ctx.payload_builder_config();
let payload_job_config = BasicPayloadJobGeneratorConfig::default()
.interval(conf.interval())
.deadline(conf.deadline())
.max_payload_tasks(conf.max_payload_tasks())
.extradata(conf.extradata_rlp_bytes())
.max_gas_limit(conf.max_gas_limit());
let payload_generator = BasicPayloadJobGenerator::with_builder(
ctx.provider().clone(),
pool,
ctx.task_executor().clone(),
payload_job_config,
ctx.chain_spec(),
payload_builder,
);
let (payload_service, payload_builder) =
PayloadBuilderService::new(payload_generator, ctx.provider().canonical_state_stream());
ctx.task_executor().spawn_critical("payload builder service", Box::pin(payload_service));
Ok(payload_builder)
}
}
/// The type responsible for building custom payloads
#[derive(Debug, Default, Clone)]
#[non_exhaustive]
pub struct CustomPayloadBuilder;
impl<Pool, Client> PayloadBuilder<Pool, Client> for CustomPayloadBuilder
where
Client: StateProviderFactory,
Pool: TransactionPool,
{
type Attributes = CustomPayloadBuilderAttributes;
type BuiltPayload = EthBuiltPayload;
fn try_build(
&self,
args: BuildArguments<Pool, Client, Self::Attributes, Self::BuiltPayload>,
) -> Result<BuildOutcome<Self::BuiltPayload>, PayloadBuilderError> {
let BuildArguments { client, pool, cached_reads, config, cancel, best_payload } = args;
let PayloadConfig {
initialized_block_env,
initialized_cfg,
parent_block,
extra_data,
attributes,
chain_spec,
} = config;
// This reuses the default EthereumPayloadBuilder to build the payload
// but any custom logic can be implemented here
reth_ethereum_payload_builder::EthereumPayloadBuilder::default().try_build(BuildArguments {
client,
pool,
cached_reads,
config: PayloadConfig {
initialized_block_env,
initialized_cfg,
parent_block,
extra_data,
attributes: attributes.0,
chain_spec,
},
cancel,
best_payload,
})
}
fn build_empty_payload(
client: &Client,
config: PayloadConfig<Self::Attributes>,
) -> Result<Self::BuiltPayload, PayloadBuilderError> {
let PayloadConfig {
initialized_block_env,
initialized_cfg,
parent_block,
extra_data,
attributes,
chain_spec,
} = config;
<reth_ethereum_payload_builder::EthereumPayloadBuilder as PayloadBuilder<Pool, Client>>::build_empty_payload(
    client,
    PayloadConfig {
        initialized_block_env,
        initialized_cfg,
        parent_block,
        extra_data,
        attributes: attributes.0,
        chain_spec,
    },
)
}
}
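// Illustrative note (not part of this example): `CustomPayloadBuilderAttributes` is a thin newtype
// over `EthPayloadBuilderAttributes`, so the extra `custom` field carried by
// `CustomPayloadAttributes` never reaches `try_build`. If that value should steer payload
// construction, one option is to widen the newtype, e.g.
//
//     pub struct CustomPayloadBuilderAttributes {
//         inner: EthPayloadBuilderAttributes,
//         custom: u64, // assuming the same type as the `custom` field on CustomPayloadAttributes
//     }
//
// and branch on `config.attributes.custom` before delegating to the default Ethereum builder.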
#[tokio::main]
async fn main() -> eyre::Result<()> {
// this launches a test node with http
let rpc_args = RpcServerArgs::default().with_http();
let _guard = RethTracer::new().init()?;
let tasks = TaskManager::current();
// create an ethereum chain spec with the relevant hardforks activated
let spec = ChainSpec::builder()
@ -204,46 +331,17 @@ async fn main() -> eyre::Result<()> {
.shanghai_activated()
.build();
let genesis_hash = spec.genesis_hash();
// create node config
let node_config = NodeConfig::test().with_rpc(rpc_args).with_chain(spec);
let node_config =
NodeConfig::test().with_rpc(RpcServerArgs::default().with_http()).with_chain(spec);
let (handle, _manager) = spawn_node(node_config).await.unwrap();
let handle = NodeBuilder::new(node_config)
.testing_node(tasks.executor())
.launch_node(MyCustomNode::default())
.await
.unwrap();
// call a function on the node
let client = handle.rpc_server_handles().auth.http_client();
let block_number = client.block_number().await.unwrap();
println!("Node started");
// it should be zero, since this is an ephemeral test node
assert_eq!(block_number, U256::ZERO);
// call the engine_forkchoiceUpdated function with payload attributes
let forkchoice_state = ForkchoiceState {
head_block_hash: genesis_hash,
safe_block_hash: genesis_hash,
finalized_block_hash: genesis_hash,
};
let payload_attributes = CustomPayloadAttributes {
inner: EthPayloadAttributes {
timestamp: 1,
prev_randao: Default::default(),
suggested_fee_recipient: Default::default(),
withdrawals: Some(vec![]),
parent_beacon_block_root: None,
},
custom: 42,
};
// call the engine_forkchoiceUpdated function with payload attributes
let res = <HttpClient as EngineApiClient<CustomEngineTypes>>::fork_choice_updated_v2(
&client,
forkchoice_state,
Some(payload_attributes),
)
.await;
assert!(res.is_ok());
Ok(())
handle.node_exit_future.await
}

View File

@ -5,17 +5,15 @@ publish = false
edition.workspace = true
license.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
reth.workspace = true
reth-primitives.workspace = true
reth-node-api.workspace = true
reth-basic-payload-builder.workspace = true
reth-payload-builder.workspace = true
reth-node-ethereum.workspace = true
reth-ethereum-payload-builder.workspace = true
tracing.workspace = true
clap = { workspace = true, features = ["derive"] }
futures-util.workspace = true
eyre.workspace = true
tokio.workspace = true
eyre.workspace = true

View File

@ -8,58 +8,40 @@
//! ```
//!
//! This launches the regular reth node, overriding the engine API payload builder with our custom one.
use clap::Parser;
#![warn(unused_crate_dependencies)]
use generator::EmptyBlockPayloadJobGenerator;
use reth::{
cli::{
components::RethNodeComponents,
config::PayloadBuilderConfig,
ext::{NoArgsCliExt, RethNodeCommandConfig},
Cli,
},
builder::{components::PayloadServiceBuilder, node::FullNodeTypes, BuilderContext},
cli::{config::PayloadBuilderConfig, Cli},
payload::PayloadBuilderHandle,
providers::CanonStateSubscriptions,
tasks::TaskSpawner,
transaction_pool::TransactionPool,
};
use reth_basic_payload_builder::{BasicPayloadJobGeneratorConfig, PayloadBuilder};
use reth_node_api::EngineTypes;
use reth_basic_payload_builder::BasicPayloadJobGeneratorConfig;
use reth_node_ethereum::{EthEngineTypes, EthereumNode};
use reth_payload_builder::PayloadBuilderService;
pub mod generator;
pub mod job;
fn main() {
Cli::<NoArgsCliExt<MyCustomBuilder>>::parse()
.with_node_extension(MyCustomBuilder::default())
.run()
.unwrap();
}
/// Our custom cli args extension that adds one flag to reth default CLI.
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
struct MyCustomBuilder;
pub struct CustomPayloadBuilder;
impl RethNodeCommandConfig for MyCustomBuilder {
fn spawn_payload_builder_service<Conf, Reth, Builder, Engine>(
&mut self,
conf: &Conf,
components: &Reth,
payload_builder: Builder,
) -> eyre::Result<PayloadBuilderHandle<Engine>>
where
Conf: PayloadBuilderConfig,
Reth: RethNodeComponents,
Engine: EngineTypes + 'static,
Builder: PayloadBuilder<
Reth::Pool,
Reth::Provider,
Attributes = Engine::PayloadBuilderAttributes,
BuiltPayload = Engine::BuiltPayload,
> + Unpin
+ 'static,
{
impl<Node, Pool> PayloadServiceBuilder<Node, Pool> for CustomPayloadBuilder
where
Node: FullNodeTypes<Engine = EthEngineTypes>,
Pool: TransactionPool + Unpin + 'static,
{
async fn spawn_payload_service(
self,
ctx: &BuilderContext<Node>,
pool: Pool,
) -> eyre::Result<PayloadBuilderHandle<Node::Engine>> {
tracing::info!("Spawning a custom payload builder");
let conf = ctx.payload_builder_config();
let payload_job_config = BasicPayloadJobGeneratorConfig::default()
.interval(conf.interval())
@ -69,23 +51,38 @@ impl RethNodeCommandConfig for MyCustomBuilder {
.max_gas_limit(conf.max_gas_limit());
let payload_generator = EmptyBlockPayloadJobGenerator::with_builder(
components.provider(),
components.pool(),
components.task_executor(),
ctx.provider().clone(),
pool,
ctx.task_executor().clone(),
payload_job_config,
components.chain_spec().clone(),
payload_builder,
ctx.chain_spec().clone(),
reth_ethereum_payload_builder::EthereumPayloadBuilder::default(),
);
let (payload_service, payload_builder) = PayloadBuilderService::new(
payload_generator,
components.events().canonical_state_stream(),
);
let (payload_service, payload_builder) =
PayloadBuilderService::new(payload_generator, ctx.provider().canonical_state_stream());
components
.task_executor()
ctx.task_executor()
.spawn_critical("custom payload builder service", Box::pin(payload_service));
Ok(payload_builder)
}
}
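// The wiring above mirrors the payload service setup in the custom-node example; the
// customization here is the `EmptyBlockPayloadJobGenerator` from the local `generator` module,
// which drives the default `EthereumPayloadBuilder` handed to it.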
fn main() {
Cli::parse_args()
.run(|builder, _| async move {
let handle = builder
.with_types(EthereumNode::default())
// Configure the components of the node
// use default ethereum components but use our custom payload builder
.with_components(
EthereumNode::components().payload(CustomPayloadBuilder::default()),
)
.launch()
.await?;
handle.wait_for_node_exit().await
})
.unwrap();
}

View File

@ -7,10 +7,6 @@ license.workspace = true
[dependencies]
reth.workspace = true
reth-node-ethereum.workspace = true
clap = { workspace = true, features = ["derive"] }
jsonrpsee = { workspace = true, features = ["server", "macros"] }
futures-util.workspace = true
eyre.workspace = true
[dev-dependencies]
tokio.workspace = true
futures-util.workspace = true

View File

@ -8,35 +8,65 @@
//!
//! If no recipients are specified, all transactions will be traced.
#![warn(unused_crate_dependencies)]
use clap::Parser;
use futures_util::StreamExt;
use reth::{
cli::{
components::{RethNodeComponents, RethRpcComponents, RethRpcServerHandles},
config::RethRpcConfig,
ext::{RethCliExt, RethNodeCommandConfig},
Cli,
},
builder::NodeHandle,
cli::Cli,
primitives::{Address, IntoRecoveredTransaction},
rpc::{
compat::transaction::transaction_to_call_request,
types::trace::{parity::TraceType, tracerequest::TraceCallRequest},
},
tasks::TaskSpawner,
transaction_pool::TransactionPool,
};
use reth_node_ethereum::node::EthereumNode;
use std::collections::HashSet;
fn main() {
Cli::<MyRethCliExt>::parse().run().unwrap();
}
Cli::<RethCliTxpoolExt>::parse()
.run(|builder, args| async move {
// launch the node
let NodeHandle { mut node, node_exit_future } =
builder.node(EthereumNode::default()).launch().await?;
/// The type that tells the reth CLI what extensions to use
struct MyRethCliExt;
let recipients = args.recipients.iter().copied().collect::<HashSet<_>>();
impl RethCliExt for MyRethCliExt {
/// This tells the reth CLI to trace addresses via `RethCliTxpoolExt`
type Node = RethCliTxpoolExt;
// create a new subscription to pending transactions
let mut pending_transactions = node.pool.new_pending_pool_transactions_listener();
// get an instance of the `trace_` API handler
let traceapi = node.rpc_registry.trace_api();
println!("Spawning trace task!");
// Spawn an async block to listen for transactions.
node.task_executor.spawn(Box::pin(async move {
// Waiting for new transactions
while let Some(event) = pending_transactions.next().await {
let tx = event.transaction;
println!("Transaction received: {tx:?}");
if let Some(tx_recipient_address) = tx.to() {
if recipients.is_empty() || recipients.contains(&tx_recipient_address) {
// trace the transaction with `trace_call`
let callrequest =
transaction_to_call_request(tx.to_recovered_transaction());
let tracerequest = TraceCallRequest::new(callrequest)
.with_trace_type(TraceType::Trace);
if let Ok(trace_result) = traceapi.trace_call(tracerequest).await {
let hash = tx.hash();
println!("trace result for transaction {hash}: {trace_result:?}");
}
}
}
}
}));
node_exit_future.await
})
.unwrap();
}
/// Our custom cli args extension that adds one flag to the reth default CLI.
@ -46,50 +76,3 @@ struct RethCliTxpoolExt {
#[arg(long, value_delimiter = ',')]
pub recipients: Vec<Address>,
}
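// An illustrative usage note: with `value_delimiter = ','` the flag accepts a comma-separated
// list, e.g. `--recipients 0x1111...,0x2222...` (placeholder addresses); omitting it leaves the
// set empty, which is the "trace every pending transaction" case handled in `main` above.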
impl RethNodeCommandConfig for RethCliTxpoolExt {
fn on_rpc_server_started<Conf, Reth>(
&mut self,
_config: &Conf,
components: &Reth,
rpc_components: RethRpcComponents<'_, Reth>,
_handles: RethRpcServerHandles,
) -> eyre::Result<()>
where
Conf: RethRpcConfig,
Reth: RethNodeComponents,
{
let recipients = self.recipients.iter().copied().collect::<HashSet<_>>();
// create a new subscription to pending transactions
let mut pending_transactions = components.pool().new_pending_pool_transactions_listener();
// get an instance of the `trace_` API handler
let traceapi = rpc_components.registry.trace_api();
println!("Spawning trace task!");
// Spawn an async block to listen for transactions.
components.task_executor().spawn(Box::pin(async move {
// Waiting for new transactions
while let Some(event) = pending_transactions.next().await {
let tx = event.transaction;
println!("Transaction received: {tx:?}");
if let Some(tx_recipient_address) = tx.to() {
if recipients.is_empty() || recipients.contains(&tx_recipient_address) {
// trace the transaction with `trace_call`
let callrequest =
transaction_to_call_request(tx.to_recovered_transaction());
let tracerequest =
TraceCallRequest::new(callrequest).with_trace_type(TraceType::Trace);
if let Ok(trace_result) = traceapi.trace_call(tracerequest).await {
let hash = tx.hash();
println!("trace result for transaction {hash}: {trace_result:?}");
}
}
}
}
}));
Ok(())
}
}