mirror of
https://github.com/hl-archive-node/nanoreth.git
synced 2025-12-06 10:59:55 +00:00
Moving more types from reth crate to node core crate (#6102)
Co-authored-by: Matthias Seitz <matthias.seitz@outlook.de>
This commit is contained in:
106
Cargo.lock
generated
106
Cargo.lock
generated
@ -1503,7 +1503,7 @@ version = "7.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7c64043d6c7b7a4c58e39e7efccfdea7b93d885a795d0c054a69dbbf4dd52686"
|
||||
dependencies = [
|
||||
"crossterm",
|
||||
"crossterm 0.27.0",
|
||||
"strum",
|
||||
"strum_macros",
|
||||
"unicode-width",
|
||||
@ -1718,6 +1718,22 @@ version = "0.8.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"
|
||||
|
||||
[[package]]
|
||||
name = "crossterm"
|
||||
version = "0.25.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e64e6c0fbe2c17357405f7c758c1ef960fce08bdfb2c03d88d2a18d7e09c4b67"
|
||||
dependencies = [
|
||||
"bitflags 1.3.2",
|
||||
"crossterm_winapi",
|
||||
"libc",
|
||||
"mio",
|
||||
"parking_lot 0.12.1",
|
||||
"signal-hook",
|
||||
"signal-hook-mio",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossterm"
|
||||
version = "0.27.0"
|
||||
@ -5578,7 +5594,7 @@ checksum = "a5659e52e4ba6e07b2dad9f1158f578ef84a73762625ddb51536019f34d180eb"
|
||||
dependencies = [
|
||||
"bitflags 2.4.2",
|
||||
"cassowary",
|
||||
"crossterm",
|
||||
"crossterm 0.27.0",
|
||||
"indoc",
|
||||
"itertools 0.12.0",
|
||||
"lru 0.12.1",
|
||||
@ -5767,7 +5783,7 @@ dependencies = [
|
||||
"comfy-table",
|
||||
"confy",
|
||||
"const-str",
|
||||
"crossterm",
|
||||
"crossterm 0.27.0",
|
||||
"dirs-next",
|
||||
"eyre",
|
||||
"fdlimit",
|
||||
@ -5810,6 +5826,7 @@ dependencies = [
|
||||
"reth-nippy-jar",
|
||||
"reth-node-api",
|
||||
"reth-node-builder",
|
||||
"reth-node-core",
|
||||
"reth-optimism-payload-builder",
|
||||
"reth-payload-builder",
|
||||
"reth-payload-validator",
|
||||
@ -6447,6 +6464,76 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "reth-node-core"
|
||||
version = "0.1.0-alpha.15"
|
||||
dependencies = [
|
||||
"alloy-chains",
|
||||
"alloy-rlp",
|
||||
"backon",
|
||||
"boyer-moore-magiclen",
|
||||
"clap",
|
||||
"comfy-table",
|
||||
"confy",
|
||||
"const-str",
|
||||
"crossterm 0.27.0",
|
||||
"dirs-next",
|
||||
"eyre",
|
||||
"fdlimit",
|
||||
"futures",
|
||||
"human_bytes",
|
||||
"humantime",
|
||||
"hyper",
|
||||
"itertools 0.12.0",
|
||||
"metrics",
|
||||
"metrics-exporter-prometheus",
|
||||
"metrics-process",
|
||||
"metrics-util",
|
||||
"once_cell",
|
||||
"pin-project",
|
||||
"pretty_assertions",
|
||||
"proptest",
|
||||
"rand 0.8.5",
|
||||
"rayon",
|
||||
"reth-basic-payload-builder",
|
||||
"reth-config",
|
||||
"reth-consensus-common",
|
||||
"reth-db",
|
||||
"reth-discv4",
|
||||
"reth-ethereum-payload-builder",
|
||||
"reth-interfaces",
|
||||
"reth-metrics",
|
||||
"reth-net-nat",
|
||||
"reth-network",
|
||||
"reth-network-api",
|
||||
"reth-node-api",
|
||||
"reth-optimism-payload-builder",
|
||||
"reth-payload-builder",
|
||||
"reth-primitives",
|
||||
"reth-provider",
|
||||
"reth-rpc",
|
||||
"reth-rpc-api",
|
||||
"reth-rpc-builder",
|
||||
"reth-rpc-engine-api",
|
||||
"reth-rpc-types",
|
||||
"reth-rpc-types-compat",
|
||||
"reth-tasks",
|
||||
"reth-tracing",
|
||||
"reth-transaction-pool",
|
||||
"revm-inspectors",
|
||||
"secp256k1 0.27.0",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"shellexpand",
|
||||
"tempfile",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"toml 0.8.8",
|
||||
"tracing",
|
||||
"tui",
|
||||
"vergen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "reth-optimism-payload-builder"
|
||||
version = "0.1.0-alpha.15"
|
||||
@ -8748,6 +8835,19 @@ dependencies = [
|
||||
"termcolor",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tui"
|
||||
version = "0.19.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ccdd26cbd674007e649a272da4475fb666d3aa0ad0531da7136db6fab0e5bad1"
|
||||
dependencies = [
|
||||
"bitflags 1.3.2",
|
||||
"cassowary",
|
||||
"crossterm 0.25.0",
|
||||
"unicode-segmentation",
|
||||
"unicode-width",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tungstenite"
|
||||
version = "0.20.1"
|
||||
|
||||
@ -37,6 +37,7 @@ members = [
|
||||
"crates/rpc/rpc-types/",
|
||||
"crates/rpc/rpc-types-compat/",
|
||||
"crates/node-builder/",
|
||||
"crates/node-core/",
|
||||
"crates/node-api/",
|
||||
"crates/snapshot/",
|
||||
"crates/stages/",
|
||||
@ -115,6 +116,7 @@ reth-db = { path = "crates/storage/db" }
|
||||
reth-discv4 = { path = "crates/net/discv4" }
|
||||
reth-dns-discovery = { path = "crates/net/dns" }
|
||||
reth-node-builder = { path = "crates/node-builder" }
|
||||
reth-node-core = { path = "crates/node-core" }
|
||||
reth-node-api = { path = "crates/node-api" }
|
||||
reth-downloaders = { path = "crates/net/downloaders" }
|
||||
reth-ecies = { path = "crates/net/ecies" }
|
||||
|
||||
@ -56,6 +56,7 @@ reth-trie.workspace = true
|
||||
reth-nippy-jar.workspace = true
|
||||
reth-node-api.workspace = true
|
||||
reth-node-builder.workspace = true
|
||||
reth-node-core.workspace = true
|
||||
|
||||
# crypto
|
||||
alloy-rlp.workspace = true
|
||||
@ -159,6 +160,7 @@ optimism = [
|
||||
"reth-ethereum-payload-builder/optimism",
|
||||
"reth-node-api/optimism",
|
||||
"reth-node-builder/optimism",
|
||||
"reth-node-core/optimism",
|
||||
]
|
||||
|
||||
# no-op feature flag for switching between the `optimism` and default functionality in CI matrices
|
||||
|
||||
@ -1,4 +1,5 @@
|
||||
//! Support for customizing the node
|
||||
|
||||
use super::cli::{components::RethRpcServerHandles, ext::DefaultRethNodeCommandConfig};
|
||||
use crate::{
|
||||
args::{
|
||||
@ -63,6 +64,7 @@ use reth_network_api::{NetworkInfo, PeersInfo};
|
||||
use reth_node_builder::EthEngineTypes;
|
||||
#[cfg(feature = "optimism")]
|
||||
use reth_node_builder::OptimismEngineTypes;
|
||||
|
||||
use reth_payload_builder::PayloadBuilderHandle;
|
||||
use reth_primitives::{
|
||||
constants::eip4844::{LoadKzgSettingsError, MAINNET_KZG_TRUSTED_SETUP},
|
||||
@ -1401,9 +1403,10 @@ impl NodeHandle {
|
||||
/// let (_handle, _manager) = spawn_node(builder).await.unwrap();
|
||||
/// }
|
||||
/// ```
|
||||
|
||||
pub async fn spawn_node(config: NodeConfig) -> eyre::Result<(NodeHandle, TaskManager)> {
|
||||
let task_manager = TaskManager::current();
|
||||
let ext = DefaultRethNodeCommandConfig;
|
||||
let ext = DefaultRethNodeCommandConfig::default();
|
||||
Ok((config.launch::<()>(ext, task_manager.executor()).await?, task_manager))
|
||||
}
|
||||
|
||||
|
||||
@ -33,7 +33,7 @@ impl RethCliExt for () {
|
||||
}
|
||||
|
||||
/// A trait that allows for extending and customizing parts of the node command
|
||||
/// [NodeCommand](crate::commands::node::NodeCommand).
|
||||
/// [NodeCommand](crate::cli::node::NodeCommand).
|
||||
///
|
||||
/// The functions are invoked during the initialization of the node command in the following order:
|
||||
///
|
||||
@ -185,7 +185,7 @@ pub trait RethNodeCommandExt: RethNodeCommandConfig + fmt::Debug + clap::Args {}
|
||||
impl<T> RethNodeCommandExt for T where T: RethNodeCommandConfig + fmt::Debug + clap::Args {}
|
||||
|
||||
/// The default configuration for the reth node command
|
||||
/// [Command](crate::commands::node::NodeCommand).
|
||||
/// [Command](crate::cli::node::NodeCommand).
|
||||
///
|
||||
/// This is a convenience type for [NoArgs<()>].
|
||||
#[derive(Debug, Clone, Copy, Default, Args)]
|
||||
@ -208,7 +208,7 @@ impl<Conf: RethNodeCommandConfig> RethCliExt for NoArgsCliExt<Conf> {
|
||||
/// additional CLI arguments.
|
||||
///
|
||||
/// Note: This type must be manually filled with a [RethNodeCommandConfig] manually before executing
|
||||
/// the [NodeCommand](crate::commands::node::NodeCommand).
|
||||
/// the [NodeCommand](crate::cli::node::NodeCommand).
|
||||
#[derive(Debug, Clone, Copy, Default, Args)]
|
||||
pub struct NoArgs<T = ()> {
|
||||
#[clap(skip)]
|
||||
|
||||
@ -21,7 +21,6 @@ pub mod components;
|
||||
pub mod config;
|
||||
pub mod db_type;
|
||||
pub mod ext;
|
||||
|
||||
/// The main reth cli interface.
|
||||
///
|
||||
/// This is the entrypoint to the executable.
|
||||
|
||||
@ -7,13 +7,13 @@ use std::{
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
|
||||
use crate::utils::DbTool;
|
||||
use clap::Parser;
|
||||
|
||||
use crate::{
|
||||
args::DatabaseArgs,
|
||||
dirs::{DataDirPath, PlatformPath},
|
||||
utils::DbTool,
|
||||
};
|
||||
use clap::Parser;
|
||||
|
||||
use reth_db::{
|
||||
cursor::DbCursorRO, database::Database, open_db_read_only, table::Table, transaction::DbTx,
|
||||
AccountChangeSet, AccountHistory, AccountsTrie, BlockBodyIndices, BlockOmmers,
|
||||
|
||||
@ -1,13 +1,6 @@
|
||||
//! Command for debugging block building.
|
||||
|
||||
use crate::{
|
||||
args::{
|
||||
utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
|
||||
DatabaseArgs,
|
||||
},
|
||||
dirs::{DataDirPath, MaybePlatformPath},
|
||||
runner::CliContext,
|
||||
};
|
||||
use crate::runner::CliContext;
|
||||
use alloy_rlp::Decodable;
|
||||
use clap::Parser;
|
||||
use eyre::Context;
|
||||
@ -47,6 +40,13 @@ use reth_transaction_pool::{
|
||||
use std::{path::PathBuf, str::FromStr, sync::Arc};
|
||||
use tracing::*;
|
||||
|
||||
use crate::{
|
||||
args::{
|
||||
utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
|
||||
DatabaseArgs,
|
||||
},
|
||||
dirs::{DataDirPath, MaybePlatformPath},
|
||||
};
|
||||
#[cfg(not(feature = "optimism"))]
|
||||
use reth_payload_builder::EthPayloadBuilderAttributes;
|
||||
|
||||
|
||||
@ -27,6 +27,7 @@ use reth_interfaces::{
|
||||
};
|
||||
use reth_network::{NetworkEvents, NetworkHandle};
|
||||
use reth_network_api::NetworkInfo;
|
||||
|
||||
use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, B256};
|
||||
use reth_provider::{BlockExecutionWriter, HeaderSyncMode, ProviderFactory, StageCheckpointReader};
|
||||
use reth_stages::{
|
||||
|
||||
@ -18,6 +18,7 @@ use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx, DatabaseEn
|
||||
use reth_interfaces::{consensus::Consensus, p2p::full_block::FullBlockClient};
|
||||
use reth_network::NetworkHandle;
|
||||
use reth_network_api::NetworkInfo;
|
||||
|
||||
use reth_primitives::{
|
||||
fs,
|
||||
stage::{StageCheckpoint, StageId},
|
||||
|
||||
@ -1,12 +1,7 @@
|
||||
//! Command that initializes the node by importing a chain from a file.
|
||||
|
||||
use crate::{
|
||||
args::{
|
||||
utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
|
||||
DatabaseArgs,
|
||||
},
|
||||
commands::node::events::{handle_events, NodeEvent},
|
||||
dirs::{DataDirPath, MaybePlatformPath},
|
||||
init::init_genesis,
|
||||
version::SHORT_VERSION,
|
||||
};
|
||||
@ -31,6 +26,14 @@ use std::{path::PathBuf, sync::Arc};
|
||||
use tokio::sync::watch;
|
||||
use tracing::{debug, info};
|
||||
|
||||
use crate::{
|
||||
args::{
|
||||
utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
|
||||
DatabaseArgs,
|
||||
},
|
||||
dirs::{DataDirPath, MaybePlatformPath},
|
||||
};
|
||||
|
||||
/// Syncs RLP encoded blocks from a file.
|
||||
#[derive(Debug, Parser)]
|
||||
pub struct ImportCommand {
|
||||
|
||||
@ -1,18 +1,19 @@
|
||||
//! Command that initializes the node from a genesis file.
|
||||
|
||||
use crate::init::init_genesis;
|
||||
use clap::Parser;
|
||||
use reth_db::init_db;
|
||||
use reth_primitives::ChainSpec;
|
||||
use std::sync::Arc;
|
||||
use tracing::info;
|
||||
|
||||
use crate::{
|
||||
args::{
|
||||
utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
|
||||
DatabaseArgs,
|
||||
},
|
||||
dirs::{DataDirPath, MaybePlatformPath},
|
||||
init::init_genesis,
|
||||
};
|
||||
use clap::Parser;
|
||||
use reth_db::init_db;
|
||||
use reth_primitives::ChainSpec;
|
||||
use std::sync::Arc;
|
||||
use tracing::info;
|
||||
|
||||
/// Initializes the database with the genesis block.
|
||||
#[derive(Debug, Parser)]
|
||||
|
||||
@ -2,6 +2,15 @@
|
||||
//!
|
||||
//! Starts the client
|
||||
|
||||
use clap::{value_parser, Parser};
|
||||
use reth_auto_seal_consensus::AutoSealConsensus;
|
||||
use reth_beacon_consensus::BeaconConsensus;
|
||||
use reth_interfaces::consensus::Consensus;
|
||||
use reth_primitives::ChainSpec;
|
||||
use std::{net::SocketAddr, path::PathBuf, sync::Arc};
|
||||
|
||||
pub mod cl_events;
|
||||
pub mod events;
|
||||
use crate::{
|
||||
args::{
|
||||
utils::{chain_help, genesis_value_parser, parse_socket_address, SUPPORTED_CHAINS},
|
||||
@ -13,15 +22,6 @@ use crate::{
|
||||
dirs::{DataDirPath, MaybePlatformPath},
|
||||
runner::CliContext,
|
||||
};
|
||||
use clap::{value_parser, Parser};
|
||||
use reth_auto_seal_consensus::AutoSealConsensus;
|
||||
use reth_beacon_consensus::BeaconConsensus;
|
||||
use reth_interfaces::consensus::Consensus;
|
||||
use reth_primitives::ChainSpec;
|
||||
use std::{net::SocketAddr, path::PathBuf, sync::Arc};
|
||||
|
||||
pub mod cl_events;
|
||||
pub mod events;
|
||||
|
||||
/// Start the node
|
||||
#[derive(Debug, Parser)]
|
||||
|
||||
@ -4,6 +4,11 @@ use crate::{
|
||||
dirs::{DataDirPath, MaybePlatformPath},
|
||||
utils::DbTool,
|
||||
};
|
||||
|
||||
use crate::args::{
|
||||
utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
|
||||
DatabaseArgs,
|
||||
};
|
||||
use clap::Parser;
|
||||
use reth_db::{
|
||||
cursor::DbCursorRO, database::Database, init_db, table::TableImporter, tables,
|
||||
@ -23,10 +28,6 @@ mod execution;
|
||||
use execution::dump_execution_stage;
|
||||
|
||||
mod merkle;
|
||||
use crate::args::{
|
||||
utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
|
||||
DatabaseArgs,
|
||||
};
|
||||
use merkle::dump_merkle_stage;
|
||||
|
||||
/// `reth dump-stage` command
|
||||
|
||||
@ -17,6 +17,7 @@ use reth_beacon_consensus::BeaconConsensus;
|
||||
use reth_config::Config;
|
||||
use reth_db::init_db;
|
||||
use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder;
|
||||
|
||||
use reth_primitives::ChainSpec;
|
||||
use reth_provider::{ProviderFactory, StageCheckpointReader};
|
||||
use reth_stages::{
|
||||
|
||||
@ -1,5 +1,11 @@
|
||||
//! Unwinding a certain block range
|
||||
|
||||
use clap::{Parser, Subcommand};
|
||||
use reth_db::{cursor::DbCursorRO, database::Database, open_db, tables, transaction::DbTx};
|
||||
use reth_primitives::{BlockHashOrNumber, ChainSpec};
|
||||
use reth_provider::{BlockExecutionWriter, ProviderFactory};
|
||||
use std::{ops::RangeInclusive, sync::Arc};
|
||||
|
||||
use crate::{
|
||||
args::{
|
||||
utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
|
||||
@ -7,12 +13,6 @@ use crate::{
|
||||
},
|
||||
dirs::{DataDirPath, MaybePlatformPath},
|
||||
};
|
||||
use clap::{Parser, Subcommand};
|
||||
use reth_db::{cursor::DbCursorRO, database::Database, open_db, tables, transaction::DbTx};
|
||||
use reth_primitives::{BlockHashOrNumber, ChainSpec};
|
||||
use reth_provider::{BlockExecutionWriter, ProviderFactory};
|
||||
use std::{ops::RangeInclusive, sync::Arc};
|
||||
|
||||
/// `reth stage unwind` command
|
||||
#[derive(Debug, Parser)]
|
||||
pub struct Command {
|
||||
|
||||
@ -25,6 +25,7 @@
|
||||
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
|
||||
)]
|
||||
#![allow(missing_debug_implementations)]
|
||||
#![allow(dead_code)]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
|
||||
|
||||
pub mod args;
|
||||
@ -44,6 +45,11 @@ pub mod payload {
|
||||
pub use reth_payload_validator::ExecutionPayloadValidator;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_node_core`.
|
||||
pub mod node_core {
|
||||
pub use reth_node_core::*;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_provider`.
|
||||
pub mod providers {
|
||||
pub use reth_provider::*;
|
||||
|
||||
@ -22,6 +22,7 @@ reth-revm.workspace = true
|
||||
reth-transaction-pool.workspace = true
|
||||
reth-node-api.workspace = true
|
||||
|
||||
|
||||
# async
|
||||
futures-util.workspace = true
|
||||
tokio = { workspace = true, features = ["sync", "time"] }
|
||||
|
||||
@ -2,6 +2,13 @@
|
||||
|
||||
use clap::Parser;
|
||||
use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params};
|
||||
|
||||
use reth_primitives::{hex, revm_primitives::FixedBytes, ChainSpec, Genesis};
|
||||
use reth_provider::CanonStateSubscriptions;
|
||||
use reth_transaction_pool::TransactionPool;
|
||||
use std::{sync::Arc, time::Duration};
|
||||
use tokio::time::timeout;
|
||||
|
||||
use reth::{
|
||||
cli::{
|
||||
components::RethNodeComponents,
|
||||
@ -11,11 +18,6 @@ use reth::{
|
||||
runner::CliRunner,
|
||||
tasks::TaskSpawner,
|
||||
};
|
||||
use reth_primitives::{hex, revm_primitives::FixedBytes, ChainSpec, Genesis};
|
||||
use reth_provider::CanonStateSubscriptions;
|
||||
use reth_transaction_pool::TransactionPool;
|
||||
use std::{sync::Arc, time::Duration};
|
||||
use tokio::time::timeout;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct AutoMineConfig;
|
||||
|
||||
128
crates/node-core/Cargo.toml
Normal file
128
crates/node-core/Cargo.toml
Normal file
@ -0,0 +1,128 @@
|
||||
[package]
|
||||
name = "reth-node-core"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
rust-version.workspace = true
|
||||
license.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
|
||||
[dependencies]
|
||||
# reth
|
||||
reth-primitives.workspace = true
|
||||
reth-db = { workspace = true, features = ["mdbx"] }
|
||||
reth-interfaces = { workspace = true, features = ["clap"] }
|
||||
reth-provider.workspace = true
|
||||
reth-network = { workspace = true, features = ["serde"] }
|
||||
reth-rpc-engine-api.workspace = true
|
||||
reth-rpc-builder.workspace = true
|
||||
reth-rpc.workspace = true
|
||||
reth-rpc-types.workspace = true
|
||||
reth-rpc-types-compat.workspace = true
|
||||
reth-rpc-api = { workspace = true, features = ["client"] }
|
||||
reth-transaction-pool.workspace = true
|
||||
reth-tracing.workspace = true
|
||||
reth-config.workspace = true
|
||||
reth-discv4.workspace = true
|
||||
reth-net-nat.workspace = true
|
||||
reth-network-api.workspace = true
|
||||
reth-node-api.workspace = true
|
||||
reth-tasks.workspace = true
|
||||
reth-payload-builder.workspace = true
|
||||
reth-basic-payload-builder.workspace = true
|
||||
reth-optimism-payload-builder = { path = "../../crates/payload/optimism", optional = true }
|
||||
reth-ethereum-payload-builder.workspace = true
|
||||
reth-consensus-common.workspace = true
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# misc
|
||||
boyer-moore-magiclen = "0.2.16"
|
||||
eyre.workspace = true
|
||||
clap = { workspace = true, features = ["derive"] }
|
||||
humantime = "2.1.0"
|
||||
thiserror.workspace = true
|
||||
tempfile.workspace = true
|
||||
const-str = "0.5.6"
|
||||
rayon.workspace = true
|
||||
itertools.workspace = true
|
||||
backon = "0.4"
|
||||
pretty_assertions = "1.3.0"
|
||||
|
||||
|
||||
# http/rpc
|
||||
hyper = "0.14.25"
|
||||
|
||||
|
||||
# test vectors generation
|
||||
proptest.workspace = true
|
||||
rand.workspace = true
|
||||
|
||||
|
||||
# io
|
||||
dirs-next = "2.0.0"
|
||||
shellexpand = "3.0.0"
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
fdlimit = "0.3.0"
|
||||
confy.workspace = true
|
||||
toml = { workspace = true, features = ["display"] }
|
||||
|
||||
# tracing
|
||||
tracing.workspace = true
|
||||
|
||||
# tui
|
||||
comfy-table = "7.0"
|
||||
crossterm = "0.27.0"
|
||||
tui = "0.19.0"
|
||||
human_bytes = "0.4.1"
|
||||
|
||||
|
||||
# crypto
|
||||
alloy-rlp.workspace = true
|
||||
alloy-chains.workspace = true
|
||||
secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] }
|
||||
revm-inspectors.workspace = true
|
||||
|
||||
|
||||
# async
|
||||
tokio = { workspace = true, features = ["sync", "macros", "time", "rt-multi-thread"] }
|
||||
futures.workspace = true
|
||||
pin-project.workspace = true
|
||||
|
||||
# metrics
|
||||
metrics-exporter-prometheus = "0.12.1"
|
||||
metrics-util = "0.15.0"
|
||||
metrics-process = "=1.0.14"
|
||||
reth-metrics.workspace = true
|
||||
metrics.workspace = true
|
||||
once_cell.workspace = true
|
||||
|
||||
[features]
|
||||
optimism = [
|
||||
"reth-primitives/optimism",
|
||||
"reth-optimism-payload-builder/optimism",
|
||||
"reth-interfaces/optimism",
|
||||
"reth-rpc/optimism",
|
||||
"reth-rpc-engine-api/optimism",
|
||||
"reth-transaction-pool/optimism",
|
||||
"reth-provider/optimism",
|
||||
"reth-network/optimism",
|
||||
"reth-network-api/optimism",
|
||||
"reth-payload-builder/optimism",
|
||||
"reth-optimism-payload-builder/optimism",
|
||||
"reth-ethereum-payload-builder/optimism",
|
||||
"reth-node-api/optimism",
|
||||
]
|
||||
|
||||
|
||||
|
||||
[build-dependencies]
|
||||
vergen = { version = "8.0.0", features = ["build", "cargo", "git", "git2"] }
|
||||
15
crates/node-core/build.rs
Normal file
15
crates/node-core/build.rs
Normal file
@ -0,0 +1,15 @@
|
||||
#![allow(missing_docs)]
|
||||
|
||||
use std::error::Error;
|
||||
use vergen::EmitBuilder;
|
||||
|
||||
fn main() -> Result<(), Box<dyn Error>> {
|
||||
// Emit the instructions
|
||||
EmitBuilder::builder()
|
||||
.git_sha(true)
|
||||
.build_timestamp()
|
||||
.cargo_features()
|
||||
.cargo_target_triple()
|
||||
.emit()?;
|
||||
Ok(())
|
||||
}
|
||||
33
crates/node-core/src/args/database_args.rs
Normal file
33
crates/node-core/src/args/database_args.rs
Normal file
@ -0,0 +1,33 @@
|
||||
//! clap [Args](clap::Args) for database configuration
|
||||
|
||||
use clap::Args;
|
||||
use reth_interfaces::db::LogLevel;
|
||||
|
||||
/// Parameters for database configuration
|
||||
#[derive(Debug, Args, PartialEq, Default, Clone, Copy)]
|
||||
#[clap(next_help_heading = "Database")]
|
||||
pub struct DatabaseArgs {
|
||||
/// Database logging level. Levels higher than "notice" require a debug build.
|
||||
#[arg(long = "db.log-level", value_enum)]
|
||||
pub log_level: Option<LogLevel>,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use clap::Parser;
|
||||
|
||||
/// A helper type to parse Args more easily
|
||||
#[derive(Parser)]
|
||||
struct CommandParser<T: Args> {
|
||||
#[clap(flatten)]
|
||||
args: T,
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_database_args() {
|
||||
let default_args = DatabaseArgs::default();
|
||||
let args = CommandParser::<DatabaseArgs>::parse_from(["reth"]).args;
|
||||
assert_eq!(args, default_args);
|
||||
}
|
||||
}
|
||||
87
crates/node-core/src/args/debug_args.rs
Normal file
87
crates/node-core/src/args/debug_args.rs
Normal file
@ -0,0 +1,87 @@
|
||||
//! clap [Args](clap::Args) for debugging purposes
|
||||
|
||||
use clap::Args;
|
||||
use reth_primitives::{TxHash, B256};
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Parameters for debugging purposes
|
||||
#[derive(Debug, Args, PartialEq, Default)]
|
||||
#[clap(next_help_heading = "Debug")]
|
||||
pub struct DebugArgs {
|
||||
/// Prompt the downloader to download blocks one at a time.
|
||||
///
|
||||
/// NOTE: This is for testing purposes only.
|
||||
#[arg(long = "debug.continuous", help_heading = "Debug", conflicts_with = "tip")]
|
||||
pub continuous: bool,
|
||||
|
||||
/// Flag indicating whether the node should be terminated after the pipeline sync.
|
||||
#[arg(long = "debug.terminate", help_heading = "Debug")]
|
||||
pub terminate: bool,
|
||||
|
||||
/// Set the chain tip manually for testing purposes.
|
||||
///
|
||||
/// NOTE: This is a temporary flag
|
||||
#[arg(long = "debug.tip", help_heading = "Debug", conflicts_with = "continuous")]
|
||||
pub tip: Option<B256>,
|
||||
|
||||
/// Runs the sync only up to the specified block.
|
||||
#[arg(long = "debug.max-block", help_heading = "Debug")]
|
||||
pub max_block: Option<u64>,
|
||||
|
||||
/// Print opcode level traces directly to console during execution.
|
||||
#[arg(long = "debug.print-inspector", help_heading = "Debug")]
|
||||
pub print_inspector: bool,
|
||||
|
||||
/// Hook on a specific block during execution.
|
||||
#[arg(
|
||||
long = "debug.hook-block",
|
||||
help_heading = "Debug",
|
||||
conflicts_with = "hook_transaction",
|
||||
conflicts_with = "hook_all"
|
||||
)]
|
||||
pub hook_block: Option<u64>,
|
||||
|
||||
/// Hook on a specific transaction during execution.
|
||||
#[arg(
|
||||
long = "debug.hook-transaction",
|
||||
help_heading = "Debug",
|
||||
conflicts_with = "hook_block",
|
||||
conflicts_with = "hook_all"
|
||||
)]
|
||||
pub hook_transaction: Option<TxHash>,
|
||||
|
||||
/// Hook on every transaction in a block.
|
||||
#[arg(
|
||||
long = "debug.hook-all",
|
||||
help_heading = "Debug",
|
||||
conflicts_with = "hook_block",
|
||||
conflicts_with = "hook_transaction"
|
||||
)]
|
||||
pub hook_all: bool,
|
||||
|
||||
/// The path to store engine API messages at.
|
||||
/// If specified, all of the intercepted engine API messages
|
||||
/// will be written to specified location.
|
||||
#[arg(long = "debug.engine-api-store", help_heading = "Debug", value_name = "PATH")]
|
||||
pub engine_api_store: Option<PathBuf>,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use clap::Parser;
|
||||
|
||||
/// A helper type to parse Args more easily
|
||||
#[derive(Parser)]
|
||||
struct CommandParser<T: Args> {
|
||||
#[clap(flatten)]
|
||||
args: T,
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_database_args() {
|
||||
let default_args = DebugArgs::default();
|
||||
let args = CommandParser::<DebugArgs>::parse_from(["reth"]).args;
|
||||
assert_eq!(args, default_args);
|
||||
}
|
||||
}
|
||||
107
crates/node-core/src/args/dev_args.rs
Normal file
107
crates/node-core/src/args/dev_args.rs
Normal file
@ -0,0 +1,107 @@
|
||||
//! clap [Args](clap::Args) for Dev testnet configuration
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use clap::Args;
|
||||
use humantime::parse_duration;
|
||||
|
||||
/// Parameters for Dev testnet configuration
|
||||
#[derive(Debug, Args, PartialEq, Default, Clone, Copy)]
|
||||
#[clap(next_help_heading = "Dev testnet")]
|
||||
pub struct DevArgs {
|
||||
/// Start the node in dev mode
|
||||
///
|
||||
/// This mode uses a local proof-of-authority consensus engine with either fixed block times
|
||||
/// or automatically mined blocks.
|
||||
/// Disables network discovery and enables local http server.
|
||||
/// Prefunds 20 accounts derived by mnemonic "test test test test test test test test test test
|
||||
/// test junk" with 10 000 ETH each.
|
||||
#[arg(long = "dev", alias = "auto-mine", help_heading = "Dev testnet", verbatim_doc_comment)]
|
||||
pub dev: bool,
|
||||
|
||||
/// How many transactions to mine per block.
|
||||
#[arg(
|
||||
long = "dev.block-max-transactions",
|
||||
help_heading = "Dev testnet",
|
||||
conflicts_with = "block_time"
|
||||
)]
|
||||
pub block_max_transactions: Option<usize>,
|
||||
|
||||
/// Interval between blocks.
|
||||
///
|
||||
/// Parses strings using [humantime::parse_duration]
|
||||
/// --dev.block-time 12s
|
||||
#[arg(
|
||||
long = "dev.block-time",
|
||||
help_heading = "Dev testnet",
|
||||
conflicts_with = "block_max_transactions",
|
||||
value_parser = parse_duration,
|
||||
verbatim_doc_comment
|
||||
)]
|
||||
pub block_time: Option<Duration>,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use clap::Parser;
|
||||
|
||||
/// A helper type to parse Args more easily
|
||||
#[derive(Parser)]
|
||||
struct CommandParser<T: Args> {
|
||||
#[clap(flatten)]
|
||||
args: T,
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_dev_args() {
|
||||
let args = CommandParser::<DevArgs>::parse_from(["reth"]).args;
|
||||
assert_eq!(args, DevArgs { dev: false, block_max_transactions: None, block_time: None });
|
||||
|
||||
let args = CommandParser::<DevArgs>::parse_from(["reth", "--dev"]).args;
|
||||
assert_eq!(args, DevArgs { dev: true, block_max_transactions: None, block_time: None });
|
||||
|
||||
let args = CommandParser::<DevArgs>::parse_from(["reth", "--auto-mine"]).args;
|
||||
assert_eq!(args, DevArgs { dev: true, block_max_transactions: None, block_time: None });
|
||||
|
||||
let args = CommandParser::<DevArgs>::parse_from([
|
||||
"reth",
|
||||
"--dev",
|
||||
"--dev.block-max-transactions",
|
||||
"2",
|
||||
])
|
||||
.args;
|
||||
assert_eq!(args, DevArgs { dev: true, block_max_transactions: Some(2), block_time: None });
|
||||
|
||||
let args =
|
||||
CommandParser::<DevArgs>::parse_from(["reth", "--dev", "--dev.block-time", "1s"]).args;
|
||||
assert_eq!(
|
||||
args,
|
||||
DevArgs {
|
||||
dev: true,
|
||||
block_max_transactions: None,
|
||||
block_time: Some(std::time::Duration::from_secs(1))
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_dev_args_conflicts() {
|
||||
let args = CommandParser::<DevArgs>::try_parse_from([
|
||||
"reth",
|
||||
"--dev",
|
||||
"--dev.block-max-transactions",
|
||||
"2",
|
||||
"--dev.block-time",
|
||||
"1s",
|
||||
]);
|
||||
assert!(args.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn dev_args_default_sanity_check() {
|
||||
let default_args = DevArgs::default();
|
||||
let args = CommandParser::<DevArgs>::parse_from(["reth"]).args;
|
||||
assert_eq!(args, default_args);
|
||||
}
|
||||
}
|
||||
86
crates/node-core/src/args/gas_price_oracle_args.rs
Normal file
86
crates/node-core/src/args/gas_price_oracle_args.rs
Normal file
@ -0,0 +1,86 @@
|
||||
use crate::primitives::U256;
|
||||
use clap::Args;
|
||||
use reth_rpc::eth::gas_oracle::GasPriceOracleConfig;
|
||||
use reth_rpc_builder::constants::{
|
||||
DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE,
|
||||
DEFAULT_MAX_GAS_PRICE,
|
||||
};
|
||||
|
||||
/// Parameters to configure Gas Price Oracle
|
||||
#[derive(Debug, Clone, Copy, Args, PartialEq, Eq)]
|
||||
#[clap(next_help_heading = "Gas Price Oracle")]
|
||||
pub struct GasPriceOracleArgs {
|
||||
/// Number of recent blocks to check for gas price
|
||||
#[arg(long = "gpo.blocks", default_value_t = DEFAULT_GAS_PRICE_BLOCKS)]
|
||||
pub blocks: u32,
|
||||
|
||||
/// Gas Price below which gpo will ignore transactions
|
||||
#[arg(long = "gpo.ignoreprice", default_value_t = DEFAULT_IGNORE_GAS_PRICE.to())]
|
||||
pub ignore_price: u64,
|
||||
|
||||
/// Maximum transaction priority fee(or gasprice before London Fork) to be recommended by gpo
|
||||
#[arg(long = "gpo.maxprice", default_value_t = DEFAULT_MAX_GAS_PRICE.to())]
|
||||
pub max_price: u64,
|
||||
|
||||
/// The percentile of gas prices to use for the estimate
|
||||
#[arg(long = "gpo.percentile", default_value_t = DEFAULT_GAS_PRICE_PERCENTILE)]
|
||||
pub percentile: u32,
|
||||
}
|
||||
|
||||
impl GasPriceOracleArgs {
|
||||
/// Returns a [GasPriceOracleConfig] from the arguments.
|
||||
pub fn gas_price_oracle_config(&self) -> GasPriceOracleConfig {
|
||||
let Self { blocks, ignore_price, max_price, percentile } = self;
|
||||
GasPriceOracleConfig {
|
||||
max_price: Some(U256::from(*max_price)),
|
||||
ignore_price: Some(U256::from(*ignore_price)),
|
||||
percentile: *percentile,
|
||||
blocks: *blocks,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for GasPriceOracleArgs {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
blocks: DEFAULT_GAS_PRICE_BLOCKS,
|
||||
ignore_price: DEFAULT_IGNORE_GAS_PRICE.to(),
|
||||
max_price: DEFAULT_MAX_GAS_PRICE.to(),
|
||||
percentile: DEFAULT_GAS_PRICE_PERCENTILE,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;
    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[clap(flatten)]
        args: T,
    }

    /// Parsing an empty command line yields the documented GPO defaults.
    #[test]
    fn test_parse_gpo_args() {
        let args = CommandParser::<GasPriceOracleArgs>::parse_from(["reth"]).args;
        assert_eq!(
            args,
            GasPriceOracleArgs {
                blocks: DEFAULT_GAS_PRICE_BLOCKS,
                ignore_price: DEFAULT_IGNORE_GAS_PRICE.to(),
                max_price: DEFAULT_MAX_GAS_PRICE.to(),
                percentile: DEFAULT_GAS_PRICE_PERCENTILE,
            }
        );
    }

    /// The hand-written `Default` impl must stay in sync with the clap defaults.
    #[test]
    fn gpo_args_default_sanity_test() {
        let default_args = GasPriceOracleArgs::default();
        let args = CommandParser::<GasPriceOracleArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
|
||||
179
crates/node-core/src/args/log_args.rs
Normal file
179
crates/node-core/src/args/log_args.rs
Normal file
@ -0,0 +1,179 @@
|
||||
//! clap [Args](clap::Args) for logging configuration.
|
||||
|
||||
use crate::dirs::{LogsDir, PlatformPath};
|
||||
use clap::{ArgAction, Args, ValueEnum};
|
||||
use reth_tracing::{
|
||||
tracing_subscriber::filter::Directive, FileInfo, FileWorkerGuard, LayerInfo, LogFormat,
|
||||
RethTracer, Tracer,
|
||||
};
|
||||
use std::{fmt, fmt::Display};
|
||||
use tracing::{level_filters::LevelFilter, Level};
|
||||
/// Constant to convert megabytes to bytes
|
||||
const MB_TO_BYTES: u64 = 1024 * 1024;
|
||||
|
||||
/// The log configuration.
///
/// Covers three independent destinations — stdout, a rotating log file, and
/// journald — each with its own format and filter, plus shared color and
/// verbosity settings.
#[derive(Debug, Args)]
#[command(next_help_heading = "Logging")]
pub struct LogArgs {
    /// The format to use for logs written to stdout.
    #[arg(long = "log.stdout.format", value_name = "FORMAT", global = true, default_value_t = LogFormat::Terminal)]
    pub log_stdout_format: LogFormat,

    /// The filter to use for logs written to stdout.
    #[arg(
        long = "log.stdout.filter",
        value_name = "FILTER",
        global = true,
        default_value = "info"
    )]
    pub log_stdout_filter: String,

    /// The format to use for logs written to the log file.
    #[arg(long = "log.file.format", value_name = "FORMAT", global = true, default_value_t = LogFormat::Terminal)]
    pub log_file_format: LogFormat,

    /// The filter to use for logs written to the log file.
    #[arg(long = "log.file.filter", value_name = "FILTER", global = true, default_value = "debug")]
    pub log_file_filter: String,

    /// The path to put log files in.
    // `default_value_t` uses `PlatformPath::default()`, i.e. the platform's logs dir.
    #[arg(long = "log.file.directory", value_name = "PATH", global = true, default_value_t)]
    pub log_file_directory: PlatformPath<LogsDir>,

    /// The maximum size (in MB) of one log file.
    #[arg(long = "log.file.max-size", value_name = "SIZE", global = true, default_value_t = 200)]
    pub log_file_max_size: u64,

    /// The maximum amount of log files that will be stored. If set to 0, background file logging
    /// is disabled.
    #[arg(long = "log.file.max-files", value_name = "COUNT", global = true, default_value_t = 5)]
    pub log_file_max_files: usize,

    /// Write logs to journald.
    #[arg(long = "log.journald", global = true)]
    pub journald: bool,

    /// The filter to use for logs written to journald.
    #[arg(
        long = "log.journald.filter",
        value_name = "FILTER",
        global = true,
        default_value = "error"
    )]
    pub journald_filter: String,

    /// Sets whether or not the formatter emits ANSI terminal escape codes for colors and other
    /// text formatting.
    #[arg(
        long,
        value_name = "COLOR",
        global = true,
        default_value_t = ColorMode::Always
    )]
    pub color: ColorMode,

    /// The verbosity settings for the tracer.
    #[clap(flatten)]
    pub verbosity: Verbosity,
}
|
||||
|
||||
impl LogArgs {
|
||||
/// Creates a [LayerInfo] instance.
|
||||
fn layer(&self, format: LogFormat, filter: String, use_color: bool) -> LayerInfo {
|
||||
LayerInfo::new(
|
||||
format,
|
||||
filter,
|
||||
self.verbosity.directive(),
|
||||
if use_color { Some(self.color.to_string()) } else { None },
|
||||
)
|
||||
}
|
||||
|
||||
/// File info from the current log options.
|
||||
fn file_info(&self) -> FileInfo {
|
||||
FileInfo::new(
|
||||
self.log_file_directory.clone().into(),
|
||||
self.log_file_max_size * MB_TO_BYTES,
|
||||
self.log_file_max_files,
|
||||
)
|
||||
}
|
||||
|
||||
/// Initializes tracing with the configured options from cli args.
|
||||
pub fn init_tracing(&self) -> eyre::Result<Option<FileWorkerGuard>> {
|
||||
let mut tracer = RethTracer::new();
|
||||
|
||||
let stdout = self.layer(self.log_stdout_format, self.log_stdout_filter.clone(), true);
|
||||
tracer = tracer.with_stdout(stdout);
|
||||
|
||||
if self.journald {
|
||||
tracer = tracer.with_journald(self.journald_filter.clone());
|
||||
}
|
||||
|
||||
if self.log_file_max_files > 0 {
|
||||
let info = self.file_info();
|
||||
let file = self.layer(self.log_file_format, self.log_file_filter.clone(), false);
|
||||
tracer = tracer.with_file(file, info);
|
||||
}
|
||||
|
||||
let guard = tracer.init()?;
|
||||
Ok(guard)
|
||||
}
|
||||
}
|
||||
|
||||
/// The color mode for the cli.
#[derive(Debug, Copy, Clone, ValueEnum, Eq, PartialEq)]
pub enum ColorMode {
    /// Colors on
    Always,
    /// Colors chosen automatically (was mis-documented as "Colors on")
    Auto,
    /// Colors off
    Never,
}
|
||||
|
||||
impl Display for ColorMode {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
ColorMode::Always => write!(f, "always"),
|
||||
ColorMode::Auto => write!(f, "auto"),
|
||||
ColorMode::Never => write!(f, "never"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The verbosity settings for the cli.
#[derive(Debug, Copy, Clone, Args)]
#[command(next_help_heading = "Display")]
pub struct Verbosity {
    /// Set the minimum log level.
    ///
    /// -v      Errors
    /// -vv     Warnings
    /// -vvv    Info
    /// -vvvv   Debug
    /// -vvvvv  Traces (warning: very verbose!)
    // `ArgAction::Count` stores the number of `-v` occurrences; when the flag
    // is absent, clap applies the default of 3 (Info).
    #[clap(short, long, action = ArgAction::Count, global = true, default_value_t = 3, verbatim_doc_comment, help_heading = "Display")]
    verbosity: u8,

    /// Silence all log output.
    // Takes precedence over `verbosity` (see `Verbosity::directive`).
    #[clap(long, alias = "silent", short = 'q', global = true, help_heading = "Display")]
    quiet: bool,
}
|
||||
|
||||
impl Verbosity {
|
||||
/// Get the corresponding [Directive] for the given verbosity, or none if the verbosity
|
||||
/// corresponds to silent.
|
||||
pub fn directive(&self) -> Directive {
|
||||
if self.quiet {
|
||||
LevelFilter::OFF.into()
|
||||
} else {
|
||||
let level = match self.verbosity - 1 {
|
||||
0 => Level::ERROR,
|
||||
1 => Level::WARN,
|
||||
2 => Level::INFO,
|
||||
3 => Level::DEBUG,
|
||||
_ => Level::TRACE,
|
||||
};
|
||||
|
||||
level.into()
|
||||
}
|
||||
}
|
||||
}
|
||||
62
crates/node-core/src/args/mod.rs
Normal file
62
crates/node-core/src/args/mod.rs
Normal file
@ -0,0 +1,62 @@
|
||||
//! clap argument structs for configuring the node with more granularity via CLI.

/// NetworkArgs struct for configuring the network
mod network_args;
pub use network_args::{DiscoveryArgs, NetworkArgs};

/// RpcServerArgs struct for configuring the RPC
mod rpc_server_args;
pub use rpc_server_args::RpcServerArgs;

/// RpcStateCacheArgs struct for configuring RPC state cache
mod rpc_state_cache_args;
pub use rpc_state_cache_args::RpcStateCacheArgs;

/// DebugArgs struct for debugging purposes
mod debug_args;
pub use debug_args::DebugArgs;

/// DatabaseArgs struct for configuring the database
mod database_args;
pub use database_args::DatabaseArgs;

/// LogArgs struct for configuring the logger
mod log_args;
pub use log_args::{ColorMode, LogArgs};

/// Secret key loading helpers for the p2p identity
mod secret_key;
pub use secret_key::{get_secret_key, SecretKeyError};

/// PayloadBuilderArgs struct for configuring the payload builder
mod payload_builder_args;
pub use payload_builder_args::PayloadBuilderArgs;

/// Stage related arguments
mod stage_args;
pub use stage_args::StageEnum;

/// Gas price oracle related arguments
mod gas_price_oracle_args;
pub use gas_price_oracle_args::GasPriceOracleArgs;

/// TxPoolArgs for configuring the transaction pool
mod txpool_args;
pub use txpool_args::TxPoolArgs;

/// DevArgs for configuring the dev testnet
mod dev_args;
pub use dev_args::DevArgs;

/// PruneArgs for configuring the pruning and full node
mod pruning_args;
pub use pruning_args::PruningArgs;

/// RollupArgs for configuring the op-reth rollup
#[cfg(feature = "optimism")]
mod rollup_args;
#[cfg(feature = "optimism")]
pub use rollup_args::RollupArgs;

/// Shared value-parser helpers for the arg structs
pub mod utils;

/// Newtype wrappers used as clap value types
pub mod types;
|
||||
266
crates/node-core/src/args/network_args.rs
Normal file
266
crates/node-core/src/args/network_args.rs
Normal file
@ -0,0 +1,266 @@
|
||||
//! clap [Args](clap::Args) for network related arguments.
|
||||
|
||||
use crate::version::P2P_CLIENT_VERSION;
|
||||
use clap::Args;
|
||||
use reth_config::Config;
|
||||
use reth_discv4::{DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT};
|
||||
use reth_net_nat::NatResolver;
|
||||
use reth_network::{HelloMessageWithProtocols, NetworkConfigBuilder};
|
||||
use reth_primitives::{mainnet_nodes, ChainSpec, NodeRecord};
|
||||
use secp256k1::SecretKey;
|
||||
use std::{net::Ipv4Addr, path::PathBuf, sync::Arc};
|
||||
|
||||
/// Parameters for configuring the network more granularity via CLI
#[derive(Debug, Args, PartialEq, Eq)]
#[clap(next_help_heading = "Networking")]
pub struct NetworkArgs {
    /// Discovery service settings (flattened; see [DiscoveryArgs]).
    // NOTE(review): the previous doc comment ("Disable the discovery service.")
    // was copy-pasted from the `--disable-discovery` flag inside DiscoveryArgs.
    #[command(flatten)]
    pub discovery: DiscoveryArgs,

    /// Comma separated enode URLs of trusted peers for P2P connections.
    ///
    /// --trusted-peers enode://abcd@192.168.0.1:30303
    #[arg(long, value_delimiter = ',')]
    pub trusted_peers: Vec<NodeRecord>,

    /// Connect only to trusted peers
    #[arg(long)]
    pub trusted_only: bool,

    /// Comma separated enode URLs for P2P discovery bootstrap.
    ///
    /// Will fall back to a network-specific default if not specified.
    #[arg(long, value_delimiter = ',')]
    pub bootnodes: Option<Vec<NodeRecord>>,

    /// The path to the known peers file. Connected peers are dumped to this file on nodes
    /// shutdown, and read on startup. Cannot be used with `--no-persist-peers`.
    #[arg(long, value_name = "FILE", verbatim_doc_comment, conflicts_with = "no_persist_peers")]
    pub peers_file: Option<PathBuf>,

    /// Custom node identity
    #[arg(long, value_name = "IDENTITY", default_value = P2P_CLIENT_VERSION)]
    pub identity: String,

    /// Secret key to use for this node.
    ///
    /// This will also deterministically set the peer ID. If not specified, it will be set in the
    /// data dir for the chain being used.
    #[arg(long, value_name = "PATH")]
    pub p2p_secret_key: Option<PathBuf>,

    /// Do not persist peers.
    #[arg(long, verbatim_doc_comment)]
    pub no_persist_peers: bool,

    /// NAT resolution method (any|none|upnp|publicip|extip:\<IP\>)
    #[arg(long, default_value = "any")]
    pub nat: NatResolver,

    /// Network listening address
    #[arg(long = "addr", value_name = "ADDR", default_value_t = DEFAULT_DISCOVERY_ADDR)]
    pub addr: Ipv4Addr,

    /// Network listening port
    #[arg(long = "port", value_name = "PORT", default_value_t = DEFAULT_DISCOVERY_PORT)]
    pub port: u16,

    /// Maximum number of outbound requests. default: 100
    #[arg(long)]
    pub max_outbound_peers: Option<usize>,

    /// Maximum number of inbound requests. default: 30
    #[arg(long)]
    pub max_inbound_peers: Option<usize>,
}
|
||||
|
||||
impl NetworkArgs {
    /// Build a [`NetworkConfigBuilder`] from a [`Config`] and a [`ChainSpec`], in addition to the
    /// values in this option struct.
    ///
    /// The `default_peers_file` will be used as the default location to store the persistent peers
    /// file if `no_persist_peers` is false, and there is no provided `peers_file`.
    pub fn network_config(
        &self,
        config: &Config,
        chain_spec: Arc<ChainSpec>,
        secret_key: SecretKey,
        default_peers_file: PathBuf,
    ) -> NetworkConfigBuilder {
        // Chain-specific bootnodes, falling back to mainnet's if the chain
        // spec does not define any.
        let chain_bootnodes = chain_spec.bootnodes().unwrap_or_else(mainnet_nodes);
        let peers_file = self.peers_file.clone().unwrap_or(default_peers_file);

        // Configure peer connections
        let peer_config = config
            .peers
            .clone()
            .with_max_inbound_opt(self.max_inbound_peers)
            .with_max_outbound_opt(self.max_outbound_peers);

        // Configure basic network stack
        let mut network_config_builder = config
            .network_config(self.nat, self.persistent_peers_file(peers_file), secret_key)
            .peer_config(peer_config)
            .boot_nodes(self.bootnodes.clone().unwrap_or(chain_bootnodes))
            .chain_spec(chain_spec);

        // Configure node identity
        let peer_id = network_config_builder.get_peer_id();
        network_config_builder = network_config_builder.hello_message(
            HelloMessageWithProtocols::builder(peer_id).client_version(&self.identity).build(),
        );

        self.discovery.apply_to_builder(network_config_builder)
    }

    /// Returns the path to the persistent peers file, or `None` when
    /// `--no-persist-peers` is set. (Previous doc stated the inverted condition.)
    pub fn persistent_peers_file(&self, peers_file: PathBuf) -> Option<PathBuf> {
        if self.no_persist_peers {
            return None
        }

        Some(peers_file)
    }
}
|
||||
|
||||
impl Default for NetworkArgs {
    /// Mirrors the clap defaults so `Default::default()` matches parsing an
    /// empty command line (asserted by `network_args_default_sanity_test`).
    fn default() -> Self {
        Self {
            discovery: DiscoveryArgs::default(),
            trusted_peers: vec![],
            trusted_only: false,
            bootnodes: None,
            peers_file: None,
            identity: P2P_CLIENT_VERSION.to_string(),
            p2p_secret_key: None,
            no_persist_peers: false,
            nat: NatResolver::Any,
            addr: DEFAULT_DISCOVERY_ADDR,
            port: DEFAULT_DISCOVERY_PORT,
            max_outbound_peers: None,
            max_inbound_peers: None,
        }
    }
}
|
||||
|
||||
/// Arguments to setup discovery
#[derive(Debug, Args, PartialEq, Eq)]
pub struct DiscoveryArgs {
    /// Disable the discovery service.
    // Implied (defaults to true) when `--dev` is passed.
    #[arg(short, long, default_value_if("dev", "true", "true"))]
    pub disable_discovery: bool,

    /// Disable the DNS discovery.
    #[arg(long, conflicts_with = "disable_discovery")]
    pub disable_dns_discovery: bool,

    /// Disable Discv4 discovery.
    #[arg(long, conflicts_with = "disable_discovery")]
    pub disable_discv4_discovery: bool,

    /// The UDP address to use for P2P discovery/networking
    #[arg(long = "discovery.addr", name = "discovery.addr", value_name = "DISCOVERY_ADDR", default_value_t = DEFAULT_DISCOVERY_ADDR)]
    pub addr: Ipv4Addr,

    /// The UDP port to use for P2P discovery/networking
    #[arg(long = "discovery.port", name = "discovery.port", value_name = "DISCOVERY_PORT", default_value_t = DEFAULT_DISCOVERY_PORT)]
    pub port: u16,
}
|
||||
|
||||
impl DiscoveryArgs {
|
||||
/// Apply the discovery settings to the given [NetworkConfigBuilder]
|
||||
pub fn apply_to_builder(
|
||||
&self,
|
||||
mut network_config_builder: NetworkConfigBuilder,
|
||||
) -> NetworkConfigBuilder {
|
||||
if self.disable_discovery || self.disable_dns_discovery {
|
||||
network_config_builder = network_config_builder.disable_dns_discovery();
|
||||
}
|
||||
|
||||
if self.disable_discovery || self.disable_discv4_discovery {
|
||||
network_config_builder = network_config_builder.disable_discv4_discovery();
|
||||
}
|
||||
network_config_builder
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for DiscoveryArgs {
    /// All discovery mechanisms enabled, on the default discovery address/port.
    fn default() -> Self {
        Self {
            disable_discovery: false,
            disable_dns_discovery: false,
            disable_discv4_discovery: false,
            addr: DEFAULT_DISCOVERY_ADDR,
            port: DEFAULT_DISCOVERY_PORT,
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;
    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[clap(flatten)]
        args: T,
    }

    /// NAT resolver values parse, including the `extip:<IP>` form.
    #[test]
    fn parse_nat_args() {
        let args = CommandParser::<NetworkArgs>::parse_from(["reth", "--nat", "none"]).args;
        assert_eq!(args.nat, NatResolver::None);

        let args =
            CommandParser::<NetworkArgs>::parse_from(["reth", "--nat", "extip:0.0.0.0"]).args;
        assert_eq!(args.nat, NatResolver::ExternalIp("0.0.0.0".parse().unwrap()));
    }

    /// Peer limits are independently optional.
    #[test]
    fn parse_peer_args() {
        let args =
            CommandParser::<NetworkArgs>::parse_from(["reth", "--max-outbound-peers", "50"]).args;
        assert_eq!(args.max_outbound_peers, Some(50));
        assert_eq!(args.max_inbound_peers, None);

        let args = CommandParser::<NetworkArgs>::parse_from([
            "reth",
            "--max-outbound-peers",
            "75",
            "--max-inbound-peers",
            "15",
        ])
        .args;
        assert_eq!(args.max_outbound_peers, Some(75));
        assert_eq!(args.max_inbound_peers, Some(15));
    }

    /// Comma-delimited enode URLs split into multiple trusted peers.
    #[test]
    fn parse_trusted_peer_args() {
        let args =
            CommandParser::<NetworkArgs>::parse_from([
                "reth",
                "--trusted-peers",
                "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303,enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303"
            ])
            .args;

        assert_eq!(
            args.trusted_peers,
            vec![
                "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303".parse().unwrap(),
                "enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303".parse().unwrap()
            ]
        );
    }

    /// The hand-written `Default` impl must stay in sync with the clap defaults.
    #[test]
    fn network_args_default_sanity_test() {
        let default_args = NetworkArgs::default();
        let args = CommandParser::<NetworkArgs>::parse_from(["reth"]).args;

        assert_eq!(args, default_args);
    }
}
|
||||
178
crates/node-core/src/args/payload_builder_args.rs
Normal file
178
crates/node-core/src/args/payload_builder_args.rs
Normal file
@ -0,0 +1,178 @@
|
||||
use crate::{
|
||||
args::utils::parse_duration_from_secs, cli::config::PayloadBuilderConfig,
|
||||
version::default_extradata,
|
||||
};
|
||||
use clap::{
|
||||
builder::{RangedU64ValueParser, TypedValueParser},
|
||||
Arg, Args, Command,
|
||||
};
|
||||
use reth_primitives::constants::{
|
||||
ETHEREUM_BLOCK_GAS_LIMIT, MAXIMUM_EXTRA_DATA_SIZE, SLOT_DURATION,
|
||||
};
|
||||
use std::{borrow::Cow, ffi::OsStr, time::Duration};
|
||||
|
||||
/// Parameters for configuring the Payload Builder
#[derive(Debug, Args, PartialEq)]
#[clap(next_help_heading = "Builder")]
pub struct PayloadBuilderArgs {
    /// Block extra data set by the payload builder.
    // Validated by `ExtradataValueParser` against MAXIMUM_EXTRA_DATA_SIZE.
    #[arg(long = "builder.extradata", value_parser=ExtradataValueParser::default(), default_value_t = default_extradata())]
    pub extradata: String,

    /// Target gas ceiling for built blocks.
    // "30000000" matches ETHEREUM_BLOCK_GAS_LIMIT used in the Default impl.
    #[arg(long = "builder.gaslimit", default_value = "30000000", value_name = "GAS_LIMIT")]
    pub max_gas_limit: u64,

    /// The interval at which the job should build a new payload after the last (in seconds).
    #[arg(long = "builder.interval", value_parser = parse_duration_from_secs, default_value = "1", value_name = "SECONDS")]
    pub interval: Duration,

    /// The deadline for when the payload builder job should resolve.
    // "12" seconds matches SLOT_DURATION used in the Default impl.
    #[arg(long = "builder.deadline", value_parser = parse_duration_from_secs, default_value = "12", value_name = "SECONDS")]
    pub deadline: Duration,

    /// Maximum number of tasks to spawn for building a payload.
    // Range starts at 1: zero tasks would make the builder useless.
    #[arg(long = "builder.max-tasks", default_value = "3", value_parser = RangedU64ValueParser::<usize>::new().range(1..))]
    pub max_payload_tasks: usize,

    /// By default the pending block equals the latest block
    /// to save resources and not leak txs from the tx-pool,
    /// this flag enables computing of the pending block
    /// from the tx-pool instead.
    ///
    /// If `compute_pending_block` is not enabled, the payload builder
    /// will use the payload attributes from the latest block. Note
    /// that this flag is not yet functional.
    #[cfg(feature = "optimism")]
    #[arg(long = "rollup.compute-pending-block")]
    pub compute_pending_block: bool,
}
|
||||
|
||||
impl Default for PayloadBuilderArgs {
    /// Mirrors the clap defaults (30M gas = ETHEREUM_BLOCK_GAS_LIMIT,
    /// 12s deadline = SLOT_DURATION) so `Default::default()` matches parsing
    /// an empty command line.
    fn default() -> Self {
        Self {
            extradata: default_extradata(),
            max_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT,
            interval: Duration::from_secs(1),
            deadline: SLOT_DURATION,
            max_payload_tasks: 3,
            #[cfg(feature = "optimism")]
            compute_pending_block: false,
        }
    }
}
|
||||
|
||||
/// Straightforward accessor impl exposing the CLI values through the
/// [PayloadBuilderConfig] trait.
impl PayloadBuilderConfig for PayloadBuilderArgs {
    fn extradata(&self) -> Cow<'_, str> {
        // Borrowed Cow: no allocation for the common read-only case.
        self.extradata.as_str().into()
    }

    fn interval(&self) -> Duration {
        self.interval
    }

    fn deadline(&self) -> Duration {
        self.deadline
    }

    fn max_gas_limit(&self) -> u64 {
        self.max_gas_limit
    }

    fn max_payload_tasks(&self) -> usize {
        self.max_payload_tasks
    }

    #[cfg(feature = "optimism")]
    fn compute_pending_block(&self) -> bool {
        self.compute_pending_block
    }
}
|
||||
|
||||
/// clap value parser that validates `--builder.extradata` values
/// (UTF-8, byte length within the header's extradata limit).
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
struct ExtradataValueParser;
|
||||
|
||||
impl TypedValueParser for ExtradataValueParser {
|
||||
type Value = String;
|
||||
|
||||
fn parse_ref(
|
||||
&self,
|
||||
_cmd: &Command,
|
||||
_arg: Option<&Arg>,
|
||||
value: &OsStr,
|
||||
) -> Result<Self::Value, clap::Error> {
|
||||
let val =
|
||||
value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?;
|
||||
if val.as_bytes().len() > MAXIMUM_EXTRA_DATA_SIZE {
|
||||
return Err(clap::Error::raw(
|
||||
clap::error::ErrorKind::InvalidValue,
|
||||
format!(
|
||||
"Payload builder extradata size exceeds {MAXIMUM_EXTRA_DATA_SIZE}bytes limit"
|
||||
),
|
||||
))
|
||||
}
|
||||
Ok(val.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::{Args, Parser};

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[clap(flatten)]
        args: T,
    }

    /// The minimum allowed task count (1) is accepted.
    #[test]
    fn test_args_with_valid_max_tasks() {
        let args =
            CommandParser::<PayloadBuilderArgs>::parse_from(["reth", "--builder.max-tasks", "1"])
                .args;
        assert_eq!(args.max_payload_tasks, 1)
    }

    /// Zero tasks is below the RangedU64ValueParser range and must be rejected.
    #[test]
    fn test_args_with_invalid_max_tasks() {
        assert!(CommandParser::<PayloadBuilderArgs>::try_parse_from([
            "reth",
            "--builder.max-tasks",
            "0"
        ])
        .is_err());
    }

    /// The built-in default extradata passes its own validator round-trip.
    #[test]
    fn test_default_extradata() {
        let extradata = default_extradata();
        let args = CommandParser::<PayloadBuilderArgs>::parse_from([
            "reth",
            "--builder.extradata",
            extradata.as_str(),
        ])
        .args;
        assert_eq!(args.extradata, extradata);
    }

    /// One byte over MAXIMUM_EXTRA_DATA_SIZE must be rejected by the parser.
    #[test]
    fn test_invalid_extradata() {
        let extradata = "x".repeat(MAXIMUM_EXTRA_DATA_SIZE + 1);
        let args = CommandParser::<PayloadBuilderArgs>::try_parse_from([
            "reth",
            "--builder.extradata",
            extradata.as_str(),
        ]);
        assert!(args.is_err());
    }

    /// The hand-written `Default` impl must stay in sync with the clap defaults.
    #[test]
    fn payload_builder_args_default_sanity_check() {
        let default_args = PayloadBuilderArgs::default();
        let args = CommandParser::<PayloadBuilderArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
|
||||
69
crates/node-core/src/args/pruning_args.rs
Normal file
69
crates/node-core/src/args/pruning_args.rs
Normal file
@ -0,0 +1,69 @@
|
||||
//! Pruning and full node arguments
|
||||
|
||||
use clap::Args;
|
||||
use reth_config::config::PruneConfig;
|
||||
use reth_primitives::{
|
||||
ChainSpec, PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Parameters for pruning and full node
#[derive(Debug, Args, PartialEq, Default)]
#[clap(next_help_heading = "Pruning")]
pub struct PruningArgs {
    /// Run full node. Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored.
    /// This flag takes priority over pruning configuration in reth.toml.
    #[arg(long, default_value_t = false)]
    pub full: bool,
}
|
||||
|
||||
impl PruningArgs {
|
||||
/// Returns pruning configuration.
|
||||
pub fn prune_config(&self, chain_spec: Arc<ChainSpec>) -> eyre::Result<Option<PruneConfig>> {
|
||||
Ok(if self.full {
|
||||
Some(PruneConfig {
|
||||
block_interval: 5,
|
||||
segments: PruneModes {
|
||||
sender_recovery: Some(PruneMode::Full),
|
||||
transaction_lookup: None,
|
||||
receipts: chain_spec
|
||||
.deposit_contract
|
||||
.as_ref()
|
||||
.map(|contract| PruneMode::Before(contract.block)),
|
||||
account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)),
|
||||
storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)),
|
||||
receipts_log_filter: ReceiptsLogPruneConfig(
|
||||
chain_spec
|
||||
.deposit_contract
|
||||
.as_ref()
|
||||
.map(|contract| (contract.address, PruneMode::Before(contract.block)))
|
||||
.into_iter()
|
||||
.collect(),
|
||||
),
|
||||
},
|
||||
})
|
||||
} else {
|
||||
None
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::{Args, Parser};

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[clap(flatten)]
        args: T,
    }

    /// The derived `Default` must match parsing an empty command line.
    #[test]
    fn pruning_args_sanity_check() {
        let default_args = PruningArgs::default();
        let args = CommandParser::<PruningArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
|
||||
39
crates/node-core/src/args/rollup_args.rs
Normal file
39
crates/node-core/src/args/rollup_args.rs
Normal file
@ -0,0 +1,39 @@
|
||||
//! clap [Args](clap::Args) for op-reth rollup configuration
|
||||
|
||||
/// Parameters for rollup configuration
#[derive(Debug, Default, PartialEq, Eq, clap::Args)]
#[clap(next_help_heading = "Rollup")]
pub struct RollupArgs {
    /// HTTP endpoint for the sequencer mempool
    #[arg(long = "rollup.sequencer-http", value_name = "HTTP_URL")]
    pub sequencer_http: Option<String>,

    /// Disable transaction pool gossip
    #[arg(long = "rollup.disable-tx-pool-gossip")]
    pub disable_txpool_gossip: bool,

    /// Enable walkback to genesis on startup. This is useful for re-validating the existing DB
    /// prior to beginning normal syncing.
    #[arg(long = "rollup.enable-genesis-walkback")]
    pub enable_genesis_walkback: bool,
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::{Args, Parser};

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[clap(flatten)]
        args: T,
    }

    /// The derived `Default` must match parsing an empty command line.
    // Renamed from `test_parse_database_args` — the old name was copy-pasted
    // from database_args.rs and misdescribed what this test covers.
    #[test]
    fn rollup_args_default_sanity_test() {
        let default_args = RollupArgs::default();
        let args = CommandParser::<RollupArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
|
||||
758
crates/node-core/src/args/rpc_server_args.rs
Normal file
758
crates/node-core/src/args/rpc_server_args.rs
Normal file
@ -0,0 +1,758 @@
|
||||
//! clap [Args](clap::Args) for RPC related arguments.
|
||||
|
||||
use crate::{
|
||||
args::{
|
||||
types::{MaxU32, ZeroAsNoneU64},
|
||||
GasPriceOracleArgs, RpcStateCacheArgs,
|
||||
},
|
||||
cli::{
|
||||
components::{RethNodeComponents, RethRpcComponents, RethRpcServerHandles},
|
||||
config::RethRpcConfig,
|
||||
ext::RethNodeCommandConfig,
|
||||
},
|
||||
utils::get_or_create_jwt_secret_from_path,
|
||||
};
|
||||
use clap::{
|
||||
builder::{PossibleValue, RangedU64ValueParser, TypedValueParser},
|
||||
Arg, Args, Command,
|
||||
};
|
||||
use futures::TryFutureExt;
|
||||
use reth_network_api::{NetworkInfo, Peers};
|
||||
use reth_node_api::EngineTypes;
|
||||
use reth_provider::{
|
||||
AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader,
|
||||
EvmEnvProvider, HeaderProvider, StateProviderFactory,
|
||||
};
|
||||
use reth_rpc::{
|
||||
eth::{cache::EthStateCacheConfig, gas_oracle::GasPriceOracleConfig, RPC_DEFAULT_GAS_CAP},
|
||||
JwtError, JwtSecret,
|
||||
};
|
||||
use reth_rpc_builder::{
|
||||
auth::{AuthServerConfig, AuthServerHandle},
|
||||
constants,
|
||||
error::RpcError,
|
||||
EthConfig, IpcServerBuilder, RethRpcModule, RpcModuleBuilder, RpcModuleConfig,
|
||||
RpcModuleSelection, RpcServerConfig, RpcServerHandle, ServerBuilder, TransportRpcModuleConfig,
|
||||
};
|
||||
use reth_rpc_engine_api::{EngineApi, EngineApiServer};
|
||||
use reth_tasks::TaskSpawner;
|
||||
use reth_transaction_pool::TransactionPool;
|
||||
use std::{
|
||||
ffi::OsStr,
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr},
|
||||
path::PathBuf,
|
||||
};
|
||||
use tracing::{debug, info};
|
||||
|
||||
/// Default max number of subscriptions per connection.
pub(crate) const RPC_DEFAULT_MAX_SUBS_PER_CONN: u32 = 1024;
/// Default max request size in MB.
pub(crate) const RPC_DEFAULT_MAX_REQUEST_SIZE_MB: u32 = 15;
/// Default max response size in MB.
///
/// This is only relevant for very large trace responses.
pub(crate) const RPC_DEFAULT_MAX_RESPONSE_SIZE_MB: u32 = 150;
/// Default number of incoming connections.
pub(crate) const RPC_DEFAULT_MAX_CONNECTIONS: u32 = 500;
|
||||
|
||||
/// Parameters for configuring the rpc more granularity via CLI
// NOTE: the `///` doc comments on the fields double as the clap `--help` text.
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[clap(next_help_heading = "RPC")]
pub struct RpcServerArgs {
    /// Enable the HTTP-RPC server
    #[arg(long, default_value_if("dev", "true", "true"))]
    pub http: bool,

    /// Http server address to listen on
    #[arg(long = "http.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
    pub http_addr: IpAddr,

    /// Http server port to listen on
    #[arg(long = "http.port", default_value_t = constants::DEFAULT_HTTP_RPC_PORT)]
    pub http_port: u16,

    /// Rpc Modules to be configured for the HTTP server
    #[arg(long = "http.api", value_parser = RpcModuleSelectionValueParser::default())]
    pub http_api: Option<RpcModuleSelection>,

    /// Http Corsdomain to allow request from
    #[arg(long = "http.corsdomain")]
    pub http_corsdomain: Option<String>,

    /// Enable the WS-RPC server
    #[arg(long)]
    pub ws: bool,

    /// Ws server address to listen on
    #[arg(long = "ws.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
    pub ws_addr: IpAddr,

    /// Ws server port to listen on
    #[arg(long = "ws.port", default_value_t = constants::DEFAULT_WS_RPC_PORT)]
    pub ws_port: u16,

    /// Origins from which to accept WebSocket requests
    #[arg(long = "ws.origins", name = "ws.origins")]
    pub ws_allowed_origins: Option<String>,

    /// Rpc Modules to be configured for the WS server
    #[arg(long = "ws.api", value_parser = RpcModuleSelectionValueParser::default())]
    pub ws_api: Option<RpcModuleSelection>,

    /// Disable the IPC-RPC server
    // IPC is on by default; see `RethRpcConfig::is_ipc_enabled`.
    #[arg(long)]
    pub ipcdisable: bool,

    /// Filename for IPC socket/pipe within the datadir
    #[arg(long, default_value_t = constants::DEFAULT_IPC_ENDPOINT.to_string())]
    pub ipcpath: String,

    /// Auth server address to listen on
    #[arg(long = "authrpc.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
    pub auth_addr: IpAddr,

    /// Auth server port to listen on
    #[arg(long = "authrpc.port", default_value_t = constants::DEFAULT_AUTH_PORT)]
    pub auth_port: u16,

    /// Path to a JWT secret to use for the authenticated engine-API RPC server.
    ///
    /// This will enforce JWT authentication for all requests coming from the consensus layer.
    ///
    /// If no path is provided, a secret will be generated and stored in the datadir under
    /// `<DIR>/<CHAIN_ID>/jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default.
    #[arg(long = "authrpc.jwtsecret", value_name = "PATH", global = true, required = false)]
    pub auth_jwtsecret: Option<PathBuf>,

    /// Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and
    /// `--ws.api`.
    ///
    /// This is __not__ used for the authenticated engine-API RPC server, see
    /// `--authrpc.jwtsecret`.
    #[arg(long = "rpc.jwtsecret", value_name = "HEX", global = true, required = false)]
    pub rpc_jwtsecret: Option<JwtSecret>,

    /// Set the maximum RPC request payload size for both HTTP and WS in megabytes.
    #[arg(long, default_value_t = RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into())]
    pub rpc_max_request_size: MaxU32,

    /// Set the maximum RPC response payload size for both HTTP and WS in megabytes.
    #[arg(long, visible_alias = "--rpc.returndata.limit", default_value_t = RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into())]
    pub rpc_max_response_size: MaxU32,

    /// Set the maximum concurrent subscriptions per connection.
    #[arg(long, default_value_t = RPC_DEFAULT_MAX_SUBS_PER_CONN.into())]
    pub rpc_max_subscriptions_per_connection: MaxU32,

    /// Maximum number of RPC server connections.
    #[arg(long, value_name = "COUNT", default_value_t = RPC_DEFAULT_MAX_CONNECTIONS.into())]
    pub rpc_max_connections: MaxU32,

    /// Maximum number of concurrent tracing requests.
    #[arg(long, value_name = "COUNT", default_value_t = constants::DEFAULT_MAX_TRACING_REQUESTS)]
    pub rpc_max_tracing_requests: u32,

    /// Maximum number of blocks that could be scanned per filter request. (0 = entire chain)
    // `0` is normalized to "no limit" via `ZeroAsNoneU64::unwrap_or_max` in `eth_config`.
    #[arg(long, value_name = "COUNT", default_value_t = ZeroAsNoneU64::new(constants::DEFAULT_MAX_BLOCKS_PER_FILTER))]
    pub rpc_max_blocks_per_filter: ZeroAsNoneU64,

    /// Maximum number of logs that can be returned in a single response. (0 = no limit)
    #[arg(long, value_name = "COUNT", default_value_t = ZeroAsNoneU64::new(constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64))]
    pub rpc_max_logs_per_response: ZeroAsNoneU64,

    /// Maximum gas limit for `eth_call` and call tracing RPC methods.
    // The range `1..` rejects a zero gas cap at CLI parse time (covered by tests).
    #[arg(
        long,
        alias = "rpc.gascap",
        value_name = "GAS_CAP",
        value_parser = RangedU64ValueParser::<u64>::new().range(1..),
        default_value_t = RPC_DEFAULT_GAS_CAP.into()
    )]
    pub rpc_gas_cap: u64,

    /// State cache configuration.
    #[clap(flatten)]
    pub rpc_state_cache: RpcStateCacheArgs,

    /// Gas price oracle configuration.
    #[clap(flatten)]
    pub gas_price_oracle: GasPriceOracleArgs,
}
|
||||
|
||||
impl RpcServerArgs {
    /// Enables the HTTP-RPC server.
    pub fn with_http(mut self) -> Self {
        self.http = true;
        self
    }

    /// Enables the WS-RPC server.
    pub fn with_ws(mut self) -> Self {
        self.ws = true;
        self
    }

    /// Change rpc port numbers based on the instance number.
    /// * The `auth_port` is scaled by a factor of `instance * 100`
    /// * The `http_port` is scaled by a factor of `-instance`
    /// * The `ws_port` is scaled by a factor of `instance * 2`
    /// * The `ipcpath` is appended with the instance number: `/tmp/reth.ipc-<instance>`
    ///
    /// # Panics
    /// Warning: if `instance` is zero in debug mode, this will panic.
    ///
    /// This will also panic in debug mode if either:
    /// * `instance` is greater than `655` (scaling would overflow `u16`)
    /// * `self.auth_port / 100 + (instance - 1)` would overflow `u16`
    ///
    /// In release mode, this will silently wrap around.
    pub fn adjust_instance_ports(&mut self, instance: u16) {
        debug_assert_ne!(instance, 0, "instance must be non-zero");
        // auth port is scaled by a factor of instance * 100
        self.auth_port += instance * 100 - 100;
        // http port is scaled by a factor of -instance
        self.http_port -= instance - 1;
        // ws port is scaled by a factor of instance * 2
        self.ws_port += instance * 2 - 2;

        // also adjust the ipc path by appending the instance number to the path used for the
        // endpoint
        self.ipcpath = format!("{}-{}", self.ipcpath, instance);
    }

    /// Configures and launches _all_ servers.
    ///
    /// Returns the handles for the launched regular RPC server(s) (if any) and the server handle
    /// for the auth server that handles the `engine_` API that's accessed by the consensus
    /// layer.
    pub async fn start_servers<Reth, Engine, Conf, EngineT: EngineTypes>(
        &self,
        components: &Reth,
        engine_api: Engine,
        jwt_secret: JwtSecret,
        conf: &mut Conf,
    ) -> eyre::Result<RethRpcServerHandles>
    where
        Reth: RethNodeComponents,
        Engine: EngineApiServer<EngineT>,
        Conf: RethNodeCommandConfig,
    {
        // Build the engine-API (auth) server config from the consensus-layer JWT secret.
        let auth_config = self.auth_server_config(jwt_secret)?;

        let module_config = self.transport_rpc_module_config();
        debug!(target: "reth::cli", http=?module_config.http(), ws=?module_config.ws(), "Using RPC module config");

        // Wire node components into the RPC module builder; this yields the transport
        // modules, the auth module and the registry in one pass.
        let (mut modules, mut auth_module, mut registry) = RpcModuleBuilder::default()
            .with_provider(components.provider())
            .with_pool(components.pool())
            .with_network(components.network())
            .with_events(components.events())
            .with_executor(components.task_executor())
            .build_with_auth_server(module_config, engine_api);

        let rpc_components = RethRpcComponents {
            registry: &mut registry,
            modules: &mut modules,
            auth_module: &mut auth_module,
        };
        // apply configured customization before the servers are launched
        conf.extend_rpc_modules(self, components, rpc_components)?;

        let server_config = self.rpc_server_config();
        // Lazily build the regular-RPC launch future; logs each started transport.
        let launch_rpc = modules.clone().start_server(server_config).map_ok(|handle| {
            if let Some(url) = handle.ipc_endpoint() {
                info!(target: "reth::cli", url=%url, "RPC IPC server started");
            }
            if let Some(addr) = handle.http_local_addr() {
                info!(target: "reth::cli", url=%addr, "RPC HTTP server started");
            }
            if let Some(addr) = handle.ws_local_addr() {
                info!(target: "reth::cli", url=%addr, "RPC WS server started");
            }
            handle
        });

        let launch_auth = auth_module.clone().start_server(auth_config).map_ok(|handle| {
            let addr = handle.local_addr();
            info!(target: "reth::cli", url=%addr, "RPC auth server started");
            handle
        });

        // launch servers concurrently; either failure aborts the whole startup
        let (rpc, auth) = futures::future::try_join(launch_rpc, launch_auth).await?;
        let handles = RethRpcServerHandles { rpc, auth };

        // call hook with a fresh view over the (still mutable) modules/registry
        let rpc_components = RethRpcComponents {
            registry: &mut registry,
            modules: &mut modules,
            auth_module: &mut auth_module,
        };
        conf.on_rpc_server_started(self, components, rpc_components, handles.clone())?;

        Ok(handles)
    }

    /// Convenience function for starting a rpc server with configs which extracted from cli args.
    pub async fn start_rpc_server<Provider, Pool, Network, Tasks, Events>(
        &self,
        provider: Provider,
        pool: Pool,
        network: Network,
        executor: Tasks,
        events: Events,
    ) -> Result<RpcServerHandle, RpcError>
    where
        Provider: BlockReaderIdExt
            + AccountReader
            + HeaderProvider
            + StateProviderFactory
            + EvmEnvProvider
            + ChainSpecProvider
            + ChangeSetReader
            + Clone
            + Unpin
            + 'static,
        Pool: TransactionPool + Clone + 'static,
        Network: NetworkInfo + Peers + Clone + 'static,
        Tasks: TaskSpawner + Clone + 'static,
        Events: CanonStateSubscriptions + Clone + 'static,
    {
        // Delegates to the builder crate; transport/module selection comes from the CLI args.
        reth_rpc_builder::launch(
            provider,
            pool,
            network,
            self.transport_rpc_module_config(),
            self.rpc_server_config(),
            executor,
            events,
        )
        .await
    }

    /// Create Engine API server.
    pub async fn start_auth_server<Provider, Pool, Network, Tasks, EngineT>(
        &self,
        provider: Provider,
        pool: Pool,
        network: Network,
        executor: Tasks,
        engine_api: EngineApi<Provider, EngineT>,
        jwt_secret: JwtSecret,
    ) -> Result<AuthServerHandle, RpcError>
    where
        Provider: BlockReaderIdExt
            + ChainSpecProvider
            + EvmEnvProvider
            + HeaderProvider
            + StateProviderFactory
            + Clone
            + Unpin
            + 'static,
        Pool: TransactionPool + Clone + 'static,
        Network: NetworkInfo + Peers + Clone + 'static,
        Tasks: TaskSpawner + Clone + 'static,
        EngineT: EngineTypes + 'static,
    {
        // The auth server binds to the `--authrpc.addr`/`--authrpc.port` pair.
        let socket_address = SocketAddr::new(self.auth_addr, self.auth_port);

        reth_rpc_builder::auth::launch(
            provider,
            pool,
            network,
            executor,
            engine_api,
            socket_address,
            jwt_secret,
        )
        .await
    }
}
|
||||
|
||||
impl RethRpcConfig for RpcServerArgs {
    fn is_ipc_enabled(&self) -> bool {
        // By default IPC is enabled, therefore it is enabled if `ipcdisable` is false.
        !self.ipcdisable
    }

    fn ipc_path(&self) -> &str {
        self.ipcpath.as_str()
    }

    /// Assembles the eth namespace config from the individual CLI limits.
    fn eth_config(&self) -> EthConfig {
        EthConfig::default()
            .max_tracing_requests(self.rpc_max_tracing_requests)
            // `0` on the CLI means "no limit"; `unwrap_or_max` maps it to the max value.
            .max_blocks_per_filter(self.rpc_max_blocks_per_filter.unwrap_or_max())
            .max_logs_per_response(self.rpc_max_logs_per_response.unwrap_or_max() as usize)
            .rpc_gas_cap(self.rpc_gas_cap)
            .state_cache(self.state_cache_config())
            .gpo_config(self.gas_price_oracle_config())
    }

    fn state_cache_config(&self) -> EthStateCacheConfig {
        EthStateCacheConfig {
            max_blocks: self.rpc_state_cache.max_blocks,
            max_receipts: self.rpc_state_cache.max_receipts,
            max_envs: self.rpc_state_cache.max_envs,
            max_concurrent_db_requests: self.rpc_state_cache.max_concurrent_db_requests,
        }
    }

    fn rpc_max_request_size_bytes(&self) -> u32 {
        // CLI value is in MB; convert to bytes, saturating instead of overflowing.
        self.rpc_max_request_size.get().saturating_mul(1024 * 1024)
    }

    fn rpc_max_response_size_bytes(&self) -> u32 {
        // CLI value is in MB; convert to bytes, saturating instead of overflowing.
        self.rpc_max_response_size.get().saturating_mul(1024 * 1024)
    }

    fn gas_price_oracle_config(&self) -> GasPriceOracleConfig {
        self.gas_price_oracle.gas_price_oracle_config()
    }

    /// Maps the CLI transport flags to per-transport module selections.
    ///
    /// If `--http.api`/`--ws.api` are not given, the standard module set is used.
    fn transport_rpc_module_config(&self) -> TransportRpcModuleConfig {
        let mut config = TransportRpcModuleConfig::default()
            .with_config(RpcModuleConfig::new(self.eth_config()));

        if self.http {
            config = config.with_http(
                self.http_api
                    .clone()
                    .unwrap_or_else(|| RpcModuleSelection::standard_modules().into()),
            );
        }

        if self.ws {
            config = config.with_ws(
                self.ws_api
                    .clone()
                    .unwrap_or_else(|| RpcModuleSelection::standard_modules().into()),
            );
        }

        if self.is_ipc_enabled() {
            config = config.with_ipc(RpcModuleSelection::default_ipc_modules());
        }

        config
    }

    /// Shared jsonrpsee builder for the HTTP and WS transports, limits taken from the CLI.
    fn http_ws_server_builder(&self) -> ServerBuilder {
        ServerBuilder::new()
            .max_connections(self.rpc_max_connections.get())
            .max_request_body_size(self.rpc_max_request_size_bytes())
            .max_response_body_size(self.rpc_max_response_size_bytes())
            .max_subscriptions_per_connection(self.rpc_max_subscriptions_per_connection.get())
    }

    /// IPC builder with the same size/connection limits as the HTTP/WS transports.
    fn ipc_server_builder(&self) -> IpcServerBuilder {
        IpcServerBuilder::default()
            .max_subscriptions_per_connection(self.rpc_max_subscriptions_per_connection.get())
            .max_request_body_size(self.rpc_max_request_size_bytes())
            .max_response_body_size(self.rpc_max_response_size_bytes())
            .max_connections(self.rpc_max_connections.get())
    }

    fn rpc_server_config(&self) -> RpcServerConfig {
        let mut config = RpcServerConfig::default().with_jwt_secret(self.rpc_secret_key());

        if self.http {
            let socket_address = SocketAddr::new(self.http_addr, self.http_port);
            // NOTE(review): `with_ws_cors` is only applied inside this `http` branch, so
            // `--ws.origins` is ignored when `--ws` is set without `--http` — confirm intent.
            config = config
                .with_http_address(socket_address)
                .with_http(self.http_ws_server_builder())
                .with_http_cors(self.http_corsdomain.clone())
                .with_ws_cors(self.ws_allowed_origins.clone());
        }

        if self.ws {
            let socket_address = SocketAddr::new(self.ws_addr, self.ws_port);
            config = config.with_ws_address(socket_address).with_ws(self.http_ws_server_builder());
        }

        if self.is_ipc_enabled() {
            config =
                config.with_ipc(self.ipc_server_builder()).with_ipc_endpoint(self.ipcpath.clone());
        }

        config
    }

    fn auth_server_config(&self, jwt_secret: JwtSecret) -> Result<AuthServerConfig, RpcError> {
        let address = SocketAddr::new(self.auth_addr, self.auth_port);

        Ok(AuthServerConfig::builder(jwt_secret).socket_addr(address).build())
    }

    /// Resolves the engine-API JWT secret: reads the user-supplied file if given,
    /// otherwise loads or creates one at `default_jwt_path`.
    fn auth_jwt_secret(&self, default_jwt_path: PathBuf) -> Result<JwtSecret, JwtError> {
        match self.auth_jwtsecret.as_ref() {
            Some(fpath) => {
                debug!(target: "reth::cli", user_path=?fpath, "Reading JWT auth secret file");
                JwtSecret::from_file(fpath)
            }
            None => get_or_create_jwt_secret_from_path(&default_jwt_path),
        }
    }

    fn rpc_secret_key(&self) -> Option<JwtSecret> {
        self.rpc_jwtsecret.clone()
    }
}
|
||||
|
||||
impl Default for RpcServerArgs {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
http: false,
|
||||
http_addr: Ipv4Addr::LOCALHOST.into(),
|
||||
http_port: constants::DEFAULT_HTTP_RPC_PORT,
|
||||
http_api: None,
|
||||
http_corsdomain: None,
|
||||
ws: false,
|
||||
ws_addr: Ipv4Addr::LOCALHOST.into(),
|
||||
ws_port: constants::DEFAULT_WS_RPC_PORT,
|
||||
ws_allowed_origins: None,
|
||||
ws_api: None,
|
||||
ipcdisable: false,
|
||||
ipcpath: constants::DEFAULT_IPC_ENDPOINT.to_string(),
|
||||
auth_addr: Ipv4Addr::LOCALHOST.into(),
|
||||
auth_port: constants::DEFAULT_AUTH_PORT,
|
||||
auth_jwtsecret: None,
|
||||
rpc_jwtsecret: None,
|
||||
rpc_max_request_size: RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into(),
|
||||
rpc_max_response_size: RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into(),
|
||||
rpc_max_subscriptions_per_connection: RPC_DEFAULT_MAX_SUBS_PER_CONN.into(),
|
||||
rpc_max_connections: RPC_DEFAULT_MAX_CONNECTIONS.into(),
|
||||
rpc_max_tracing_requests: constants::DEFAULT_MAX_TRACING_REQUESTS,
|
||||
rpc_max_blocks_per_filter: constants::DEFAULT_MAX_BLOCKS_PER_FILTER.into(),
|
||||
rpc_max_logs_per_response: (constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64).into(),
|
||||
rpc_gas_cap: RPC_DEFAULT_GAS_CAP.into(),
|
||||
gas_price_oracle: GasPriceOracleArgs::default(),
|
||||
rpc_state_cache: RpcStateCacheArgs::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// clap value parser for [RpcModuleSelection].
///
/// Stateless marker type; the parsing logic lives in its [`TypedValueParser`] impl.
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
struct RpcModuleSelectionValueParser;
|
||||
|
||||
impl TypedValueParser for RpcModuleSelectionValueParser {
|
||||
type Value = RpcModuleSelection;
|
||||
|
||||
fn parse_ref(
|
||||
&self,
|
||||
_cmd: &Command,
|
||||
arg: Option<&Arg>,
|
||||
value: &OsStr,
|
||||
) -> Result<Self::Value, clap::Error> {
|
||||
let val =
|
||||
value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?;
|
||||
val.parse::<RpcModuleSelection>().map_err(|err| {
|
||||
let arg = arg.map(|a| a.to_string()).unwrap_or_else(|| "...".to_owned());
|
||||
let possible_values = RethRpcModule::all_variants().to_vec().join(",");
|
||||
let msg = format!(
|
||||
"Invalid value '{val}' for {arg}: {err}.\n [possible values: {possible_values}]"
|
||||
);
|
||||
clap::Error::raw(clap::error::ErrorKind::InvalidValue, msg)
|
||||
})
|
||||
}
|
||||
|
||||
fn possible_values(&self) -> Option<Box<dyn Iterator<Item = PossibleValue> + '_>> {
|
||||
let values = RethRpcModule::all_variants().iter().map(PossibleValue::new);
|
||||
Some(Box::new(values))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;
    use reth_rpc_builder::RpcModuleSelection::Selection;
    use std::net::SocketAddrV4;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[clap(flatten)]
        args: T,
    }

    /// Gas cap: default, explicit override, and rejection of `0` (range starts at 1).
    #[test]
    fn test_rpc_gas_cap() {
        let args = CommandParser::<RpcServerArgs>::parse_from(["reth"]).args;
        let config = args.eth_config();
        assert_eq!(config.rpc_gas_cap, Into::<u64>::into(RPC_DEFAULT_GAS_CAP));

        let args =
            CommandParser::<RpcServerArgs>::parse_from(["reth", "--rpc.gascap", "1000"]).args;
        let config = args.eth_config();
        assert_eq!(config.rpc_gas_cap, 1000);

        // `RangedU64ValueParser::range(1..)` must reject zero at parse time.
        let args = CommandParser::<RpcServerArgs>::try_parse_from(["reth", "--rpc.gascap", "0"]);
        assert!(args.is_err());
    }

    /// `--http.api` parses a comma-separated module list.
    #[test]
    fn test_rpc_server_args_parser() {
        let args =
            CommandParser::<RpcServerArgs>::parse_from(["reth", "--http.api", "eth,admin,debug"])
                .args;

        let apis = args.http_api.unwrap();
        let expected = RpcModuleSelection::try_from_selection(["eth", "admin", "debug"]).unwrap();

        assert_eq!(apis, expected);
    }

    /// The `eth-call-bundle` module is accepted alongside the standard ones.
    #[test]
    fn test_rpc_server_eth_call_bundle_args() {
        let args = CommandParser::<RpcServerArgs>::parse_from([
            "reth",
            "--http.api",
            "eth,admin,debug,eth-call-bundle",
        ])
        .args;

        let apis = args.http_api.unwrap();
        let expected =
            RpcModuleSelection::try_from_selection(["eth", "admin", "debug", "eth-call-bundle"])
                .unwrap();

        assert_eq!(apis, expected);
    }

    /// `--http.api none` yields an empty selection rather than a parse error.
    #[test]
    fn test_rpc_server_args_parser_none() {
        let args = CommandParser::<RpcServerArgs>::parse_from(["reth", "--http.api", "none"]).args;
        let apis = args.http_api.unwrap();
        let expected = Selection(vec![]);
        assert_eq!(apis, expected);
    }

    /// WS without `--ws.api` falls back to the standard modules.
    #[test]
    fn test_transport_rpc_module_config() {
        let args = CommandParser::<RpcServerArgs>::parse_from([
            "reth",
            "--http.api",
            "eth,admin,debug",
            "--http",
            "--ws",
        ])
        .args;
        let config = args.transport_rpc_module_config();
        let expected = vec![RethRpcModule::Eth, RethRpcModule::Admin, RethRpcModule::Debug];
        assert_eq!(config.http().cloned().unwrap().into_selection(), expected);
        assert_eq!(
            config.ws().cloned().unwrap().into_selection(),
            RpcModuleSelection::standard_modules()
        );
    }

    /// Whitespace around module names is trimmed during parsing.
    #[test]
    fn test_transport_rpc_module_trim_config() {
        let args = CommandParser::<RpcServerArgs>::parse_from([
            "reth",
            "--http.api",
            " eth, admin, debug",
            "--http",
            "--ws",
        ])
        .args;
        let config = args.transport_rpc_module_config();
        let expected = vec![RethRpcModule::Eth, RethRpcModule::Admin, RethRpcModule::Debug];
        assert_eq!(config.http().cloned().unwrap().into_selection(), expected);
        assert_eq!(
            config.ws().cloned().unwrap().into_selection(),
            RpcModuleSelection::standard_modules()
        );
    }

    /// Duplicate module names are deduplicated in the selection.
    #[test]
    fn test_unique_rpc_modules() {
        let args = CommandParser::<RpcServerArgs>::parse_from([
            "reth",
            "--http.api",
            " eth, admin, debug, eth,admin",
            "--http",
            "--ws",
        ])
        .args;
        let config = args.transport_rpc_module_config();
        let expected = vec![RethRpcModule::Eth, RethRpcModule::Admin, RethRpcModule::Debug];
        assert_eq!(config.http().cloned().unwrap().into_selection(), expected);
        assert_eq!(
            config.ws().cloned().unwrap().into_selection(),
            RpcModuleSelection::standard_modules()
        );
    }

    /// Addresses/ports from the CLI flow through to the server config; IPC stays at default.
    #[test]
    fn test_rpc_server_config() {
        let args = CommandParser::<RpcServerArgs>::parse_from([
            "reth",
            "--http.api",
            "eth,admin,debug",
            "--http",
            "--ws",
            "--ws.addr",
            "127.0.0.1",
            "--ws.port",
            "8888",
        ])
        .args;
        let config = args.rpc_server_config();
        assert_eq!(
            config.http_address().unwrap(),
            SocketAddr::V4(SocketAddrV4::new(
                Ipv4Addr::LOCALHOST,
                constants::DEFAULT_HTTP_RPC_PORT
            ))
        );
        assert_eq!(
            config.ws_address().unwrap(),
            SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8888))
        );
        assert_eq!(config.ipc_endpoint().unwrap().path(), constants::DEFAULT_IPC_ENDPOINT);
    }

    /// `0` for the filter limits means "unlimited" in the resulting filter config.
    #[test]
    fn test_zero_filter_limits() {
        let args = CommandParser::<RpcServerArgs>::parse_from([
            "reth",
            "--rpc-max-blocks-per-filter",
            "0",
            "--rpc-max-logs-per-response",
            "0",
        ])
        .args;

        let config = args.eth_config().filter_config();
        assert_eq!(config.max_blocks_per_filter, Some(u64::MAX));
        assert_eq!(config.max_logs_per_response, Some(usize::MAX));
    }

    /// Non-zero filter limits are passed through unchanged.
    #[test]
    fn test_custom_filter_limits() {
        let args = CommandParser::<RpcServerArgs>::parse_from([
            "reth",
            "--rpc-max-blocks-per-filter",
            "100",
            "--rpc-max-logs-per-response",
            "200",
        ])
        .args;

        let config = args.eth_config().filter_config();
        assert_eq!(config.max_blocks_per_filter, Some(100));
        assert_eq!(config.max_logs_per_response, Some(200));
    }

    /// `Default` must stay in sync with the clap defaults.
    #[test]
    fn rpc_server_args_default_sanity_test() {
        let default_args = RpcServerArgs::default();
        let args = CommandParser::<RpcServerArgs>::parse_from(["reth"]).args;

        assert_eq!(args, default_args);
    }
}
|
||||
50
crates/node-core/src/args/rpc_state_cache_args.rs
Normal file
50
crates/node-core/src/args/rpc_state_cache_args.rs
Normal file
@ -0,0 +1,50 @@
|
||||
use clap::Args;
|
||||
use reth_rpc::eth::cache::{
|
||||
DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_ENV_CACHE_MAX_LEN,
|
||||
DEFAULT_RECEIPT_CACHE_MAX_LEN,
|
||||
};
|
||||
|
||||
/// Parameters to configure RPC state cache.
// NOTE: field doc comments double as the clap `--help` text; values map 1:1 onto
// `EthStateCacheConfig` in `RethRpcConfig::state_cache_config`.
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[clap(next_help_heading = "RPC State Cache")]
pub struct RpcStateCacheArgs {
    /// Max number of blocks in cache.
    #[arg(
        long = "rpc-cache.max-blocks",
        default_value_t = DEFAULT_BLOCK_CACHE_MAX_LEN,
    )]
    pub max_blocks: u32,

    /// Max number receipts in cache.
    #[arg(
        long = "rpc-cache.max-receipts",
        default_value_t = DEFAULT_RECEIPT_CACHE_MAX_LEN,
    )]
    pub max_receipts: u32,

    /// Max number of bytes for cached env data.
    // NOTE(review): the default `DEFAULT_ENV_CACHE_MAX_LEN` suggests this is an entry
    // count rather than a byte size — confirm and adjust the help text if so.
    #[arg(
        long = "rpc-cache.max-envs",
        default_value_t = DEFAULT_ENV_CACHE_MAX_LEN,
    )]
    pub max_envs: u32,

    /// Max number of concurrent database requests.
    #[arg(
        long = "rpc-cache.max-concurrent-db-requests",
        default_value_t = DEFAULT_CONCURRENT_DB_REQUESTS,
    )]
    pub max_concurrent_db_requests: usize,
}
|
||||
|
||||
impl Default for RpcStateCacheArgs {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_blocks: DEFAULT_BLOCK_CACHE_MAX_LEN,
|
||||
max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN,
|
||||
max_envs: DEFAULT_ENV_CACHE_MAX_LEN,
|
||||
max_concurrent_db_requests: DEFAULT_CONCURRENT_DB_REQUESTS,
|
||||
}
|
||||
}
|
||||
}
|
||||
59
crates/node-core/src/args/secret_key.rs
Normal file
59
crates/node-core/src/args/secret_key.rs
Normal file
@ -0,0 +1,59 @@
|
||||
use reth_network::config::rng_secret_key;
|
||||
use reth_primitives::{fs, fs::FsPathError, hex::encode as hex_encode};
|
||||
use secp256k1::{Error as SecretKeyBaseError, SecretKey};
|
||||
use std::{
|
||||
io,
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
use thiserror::Error;
|
||||
|
||||
/// Errors returned by loading a [`SecretKey`], including IO errors.
#[derive(Error, Debug)]
pub enum SecretKeyError {
    /// Error encountered during decoding of the secret key.
    #[error(transparent)]
    SecretKeyDecodeError(#[from] SecretKeyBaseError),

    /// Error related to file system path operations.
    #[error(transparent)]
    SecretKeyFsPathError(#[from] FsPathError),

    /// Represents an error when failed to access the key file.
    ///
    /// Wraps the raw IO error together with the offending path, since
    /// `io::Error` alone does not carry the file location.
    #[error("failed to access key file {secret_file:?}: {error}")]
    FailedToAccessKeyFile {
        /// The encountered IO error.
        error: io::Error,
        /// Path to the secret key file.
        secret_file: PathBuf,
    },
}
|
||||
|
||||
/// Attempts to load a [`SecretKey`] from a specified path. If no file exists there, then it
|
||||
/// generates a secret key and stores it in the provided path. I/O errors might occur during write
|
||||
/// operations in the form of a [`SecretKeyError`]
|
||||
pub fn get_secret_key(secret_key_path: &Path) -> Result<SecretKey, SecretKeyError> {
|
||||
let exists = secret_key_path.try_exists();
|
||||
|
||||
match exists {
|
||||
Ok(true) => {
|
||||
let contents = fs::read_to_string(secret_key_path)?;
|
||||
Ok((contents.as_str().parse::<SecretKey>())
|
||||
.map_err(SecretKeyError::SecretKeyDecodeError)?)
|
||||
}
|
||||
Ok(false) => {
|
||||
if let Some(dir) = secret_key_path.parent() {
|
||||
// Create parent directory
|
||||
fs::create_dir_all(dir)?;
|
||||
}
|
||||
|
||||
let secret = rng_secret_key();
|
||||
let hex = hex_encode(secret.as_ref());
|
||||
fs::write(secret_key_path, hex)?;
|
||||
Ok(secret)
|
||||
}
|
||||
Err(error) => Err(SecretKeyError::FailedToAccessKeyFile {
|
||||
error,
|
||||
secret_file: secret_key_path.to_path_buf(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
56
crates/node-core/src/args/stage_args.rs
Normal file
56
crates/node-core/src/args/stage_args.rs
Normal file
@ -0,0 +1,56 @@
|
||||
//! Shared arguments related to stages

/// Represents a specific stage within the data pipeline.
///
/// Different stages within the pipeline have dedicated functionalities and operations.
// NOTE: the derived `PartialOrd`/`Ord` and clap's `ValueEnum` both depend on variant
// declaration order — do not reorder variants.
#[derive(Debug, Clone, Copy, Eq, PartialEq, PartialOrd, Ord, clap::ValueEnum)]
pub enum StageEnum {
    /// The headers stage within the pipeline.
    ///
    /// This stage handles operations related to block headers.
    Headers,
    /// The bodies stage within the pipeline.
    ///
    /// This stage deals with block bodies and their associated data.
    Bodies,
    /// The senders stage within the pipeline.
    ///
    /// Responsible for sender-related processes and data recovery.
    Senders,
    /// The execution stage within the pipeline.
    ///
    /// Handles the execution of transactions and contracts.
    Execution,
    /// The account hashing stage within the pipeline.
    ///
    /// Manages operations related to hashing account data.
    AccountHashing,
    /// The storage hashing stage within the pipeline.
    ///
    /// Manages operations related to hashing storage data.
    StorageHashing,
    /// The hashing stage within the pipeline.
    ///
    /// Covers general data hashing operations.
    Hashing,
    /// The Merkle stage within the pipeline.
    ///
    /// Handles Merkle tree-related computations and data processing.
    Merkle,
    /// The transaction lookup stage within the pipeline.
    ///
    /// Deals with the retrieval and processing of transactions.
    TxLookup,
    /// The account history stage within the pipeline.
    ///
    /// Manages historical data related to accounts.
    AccountHistory,
    /// The storage history stage within the pipeline.
    ///
    /// Manages historical data related to storage.
    StorageHistory,
    /// The total difficulty stage within the pipeline.
    ///
    /// Handles computations and data related to total difficulty.
    TotalDifficulty,
}
|
||||
124
crates/node-core/src/args/txpool_args.rs
Normal file
124
crates/node-core/src/args/txpool_args.rs
Normal file
@ -0,0 +1,124 @@
|
||||
//! Transaction pool arguments
|
||||
|
||||
use crate::cli::config::RethTransactionPoolConfig;
|
||||
use clap::Args;
|
||||
use reth_primitives::Address;
|
||||
use reth_transaction_pool::{
|
||||
LocalTransactionConfig, PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP,
|
||||
REPLACE_BLOB_PRICE_BUMP, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER,
|
||||
TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
|
||||
};
|
||||
/// Parameters for debugging purposes
|
||||
#[derive(Debug, Args, PartialEq)]
|
||||
#[clap(next_help_heading = "TxPool")]
|
||||
pub struct TxPoolArgs {
|
||||
/// Max number of transaction in the pending sub-pool.
|
||||
#[arg(long = "txpool.pending_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)]
|
||||
pub pending_max_count: usize,
|
||||
/// Max size of the pending sub-pool in megabytes.
|
||||
#[arg(long = "txpool.pending_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)]
|
||||
pub pending_max_size: usize,
|
||||
|
||||
/// Max number of transaction in the basefee sub-pool
|
||||
#[arg(long = "txpool.basefee_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)]
|
||||
pub basefee_max_count: usize,
|
||||
/// Max size of the basefee sub-pool in megabytes.
|
||||
#[arg(long = "txpool.basefee_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)]
|
||||
pub basefee_max_size: usize,
|
||||
|
||||
/// Max number of transaction in the queued sub-pool
|
||||
#[arg(long = "txpool.queued_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)]
|
||||
pub queued_max_count: usize,
|
||||
/// Max size of the queued sub-pool in megabytes.
|
||||
#[arg(long = "txpool.queued_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)]
|
||||
pub queued_max_size: usize,
|
||||
|
||||
/// Max number of executable transaction slots guaranteed per account
|
||||
#[arg(long = "txpool.max_account_slots", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)]
|
||||
pub max_account_slots: usize,
|
||||
|
||||
/// Price bump (in %) for the transaction pool underpriced check.
|
||||
#[arg(long = "txpool.pricebump", default_value_t = DEFAULT_PRICE_BUMP)]
|
||||
pub price_bump: u128,
|
||||
|
||||
/// Price bump percentage to replace an already existing blob transaction
|
||||
#[arg(long = "blobpool.pricebump", default_value_t = REPLACE_BLOB_PRICE_BUMP)]
|
||||
pub blob_transaction_price_bump: u128,
|
||||
/// Flag to disable local transaction exemptions.
|
||||
#[arg(long = "txpool.nolocals")]
|
||||
pub no_locals: bool,
|
||||
/// Flag to allow certain addresses as local
|
||||
#[arg(long = "txpool.locals")]
|
||||
pub locals: Vec<Address>,
|
||||
}
|
||||
|
||||
impl Default for TxPoolArgs {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
pending_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
|
||||
pending_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT,
|
||||
basefee_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
|
||||
basefee_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT,
|
||||
queued_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
|
||||
queued_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT,
|
||||
max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER,
|
||||
price_bump: DEFAULT_PRICE_BUMP,
|
||||
blob_transaction_price_bump: REPLACE_BLOB_PRICE_BUMP,
|
||||
no_locals: false,
|
||||
locals: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RethTransactionPoolConfig for TxPoolArgs {
|
||||
/// Returns transaction pool configuration.
|
||||
fn pool_config(&self) -> PoolConfig {
|
||||
PoolConfig {
|
||||
local_transactions_config: LocalTransactionConfig {
|
||||
no_exemptions: self.no_locals,
|
||||
local_addresses: self.locals.clone().into_iter().collect(),
|
||||
},
|
||||
pending_limit: SubPoolLimit {
|
||||
max_txs: self.pending_max_count,
|
||||
max_size: self.pending_max_size * 1024 * 1024,
|
||||
},
|
||||
basefee_limit: SubPoolLimit {
|
||||
max_txs: self.basefee_max_count,
|
||||
max_size: self.basefee_max_size * 1024 * 1024,
|
||||
},
|
||||
queued_limit: SubPoolLimit {
|
||||
max_txs: self.queued_max_count,
|
||||
max_size: self.queued_max_size * 1024 * 1024,
|
||||
},
|
||||
blob_limit: SubPoolLimit {
|
||||
max_txs: self.queued_max_count,
|
||||
max_size: self.queued_max_size * 1024 * 1024,
|
||||
},
|
||||
max_account_slots: self.max_account_slots,
|
||||
price_bumps: PriceBumpConfig {
|
||||
default_price_bump: self.price_bump,
|
||||
replace_blob_tx_price_bump: self.blob_transaction_price_bump,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use clap::Parser;
|
||||
|
||||
/// A helper type to parse Args more easily
|
||||
#[derive(Parser)]
|
||||
struct CommandParser<T: Args> {
|
||||
#[clap(flatten)]
|
||||
args: T,
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn txpool_args_default_sanity_test() {
|
||||
let default_args = TxPoolArgs::default();
|
||||
let args = CommandParser::<TxPoolArgs>::parse_from(["reth"]).args;
|
||||
assert_eq!(args, default_args);
|
||||
}
|
||||
}
|
||||
120
crates/node-core/src/args/types.rs
Normal file
120
crates/node-core/src/args/types.rs
Normal file
@ -0,0 +1,120 @@
|
||||
//! Additional helper types for CLI parsing.
|
||||
|
||||
use std::{fmt, num::ParseIntError, str::FromStr};
|
||||
|
||||
/// A macro that generates types that maps "0" to "None" when parsing CLI arguments.
|
||||
|
||||
macro_rules! zero_as_none {
|
||||
($type_name:ident, $inner_type:ty) => {
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
/// A helper type that maps `0` to `None` when parsing CLI arguments.
|
||||
pub struct $type_name(pub Option<$inner_type>);
|
||||
|
||||
impl $type_name {
|
||||
/// Returns the inner value.
|
||||
pub const fn new(value: $inner_type) -> Self {
|
||||
Self(Some(value))
|
||||
}
|
||||
|
||||
/// Returns the inner value or `$inner_type::MAX` if `None`.
|
||||
pub fn unwrap_or_max(self) -> $inner_type {
|
||||
self.0.unwrap_or(<$inner_type>::MAX)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for $type_name {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self.0 {
|
||||
Some(value) => write!(f, "{}", value),
|
||||
None => write!(f, "0"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<$inner_type> for $type_name {
|
||||
#[inline]
|
||||
fn from(value: $inner_type) -> Self {
|
||||
Self(if value == 0 { None } else { Some(value) })
|
||||
}
|
||||
}
|
||||
|
||||
impl std::str::FromStr for $type_name {
|
||||
type Err = std::num::ParseIntError;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
let value = s.parse::<$inner_type>()?;
|
||||
Ok(Self::from(value))
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
zero_as_none!(ZeroAsNoneU64, u64);
|
||||
zero_as_none!(ZeroAsNoneU32, u32);
|
||||
|
||||
/// A macro that generates types that map "max" to "MAX" when parsing CLI arguments.
|
||||
macro_rules! max_values {
|
||||
($name:ident, $ty:ident) => {
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
/// A helper type for parsing "max" as the maximum value of the specified type.
|
||||
|
||||
pub struct $name(pub $ty);
|
||||
|
||||
impl $name {
|
||||
/// Returns the inner value.
|
||||
pub const fn get(&self) -> $ty {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for $name {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<$ty> for $name {
|
||||
#[inline]
|
||||
fn from(value: $ty) -> Self {
|
||||
Self(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for $name {
|
||||
type Err = ParseIntError;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
if s.eq_ignore_ascii_case("max") {
|
||||
Ok($name(<$ty>::MAX))
|
||||
} else {
|
||||
s.parse::<$ty>().map($name)
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
max_values!(MaxU32, u32);
|
||||
max_values!(MaxU64, u64);
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_zero_parse() {
|
||||
let val = "0".parse::<ZeroAsNoneU64>().unwrap();
|
||||
assert_eq!(val, ZeroAsNoneU64(None));
|
||||
assert_eq!(val.unwrap_or_max(), u64::MAX);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_from_u64() {
|
||||
let original = 1u64;
|
||||
let expected = ZeroAsNoneU64(Some(1u64));
|
||||
assert_eq!(ZeroAsNoneU64::from(original), expected);
|
||||
|
||||
let original = 0u64;
|
||||
let expected = ZeroAsNoneU64(None);
|
||||
assert_eq!(ZeroAsNoneU64::from(original), expected);
|
||||
}
|
||||
}
|
||||
292
crates/node-core/src/args/utils.rs
Normal file
292
crates/node-core/src/args/utils.rs
Normal file
@ -0,0 +1,292 @@
|
||||
//! Clap parser utilities
|
||||
|
||||
use reth_primitives::{fs, AllGenesisFormats, BlockHashOrNumber, ChainSpec, B256};
|
||||
use std::{
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs},
|
||||
path::PathBuf,
|
||||
str::FromStr,
|
||||
sync::Arc,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
#[cfg(feature = "optimism")]
|
||||
use reth_primitives::{BASE_GOERLI, BASE_MAINNET, BASE_SEPOLIA};
|
||||
|
||||
#[cfg(not(feature = "optimism"))]
|
||||
use reth_primitives::{DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA};
|
||||
|
||||
#[cfg(feature = "optimism")]
|
||||
/// Chains supported by op-reth. First value should be used as the default.
|
||||
pub const SUPPORTED_CHAINS: &[&str] = &["base", "base-goerli", "base-sepolia"];
|
||||
#[cfg(not(feature = "optimism"))]
|
||||
/// Chains supported by reth. First value should be used as the default.
|
||||
pub const SUPPORTED_CHAINS: &[&str] = &["mainnet", "sepolia", "goerli", "holesky", "dev"];
|
||||
|
||||
/// Helper to parse a [Duration] from seconds
|
||||
pub fn parse_duration_from_secs(arg: &str) -> eyre::Result<Duration, std::num::ParseIntError> {
|
||||
let seconds = arg.parse()?;
|
||||
Ok(Duration::from_secs(seconds))
|
||||
}
|
||||
|
||||
/// Clap value parser for [ChainSpec]s that takes either a built-in chainspec or the path
|
||||
/// to a custom one.
|
||||
pub fn chain_spec_value_parser(s: &str) -> eyre::Result<Arc<ChainSpec>, eyre::Error> {
|
||||
Ok(match s {
|
||||
#[cfg(not(feature = "optimism"))]
|
||||
"mainnet" => MAINNET.clone(),
|
||||
#[cfg(not(feature = "optimism"))]
|
||||
"goerli" => GOERLI.clone(),
|
||||
#[cfg(not(feature = "optimism"))]
|
||||
"sepolia" => SEPOLIA.clone(),
|
||||
#[cfg(not(feature = "optimism"))]
|
||||
"holesky" => HOLESKY.clone(),
|
||||
#[cfg(not(feature = "optimism"))]
|
||||
"dev" => DEV.clone(),
|
||||
#[cfg(feature = "optimism")]
|
||||
"base_goerli" | "base-goerli" => BASE_GOERLI.clone(),
|
||||
#[cfg(feature = "optimism")]
|
||||
"base_sepolia" | "base-sepolia" => BASE_SEPOLIA.clone(),
|
||||
#[cfg(feature = "optimism")]
|
||||
"base" => BASE_MAINNET.clone(),
|
||||
_ => {
|
||||
let raw = fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned()))?;
|
||||
serde_json::from_str(&raw)?
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// The help info for the --chain flag
|
||||
pub fn chain_help() -> String {
|
||||
format!("The chain this node is running.\nPossible values are either a built-in chain or the path to a chain specification file.\n\nBuilt-in chains:\n {}", SUPPORTED_CHAINS.join(", "))
|
||||
}
|
||||
|
||||
/// Clap value parser for [ChainSpec]s.
|
||||
///
|
||||
/// The value parser matches either a known chain, the path
|
||||
/// to a json file, or a json formatted string in-memory. The json can be either
|
||||
/// a serialized [ChainSpec] or Genesis struct.
|
||||
pub fn genesis_value_parser(s: &str) -> eyre::Result<Arc<ChainSpec>, eyre::Error> {
|
||||
Ok(match s {
|
||||
#[cfg(not(feature = "optimism"))]
|
||||
"mainnet" => MAINNET.clone(),
|
||||
#[cfg(not(feature = "optimism"))]
|
||||
"goerli" => GOERLI.clone(),
|
||||
#[cfg(not(feature = "optimism"))]
|
||||
"sepolia" => SEPOLIA.clone(),
|
||||
#[cfg(not(feature = "optimism"))]
|
||||
"holesky" => HOLESKY.clone(),
|
||||
#[cfg(not(feature = "optimism"))]
|
||||
"dev" => DEV.clone(),
|
||||
#[cfg(feature = "optimism")]
|
||||
"base_goerli" | "base-goerli" => BASE_GOERLI.clone(),
|
||||
#[cfg(feature = "optimism")]
|
||||
"base_sepolia" | "base-sepolia" => BASE_SEPOLIA.clone(),
|
||||
#[cfg(feature = "optimism")]
|
||||
"base" => BASE_MAINNET.clone(),
|
||||
_ => {
|
||||
// try to read json from path first
|
||||
let raw = match fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned())) {
|
||||
Ok(raw) => raw,
|
||||
Err(io_err) => {
|
||||
// valid json may start with "\n", but must contain "{"
|
||||
if s.contains('{') {
|
||||
s.to_string()
|
||||
} else {
|
||||
return Err(io_err.into()) // assume invalid path
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// both serialized Genesis and ChainSpec structs supported
|
||||
let genesis: AllGenesisFormats = serde_json::from_str(&raw)?;
|
||||
|
||||
Arc::new(genesis.into())
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Parse [BlockHashOrNumber]
|
||||
pub fn hash_or_num_value_parser(value: &str) -> eyre::Result<BlockHashOrNumber, eyre::Error> {
|
||||
match B256::from_str(value) {
|
||||
Ok(hash) => Ok(BlockHashOrNumber::Hash(hash)),
|
||||
Err(_) => Ok(BlockHashOrNumber::Number(value.parse()?)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Error thrown while parsing a socket address.
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum SocketAddressParsingError {
|
||||
/// Failed to convert the string into a socket addr
|
||||
#[error("could not parse socket address: {0}")]
|
||||
Io(#[from] std::io::Error),
|
||||
/// Input must not be empty
|
||||
#[error("cannot parse socket address from empty string")]
|
||||
Empty,
|
||||
/// Failed to parse the address
|
||||
#[error("could not parse socket address from {0}")]
|
||||
Parse(String),
|
||||
/// Failed to parse port
|
||||
#[error("could not parse port: {0}")]
|
||||
Port(#[from] std::num::ParseIntError),
|
||||
}
|
||||
|
||||
/// Parse a [SocketAddr] from a `str`.
|
||||
///
|
||||
/// The following formats are checked:
|
||||
///
|
||||
/// - If the value can be parsed as a `u16` or starts with `:` it is considered a port, and the
|
||||
/// hostname is set to `localhost`.
|
||||
/// - If the value contains `:` it is assumed to be the format `<host>:<port>`
|
||||
/// - Otherwise it is assumed to be a hostname
|
||||
///
|
||||
/// An error is returned if the value is empty.
|
||||
pub fn parse_socket_address(value: &str) -> eyre::Result<SocketAddr, SocketAddressParsingError> {
|
||||
if value.is_empty() {
|
||||
return Err(SocketAddressParsingError::Empty)
|
||||
}
|
||||
|
||||
if let Some(port) = value.strip_prefix(':').or_else(|| value.strip_prefix("localhost:")) {
|
||||
let port: u16 = port.parse()?;
|
||||
return Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port))
|
||||
}
|
||||
if let Ok(port) = value.parse::<u16>() {
|
||||
return Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port))
|
||||
}
|
||||
value
|
||||
.to_socket_addrs()?
|
||||
.next()
|
||||
.ok_or_else(|| SocketAddressParsingError::Parse(value.to_string()))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use proptest::prelude::Rng;
|
||||
use reth_primitives::{
|
||||
hex, Address, ChainConfig, ChainSpecBuilder, Genesis, GenesisAccount, U256,
|
||||
};
|
||||
use secp256k1::rand::thread_rng;
|
||||
use std::collections::HashMap;
|
||||
|
||||
#[test]
|
||||
fn parse_known_chain_spec() {
|
||||
for chain in SUPPORTED_CHAINS {
|
||||
chain_spec_value_parser(chain).unwrap();
|
||||
genesis_value_parser(chain).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_chain_spec_from_memory() {
|
||||
let custom_genesis_from_json = r#"
|
||||
{
|
||||
"nonce": "0x0",
|
||||
"timestamp": "0x653FEE9E",
|
||||
"gasLimit": "0x1388",
|
||||
"difficulty": "0x0",
|
||||
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"coinbase": "0x0000000000000000000000000000000000000000",
|
||||
"alloc": {
|
||||
"0x6Be02d1d3665660d22FF9624b7BE0551ee1Ac91b": {
|
||||
"balance": "0x21"
|
||||
}
|
||||
},
|
||||
"number": "0x0",
|
||||
"gasUsed": "0x0",
|
||||
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"config": {
|
||||
"chainId": 2600,
|
||||
"homesteadBlock": 0,
|
||||
"eip150Block": 0,
|
||||
"eip155Block": 0,
|
||||
"eip158Block": 0,
|
||||
"byzantiumBlock": 0,
|
||||
"constantinopleBlock": 0,
|
||||
"petersburgBlock": 0,
|
||||
"istanbulBlock": 0,
|
||||
"berlinBlock": 0,
|
||||
"londonBlock": 0,
|
||||
"terminalTotalDifficulty": 0,
|
||||
"terminalTotalDifficultyPassed": true,
|
||||
"shanghaiTime": 0
|
||||
}
|
||||
}
|
||||
"#;
|
||||
|
||||
let chain_from_json = genesis_value_parser(custom_genesis_from_json).unwrap();
|
||||
|
||||
// using structs
|
||||
let config = ChainConfig {
|
||||
chain_id: 2600,
|
||||
homestead_block: Some(0),
|
||||
eip150_block: Some(0),
|
||||
eip155_block: Some(0),
|
||||
eip158_block: Some(0),
|
||||
byzantium_block: Some(0),
|
||||
constantinople_block: Some(0),
|
||||
petersburg_block: Some(0),
|
||||
istanbul_block: Some(0),
|
||||
berlin_block: Some(0),
|
||||
london_block: Some(0),
|
||||
shanghai_time: Some(0),
|
||||
terminal_total_difficulty: Some(U256::ZERO),
|
||||
terminal_total_difficulty_passed: true,
|
||||
..Default::default()
|
||||
};
|
||||
let genesis = Genesis {
|
||||
config,
|
||||
nonce: 0,
|
||||
timestamp: 1698688670,
|
||||
gas_limit: 5000,
|
||||
difficulty: U256::ZERO,
|
||||
mix_hash: B256::ZERO,
|
||||
coinbase: Address::ZERO,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// seed accounts after genesis struct created
|
||||
let address = hex!("6Be02d1d3665660d22FF9624b7BE0551ee1Ac91b").into();
|
||||
let account = GenesisAccount::default().with_balance(U256::from(33));
|
||||
let genesis = genesis.extend_accounts(HashMap::from([(address, account)]));
|
||||
|
||||
let custom_genesis_from_struct = serde_json::to_string(&genesis).unwrap();
|
||||
let chain_from_struct = genesis_value_parser(&custom_genesis_from_struct).unwrap();
|
||||
assert_eq!(chain_from_json.genesis(), chain_from_struct.genesis());
|
||||
|
||||
// chain spec
|
||||
let chain_spec = ChainSpecBuilder::default()
|
||||
.chain(2600.into())
|
||||
.genesis(genesis)
|
||||
.cancun_activated()
|
||||
.build();
|
||||
|
||||
let chain_spec_json = serde_json::to_string(&chain_spec).unwrap();
|
||||
let custom_genesis_from_spec = genesis_value_parser(&chain_spec_json).unwrap();
|
||||
|
||||
assert_eq!(custom_genesis_from_spec.chain(), chain_from_struct.chain());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_socket_addresses() {
|
||||
for value in ["localhost:9000", ":9000", "9000"] {
|
||||
let socket_addr = parse_socket_address(value)
|
||||
.unwrap_or_else(|_| panic!("could not parse socket address: {value}"));
|
||||
|
||||
assert!(socket_addr.ip().is_loopback());
|
||||
assert_eq!(socket_addr.port(), 9000);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_socket_address_random() {
|
||||
let port: u16 = thread_rng().gen();
|
||||
|
||||
for value in [format!("localhost:{port}"), format!(":{port}"), port.to_string()] {
|
||||
let socket_addr = parse_socket_address(&value)
|
||||
.unwrap_or_else(|_| panic!("could not parse socket address: {value}"));
|
||||
|
||||
assert!(socket_addr.ip().is_loopback());
|
||||
assert_eq!(socket_addr.port(), port);
|
||||
}
|
||||
}
|
||||
}
|
||||
180
crates/node-core/src/cli/components.rs
Normal file
180
crates/node-core/src/cli/components.rs
Normal file
@ -0,0 +1,180 @@
|
||||
//! Components that are used by the node command.
|
||||
|
||||
use reth_network::{NetworkEvents, NetworkProtocols};
|
||||
use reth_network_api::{NetworkInfo, Peers};
|
||||
use reth_primitives::ChainSpec;
|
||||
use reth_provider::{
|
||||
AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader,
|
||||
EvmEnvProvider, StateProviderFactory,
|
||||
};
|
||||
use reth_rpc_builder::{
|
||||
auth::{AuthRpcModule, AuthServerHandle},
|
||||
RethModuleRegistry, RpcServerHandle, TransportRpcModules,
|
||||
};
|
||||
use reth_tasks::TaskSpawner;
|
||||
use reth_transaction_pool::TransactionPool;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Helper trait to unify all provider traits for simplicity.
|
||||
pub trait FullProvider:
|
||||
BlockReaderIdExt
|
||||
+ AccountReader
|
||||
+ StateProviderFactory
|
||||
+ EvmEnvProvider
|
||||
+ ChainSpecProvider
|
||||
+ ChangeSetReader
|
||||
+ Clone
|
||||
+ Unpin
|
||||
+ 'static
|
||||
{
|
||||
}
|
||||
|
||||
impl<T> FullProvider for T where
|
||||
T: BlockReaderIdExt
|
||||
+ AccountReader
|
||||
+ StateProviderFactory
|
||||
+ EvmEnvProvider
|
||||
+ ChainSpecProvider
|
||||
+ ChangeSetReader
|
||||
+ Clone
|
||||
+ Unpin
|
||||
+ 'static
|
||||
{
|
||||
}
|
||||
|
||||
/// The trait that is implemented for the Node command.
|
||||
pub trait RethNodeComponents: Clone + Send + Sync + 'static {
|
||||
/// The Provider type that is provided by the node itself
|
||||
type Provider: FullProvider;
|
||||
/// The transaction pool type
|
||||
type Pool: TransactionPool + Clone + Unpin + 'static;
|
||||
/// The network type used to communicate with p2p.
|
||||
type Network: NetworkInfo + Peers + NetworkProtocols + NetworkEvents + Clone + 'static;
|
||||
/// The events type used to create subscriptions.
|
||||
type Events: CanonStateSubscriptions + Clone + 'static;
|
||||
/// The type that is used to spawn tasks.
|
||||
type Tasks: TaskSpawner + Clone + Unpin + 'static;
|
||||
|
||||
/// Returns the instance of the provider
|
||||
fn provider(&self) -> Self::Provider;
|
||||
|
||||
/// Returns the instance of the task executor.
|
||||
fn task_executor(&self) -> Self::Tasks;
|
||||
|
||||
/// Returns the instance of the transaction pool.
|
||||
fn pool(&self) -> Self::Pool;
|
||||
|
||||
/// Returns the instance of the network API.
|
||||
fn network(&self) -> Self::Network;
|
||||
|
||||
/// Returns the instance of the events subscription handler.
|
||||
fn events(&self) -> Self::Events;
|
||||
|
||||
/// Helper function to return the chain spec.
|
||||
fn chain_spec(&self) -> Arc<ChainSpec> {
|
||||
self.provider().chain_spec()
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper container to encapsulate [RethModuleRegistry],[TransportRpcModules] and [AuthRpcModule].
|
||||
///
|
||||
/// This can be used to access installed modules, or create commonly used handlers like
|
||||
/// [reth_rpc::EthApi], and ultimately merge additional rpc handler into the configured transport
|
||||
/// modules [TransportRpcModules] as well as configured authenticated methods [AuthRpcModule].
|
||||
#[derive(Debug)]
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub struct RethRpcComponents<'a, Reth: RethNodeComponents> {
|
||||
/// A Helper type the holds instances of the configured modules.
|
||||
///
|
||||
/// This provides easy access to rpc handlers, such as [RethModuleRegistry::eth_api].
|
||||
pub registry: &'a mut RethModuleRegistry<
|
||||
Reth::Provider,
|
||||
Reth::Pool,
|
||||
Reth::Network,
|
||||
Reth::Tasks,
|
||||
Reth::Events,
|
||||
>,
|
||||
/// Holds installed modules per transport type.
|
||||
///
|
||||
/// This can be used to merge additional modules into the configured transports (http, ipc,
|
||||
/// ws). See [TransportRpcModules::merge_configured]
|
||||
pub modules: &'a mut TransportRpcModules,
|
||||
/// Holds jwt authenticated rpc module.
|
||||
///
|
||||
/// This can be used to merge additional modules into the configured authenticated methods
|
||||
pub auth_module: &'a mut AuthRpcModule,
|
||||
}
|
||||
|
||||
/// A Generic implementation of the RethNodeComponents trait.
|
||||
///
|
||||
/// Represents components required for the Reth node.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct RethNodeComponentsImpl<Provider, Pool, Network, Events, Tasks> {
|
||||
/// Represents the provider instance.
|
||||
pub provider: Provider,
|
||||
/// Represents the transaction pool instance.
|
||||
pub pool: Pool,
|
||||
/// Represents the network instance used for communication.
|
||||
pub network: Network,
|
||||
/// Represents the task executor instance.
|
||||
pub task_executor: Tasks,
|
||||
/// Represents the events subscription handler instance.
|
||||
pub events: Events,
|
||||
}
|
||||
|
||||
impl<Provider, Pool, Network, Events, Tasks> RethNodeComponents
|
||||
for RethNodeComponentsImpl<Provider, Pool, Network, Events, Tasks>
|
||||
where
|
||||
Provider: FullProvider + Clone + 'static,
|
||||
Tasks: TaskSpawner + Clone + Unpin + 'static,
|
||||
Pool: TransactionPool + Clone + Unpin + 'static,
|
||||
Network: NetworkInfo + Peers + NetworkProtocols + NetworkEvents + Clone + 'static,
|
||||
Events: CanonStateSubscriptions + Clone + 'static,
|
||||
{
|
||||
type Provider = Provider;
|
||||
type Pool = Pool;
|
||||
type Network = Network;
|
||||
type Events = Events;
|
||||
type Tasks = Tasks;
|
||||
|
||||
fn provider(&self) -> Self::Provider {
|
||||
self.provider.clone()
|
||||
}
|
||||
|
||||
fn task_executor(&self) -> Self::Tasks {
|
||||
self.task_executor.clone()
|
||||
}
|
||||
|
||||
fn pool(&self) -> Self::Pool {
|
||||
self.pool.clone()
|
||||
}
|
||||
|
||||
fn network(&self) -> Self::Network {
|
||||
self.network.clone()
|
||||
}
|
||||
|
||||
fn events(&self) -> Self::Events {
|
||||
self.events.clone()
|
||||
}
|
||||
}
|
||||
|
||||
/// Contains the handles to the spawned RPC servers.
|
||||
///
|
||||
/// This can be used to access the endpoints of the servers.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use reth_node_core::{cli::components::RethRpcServerHandles, rpc::api::EthApiClient};
|
||||
/// # async fn t(handles: RethRpcServerHandles) {
|
||||
/// let client = handles.rpc.http_client().expect("http server not started");
|
||||
/// let block_number = client.block_number().await.unwrap();
|
||||
/// # }
|
||||
/// ```
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RethRpcServerHandles {
|
||||
/// The regular RPC server handle.
|
||||
pub rpc: RpcServerHandle,
|
||||
/// The handle to the auth server (engine API)
|
||||
pub auth: AuthServerHandle,
|
||||
}
|
||||
147
crates/node-core/src/cli/config.rs
Normal file
147
crates/node-core/src/cli/config.rs
Normal file
@ -0,0 +1,147 @@
|
||||
//! Config traits for various node components.
|
||||
|
||||
use alloy_rlp::Encodable;
|
||||
use reth_network::protocol::IntoRlpxSubProtocol;
|
||||
use reth_primitives::{Bytes, BytesMut};
|
||||
use reth_rpc::{
|
||||
eth::{cache::EthStateCacheConfig, gas_oracle::GasPriceOracleConfig},
|
||||
JwtError, JwtSecret,
|
||||
};
|
||||
use reth_rpc_builder::{
|
||||
auth::AuthServerConfig, error::RpcError, EthConfig, IpcServerBuilder, RpcServerConfig,
|
||||
ServerBuilder, TransportRpcModuleConfig,
|
||||
};
|
||||
use reth_transaction_pool::PoolConfig;
|
||||
use std::{borrow::Cow, path::PathBuf, time::Duration};
|
||||
|
||||
/// A trait that provides a configured RPC server.
|
||||
///
|
||||
/// This provides all basic config values for the RPC server and is implemented by the
|
||||
/// [RpcServerArgs](crate::args::RpcServerArgs) type.
|
||||
pub trait RethRpcConfig {
|
||||
/// Returns whether ipc is enabled.
|
||||
fn is_ipc_enabled(&self) -> bool;
|
||||
|
||||
/// Returns the path to the target ipc socket if enabled.
|
||||
fn ipc_path(&self) -> &str;
|
||||
|
||||
/// The configured ethereum RPC settings.
|
||||
fn eth_config(&self) -> EthConfig;
|
||||
|
||||
/// Returns state cache configuration.
|
||||
fn state_cache_config(&self) -> EthStateCacheConfig;
|
||||
|
||||
/// Returns the max request size in bytes.
|
||||
fn rpc_max_request_size_bytes(&self) -> u32;
|
||||
|
||||
/// Returns the max response size in bytes.
|
||||
fn rpc_max_response_size_bytes(&self) -> u32;
|
||||
|
||||
/// Extracts the gas price oracle config from the args.
|
||||
fn gas_price_oracle_config(&self) -> GasPriceOracleConfig;
|
||||
|
||||
/// Creates the [TransportRpcModuleConfig] from cli args.
|
||||
///
|
||||
/// This sets all the api modules, and configures additional settings like gas price oracle
|
||||
/// settings in the [TransportRpcModuleConfig].
|
||||
fn transport_rpc_module_config(&self) -> TransportRpcModuleConfig;
|
||||
|
||||
/// Returns the default server builder for http/ws
|
||||
fn http_ws_server_builder(&self) -> ServerBuilder;
|
||||
|
||||
/// Returns the default ipc server builder
|
||||
fn ipc_server_builder(&self) -> IpcServerBuilder;
|
||||
|
||||
/// Creates the [RpcServerConfig] from cli args.
|
||||
fn rpc_server_config(&self) -> RpcServerConfig;
|
||||
|
||||
/// Creates the [AuthServerConfig] from cli args.
|
||||
fn auth_server_config(&self, jwt_secret: JwtSecret) -> Result<AuthServerConfig, RpcError>;
|
||||
|
||||
/// The execution layer and consensus layer clients SHOULD accept a configuration parameter:
|
||||
/// jwt-secret, which designates a file containing the hex-encoded 256 bit secret key to be used
|
||||
/// for verifying/generating JWT tokens.
|
||||
///
|
||||
/// If such a parameter is given, but the file cannot be read, or does not contain a hex-encoded
|
||||
/// key of 256 bits, the client SHOULD treat this as an error.
|
||||
///
|
||||
/// If such a parameter is not given, the client SHOULD generate such a token, valid for the
|
||||
/// duration of the execution, and SHOULD store the hex-encoded secret as a jwt.hex file on
|
||||
/// the filesystem. This file can then be used to provision the counterpart client.
|
||||
///
|
||||
/// The `default_jwt_path` provided as an argument will be used as the default location for the
|
||||
/// jwt secret in case the `auth_jwtsecret` argument is not provided.
|
||||
fn auth_jwt_secret(&self, default_jwt_path: PathBuf) -> Result<JwtSecret, JwtError>;
|
||||
|
||||
/// Returns the configured jwt secret key for the regular rpc servers, if any.
|
||||
///
|
||||
/// Note: this is not used for the auth server (engine API).
|
||||
fn rpc_secret_key(&self) -> Option<JwtSecret>;
|
||||
}
|
||||
|
||||
/// A trait that provides payload builder settings.
|
||||
///
|
||||
/// This provides all basic payload builder settings and is implemented by the
|
||||
/// [PayloadBuilderArgs](crate::args::PayloadBuilderArgs) type.
|
||||
pub trait PayloadBuilderConfig {
|
||||
/// Block extra data set by the payload builder.
|
||||
fn extradata(&self) -> Cow<'_, str>;
|
||||
|
||||
/// Returns the rlp-encoded extradata bytes.
|
||||
fn extradata_rlp_bytes(&self) -> Bytes {
|
||||
let mut extradata = BytesMut::new();
|
||||
self.extradata().as_bytes().encode(&mut extradata);
|
||||
extradata.freeze().into()
|
||||
}
|
||||
|
||||
/// The interval at which the job should build a new payload after the last.
|
||||
fn interval(&self) -> Duration;
|
||||
|
||||
/// The deadline for when the payload builder job should resolve.
|
||||
fn deadline(&self) -> Duration;
|
||||
|
||||
/// Target gas ceiling for built blocks.
|
||||
fn max_gas_limit(&self) -> u64;
|
||||
|
||||
/// Maximum number of tasks to spawn for building a payload.
|
||||
fn max_payload_tasks(&self) -> usize;
|
||||
|
||||
/// Returns whether or not to construct the pending block.
|
||||
#[cfg(feature = "optimism")]
|
||||
fn compute_pending_block(&self) -> bool;
|
||||
}
|
||||
|
||||
/// A trait that represents the configured network and can be used to apply additional configuration
|
||||
/// to the network.
|
||||
pub trait RethNetworkConfig {
|
||||
/// Adds a new additional protocol to the RLPx sub-protocol list.
|
||||
///
|
||||
/// These additional protocols are negotiated during the RLPx handshake.
|
||||
/// If both peers share the same protocol, the corresponding handler will be included alongside
|
||||
/// the `eth` protocol.
|
||||
///
|
||||
/// See also [ProtocolHandler](reth_network::protocol::ProtocolHandler)
|
||||
fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol);
|
||||
|
||||
/// Returns the secret key used for authenticating sessions.
|
||||
fn secret_key(&self) -> secp256k1::SecretKey;
|
||||
|
||||
// TODO add more network config methods here
|
||||
}
|
||||
|
||||
impl<C> RethNetworkConfig for reth_network::NetworkManager<C> {
|
||||
fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) {
|
||||
reth_network::NetworkManager::add_rlpx_sub_protocol(self, protocol);
|
||||
}
|
||||
|
||||
fn secret_key(&self) -> secp256k1::SecretKey {
|
||||
self.secret_key()
|
||||
}
|
||||
}
|
||||
|
||||
/// A trait that provides all basic config values for the transaction pool and is implemented by the
/// [TxPoolArgs](crate::args::TxPoolArgs) type.
pub trait RethTransactionPoolConfig {
    /// Returns the transaction pool configuration assembled from the implementor's settings.
    fn pool_config(&self) -> PoolConfig;
}
|
||||
360
crates/node-core/src/cli/ext.rs
Normal file
360
crates/node-core/src/cli/ext.rs
Normal file
@ -0,0 +1,360 @@
|
||||
//! Support for integrating customizations into the CLI.
|
||||
|
||||
use crate::cli::{
|
||||
components::{RethNodeComponents, RethRpcComponents, RethRpcServerHandles},
|
||||
config::{PayloadBuilderConfig, RethNetworkConfig, RethRpcConfig},
|
||||
};
|
||||
use clap::Args;
|
||||
use reth_basic_payload_builder::{
|
||||
BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig, PayloadBuilder,
|
||||
};
|
||||
use reth_node_api::EngineTypes;
|
||||
use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService};
|
||||
use reth_provider::CanonStateSubscriptions;
|
||||
use reth_tasks::TaskSpawner;
|
||||
use std::{fmt, marker::PhantomData};
|
||||
|
||||
/// A trait that allows for extending parts of the CLI with additional functionality.
///
/// This is intended as a way to allow to _extend_ the node command. For example, to register
/// additional RPC namespaces.
pub trait RethCliExt {
    /// Provides additional configuration for the node CLI command.
    ///
    /// This supports additional CLI arguments that can be used to modify the node configuration.
    ///
    /// If no additional CLI arguments are required, the [NoArgs] wrapper type can be used.
    type Node: RethNodeCommandExt;
}

/// The default CLI extension: no additional arguments, no custom behavior.
impl RethCliExt for () {
    type Node = DefaultRethNodeCommandConfig;
}
|
||||
|
||||
/// A trait that allows for extending and customizing parts of the reth node command.
///
/// The functions are invoked during the initialization of the node command in the following order:
///
/// 1. [configure_network](RethNodeCommandConfig::configure_network)
/// 2. [on_components_initialized](RethNodeCommandConfig::on_components_initialized)
/// 3. [spawn_payload_builder_service](RethNodeCommandConfig::spawn_payload_builder_service)
/// 4. [extend_rpc_modules](RethNodeCommandConfig::extend_rpc_modules)
/// 5. [on_rpc_server_started](RethNodeCommandConfig::on_rpc_server_started)
/// 6. [on_node_started](RethNodeCommandConfig::on_node_started)
pub trait RethNodeCommandConfig: fmt::Debug {
    /// Invoked with the network configuration before the network is configured.
    ///
    /// This allows additional configuration of the network before it is launched.
    fn configure_network<Conf, Reth>(
        &mut self,
        config: &mut Conf,
        components: &Reth,
    ) -> eyre::Result<()>
    where
        Conf: RethNetworkConfig,
        Reth: RethNodeComponents,
    {
        // Default: no-op. The `let _` bindings silence unused-variable warnings while keeping
        // meaningful parameter names in the signature for implementors.
        let _ = config;
        let _ = components;
        Ok(())
    }

    /// Event hook called once all components have been initialized.
    ///
    /// This is called as soon as the node components have been initialized.
    fn on_components_initialized<Reth: RethNodeComponents>(
        &mut self,
        components: &Reth,
    ) -> eyre::Result<()> {
        let _ = components;
        Ok(())
    }

    /// Event hook called once the node has been launched.
    ///
    /// This is called last after the node has been launched.
    fn on_node_started<Reth: RethNodeComponents>(&mut self, components: &Reth) -> eyre::Result<()> {
        let _ = components;
        Ok(())
    }

    /// Event hook called once the rpc servers has been started.
    ///
    /// This is called after the rpc server has been started.
    fn on_rpc_server_started<Conf, Reth>(
        &mut self,
        config: &Conf,
        components: &Reth,
        rpc_components: RethRpcComponents<'_, Reth>,
        handles: RethRpcServerHandles,
    ) -> eyre::Result<()>
    where
        Conf: RethRpcConfig,
        Reth: RethNodeComponents,
    {
        let _ = config;
        let _ = components;
        let _ = rpc_components;
        let _ = handles;
        Ok(())
    }

    /// Allows for registering additional RPC modules for the transports.
    ///
    /// This is expected to call the merge functions of [reth_rpc_builder::TransportRpcModules], for
    /// example [reth_rpc_builder::TransportRpcModules::merge_configured].
    ///
    /// This is called before the rpc server will be started [Self::on_rpc_server_started].
    fn extend_rpc_modules<Conf, Reth>(
        &mut self,
        config: &Conf,
        components: &Reth,
        rpc_components: RethRpcComponents<'_, Reth>,
    ) -> eyre::Result<()>
    where
        Conf: RethRpcConfig,
        Reth: RethNodeComponents,
    {
        let _ = config;
        let _ = components;
        let _ = rpc_components;
        Ok(())
    }

    /// Configures the [PayloadBuilderService] for the node, spawns it and returns the
    /// [PayloadBuilderHandle].
    ///
    /// By default this spawns a [BasicPayloadJobGenerator] with the default configuration
    /// [BasicPayloadJobGeneratorConfig].
    fn spawn_payload_builder_service<Conf, Reth, Builder, Engine>(
        &mut self,
        conf: &Conf,
        components: &Reth,
        payload_builder: Builder,
    ) -> eyre::Result<PayloadBuilderHandle<Engine>>
    where
        Conf: PayloadBuilderConfig,
        Reth: RethNodeComponents,
        Engine: EngineTypes + 'static,
        Builder: PayloadBuilder<
                Reth::Pool,
                Reth::Provider,
                Attributes = Engine::PayloadBuilderAttributes,
                BuiltPayload = Engine::BuiltPayload,
            > + Unpin
            + 'static,
    {
        // Translate the user-provided payload settings into the job generator config.
        let payload_job_config = BasicPayloadJobGeneratorConfig::default()
            .interval(conf.interval())
            .deadline(conf.deadline())
            .max_payload_tasks(conf.max_payload_tasks())
            .extradata(conf.extradata_rlp_bytes())
            .max_gas_limit(conf.max_gas_limit());

        // no extradata for optimism: overwrite the configured extradata with an empty default
        #[cfg(feature = "optimism")]
        let payload_job_config = payload_job_config.extradata(Default::default());

        // The generator creates a new payload build job whenever new payload attributes arrive.
        let payload_generator = BasicPayloadJobGenerator::with_builder(
            components.provider(),
            components.pool(),
            components.task_executor(),
            payload_job_config,
            components.chain_spec(),
            payload_builder,
        );
        let (payload_service, payload_builder) = PayloadBuilderService::new(
            payload_generator,
            components.events().canonical_state_stream(),
        );

        // Spawned as a critical task: if the payload builder dies the node should shut down.
        components
            .task_executor()
            .spawn_critical("payload builder service", Box::pin(payload_service));

        Ok(payload_builder)
    }
}
|
||||
|
||||
/// A trait for types that can extend the node command: [RethNodeCommandConfig] behavior plus the
/// [clap::Args] parsing needed to surface any extra CLI flags.
pub trait RethNodeCommandExt: RethNodeCommandConfig + fmt::Debug + clap::Args {}

// blanket impl for all types that implement the required traits.
impl<T> RethNodeCommandExt for T where T: RethNodeCommandConfig + fmt::Debug + clap::Args {}

/// The default configuration for the reth node command.
///
/// This is a convenience type for [NoArgs<()>].
#[derive(Debug, Clone, Copy, Default, Args)]
#[non_exhaustive]
pub struct DefaultRethNodeCommandConfig;

// Relies entirely on the trait's default (no-op) method implementations.
impl RethNodeCommandConfig for DefaultRethNodeCommandConfig {}

impl RethNodeCommandConfig for () {}
|
||||
|
||||
/// A helper type for [RethCliExt] extensions that don't require any additional clap Arguments.
#[derive(Debug, Clone, Copy)]
pub struct NoArgsCliExt<Conf>(PhantomData<Conf>);

impl<Conf: RethNodeCommandConfig> RethCliExt for NoArgsCliExt<Conf> {
    type Node = NoArgs<Conf>;
}

/// A helper struct that allows for wrapping a [RethNodeCommandConfig] value without providing
/// additional CLI arguments.
///
/// Note: This type must be filled with a [RethNodeCommandConfig] manually before executing
/// the reth node command.
#[derive(Debug, Clone, Copy, Default, Args)]
pub struct NoArgs<T = ()> {
    // Hidden from clap: populated programmatically (e.g. via `NoArgs::with`/`set`), never from
    // the command line.
    #[clap(skip)]
    inner: Option<T>,
}
|
||||
|
||||
impl<T> NoArgs<T> {
|
||||
/// Creates a new instance of the wrapper type.
|
||||
pub fn with(inner: T) -> Self {
|
||||
Self { inner: Some(inner) }
|
||||
}
|
||||
|
||||
/// Sets the inner value.
|
||||
pub fn set(&mut self, inner: T) {
|
||||
self.inner = Some(inner)
|
||||
}
|
||||
|
||||
/// Transforms the configured value.
|
||||
pub fn map<U>(self, inner: U) -> NoArgs<U> {
|
||||
NoArgs::with(inner)
|
||||
}
|
||||
|
||||
/// Returns the inner value if it exists.
|
||||
pub fn inner(&self) -> Option<&T> {
|
||||
self.inner.as_ref()
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the inner value if it exists.
|
||||
pub fn inner_mut(&mut self) -> Option<&mut T> {
|
||||
self.inner.as_mut()
|
||||
}
|
||||
|
||||
/// Consumes the wrapper and returns the inner value if it exists.
|
||||
pub fn into_inner(self) -> Option<T> {
|
||||
self.inner
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: RethNodeCommandConfig> RethNodeCommandConfig for NoArgs<T> {
|
||||
fn configure_network<Conf, Reth>(
|
||||
&mut self,
|
||||
config: &mut Conf,
|
||||
components: &Reth,
|
||||
) -> eyre::Result<()>
|
||||
where
|
||||
Conf: RethNetworkConfig,
|
||||
Reth: RethNodeComponents,
|
||||
{
|
||||
if let Some(conf) = self.inner_mut() {
|
||||
conf.configure_network(config, components)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn on_components_initialized<Reth: RethNodeComponents>(
|
||||
&mut self,
|
||||
components: &Reth,
|
||||
) -> eyre::Result<()> {
|
||||
if let Some(conf) = self.inner_mut() {
|
||||
conf.on_components_initialized(components)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn on_node_started<Reth: RethNodeComponents>(&mut self, components: &Reth) -> eyre::Result<()> {
|
||||
if let Some(conf) = self.inner_mut() {
|
||||
conf.on_node_started(components)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn on_rpc_server_started<Conf, Reth>(
|
||||
&mut self,
|
||||
config: &Conf,
|
||||
components: &Reth,
|
||||
rpc_components: RethRpcComponents<'_, Reth>,
|
||||
handles: RethRpcServerHandles,
|
||||
) -> eyre::Result<()>
|
||||
where
|
||||
Conf: RethRpcConfig,
|
||||
Reth: RethNodeComponents,
|
||||
{
|
||||
if let Some(conf) = self.inner_mut() {
|
||||
conf.on_rpc_server_started(config, components, rpc_components, handles)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn extend_rpc_modules<Conf, Reth>(
|
||||
&mut self,
|
||||
config: &Conf,
|
||||
components: &Reth,
|
||||
rpc_components: RethRpcComponents<'_, Reth>,
|
||||
) -> eyre::Result<()>
|
||||
where
|
||||
Conf: RethRpcConfig,
|
||||
Reth: RethNodeComponents,
|
||||
{
|
||||
if let Some(conf) = self.inner_mut() {
|
||||
conf.extend_rpc_modules(config, components, rpc_components)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn spawn_payload_builder_service<Conf, Reth, Builder, Engine>(
|
||||
&mut self,
|
||||
conf: &Conf,
|
||||
components: &Reth,
|
||||
payload_builder: Builder,
|
||||
) -> eyre::Result<PayloadBuilderHandle<Engine>>
|
||||
where
|
||||
Conf: PayloadBuilderConfig,
|
||||
Reth: RethNodeComponents,
|
||||
Engine: EngineTypes + 'static,
|
||||
Builder: PayloadBuilder<
|
||||
Reth::Pool,
|
||||
Reth::Provider,
|
||||
Attributes = Engine::PayloadBuilderAttributes,
|
||||
BuiltPayload = Engine::BuiltPayload,
|
||||
> + Unpin
|
||||
+ 'static,
|
||||
{
|
||||
self.inner_mut()
|
||||
.ok_or_else(|| eyre::eyre!("config value must be set"))?
|
||||
.spawn_payload_builder_service(conf, components, payload_builder)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> From<T> for NoArgs<T> {
    /// Wraps the value via [NoArgs::with]; also provides `Into<NoArgs<T>>` for free.
    fn from(value: T) -> Self {
        Self::with(value)
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Compile-time check that a type satisfies the `RethNodeCommandExt` bound.
    fn assert_ext<T: RethNodeCommandExt>() {}

    #[test]
    fn ensure_ext() {
        assert_ext::<DefaultRethNodeCommandConfig>();
        assert_ext::<NoArgs<()>>();
    }
}
|
||||
3
crates/node-core/src/cli/mod.rs
Normal file
3
crates/node-core/src/cli/mod.rs
Normal file
@ -0,0 +1,3 @@
|
||||
/// Abstractions over the components a configured node is composed of.
pub mod components;
/// Configuration traits (rpc, network, payload builder, transaction pool).
pub mod config;
/// Support for integrating customizations into the CLI.
pub mod ext;
|
||||
384
crates/node-core/src/dirs.rs
Normal file
384
crates/node-core/src/dirs.rs
Normal file
@ -0,0 +1,384 @@
|
||||
//! reth data directories.
|
||||
|
||||
use crate::utils::parse_path;
|
||||
use reth_primitives::Chain;
|
||||
use std::{
|
||||
env::VarError,
|
||||
fmt::{Debug, Display, Formatter},
|
||||
path::{Path, PathBuf},
|
||||
str::FromStr,
|
||||
};
|
||||
|
||||
/// Constructs a string to be used as a path for configuration and db paths.
///
/// Delegates to the chain's `Display` implementation.
pub fn config_path_prefix(chain: Chain) -> String {
    chain.to_string()
}
|
||||
|
||||
/// Returns the path to the reth data directory.
///
/// Refer to [dirs_next::data_dir] for cross-platform behavior.
pub fn data_dir() -> Option<PathBuf> {
    // `None` when the platform has no conventional data directory.
    dirs_next::data_dir().map(|root| root.join("reth"))
}

/// Returns the path to the reth database.
///
/// Refer to [dirs_next::data_dir] for cross-platform behavior.
pub fn database_path() -> Option<PathBuf> {
    // `<data_dir>/reth/db`
    data_dir().map(|root| root.join("db"))
}

/// Returns the path to the reth configuration directory.
///
/// Refer to [dirs_next::config_dir] for cross-platform behavior.
pub fn config_dir() -> Option<PathBuf> {
    dirs_next::config_dir().map(|root| root.join("reth"))
}

/// Returns the path to the reth cache directory.
///
/// Refer to [dirs_next::cache_dir] for cross-platform behavior.
pub fn cache_dir() -> Option<PathBuf> {
    dirs_next::cache_dir().map(|root| root.join("reth"))
}

/// Returns the path to the reth logs directory.
///
/// Refer to [dirs_next::cache_dir] for cross-platform behavior.
pub fn logs_dir() -> Option<PathBuf> {
    // Logs live under the cache dir: `<cache_dir>/reth/logs`.
    cache_dir().map(|root| root.join("logs"))
}
|
||||
|
||||
/// Marker type representing the reth data dir.
///
/// The data dir should contain a subdirectory for each chain, and those chain directories will
/// include all information for that chain, such as the p2p secret.
#[derive(Clone, Copy, Debug, Default)]
#[non_exhaustive]
pub struct DataDirPath;

impl XdgPath for DataDirPath {
    fn resolve() -> Option<PathBuf> {
        data_dir()
    }
}

/// Marker type representing the reth logs directory.
///
/// Refer to [dirs_next::cache_dir] for cross-platform behavior.
#[derive(Clone, Copy, Debug, Default)]
#[non_exhaustive]
pub struct LogsDir;

impl XdgPath for LogsDir {
    fn resolve() -> Option<PathBuf> {
        logs_dir()
    }
}

/// A small helper trait for unit structs that represent a standard path following the XDG
/// path specification.
pub trait XdgPath {
    /// Resolve the standard path.
    ///
    /// Returns `None` if the platform default cannot be determined.
    fn resolve() -> Option<PathBuf>;
}
|
||||
|
||||
/// A wrapper type that either parses a user-given path or defaults to an
|
||||
/// OS-specific path.
|
||||
///
|
||||
/// The [FromStr] implementation supports shell expansions and common patterns such as `~` for the
|
||||
/// home directory.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use reth_node_core::dirs::{DataDirPath, PlatformPath};
|
||||
/// use std::str::FromStr;
|
||||
///
|
||||
/// // Resolves to the platform-specific database path
|
||||
/// let default: PlatformPath<DataDirPath> = PlatformPath::default();
|
||||
/// // Resolves to `$(pwd)/my/path/to/datadir`
|
||||
/// let custom: PlatformPath<DataDirPath> = PlatformPath::from_str("my/path/to/datadir").unwrap();
|
||||
///
|
||||
/// assert_ne!(default.as_ref(), custom.as_ref());
|
||||
/// ```
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct PlatformPath<D>(PathBuf, std::marker::PhantomData<D>);
|
||||
|
||||
impl<D> Display for PlatformPath<D> {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", self.0.display())
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> Clone for PlatformPath<D> {
|
||||
fn clone(&self) -> Self {
|
||||
Self(self.0.clone(), std::marker::PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: XdgPath> Default for PlatformPath<D> {
|
||||
fn default() -> Self {
|
||||
Self(
|
||||
D::resolve().expect("Could not resolve default path. Set one manually."),
|
||||
std::marker::PhantomData,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> FromStr for PlatformPath<D> {
|
||||
type Err = shellexpand::LookupError<VarError>;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
Ok(Self(parse_path(s)?, std::marker::PhantomData))
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> AsRef<Path> for PlatformPath<D> {
|
||||
fn as_ref(&self) -> &Path {
|
||||
self.0.as_path()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> From<PlatformPath<D>> for PathBuf {
|
||||
fn from(value: PlatformPath<D>) -> Self {
|
||||
value.0
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> PlatformPath<D> {
|
||||
/// Returns the path joined with another path
|
||||
pub fn join<P: AsRef<Path>>(&self, path: P) -> PlatformPath<D> {
|
||||
PlatformPath::<D>(self.0.join(path), std::marker::PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> PlatformPath<D> {
|
||||
/// Converts the path to a `ChainPath` with the given `Chain`.
|
||||
pub fn with_chain(&self, chain: Chain) -> ChainPath<D> {
|
||||
// extract chain name
|
||||
let chain_name = config_path_prefix(chain);
|
||||
|
||||
let path = self.0.join(chain_name);
|
||||
|
||||
let platform_path = PlatformPath::<D>(path, std::marker::PhantomData);
|
||||
ChainPath::new(platform_path, chain)
|
||||
}
|
||||
|
||||
/// Map the inner path to a new type `T`.
|
||||
pub fn map_to<T>(&self) -> PlatformPath<T> {
|
||||
PlatformPath(self.0.clone(), std::marker::PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
/// An Optional wrapper type around [PlatformPath].
///
/// This is useful for when a path is optional, such as the `--data-dir` flag.
#[derive(Clone, Debug, PartialEq)]
pub struct MaybePlatformPath<D>(Option<PlatformPath<D>>);

// === impl MaybePlatformPath ===

impl<D: XdgPath> MaybePlatformPath<D> {
    /// Returns the path if it is set, otherwise returns the default path for the given chain.
    ///
    /// NOTE: a user-supplied path is used verbatim; only the platform default gets the
    /// `<DIR>/<CHAIN>` subdirectory appended (see the module tests).
    pub fn unwrap_or_chain_default(&self, chain: Chain) -> ChainPath<D> {
        ChainPath(
            self.0.clone().unwrap_or_else(|| PlatformPath::default().with_chain(chain).0),
            chain,
        )
    }

    /// Returns the default platform path for the specified [Chain].
    pub fn chain_default(chain: Chain) -> ChainPath<D> {
        PlatformPath::default().with_chain(chain)
    }

    /// Returns true if a custom path is set
    pub fn is_some(&self) -> bool {
        self.0.is_some()
    }

    /// Returns the path if it is set, otherwise returns `None`.
    pub fn as_ref(&self) -> Option<&Path> {
        self.0.as_ref().map(|p| p.as_ref())
    }

    /// Returns the path if it is set, otherwise returns the default path, without any chain
    /// directory.
    pub fn unwrap_or_default(&self) -> PlatformPath<D> {
        self.0.clone().unwrap_or_default()
    }
}

impl<D: XdgPath> Display for MaybePlatformPath<D> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        if let Some(path) = &self.0 {
            path.fmt(f)
        } else {
            // NOTE: this is a workaround for making it work with clap's `default_value_t` which
            // computes the default value via `Default -> Display -> FromStr`
            write!(f, "default")
        }
    }
}

// Manual impl: a derived `Default` would add an unnecessary `D: Default` bound.
impl<D> Default for MaybePlatformPath<D> {
    fn default() -> Self {
        Self(None)
    }
}

impl<D> FromStr for MaybePlatformPath<D> {
    type Err = shellexpand::LookupError<VarError>;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let p = match s {
            "default" => {
                // NOTE: this is a workaround for making it work with clap's `default_value_t` which
                // computes the default value via `Default -> Display -> FromStr`
                None
            }
            _ => Some(PlatformPath::from_str(s)?),
        };
        Ok(Self(p))
    }
}

impl<D> From<PathBuf> for MaybePlatformPath<D> {
    fn from(path: PathBuf) -> Self {
        Self(Some(PlatformPath(path, std::marker::PhantomData)))
    }
}
|
||||
|
||||
/// Wrapper type around PlatformPath that includes a `Chain`, used for separating reth data for
/// different networks.
///
/// If the chain is either mainnet, goerli, or sepolia, then the path will be:
/// * mainnet: `<DIR>/mainnet`
/// * goerli: `<DIR>/goerli`
/// * sepolia: `<DIR>/sepolia`
/// Otherwise, the path will be dependent on the chain ID:
/// * `<DIR>/<CHAIN_ID>`
#[derive(Clone, Debug, PartialEq)]
pub struct ChainPath<D>(PlatformPath<D>, Chain);

impl<D> ChainPath<D> {
    /// Returns a new `ChainPath` given a `PlatformPath` and a `Chain`.
    pub fn new(path: PlatformPath<D>, chain: Chain) -> Self {
        Self(path, chain)
    }

    /// Returns the path to the reth data directory for this chain.
    ///
    /// `<DIR>/<CHAIN_ID>`
    pub fn data_dir_path(&self) -> PathBuf {
        self.0.as_ref().into()
    }

    /// Returns the path to the db directory for this chain.
    ///
    /// `<DIR>/<CHAIN_ID>/db`
    pub fn db_path(&self) -> PathBuf {
        self.0.join("db").into()
    }

    /// Returns the path to the snapshots directory for this chain.
    ///
    /// `<DIR>/<CHAIN_ID>/snapshots`
    pub fn snapshots_path(&self) -> PathBuf {
        self.0.join("snapshots").into()
    }

    /// Returns the path to the reth p2p secret key for this chain.
    ///
    /// `<DIR>/<CHAIN_ID>/discovery-secret`
    pub fn p2p_secret_path(&self) -> PathBuf {
        self.0.join("discovery-secret").into()
    }

    /// Returns the path to the known peers file for this chain.
    ///
    /// `<DIR>/<CHAIN_ID>/known-peers.json`
    pub fn known_peers_path(&self) -> PathBuf {
        self.0.join("known-peers.json").into()
    }

    /// Returns the path to the blobstore directory for this chain where blobs of unfinalized
    /// transactions are stored.
    ///
    /// `<DIR>/<CHAIN_ID>/blobstore`
    pub fn blobstore_path(&self) -> PathBuf {
        self.0.join("blobstore").into()
    }

    /// Returns the path to the local transactions backup file
    ///
    /// `<DIR>/<CHAIN_ID>/txpool-transactions-backup.rlp`
    pub fn txpool_transactions_path(&self) -> PathBuf {
        self.0.join("txpool-transactions-backup.rlp").into()
    }

    /// Returns the path to the config file for this chain.
    ///
    /// `<DIR>/<CHAIN_ID>/reth.toml`
    pub fn config_path(&self) -> PathBuf {
        self.0.join("reth.toml").into()
    }

    /// Returns the path to the jwtsecret file for this chain.
    ///
    /// `<DIR>/<CHAIN_ID>/jwt.hex`
    pub fn jwt_path(&self) -> PathBuf {
        self.0.join("jwt.hex").into()
    }
}

impl<D> AsRef<Path> for ChainPath<D> {
    fn as_ref(&self) -> &Path {
        self.0.as_ref()
    }
}

impl<D> Display for ChainPath<D> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Displays only the path; the chain is not rendered.
        write!(f, "{}", self.0)
    }
}

impl<D> From<ChainPath<D>> for PathBuf {
    fn from(value: ChainPath<D>) -> Self {
        value.0.into()
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_maybe_data_dir_path() {
        // Unset path falls back to the platform default plus the chain subdirectory.
        // `Path::ends_with` compares whole components, so these checks are cross-platform.
        let path = MaybePlatformPath::<DataDirPath>::default();
        let path = path.unwrap_or_chain_default(Chain::mainnet());
        assert!(path.as_ref().ends_with("reth/mainnet"), "{:?}", path);

        let db_path = path.db_path();
        assert!(db_path.ends_with("reth/mainnet/db"), "{:?}", db_path);

        // A user-provided path is used as-is: no chain subdirectory is appended.
        let path = MaybePlatformPath::<DataDirPath>::from_str("my/path/to/datadir").unwrap();
        let path = path.unwrap_or_chain_default(Chain::mainnet());
        assert!(path.as_ref().ends_with("my/path/to/datadir"), "{:?}", path);
    }

    #[test]
    fn test_maybe_testnet_datadir_path() {
        // Known testnets resolve to their name rather than a numeric chain id.
        let path = MaybePlatformPath::<DataDirPath>::default();
        let path = path.unwrap_or_chain_default(Chain::goerli());
        assert!(path.as_ref().ends_with("reth/goerli"), "{:?}", path);

        let path = MaybePlatformPath::<DataDirPath>::default();
        let path = path.unwrap_or_chain_default(Chain::holesky());
        assert!(path.as_ref().ends_with("reth/holesky"), "{:?}", path);

        let path = MaybePlatformPath::<DataDirPath>::default();
        let path = path.unwrap_or_chain_default(Chain::sepolia());
        assert!(path.as_ref().ends_with("reth/sepolia"), "{:?}", path);
    }
}
|
||||
348
crates/node-core/src/init.rs
Normal file
348
crates/node-core/src/init.rs
Normal file
@ -0,0 +1,348 @@
|
||||
//! Reth genesis initialization utility functions.
|
||||
|
||||
use reth_db::{
|
||||
cursor::DbCursorRO,
|
||||
database::Database,
|
||||
tables,
|
||||
transaction::{DbTx, DbTxMut},
|
||||
};
|
||||
use reth_interfaces::{db::DatabaseError, provider::ProviderResult};
|
||||
use reth_primitives::{
|
||||
stage::StageId, Account, Bytecode, ChainSpec, Receipts, StorageEntry, B256, U256,
|
||||
};
|
||||
use reth_provider::{
|
||||
bundle_state::{BundleStateInit, RevertsInit},
|
||||
BundleStateWithReceipts, DatabaseProviderRW, HashingWriter, HistoryWriter, OriginalValuesKnown,
|
||||
ProviderError, ProviderFactory,
|
||||
};
|
||||
use std::{
|
||||
collections::{BTreeMap, HashMap},
|
||||
sync::Arc,
|
||||
};
|
||||
use tracing::debug;
|
||||
|
||||
/// Database initialization error type.
#[derive(Debug, thiserror::Error, PartialEq, Eq, Clone)]
pub enum InitDatabaseError {
    /// An existing genesis block was found in the database, and its hash did not match the hash of
    /// the chainspec.
    #[error("genesis hash in the database does not match the specified chainspec: chainspec is {chainspec_hash}, database is {database_hash}")]
    GenesisHashMismatch {
        /// Expected genesis hash.
        chainspec_hash: B256,
        /// Actual genesis hash.
        database_hash: B256,
    },

    /// Provider error.
    #[error(transparent)]
    Provider(#[from] ProviderError),
}

// Route raw database errors through `ProviderError` so `?` works on both error types.
impl From<DatabaseError> for InitDatabaseError {
    fn from(error: DatabaseError) -> Self {
        Self::Provider(ProviderError::Database(error))
    }
}
|
||||
|
||||
/// Write the genesis block if it has not already been written.
///
/// Returns the genesis hash on success. If the database already contains a canonical header
/// whose hash differs from the chainspec's genesis hash, this fails with
/// [InitDatabaseError::GenesisHashMismatch].
pub fn init_genesis<DB: Database>(
    db: Arc<DB>,
    chain: Arc<ChainSpec>,
) -> Result<B256, InitDatabaseError> {
    let genesis = chain.genesis();

    let hash = chain.genesis_hash();

    // Fast path: a canonical header at the lowest block number means the database was
    // initialized before.
    let tx = db.tx()?;
    if let Some((_, db_hash)) = tx.cursor_read::<tables::CanonicalHeaders>()?.first()? {
        if db_hash == hash {
            debug!("Genesis already written, skipping.");
            return Ok(hash)
        }

        // The database was initialized with a different chainspec; refuse to overwrite it.
        return Err(InitDatabaseError::GenesisHashMismatch {
            chainspec_hash: hash,
            database_hash: db_hash,
        })
    }

    // Release the read transaction before opening write transactions below.
    drop(tx);
    debug!("Writing genesis block.");

    // use transaction to insert genesis header
    let factory = ProviderFactory::new(&db, chain.clone());
    let provider_rw = factory.provider_rw()?;
    insert_genesis_hashes(&provider_rw, genesis)?;
    insert_genesis_history(&provider_rw, genesis)?;
    provider_rw.commit()?;

    // Insert header
    let tx = db.tx_mut()?;
    insert_genesis_header::<DB>(&tx, chain.clone())?;

    insert_genesis_state::<DB>(&tx, genesis)?;

    // insert sync stage: initialize all pipeline stages with default (zero) progress
    for stage in StageId::ALL.iter() {
        tx.put::<tables::SyncStage>(stage.to_string(), Default::default())?;
    }

    tx.commit()?;
    Ok(hash)
}
|
||||
|
||||
/// Inserts the genesis state into the database.
///
/// Builds a [BundleStateWithReceipts] from the genesis `alloc` (accounts, bytecode, storage)
/// and writes it at block 0.
pub fn insert_genesis_state<DB: Database>(
    tx: &<DB as Database>::TXMut,
    genesis: &reth_primitives::Genesis,
) -> ProviderResult<()> {
    let mut state_init: BundleStateInit = HashMap::new();
    let mut reverts_init = HashMap::new();
    let mut contracts: HashMap<B256, Bytecode> = HashMap::new();

    for (address, account) in &genesis.alloc {
        // Hash and collect deployed bytecode, if any.
        let bytecode_hash = if let Some(code) = &account.code {
            let bytecode = Bytecode::new_raw(code.clone());
            let hash = bytecode.hash_slow();
            contracts.insert(hash, bytecode);
            Some(hash)
        } else {
            None
        };

        // get state: each slot transitions from zero (pre-genesis) to its genesis value
        let storage = account
            .storage
            .as_ref()
            .map(|m| {
                m.iter()
                    .map(|(key, value)| {
                        let value = U256::from_be_bytes(value.0);
                        (*key, (U256::ZERO, value))
                    })
                    .collect::<HashMap<_, _>>()
            })
            .unwrap_or_default();

        // Reverting genesis removes the account (`Some(None)`) and zeroes all of its slots.
        reverts_init.insert(
            *address,
            (Some(None), storage.keys().map(|k| StorageEntry::new(*k, U256::ZERO)).collect()),
        );

        state_init.insert(
            *address,
            (
                None,
                Some(Account {
                    nonce: account.nonce.unwrap_or_default(),
                    balance: account.balance,
                    bytecode_hash,
                }),
                storage,
            ),
        );
    }
    // All reverts belong to block 0 (genesis).
    let all_reverts_init: RevertsInit = HashMap::from([(0, reverts_init)]);

    let bundle = BundleStateWithReceipts::new_init(
        state_init,
        all_reverts_init,
        contracts.into_iter().collect(),
        Receipts::new(),
        0,
    );

    bundle.write_to_db(tx, OriginalValuesKnown::Yes)?;

    Ok(())
}
|
||||
|
||||
/// Inserts hashes for the genesis state.
///
/// Populates the hashed account and hashed storage tables from the genesis `alloc`.
pub fn insert_genesis_hashes<DB: Database>(
    provider: &DatabaseProviderRW<&DB>,
    genesis: &reth_primitives::Genesis,
) -> ProviderResult<()> {
    // insert and hash accounts to hashing table
    let alloc_accounts = genesis
        .alloc
        .clone()
        .into_iter()
        .map(|(addr, account)| (addr, Some(Account::from_genesis_account(account))));
    provider.insert_account_for_hashing(alloc_accounts)?;

    let alloc_storage = genesis.alloc.clone().into_iter().filter_map(|(addr, account)| {
        // only return Some if there is storage
        account.storage.map(|storage| {
            (
                addr,
                storage.into_iter().map(|(key, value)| StorageEntry { key, value: value.into() }),
            )
        })
    });
    provider.insert_storage_for_hashing(alloc_storage)?;

    Ok(())
}
|
||||
|
||||
/// Inserts history indices for genesis accounts and storage.
///
/// Every allocated account (and each of its storage slots) is recorded as changed at block 0.
pub fn insert_genesis_history<DB: Database>(
    provider: &DatabaseProviderRW<&DB>,
    genesis: &reth_primitives::Genesis,
) -> ProviderResult<()> {
    // Account history: address -> [0]
    let account_transitions =
        genesis.alloc.keys().map(|addr| (*addr, vec![0])).collect::<BTreeMap<_, _>>();
    provider.insert_account_history_index(account_transitions)?;

    // Storage history: (address, slot) -> [0], for accounts that have storage
    let storage_transitions = genesis
        .alloc
        .iter()
        .filter_map(|(addr, account)| account.storage.as_ref().map(|storage| (addr, storage)))
        .flat_map(|(addr, storage)| storage.iter().map(|(key, _)| ((*addr, *key), vec![0])))
        .collect::<BTreeMap<_, _>>();
    provider.insert_storage_history_index(storage_transitions)?;

    Ok(())
}
|
||||
|
||||
/// Inserts header for the genesis state.
|
||||
pub fn insert_genesis_header<DB: Database>(
|
||||
tx: &<DB as Database>::TXMut,
|
||||
chain: Arc<ChainSpec>,
|
||||
) -> ProviderResult<()> {
|
||||
let header = chain.sealed_genesis_header();
|
||||
|
||||
tx.put::<tables::CanonicalHeaders>(0, header.hash)?;
|
||||
tx.put::<tables::HeaderNumbers>(header.hash, 0)?;
|
||||
tx.put::<tables::BlockBodyIndices>(0, Default::default())?;
|
||||
tx.put::<tables::HeaderTD>(0, header.difficulty.into())?;
|
||||
tx.put::<tables::Headers>(0, header.header)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    use reth_db::{
        models::{storage_sharded_key::StorageShardedKey, ShardedKey},
        table::{Table, TableRow},
        test_utils::create_test_rw_db,
        DatabaseEnv,
    };
    use reth_primitives::{
        Address, Chain, ForkTimestamps, Genesis, GenesisAccount, IntegerList, GOERLI,
        GOERLI_GENESIS_HASH, MAINNET, MAINNET_GENESIS_HASH, SEPOLIA, SEPOLIA_GENESIS_HASH,
    };
    use std::collections::HashMap;

    /// Collects every row of table `T` into a vector.
    fn collect_table_entries<DB, T>(
        tx: &<DB as Database>::TX,
    ) -> Result<Vec<TableRow<T>>, InitDatabaseError>
    where
        DB: Database,
        T: Table,
    {
        let entries = tx.cursor_read::<T>()?.walk_range(..)?.collect::<Result<Vec<_>, _>>()?;
        Ok(entries)
    }

    #[test]
    fn success_init_genesis_mainnet() {
        let database = create_test_rw_db();
        let actual_hash = init_genesis(database, MAINNET.clone()).unwrap();
        assert_eq!(actual_hash, MAINNET_GENESIS_HASH);
    }

    #[test]
    fn success_init_genesis_goerli() {
        let database = create_test_rw_db();
        let actual_hash = init_genesis(database, GOERLI.clone()).unwrap();
        assert_eq!(actual_hash, GOERLI_GENESIS_HASH);
    }

    #[test]
    fn success_init_genesis_sepolia() {
        let database = create_test_rw_db();
        let actual_hash = init_genesis(database, SEPOLIA.clone()).unwrap();
        assert_eq!(actual_hash, SEPOLIA_GENESIS_HASH);
    }

    #[test]
    fn fail_init_inconsistent_db() {
        let database = create_test_rw_db();
        init_genesis(database.clone(), SEPOLIA.clone()).unwrap();

        // Re-initializing the same database with a different genesis must be rejected.
        let result = init_genesis(database, MAINNET.clone());
        assert_eq!(
            result.unwrap_err(),
            InitDatabaseError::GenesisHashMismatch {
                chainspec_hash: MAINNET_GENESIS_HASH,
                database_hash: SEPOLIA_GENESIS_HASH
            }
        )
    }

    #[test]
    fn init_genesis_history() {
        let address_with_balance = Address::with_last_byte(1);
        let address_with_storage = Address::with_last_byte(2);
        let storage_key = B256::with_last_byte(1);

        // Minimal chain spec: one plain account plus one account carrying a storage slot.
        let spec = Arc::new(ChainSpec {
            chain: Chain::from_id(1),
            genesis: Genesis {
                alloc: HashMap::from([
                    (
                        address_with_balance,
                        GenesisAccount { balance: U256::from(1), ..Default::default() },
                    ),
                    (
                        address_with_storage,
                        GenesisAccount {
                            storage: Some(HashMap::from([(storage_key, B256::random())])),
                            ..Default::default()
                        },
                    ),
                ]),
                ..Default::default()
            },
            hardforks: BTreeMap::default(),
            fork_timestamps: ForkTimestamps::default(),
            genesis_hash: None,
            paris_block_and_final_difficulty: None,
            deposit_contract: None,
            ..Default::default()
        });

        let database = create_test_rw_db();
        init_genesis(database.clone(), spec).unwrap();

        let tx = database.tx().expect("failed to init tx");

        // Both accounts must own an account-history shard that points at block 0.
        assert_eq!(
            collect_table_entries::<Arc<DatabaseEnv>, tables::AccountHistory>(&tx)
                .expect("failed to collect"),
            vec![
                (ShardedKey::new(address_with_balance, u64::MAX), IntegerList::new([0]).unwrap()),
                (ShardedKey::new(address_with_storage, u64::MAX), IntegerList::new([0]).unwrap())
            ],
        );

        // And the single storage slot must own a storage-history shard at block 0.
        assert_eq!(
            collect_table_entries::<Arc<DatabaseEnv>, tables::StorageHistory>(&tx)
                .expect("failed to collect"),
            vec![(
                StorageShardedKey::new(address_with_storage, storage_key, u64::MAX),
                IntegerList::new([0]).unwrap()
            )],
        );
    }
}
|
||||
50
crates/node-core/src/lib.rs
Normal file
50
crates/node-core/src/lib.rs
Normal file
@ -0,0 +1,50 @@
|
||||
//! The core of the Ethereum node. Collection of utilities and libraries that are used by the node.
|
||||
|
||||
#![allow(missing_docs)]
|
||||
#![allow(missing_debug_implementations)]
|
||||
#![allow(dead_code)]
|
||||
|
||||
pub mod args;
|
||||
pub mod cli;
|
||||
pub mod dirs;
|
||||
pub mod init;
|
||||
pub mod utils;
|
||||
pub mod version;
|
||||
|
||||
/// Re-exported from `reth_primitives`.
|
||||
pub mod primitives {
|
||||
pub use reth_primitives::*;
|
||||
}
|
||||
|
||||
/// Re-export of `reth_rpc_*` crates.
|
||||
pub mod rpc {
|
||||
|
||||
/// Re-exported from `reth_rpc_builder`.
|
||||
pub mod builder {
|
||||
pub use reth_rpc_builder::*;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_rpc_types`.
|
||||
pub mod types {
|
||||
pub use reth_rpc_types::*;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_rpc_api`.
|
||||
pub mod api {
|
||||
pub use reth_rpc_api::*;
|
||||
}
|
||||
/// Re-exported from `reth_rpc::eth`.
|
||||
pub mod eth {
|
||||
pub use reth_rpc::eth::*;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_rpc::rpc`.
|
||||
pub mod result {
|
||||
pub use reth_rpc::result::*;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_rpc::eth`.
|
||||
pub mod compat {
|
||||
pub use reth_rpc_types_compat::*;
|
||||
}
|
||||
}
|
||||
293
crates/node-core/src/utils.rs
Normal file
293
crates/node-core/src/utils.rs
Normal file
@ -0,0 +1,293 @@
|
||||
//! Common CLI utility functions.
|
||||
|
||||
use boyer_moore_magiclen::BMByte;
|
||||
use eyre::Result;
|
||||
use reth_consensus_common::validation::validate_block_standalone;
|
||||
use reth_db::{
|
||||
cursor::{DbCursorRO, DbDupCursorRO},
|
||||
database::Database,
|
||||
table::{Decode, Decompress, DupSort, Table, TableRow},
|
||||
transaction::{DbTx, DbTxMut},
|
||||
DatabaseError, RawTable, TableRawRow,
|
||||
};
|
||||
use reth_interfaces::p2p::{
|
||||
bodies::client::BodiesClient,
|
||||
headers::client::{HeadersClient, HeadersRequest},
|
||||
priority::Priority,
|
||||
};
|
||||
use reth_network::NetworkManager;
|
||||
use reth_primitives::{
|
||||
fs, BlockHashOrNumber, ChainSpec, HeadersDirection, SealedBlock, SealedHeader,
|
||||
};
|
||||
use reth_provider::BlockReader;
|
||||
use reth_rpc::{JwtError, JwtSecret};
|
||||
use std::{
|
||||
env::VarError,
|
||||
path::{Path, PathBuf},
|
||||
rc::Rc,
|
||||
sync::Arc,
|
||||
};
|
||||
use tracing::{debug, info, trace, warn};
|
||||
|
||||
/// Exposing `open_db_read_only` function
|
||||
pub mod db {
|
||||
pub use reth_db::open_db_read_only;
|
||||
}
|
||||
|
||||
/// Get a single header from network
|
||||
pub async fn get_single_header<Client>(
|
||||
client: Client,
|
||||
id: BlockHashOrNumber,
|
||||
) -> Result<SealedHeader>
|
||||
where
|
||||
Client: HeadersClient,
|
||||
{
|
||||
let request = HeadersRequest { direction: HeadersDirection::Rising, limit: 1, start: id };
|
||||
|
||||
let (peer_id, response) =
|
||||
client.get_headers_with_priority(request, Priority::High).await?.split();
|
||||
|
||||
if response.len() != 1 {
|
||||
client.report_bad_message(peer_id);
|
||||
eyre::bail!("Invalid number of headers received. Expected: 1. Received: {}", response.len())
|
||||
}
|
||||
|
||||
let header = response.into_iter().next().unwrap().seal_slow();
|
||||
|
||||
let valid = match id {
|
||||
BlockHashOrNumber::Hash(hash) => header.hash() == hash,
|
||||
BlockHashOrNumber::Number(number) => header.number == number,
|
||||
};
|
||||
|
||||
if !valid {
|
||||
client.report_bad_message(peer_id);
|
||||
eyre::bail!(
|
||||
"Received invalid header. Received: {:?}. Expected: {:?}",
|
||||
header.num_hash(),
|
||||
id
|
||||
);
|
||||
}
|
||||
|
||||
Ok(header)
|
||||
}
|
||||
|
||||
/// Get a body from network based on header
|
||||
pub async fn get_single_body<Client>(
|
||||
client: Client,
|
||||
chain_spec: Arc<ChainSpec>,
|
||||
header: SealedHeader,
|
||||
) -> Result<SealedBlock>
|
||||
where
|
||||
Client: BodiesClient,
|
||||
{
|
||||
let (peer_id, response) = client.get_block_body(header.hash).await?.split();
|
||||
|
||||
if response.is_none() {
|
||||
client.report_bad_message(peer_id);
|
||||
eyre::bail!("Invalid number of bodies received. Expected: 1. Received: 0")
|
||||
}
|
||||
|
||||
let block = response.unwrap();
|
||||
let block = SealedBlock {
|
||||
header,
|
||||
body: block.transactions,
|
||||
ommers: block.ommers,
|
||||
withdrawals: block.withdrawals,
|
||||
};
|
||||
|
||||
validate_block_standalone(&block, &chain_spec)?;
|
||||
|
||||
Ok(block)
|
||||
}
|
||||
|
||||
/// Wrapper over DB that implements many useful DB queries.
pub struct DbTool<'a, DB: Database> {
    // Borrowed handle to the underlying database.
    pub(crate) db: &'a DB,
    // Chain specification the database belongs to.
    pub(crate) chain: Arc<ChainSpec>,
}
|
||||
|
||||
impl<'a, DB: Database> DbTool<'a, DB> {
    /// Takes a DB where the tables have already been created.
    pub(crate) fn new(db: &'a DB, chain: Arc<ChainSpec>) -> eyre::Result<Self> {
        Ok(Self { db, chain })
    }

    /// Grabs the contents of the table within a certain index range and places the
    /// entries into a [`HashMap`][std::collections::HashMap].
    ///
    /// [`ListFilter`] can be used to further
    /// filter down the desired results. (eg. List only rows which include `0xd3adbeef`)
    ///
    /// Returns the decoded rows together with the total number of rows that matched
    /// the filter (the hit count keeps growing even when `only_count` suppresses
    /// decoding).
    pub fn list<T: Table>(&self, filter: &ListFilter) -> Result<(Vec<TableRow<T>>, usize)> {
        // Boyer-Moore searcher built from the filter's byte pattern. `BMByte::from`
        // yields `None` for an unusable pattern, which is only an error when the
        // caller actually requested a search.
        let bmb = Rc::new(BMByte::from(&filter.search));
        if bmb.is_none() && filter.has_search() {
            eyre::bail!("Invalid search.")
        }

        // Count of rows that passed all filters.
        let mut hits = 0;

        let data = self.db.view(|tx| {
            // Raw cursor: rows stay undecoded so the size/search filters run on bytes.
            let mut cursor =
                tx.cursor_read::<RawTable<T>>().expect("Was not able to obtain a cursor.");

            let map_filter = |row: Result<TableRawRow<T>, _>| {
                if let Ok((k, v)) = row {
                    let (key, value) = (k.into_key(), v.into_value());

                    // Size filters operate on the raw (undecoded) bytes.
                    if key.len() + value.len() < filter.min_row_size {
                        return None
                    }
                    if key.len() < filter.min_key_size {
                        return None
                    }
                    if value.len() < filter.min_value_size {
                        return None
                    }

                    // Lazily decodes the row; skipped entirely when the caller only
                    // wants a count.
                    let result = || {
                        if filter.only_count {
                            return None
                        }
                        Some((
                            <T as Table>::Key::decode(&key).unwrap(),
                            <T as Table>::Value::decompress(&value).unwrap(),
                        ))
                    };

                    match &*bmb {
                        Some(searcher) => {
                            // With a search pattern, a row is a hit when the pattern
                            // occurs in either the value or the key bytes.
                            if searcher.find_first_in(&value).is_some() ||
                                searcher.find_first_in(&key).is_some()
                            {
                                hits += 1;
                                return result()
                            }
                        }
                        None => {
                            // No search pattern: every size-eligible row is a hit.
                            hits += 1;
                            return result()
                        }
                    }
                }
                None
            };

            // `skip` is applied before decoding, `take` after filtering, so paging
            // works on filtered rows.
            if filter.reverse {
                Ok(cursor
                    .walk_back(None)?
                    .skip(filter.skip)
                    .filter_map(map_filter)
                    .take(filter.len)
                    .collect::<Vec<(_, _)>>())
            } else {
                Ok(cursor
                    .walk(None)?
                    .skip(filter.skip)
                    .filter_map(map_filter)
                    .take(filter.len)
                    .collect::<Vec<(_, _)>>())
            }
        })?;

        Ok((data.map_err(|e: DatabaseError| eyre::eyre!(e))?, hits))
    }

    /// Grabs the content of the table for the given key
    pub fn get<T: Table>(&self, key: T::Key) -> Result<Option<T::Value>> {
        self.db.view(|tx| tx.get::<T>(key))?.map_err(|e| eyre::eyre!(e))
    }

    /// Grabs the content of the DupSort table for the given key and subkey
    pub fn get_dup<T: DupSort>(&self, key: T::Key, subkey: T::SubKey) -> Result<Option<T::Value>> {
        self.db
            .view(|tx| tx.cursor_dup_read::<T>()?.seek_by_key_subkey(key, subkey))?
            .map_err(|e| eyre::eyre!(e))
    }

    /// Drops the database at the given path.
    pub fn drop(&mut self, path: impl AsRef<Path>) -> Result<()> {
        let path = path.as_ref();
        info!(target: "reth::cli", "Dropping database at {:?}", path);
        fs::remove_dir_all(path)?;
        Ok(())
    }

    /// Drops the provided table from the database.
    pub fn drop_table<T: Table>(&mut self) -> Result<()> {
        self.db.update(|tx| tx.clear::<T>())??;
        Ok(())
    }
}
|
||||
|
||||
/// Parses a user-specified path with support for environment variables and common shorthands (e.g.
|
||||
/// ~ for the user's home directory).
|
||||
pub fn parse_path(value: &str) -> Result<PathBuf, shellexpand::LookupError<VarError>> {
|
||||
shellexpand::full(value).map(|path| PathBuf::from(path.into_owned()))
|
||||
}
|
||||
|
||||
/// Filtering options applied to rows read from the database.
#[derive(Debug)]
pub struct ListFilter {
    /// Number of leading entries to skip.
    pub skip: usize,
    /// Maximum number of entries to return.
    pub len: usize,
    /// Byte sequence searched for within both keys and values.
    pub search: Vec<u8>,
    /// Smallest accepted combined key+value size, in bytes.
    pub min_row_size: usize,
    /// Smallest accepted key size, in bytes.
    pub min_key_size: usize,
    /// Smallest accepted value size, in bytes.
    pub min_value_size: usize,
    /// Walk the table from the end instead of the start.
    pub reverse: bool,
    /// Count matching entries without decoding or returning them.
    pub only_count: bool,
}

impl ListFilter {
    /// Whether a byte-sequence search was requested.
    pub fn has_search(&self) -> bool {
        !self.search.is_empty()
    }

    /// Moves to a new page by replacing `skip` and `len`.
    pub fn update_page(&mut self, skip: usize, len: usize) {
        self.skip = skip;
        self.len = len;
    }
}
|
||||
|
||||
/// Attempts to retrieve or create a JWT secret from the specified path.
|
||||
pub fn get_or_create_jwt_secret_from_path(path: &Path) -> Result<JwtSecret, JwtError> {
|
||||
if path.exists() {
|
||||
debug!(target: "reth::cli", ?path, "Reading JWT auth secret file");
|
||||
JwtSecret::from_file(path)
|
||||
} else {
|
||||
info!(target: "reth::cli", ?path, "Creating JWT auth secret file");
|
||||
JwtSecret::try_create(path)
|
||||
}
|
||||
}
|
||||
|
||||
/// Collect the peers from the [NetworkManager] and write them to the given `persistent_peers_file`,
|
||||
/// if configured.
|
||||
pub fn write_peers_to_file<C>(network: &NetworkManager<C>, persistent_peers_file: Option<PathBuf>)
|
||||
where
|
||||
C: BlockReader + Unpin,
|
||||
{
|
||||
if let Some(file_path) = persistent_peers_file {
|
||||
let known_peers = network.all_peers().collect::<Vec<_>>();
|
||||
if let Ok(known_peers) = serde_json::to_string_pretty(&known_peers) {
|
||||
trace!(target: "reth::cli", peers_file =?file_path, num_peers=%known_peers.len(), "Saving current peers");
|
||||
let parent_dir = file_path.parent().map(fs::create_dir_all).transpose();
|
||||
match parent_dir.and_then(|_| fs::write(&file_path, known_peers)) {
|
||||
Ok(_) => {
|
||||
info!(target: "reth::cli", peers_file=?file_path, "Wrote network peers to file");
|
||||
}
|
||||
Err(err) => {
|
||||
warn!(target: "reth::cli", ?err, peers_file=?file_path, "Failed to write network peers to file");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
104
crates/node-core/src/version.rs
Normal file
104
crates/node-core/src/version.rs
Normal file
@ -0,0 +1,104 @@
|
||||
//! Version information for reth.

// NOTE(review): the VERGEN_* environment variables are presumably emitted by a
// vergen-based build script — confirm against this crate's build.rs.

/// The short version information for reth.
///
/// - The latest version from Cargo.toml
/// - The short SHA of the latest commit.
///
/// # Example
///
/// ```text
/// 0.1.0 (defa64b2)
/// ```
pub(crate) const SHORT_VERSION: &str =
    concat!(env!("CARGO_PKG_VERSION"), " (", env!("VERGEN_GIT_SHA"), ")");

/// The long version information for reth.
///
/// - The latest version from Cargo.toml
/// - The long SHA of the latest commit.
/// - The build datetime
/// - The build features
/// - The build profile
///
/// # Example:
///
/// ```text
/// Version: 0.1.0
/// Commit SHA: defa64b2
/// Build Timestamp: 2023-05-19T01:47:19.815651705Z
/// Build Features: jemalloc
/// ```
pub(crate) const LONG_VERSION: &str = const_str::concat!(
    "Version: ",
    env!("CARGO_PKG_VERSION"),
    "\n",
    "Commit SHA: ",
    env!("VERGEN_GIT_SHA"),
    "\n",
    "Build Timestamp: ",
    env!("VERGEN_BUILD_TIMESTAMP"),
    "\n",
    "Build Features: ",
    env!("VERGEN_CARGO_FEATURES"),
    "\n",
    "Build Profile: ",
    build_profile_name()
);

/// The version information for reth formatted for P2P (devp2p).
///
/// - The latest version from Cargo.toml
/// - The target triple
///
/// # Example
///
/// ```text
/// reth/v{major}.{minor}.{patch}-{sha1}/{target}
/// ```
/// e.g.: `reth/v0.1.0-alpha.1-428a6dc2f/aarch64-apple-darwin`
pub(crate) const P2P_CLIENT_VERSION: &str = concat!(
    "reth/v",
    env!("CARGO_PKG_VERSION"),
    "-",
    env!("VERGEN_GIT_SHA"),
    "/",
    env!("VERGEN_CARGO_TARGET_TRIPLE")
);
||||
|
||||
/// The default extradata used for payload building.
|
||||
///
|
||||
/// - The latest version from Cargo.toml
|
||||
/// - The OS identifier
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```text
|
||||
/// reth/v{major}.{minor}.{patch}/{OS}
|
||||
/// ```
|
||||
pub fn default_extradata() -> String {
|
||||
format!("reth/v{}/{}", env!("CARGO_PKG_VERSION"), std::env::consts::OS)
|
||||
}
|
||||
|
||||
/// Resolves the Cargo build profile name (e.g. `debug`, `release`) from `OUT_DIR`
/// at compile time.
const fn build_profile_name() -> &'static str {
    // Derived from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime
    // We split on the path separator of the *host* machine, which may be different from
    // `std::path::MAIN_SEPARATOR_STR`.
    const OUT_DIR: &str = env!("OUT_DIR");
    const SEP: char = if const_str::contains!(OUT_DIR, "/") { '/' } else { '\\' };
    let parts = const_str::split!(OUT_DIR, SEP);
    // OUT_DIR presumably looks like `…/target/<profile>/build/<crate>-<hash>/out`,
    // making the profile the 4th-from-last component — TODO confirm against Cargo's
    // OUT_DIR layout.
    parts[parts.len() - 4]
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// A block's extradata field is capped at 32 bytes, so the default must fit.
    #[test]
    fn assert_extradata_less_32bytes() {
        let extradata = default_extradata();
        let byte_len = extradata.as_bytes().len();
        assert!(byte_len <= 32, "extradata must be less than 32 bytes: {extradata}")
    }
}
|
||||
@ -14,5 +14,4 @@ serde_json.workspace = true
|
||||
tracing.workspace = true
|
||||
futures-util.workspace = true
|
||||
tokio = { workspace = true, features = ["time"] }
|
||||
|
||||
mev-share-sse = { version = "0.1.6" , default-features = false }
|
||||
Reference in New Issue
Block a user