mirror of https://github.com/hl-archive-node/nanoreth.git (synced 2025-12-06 10:59:55 +00:00)
chore: extract db commands (#9217)
Co-authored-by: Matthias Seitz <matthias.seitz@outlook.de>
@@ -21,7 +21,7 @@ reth-fs-util.workspace = true
reth-db = { workspace = true, features = ["mdbx"] }
reth-db-api.workspace = true
reth-exex.workspace = true
reth-provider = { workspace = true }
reth-provider.workspace = true
reth-evm.workspace = true
reth-revm.workspace = true
reth-stages.workspace = true
@@ -30,6 +30,7 @@ reth-errors.workspace = true
reth-transaction-pool.workspace = true
reth-beacon-consensus.workspace = true
reth-cli-runner.workspace = true
reth-cli-commands.workspace = true
reth-consensus-common.workspace = true
reth-blockchain-tree.workspace = true
reth-rpc-builder.workspace = true
@@ -41,34 +42,29 @@ reth-rpc-eth-types.workspace = true
reth-rpc-server-types.workspace = true
reth-network = { workspace = true, features = ["serde"] }
reth-network-p2p.workspace = true
reth-net-banlist.workspace = true
reth-network-api.workspace = true
reth-downloaders.workspace = true
reth-tracing.workspace = true
reth-tasks.workspace = true
reth-ethereum-payload-builder.workspace = true
reth-payload-builder.workspace = true
reth-payload-primitives.workspace = true
reth-payload-validator.workspace = true
reth-basic-payload-builder.workspace = true
reth-discv4.workspace = true
reth-discv5.workspace = true
reth-static-file.workspace = true
reth-static-file-types = { workspace = true, features = ["clap"] }
reth-trie = { workspace = true, features = ["metrics"] }
reth-nippy-jar.workspace = true
reth-node-api.workspace = true
reth-node-ethereum.workspace = true
reth-node-optimism = { workspace = true, optional = true, features = [
    "optimism",
] }
reth-node-core.workspace = true
reth-ethereum-payload-builder.workspace = true
reth-db-common.workspace = true
reth-node-ethereum.workspace = true
reth-node-builder.workspace = true
reth-node-events.workspace = true
reth-consensus.workspace = true
reth-optimism-primitives.workspace = true
reth-prune-types.workspace = true
reth-engine-util.workspace = true
reth-prune.workspace = true

@@ -92,15 +88,7 @@ metrics-process.workspace = true
proptest.workspace = true
arbitrary.workspace = true
proptest-arbitrary-interop.workspace = true
rand.workspace = true

# tui
comfy-table = "7.1"
crossterm = "0.27.0"
ratatui = { version = "0.27", default-features = false, features = [
    "crossterm",
] }
human_bytes = "0.4.1"

# async
tokio = { workspace = true, features = [
@@ -119,8 +107,6 @@ tempfile.workspace = true
backon.workspace = true
similar-asserts.workspace = true
itertools.workspace = true
rayon.workspace = true
ahash = "0.8"

# p2p
discv5.workspace = true
@@ -130,8 +116,9 @@ tikv-jemallocator = { version = "0.5.0", optional = true }
libc = "0.2"

[dev-dependencies]
jsonrpsee.workspace = true
assert_matches = "1.5.0"
reth-discv4.workspace = true

[features]
default = ["jemalloc"]
@@ -6,7 +6,7 @@ use crate::{
        LogArgs,
    },
    commands::{
        config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, init_state,
        config_cmd, debug_cmd, dump_genesis, import, init_cmd, init_state,
        node::{self, NoArgs},
        p2p, prune, recover, stage, test_vectors,
    },
@@ -14,6 +14,7 @@ use crate::{
};
use clap::{value_parser, Parser, Subcommand};
use reth_chainspec::ChainSpec;
use reth_cli_commands::db;
use reth_cli_runner::CliRunner;
use reth_db::DatabaseEnv;
use reth_node_builder::{NodeBuilder, WithLaunchContext};
@@ -1,184 +0,0 @@
//! Contains common `reth` arguments

use clap::Parser;
use reth_beacon_consensus::EthBeaconConsensus;
use reth_chainspec::ChainSpec;
use reth_config::{config::EtlConfig, Config};
use reth_db::{init_db, open_db_read_only, DatabaseEnv};
use reth_db_common::init::init_genesis;
use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader};
use reth_evm::noop::NoopBlockExecutorProvider;
use reth_node_core::{
    args::{
        utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS},
        DatabaseArgs, DatadirArgs,
    },
    dirs::{ChainPath, DataDirPath},
};
use reth_primitives::B256;
use reth_provider::{providers::StaticFileProvider, ProviderFactory, StaticFileProviderFactory};
use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget};
use reth_static_file::StaticFileProducer;
use std::{path::PathBuf, sync::Arc};
use tokio::sync::watch;
use tracing::{debug, info, warn};

/// Struct to hold config and datadir paths
#[derive(Debug, Parser)]
pub struct EnvironmentArgs {
    /// Parameters for datadir configuration
    #[command(flatten)]
    pub datadir: DatadirArgs,

    /// The path to the configuration file to use.
    #[arg(long, value_name = "FILE")]
    pub config: Option<PathBuf>,

    /// The chain this node is running.
    ///
    /// Possible values are either a built-in chain or the path to a chain specification file.
    #[arg(
        long,
        value_name = "CHAIN_OR_PATH",
        long_help = chain_help(),
        default_value = SUPPORTED_CHAINS[0],
        value_parser = chain_value_parser
    )]
    pub chain: Arc<ChainSpec>,

    /// All database related arguments
    #[command(flatten)]
    pub db: DatabaseArgs,
}

impl EnvironmentArgs {
    /// Initializes the environment according to [`AccessRights`] and returns an instance of
    /// [`Environment`].
    pub fn init(&self, access: AccessRights) -> eyre::Result<Environment> {
        let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain);
        let db_path = data_dir.db();
        let sf_path = data_dir.static_files();

        if access.is_read_write() {
            reth_fs_util::create_dir_all(&db_path)?;
            reth_fs_util::create_dir_all(&sf_path)?;
        }

        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config());
        let mut config: Config = confy::load_path(config_path)
            .inspect_err(
                |err| warn!(target: "reth::cli", %err, "Failed to load config file, using default"),
            )
            .unwrap_or_default();

        // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to
        if config.stages.etl.dir.is_none() {
            config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir()));
        }

        info!(target: "reth::cli", ?db_path, ?sf_path, "Opening storage");
        let (db, sfp) = match access {
            AccessRights::RW => (
                Arc::new(init_db(db_path, self.db.database_args())?),
                StaticFileProvider::read_write(sf_path)?,
            ),
            AccessRights::RO => (
                Arc::new(open_db_read_only(&db_path, self.db.database_args())?),
                StaticFileProvider::read_only(sf_path)?,
            ),
        };

        let provider_factory = self.create_provider_factory(&config, db, sfp)?;
        if access.is_read_write() {
            debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis");
            init_genesis(provider_factory.clone())?;
        }

        Ok(Environment { config, provider_factory, data_dir })
    }

    /// Returns a [`ProviderFactory`] after executing consistency checks.
    ///
    /// If it's a read-write environment and an issue is found, it will attempt to heal (including
    /// a pipeline unwind). Otherwise, it will print out a warning, advising the user to restart
    /// the node to heal.
    fn create_provider_factory(
        &self,
        config: &Config,
        db: Arc<DatabaseEnv>,
        static_file_provider: StaticFileProvider,
    ) -> eyre::Result<ProviderFactory<Arc<DatabaseEnv>>> {
        let has_receipt_pruning = config.prune.as_ref().map_or(false, |a| a.has_receipts_pruning());
        let factory = ProviderFactory::new(db, self.chain.clone(), static_file_provider);

        info!(target: "reth::cli", "Verifying storage consistency.");

        // Check for consistency between database and static files.
        if let Some(unwind_target) = factory
            .static_file_provider()
            .check_consistency(&factory.provider()?, has_receipt_pruning)?
        {
            if factory.db_ref().is_read_only() {
                warn!(target: "reth::cli", ?unwind_target, "Inconsistent storage. Restart node to heal.");
                return Ok(factory)
            }

            let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default();

            // Highly unlikely to happen, and given its destructive nature, it's better to panic
            // instead.
            assert_ne!(unwind_target, PipelineTarget::Unwind(0), "A static file <> database inconsistency was found that would trigger an unwind to block 0");

            info!(target: "reth::cli", unwind_target = %unwind_target, "Executing an unwind after a failed storage consistency check.");

            let (_tip_tx, tip_rx) = watch::channel(B256::ZERO);

            // Builds and executes an unwind-only pipeline
            let mut pipeline = Pipeline::builder()
                .add_stages(DefaultStages::new(
                    factory.clone(),
                    tip_rx,
                    Arc::new(EthBeaconConsensus::new(self.chain.clone())),
                    NoopHeaderDownloader::default(),
                    NoopBodiesDownloader::default(),
                    NoopBlockExecutorProvider::default(),
                    config.stages.clone(),
                    prune_modes.clone(),
                ))
                .build(factory.clone(), StaticFileProducer::new(factory.clone(), prune_modes));

            // Move all applicable data from database to static files.
            pipeline.move_to_static_files()?;
            pipeline.unwind(unwind_target.unwind_target().expect("should exist"), None)?;
        }

        Ok(factory)
    }
}

/// Environment built from [`EnvironmentArgs`].
#[derive(Debug)]
pub struct Environment {
    /// Configuration for reth node
    pub config: Config,
    /// Provider factory.
    pub provider_factory: ProviderFactory<Arc<DatabaseEnv>>,
    /// Datadir path.
    pub data_dir: ChainPath<DataDirPath>,
}

/// Environment access rights.
#[derive(Debug, Copy, Clone)]
pub enum AccessRights {
    /// Read-write access
    RW,
    /// Read-only access
    RO,
}

impl AccessRights {
    /// Returns `true` if it requires read-write access to the environment.
    pub const fn is_read_write(&self) -> bool {
        matches!(self, Self::RW)
    }
}
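The deletion hunks from here on remove the `db` command sources from the node binary; together with the new `reth-cli-commands` dependency and the `use reth_cli_commands::db;` import above, they show the commands moving into the shared crate rather than disappearing. This first file is the environment plumbing every subcommand shares: a subcommand declares the access level it needs, gets an `Environment` back, and works off the shared `ProviderFactory`. A minimal sketch of a read-only caller, mirroring the `db_ro_exec!` macro that appears further down (the `InspectCommand` wrapper is hypothetical):

use reth_db_common::DbTool;

/// Hypothetical command that only needs read access to the database.
struct InspectCommand {
    env: EnvironmentArgs,
}

impl InspectCommand {
    fn execute(&self) -> eyre::Result<()> {
        // Read-only init never creates directories or writes genesis state,
        // and a storage inconsistency only logs a warning instead of unwinding.
        let Environment { provider_factory, .. } = self.env.init(AccessRights::RO)?;
        let _tool = DbTool::new(provider_factory)?;
        // ... inspect tables via the tool ...
        Ok(())
    }
}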
@@ -1,133 +0,0 @@
use crate::commands::db::get::{maybe_json_value_parser, table_key};
use ahash::RandomState;
use clap::Parser;
use reth_db::{DatabaseEnv, RawKey, RawTable, RawValue, TableViewer, Tables};
use reth_db_api::{cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx};
use reth_db_common::DbTool;
use std::{
    hash::{BuildHasher, Hasher},
    sync::Arc,
    time::{Duration, Instant},
};
use tracing::{info, warn};

#[derive(Parser, Debug)]
/// The arguments for the `reth db checksum` command
pub struct Command {
    /// The table name
    table: Tables,

    /// The start of the range to checksum.
    #[arg(long, value_parser = maybe_json_value_parser)]
    start_key: Option<String>,

    /// The end of the range to checksum.
    #[arg(long, value_parser = maybe_json_value_parser)]
    end_key: Option<String>,

    /// The maximum number of records that are queried and used to compute the
    /// checksum.
    #[arg(long)]
    limit: Option<usize>,
}

impl Command {
    /// Execute `db checksum` command
    pub fn execute(self, tool: &DbTool<Arc<DatabaseEnv>>) -> eyre::Result<()> {
        warn!("This command should be run without the node running!");
        self.table.view(&ChecksumViewer {
            tool,
            start_key: self.start_key,
            end_key: self.end_key,
            limit: self.limit,
        })?;
        Ok(())
    }
}

pub(crate) struct ChecksumViewer<'a, DB: Database> {
    tool: &'a DbTool<DB>,
    start_key: Option<String>,
    end_key: Option<String>,
    limit: Option<usize>,
}

impl<DB: Database> ChecksumViewer<'_, DB> {
    pub(crate) const fn new(tool: &'_ DbTool<DB>) -> ChecksumViewer<'_, DB> {
        ChecksumViewer { tool, start_key: None, end_key: None, limit: None }
    }
}

impl<DB: Database> TableViewer<(u64, Duration)> for ChecksumViewer<'_, DB> {
    type Error = eyre::Report;

    fn view<T: Table>(&self) -> Result<(u64, Duration), Self::Error> {
        let provider =
            self.tool.provider_factory.provider()?.disable_long_read_transaction_safety();
        let tx = provider.tx_ref();
        info!(
            "Start computing checksum, start={:?}, end={:?}, limit={:?}",
            self.start_key, self.end_key, self.limit
        );

        let mut cursor = tx.cursor_read::<RawTable<T>>()?;
        let walker = match (self.start_key.as_deref(), self.end_key.as_deref()) {
            (Some(start), Some(end)) => {
                let start_key = table_key::<T>(start).map(RawKey::<T::Key>::new)?;
                let end_key = table_key::<T>(end).map(RawKey::<T::Key>::new)?;
                cursor.walk_range(start_key..=end_key)?
            }
            (None, Some(end)) => {
                let end_key = table_key::<T>(end).map(RawKey::<T::Key>::new)?;

                cursor.walk_range(..=end_key)?
            }
            (Some(start), None) => {
                let start_key = table_key::<T>(start).map(RawKey::<T::Key>::new)?;
                cursor.walk_range(start_key..)?
            }
            (None, None) => cursor.walk_range(..)?,
        };

        let start_time = Instant::now();
        let mut hasher = RandomState::with_seeds(1, 2, 3, 4).build_hasher();
        let mut total = 0;

        let limit = self.limit.unwrap_or(usize::MAX);
        let mut enumerate_start_key = None;
        let mut enumerate_end_key = None;
        for (index, entry) in walker.enumerate() {
            let (k, v): (RawKey<T::Key>, RawValue<T::Value>) = entry?;

            if index % 100_000 == 0 {
                info!("Hashed {index} entries.");
            }

            hasher.write(k.raw_key());
            hasher.write(v.raw_value());

            if enumerate_start_key.is_none() {
                enumerate_start_key = Some(k.clone());
            }
            enumerate_end_key = Some(k);

            total = index + 1;
            if total >= limit {
                break
            }
        }

        info!("Hashed {total} entries.");
        if let (Some(s), Some(e)) = (enumerate_start_key, enumerate_end_key) {
            info!("start-key: {}", serde_json::to_string(&s.key()?).unwrap_or_default());
            info!("end-key: {}", serde_json::to_string(&e.key()?).unwrap_or_default());
        }

        let checksum = hasher.finish();
        let elapsed = start_time.elapsed();

        info!("Checksum for table `{}`: {:#x} (elapsed: {:?})", T::NAME, checksum, elapsed);

        Ok((checksum, elapsed))
    }
}
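Note the fixed seeds passed to `RandomState::with_seeds(1, 2, 3, 4)`: unlike ahash's default randomly-keyed state, fixed seeds make the digest deterministic, so the same table contents always produce the same checksum across runs and machines, and two databases can be compared by their reported values. A standalone sketch of the same idea (illustrative bytes):

use ahash::RandomState;
use std::hash::{BuildHasher, Hasher};

fn main() {
    // Fixed seeds => reproducible digest: feeding the same key/value bytes
    // in the same order always yields the same u64.
    let mut hasher = RandomState::with_seeds(1, 2, 3, 4).build_hasher();
    hasher.write(b"some-raw-key");
    hasher.write(b"some-raw-value");
    println!("checksum: {:#x}", hasher.finish());
}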
@@ -1,63 +0,0 @@
use clap::{Parser, Subcommand};
use reth_db::{static_file::iter_static_files, TableViewer, Tables};
use reth_db_api::{
    database::Database,
    table::Table,
    transaction::{DbTx, DbTxMut},
};
use reth_provider::{ProviderFactory, StaticFileProviderFactory};
use reth_static_file_types::{find_fixed_range, StaticFileSegment};

/// The arguments for the `reth db clear` command
#[derive(Parser, Debug)]
pub struct Command {
    #[clap(subcommand)]
    subcommand: Subcommands,
}

impl Command {
    /// Execute `db clear` command
    pub fn execute<DB: Database>(self, provider_factory: ProviderFactory<DB>) -> eyre::Result<()> {
        match self.subcommand {
            Subcommands::Mdbx { table } => {
                table.view(&ClearViewer { db: provider_factory.db_ref() })?
            }
            Subcommands::StaticFile { segment } => {
                let static_file_provider = provider_factory.static_file_provider();
                let static_files = iter_static_files(static_file_provider.directory())?;

                if let Some(segment_static_files) = static_files.get(&segment) {
                    for (block_range, _) in segment_static_files {
                        static_file_provider
                            .delete_jar(segment, find_fixed_range(block_range.start()))?;
                    }
                }
            }
        }

        Ok(())
    }
}

#[derive(Subcommand, Debug)]
enum Subcommands {
    /// Deletes all database table entries
    Mdbx { table: Tables },
    /// Deletes all static file segment entries
    StaticFile { segment: StaticFileSegment },
}

struct ClearViewer<'a, DB: Database> {
    db: &'a DB,
}

impl<DB: Database> TableViewer<()> for ClearViewer<'_, DB> {
    type Error = eyre::Report;

    fn view<T: Table>(&self) -> Result<(), Self::Error> {
        let tx = self.db.tx_mut()?;
        tx.clear::<T>()?;
        tx.commit()?;
        Ok(())
    }
}
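Both `clear` and `checksum` lean on the same `TableViewer` trick: the runtime `Tables` value is matched once, and the chosen concrete table type is handed to a generic `view<T: Table>` method. A self-contained sketch of that dispatch pattern (simplified stand-in types, not the actual reth definitions):

trait Table {
    const NAME: &'static str;
}
struct Headers;
impl Table for Headers {
    const NAME: &'static str = "Headers";
}
struct Receipts;
impl Table for Receipts {
    const NAME: &'static str = "Receipts";
}

/// Runtime selector, e.g. parsed from the CLI.
enum Tables {
    Headers,
    Receipts,
}

/// A generic operation that works on any table type.
trait TableViewer<R> {
    type Error;
    fn view<T: Table>(&self) -> Result<R, Self::Error>;
}

impl Tables {
    /// Bridge from the runtime value to the compile-time table type.
    fn view<R, V: TableViewer<R>>(&self, viewer: &V) -> Result<R, V::Error> {
        match self {
            Self::Headers => viewer.view::<Headers>(),
            Self::Receipts => viewer.view::<Receipts>(),
        }
    }
}

struct NameViewer;
impl TableViewer<()> for NameViewer {
    type Error = ();
    fn view<T: Table>(&self) -> Result<(), ()> {
        println!("visiting {}", T::NAME);
        Ok(())
    }
}

fn main() {
    Tables::Receipts.view(&NameViewer).unwrap(); // prints "visiting Receipts"
}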
@@ -1,345 +0,0 @@
use crate::{
    args::DatabaseArgs,
    dirs::{DataDirPath, PlatformPath},
};
use clap::Parser;
use reth_db::{open_db_read_only, tables_to_generic, DatabaseEnv, Tables};
use reth_db_api::{cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx};
use reth_db_common::DbTool;
use std::{
    collections::HashMap,
    fmt::Debug,
    fs::{self, File},
    hash::Hash,
    io::Write,
    path::{Path, PathBuf},
    sync::Arc,
};
use tracing::{info, warn};

#[derive(Parser, Debug)]
/// The arguments for the `reth db diff` command
pub struct Command {
    /// The path to the data dir for all reth files and subdirectories.
    #[arg(long, verbatim_doc_comment)]
    secondary_datadir: PlatformPath<DataDirPath>,

    /// Arguments for the second database
    #[command(flatten)]
    second_db: DatabaseArgs,

    /// The table name to diff. If not specified, all tables are diffed.
    #[arg(long, verbatim_doc_comment)]
    table: Option<Tables>,

    /// The output directory for the diff report.
    #[arg(long, verbatim_doc_comment)]
    output: PlatformPath<PathBuf>,
}

impl Command {
    /// Execute the `db diff` command.
    ///
    /// This first opens the `db/` folder from the secondary datadir, where the second database is
    /// opened read-only.
    ///
    /// The tool will then iterate through all key-value pairs for the primary and secondary
    /// databases. The value for each key will be compared with its corresponding value in the
    /// other database. If the values are different, a discrepancy will be recorded in-memory. If
    /// one key is present in one database but not the other, this will be recorded as an "extra
    /// element" for that database.
    ///
    /// The discrepancies and extra elements, along with a brief summary of the diff results, are
    /// then written to a file in the output directory.
    pub fn execute(self, tool: &DbTool<Arc<DatabaseEnv>>) -> eyre::Result<()> {
        warn!("Make sure the node is not running when running `reth db diff`!");
        // open second db
        let second_db_path: PathBuf = self.secondary_datadir.join("db").into();
        let second_db = open_db_read_only(&second_db_path, self.second_db.database_args())?;

        let tables = match &self.table {
            Some(table) => std::slice::from_ref(table),
            None => Tables::ALL,
        };

        for table in tables {
            let mut primary_tx = tool.provider_factory.db_ref().tx()?;
            let mut secondary_tx = second_db.tx()?;

            // disable long read transaction safety, since this will run for a while and it's
            // expected that the node is not running
            primary_tx.disable_long_read_transaction_safety();
            secondary_tx.disable_long_read_transaction_safety();

            let output_dir = self.output.clone();
            tables_to_generic!(table, |Table| find_diffs::<Table>(
                primary_tx,
                secondary_tx,
                output_dir
            ))?;
        }

        Ok(())
    }
}

/// Find diffs for a table, then analyze the result
fn find_diffs<T: Table>(
    primary_tx: impl DbTx,
    secondary_tx: impl DbTx,
    output_dir: impl AsRef<Path>,
) -> eyre::Result<()>
where
    T::Key: Hash,
    T::Value: PartialEq,
{
    let table = T::NAME;

    info!("Analyzing table {table}...");
    let result = find_diffs_advanced::<T>(&primary_tx, &secondary_tx)?;
    info!("Done analyzing table {table}!");

    // Pretty info summary header: newline then header
    info!("");
    info!("Diff results for {table}:");

    // create directory and open file
    fs::create_dir_all(output_dir.as_ref())?;
    let file_name = format!("{table}.txt");
    let mut file = File::create(output_dir.as_ref().join(file_name.clone()))?;

    // analyze the result and print some stats
    let discrepancies = result.discrepancies.len();
    let extra_elements = result.extra_elements.len();

    // Make a pretty summary header for the table
    writeln!(file, "Diff results for {table}")?;

    if discrepancies > 0 {
        // write to file
        writeln!(file, "Found {discrepancies} discrepancies in table {table}")?;

        // also print to info
        info!("Found {discrepancies} discrepancies in table {table}");
    } else {
        // write to file
        writeln!(file, "No discrepancies found in table {table}")?;

        // also print to info
        info!("No discrepancies found in table {table}");
    }

    if extra_elements > 0 {
        // write to file
        writeln!(file, "Found {extra_elements} extra elements in table {table}")?;

        // also print to info
        info!("Found {extra_elements} extra elements in table {table}");
    } else {
        writeln!(file, "No extra elements found in table {table}")?;

        // also print to info
        info!("No extra elements found in table {table}");
    }

    info!("Writing diff results for {table} to {file_name}...");

    if discrepancies > 0 {
        writeln!(file, "Discrepancies:")?;
    }

    for discrepancy in result.discrepancies.values() {
        writeln!(file, "{discrepancy:?}")?;
    }

    if extra_elements > 0 {
        writeln!(file, "Extra elements:")?;
    }

    for extra_element in result.extra_elements.values() {
        writeln!(file, "{extra_element:?}")?;
    }

    let full_file_name = output_dir.as_ref().join(file_name);
    info!("Done writing diff results for {table} to {}", full_file_name.display());
    Ok(())
}

/// This diff algorithm is slightly different: it walks _each_ table, cross-checking for the
/// element in the other table.
fn find_diffs_advanced<T: Table>(
    primary_tx: &impl DbTx,
    secondary_tx: &impl DbTx,
) -> eyre::Result<TableDiffResult<T>>
where
    T::Value: PartialEq,
    T::Key: Hash,
{
    // initialize the zipped walker
    let mut primary_zip_cursor =
        primary_tx.cursor_read::<T>().expect("Was not able to obtain a cursor.");
    let primary_walker = primary_zip_cursor.walk(None)?;

    let mut secondary_zip_cursor =
        secondary_tx.cursor_read::<T>().expect("Was not able to obtain a cursor.");
    let secondary_walker = secondary_zip_cursor.walk(None)?;
    let zipped_cursor = primary_walker.zip(secondary_walker);

    // initialize the cursors for seeking when we are cross checking elements
    let mut primary_cursor =
        primary_tx.cursor_read::<T>().expect("Was not able to obtain a cursor.");

    let mut secondary_cursor =
        secondary_tx.cursor_read::<T>().expect("Was not able to obtain a cursor.");

    let mut result = TableDiffResult::<T>::default();

    // This loop walks both tables at the same time. If the keys differ, each key is looked up
    // in the other table; if the keys are the same, the values are compared.
    for (primary_entry, secondary_entry) in zipped_cursor {
        let (primary_key, primary_value) = primary_entry?;
        let (secondary_key, secondary_value) = secondary_entry?;

        if primary_key != secondary_key {
            // if the keys are different, we need to check if the key is in the other table
            let crossed_secondary =
                secondary_cursor.seek_exact(primary_key.clone())?.map(|(_, value)| value);
            result.try_push_discrepancy(
                primary_key.clone(),
                Some(primary_value),
                crossed_secondary,
            );

            // now do the same for the primary table
            let crossed_primary =
                primary_cursor.seek_exact(secondary_key.clone())?.map(|(_, value)| value);
            result.try_push_discrepancy(
                secondary_key.clone(),
                crossed_primary,
                Some(secondary_value),
            );
        } else {
            // the keys are the same, so we need to compare the values
            result.try_push_discrepancy(primary_key, Some(primary_value), Some(secondary_value));
        }
    }

    Ok(result)
}

/// A table element that exists in both databases under the same key, but with different values
#[derive(Debug)]
struct TableDiffElement<T: Table> {
    /// The key for the element
    key: T::Key,

    /// The element from the first table
    #[allow(dead_code)]
    first: T::Value,

    /// The element from the second table
    #[allow(dead_code)]
    second: T::Value,
}

/// The diff result for an entire table. If the tables had the same number of elements, there will
/// be no extra elements.
struct TableDiffResult<T: Table>
where
    T::Key: Hash,
{
    /// All elements of the database that are different
    discrepancies: HashMap<T::Key, TableDiffElement<T>>,

    /// Any extra elements, and the table they are in
    extra_elements: HashMap<T::Key, ExtraTableElement<T>>,
}

impl<T> Default for TableDiffResult<T>
where
    T: Table,
    T::Key: Hash,
{
    fn default() -> Self {
        Self { discrepancies: HashMap::new(), extra_elements: HashMap::new() }
    }
}

impl<T: Table> TableDiffResult<T>
where
    T::Key: Hash,
{
    /// Push a diff result into the discrepancies set.
    fn push_discrepancy(&mut self, discrepancy: TableDiffElement<T>) {
        self.discrepancies.insert(discrepancy.key.clone(), discrepancy);
    }

    /// Push an extra element into the extra elements set.
    fn push_extra_element(&mut self, element: ExtraTableElement<T>) {
        self.extra_elements.insert(element.key().clone(), element);
    }
}

impl<T> TableDiffResult<T>
where
    T: Table,
    T::Key: Hash,
    T::Value: PartialEq,
{
    /// Try to push a diff result into the discrepancy set, only pushing if the given elements are
    /// different, and the discrepancy does not exist anywhere already.
    fn try_push_discrepancy(
        &mut self,
        key: T::Key,
        first: Option<T::Value>,
        second: Option<T::Value>,
    ) {
        // do not bother comparing if the key is already in the discrepancies map
        if self.discrepancies.contains_key(&key) {
            return
        }

        // do not bother comparing if the key is already in the extra elements map
        if self.extra_elements.contains_key(&key) {
            return
        }

        match (first, second) {
            (Some(first), Some(second)) => {
                if first != second {
                    self.push_discrepancy(TableDiffElement { key, first, second });
                }
            }
            (Some(first), None) => {
                self.push_extra_element(ExtraTableElement::First { key, value: first });
            }
            (None, Some(second)) => {
                self.push_extra_element(ExtraTableElement::Second { key, value: second });
            }
            (None, None) => {}
        }
    }
}

/// A single extra element from a table
#[derive(Debug)]
enum ExtraTableElement<T: Table> {
    /// The extra element that is in the first table
    #[allow(dead_code)]
    First { key: T::Key, value: T::Value },

    /// The extra element that is in the second table
    #[allow(dead_code)]
    Second { key: T::Key, value: T::Value },
}

impl<T: Table> ExtraTableElement<T> {
    /// Return the key for the extra element
    const fn key(&self) -> &T::Key {
        match self {
            Self::First { key, .. } | Self::Second { key, .. } => key,
        }
    }
}
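A toy illustration of the classification rules implemented by `try_push_discrepancy` (hypothetical data, plain `HashMap`s instead of MDBX tables): identical pairs are ignored, same-key/different-value pairs become discrepancies, and keys present on only one side become extra elements.

use std::collections::{BTreeSet, HashMap};

fn main() {
    let primary = HashMap::from([(1u64, "a"), (2, "b")]);
    let secondary = HashMap::from([(1u64, "a"), (2, "c"), (3, "d")]);

    // Visit the union of keys exactly once, in order.
    let keys: BTreeSet<u64> = primary.keys().chain(secondary.keys()).copied().collect();
    for key in keys {
        match (primary.get(&key), secondary.get(&key)) {
            (Some(a), Some(b)) if a == b => {} // identical, not recorded
            (Some(a), Some(b)) => println!("discrepancy at {key}: {a} vs {b}"),
            (Some(a), None) => println!("extra element in primary at {key}: {a}"),
            (None, Some(b)) => println!("extra element in secondary at {key}: {b}"),
            (None, None) => unreachable!(),
        }
    }
    // Prints: discrepancy at 2: b vs c
    //         extra element in secondary at 3: d
}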
@@ -1,264 +0,0 @@
use clap::Parser;
use reth_db::{
    static_file::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask, ReceiptMask, TransactionMask},
    tables, RawKey, RawTable, Receipts, TableViewer, Transactions,
};
use reth_db_api::{
    database::Database,
    table::{Decompress, DupSort, Table},
};
use reth_db_common::DbTool;
use reth_primitives::{BlockHash, Header};
use reth_provider::StaticFileProviderFactory;
use reth_static_file_types::StaticFileSegment;
use tracing::error;

/// The arguments for the `reth db get` command
#[derive(Parser, Debug)]
pub struct Command {
    #[command(subcommand)]
    subcommand: Subcommand,
}

#[derive(clap::Subcommand, Debug)]
enum Subcommand {
    /// Gets the content of a database table for the given key
    Mdbx {
        table: tables::Tables,

        /// The key to get content for
        #[arg(value_parser = maybe_json_value_parser)]
        key: String,

        /// The subkey to get content for
        #[arg(value_parser = maybe_json_value_parser)]
        subkey: Option<String>,

        /// Output bytes instead of human-readable decoded value
        #[arg(long)]
        raw: bool,
    },
    /// Gets the content of a static file segment for the given key
    StaticFile {
        segment: StaticFileSegment,

        /// The key to get content for
        #[arg(value_parser = maybe_json_value_parser)]
        key: String,

        /// Output bytes instead of human-readable decoded value
        #[arg(long)]
        raw: bool,
    },
}

impl Command {
    /// Execute `db get` command
    pub fn execute<DB: Database>(self, tool: &DbTool<DB>) -> eyre::Result<()> {
        match self.subcommand {
            Subcommand::Mdbx { table, key, subkey, raw } => {
                table.view(&GetValueViewer { tool, key, subkey, raw })?
            }
            Subcommand::StaticFile { segment, key, raw } => {
                let (key, mask): (u64, _) = match segment {
                    StaticFileSegment::Headers => {
                        (table_key::<tables::Headers>(&key)?, <HeaderMask<Header, BlockHash>>::MASK)
                    }
                    StaticFileSegment::Transactions => (
                        table_key::<tables::Transactions>(&key)?,
                        <TransactionMask<<Transactions as Table>::Value>>::MASK,
                    ),
                    StaticFileSegment::Receipts => (
                        table_key::<tables::Receipts>(&key)?,
                        <ReceiptMask<<Receipts as Table>::Value>>::MASK,
                    ),
                };

                let content = tool.provider_factory.static_file_provider().find_static_file(
                    segment,
                    |provider| {
                        let mut cursor = provider.cursor()?;
                        cursor.get(key.into(), mask).map(|result| {
                            result.map(|vec| {
                                vec.iter().map(|slice| slice.to_vec()).collect::<Vec<_>>()
                            })
                        })
                    },
                )?;

                match content {
                    Some(content) => {
                        if raw {
                            println!("{content:?}");
                        } else {
                            match segment {
                                StaticFileSegment::Headers => {
                                    let header = Header::decompress(content[0].as_slice())?;
                                    let block_hash = BlockHash::decompress(content[1].as_slice())?;
                                    println!(
                                        "{}\n{}",
                                        serde_json::to_string_pretty(&header)?,
                                        serde_json::to_string_pretty(&block_hash)?
                                    );
                                }
                                StaticFileSegment::Transactions => {
                                    let transaction = <<Transactions as Table>::Value>::decompress(
                                        content[0].as_slice(),
                                    )?;
                                    println!("{}", serde_json::to_string_pretty(&transaction)?);
                                }
                                StaticFileSegment::Receipts => {
                                    let receipt = <<Receipts as Table>::Value>::decompress(
                                        content[0].as_slice(),
                                    )?;
                                    println!("{}", serde_json::to_string_pretty(&receipt)?);
                                }
                            }
                        }
                    }
                    None => {
                        error!(target: "reth::cli", "No content for the given table key.");
                    }
                };
            }
        }

        Ok(())
    }
}

/// Get an instance of a key for the given table
pub(crate) fn table_key<T: Table>(key: &str) -> Result<T::Key, eyre::Error> {
    serde_json::from_str::<T::Key>(key).map_err(|e| eyre::eyre!(e))
}

/// Get an instance of a subkey for the given dupsort table
fn table_subkey<T: DupSort>(subkey: &Option<String>) -> Result<T::SubKey, eyre::Error> {
    serde_json::from_str::<T::SubKey>(&subkey.clone().unwrap_or_default())
        .map_err(|e| eyre::eyre!(e))
}

struct GetValueViewer<'a, DB: Database> {
    tool: &'a DbTool<DB>,
    key: String,
    subkey: Option<String>,
    raw: bool,
}

impl<DB: Database> TableViewer<()> for GetValueViewer<'_, DB> {
    type Error = eyre::Report;

    fn view<T: Table>(&self) -> Result<(), Self::Error> {
        let key = table_key::<T>(&self.key)?;

        let content = if self.raw {
            self.tool
                .get::<RawTable<T>>(RawKey::from(key))?
                .map(|content| format!("{:?}", content.raw_value()))
        } else {
            self.tool.get::<T>(key)?.as_ref().map(serde_json::to_string_pretty).transpose()?
        };

        match content {
            Some(content) => {
                println!("{content}");
            }
            None => {
                error!(target: "reth::cli", "No content for the given table key.");
            }
        };

        Ok(())
    }

    fn view_dupsort<T: DupSort>(&self) -> Result<(), Self::Error> {
        // get a key for the given table
        let key = table_key::<T>(&self.key)?;

        // process dupsort table
        let subkey = table_subkey::<T>(&self.subkey)?;

        match self.tool.get_dup::<T>(key, subkey)? {
            Some(content) => {
                println!("{}", serde_json::to_string_pretty(&content)?);
            }
            None => {
                error!(target: "reth::cli", "No content for the given table subkey.");
            }
        };
        Ok(())
    }
}

/// Map the user input value to JSON
pub(crate) fn maybe_json_value_parser(value: &str) -> Result<String, eyre::Error> {
    if serde_json::from_str::<serde::de::IgnoredAny>(value).is_ok() {
        Ok(value.to_string())
    } else {
        serde_json::to_string(&value).map_err(|e| eyre::eyre!(e))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use clap::{Args, Parser};
    use reth_db::{AccountsHistory, HashedAccounts, Headers, StageCheckpoints, StoragesHistory};
    use reth_db_api::models::{storage_sharded_key::StorageShardedKey, ShardedKey};
    use reth_primitives::{Address, B256};
    use std::str::FromStr;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn parse_numeric_key_args() {
        assert_eq!(table_key::<Headers>("123").unwrap(), 123);
        assert_eq!(
            table_key::<HashedAccounts>(
                "\"0x0ac361fe774b78f8fc4e86c1916930d150865c3fc2e21dca2e58833557608bac\""
            )
            .unwrap(),
            B256::from_str("0x0ac361fe774b78f8fc4e86c1916930d150865c3fc2e21dca2e58833557608bac")
                .unwrap()
        );
    }

    #[test]
    fn parse_string_key_args() {
        assert_eq!(
            table_key::<StageCheckpoints>("\"MerkleExecution\"").unwrap(),
            "MerkleExecution"
        );
    }

    #[test]
    fn parse_json_key_args() {
        assert_eq!(
            table_key::<StoragesHistory>(r#"{ "address": "0x01957911244e546ce519fbac6f798958fafadb41", "sharded_key": { "key": "0x0000000000000000000000000000000000000000000000000000000000000003", "highest_block_number": 18446744073709551615 } }"#).unwrap(),
            StorageShardedKey::new(
                Address::from_str("0x01957911244e546ce519fbac6f798958fafadb41").unwrap(),
                B256::from_str(
                    "0x0000000000000000000000000000000000000000000000000000000000000003"
                )
                .unwrap(),
                18446744073709551615
            )
        );
    }

    #[test]
    fn parse_json_key_for_account_history() {
        assert_eq!(
            table_key::<AccountsHistory>(r#"{ "key": "0x4448e1273fd5a8bfdb9ed111e96889c960eee145", "highest_block_number": 18446744073709551615 }"#).unwrap(),
            ShardedKey::new(
                Address::from_str("0x4448e1273fd5a8bfdb9ed111e96889c960eee145").unwrap(),
                18446744073709551615
            )
        );
    }
}
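One behavior worth calling out: `maybe_json_value_parser` passes through anything that already parses as JSON and JSON-quotes everything else, which is why a bare `MerkleExecution` on the command line works without shell-escaped quotes. Illustrative assertions against the function above:

#[test]
fn maybe_json_value_parser_examples() {
    // valid JSON (a number) passes through untouched
    assert_eq!(maybe_json_value_parser("123").unwrap(), "123");
    // a bare word is not valid JSON, so it is wrapped into a JSON string:
    // `MerkleExecution` becomes `"MerkleExecution"` before key parsing
    assert_eq!(maybe_json_value_parser("MerkleExecution").unwrap(), "\"MerkleExecution\"");
}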
@@ -1,135 +0,0 @@
use super::tui::DbListTUI;
use clap::Parser;
use eyre::WrapErr;
use reth_db::{DatabaseEnv, RawValue, TableViewer, Tables};
use reth_db_api::{database::Database, table::Table};
use reth_db_common::{DbTool, ListFilter};
use reth_primitives::hex;
use std::{cell::RefCell, sync::Arc};
use tracing::error;

#[derive(Parser, Debug)]
/// The arguments for the `reth db list` command
pub struct Command {
    /// The table name
    table: Tables,
    /// Skip first N entries
    #[arg(long, short, default_value_t = 0)]
    skip: usize,
    /// Reverse the order of the entries. If enabled, the last table entries are read.
    #[arg(long, short, default_value_t = false)]
    reverse: bool,
    /// How many items to take from the walker
    #[arg(long, short, default_value_t = 5)]
    len: usize,
    /// Search parameter for both keys and values. Prefix it with `0x` to search for binary data,
    /// and text otherwise.
    ///
    /// ATTENTION! For compressed tables (`Transactions` and `Receipts`), there might be
    /// missing results since the search uses the raw uncompressed value from the database.
    #[arg(long)]
    search: Option<String>,
    /// Minimum size of row in bytes
    #[arg(long, default_value_t = 0)]
    min_row_size: usize,
    /// Minimum size of key in bytes
    #[arg(long, default_value_t = 0)]
    min_key_size: usize,
    /// Minimum size of value in bytes
    #[arg(long, default_value_t = 0)]
    min_value_size: usize,
    /// Returns the number of rows found.
    #[arg(long, short)]
    count: bool,
    /// Dump as JSON instead of using TUI.
    #[arg(long, short)]
    json: bool,
    /// Output bytes instead of human-readable decoded value
    #[arg(long)]
    raw: bool,
}

impl Command {
    /// Execute `db list` command
    pub fn execute(self, tool: &DbTool<Arc<DatabaseEnv>>) -> eyre::Result<()> {
        self.table.view(&ListTableViewer { tool, args: &self })
    }

    /// Generate [`ListFilter`] from command.
    pub fn list_filter(&self) -> ListFilter {
        let search = self
            .search
            .as_ref()
            .map(|search| {
                if let Some(search) = search.strip_prefix("0x") {
                    return hex::decode(search).unwrap()
                }
                search.as_bytes().to_vec()
            })
            .unwrap_or_default();

        ListFilter {
            skip: self.skip,
            len: self.len,
            search,
            min_row_size: self.min_row_size,
            min_key_size: self.min_key_size,
            min_value_size: self.min_value_size,
            reverse: self.reverse,
            only_count: self.count,
        }
    }
}

struct ListTableViewer<'a> {
    tool: &'a DbTool<Arc<DatabaseEnv>>,
    args: &'a Command,
}

impl TableViewer<()> for ListTableViewer<'_> {
    type Error = eyre::Report;

    fn view<T: Table>(&self) -> Result<(), Self::Error> {
        self.tool.provider_factory.db_ref().view(|tx| {
            let table_db = tx.inner.open_db(Some(self.args.table.name())).wrap_err("Could not open db.")?;
            let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", self.args.table.name()))?;
            let total_entries = stats.entries();
            let final_entry_idx = total_entries.saturating_sub(1);
            if self.args.skip > final_entry_idx {
                error!(
                    target: "reth::cli",
                    "Start index {start} is greater than the final entry index ({final_entry_idx}) in the table {table}",
                    start = self.args.skip,
                    final_entry_idx = final_entry_idx,
                    table = self.args.table.name()
                );
                return Ok(())
            }

            let list_filter = self.args.list_filter();

            if self.args.json || self.args.count {
                let (list, count) = self.tool.list::<T>(&list_filter)?;

                if self.args.count {
                    println!("{count} entries found.")
                } else if self.args.raw {
                    let list = list.into_iter().map(|row| (row.0, RawValue::new(row.1).into_value())).collect::<Vec<_>>();
                    println!("{}", serde_json::to_string_pretty(&list)?);
                } else {
                    println!("{}", serde_json::to_string_pretty(&list)?);
                }
                Ok(())
            } else {
                let list_filter = RefCell::new(list_filter);
                DbListTUI::<_, T>::new(|skip, len| {
                    list_filter.borrow_mut().update_page(skip, len);
                    self.tool.list::<T>(&list_filter.borrow()).unwrap().0
                }, self.args.skip, self.args.len, total_entries, self.args.raw).run()
            }
        })??;

        Ok(())
    }
}
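The `--search` handling in `list_filter` is a small but easy-to-miss detail: a `0x` prefix switches the needle from text to binary. A standalone sketch of the same conversion (using the `hex` crate, re-exported as `reth_primitives::hex` above; `as_search_bytes` is an illustrative name):

fn as_search_bytes(search: &str) -> Vec<u8> {
    if let Some(stripped) = search.strip_prefix("0x") {
        // binary search: decode the hex payload (panics on invalid hex,
        // matching the `unwrap` in `list_filter` above)
        return hex::decode(stripped).unwrap()
    }
    // text search: use the raw UTF-8 bytes
    search.as_bytes().to_vec()
}

fn main() {
    assert_eq!(as_search_bytes("0xdead"), vec![0xde, 0xad]);
    assert_eq!(as_search_bytes("abc"), b"abc".to_vec());
}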
@@ -1,159 +0,0 @@
//! Database debugging tool

use crate::commands::common::{AccessRights, Environment, EnvironmentArgs};
use clap::{Parser, Subcommand};
use reth_db::version::{get_db_version, DatabaseVersionError, DB_VERSION};
use reth_db_common::DbTool;
use std::io::{self, Write};

mod checksum;
mod clear;
mod diff;
mod get;
mod list;
mod stats;
/// DB List TUI
mod tui;

/// `reth db` command
#[derive(Debug, Parser)]
pub struct Command {
    #[command(flatten)]
    env: EnvironmentArgs,

    #[command(subcommand)]
    command: Subcommands,
}

#[derive(Subcommand, Debug)]
/// `reth db` subcommands
pub enum Subcommands {
    /// Lists all the tables, their entry count and their size
    Stats(stats::Command),
    /// Lists the contents of a table
    List(list::Command),
    /// Calculates the content checksum of a table
    Checksum(checksum::Command),
    /// Create a diff between two database tables or two entire databases.
    Diff(diff::Command),
    /// Gets the content of a table for the given key
    Get(get::Command),
    /// Deletes all database entries
    Drop {
        /// Bypasses the interactive confirmation and drops the database directly
        #[arg(short, long)]
        force: bool,
    },
    /// Deletes all table entries
    Clear(clear::Command),
    /// Lists current and local database versions
    Version,
    /// Returns the full database path
    Path,
}

/// `db_ro_exec` opens a database in read-only mode and then executes the provided command
macro_rules! db_ro_exec {
    ($env:expr, $tool:ident, $command:block) => {
        let Environment { provider_factory, .. } = $env.init(AccessRights::RO)?;

        let $tool = DbTool::new(provider_factory.clone())?;
        $command;
    };
}

impl Command {
    /// Execute `db` command
    pub async fn execute(self) -> eyre::Result<()> {
        let data_dir = self.env.datadir.clone().resolve_datadir(self.env.chain.chain);
        let db_path = data_dir.db();
        let static_files_path = data_dir.static_files();

        match self.command {
            // TODO: We'll need to add this on the DB trait.
            Subcommands::Stats(command) => {
                db_ro_exec!(self.env, tool, {
                    command.execute(data_dir, &tool)?;
                });
            }
            Subcommands::List(command) => {
                db_ro_exec!(self.env, tool, {
                    command.execute(&tool)?;
                });
            }
            Subcommands::Checksum(command) => {
                db_ro_exec!(self.env, tool, {
                    command.execute(&tool)?;
                });
            }
            Subcommands::Diff(command) => {
                db_ro_exec!(self.env, tool, {
                    command.execute(&tool)?;
                });
            }
            Subcommands::Get(command) => {
                db_ro_exec!(self.env, tool, {
                    command.execute(&tool)?;
                });
            }
            Subcommands::Drop { force } => {
                if !force {
                    // Ask for confirmation
                    print!("Are you sure you want to drop the database at {data_dir}? This cannot be undone. (y/N): ");
                    // Flush the buffer to ensure the message is printed immediately
                    io::stdout().flush().unwrap();

                    let mut input = String::new();
                    io::stdin().read_line(&mut input).expect("Failed to read line");

                    if !input.trim().eq_ignore_ascii_case("y") {
                        println!("Database drop aborted!");
                        return Ok(())
                    }
                }

                let Environment { provider_factory, .. } = self.env.init(AccessRights::RW)?;
                let tool = DbTool::new(provider_factory)?;
                tool.drop(db_path, static_files_path)?;
            }
            Subcommands::Clear(command) => {
                let Environment { provider_factory, .. } = self.env.init(AccessRights::RW)?;
                command.execute(provider_factory)?;
            }
            Subcommands::Version => {
                let local_db_version = match get_db_version(&db_path) {
                    Ok(version) => Some(version),
                    Err(DatabaseVersionError::MissingFile) => None,
                    Err(err) => return Err(err.into()),
                };

                println!("Current database version: {DB_VERSION}");

                if let Some(version) = local_db_version {
                    println!("Local database version: {version}");
                } else {
                    println!("Local database is uninitialized");
                }
            }
            Subcommands::Path => {
                println!("{}", db_path.display());
            }
        }

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use reth_node_core::args::utils::SUPPORTED_CHAINS;
    use std::path::Path;

    #[test]
    fn parse_stats_globals() {
        let path = format!("../{}", SUPPORTED_CHAINS[0]);
        let cmd = Command::try_parse_from(["reth", "--datadir", &path, "stats"]).unwrap();
        assert_eq!(cmd.env.datadir.resolve_datadir(cmd.env.chain.chain).as_ref(), Path::new(&path));
    }
}
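For readers who don't want to expand the macro mentally: a call like `db_ro_exec!(self.env, tool, { command.execute(&tool)?; })` unfolds to roughly the following (a sketch of the expansion, not compiler output). Note only `Drop` and `Clear` bypass it, because they are the only subcommands that need `AccessRights::RW`.

// Open the environment read-only and hand a DbTool to the block.
let Environment { provider_factory, .. } = self.env.init(AccessRights::RO)?;
let tool = DbTool::new(provider_factory.clone())?;
{
    command.execute(&tool)?;
};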
@@ -1,348 +0,0 @@
use crate::commands::db::checksum::ChecksumViewer;
use clap::Parser;
use comfy_table::{Cell, Row, Table as ComfyTable};
use eyre::WrapErr;
use human_bytes::human_bytes;
use itertools::Itertools;
use reth_db::{mdbx, static_file::iter_static_files, DatabaseEnv, TableViewer, Tables};
use reth_db_api::database::Database;
use reth_db_common::DbTool;
use reth_fs_util as fs;
use reth_node_core::dirs::{ChainPath, DataDirPath};
use reth_provider::providers::StaticFileProvider;
use reth_static_file_types::{find_fixed_range, SegmentRangeInclusive};
use std::{sync::Arc, time::Duration};

#[derive(Parser, Debug)]
/// The arguments for the `reth db stats` command
pub struct Command {
    /// Show a detailed size breakdown (data, index, offsets, config) for static files.
    #[arg(long, default_value_t = false)]
    detailed_sizes: bool,

    /// Show detailed information per static file segment.
    #[arg(long, default_value_t = false)]
    detailed_segments: bool,

    /// Show a checksum of each table in the database.
    ///
    /// WARNING: this option will take a long time to run, as it needs to traverse and hash the
    /// entire database.
    ///
    /// For individual table checksums, use the `reth db checksum` command.
    #[arg(long, default_value_t = false)]
    checksum: bool,
}

impl Command {
    /// Execute `db stats` command
    pub fn execute(
        self,
        data_dir: ChainPath<DataDirPath>,
        tool: &DbTool<Arc<DatabaseEnv>>,
    ) -> eyre::Result<()> {
        if self.checksum {
            let checksum_report = self.checksum_report(tool)?;
            println!("{checksum_report}");
            println!("\n");
        }

        let static_files_stats_table = self.static_files_stats_table(data_dir)?;
        println!("{static_files_stats_table}");

        println!("\n");

        let db_stats_table = self.db_stats_table(tool)?;
        println!("{db_stats_table}");

        Ok(())
    }

    fn db_stats_table(&self, tool: &DbTool<Arc<DatabaseEnv>>) -> eyre::Result<ComfyTable> {
        let mut table = ComfyTable::new();
        table.load_preset(comfy_table::presets::ASCII_MARKDOWN);
        table.set_header([
            "Table Name",
            "# Entries",
            "Branch Pages",
            "Leaf Pages",
            "Overflow Pages",
            "Total Size",
        ]);

        tool.provider_factory.db_ref().view(|tx| {
            let mut db_tables = Tables::ALL.iter().map(|table| table.name()).collect::<Vec<_>>();
            db_tables.sort();
            let mut total_size = 0;
            for db_table in db_tables {
                let table_db = tx.inner.open_db(Some(db_table)).wrap_err("Could not open db.")?;

                let stats = tx
                    .inner
                    .db_stat(&table_db)
                    .wrap_err(format!("Could not find table: {db_table}"))?;

                // Defaults to 16KB right now but we should
                // re-evaluate depending on the DB we end up using
                // (e.g. REDB does not have these options as configurable intentionally)
                let page_size = stats.page_size() as usize;
                let leaf_pages = stats.leaf_pages();
                let branch_pages = stats.branch_pages();
                let overflow_pages = stats.overflow_pages();
                let num_pages = leaf_pages + branch_pages + overflow_pages;
                let table_size = page_size * num_pages;

                total_size += table_size;
                let mut row = Row::new();
                row.add_cell(Cell::new(db_table))
                    .add_cell(Cell::new(stats.entries()))
                    .add_cell(Cell::new(branch_pages))
                    .add_cell(Cell::new(leaf_pages))
                    .add_cell(Cell::new(overflow_pages))
                    .add_cell(Cell::new(human_bytes(table_size as f64)));
                table.add_row(row);
            }

            let max_widths = table.column_max_content_widths();
            let mut separator = Row::new();
            for width in max_widths {
                separator.add_cell(Cell::new("-".repeat(width as usize)));
            }
            table.add_row(separator);

            let mut row = Row::new();
            row.add_cell(Cell::new("Tables"))
                .add_cell(Cell::new(""))
                .add_cell(Cell::new(""))
                .add_cell(Cell::new(""))
                .add_cell(Cell::new(""))
                .add_cell(Cell::new(human_bytes(total_size as f64)));
            table.add_row(row);

            let freelist = tx.inner.env().freelist()?;
            let pagesize = tx.inner.db_stat(&mdbx::Database::freelist_db())?.page_size() as usize;
            let freelist_size = freelist * pagesize;

            let mut row = Row::new();
            row.add_cell(Cell::new("Freelist"))
                .add_cell(Cell::new(freelist))
                .add_cell(Cell::new(""))
                .add_cell(Cell::new(""))
                .add_cell(Cell::new(""))
                .add_cell(Cell::new(human_bytes(freelist_size as f64)));
            table.add_row(row);

            Ok::<(), eyre::Report>(())
        })??;

        Ok(table)
    }
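The per-table size reported by `db_stats_table` is plain page arithmetic: MDBX reports page counts per B-tree, and the byte size is pages times page size. A quick sanity-check sketch (page counts illustrative, 16 KiB page size per the comment above):

fn table_size_bytes(page_size: usize, leaf: usize, branch: usize, overflow: usize) -> usize {
    // total pages backing the table's B-tree, times the page size
    page_size * (leaf + branch + overflow)
}

fn main() {
    // e.g. 1000 leaf + 10 branch + 2 overflow pages at a 16 KiB page size
    assert_eq!(table_size_bytes(16384, 1000, 10, 2), 16_580_608); // ≈ 15.8 MiB
}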
|
||||
fn static_files_stats_table(
|
||||
&self,
|
||||
data_dir: ChainPath<DataDirPath>,
|
||||
) -> eyre::Result<ComfyTable> {
|
||||
let mut table = ComfyTable::new();
|
||||
table.load_preset(comfy_table::presets::ASCII_MARKDOWN);
|
||||
|
||||
if self.detailed_sizes {
|
||||
table.set_header([
|
||||
"Segment",
|
||||
"Block Range",
|
||||
"Transaction Range",
|
||||
"Shape (columns x rows)",
|
||||
"Data Size",
|
||||
"Index Size",
|
||||
"Offsets Size",
|
||||
"Config Size",
|
||||
"Total Size",
|
||||
]);
|
||||
} else {
|
||||
table.set_header([
|
||||
"Segment",
|
||||
"Block Range",
|
||||
"Transaction Range",
|
||||
"Shape (columns x rows)",
|
||||
"Size",
|
||||
]);
|
||||
}
|
||||
|
||||
let static_files = iter_static_files(data_dir.static_files())?;
|
||||
let static_file_provider = StaticFileProvider::read_only(data_dir.static_files())?;
|
||||
|
||||
let mut total_data_size = 0;
|
||||
let mut total_index_size = 0;
|
||||
let mut total_offsets_size = 0;
|
||||
let mut total_config_size = 0;
|
||||
|
||||
for (segment, ranges) in static_files.into_iter().sorted_by_key(|(segment, _)| *segment) {
|
||||
let (
|
||||
mut segment_columns,
|
||||
mut segment_rows,
|
||||
mut segment_data_size,
|
||||
mut segment_index_size,
|
||||
mut segment_offsets_size,
|
||||
mut segment_config_size,
|
||||
) = (0, 0, 0, 0, 0, 0);
|
||||
|
||||
for (block_range, tx_range) in &ranges {
|
||||
let fixed_block_range = find_fixed_range(block_range.start());
|
||||
let jar_provider = static_file_provider
|
||||
.get_segment_provider(segment, || Some(fixed_block_range), None)?
|
||||
.ok_or_else(|| {
|
||||
eyre::eyre!("Failed to get segment provider for segment: {}", segment)
|
||||
})?;
|
||||
|
||||
let columns = jar_provider.columns();
|
||||
let rows = jar_provider.rows();
|
||||
|
||||
let data_size = fs::metadata(jar_provider.data_path())
|
||||
.map(|metadata| metadata.len())
|
||||
.unwrap_or_default();
|
||||
let index_size = fs::metadata(jar_provider.index_path())
|
||||
.map(|metadata| metadata.len())
|
||||
.unwrap_or_default();
|
||||
let offsets_size = fs::metadata(jar_provider.offsets_path())
|
||||
.map(|metadata| metadata.len())
|
||||
.unwrap_or_default();
|
||||
let config_size = fs::metadata(jar_provider.config_path())
|
||||
.map(|metadata| metadata.len())
|
||||
.unwrap_or_default();
|
||||
|
||||
                if self.detailed_segments {
                    let mut row = Row::new();
                    row.add_cell(Cell::new(segment))
                        .add_cell(Cell::new(format!("{block_range}")))
                        .add_cell(Cell::new(
                            tx_range.map_or("N/A".to_string(), |tx_range| format!("{tx_range}")),
                        ))
                        .add_cell(Cell::new(format!("{columns} x {rows}")));
                    if self.detailed_sizes {
                        row.add_cell(Cell::new(human_bytes(data_size as f64)))
                            .add_cell(Cell::new(human_bytes(index_size as f64)))
                            .add_cell(Cell::new(human_bytes(offsets_size as f64)))
                            .add_cell(Cell::new(human_bytes(config_size as f64)));
                    }
                    row.add_cell(Cell::new(human_bytes(
                        (data_size + index_size + offsets_size + config_size) as f64,
                    )));
                    table.add_row(row);
                } else {
                    if segment_columns > 0 {
                        assert_eq!(segment_columns, columns);
                    } else {
                        segment_columns = columns;
                    }
                    segment_rows += rows;
                    segment_data_size += data_size;
                    segment_index_size += index_size;
                    segment_offsets_size += offsets_size;
                    segment_config_size += config_size;
                }

                total_data_size += data_size;
                total_index_size += index_size;
                total_offsets_size += offsets_size;
                total_config_size += config_size;
            }

            if !self.detailed_segments {
                let first_ranges = ranges.first().expect("not empty list of ranges");
                let last_ranges = ranges.last().expect("not empty list of ranges");

                let block_range =
                    SegmentRangeInclusive::new(first_ranges.0.start(), last_ranges.0.end());
                let tx_range = first_ranges
                    .1
                    .zip(last_ranges.1)
                    .map(|(first, last)| SegmentRangeInclusive::new(first.start(), last.end()));

                let mut row = Row::new();
                row.add_cell(Cell::new(segment))
                    .add_cell(Cell::new(format!("{block_range}")))
                    .add_cell(Cell::new(
                        tx_range.map_or("N/A".to_string(), |tx_range| format!("{tx_range}")),
                    ))
                    .add_cell(Cell::new(format!("{segment_columns} x {segment_rows}")));
                if self.detailed_sizes {
                    row.add_cell(Cell::new(human_bytes(segment_data_size as f64)))
                        .add_cell(Cell::new(human_bytes(segment_index_size as f64)))
                        .add_cell(Cell::new(human_bytes(segment_offsets_size as f64)))
                        .add_cell(Cell::new(human_bytes(segment_config_size as f64)));
                }
                row.add_cell(Cell::new(human_bytes(
                    (segment_data_size +
                        segment_index_size +
                        segment_offsets_size +
                        segment_config_size) as f64,
                )));
                table.add_row(row);
            }
        }

        let max_widths = table.column_max_content_widths();
        let mut separator = Row::new();
        for width in max_widths {
            separator.add_cell(Cell::new("-".repeat(width as usize)));
        }
        table.add_row(separator);

        let mut row = Row::new();
        row.add_cell(Cell::new("Total"))
            .add_cell(Cell::new(""))
            .add_cell(Cell::new(""))
            .add_cell(Cell::new(""));
        if self.detailed_sizes {
            row.add_cell(Cell::new(human_bytes(total_data_size as f64)))
                .add_cell(Cell::new(human_bytes(total_index_size as f64)))
                .add_cell(Cell::new(human_bytes(total_offsets_size as f64)))
                .add_cell(Cell::new(human_bytes(total_config_size as f64)));
        }
        row.add_cell(Cell::new(human_bytes(
            (total_data_size + total_index_size + total_offsets_size + total_config_size) as f64,
        )));
        table.add_row(row);

        Ok(table)
    }
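When `detailed_segments` is off, each segment collapses to one row spanning from the first file's start to the last file's end, and `Option::zip` keeps the merged transaction range honest: it is `Some` only when both endpoints carry one, otherwise the row prints "N/A". The zip behavior in isolation:

    fn main() {
        let first: Option<u64> = Some(10);
        let last: Option<u64> = None;
        // zip yields None if either side is None, so the merged range falls back to "N/A".
        assert_eq!(first.zip(last), None);
        assert_eq!(Some(1u64).zip(Some(2u64)), Some((1, 2)));
    }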
    fn checksum_report(&self, tool: &DbTool<Arc<DatabaseEnv>>) -> eyre::Result<ComfyTable> {
        let mut table = ComfyTable::new();
        table.load_preset(comfy_table::presets::ASCII_MARKDOWN);
        table.set_header(vec![Cell::new("Table"), Cell::new("Checksum"), Cell::new("Elapsed")]);

        let db_tables = Tables::ALL;
        let mut total_elapsed = Duration::default();

        for &db_table in db_tables {
            let (checksum, elapsed) = ChecksumViewer::new(tool).view_rt(db_table).unwrap();

            // increment duration for final report
            total_elapsed += elapsed;

            // add rows containing checksums to the table
            let mut row = Row::new();
            row.add_cell(Cell::new(db_table));
            row.add_cell(Cell::new(format!("{:x}", checksum)));
            row.add_cell(Cell::new(format!("{:?}", elapsed)));
            table.add_row(row);
        }

        // add a separator for the final report
        let max_widths = table.column_max_content_widths();
        let mut separator = Row::new();
        for width in max_widths {
            separator.add_cell(Cell::new("-".repeat(width as usize)));
        }
        table.add_row(separator);

        // add the final report
        let mut row = Row::new();
        row.add_cell(Cell::new("Total elapsed"));
        row.add_cell(Cell::new(""));
        row.add_cell(Cell::new(format!("{:?}", total_elapsed)));
        table.add_row(row);

        Ok(table)
    }
}
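`checksum_report` times each table scan and folds the per-table durations into one total for the footer row. The accumulation pattern on its own:

    use std::time::{Duration, Instant};

    fn main() {
        let mut total_elapsed = Duration::default();
        for _ in 0..3 {
            let started = Instant::now();
            // per-table work would run here
            total_elapsed += started.elapsed();
        }
        println!("Total elapsed: {total_elapsed:?}");
    }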
@ -1,424 +0,0 @@
use crossterm::{
    event::{self, Event, KeyCode, MouseEventKind},
    execute,
    terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
};
use ratatui::{
    backend::{Backend, CrosstermBackend},
    layout::{Alignment, Constraint, Direction, Layout},
    style::{Color, Modifier, Style},
    widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap},
    Frame, Terminal,
};
use reth_db::RawValue;
use reth_db_api::table::{Table, TableRow};
use std::{
    io,
    time::{Duration, Instant},
};
use tracing::error;
/// Available keybindings for the [`DbListTUI`]
static CMDS: [(&str, &str); 6] = [
    ("q", "Quit"),
    ("↑", "Entry above"),
    ("↓", "Entry below"),
    ("←", "Previous page"),
    ("→", "Next page"),
    ("G", "Go to a specific page"),
];

/// Modified version of the [`ListState`] struct that exposes the `offset` field.
/// Used to make the [`DbListTUI`] keys clickable.
struct ExpListState {
    pub(crate) offset: usize,
}

#[derive(Default, Eq, PartialEq)]
pub(crate) enum ViewMode {
    /// Normal list view mode
    #[default]
    Normal,
    /// Currently wanting to go to a page
    GoToPage,
}

enum Entries<T: Table> {
    /// Pairs of [`Table::Key`] and [`RawValue<Table::Value>`]
    RawValues(Vec<(T::Key, RawValue<T::Value>)>),
    /// Pairs of [`Table::Key`] and [`Table::Value`]
    Values(Vec<TableRow<T>>),
}

impl<T: Table> Entries<T> {
    /// Creates new empty [Entries] as [`Entries::RawValues`] if `raw_values == true` and as
    /// [`Entries::Values`] if `raw_values == false`.
    const fn new_with_raw_values(raw_values: bool) -> Self {
        if raw_values {
            Self::RawValues(Vec::new())
        } else {
            Self::Values(Vec::new())
        }
    }

    /// Sets the internal entries [Vec], converting the [`Table::Value`] into
    /// [`RawValue<Table::Value>`] if needed.
    fn set(&mut self, new_entries: Vec<TableRow<T>>) {
        match self {
            Self::RawValues(old_entries) => {
                *old_entries =
                    new_entries.into_iter().map(|(key, value)| (key, value.into())).collect()
            }
            Self::Values(old_entries) => *old_entries = new_entries,
        }
    }

    /// Returns the length of internal [Vec].
    fn len(&self) -> usize {
        match self {
            Self::RawValues(entries) => entries.len(),
            Self::Values(entries) => entries.len(),
        }
    }

    /// Returns an iterator over keys of the internal [Vec]. For both [`Entries::RawValues`] and
    /// [`Entries::Values`], this iterator will yield [`Table::Key`].
    const fn iter_keys(&self) -> EntriesKeyIter<'_, T> {
        EntriesKeyIter { entries: self, index: 0 }
    }
}
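The split between the two variants is a decode-cost optimization: `RawValues` keeps each value as its still-encoded bytes (only serialized when the value pane renders it via `raw_value()` further down), while `Values` holds fully decoded rows. The shape of the idea, with hypothetical stand-in types rather than the reth-db ones:

    // Hypothetical stand-ins: pay the decode cost only when a value is displayed.
    enum Entry {
        Raw(Vec<u8>),
        Decoded(String),
    }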
struct EntriesKeyIter<'a, T: Table> {
    entries: &'a Entries<T>,
    index: usize,
}

impl<'a, T: Table> Iterator for EntriesKeyIter<'a, T> {
    type Item = &'a T::Key;

    fn next(&mut self) -> Option<Self::Item> {
        let item = match self.entries {
            Entries::RawValues(values) => values.get(self.index).map(|(key, _)| key),
            Entries::Values(values) => values.get(self.index).map(|(key, _)| key),
        };
        self.index += 1;

        item
    }
}
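Per arm, this hand-rolled iterator does what `iter().map(|(key, _)| key)` would; the manual index exists so a single iterator type serves both variants. The same key projection standalone:

    fn main() {
        let pairs = vec![(1u64, "a"), (2, "b")];
        let keys: Vec<&u64> = pairs.iter().map(|(key, _)| key).collect();
        assert_eq!(keys, [&1, &2]);
    }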
pub(crate) struct DbListTUI<F, T: Table>
where
    F: FnMut(usize, usize) -> Vec<TableRow<T>>,
{
    /// Fetcher for the next page of items.
    ///
    /// The fetcher is passed the index of the first item to fetch, and the number of items to
    /// fetch from that item.
    fetch: F,
    /// Skip N indices of the key list in the DB.
    skip: usize,
    /// The number of entries to show per page
    count: usize,
    /// The total number of entries in the database
    total_entries: usize,
    /// The current view mode
    mode: ViewMode,
    /// The current state of the input buffer
    input: String,
    /// The state of the key list.
    list_state: ListState,
    /// Entries to show in the TUI.
    entries: Entries<T>,
}

impl<F, T: Table> DbListTUI<F, T>
where
    F: FnMut(usize, usize) -> Vec<TableRow<T>>,
{
    /// Create a new database list TUI
    pub(crate) fn new(
        fetch: F,
        skip: usize,
        count: usize,
        total_entries: usize,
        raw: bool,
    ) -> Self {
        Self {
            fetch,
            skip,
            count,
            total_entries,
            mode: ViewMode::Normal,
            input: String::new(),
            list_state: ListState::default(),
            entries: Entries::new_with_raw_values(raw),
        }
    }
    /// Move to the next list selection
    fn next(&mut self) {
        self.list_state.select(Some(
            self.list_state
                .selected()
                .map(|i| if i >= self.entries.len() - 1 { 0 } else { i + 1 })
                .unwrap_or(0),
        ));
    }

    /// Move to the previous list selection
    fn previous(&mut self) {
        self.list_state.select(Some(
            self.list_state
                .selected()
                .map(|i| if i == 0 { self.entries.len() - 1 } else { i - 1 })
                .unwrap_or(0),
        ));
    }

    fn reset(&mut self) {
        self.list_state.select(Some(0));
    }
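Both movements wrap around the list: Down on the last entry selects index 0, and Up on the first jumps to the end (note both subtract from `entries.len()`, so they assume a non-empty page). The wrap arithmetic standalone:

    fn main() {
        let len = 5usize;
        let next = |i: usize| if i >= len - 1 { 0 } else { i + 1 };
        let prev = |i: usize| if i == 0 { len - 1 } else { i - 1 };
        assert_eq!(next(4), 0);
        assert_eq!(prev(0), 4);
    }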
    /// Fetch the next page of items
    fn next_page(&mut self) {
        if self.skip + self.count < self.total_entries {
            self.skip += self.count;
            self.fetch_page();
        }
    }

    /// Fetch the previous page of items
    fn previous_page(&mut self) {
        if self.skip > 0 {
            self.skip = self.skip.saturating_sub(self.count);
            self.fetch_page();
        }
    }

    /// Go to a specific page.
    fn go_to_page(&mut self, page: usize) {
        self.skip = (self.count * page).min(self.total_entries - self.count);
        self.fetch_page();
    }

    /// Fetch the current page
    fn fetch_page(&mut self) {
        self.entries.set((self.fetch)(self.skip, self.count));
        self.reset();
    }
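`go_to_page` clamps the offset so even an out-of-range page number lands on the last full page (the `total_entries - self.count` subtraction presumes the table holds at least one page of entries). With the numbers made concrete:

    fn main() {
        // 1000 entries, 20 per page: any request past page 49 clamps to skip = 980.
        let (total, count) = (1000usize, 20usize);
        let skip = (count * 9999).min(total - count);
        assert_eq!(skip, 980);
    }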
    /// Show the [`DbListTUI`] in the terminal.
    pub(crate) fn run(mut self) -> eyre::Result<()> {
        // Setup backend
        enable_raw_mode()?;
        let mut stdout = io::stdout();
        execute!(stdout, EnterAlternateScreen)?;
        let backend = CrosstermBackend::new(stdout);
        let mut terminal = Terminal::new(backend)?;

        // Load initial page
        self.fetch_page();

        // Run event loop
        let tick_rate = Duration::from_millis(250);
        let res = event_loop(&mut terminal, &mut self, tick_rate);

        // Restore terminal
        disable_raw_mode()?;
        execute!(terminal.backend_mut(), LeaveAlternateScreen)?;
        terminal.show_cursor()?;

        // Handle errors
        if let Err(err) = res {
            error!("{:?}", err)
        }
        Ok(())
    }
}
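`run` restores the terminal before it even inspects the event-loop result, so an error cannot strand the shell in raw mode on an alternate screen (a panic inside the loop still would, though). A drop-guard sketch of the same guarantee, using only the crossterm call already imported above:

    use crossterm::terminal::disable_raw_mode;

    struct RawModeGuard;

    impl Drop for RawModeGuard {
        fn drop(&mut self) {
            // Best-effort restore that also runs during panic unwinding.
            let _ = disable_raw_mode();
        }
    }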
/// Run the event loop
fn event_loop<B: Backend, F, T: Table>(
    terminal: &mut Terminal<B>,
    app: &mut DbListTUI<F, T>,
    tick_rate: Duration,
) -> io::Result<()>
where
    F: FnMut(usize, usize) -> Vec<TableRow<T>>,
{
    let mut last_tick = Instant::now();
    let mut running = true;
    while running {
        // Render
        terminal.draw(|f| ui(f, app))?;

        // Calculate timeout
        let timeout =
            tick_rate.checked_sub(last_tick.elapsed()).unwrap_or_else(|| Duration::from_secs(0));

        // Poll events
        if crossterm::event::poll(timeout)? {
            running = !handle_event(app, event::read()?)?;
        }

        if last_tick.elapsed() >= tick_rate {
            last_tick = Instant::now();
        }
    }

    Ok(())
}
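The timeout keeps `poll` from blocking past the next tick: `checked_sub` returns `None` once a tick is already overdue, flooring the wait at zero so the loop redraws immediately. Standalone:

    use std::time::Duration;

    fn main() {
        let tick_rate = Duration::from_millis(250);
        let elapsed = Duration::from_millis(300); // tick already overdue
        let timeout = tick_rate.checked_sub(elapsed).unwrap_or_else(|| Duration::from_secs(0));
        assert_eq!(timeout, Duration::ZERO);
    }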
/// Handle incoming events
fn handle_event<F, T: Table>(app: &mut DbListTUI<F, T>, event: Event) -> io::Result<bool>
where
    F: FnMut(usize, usize) -> Vec<TableRow<T>>,
{
    if app.mode == ViewMode::GoToPage {
        if let Event::Key(key) = event {
            match key.code {
                KeyCode::Enter => {
                    let input = std::mem::take(&mut app.input);
                    if let Ok(page) = input.parse() {
                        app.go_to_page(page);
                    }
                    app.mode = ViewMode::Normal;
                }
                KeyCode::Char(c) => {
                    app.input.push(c);
                }
                KeyCode::Backspace => {
                    app.input.pop();
                }
                KeyCode::Esc => app.mode = ViewMode::Normal,
                _ => {}
            }
        }

        return Ok(false)
    }

    match event {
        Event::Key(key) => {
            if key.kind == event::KeyEventKind::Press {
                match key.code {
                    KeyCode::Char('q') | KeyCode::Char('Q') => return Ok(true),
                    KeyCode::Down => app.next(),
                    KeyCode::Up => app.previous(),
                    KeyCode::Right => app.next_page(),
                    KeyCode::Left => app.previous_page(),
                    KeyCode::Char('G') => {
                        app.mode = ViewMode::GoToPage;
                    }
                    _ => {}
                }
            }
        }
        Event::Mouse(e) => match e.kind {
            MouseEventKind::ScrollDown => app.next(),
            MouseEventKind::ScrollUp => app.previous(),
            // TODO: This click event can be triggered outside of the list widget.
            MouseEventKind::Down(_) => {
                // SAFETY: The pointer to the app's state will always be valid for
                // reads here, and the source is larger than the destination.
                //
                // This is technically unsafe, but because the alignment requirements
                // in both the source and destination are the same and we can ensure
                // that the pointer to `app.state` is valid for reads, this is safe.
                let state: ExpListState = unsafe { std::mem::transmute_copy(&app.list_state) };
                let new_idx = (e.row as usize + state.offset).saturating_sub(1);
                if new_idx < app.entries.len() {
                    app.list_state.select(Some(new_idx));
                }
            }
            _ => {}
        },
        _ => {}
    }

    Ok(false)
}
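The `transmute_copy` reads `ListState`'s private scroll offset through the layout-mirroring `ExpListState`, which silently breaks if ratatui ever reorders or retypes that field. Recent ratatui releases expose the offset directly; assuming the 0.27 `ListState::offset()` getter, the click handler could avoid `unsafe` entirely:

    // Sketch under that assumption: same index math, no layout-dependent transmute.
    let offset = app.list_state.offset();
    let new_idx = (e.row as usize + offset).saturating_sub(1);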
/// Render the UI
fn ui<F, T: Table>(f: &mut Frame<'_>, app: &mut DbListTUI<F, T>)
where
    F: FnMut(usize, usize) -> Vec<TableRow<T>>,
{
    let outer_chunks = Layout::default()
        .direction(Direction::Vertical)
        .constraints([Constraint::Percentage(95), Constraint::Percentage(5)].as_ref())
        .split(f.size());

    // Columns
    {
        let inner_chunks = Layout::default()
            .direction(Direction::Horizontal)
            .constraints([Constraint::Percentage(50), Constraint::Percentage(50)])
            .split(outer_chunks[0]);

        let key_length = format!("{}", (app.skip + app.count).saturating_sub(1)).len();

        let formatted_keys = app
            .entries
            .iter_keys()
            .enumerate()
            .map(|(i, k)| {
                ListItem::new(format!("[{:0>width$}]: {k:?}", i + app.skip, width = key_length))
            })
            .collect::<Vec<ListItem<'_>>>();

        let key_list = List::new(formatted_keys)
            .block(Block::default().borders(Borders::ALL).title(format!(
                "Keys (Showing entries {}-{} out of {} entries)",
                app.skip,
                (app.skip + app.entries.len()).saturating_sub(1),
                app.total_entries
            )))
            .style(Style::default().fg(Color::White))
            .highlight_style(Style::default().fg(Color::Cyan).add_modifier(Modifier::ITALIC))
            .highlight_symbol("➜ ");
        f.render_stateful_widget(key_list, inner_chunks[0], &mut app.list_state);

        let value_display = Paragraph::new(
            app.list_state
                .selected()
                .and_then(|selected| {
                    let maybe_serialized = match &app.entries {
                        Entries::RawValues(entries) => {
                            entries.get(selected).map(|(_, v)| serde_json::to_string(v.raw_value()))
                        }
                        Entries::Values(entries) => {
                            entries.get(selected).map(|(_, v)| serde_json::to_string_pretty(v))
                        }
                    };
                    maybe_serialized.map(|ser| {
                        ser.unwrap_or_else(|error| format!("Error serializing value: {error}"))
                    })
                })
                .unwrap_or_else(|| "No value selected".to_string()),
        )
        .block(Block::default().borders(Borders::ALL).title("Value (JSON)"))
        .wrap(Wrap { trim: false })
        .alignment(Alignment::Left);
        f.render_widget(value_display, inner_chunks[1]);
    }

    // Footer
    let footer = match app.mode {
        ViewMode::Normal => Paragraph::new(
            CMDS.iter().map(|(k, v)| format!("[{k}] {v}")).collect::<Vec<_>>().join(" | "),
        ),
        ViewMode::GoToPage => Paragraph::new(format!(
            "Go to page (max {}): {}",
            app.total_entries / app.count,
            app.input
        )),
    }
    .block(Block::default().borders(Borders::ALL))
    .alignment(match app.mode {
        ViewMode::Normal => Alignment::Center,
        ViewMode::GoToPage => Alignment::Left,
    })
    .style(Style::default().fg(Color::Cyan).add_modifier(Modifier::BOLD));
    f.render_widget(footer, outer_chunks[1]);
}
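`key_length` sizes the zero-padding from the widest index the page can show, keeping the key labels aligned. The formatting trick standalone:

    fn main() {
        let width = format!("{}", 199usize).len(); // widest index on the page
        assert_eq!(format!("[{:0>width$}]", 7, width = width), "[007]");
    }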
@ -1,9 +1,5 @@
//! Command for debugging block building.

use crate::{
    commands::common::{AccessRights, Environment, EnvironmentArgs},
    macros::block_executor,
};
use crate::macros::block_executor;
use alloy_rlp::Decodable;
use clap::Parser;
use eyre::Context;
@ -14,6 +10,7 @@ use reth_beacon_consensus::EthBeaconConsensus;
use reth_blockchain_tree::{
    BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals,
};
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_cli_runner::CliContext;
use reth_consensus::Consensus;
use reth_db::DatabaseEnv;
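The remaining hunks repeat one mechanical move: the shared CLI environment types leave the binary's own `commands::common` module for the newly extracted `reth_cli_commands` crate, so each file swaps

    use crate::commands::common::{AccessRights, Environment, EnvironmentArgs};

for

    use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};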
@ -2,13 +2,13 @@

use crate::{
    args::{get_secret_key, NetworkArgs},
    commands::common::{AccessRights, Environment, EnvironmentArgs},
    macros::block_executor,
    utils::get_single_header,
};
use clap::Parser;
use futures::{stream::select as stream_select, StreamExt};
use reth_beacon_consensus::EthBeaconConsensus;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_cli_runner::CliContext;
use reth_config::Config;
use reth_consensus::Consensus;

@ -2,12 +2,12 @@

use crate::{
    args::{get_secret_key, NetworkArgs},
    commands::common::{AccessRights, Environment, EnvironmentArgs},
    macros::block_executor,
    utils::{get_single_body, get_single_header},
};
use backon::{ConstantBuilder, Retryable};
use clap::Parser;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_cli_runner::CliContext;
use reth_config::Config;
use reth_db::DatabaseEnv;

@ -1,14 +1,13 @@
//! Command for debugging merkle trie calculation.

use crate::{
    args::{get_secret_key, NetworkArgs},
    commands::common::{AccessRights, Environment, EnvironmentArgs},
    macros::block_executor,
    utils::get_single_header,
};
use backon::{ConstantBuilder, Retryable};
use clap::Parser;
use reth_beacon_consensus::EthBeaconConsensus;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_cli_runner::CliContext;
use reth_config::Config;
use reth_consensus::Consensus;

@ -1,6 +1,5 @@
use crate::{
    args::{get_secret_key, NetworkArgs},
    commands::common::{AccessRights, Environment, EnvironmentArgs},
    macros::block_executor,
};
use clap::Parser;
@ -10,6 +9,7 @@ use reth_beacon_consensus::{hooks::EngineHooks, BeaconConsensusEngine, EthBeacon
use reth_blockchain_tree::{
    BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals,
};
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_cli_runner::CliContext;
use reth_config::Config;
use reth_consensus::Consensus;

@ -1,13 +1,9 @@
//! Command that initializes the node by importing a chain from a file.

use crate::{
    commands::common::{AccessRights, Environment, EnvironmentArgs},
    macros::block_executor,
    version::SHORT_VERSION,
};
use crate::{macros::block_executor, version::SHORT_VERSION};
use clap::Parser;
use futures::{Stream, StreamExt};
use reth_beacon_consensus::EthBeaconConsensus;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_config::Config;
use reth_consensus::Consensus;
use reth_db::tables;

@ -1,14 +1,8 @@
//! Command that initializes the node by importing OP Mainnet chain segment below Bedrock, from a
//! file.

use crate::{
    commands::{
        common::{AccessRights, Environment, EnvironmentArgs},
        import::build_import_pipeline,
    },
    version::SHORT_VERSION,
};
use crate::{commands::import::build_import_pipeline, version::SHORT_VERSION};
use clap::Parser;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_consensus::noop::NoopConsensus;
use reth_db::tables;
use reth_db_api::transaction::DbTx;

@ -1,8 +1,8 @@
//! Command that imports OP mainnet receipts from Bedrock datadir, exported via
//! <https://github.com/testinprod-io/op-geth/pull/1>.

use crate::commands::common::{AccessRights, Environment, EnvironmentArgs};
use clap::Parser;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_db::tables;
use reth_db_api::{database::Database, transaction::DbTx};
use reth_downloaders::{

@ -1,7 +1,7 @@
//! Command that initializes the node from a genesis file.

use crate::commands::common::{AccessRights, Environment, EnvironmentArgs};
use clap::Parser;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_provider::BlockHashReader;
use tracing::info;

@ -1,7 +1,7 @@
//! Command that initializes the node from a genesis file.

use crate::commands::common::{AccessRights, Environment, EnvironmentArgs};
use clap::Parser;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_config::config::EtlConfig;
use reth_db_api::database::Database;
use reth_db_common::init::init_from_state_dump;

@ -1,7 +1,6 @@
//! This contains all of the `reth` commands

pub mod config_cmd;
pub mod db;
pub mod debug_cmd;
pub mod dump_genesis;
pub mod import;
@ -15,5 +14,3 @@ pub mod prune;
pub mod recover;
pub mod stage;
pub mod test_vectors;

pub mod common;

@ -1,7 +1,6 @@
//! Command that runs pruning without any limits.

use crate::commands::common::{AccessRights, Environment, EnvironmentArgs};
use clap::Parser;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_prune::PrunerBuilder;
use reth_static_file::StaticFileProducer;
use tracing::info;

@ -1,5 +1,5 @@
use crate::commands::common::{AccessRights, Environment, EnvironmentArgs};
use clap::Parser;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_cli_runner::CliContext;
use reth_db::tables;
use reth_db_api::{

@ -1,11 +1,8 @@
//! Database debugging tool

use crate::{
    args::StageEnum,
    commands::common::{AccessRights, Environment, EnvironmentArgs},
};
use crate::args::StageEnum;
use clap::Parser;
use itertools::Itertools;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_db::{static_file::iter_static_files, tables, DatabaseEnv};
use reth_db_api::transaction::DbTxMut;
use reth_db_common::{

@ -1,11 +1,7 @@
//! Database debugging tool

use crate::{
    args::DatadirArgs,
    commands::common::{AccessRights, Environment, EnvironmentArgs},
    dirs::DataDirPath,
};
use crate::{args::DatadirArgs, dirs::DataDirPath};
use clap::Parser;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_db::{init_db, mdbx::DatabaseArguments, tables, DatabaseEnv};
use reth_db_api::{
    cursor::DbCursorRO, database::Database, models::ClientVersion, table::TableImporter,

@ -1,15 +1,14 @@
//! Main `stage` command
//!
//! Stage debugging tool

use crate::{
    args::{get_secret_key, NetworkArgs, StageEnum},
    commands::common::{AccessRights, Environment, EnvironmentArgs},
    macros::block_executor,
    prometheus_exporter,
};
use clap::Parser;
use reth_beacon_consensus::EthBeaconConsensus;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_cli_runner::CliContext;
use reth_config::config::{HashingConfig, SenderRecoveryConfig, TransactionLookupConfig};
use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder;

@ -1,7 +1,9 @@
//! Unwinding a certain block range

use crate::macros::block_executor;
use clap::{Parser, Subcommand};
use reth_beacon_consensus::EthBeaconConsensus;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_config::Config;
use reth_consensus::Consensus;
use reth_db_api::database::Database;
@ -24,11 +26,6 @@ use std::{ops::RangeInclusive, sync::Arc};
use tokio::sync::watch;
use tracing::info;

use crate::{
    commands::common::{AccessRights, Environment, EnvironmentArgs},
    macros::block_executor,
};

/// `reth stage unwind` command
#[derive(Debug, Parser)]
pub struct Command {