feat(bin, storage): configurable MDBX log level (#3524)

Co-authored-by: Georgios Konstantopoulos <me@gakonst.com>
This commit is contained in:
Alexey Shekhirin
2023-07-03 13:45:32 +01:00
committed by GitHub
parent 4d3ce34901
commit 770652a787
28 changed files with 215 additions and 86 deletions

1
Cargo.lock generated
View File

@ -5321,6 +5321,7 @@ dependencies = [
"arbitrary",
"async-trait",
"auto_impl",
"clap 4.1.8",
"futures",
"hex-literal 0.3.4",
"modular-bitfield",

View File

@ -18,7 +18,7 @@ reth-revm = { path = "../../crates/revm" }
reth-revm-inspectors = { path = "../../crates/revm/revm-inspectors" }
reth-staged-sync = { path = "../../crates/staged-sync" }
reth-stages = { path = "../../crates/stages" }
reth-interfaces = { workspace = true, features = ["test-utils"] }
reth-interfaces = { workspace = true, features = ["test-utils", "clap"] }
reth-transaction-pool = { workspace = true }
reth-beacon-consensus = { path = "../../crates/consensus/beacon" }
reth-auto-seal-consensus = { path = "../../crates/consensus/auto-seal" }

View File

@ -0,0 +1,13 @@
//! clap [Args](clap::Args) for database configuration
use clap::Args;
use reth_interfaces::db::LogLevel;
/// Parameters for database configuration
///
/// Flattened into CLI commands via `#[clap(flatten)]`; groups its flags under
/// the "Database" help heading.
#[derive(Debug, Args, PartialEq, Default, Clone, Copy)]
#[command(next_help_heading = "Database")]
pub struct DatabaseArgs {
    /// Database logging level. Levels higher than "notice" require a debug build.
    // `None` (flag omitted) leaves the MDBX log level untouched; `value_enum`
    // parses the variant names of `LogLevel` case-insensitively via clap.
    #[arg(long = "db.log-level", value_enum)]
    pub log_level: Option<LogLevel>,
}

View File

@ -5,7 +5,7 @@ use reth_primitives::{TxHash, H256};
/// Parameters for debugging purposes
#[derive(Debug, Args, PartialEq, Default)]
#[command(next_help_heading = "Rpc")]
#[command(next_help_heading = "Debug")]
pub struct DebugArgs {
/// Prompt the downloader to download blocks one at a time.
///

View File

@ -2,7 +2,7 @@ use clap::Args;
/// Parameters to configure Gas Price Oracle
#[derive(Debug, Args, PartialEq, Eq, Default)]
#[command(next_help_heading = "GAS PRICE ORACLE")]
#[command(next_help_heading = "Gas Price Oracle")]
pub struct GasPriceOracleArgs {
/// Number of recent blocks to check for gas price
#[arg(long = "gpo.blocks", default_value = "20")]

View File

@ -12,6 +12,10 @@ pub use rpc_server_args::RpcServerArgs;
mod debug_args;
pub use debug_args::DebugArgs;
/// DatabaseArgs struct for configuring the database
mod database_args;
pub use database_args::DatabaseArgs;
mod secret_key;
pub use secret_key::{get_secret_key, SecretKeyError};

View File

@ -9,7 +9,7 @@ use futures::{Stream, StreamExt};
use reth_beacon_consensus::BeaconConsensus;
use reth_provider::{ProviderFactory, StageCheckpointReader};
use crate::args::utils::genesis_value_parser;
use crate::args::{utils::genesis_value_parser, DatabaseArgs};
use reth_config::Config;
use reth_db::{database::Database, init_db};
use reth_downloaders::{
@ -64,6 +64,9 @@ pub struct ImportCommand {
)]
chain: Arc<ChainSpec>,
#[clap(flatten)]
db: DatabaseArgs,
/// The path to a block file for import.
///
/// The online stages (headers and bodies) are replaced by a file import, after which the
@ -87,7 +90,7 @@ impl ImportCommand {
let db_path = data_dir.db_path();
info!(target: "reth::cli", path = ?db_path, "Opening database");
let db = Arc::new(init_db(db_path)?);
let db = Arc::new(init_db(db_path, self.db.log_level)?);
info!(target: "reth::cli", "Database opened");
debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis");

View File

@ -1,5 +1,5 @@
use crate::{
args::utils::genesis_value_parser,
args::{utils::genesis_value_parser, DatabaseArgs},
dirs::{DataDirPath, MaybePlatformPath},
};
use clap::Parser;
@ -38,6 +38,9 @@ pub struct InitCommand {
value_parser = genesis_value_parser
)]
chain: Arc<ChainSpec>,
#[clap(flatten)]
db: DatabaseArgs,
}
impl InitCommand {
@ -49,7 +52,7 @@ impl InitCommand {
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
let db_path = data_dir.db_path();
info!(target: "reth::cli", path = ?db_path, "Opening database");
let db = Arc::new(init_db(&db_path)?);
let db = Arc::new(init_db(&db_path, self.db.log_level)?);
info!(target: "reth::cli", "Database opened");
info!(target: "reth::cli", "Writing genesis block");

View File

@ -1,6 +1,6 @@
//! Database debugging tool
use crate::{
args::utils::genesis_value_parser,
args::{utils::genesis_value_parser, DatabaseArgs},
dirs::{DataDirPath, MaybePlatformPath},
utils::DbTool,
};
@ -53,6 +53,9 @@ pub struct Command {
)]
chain: Arc<ChainSpec>,
#[clap(flatten)]
db: DatabaseArgs,
#[clap(subcommand)]
command: Subcommands,
}
@ -84,7 +87,7 @@ impl Command {
match self.command {
// TODO: We'll need to add this on the DB trait.
Subcommands::Stats { .. } => {
let db = open_db_read_only(&db_path)?;
let db = open_db_read_only(&db_path, self.db.log_level)?;
let tool = DbTool::new(&db, self.chain.clone())?;
let mut stats_table = ComfyTable::new();
stats_table.load_preset(comfy_table::presets::ASCII_MARKDOWN);
@ -135,17 +138,17 @@ impl Command {
println!("{stats_table}");
}
Subcommands::List(command) => {
let db = open_db_read_only(&db_path)?;
let db = open_db_read_only(&db_path, self.db.log_level)?;
let tool = DbTool::new(&db, self.chain.clone())?;
command.execute(&tool)?;
}
Subcommands::Get(command) => {
let db = open_db_read_only(&db_path)?;
let db = open_db_read_only(&db_path, self.db.log_level)?;
let tool = DbTool::new(&db, self.chain.clone())?;
command.execute(&tool)?;
}
Subcommands::Drop => {
let db = open_db(&db_path)?;
let db = open_db(&db_path, self.db.log_level)?;
let mut tool = DbTool::new(&db, self.chain.clone())?;
tool.drop(db_path)?;
}

View File

@ -1,6 +1,6 @@
//! Command for debugging execution.
use crate::{
args::{get_secret_key, utils::genesis_value_parser, NetworkArgs},
args::{get_secret_key, utils::genesis_value_parser, DatabaseArgs, NetworkArgs},
dirs::{DataDirPath, MaybePlatformPath},
node::events,
runner::CliContext,
@ -75,6 +75,9 @@ pub struct Command {
#[clap(flatten)]
network: NetworkArgs,
#[clap(flatten)]
db: DatabaseArgs,
/// Set the chain tip manually for testing purposes.
///
/// NOTE: This is a temporary flag
@ -201,7 +204,7 @@ impl Command {
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
let db_path = data_dir.db_path();
fs::create_dir_all(&db_path)?;
let db = Arc::new(init_db(db_path)?);
let db = Arc::new(init_db(db_path, self.db.log_level)?);
debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis");
init_genesis(db.clone(), self.chain.clone())?;

View File

@ -1,6 +1,6 @@
//! Command for debugging merkle trie calculation.
use crate::{
args::utils::genesis_value_parser,
args::{utils::genesis_value_parser, DatabaseArgs},
dirs::{DataDirPath, MaybePlatformPath},
};
use clap::Parser;
@ -50,6 +50,9 @@ pub struct Command {
)]
chain: Arc<ChainSpec>,
#[clap(flatten)]
db: DatabaseArgs,
/// The height to finish at
#[arg(long)]
to: u64,
@ -67,7 +70,7 @@ impl Command {
let db_path = data_dir.db_path();
fs::create_dir_all(&db_path)?;
let db = Arc::new(init_db(db_path)?);
let db = Arc::new(init_db(db_path, self.db.log_level)?);
let factory = ProviderFactory::new(&db, self.chain.clone());
let provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?;

View File

@ -69,7 +69,7 @@ use tracing::*;
use crate::{
args::{
utils::{genesis_value_parser, parse_socket_address},
PayloadBuilderArgs,
DatabaseArgs, PayloadBuilderArgs,
},
dirs::MaybePlatformPath,
node::cl_events::ConsensusLayerHealthEvents,
@ -138,6 +138,9 @@ pub struct Command {
#[clap(flatten)]
debug: DebugArgs,
#[clap(flatten)]
db: DatabaseArgs,
/// Automatically mine blocks for new transactions
#[arg(long)]
auto_mine: bool,
@ -163,7 +166,7 @@ impl Command {
let db_path = data_dir.db_path();
info!(target: "reth::cli", path = ?db_path, "Opening database");
let db = Arc::new(init_db(&db_path)?);
let db = Arc::new(init_db(&db_path, self.db.log_level)?);
info!(target: "reth::cli", "Database opened");
self.start_metrics_endpoint(Arc::clone(&db)).await?;

View File

@ -3,7 +3,7 @@ use crate::{
args::{
get_secret_key,
utils::{chain_spec_value_parser, hash_or_num_value_parser},
DiscoveryArgs,
DatabaseArgs, DiscoveryArgs,
},
dirs::{DataDirPath, MaybePlatformPath},
utils::get_single_header,
@ -74,11 +74,14 @@ pub struct Command {
#[arg(long, default_value = "5")]
retries: usize,
#[clap(subcommand)]
command: Subcommands,
#[arg(long, default_value = "any")]
nat: NatResolver,
#[clap(flatten)]
db: DatabaseArgs,
#[clap(subcommand)]
command: Subcommands,
}
#[derive(Subcommand, Debug)]
@ -101,7 +104,7 @@ impl Command {
/// Execute `p2p` command
pub async fn execute(&self) -> eyre::Result<()> {
let tempdir = tempfile::TempDir::new()?;
let noop_db = Arc::new(open_db(&tempdir.into_path())?);
let noop_db = Arc::new(open_db(&tempdir.into_path(), self.db.log_level)?);
// add network name to data dir
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);

View File

@ -1,6 +1,6 @@
//! Database debugging tool
use crate::{
args::{utils::genesis_value_parser, StageEnum},
args::{utils::genesis_value_parser, DatabaseArgs, StageEnum},
dirs::{DataDirPath, MaybePlatformPath},
utils::DbTool,
};
@ -41,6 +41,9 @@ pub struct Command {
)]
chain: Arc<ChainSpec>,
#[clap(flatten)]
db: DatabaseArgs,
stage: StageEnum,
}
@ -52,7 +55,7 @@ impl Command {
let db_path = data_dir.db_path();
fs::create_dir_all(&db_path)?;
let db = open_db(db_path.as_ref())?;
let db = open_db(db_path.as_ref(), self.db.log_level)?;
let tool = DbTool::new(&db, self.chain.clone())?;

View File

@ -22,7 +22,7 @@ mod execution;
use execution::dump_execution_stage;
mod merkle;
use crate::args::utils::genesis_value_parser;
use crate::args::{utils::genesis_value_parser, DatabaseArgs};
use merkle::dump_merkle_stage;
/// `reth dump-stage` command
@ -55,6 +55,9 @@ pub struct Command {
)]
chain: Arc<ChainSpec>,
#[clap(flatten)]
db: DatabaseArgs,
#[clap(subcommand)]
command: Stages,
}
@ -98,7 +101,7 @@ impl Command {
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
let db_path = data_dir.db_path();
info!(target: "reth::cli", path = ?db_path, "Opening database");
let db = Arc::new(init_db(db_path)?);
let db = Arc::new(init_db(db_path, self.db.log_level)?);
info!(target: "reth::cli", "Database opened");
let mut tool = DbTool::new(&db, self.chain.clone())?;
@ -134,7 +137,7 @@ pub(crate) fn setup<DB: Database>(
info!(target: "reth::cli", ?output_db, "Creating separate db");
let output_db = init_db(output_db)?;
let output_db = init_db(output_db, None)?;
output_db.update(|tx| {
tx.import_table_with_range::<tables::BlockBodyIndices, _>(

View File

@ -2,7 +2,7 @@
//!
//! Stage debugging tool
use crate::{
args::{get_secret_key, utils::chain_spec_value_parser, NetworkArgs, StageEnum},
args::{get_secret_key, utils::chain_spec_value_parser, DatabaseArgs, NetworkArgs, StageEnum},
dirs::{DataDirPath, MaybePlatformPath},
prometheus_exporter,
version::SHORT_VERSION,
@ -92,6 +92,9 @@ pub struct Command {
#[clap(flatten)]
network: NetworkArgs,
#[clap(flatten)]
db: DatabaseArgs,
/// Commits the changes in the database. WARNING: potentially destructive.
///
/// Useful when you want to run diagnostics on the database.
@ -119,7 +122,7 @@ impl Command {
let db_path = data_dir.db_path();
info!(target: "reth::cli", path = ?db_path, "Opening database");
let db = Arc::new(init_db(db_path)?);
let db = Arc::new(init_db(db_path, self.db.log_level)?);
info!(target: "reth::cli", "Database opened");
let factory = ProviderFactory::new(&db, self.chain.clone());

View File

@ -1,7 +1,7 @@
//! Unwinding a certain block range
use crate::{
args::utils::genesis_value_parser,
args::{utils::genesis_value_parser, DatabaseArgs},
dirs::{DataDirPath, MaybePlatformPath},
};
use clap::{Parser, Subcommand};
@ -41,6 +41,9 @@ pub struct Command {
)]
chain: Arc<ChainSpec>,
#[clap(flatten)]
db: DatabaseArgs,
#[clap(subcommand)]
command: Subcommands,
}
@ -55,7 +58,7 @@ impl Command {
eyre::bail!("Database {db_path:?} does not exist.")
}
let db = open_db(db_path.as_ref())?;
let db = open_db(db_path.as_ref(), self.db.log_level)?;
let range = self.command.unwind_range(&db)?;

View File

@ -38,6 +38,7 @@ secp256k1 = { workspace = true, default-features = false, features = [
], optional = true }
modular-bitfield = "0.11.2"
parking_lot = "0.12.1"
clap = { version = "4", features = ["derive"], optional = true }
[dev-dependencies]
reth-db = { path = "../storage/db", features = ["test-utils"] }
@ -53,3 +54,4 @@ secp256k1 = { workspace = true, features = [
[features]
test-utils = ["tokio-stream/sync", "secp256k1", "rand/std_rng"]
cli = ["clap"]

View File

@ -31,4 +31,29 @@ pub enum DatabaseError {
/// Failed to get database stats.
#[error("Database stats error code: {0:?}")]
Stats(i32),
/// Failed to use the specified log level, as it's not available.
#[error("Log level is not available: {0:?}")]
LogLevelUnavailable(LogLevel),
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
// `clap::ValueEnum` is derived only behind the optional "clap" feature so that
// consumers without CLI support do not pull in clap.
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
/// Database log level.
///
/// Variants map in declaration order to the MDBX log levels 0 (`Fatal`)
/// through 7 (`Extra`). Levels above `Notice` are only available when libmdbx
/// is built with debug assertions (`MDBX_DEBUG`); selecting one in a release
/// build yields `DatabaseError::LogLevelUnavailable`.
pub enum LogLevel {
    /// Enables logging for critical conditions, i.e. assertion failures.
    Fatal,
    /// Enables logging for error conditions.
    Error,
    /// Enables logging for warning conditions.
    Warn,
    /// Enables logging for normal but significant condition.
    Notice,
    /// Enables logging for verbose informational.
    Verbose,
    /// Enables logging for debug-level messages.
    Debug,
    /// Enables logging for trace debug-level messages.
    Trace,
    /// Enables logging for extra debug-level messages.
    Extra,
}

View File

@ -6,6 +6,7 @@ use crate::{
utils::default_page_size,
DatabaseError,
};
use reth_interfaces::db::LogLevel;
use reth_libmdbx::{
DatabaseFlags, Environment, EnvironmentFlags, EnvironmentKind, Geometry, Mode, PageSize,
SyncMode, RO, RW,
@ -61,16 +62,19 @@ impl<E: EnvironmentKind> Env<E> {
/// Opens the database at the specified path with the given `EnvKind`.
///
/// It does not create the tables, for that call [`Env::create_tables`].
pub fn open(path: &Path, kind: EnvKind) -> Result<Env<E>, DatabaseError> {
pub fn open(
path: &Path,
kind: EnvKind,
log_level: Option<LogLevel>,
) -> Result<Env<E>, DatabaseError> {
let mode = match kind {
EnvKind::RO => Mode::ReadOnly,
EnvKind::RW => Mode::ReadWrite { sync_mode: SyncMode::Durable },
};
let env = Env {
inner: Environment::new()
.set_max_dbs(Tables::ALL.len())
.set_geometry(Geometry {
let mut inner_env = Environment::new();
inner_env.set_max_dbs(Tables::ALL.len());
inner_env.set_geometry(Geometry {
// Maximum database size of 4 terabytes
size: Some(0..(4 * TERABYTE)),
// We grow the database in increments of 4 gigabytes
@ -78,20 +82,46 @@ impl<E: EnvironmentKind> Env<E> {
// The database never shrinks
shrink_threshold: None,
page_size: Some(PageSize::Set(default_page_size())),
})
.set_flags(EnvironmentFlags {
});
inner_env.set_flags(EnvironmentFlags {
mode,
// We disable readahead because it improves performance for linear scans, but
// worsens it for random access (which is our access pattern outside of sync)
no_rdahead: true,
coalesce: true,
..Default::default()
})
});
// configure more readers
.set_max_readers(DEFAULT_MAX_READERS)
.open(path)
.map_err(|e| DatabaseError::FailedToOpen(e.into()))?,
inner_env.set_max_readers(DEFAULT_MAX_READERS);
if let Some(log_level) = log_level {
// Levels higher than [LogLevel::Notice] require libmdbx built with `MDBX_DEBUG` option.
let is_log_level_available = if cfg!(debug_assertions) {
true
} else {
matches!(
log_level,
LogLevel::Fatal | LogLevel::Error | LogLevel::Warn | LogLevel::Notice
)
};
if is_log_level_available {
inner_env.set_log_level(match log_level {
LogLevel::Fatal => 0,
LogLevel::Error => 1,
LogLevel::Warn => 2,
LogLevel::Notice => 3,
LogLevel::Verbose => 4,
LogLevel::Debug => 5,
LogLevel::Trace => 6,
LogLevel::Extra => 7,
});
} else {
return Err(DatabaseError::LogLevelUnavailable(log_level))
}
}
let env =
Env { inner: inner_env.open(path).map_err(|e| DatabaseError::FailedToOpen(e.into()))? };
Ok(env)
}
@ -117,7 +147,7 @@ impl<E: EnvironmentKind> Env<E> {
}
impl<E: EnvironmentKind> Deref for Env<E> {
type Target = reth_libmdbx::Environment<E>;
type Target = Environment<E>;
fn deref(&self) -> &Self::Target {
&self.inner
@ -151,7 +181,7 @@ mod tests {
/// Create database for testing with specified path
fn create_test_db_with_path<E: EnvironmentKind>(kind: EnvKind, path: &Path) -> Env<E> {
let env = Env::<E>::open(path, kind).expect(ERROR_DB_CREATION);
let env = Env::<E>::open(path, kind, None).expect(ERROR_DB_CREATION);
env.create_tables().expect(ERROR_TABLE_CREATION);
env
}
@ -746,7 +776,7 @@ mod tests {
assert!(result.expect(ERROR_RETURN_VALUE) == 200);
}
let env = Env::<WriteMap>::open(&path, EnvKind::RO).expect(ERROR_DB_CREATION);
let env = Env::<WriteMap>::open(&path, EnvKind::RO, None).expect(ERROR_DB_CREATION);
// GET
let result =

View File

@ -8,7 +8,7 @@ use crate::{
DatabaseError,
};
use parking_lot::RwLock;
use reth_libmdbx::{EnvironmentKind, Transaction, TransactionKind, WriteFlags, DBI, RW};
use reth_libmdbx::{ffi::DBI, EnvironmentKind, Transaction, TransactionKind, WriteFlags, RW};
use reth_metrics::metrics::{self, histogram};
use std::{marker::PhantomData, str::FromStr, sync::Arc, time::Instant};

View File

@ -100,11 +100,12 @@ pub type DatabaseEnv = Env<WriteMap>;
pub type DatabaseEnvRO = Env<NoWriteMap>;
use eyre::WrapErr;
use reth_interfaces::db::LogLevel;
use std::path::Path;
/// Opens up an existing database or creates a new one at the specified path. Creates tables if
/// necessary. Read/Write mode.
pub fn init_db<P: AsRef<Path>>(path: P) -> eyre::Result<DatabaseEnv> {
pub fn init_db<P: AsRef<Path>>(path: P, log_level: Option<LogLevel>) -> eyre::Result<DatabaseEnv> {
use crate::version::{check_db_version_file, create_db_version_file, DatabaseVersionError};
let rpath = path.as_ref();
@ -121,7 +122,7 @@ pub fn init_db<P: AsRef<Path>>(path: P) -> eyre::Result<DatabaseEnv> {
}
#[cfg(feature = "mdbx")]
{
let db = DatabaseEnv::open(rpath, EnvKind::RW)?;
let db = DatabaseEnv::open(rpath, EnvKind::RW, log_level)?;
db.create_tables()?;
Ok(db)
}
@ -132,10 +133,10 @@ pub fn init_db<P: AsRef<Path>>(path: P) -> eyre::Result<DatabaseEnv> {
}
/// Opens up an existing database. Read only mode. It doesn't create it or create tables if missing.
pub fn open_db_read_only(path: &Path) -> eyre::Result<DatabaseEnvRO> {
pub fn open_db_read_only(path: &Path, log_level: Option<LogLevel>) -> eyre::Result<DatabaseEnvRO> {
#[cfg(feature = "mdbx")]
{
Env::<NoWriteMap>::open(path, mdbx::EnvKind::RO)
Env::<NoWriteMap>::open(path, EnvKind::RO, log_level)
.with_context(|| format!("Could not open database at path: {}", path.display()))
}
#[cfg(not(feature = "mdbx"))]
@ -146,10 +147,10 @@ pub fn open_db_read_only(path: &Path) -> eyre::Result<DatabaseEnvRO> {
/// Opens up an existing database. Read/Write mode. It doesn't create it or create tables if
/// missing.
pub fn open_db(path: &Path) -> eyre::Result<DatabaseEnv> {
pub fn open_db(path: &Path, log_level: Option<LogLevel>) -> eyre::Result<DatabaseEnv> {
#[cfg(feature = "mdbx")]
{
Env::<WriteMap>::open(path, mdbx::EnvKind::RW)
Env::<WriteMap>::open(path, EnvKind::RW, log_level)
.with_context(|| format!("Could not open database at path: {}", path.display()))
}
#[cfg(not(feature = "mdbx"))]
@ -176,23 +177,23 @@ pub mod test_utils {
/// Create read/write database for testing
pub fn create_test_rw_db() -> Arc<DatabaseEnv> {
Arc::new(
init_db(tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path())
init_db(tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path(), None)
.expect(ERROR_DB_CREATION),
)
}
/// Create read/write database for testing
pub fn create_test_rw_db_with_path<P: AsRef<Path>>(path: P) -> Arc<DatabaseEnv> {
Arc::new(init_db(path.as_ref()).expect(ERROR_DB_CREATION))
Arc::new(init_db(path.as_ref(), None).expect(ERROR_DB_CREATION))
}
/// Create read only database for testing
pub fn create_test_ro_db() -> Arc<DatabaseEnvRO> {
let path = tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path();
{
init_db(path.as_path()).expect(ERROR_DB_CREATION);
init_db(path.as_path(), None).expect(ERROR_DB_CREATION);
}
Arc::new(open_db_read_only(path.as_path()).expect(ERROR_DB_OPEN))
Arc::new(open_db_read_only(path.as_path(), None).expect(ERROR_DB_OPEN))
}
}
@ -211,13 +212,13 @@ mod tests {
// Database is empty
{
let db = init_db(&path);
let db = init_db(&path, None);
assert_matches!(db, Ok(_));
}
// Database is not empty, current version is the same as in the file
{
let db = init_db(&path);
let db = init_db(&path, None);
assert_matches!(db, Ok(_));
}
@ -225,7 +226,7 @@ mod tests {
{
std::fs::write(path.path().join(db_version_file_path(&path)), "invalid-version")
.unwrap();
let db = init_db(&path);
let db = init_db(&path, None);
assert!(db.is_err());
assert_matches!(
db.unwrap_err().downcast_ref::<DatabaseVersionError>(),
@ -236,7 +237,7 @@ mod tests {
// Database is not empty, version file contains not matching version
{
std::fs::write(path.path().join(db_version_file_path(&path)), "0").unwrap();
let db = init_db(&path);
let db = init_db(&path, None);
assert!(db.is_err());
assert_matches!(
db.unwrap_err().downcast_ref::<DatabaseVersionError>(),

View File

@ -1,7 +1,7 @@
mod utils;
use ::ffi::*;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use ffi::*;
use pprof::criterion::{Output, PProfProfiler};
use reth_libmdbx::*;
use std::ptr;

View File

@ -90,6 +90,7 @@ where
spill_max_denominator: None,
spill_min_denominator: None,
geometry: None,
log_level: None,
_marker: PhantomData,
}
}
@ -384,6 +385,7 @@ where
spill_max_denominator: Option<u64>,
spill_min_denominator: Option<u64>,
geometry: Option<Geometry<(Option<usize>, Option<usize>)>>,
log_level: Option<ffi::MDBX_log_level_t>,
_marker: PhantomData<E>,
}
@ -408,7 +410,14 @@ where
) -> Result<Environment<E>> {
let mut env: *mut ffi::MDBX_env = ptr::null_mut();
unsafe {
if let Some(log_level) = self.log_level {
// Returns the previously debug_flags in the 0-15 bits and log_level in the
// 16-31 bits, no need to use `mdbx_result`.
ffi::mdbx_setup_debug(log_level, ffi::MDBX_DBG_DONTCHANGE, None);
}
mdbx_result(ffi::mdbx_env_create(&mut env))?;
if let Err(e) = (|| {
if let Some(geometry) = &self.geometry {
let mut min_size = -1;
@ -618,4 +627,9 @@ where
});
self
}
pub fn set_log_level(&mut self, log_level: ffi::MDBX_log_level_t) -> &mut Self {
self.log_level = Some(log_level);
self
}
}

View File

@ -19,7 +19,9 @@ pub use crate::{
flags::*,
transaction::{Transaction, TransactionKind, RO, RW},
};
pub use ffi::MDBX_dbi as DBI;
pub mod ffi {
pub use ffi::{MDBX_dbi as DBI, MDBX_log_level_t as LogLevel};
}
mod codec;
mod cursor;

View File

@ -19,6 +19,7 @@ use tracing::trace;
mod provider;
pub use provider::{DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW};
use reth_interfaces::db::LogLevel;
/// A common provider that fetches data from a database.
///
@ -61,9 +62,11 @@ impl<DB: Database> ProviderFactory<DB> {
pub fn new_with_database_path<P: AsRef<std::path::Path>>(
path: P,
chain_spec: Arc<ChainSpec>,
log_level: Option<LogLevel>,
) -> Result<ProviderFactory<DatabaseEnv>> {
Ok(ProviderFactory::<DatabaseEnv> {
db: init_db(path).map_err(|e| reth_interfaces::Error::Custom(e.to_string()))?,
db: init_db(path, log_level)
.map_err(|e| reth_interfaces::Error::Custom(e.to_string()))?,
chain_spec,
})
}
@ -402,6 +405,7 @@ mod tests {
let factory = ProviderFactory::<DatabaseEnv>::new_with_database_path(
tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path(),
Arc::new(chain_spec),
None,
)
.unwrap();

View File

@ -18,7 +18,7 @@ fn main() -> eyre::Result<()> {
// Opens a RO handle to the database file.
// TODO: Should be able to do `ProviderFactory::new_with_db_path_ro(...)` instead of
// doing in 2 steps.
let db = open_db_read_only(&Path::new(&std::env::var("RETH_DB_PATH")?))?;
let db = open_db_read_only(&Path::new(&std::env::var("RETH_DB_PATH")?), None)?;
// Instantiate a provider factory for Ethereum mainnet using the provided DB.
// TODO: Should the DB version include the spec so that you do not need to specify it here?

View File

@ -30,7 +30,7 @@ use std::{path::Path, sync::Arc};
#[tokio::main]
async fn main() -> eyre::Result<()> {
// 1. Setup the DB
let db = Arc::new(open_db_read_only(&Path::new(&std::env::var("RETH_DB_PATH")?))?);
let db = Arc::new(open_db_read_only(&Path::new(&std::env::var("RETH_DB_PATH")?), None)?);
let spec = Arc::new(ChainSpecBuilder::mainnet().build());
let factory = ProviderFactory::new(db.clone(), spec.clone());