feat(storage, mdbx): transaction manager (#6126)

This commit is contained in:
Alexey Shekhirin
2024-01-23 12:24:56 +00:00
committed by GitHub
parent 9a5120a883
commit a6f8e449f7
36 changed files with 821 additions and 262 deletions

2
Cargo.lock generated
View File

@ -6256,6 +6256,7 @@ dependencies = [
"bitflags 2.4.2", "bitflags 2.4.2",
"byteorder", "byteorder",
"criterion", "criterion",
"dashmap",
"derive_more", "derive_more",
"indexmap 2.1.0", "indexmap 2.1.0",
"libc", "libc",
@ -6267,6 +6268,7 @@ dependencies = [
"reth-mdbx-sys", "reth-mdbx-sys",
"tempfile", "tempfile",
"thiserror", "thiserror",
"tracing",
] ]
[[package]] [[package]]

View File

@ -4,6 +4,7 @@ use crate::dirs::{ChainPath, DataDirPath, MaybePlatformPath};
use alloy_chains::Chain; use alloy_chains::Chain;
use reth_db::{ use reth_db::{
init_db, init_db,
mdbx::DatabaseArguments,
test_utils::{create_test_rw_db, TempDatabase}, test_utils::{create_test_rw_db, TempDatabase},
DatabaseEnv, DatabaseEnv,
}; };
@ -54,7 +55,10 @@ impl DatabaseBuilder {
let db_path = data_dir.db_path(); let db_path = data_dir.db_path();
tracing::info!(target: "reth::cli", path = ?db_path, "Opening database"); tracing::info!(target: "reth::cli", path = ?db_path, "Opening database");
let db = Arc::new(init_db(db_path.clone(), log_level)?.with_metrics()); let db = Arc::new(
init_db(db_path.clone(), DatabaseArguments::default().log_level(log_level))?
.with_metrics(),
);
Ok(DatabaseInstance::Real { db, data_dir }) Ok(DatabaseInstance::Real { db, data_dir })
} }
} }

View File

@ -15,12 +15,12 @@ use crate::{
dirs::{DataDirPath, PlatformPath}, dirs::{DataDirPath, PlatformPath},
}; };
use reth_db::{ use reth_db::{
cursor::DbCursorRO, database::Database, open_db_read_only, table::Table, transaction::DbTx, cursor::DbCursorRO, database::Database, mdbx::DatabaseArguments, open_db_read_only,
AccountChangeSet, AccountHistory, AccountsTrie, BlockBodyIndices, BlockOmmers, table::Table, transaction::DbTx, AccountChangeSet, AccountHistory, AccountsTrie,
BlockWithdrawals, Bytecodes, CanonicalHeaders, DatabaseEnv, HashedAccount, HashedStorage, BlockBodyIndices, BlockOmmers, BlockWithdrawals, Bytecodes, CanonicalHeaders, DatabaseEnv,
HeaderNumbers, HeaderTD, Headers, PlainAccountState, PlainStorageState, PruneCheckpoints, HashedAccount, HashedStorage, HeaderNumbers, HeaderTD, Headers, PlainAccountState,
Receipts, StorageChangeSet, StorageHistory, StoragesTrie, SyncStage, SyncStageProgress, Tables, PlainStorageState, PruneCheckpoints, Receipts, StorageChangeSet, StorageHistory, StoragesTrie,
TransactionBlock, Transactions, TxHashNumber, TxSenders, SyncStage, SyncStageProgress, Tables, TransactionBlock, Transactions, TxHashNumber, TxSenders,
}; };
use tracing::info; use tracing::info;
@ -61,7 +61,10 @@ impl Command {
pub fn execute(self, tool: &DbTool<'_, DatabaseEnv>) -> eyre::Result<()> { pub fn execute(self, tool: &DbTool<'_, DatabaseEnv>) -> eyre::Result<()> {
// open second db // open second db
let second_db_path: PathBuf = self.secondary_datadir.join("db").into(); let second_db_path: PathBuf = self.secondary_datadir.join("db").into();
let second_db = open_db_read_only(&second_db_path, self.second_db.log_level)?; let second_db = open_db_read_only(
&second_db_path,
DatabaseArguments::default().log_level(self.second_db.log_level),
)?;
let tables = match self.table { let tables = match self.table {
Some(table) => vec![table], Some(table) => vec![table],

View File

@ -14,7 +14,9 @@ use eyre::WrapErr;
use human_bytes::human_bytes; use human_bytes::human_bytes;
use reth_db::{ use reth_db::{
database::Database, database::Database,
mdbx, open_db, open_db_read_only, mdbx,
mdbx::DatabaseArguments,
open_db, open_db_read_only,
version::{get_db_version, DatabaseVersionError, DB_VERSION}, version::{get_db_version, DatabaseVersionError, DB_VERSION},
Tables, Tables,
}; };
@ -102,7 +104,10 @@ impl Command {
match self.command { match self.command {
// TODO: We'll need to add this on the DB trait. // TODO: We'll need to add this on the DB trait.
Subcommands::Stats { .. } => { Subcommands::Stats { .. } => {
let db = open_db_read_only(&db_path, self.db.log_level)?; let db = open_db_read_only(
&db_path,
DatabaseArguments::default().log_level(self.db.log_level),
)?;
let tool = DbTool::new(&db, self.chain.clone())?; let tool = DbTool::new(&db, self.chain.clone())?;
let mut stats_table = ComfyTable::new(); let mut stats_table = ComfyTable::new();
stats_table.load_preset(comfy_table::presets::ASCII_MARKDOWN); stats_table.load_preset(comfy_table::presets::ASCII_MARKDOWN);
@ -186,17 +191,26 @@ impl Command {
println!("{stats_table}"); println!("{stats_table}");
} }
Subcommands::List(command) => { Subcommands::List(command) => {
let db = open_db_read_only(&db_path, self.db.log_level)?; let db = open_db_read_only(
&db_path,
DatabaseArguments::default().log_level(self.db.log_level),
)?;
let tool = DbTool::new(&db, self.chain.clone())?; let tool = DbTool::new(&db, self.chain.clone())?;
command.execute(&tool)?; command.execute(&tool)?;
} }
Subcommands::Diff(command) => { Subcommands::Diff(command) => {
let db = open_db_read_only(&db_path, self.db.log_level)?; let db = open_db_read_only(
&db_path,
DatabaseArguments::default().log_level(self.db.log_level),
)?;
let tool = DbTool::new(&db, self.chain.clone())?; let tool = DbTool::new(&db, self.chain.clone())?;
command.execute(&tool)?; command.execute(&tool)?;
} }
Subcommands::Get(command) => { Subcommands::Get(command) => {
let db = open_db_read_only(&db_path, self.db.log_level)?; let db = open_db_read_only(
&db_path,
DatabaseArguments::default().log_level(self.db.log_level),
)?;
let tool = DbTool::new(&db, self.chain.clone())?; let tool = DbTool::new(&db, self.chain.clone())?;
command.execute(&tool)?; command.execute(&tool)?;
} }
@ -216,12 +230,14 @@ impl Command {
} }
} }
let db = open_db(&db_path, self.db.log_level)?; let db =
open_db(&db_path, DatabaseArguments::default().log_level(self.db.log_level))?;
let mut tool = DbTool::new(&db, self.chain.clone())?; let mut tool = DbTool::new(&db, self.chain.clone())?;
tool.drop(db_path)?; tool.drop(db_path)?;
} }
Subcommands::Clear(command) => { Subcommands::Clear(command) => {
let db = open_db(&db_path, self.db.log_level)?; let db =
open_db(&db_path, DatabaseArguments::default().log_level(self.db.log_level))?;
command.execute(&db)?; command.execute(&db)?;
} }
Subcommands::Snapshot(command) => { Subcommands::Snapshot(command) => {

View File

@ -3,7 +3,7 @@ use super::{
Command, Command,
}; };
use rand::{seq::SliceRandom, Rng}; use rand::{seq::SliceRandom, Rng};
use reth_db::{open_db_read_only, snapshot::HeaderMask}; use reth_db::{mdbx::DatabaseArguments, open_db_read_only, snapshot::HeaderMask};
use reth_interfaces::db::LogLevel; use reth_interfaces::db::LogLevel;
use reth_primitives::{ use reth_primitives::{
snapshot::{Compression, Filters, InclusionFilter, PerfectHashingFunction}, snapshot::{Compression, Filters, InclusionFilter, PerfectHashingFunction},
@ -28,7 +28,9 @@ impl Command {
inclusion_filter: InclusionFilter, inclusion_filter: InclusionFilter,
phf: Option<PerfectHashingFunction>, phf: Option<PerfectHashingFunction>,
) -> eyre::Result<()> { ) -> eyre::Result<()> {
let factory = ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone()); let db_args = DatabaseArguments::default().log_level(log_level);
let factory = ProviderFactory::new(open_db_read_only(db_path, db_args)?, chain.clone());
let provider = factory.provider()?; let provider = factory.provider()?;
let tip = provider.last_block_number()?; let tip = provider.last_block_number()?;
let block_range = let block_range =
@ -43,7 +45,7 @@ impl Command {
let mut row_indexes = block_range.clone().collect::<Vec<_>>(); let mut row_indexes = block_range.clone().collect::<Vec<_>>();
let mut rng = rand::thread_rng(); let mut rng = rand::thread_rng();
let tx_range = ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone()) let tx_range = ProviderFactory::new(open_db_read_only(db_path, db_args)?, chain.clone())
.provider()? .provider()?
.transaction_range_by_block_range(block_range.clone())?; .transaction_range_by_block_range(block_range.clone())?;
@ -61,7 +63,7 @@ impl Command {
for bench_kind in [BenchKind::Walk, BenchKind::RandomAll] { for bench_kind in [BenchKind::Walk, BenchKind::RandomAll] {
bench( bench(
bench_kind, bench_kind,
(open_db_read_only(db_path, log_level)?, chain.clone()), (open_db_read_only(db_path, db_args)?, chain.clone()),
SnapshotSegment::Headers, SnapshotSegment::Headers,
filters, filters,
compression, compression,
@ -92,7 +94,7 @@ impl Command {
let num = row_indexes[rng.gen_range(0..row_indexes.len())]; let num = row_indexes[rng.gen_range(0..row_indexes.len())];
bench( bench(
BenchKind::RandomOne, BenchKind::RandomOne,
(open_db_read_only(db_path, log_level)?, chain.clone()), (open_db_read_only(db_path, db_args)?, chain.clone()),
SnapshotSegment::Headers, SnapshotSegment::Headers,
filters, filters,
compression, compression,
@ -113,14 +115,14 @@ impl Command {
{ {
let num = row_indexes[rng.gen_range(0..row_indexes.len())] as u64; let num = row_indexes[rng.gen_range(0..row_indexes.len())] as u64;
let header_hash = let header_hash =
ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone()) ProviderFactory::new(open_db_read_only(db_path, db_args)?, chain.clone())
.header_by_number(num)? .header_by_number(num)?
.ok_or(ProviderError::HeaderNotFound(num.into()))? .ok_or(ProviderError::HeaderNotFound(num.into()))?
.hash_slow(); .hash_slow();
bench( bench(
BenchKind::RandomHash, BenchKind::RandomHash,
(open_db_read_only(db_path, log_level)?, chain.clone()), (open_db_read_only(db_path, db_args)?, chain.clone()),
SnapshotSegment::Headers, SnapshotSegment::Headers,
filters, filters,
compression, compression,

View File

@ -2,7 +2,11 @@ use clap::{builder::RangedU64ValueParser, Parser};
use human_bytes::human_bytes; use human_bytes::human_bytes;
use itertools::Itertools; use itertools::Itertools;
use rayon::iter::{IntoParallelIterator, ParallelIterator}; use rayon::iter::{IntoParallelIterator, ParallelIterator};
use reth_db::{database::Database, open_db_read_only, DatabaseEnv}; use reth_db::{
database::Database,
mdbx::{DatabaseArguments, MaxReadTransactionDuration},
open_db_read_only, DatabaseEnv,
};
use reth_interfaces::db::LogLevel; use reth_interfaces::db::LogLevel;
use reth_nippy_jar::{NippyJar, NippyJarCursor}; use reth_nippy_jar::{NippyJar, NippyJarCursor};
use reth_primitives::{ use reth_primitives::{
@ -89,7 +93,11 @@ impl Command {
); );
{ {
let db = open_db_read_only(db_path, None)?; let db = open_db_read_only(
db_path,
DatabaseArguments::default()
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)),
)?;
let factory = Arc::new(ProviderFactory::new(db, chain.clone())); let factory = Arc::new(ProviderFactory::new(db, chain.clone()));
if !self.only_bench { if !self.only_bench {

View File

@ -14,6 +14,7 @@ use reth_provider::{
TransactionsProvider, TransactionsProviderExt, TransactionsProvider, TransactionsProviderExt,
}; };
use reth_db::mdbx::DatabaseArguments;
use std::{ use std::{
path::{Path, PathBuf}, path::{Path, PathBuf},
sync::Arc, sync::Arc,
@ -29,7 +30,9 @@ impl Command {
inclusion_filter: InclusionFilter, inclusion_filter: InclusionFilter,
phf: Option<PerfectHashingFunction>, phf: Option<PerfectHashingFunction>,
) -> eyre::Result<()> { ) -> eyre::Result<()> {
let factory = ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone()); let db_args = DatabaseArguments::default().log_level(log_level);
let factory = ProviderFactory::new(open_db_read_only(db_path, db_args)?, chain.clone());
let provider = factory.provider()?; let provider = factory.provider()?;
let tip = provider.last_block_number()?; let tip = provider.last_block_number()?;
let block_range = let block_range =
@ -43,7 +46,7 @@ impl Command {
let mut rng = rand::thread_rng(); let mut rng = rand::thread_rng();
let tx_range = ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone()) let tx_range = ProviderFactory::new(open_db_read_only(db_path, db_args)?, chain.clone())
.provider()? .provider()?
.transaction_range_by_block_range(block_range.clone())?; .transaction_range_by_block_range(block_range.clone())?;
@ -64,7 +67,7 @@ impl Command {
for bench_kind in [BenchKind::Walk, BenchKind::RandomAll] { for bench_kind in [BenchKind::Walk, BenchKind::RandomAll] {
bench( bench(
bench_kind, bench_kind,
(open_db_read_only(db_path, log_level)?, chain.clone()), (open_db_read_only(db_path, db_args)?, chain.clone()),
SnapshotSegment::Receipts, SnapshotSegment::Receipts,
filters, filters,
compression, compression,
@ -95,7 +98,7 @@ impl Command {
let num = row_indexes[rng.gen_range(0..row_indexes.len())]; let num = row_indexes[rng.gen_range(0..row_indexes.len())];
bench( bench(
BenchKind::RandomOne, BenchKind::RandomOne,
(open_db_read_only(db_path, log_level)?, chain.clone()), (open_db_read_only(db_path, db_args)?, chain.clone()),
SnapshotSegment::Receipts, SnapshotSegment::Receipts,
filters, filters,
compression, compression,
@ -115,15 +118,14 @@ impl Command {
// BENCHMARK QUERYING A RANDOM RECEIPT BY HASH // BENCHMARK QUERYING A RANDOM RECEIPT BY HASH
{ {
let num = row_indexes[rng.gen_range(0..row_indexes.len())] as u64; let num = row_indexes[rng.gen_range(0..row_indexes.len())] as u64;
let tx_hash = let tx_hash = ProviderFactory::new(open_db_read_only(db_path, db_args)?, chain.clone())
ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone()) .transaction_by_id(num)?
.transaction_by_id(num)? .ok_or(ProviderError::ReceiptNotFound(num.into()))?
.ok_or(ProviderError::ReceiptNotFound(num.into()))? .hash();
.hash();
bench( bench(
BenchKind::RandomHash, BenchKind::RandomHash,
(open_db_read_only(db_path, log_level)?, chain.clone()), (open_db_read_only(db_path, db_args)?, chain.clone()),
SnapshotSegment::Receipts, SnapshotSegment::Receipts,
filters, filters,
compression, compression,

View File

@ -14,6 +14,7 @@ use reth_provider::{
TransactionsProvider, TransactionsProviderExt, TransactionsProvider, TransactionsProviderExt,
}; };
use reth_db::mdbx::DatabaseArguments;
use std::{ use std::{
path::{Path, PathBuf}, path::{Path, PathBuf},
sync::Arc, sync::Arc,
@ -29,7 +30,9 @@ impl Command {
inclusion_filter: InclusionFilter, inclusion_filter: InclusionFilter,
phf: Option<PerfectHashingFunction>, phf: Option<PerfectHashingFunction>,
) -> eyre::Result<()> { ) -> eyre::Result<()> {
let factory = ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone()); let db_args = DatabaseArguments::default().log_level(log_level);
let factory = ProviderFactory::new(open_db_read_only(db_path, db_args)?, chain.clone());
let provider = factory.provider()?; let provider = factory.provider()?;
let tip = provider.last_block_number()?; let tip = provider.last_block_number()?;
let block_range = let block_range =
@ -61,7 +64,7 @@ impl Command {
for bench_kind in [BenchKind::Walk, BenchKind::RandomAll] { for bench_kind in [BenchKind::Walk, BenchKind::RandomAll] {
bench( bench(
bench_kind, bench_kind,
(open_db_read_only(db_path, log_level)?, chain.clone()), (open_db_read_only(db_path, db_args)?, chain.clone()),
SnapshotSegment::Transactions, SnapshotSegment::Transactions,
filters, filters,
compression, compression,
@ -93,7 +96,7 @@ impl Command {
let num = row_indexes[rng.gen_range(0..row_indexes.len())]; let num = row_indexes[rng.gen_range(0..row_indexes.len())];
bench( bench(
BenchKind::RandomOne, BenchKind::RandomOne,
(open_db_read_only(db_path, log_level)?, chain.clone()), (open_db_read_only(db_path, db_args)?, chain.clone()),
SnapshotSegment::Transactions, SnapshotSegment::Transactions,
filters, filters,
compression, compression,
@ -115,14 +118,14 @@ impl Command {
{ {
let num = row_indexes[rng.gen_range(0..row_indexes.len())] as u64; let num = row_indexes[rng.gen_range(0..row_indexes.len())] as u64;
let transaction_hash = let transaction_hash =
ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone()) ProviderFactory::new(open_db_read_only(db_path, db_args)?, chain.clone())
.transaction_by_id(num)? .transaction_by_id(num)?
.ok_or(ProviderError::TransactionNotFound(num.into()))? .ok_or(ProviderError::TransactionNotFound(num.into()))?
.hash(); .hash();
bench( bench(
BenchKind::RandomHash, BenchKind::RandomHash,
(open_db_read_only(db_path, log_level)?, chain.clone()), (open_db_read_only(db_path, db_args)?, chain.clone()),
SnapshotSegment::Transactions, SnapshotSegment::Transactions,
filters, filters,
compression, compression,

View File

@ -11,7 +11,7 @@ use reth_beacon_consensus::BeaconConsensus;
use reth_blockchain_tree::{ use reth_blockchain_tree::{
BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals,
}; };
use reth_db::{init_db, DatabaseEnv}; use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv};
use reth_interfaces::{consensus::Consensus, RethResult}; use reth_interfaces::{consensus::Consensus, RethResult};
use reth_node_api::PayloadBuilderAttributes; use reth_node_api::PayloadBuilderAttributes;
use reth_payload_builder::database::CachedReads; use reth_payload_builder::database::CachedReads;
@ -150,7 +150,8 @@ impl Command {
fs::create_dir_all(&db_path)?; fs::create_dir_all(&db_path)?;
// initialize the database // initialize the database
let db = Arc::new(init_db(db_path, self.db.log_level)?); let db =
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
let provider_factory = ProviderFactory::new(Arc::clone(&db), Arc::clone(&self.chain)); let provider_factory = ProviderFactory::new(Arc::clone(&db), Arc::clone(&self.chain));
let consensus: Arc<dyn Consensus> = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); let consensus: Arc<dyn Consensus> = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain)));

View File

@ -16,7 +16,7 @@ use clap::Parser;
use futures::{stream::select as stream_select, StreamExt}; use futures::{stream::select as stream_select, StreamExt};
use reth_beacon_consensus::BeaconConsensus; use reth_beacon_consensus::BeaconConsensus;
use reth_config::Config; use reth_config::Config;
use reth_db::{database::Database, init_db, DatabaseEnv}; use reth_db::{database::Database, init_db, mdbx::DatabaseArguments, DatabaseEnv};
use reth_downloaders::{ use reth_downloaders::{
bodies::bodies::BodiesDownloaderBuilder, bodies::bodies::BodiesDownloaderBuilder,
headers::reverse_headers::ReverseHeadersDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder,
@ -204,7 +204,8 @@ impl Command {
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
let db_path = data_dir.db_path(); let db_path = data_dir.db_path();
fs::create_dir_all(&db_path)?; fs::create_dir_all(&db_path)?;
let db = Arc::new(init_db(db_path, self.db.log_level)?); let db =
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
let provider_factory = ProviderFactory::new(db.clone(), self.chain.clone()); let provider_factory = ProviderFactory::new(db.clone(), self.chain.clone());
debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis");

View File

@ -13,7 +13,7 @@ use crate::{
use backon::{ConstantBuilder, Retryable}; use backon::{ConstantBuilder, Retryable};
use clap::Parser; use clap::Parser;
use reth_config::Config; use reth_config::Config;
use reth_db::{init_db, DatabaseEnv}; use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv};
use reth_interfaces::executor::BlockValidationError; use reth_interfaces::executor::BlockValidationError;
use reth_network::NetworkHandle; use reth_network::NetworkHandle;
use reth_network_api::NetworkInfo; use reth_network_api::NetworkInfo;
@ -112,7 +112,8 @@ impl Command {
fs::create_dir_all(&db_path)?; fs::create_dir_all(&db_path)?;
// initialize the database // initialize the database
let db = Arc::new(init_db(db_path, self.db.log_level)?); let db =
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
let factory = ProviderFactory::new(&db, self.chain.clone()); let factory = ProviderFactory::new(&db, self.chain.clone());
let provider = factory.provider()?; let provider = factory.provider()?;

View File

@ -14,7 +14,9 @@ use backon::{ConstantBuilder, Retryable};
use clap::Parser; use clap::Parser;
use reth_beacon_consensus::BeaconConsensus; use reth_beacon_consensus::BeaconConsensus;
use reth_config::Config; use reth_config::Config;
use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx, DatabaseEnv}; use reth_db::{
cursor::DbCursorRO, init_db, mdbx::DatabaseArguments, tables, transaction::DbTx, DatabaseEnv,
};
use reth_interfaces::{consensus::Consensus, p2p::full_block::FullBlockClient}; use reth_interfaces::{consensus::Consensus, p2p::full_block::FullBlockClient};
use reth_network::NetworkHandle; use reth_network::NetworkHandle;
use reth_network_api::NetworkInfo; use reth_network_api::NetworkInfo;
@ -121,7 +123,8 @@ impl Command {
fs::create_dir_all(&db_path)?; fs::create_dir_all(&db_path)?;
// initialize the database // initialize the database
let db = Arc::new(init_db(db_path, self.db.log_level)?); let db =
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
let factory = ProviderFactory::new(&db, self.chain.clone()); let factory = ProviderFactory::new(&db, self.chain.clone());
let provider_rw = factory.provider_rw()?; let provider_rw = factory.provider_rw()?;

View File

@ -17,7 +17,7 @@ use reth_blockchain_tree::{
BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals,
}; };
use reth_config::Config; use reth_config::Config;
use reth_db::{init_db, DatabaseEnv}; use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv};
use reth_interfaces::consensus::Consensus; use reth_interfaces::consensus::Consensus;
use reth_network::NetworkHandle; use reth_network::NetworkHandle;
use reth_network_api::NetworkInfo; use reth_network_api::NetworkInfo;
@ -133,7 +133,8 @@ impl Command {
fs::create_dir_all(&db_path)?; fs::create_dir_all(&db_path)?;
// Initialize the database // Initialize the database
let db = Arc::new(init_db(db_path, self.db.log_level)?); let db =
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
let provider_factory = ProviderFactory::new(db.clone(), self.chain.clone()); let provider_factory = ProviderFactory::new(db.clone(), self.chain.clone());
let consensus: Arc<dyn Consensus> = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain))); let consensus: Arc<dyn Consensus> = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain)));

View File

@ -10,7 +10,7 @@ use eyre::Context;
use futures::{Stream, StreamExt}; use futures::{Stream, StreamExt};
use reth_beacon_consensus::BeaconConsensus; use reth_beacon_consensus::BeaconConsensus;
use reth_config::Config; use reth_config::Config;
use reth_db::{database::Database, init_db}; use reth_db::{database::Database, init_db, mdbx::DatabaseArguments};
use reth_downloaders::{ use reth_downloaders::{
bodies::bodies::BodiesDownloaderBuilder, file_client::FileClient, bodies::bodies::BodiesDownloaderBuilder, file_client::FileClient,
headers::reverse_headers::ReverseHeadersDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder,
@ -89,7 +89,8 @@ impl ImportCommand {
let db_path = data_dir.db_path(); let db_path = data_dir.db_path();
info!(target: "reth::cli", path = ?db_path, "Opening database"); info!(target: "reth::cli", path = ?db_path, "Opening database");
let db = Arc::new(init_db(db_path, self.db.log_level)?); let db =
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
info!(target: "reth::cli", "Database opened"); info!(target: "reth::cli", "Database opened");
let provider_factory = ProviderFactory::new(db.clone(), self.chain.clone()); let provider_factory = ProviderFactory::new(db.clone(), self.chain.clone());

View File

@ -1,19 +1,18 @@
//! Command that initializes the node from a genesis file. //! Command that initializes the node from a genesis file.
use crate::init::init_genesis;
use clap::Parser;
use reth_db::init_db;
use reth_primitives::ChainSpec;
use std::sync::Arc;
use tracing::info;
use crate::{ use crate::{
args::{ args::{
utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
DatabaseArgs, DatabaseArgs,
}, },
dirs::{DataDirPath, MaybePlatformPath}, dirs::{DataDirPath, MaybePlatformPath},
init::init_genesis,
}; };
use clap::Parser;
use reth_db::{init_db, mdbx::DatabaseArguments};
use reth_primitives::ChainSpec;
use std::sync::Arc;
use tracing::info;
/// Initializes the database with the genesis block. /// Initializes the database with the genesis block.
#[derive(Debug, Parser)] #[derive(Debug, Parser)]
@ -53,7 +52,8 @@ impl InitCommand {
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
let db_path = data_dir.db_path(); let db_path = data_dir.db_path();
info!(target: "reth::cli", path = ?db_path, "Opening database"); info!(target: "reth::cli", path = ?db_path, "Opening database");
let db = Arc::new(init_db(&db_path, self.db.log_level)?); let db =
Arc::new(init_db(&db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
info!(target: "reth::cli", "Database opened"); info!(target: "reth::cli", "Database opened");
info!(target: "reth::cli", "Writing genesis block"); info!(target: "reth::cli", "Writing genesis block");

View File

@ -12,7 +12,7 @@ use crate::{
use backon::{ConstantBuilder, Retryable}; use backon::{ConstantBuilder, Retryable};
use clap::{Parser, Subcommand}; use clap::{Parser, Subcommand};
use reth_config::Config; use reth_config::Config;
use reth_db::open_db; use reth_db::{mdbx::DatabaseArguments, open_db};
use reth_discv4::NatResolver; use reth_discv4::NatResolver;
use reth_interfaces::p2p::bodies::client::BodiesClient; use reth_interfaces::p2p::bodies::client::BodiesClient;
use reth_primitives::{BlockHashOrNumber, ChainSpec, NodeRecord}; use reth_primitives::{BlockHashOrNumber, ChainSpec, NodeRecord};
@ -100,7 +100,10 @@ impl Command {
/// Execute `p2p` command /// Execute `p2p` command
pub async fn execute(&self) -> eyre::Result<()> { pub async fn execute(&self) -> eyre::Result<()> {
let tempdir = tempfile::TempDir::new()?; let tempdir = tempfile::TempDir::new()?;
let noop_db = Arc::new(open_db(&tempdir.into_path(), self.db.log_level)?); let noop_db = Arc::new(open_db(
&tempdir.into_path(),
DatabaseArguments::default().log_level(self.db.log_level),
)?);
// add network name to data dir // add network name to data dir
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);

View File

@ -48,7 +48,7 @@ impl Command {
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
let db_path = data_dir.db_path(); let db_path = data_dir.db_path();
fs::create_dir_all(&db_path)?; fs::create_dir_all(&db_path)?;
let db = Arc::new(init_db(db_path, None)?); let db = Arc::new(init_db(db_path, Default::default())?);
debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis");
init_genesis(db.clone(), self.chain.clone())?; init_genesis(db.clone(), self.chain.clone())?;

View File

@ -10,7 +10,9 @@ use crate::{
utils::DbTool, utils::DbTool,
}; };
use clap::Parser; use clap::Parser;
use reth_db::{database::Database, open_db, tables, transaction::DbTxMut, DatabaseEnv}; use reth_db::{
database::Database, mdbx::DatabaseArguments, open_db, tables, transaction::DbTxMut, DatabaseEnv,
};
use reth_primitives::{fs, stage::StageId, ChainSpec}; use reth_primitives::{fs, stage::StageId, ChainSpec};
use std::sync::Arc; use std::sync::Arc;
use tracing::info; use tracing::info;
@ -54,7 +56,8 @@ impl Command {
let db_path = data_dir.db_path(); let db_path = data_dir.db_path();
fs::create_dir_all(&db_path)?; fs::create_dir_all(&db_path)?;
let db = open_db(db_path.as_ref(), self.db.log_level)?; let db =
open_db(db_path.as_ref(), DatabaseArguments::default().log_level(self.db.log_level))?;
let tool = DbTool::new(&db, self.chain.clone())?; let tool = DbTool::new(&db, self.chain.clone())?;

View File

@ -29,6 +29,7 @@ use execution::dump_execution_stage;
mod merkle; mod merkle;
use merkle::dump_merkle_stage; use merkle::dump_merkle_stage;
use reth_db::mdbx::DatabaseArguments;
/// `reth dump-stage` command /// `reth dump-stage` command
#[derive(Debug, Parser)] #[derive(Debug, Parser)]
@ -101,7 +102,8 @@ impl Command {
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
let db_path = data_dir.db_path(); let db_path = data_dir.db_path();
info!(target: "reth::cli", path = ?db_path, "Opening database"); info!(target: "reth::cli", path = ?db_path, "Opening database");
let db = Arc::new(init_db(db_path, self.db.log_level)?); let db =
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
info!(target: "reth::cli", "Database opened"); info!(target: "reth::cli", "Database opened");
let tool = DbTool::new(&db, self.chain.clone())?; let tool = DbTool::new(&db, self.chain.clone())?;
@ -137,7 +139,7 @@ pub(crate) fn setup<DB: Database>(
info!(target: "reth::cli", ?output_db, "Creating separate db"); info!(target: "reth::cli", ?output_db, "Creating separate db");
let output_db = init_db(output_db, None)?; let output_db = init_db(output_db, Default::default())?;
output_db.update(|tx| { output_db.update(|tx| {
tx.import_table_with_range::<tables::BlockBodyIndices, _>( tx.import_table_with_range::<tables::BlockBodyIndices, _>(

View File

@ -15,7 +15,7 @@ use crate::{
use clap::Parser; use clap::Parser;
use reth_beacon_consensus::BeaconConsensus; use reth_beacon_consensus::BeaconConsensus;
use reth_config::Config; use reth_config::Config;
use reth_db::init_db; use reth_db::{init_db, mdbx::DatabaseArguments};
use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder;
use reth_primitives::ChainSpec; use reth_primitives::ChainSpec;
@ -123,7 +123,8 @@ impl Command {
let db_path = data_dir.db_path(); let db_path = data_dir.db_path();
info!(target: "reth::cli", path = ?db_path, "Opening database"); info!(target: "reth::cli", path = ?db_path, "Opening database");
let db = Arc::new(init_db(db_path, self.db.log_level)?); let db =
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
info!(target: "reth::cli", "Database opened"); info!(target: "reth::cli", "Database opened");
let factory = ProviderFactory::new(Arc::clone(&db), self.chain.clone()); let factory = ProviderFactory::new(Arc::clone(&db), self.chain.clone());

View File

@ -1,11 +1,5 @@
//! Unwinding a certain block range //! Unwinding a certain block range
use clap::{Parser, Subcommand};
use reth_db::{cursor::DbCursorRO, database::Database, open_db, tables, transaction::DbTx};
use reth_primitives::{BlockHashOrNumber, ChainSpec};
use reth_provider::{BlockExecutionWriter, ProviderFactory};
use std::{ops::RangeInclusive, sync::Arc};
use crate::{ use crate::{
args::{ args::{
utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
@ -13,6 +7,15 @@ use crate::{
}, },
dirs::{DataDirPath, MaybePlatformPath}, dirs::{DataDirPath, MaybePlatformPath},
}; };
use clap::{Parser, Subcommand};
use reth_db::{
cursor::DbCursorRO, database::Database, mdbx::DatabaseArguments, open_db, tables,
transaction::DbTx,
};
use reth_primitives::{BlockHashOrNumber, ChainSpec};
use reth_provider::{BlockExecutionWriter, ProviderFactory};
use std::{ops::RangeInclusive, sync::Arc};
/// `reth stage unwind` command /// `reth stage unwind` command
#[derive(Debug, Parser)] #[derive(Debug, Parser)]
pub struct Command { pub struct Command {
@ -56,7 +59,8 @@ impl Command {
eyre::bail!("Database {db_path:?} does not exist.") eyre::bail!("Database {db_path:?} does not exist.")
} }
let db = open_db(db_path.as_ref(), self.db.log_level)?; let db =
open_db(db_path.as_ref(), DatabaseArguments::default().log_level(self.db.log_level))?;
let range = self.command.unwind_range(&db)?; let range = self.command.unwind_range(&db)?;

View File

@ -16,7 +16,7 @@ workspace = true
reth-primitives.workspace = true reth-primitives.workspace = true
reth-interfaces.workspace = true reth-interfaces.workspace = true
reth-codecs.workspace = true reth-codecs.workspace = true
reth-libmdbx = { workspace = true, optional = true, features = ["return-borrowed"] } reth-libmdbx = { workspace = true, optional = true, features = ["return-borrowed", "read-tx-timeouts"] }
reth-nippy-jar.workspace = true reth-nippy-jar.workspace = true
reth-tracing.workspace = true reth-tracing.workspace = true

View File

@ -12,7 +12,8 @@ use metrics::{gauge, Label};
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use reth_interfaces::db::LogLevel; use reth_interfaces::db::LogLevel;
use reth_libmdbx::{ use reth_libmdbx::{
DatabaseFlags, Environment, EnvironmentFlags, Geometry, Mode, PageSize, SyncMode, RO, RW, DatabaseFlags, Environment, EnvironmentFlags, Geometry, MaxReadTransactionDuration, Mode,
PageSize, SyncMode, RO, RW,
}; };
use reth_tracing::tracing::error; use reth_tracing::tracing::error;
use std::{ops::Deref, path::Path}; use std::{ops::Deref, path::Path};
@ -54,6 +55,32 @@ pub enum DatabaseEnvKind {
RW, RW,
} }
/// Arguments for database initialization.
#[derive(Debug, Default, Clone, Copy)]
pub struct DatabaseArguments {
/// Database log level. If [None], the default value is used.
log_level: Option<LogLevel>,
/// Maximum duration of a read transaction. If [None], the default value is used.
max_read_transaction_duration: Option<MaxReadTransactionDuration>,
}
impl DatabaseArguments {
/// Set the log level.
pub fn log_level(mut self, log_level: Option<LogLevel>) -> Self {
self.log_level = log_level;
self
}
/// Set the maximum duration of a read transaction.
pub fn max_read_transaction_duration(
mut self,
max_read_transaction_duration: Option<MaxReadTransactionDuration>,
) -> Self {
self.max_read_transaction_duration = max_read_transaction_duration;
self
}
}
/// Wrapper for the libmdbx environment: [Environment] /// Wrapper for the libmdbx environment: [Environment]
#[derive(Debug)] #[derive(Debug)]
pub struct DatabaseEnv { pub struct DatabaseEnv {
@ -164,7 +191,7 @@ impl DatabaseEnv {
pub fn open( pub fn open(
path: &Path, path: &Path,
kind: DatabaseEnvKind, kind: DatabaseEnvKind,
log_level: Option<LogLevel>, args: DatabaseArguments,
) -> Result<DatabaseEnv, DatabaseError> { ) -> Result<DatabaseEnv, DatabaseError> {
let mut inner_env = Environment::builder(); let mut inner_env = Environment::builder();
@ -250,7 +277,7 @@ impl DatabaseEnv {
// https://github.com/paradigmxyz/reth/blob/fa2b9b685ed9787636d962f4366caf34a9186e66/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.c#L16017. // https://github.com/paradigmxyz/reth/blob/fa2b9b685ed9787636d962f4366caf34a9186e66/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.c#L16017.
inner_env.set_rp_augment_limit(256 * 1024); inner_env.set_rp_augment_limit(256 * 1024);
if let Some(log_level) = log_level { if let Some(log_level) = args.log_level {
// Levels higher than [LogLevel::Notice] require libmdbx built with `MDBX_DEBUG` option. // Levels higher than [LogLevel::Notice] require libmdbx built with `MDBX_DEBUG` option.
let is_log_level_available = if cfg!(debug_assertions) { let is_log_level_available = if cfg!(debug_assertions) {
true true
@ -276,6 +303,10 @@ impl DatabaseEnv {
} }
} }
if let Some(max_read_transaction_duration) = args.max_read_transaction_duration {
inner_env.set_max_read_transaction_duration(max_read_transaction_duration);
}
let env = DatabaseEnv { let env = DatabaseEnv {
inner: inner_env.open(path).map_err(|e| DatabaseError::Open(e.into()))?, inner: inner_env.open(path).map_err(|e| DatabaseError::Open(e.into()))?,
with_metrics: false, with_metrics: false,
@ -346,7 +377,8 @@ mod tests {
/// Create database for testing with specified path /// Create database for testing with specified path
fn create_test_db_with_path(kind: DatabaseEnvKind, path: &Path) -> DatabaseEnv { fn create_test_db_with_path(kind: DatabaseEnvKind, path: &Path) -> DatabaseEnv {
let env = DatabaseEnv::open(path, kind, None).expect(ERROR_DB_CREATION); let env =
DatabaseEnv::open(path, kind, DatabaseArguments::default()).expect(ERROR_DB_CREATION);
env.create_tables().expect(ERROR_TABLE_CREATION); env.create_tables().expect(ERROR_TABLE_CREATION);
env env
} }
@ -971,7 +1003,8 @@ mod tests {
assert_eq!(result.expect(ERROR_RETURN_VALUE), 200); assert_eq!(result.expect(ERROR_RETURN_VALUE), 200);
} }
let env = DatabaseEnv::open(&path, DatabaseEnvKind::RO, None).expect(ERROR_DB_CREATION); let env = DatabaseEnv::open(&path, DatabaseEnvKind::RO, Default::default())
.expect(ERROR_DB_CREATION);
// GET // GET
let result = let result =

View File

@ -87,13 +87,13 @@ pub use utils::is_database_empty;
#[cfg(feature = "mdbx")] #[cfg(feature = "mdbx")]
pub use mdbx::{DatabaseEnv, DatabaseEnvKind}; pub use mdbx::{DatabaseEnv, DatabaseEnvKind};
use crate::mdbx::DatabaseArguments;
use eyre::WrapErr; use eyre::WrapErr;
use reth_interfaces::db::LogLevel;
use std::path::Path; use std::path::Path;
/// Opens up an existing database or creates a new one at the specified path. Creates tables if /// Opens up an existing database or creates a new one at the specified path. Creates tables if
/// necessary. Read/Write mode. /// necessary. Read/Write mode.
pub fn init_db<P: AsRef<Path>>(path: P, log_level: Option<LogLevel>) -> eyre::Result<DatabaseEnv> { pub fn init_db<P: AsRef<Path>>(path: P, args: DatabaseArguments) -> eyre::Result<DatabaseEnv> {
use crate::version::{check_db_version_file, create_db_version_file, DatabaseVersionError}; use crate::version::{check_db_version_file, create_db_version_file, DatabaseVersionError};
let rpath = path.as_ref(); let rpath = path.as_ref();
@ -110,7 +110,7 @@ pub fn init_db<P: AsRef<Path>>(path: P, log_level: Option<LogLevel>) -> eyre::Re
} }
#[cfg(feature = "mdbx")] #[cfg(feature = "mdbx")]
{ {
let db = DatabaseEnv::open(rpath, DatabaseEnvKind::RW, log_level)?; let db = DatabaseEnv::open(rpath, DatabaseEnvKind::RW, args)?;
db.create_tables()?; db.create_tables()?;
Ok(db) Ok(db)
} }
@ -121,10 +121,10 @@ pub fn init_db<P: AsRef<Path>>(path: P, log_level: Option<LogLevel>) -> eyre::Re
} }
/// Opens up an existing database. Read only mode. It doesn't create it or create tables if missing. /// Opens up an existing database. Read only mode. It doesn't create it or create tables if missing.
pub fn open_db_read_only(path: &Path, log_level: Option<LogLevel>) -> eyre::Result<DatabaseEnv> { pub fn open_db_read_only(path: &Path, args: DatabaseArguments) -> eyre::Result<DatabaseEnv> {
#[cfg(feature = "mdbx")] #[cfg(feature = "mdbx")]
{ {
DatabaseEnv::open(path, DatabaseEnvKind::RO, log_level) DatabaseEnv::open(path, DatabaseEnvKind::RO, args)
.with_context(|| format!("Could not open database at path: {}", path.display())) .with_context(|| format!("Could not open database at path: {}", path.display()))
} }
#[cfg(not(feature = "mdbx"))] #[cfg(not(feature = "mdbx"))]
@ -135,10 +135,10 @@ pub fn open_db_read_only(path: &Path, log_level: Option<LogLevel>) -> eyre::Resu
/// Opens up an existing database. Read/Write mode with WriteMap enabled. It doesn't create it or /// Opens up an existing database. Read/Write mode with WriteMap enabled. It doesn't create it or
/// create tables if missing. /// create tables if missing.
pub fn open_db(path: &Path, log_level: Option<LogLevel>) -> eyre::Result<DatabaseEnv> { pub fn open_db(path: &Path, args: DatabaseArguments) -> eyre::Result<DatabaseEnv> {
#[cfg(feature = "mdbx")] #[cfg(feature = "mdbx")]
{ {
DatabaseEnv::open(path, DatabaseEnvKind::RW, log_level) DatabaseEnv::open(path, DatabaseEnvKind::RW, args)
.with_context(|| format!("Could not open database at path: {}", path.display())) .with_context(|| format!("Could not open database at path: {}", path.display()))
} }
#[cfg(not(feature = "mdbx"))] #[cfg(not(feature = "mdbx"))]
@ -155,6 +155,7 @@ pub mod test_utils {
database::Database, database::Database,
database_metrics::{DatabaseMetadata, DatabaseMetadataValue, DatabaseMetrics}, database_metrics::{DatabaseMetadata, DatabaseMetadataValue, DatabaseMetrics},
}; };
use reth_libmdbx::MaxReadTransactionDuration;
use reth_primitives::fs; use reth_primitives::fs;
use std::{path::PathBuf, sync::Arc}; use std::{path::PathBuf, sync::Arc};
@ -235,7 +236,12 @@ pub mod test_utils {
let path = tempdir_path(); let path = tempdir_path();
let emsg = format!("{}: {:?}", ERROR_DB_CREATION, path); let emsg = format!("{}: {:?}", ERROR_DB_CREATION, path);
let db = init_db(&path, None).expect(&emsg); let db = init_db(
&path,
DatabaseArguments::default()
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)),
)
.expect(&emsg);
Arc::new(TempDatabase { db: Some(db), path }) Arc::new(TempDatabase { db: Some(db), path })
} }
@ -243,17 +249,25 @@ pub mod test_utils {
/// Create read/write database for testing /// Create read/write database for testing
pub fn create_test_rw_db_with_path<P: AsRef<Path>>(path: P) -> Arc<TempDatabase<DatabaseEnv>> { pub fn create_test_rw_db_with_path<P: AsRef<Path>>(path: P) -> Arc<TempDatabase<DatabaseEnv>> {
let path = path.as_ref().to_path_buf(); let path = path.as_ref().to_path_buf();
let db = init_db(path.as_path(), None).expect(ERROR_DB_CREATION); let db = init_db(
path.as_path(),
DatabaseArguments::default()
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)),
)
.expect(ERROR_DB_CREATION);
Arc::new(TempDatabase { db: Some(db), path }) Arc::new(TempDatabase { db: Some(db), path })
} }
/// Create read only database for testing /// Create read only database for testing
pub fn create_test_ro_db() -> Arc<TempDatabase<DatabaseEnv>> { pub fn create_test_ro_db() -> Arc<TempDatabase<DatabaseEnv>> {
let args = DatabaseArguments::default()
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded));
let path = tempdir_path(); let path = tempdir_path();
{ {
init_db(path.as_path(), None).expect(ERROR_DB_CREATION); init_db(path.as_path(), args).expect(ERROR_DB_CREATION);
} }
let db = open_db_read_only(path.as_path(), None).expect(ERROR_DB_OPEN); let db = open_db_read_only(path.as_path(), args).expect(ERROR_DB_OPEN);
Arc::new(TempDatabase { db: Some(db), path }) Arc::new(TempDatabase { db: Some(db), path })
} }
} }
@ -262,9 +276,11 @@ pub mod test_utils {
mod tests { mod tests {
use crate::{ use crate::{
init_db, init_db,
mdbx::DatabaseArguments,
version::{db_version_file_path, DatabaseVersionError}, version::{db_version_file_path, DatabaseVersionError},
}; };
use assert_matches::assert_matches; use assert_matches::assert_matches;
use reth_libmdbx::MaxReadTransactionDuration;
use reth_primitives::fs; use reth_primitives::fs;
use tempfile::tempdir; use tempfile::tempdir;
@ -272,22 +288,25 @@ mod tests {
fn db_version() { fn db_version() {
let path = tempdir().unwrap(); let path = tempdir().unwrap();
let args = DatabaseArguments::default()
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded));
// Database is empty // Database is empty
{ {
let db = init_db(&path, None); let db = init_db(&path, args);
assert_matches!(db, Ok(_)); assert_matches!(db, Ok(_));
} }
// Database is not empty, current version is the same as in the file // Database is not empty, current version is the same as in the file
{ {
let db = init_db(&path, None); let db = init_db(&path, args);
assert_matches!(db, Ok(_)); assert_matches!(db, Ok(_));
} }
// Database is not empty, version file is malformed // Database is not empty, version file is malformed
{ {
fs::write(path.path().join(db_version_file_path(&path)), "invalid-version").unwrap(); fs::write(path.path().join(db_version_file_path(&path)), "invalid-version").unwrap();
let db = init_db(&path, None); let db = init_db(&path, args);
assert!(db.is_err()); assert!(db.is_err());
assert_matches!( assert_matches!(
db.unwrap_err().downcast_ref::<DatabaseVersionError>(), db.unwrap_err().downcast_ref::<DatabaseVersionError>(),
@ -298,7 +317,7 @@ mod tests {
// Database is not empty, version file contains not matching version // Database is not empty, version file contains not matching version
{ {
fs::write(path.path().join(db_version_file_path(&path)), "0").unwrap(); fs::write(path.path().join(db_version_file_path(&path)), "0").unwrap();
let db = init_db(&path, None); let db = init_db(&path, args);
assert!(db.is_err()); assert!(db.is_err());
assert_matches!( assert_matches!(
db.unwrap_err().downcast_ref::<DatabaseVersionError>(), db.unwrap_err().downcast_ref::<DatabaseVersionError>(),

View File

@ -22,6 +22,8 @@ indexmap = "2"
libc = "0.2" libc = "0.2"
parking_lot.workspace = true parking_lot.workspace = true
thiserror.workspace = true thiserror.workspace = true
dashmap = { version = "5.5.3", features = ["inline"], optional = true }
tracing = { workspace = true, optional = true }
ffi = { package = "reth-mdbx-sys", path = "./mdbx-sys" } ffi = { package = "reth-mdbx-sys", path = "./mdbx-sys" }
@ -31,6 +33,7 @@ libffi = "3.2.0"
[features] [features]
default = [] default = []
return-borrowed = [] return-borrowed = []
read-tx-timeouts = ["dashmap", "dashmap/inline", "tracing"]
[dev-dependencies] [dev-dependencies]
pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] }

View File

@ -41,7 +41,7 @@ impl<'tx> TableObject for Cow<'tx, [u8]> {
#[cfg(not(feature = "return-borrowed"))] #[cfg(not(feature = "return-borrowed"))]
{ {
let is_dirty = (!K::ONLY_CLEAN) && let is_dirty = (!K::IS_READ_ONLY) &&
crate::error::mdbx_result(ffi::mdbx_is_dirty(_txn, data_val.iov_base))?; crate::error::mdbx_result(ffi::mdbx_is_dirty(_txn, data_val.iov_base))?;
Ok(if is_dirty { Cow::Owned(s.to_vec()) } else { Cow::Borrowed(s) }) Ok(if is_dirty { Cow::Owned(s.to_vec()) } else { Cow::Borrowed(s) })

View File

@ -1,5 +1,5 @@
use crate::{ use crate::{
error::{mdbx_result, Error, Result}, error::{mdbx_result, mdbx_result_with_tx_kind, Error, Result},
flags::*, flags::*,
mdbx_try_optional, mdbx_try_optional,
transaction::{TransactionKind, RW}, transaction::{TransactionKind, RW},
@ -30,7 +30,11 @@ where
pub(crate) fn new(txn: Transaction<K>, dbi: ffi::MDBX_dbi) -> Result<Self> { pub(crate) fn new(txn: Transaction<K>, dbi: ffi::MDBX_dbi) -> Result<Self> {
let mut cursor: *mut ffi::MDBX_cursor = ptr::null_mut(); let mut cursor: *mut ffi::MDBX_cursor = ptr::null_mut();
unsafe { unsafe {
mdbx_result(txn.txn_execute(|txn| ffi::mdbx_cursor_open(txn, dbi, &mut cursor)))?; mdbx_result_with_tx_kind::<K>(
txn.txn_execute(|txn| ffi::mdbx_cursor_open(txn, dbi, &mut cursor)),
txn.txn(),
txn.env().txn_manager(),
)?;
} }
Ok(Self { txn, cursor }) Ok(Self { txn, cursor })
} }
@ -43,7 +47,7 @@ where
let s = Self { txn: other.txn.clone(), cursor }; let s = Self { txn: other.txn.clone(), cursor };
mdbx_result(res)?; mdbx_result_with_tx_kind::<K>(res, s.txn.txn(), s.txn.env().txn_manager())?;
Ok(s) Ok(s)
} }
@ -91,12 +95,11 @@ where
let key_ptr = key_val.iov_base; let key_ptr = key_val.iov_base;
let data_ptr = data_val.iov_base; let data_ptr = data_val.iov_base;
self.txn.txn_execute(|txn| { self.txn.txn_execute(|txn| {
let v = mdbx_result(ffi::mdbx_cursor_get( let v = mdbx_result_with_tx_kind::<K>(
self.cursor, ffi::mdbx_cursor_get(self.cursor, &mut key_val, &mut data_val, op),
&mut key_val, txn,
&mut data_val, self.txn.env().txn_manager(),
op, )?;
))?;
assert_ne!(data_ptr, data_val.iov_base); assert_ne!(data_ptr, data_val.iov_base);
let key_out = { let key_out = {
// MDBX wrote in new key // MDBX wrote in new key

View File

@ -1,5 +1,5 @@
use crate::{ use crate::{
error::{mdbx_result, Result}, error::{mdbx_result_with_tx_kind, Result},
transaction::TransactionKind, transaction::TransactionKind,
Environment, Transaction, Environment, Transaction,
}; };
@ -30,9 +30,13 @@ impl Database {
let c_name = name.map(|n| CString::new(n).unwrap()); let c_name = name.map(|n| CString::new(n).unwrap());
let name_ptr = if let Some(c_name) = &c_name { c_name.as_ptr() } else { ptr::null() }; let name_ptr = if let Some(c_name) = &c_name { c_name.as_ptr() } else { ptr::null() };
let mut dbi: ffi::MDBX_dbi = 0; let mut dbi: ffi::MDBX_dbi = 0;
mdbx_result( txn.txn_execute(|txn_ptr| {
txn.txn_execute(|txn| unsafe { ffi::mdbx_dbi_open(txn, name_ptr, flags, &mut dbi) }), mdbx_result_with_tx_kind::<K>(
)?; unsafe { ffi::mdbx_dbi_open(txn_ptr, name_ptr, flags, &mut dbi) },
txn_ptr,
txn.env().txn_manager(),
)
})?;
Ok(Self::new_from_ptr(dbi, txn.env().clone())) Ok(Self::new_from_ptr(dbi, txn.env().clone()))
} }

View File

@ -2,8 +2,9 @@ use crate::{
database::Database, database::Database,
error::{mdbx_result, Error, Result}, error::{mdbx_result, Error, Result},
flags::EnvironmentFlags, flags::EnvironmentFlags,
transaction::{CommitLatency, RO, RW}, transaction::{RO, RW},
Mode, Transaction, TransactionKind, txn_manager::{TxnManager, TxnManagerMessage, TxnPtr},
Transaction, TransactionKind,
}; };
use byteorder::{ByteOrder, NativeEndian}; use byteorder::{ByteOrder, NativeEndian};
use mem::size_of; use mem::size_of;
@ -15,14 +16,15 @@ use std::{
ops::{Bound, RangeBounds}, ops::{Bound, RangeBounds},
path::Path, path::Path,
ptr, ptr,
sync::{ sync::{mpsc::sync_channel, Arc},
mpsc::{sync_channel, SyncSender},
Arc,
},
thread::sleep, thread::sleep,
time::Duration, time::Duration,
}; };
/// The default maximum duration of a read transaction.
#[cfg(feature = "read-tx-timeouts")]
const DEFAULT_MAX_READ_TRANSACTION_DURATION: Duration = Duration::from_secs(5 * 60);
/// An environment supports multiple databases, all residing in the same shared-memory map. /// An environment supports multiple databases, all residing in the same shared-memory map.
/// ///
/// Accessing the environment is thread-safe. /// Accessing the environment is thread-safe.
@ -50,6 +52,8 @@ impl Environment {
kind: Default::default(), kind: Default::default(),
#[cfg(not(windows))] #[cfg(not(windows))]
handle_slow_readers: None, handle_slow_readers: None,
#[cfg(feature = "read-tx-timeouts")]
max_read_transaction_duration: None,
} }
} }
@ -65,32 +69,22 @@ impl Environment {
self.inner.env_kind self.inner.env_kind
} }
/// Returns true if the environment was opened in [Mode::ReadWrite] mode. /// Returns true if the environment was opened in [crate::Mode::ReadWrite] mode.
#[inline] #[inline]
pub fn is_read_write(&self) -> bool { pub fn is_read_write(&self) -> bool {
self.inner.txn_manager.is_some() self.inner.env_kind.is_write_map()
} }
/// Returns true if the environment was opened in [Mode::ReadOnly] mode. /// Returns true if the environment was opened in [crate::Mode::ReadOnly] mode.
#[inline] #[inline]
pub fn is_read_only(&self) -> bool { pub fn is_read_only(&self) -> bool {
self.inner.txn_manager.is_none() !self.inner.env_kind.is_write_map()
} }
/// Returns the manager that handles transaction messages. /// Returns the transaction manager.
///
/// Requires [Mode::ReadWrite] and returns None otherwise.
#[inline] #[inline]
pub(crate) fn txn_manager(&self) -> Option<&SyncSender<TxnManagerMessage>> { pub(crate) fn txn_manager(&self) -> &TxnManager {
self.inner.txn_manager.as_ref() &self.inner.txn_manager
}
/// Returns the manager that handles transaction messages.
///
/// Requires [Mode::ReadWrite] and returns None otherwise.
#[inline]
pub(crate) fn ensure_txn_manager(&self) -> Result<&SyncSender<TxnManagerMessage>> {
self.txn_manager().ok_or(Error::WriteTransactionUnsupportedInReadOnlyMode)
} }
/// Create a read-only transaction for use with the environment. /// Create a read-only transaction for use with the environment.
@ -102,16 +96,13 @@ impl Environment {
/// Create a read-write transaction for use with the environment. This method will block while /// Create a read-write transaction for use with the environment. This method will block while
/// there are any other read-write transactions open on the environment. /// there are any other read-write transactions open on the environment.
pub fn begin_rw_txn(&self) -> Result<Transaction<RW>> { pub fn begin_rw_txn(&self) -> Result<Transaction<RW>> {
let sender = self.ensure_txn_manager()?;
let txn = loop { let txn = loop {
let (tx, rx) = sync_channel(0); let (tx, rx) = sync_channel(0);
sender self.txn_manager().send_message(TxnManagerMessage::Begin {
.send(TxnManagerMessage::Begin { parent: TxnPtr(ptr::null_mut()),
parent: TxnPtr(ptr::null_mut()), flags: RW::OPEN_FLAGS,
flags: RW::OPEN_FLAGS, sender: tx,
sender: tx, });
})
.unwrap();
let res = rx.recv().unwrap(); let res = rx.recv().unwrap();
if let Err(Error::Busy) = &res { if let Err(Error::Busy) = &res {
sleep(Duration::from_millis(250)); sleep(Duration::from_millis(250));
@ -235,10 +226,8 @@ struct EnvironmentInner {
env: *mut ffi::MDBX_env, env: *mut ffi::MDBX_env,
/// Whether the environment was opened as WRITEMAP. /// Whether the environment was opened as WRITEMAP.
env_kind: EnvironmentKind, env_kind: EnvironmentKind,
/// the sender half of the transaction manager channel /// Transaction manager
/// txn_manager: TxnManager,
/// Only set if the environment was opened in [Mode::ReadWrite] mode.
txn_manager: Option<SyncSender<TxnManagerMessage>>,
} }
impl Drop for EnvironmentInner { impl Drop for EnvironmentInner {
@ -265,12 +254,12 @@ pub enum EnvironmentKind {
Default, Default,
/// Open the environment as mdbx-WRITEMAP. /// Open the environment as mdbx-WRITEMAP.
/// Use a writeable memory map unless the environment is opened as MDBX_RDONLY /// Use a writeable memory map unless the environment is opened as MDBX_RDONLY
/// ([Mode::ReadOnly]). /// ([crate::Mode::ReadOnly]).
/// ///
/// All data will be mapped into memory in the read-write mode [Mode::ReadWrite]. This offers a /// All data will be mapped into memory in the read-write mode [crate::Mode::ReadWrite]. This
/// significant performance benefit, since the data will be modified directly in mapped /// offers a significant performance benefit, since the data will be modified directly in
/// memory and then flushed to disk by single system call, without any memory management /// mapped memory and then flushed to disk by single system call, without any memory
/// nor copying. /// management nor copying.
/// ///
/// This mode is incompatible with nested transactions. /// This mode is incompatible with nested transactions.
WriteMap, WriteMap,
@ -292,22 +281,11 @@ impl EnvironmentKind {
} }
} }
#[derive(Copy, Clone, Debug)]
pub(crate) struct TxnPtr(pub(crate) *mut ffi::MDBX_txn);
unsafe impl Send for TxnPtr {}
unsafe impl Sync for TxnPtr {}
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone, Debug)]
pub(crate) struct EnvPtr(pub(crate) *mut ffi::MDBX_env); pub(crate) struct EnvPtr(pub(crate) *mut ffi::MDBX_env);
unsafe impl Send for EnvPtr {} unsafe impl Send for EnvPtr {}
unsafe impl Sync for EnvPtr {} unsafe impl Sync for EnvPtr {}
pub(crate) enum TxnManagerMessage {
Begin { parent: TxnPtr, flags: ffi::MDBX_txn_flags_t, sender: SyncSender<Result<TxnPtr>> },
Abort { tx: TxnPtr, sender: SyncSender<Result<bool>> },
Commit { tx: TxnPtr, sender: SyncSender<Result<(bool, CommitLatency)>> },
}
/// Environment statistics. /// Environment statistics.
/// ///
/// Contains information about the size and layout of an MDBX environment or database. /// Contains information about the size and layout of an MDBX environment or database.
@ -597,6 +575,10 @@ pub struct EnvironmentBuilder {
kind: EnvironmentKind, kind: EnvironmentKind,
#[cfg(not(windows))] #[cfg(not(windows))]
handle_slow_readers: Option<HandleSlowReadersCallback>, handle_slow_readers: Option<HandleSlowReadersCallback>,
#[cfg(feature = "read-tx-timeouts")]
/// The maximum duration of a read transaction. If [None], but the `read-tx-timeout` feature is
/// enabled, the default value of [DEFAULT_MAX_READ_TRANSACTION_DURATION] is used.
max_read_transaction_duration: Option<read_transactions::MaxReadTransactionDuration>,
} }
impl EnvironmentBuilder { impl EnvironmentBuilder {
@ -718,54 +700,24 @@ impl EnvironmentBuilder {
} }
} }
let mut env = EnvironmentInner { env, txn_manager: None, env_kind: self.kind }; #[cfg(not(feature = "read-tx-timeouts"))]
let txn_manager = TxnManager::new(EnvPtr(env));
if let Mode::ReadWrite { .. } = self.flags.mode { #[cfg(feature = "read-tx-timeouts")]
let (tx, rx) = std::sync::mpsc::sync_channel(0); let txn_manager = {
let e = EnvPtr(env.env); let mut txn_manager = TxnManager::new(EnvPtr(env));
std::thread::spawn(move || loop { if let crate::MaxReadTransactionDuration::Set(duration) = self
match rx.recv() { .max_read_transaction_duration
Ok(msg) => match msg { .unwrap_or(read_transactions::MaxReadTransactionDuration::Set(
TxnManagerMessage::Begin { parent, flags, sender } => { DEFAULT_MAX_READ_TRANSACTION_DURATION,
#[allow(clippy::redundant_locals)] ))
let e = e; {
let mut txn: *mut ffi::MDBX_txn = ptr::null_mut(); txn_manager = txn_manager.with_max_read_transaction_duration(duration);
sender };
.send( txn_manager
mdbx_result(unsafe { };
ffi::mdbx_txn_begin_ex(
e.0,
parent.0,
flags,
&mut txn,
ptr::null_mut(),
)
})
.map(|_| TxnPtr(txn)),
)
.unwrap()
}
TxnManagerMessage::Abort { tx, sender } => {
sender.send(mdbx_result(unsafe { ffi::mdbx_txn_abort(tx.0) })).unwrap();
}
TxnManagerMessage::Commit { tx, sender } => {
sender
.send({
let mut latency = CommitLatency::new();
mdbx_result(unsafe {
ffi::mdbx_txn_commit_ex(tx.0, latency.mdb_commit_latency())
})
.map(|v| (v, latency))
})
.unwrap();
}
},
Err(_) => return,
}
});
env.txn_manager = Some(tx); let env = EnvironmentInner { env, txn_manager, env_kind: self.kind };
}
Ok(Environment { inner: Arc::new(env) }) Ok(Environment { inner: Arc::new(env) })
} }
@ -861,16 +813,53 @@ impl EnvironmentBuilder {
self self
} }
pub fn set_log_level(&mut self, log_level: ffi::MDBX_log_level_t) -> &mut Self {
self.log_level = Some(log_level);
self
}
/// Set the Handle-Slow-Readers callback. See [HandleSlowReadersCallback] for more information. /// Set the Handle-Slow-Readers callback. See [HandleSlowReadersCallback] for more information.
#[cfg(not(windows))] #[cfg(not(windows))]
pub fn set_handle_slow_readers(&mut self, hsr: HandleSlowReadersCallback) -> &mut Self { pub fn set_handle_slow_readers(&mut self, hsr: HandleSlowReadersCallback) -> &mut Self {
self.handle_slow_readers = Some(hsr); self.handle_slow_readers = Some(hsr);
self self
} }
}
pub fn set_log_level(&mut self, log_level: ffi::MDBX_log_level_t) -> &mut Self { #[cfg(feature = "read-tx-timeouts")]
self.log_level = Some(log_level); pub(crate) mod read_transactions {
self use crate::EnvironmentBuilder;
use std::time::Duration;
/// The maximum duration of a read transaction.
#[derive(Debug, Clone, Copy)]
#[cfg(feature = "read-tx-timeouts")]
pub enum MaxReadTransactionDuration {
/// The maximum duration of a read transaction is unbounded.
Unbounded,
/// The maximum duration of a read transaction is set to the given duration.
Set(Duration),
}
#[cfg(feature = "read-tx-timeouts")]
impl MaxReadTransactionDuration {
pub fn as_duration(&self) -> Option<Duration> {
match self {
MaxReadTransactionDuration::Unbounded => None,
MaxReadTransactionDuration::Set(duration) => Some(*duration),
}
}
}
impl EnvironmentBuilder {
/// Set the maximum time a read-only transaction can be open.
pub fn set_max_read_transaction_duration(
&mut self,
max_read_transaction_duration: MaxReadTransactionDuration,
) -> &mut Self {
self.max_read_transaction_duration = Some(max_read_transaction_duration);
self
}
} }
} }

View File

@ -1,3 +1,4 @@
use crate::{txn_manager::TxnManager, TransactionKind};
use libc::c_int; use libc::c_int;
use std::result; use std::result;
@ -5,7 +6,7 @@ use std::result;
pub type Result<T> = result::Result<T, Error>; pub type Result<T> = result::Result<T, Error>;
/// An MDBX error kind. /// An MDBX error kind.
#[derive(Debug, thiserror::Error, Clone, PartialEq, Eq)] #[derive(Debug, thiserror::Error, Clone, Copy, PartialEq, Eq)]
pub enum Error { pub enum Error {
/// Key/data pair already exists. /// Key/data pair already exists.
#[error("key/data pair already exists")] #[error("key/data pair already exists")]
@ -117,6 +118,8 @@ pub enum Error {
/// [Mode::ReadOnly](crate::flags::Mode::ReadOnly), write transactions can't be opened. /// [Mode::ReadOnly](crate::flags::Mode::ReadOnly), write transactions can't be opened.
#[error("write transactions are not supported in read-only mode")] #[error("write transactions are not supported in read-only mode")]
WriteTransactionUnsupportedInReadOnlyMode, WriteTransactionUnsupportedInReadOnlyMode,
#[error("read transaction has been aborted by the transaction manager")]
ReadTransactionAborted,
/// Unknown error code. /// Unknown error code.
#[error("unknown error code")] #[error("unknown error code")]
Other(i32), Other(i32),
@ -190,7 +193,7 @@ impl Error {
Error::DecodeErrorLenDiff | Error::DecodeError => ffi::MDBX_EINVAL, Error::DecodeErrorLenDiff | Error::DecodeError => ffi::MDBX_EINVAL,
Error::Access => ffi::MDBX_EACCESS, Error::Access => ffi::MDBX_EACCESS,
Error::TooLarge => ffi::MDBX_TOO_LARGE, Error::TooLarge => ffi::MDBX_TOO_LARGE,
Error::BadSignature => ffi::MDBX_EBADSIGN, Error::BadSignature | Error::ReadTransactionAborted => ffi::MDBX_EBADSIGN,
Error::WriteTransactionUnsupportedInReadOnlyMode => ffi::MDBX_EACCESS, Error::WriteTransactionUnsupportedInReadOnlyMode => ffi::MDBX_EACCESS,
Error::NestedTransactionsUnsupportedWithWriteMap => ffi::MDBX_EACCESS, Error::NestedTransactionsUnsupportedWithWriteMap => ffi::MDBX_EACCESS,
Error::Other(err_code) => *err_code, Error::Other(err_code) => *err_code,
@ -213,6 +216,33 @@ pub(crate) fn mdbx_result(err_code: c_int) -> Result<bool> {
} }
} }
#[cfg(feature = "read-tx-timeouts")]
#[inline]
pub(crate) fn mdbx_result_with_tx_kind<K: TransactionKind>(
err_code: c_int,
txn: *mut ffi::MDBX_txn,
txn_manager: &TxnManager,
) -> Result<bool> {
if K::IS_READ_ONLY &&
err_code == ffi::MDBX_EBADSIGN &&
txn_manager.remove_aborted_read_transaction(txn).is_some()
{
return Err(Error::ReadTransactionAborted);
}
mdbx_result(err_code)
}
#[cfg(not(feature = "read-tx-timeouts"))]
#[inline]
pub(crate) fn mdbx_result_with_tx_kind<K: TransactionKind>(
err_code: c_int,
_txn: *mut ffi::MDBX_txn,
_txn_manager: &TxnManager,
) -> Result<bool> {
mdbx_result(err_code)
}
#[macro_export] #[macro_export]
macro_rules! mdbx_try_optional { macro_rules! mdbx_try_optional {
($expr:expr) => {{ ($expr:expr) => {{

View File

@ -23,6 +23,9 @@ pub mod ffi {
pub use ffi::{MDBX_dbi as DBI, MDBX_log_level_t as LogLevel}; pub use ffi::{MDBX_dbi as DBI, MDBX_log_level_t as LogLevel};
} }
#[cfg(feature = "read-tx-timeouts")]
pub use crate::environment::read_transactions::MaxReadTransactionDuration;
mod codec; mod codec;
mod cursor; mod cursor;
mod database; mod database;
@ -30,6 +33,7 @@ mod environment;
mod error; mod error;
mod flags; mod flags;
mod transaction; mod transaction;
mod txn_manager;
#[cfg(test)] #[cfg(test)]
mod test_utils { mod test_utils {

View File

@ -1,8 +1,9 @@
use crate::{ use crate::{
database::Database, database::Database,
environment::{Environment, TxnManagerMessage, TxnPtr}, environment::Environment,
error::{mdbx_result, Result}, error::{mdbx_result, mdbx_result_with_tx_kind, Result},
flags::{DatabaseFlags, WriteFlags}, flags::{DatabaseFlags, WriteFlags},
txn_manager::{TxnManagerMessage, TxnPtr},
Cursor, Error, Stat, TableObject, Cursor, Error, Stat, TableObject,
}; };
use ffi::{MDBX_txn_flags_t, MDBX_TXN_RDONLY, MDBX_TXN_READWRITE}; use ffi::{MDBX_txn_flags_t, MDBX_TXN_RDONLY, MDBX_TXN_READWRITE};
@ -28,12 +29,10 @@ mod private {
} }
pub trait TransactionKind: private::Sealed + Send + Sync + Debug + 'static { pub trait TransactionKind: private::Sealed + Send + Sync + Debug + 'static {
#[doc(hidden)]
const ONLY_CLEAN: bool;
#[doc(hidden)] #[doc(hidden)]
const OPEN_FLAGS: MDBX_txn_flags_t; const OPEN_FLAGS: MDBX_txn_flags_t;
/// Convenience flag for distinguishing between read-only and read-write transactions.
#[doc(hidden)] #[doc(hidden)]
const IS_READ_ONLY: bool; const IS_READ_ONLY: bool;
} }
@ -47,12 +46,10 @@ pub struct RO;
pub struct RW; pub struct RW;
impl TransactionKind for RO { impl TransactionKind for RO {
const ONLY_CLEAN: bool = true;
const OPEN_FLAGS: MDBX_txn_flags_t = MDBX_TXN_RDONLY; const OPEN_FLAGS: MDBX_txn_flags_t = MDBX_TXN_RDONLY;
const IS_READ_ONLY: bool = true; const IS_READ_ONLY: bool = true;
} }
impl TransactionKind for RW { impl TransactionKind for RW {
const ONLY_CLEAN: bool = false;
const OPEN_FLAGS: MDBX_txn_flags_t = MDBX_TXN_READWRITE; const OPEN_FLAGS: MDBX_txn_flags_t = MDBX_TXN_READWRITE;
const IS_READ_ONLY: bool = false; const IS_READ_ONLY: bool = false;
} }
@ -74,18 +71,27 @@ where
pub(crate) fn new(env: Environment) -> Result<Self> { pub(crate) fn new(env: Environment) -> Result<Self> {
let mut txn: *mut ffi::MDBX_txn = ptr::null_mut(); let mut txn: *mut ffi::MDBX_txn = ptr::null_mut();
unsafe { unsafe {
mdbx_result(ffi::mdbx_txn_begin_ex( mdbx_result_with_tx_kind::<K>(
env.env_ptr(), ffi::mdbx_txn_begin_ex(
ptr::null_mut(), env.env_ptr(),
K::OPEN_FLAGS, ptr::null_mut(),
&mut txn, K::OPEN_FLAGS,
ptr::null_mut(), &mut txn,
))?; ptr::null_mut(),
),
txn,
env.txn_manager(),
)?;
Ok(Self::new_from_ptr(env, txn)) Ok(Self::new_from_ptr(env, txn))
} }
} }
pub(crate) fn new_from_ptr(env: Environment, txn: *mut ffi::MDBX_txn) -> Self { pub(crate) fn new_from_ptr(env: Environment, txn: *mut ffi::MDBX_txn) -> Self {
#[cfg(feature = "read-tx-timeouts")]
if K::IS_READ_ONLY {
env.txn_manager().add_active_read_transaction(txn)
}
let inner = TransactionInner { let inner = TransactionInner {
txn: TransactionPtr::new(txn), txn: TransactionPtr::new(txn),
primed_dbis: Mutex::new(IndexSet::new()), primed_dbis: Mutex::new(IndexSet::new()),
@ -93,6 +99,7 @@ where
env, env,
_marker: Default::default(), _marker: Default::default(),
}; };
Self { inner: Arc::new(inner) } Self { inner: Arc::new(inner) }
} }
@ -179,22 +186,26 @@ where
pub fn commit_and_rebind_open_dbs(self) -> Result<(bool, CommitLatency, Vec<Database>)> { pub fn commit_and_rebind_open_dbs(self) -> Result<(bool, CommitLatency, Vec<Database>)> {
let result = { let result = {
let result = self.txn_execute(|txn| { let result = self.txn_execute(|txn| {
if K::ONLY_CLEAN { if K::IS_READ_ONLY {
#[cfg(feature = "read-tx-timeouts")]
self.env().txn_manager().remove_active_read_transaction(txn);
let mut latency = CommitLatency::new(); let mut latency = CommitLatency::new();
mdbx_result(unsafe { mdbx_result_with_tx_kind::<K>(
ffi::mdbx_txn_commit_ex(txn, latency.mdb_commit_latency()) unsafe { ffi::mdbx_txn_commit_ex(txn, latency.mdb_commit_latency()) },
}) txn,
self.env().txn_manager(),
)
.map(|v| (v, latency)) .map(|v| (v, latency))
} else { } else {
let (sender, rx) = sync_channel(0); let (sender, rx) = sync_channel(0);
self.env() self.env()
.ensure_txn_manager() .txn_manager()
.unwrap() .send_message(TxnManagerMessage::Commit { tx: TxnPtr(txn), sender });
.send(TxnManagerMessage::Commit { tx: TxnPtr(txn), sender })
.unwrap();
rx.recv().unwrap() rx.recv().unwrap()
} }
}); });
self.inner.set_committed(); self.inner.set_committed();
result result
}; };
@ -231,9 +242,13 @@ where
pub fn db_flags(&self, db: &Database) -> Result<DatabaseFlags> { pub fn db_flags(&self, db: &Database) -> Result<DatabaseFlags> {
let mut flags: c_uint = 0; let mut flags: c_uint = 0;
unsafe { unsafe {
mdbx_result(self.txn_execute(|txn| { self.txn_execute(|txn| {
ffi::mdbx_dbi_flags_ex(txn, db.dbi(), &mut flags, ptr::null_mut()) mdbx_result_with_tx_kind::<K>(
}))?; ffi::mdbx_dbi_flags_ex(txn, db.dbi(), &mut flags, ptr::null_mut()),
txn,
self.env().txn_manager(),
)
})?;
} }
// The types are not the same on Windows. Great! // The types are not the same on Windows. Great!
@ -250,9 +265,13 @@ where
pub fn db_stat_with_dbi(&self, dbi: ffi::MDBX_dbi) -> Result<Stat> { pub fn db_stat_with_dbi(&self, dbi: ffi::MDBX_dbi) -> Result<Stat> {
unsafe { unsafe {
let mut stat = Stat::new(); let mut stat = Stat::new();
mdbx_result(self.txn_execute(|txn| { self.txn_execute(|txn| {
ffi::mdbx_dbi_stat(txn, dbi, stat.mdb_stat(), size_of::<Stat>()) mdbx_result_with_tx_kind::<K>(
}))?; ffi::mdbx_dbi_stat(txn, dbi, stat.mdb_stat(), size_of::<Stat>()),
txn,
self.env().txn_manager(),
)
})?;
Ok(stat) Ok(stat)
} }
} }
@ -330,17 +349,18 @@ where
fn drop(&mut self) { fn drop(&mut self) {
self.txn_execute(|txn| { self.txn_execute(|txn| {
if !self.has_committed() { if !self.has_committed() {
if K::ONLY_CLEAN { if K::IS_READ_ONLY {
#[cfg(feature = "read-tx-timeouts")]
self.env.txn_manager().remove_active_read_transaction(txn);
unsafe { unsafe {
ffi::mdbx_txn_abort(txn); ffi::mdbx_txn_abort(txn);
} }
} else { } else {
let (sender, rx) = sync_channel(0); let (sender, rx) = sync_channel(0);
self.env self.env
.ensure_txn_manager() .txn_manager()
.unwrap() .send_message(TxnManagerMessage::Abort { tx: TxnPtr(txn), sender });
.send(TxnManagerMessage::Abort { tx: TxnPtr(txn), sender })
.unwrap();
rx.recv().unwrap().unwrap(); rx.recv().unwrap().unwrap();
} }
} }
@ -489,7 +509,11 @@ impl Transaction<RO> {
/// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi
/// BEFORE calling this function. /// BEFORE calling this function.
pub unsafe fn close_db(&self, db: Database) -> Result<()> { pub unsafe fn close_db(&self, db: Database) -> Result<()> {
mdbx_result(ffi::mdbx_dbi_close(self.env().env_ptr(), db.dbi()))?; mdbx_result_with_tx_kind::<RO>(
ffi::mdbx_dbi_close(self.env().env_ptr(), db.dbi()),
self.txn(),
self.env().txn_manager(),
)?;
Ok(()) Ok(())
} }
@ -503,15 +527,11 @@ impl Transaction<RW> {
} }
self.txn_execute(|txn| { self.txn_execute(|txn| {
let (tx, rx) = sync_channel(0); let (tx, rx) = sync_channel(0);
self.env() self.env().txn_manager().send_message(TxnManagerMessage::Begin {
.ensure_txn_manager() parent: TxnPtr(txn),
.unwrap() flags: RW::OPEN_FLAGS,
.send(TxnManagerMessage::Begin { sender: tx,
parent: TxnPtr(txn), });
flags: RW::OPEN_FLAGS,
sender: tx,
})
.unwrap();
rx.recv().unwrap().map(|ptr| Transaction::new_from_ptr(self.env().clone(), ptr.0)) rx.recv().unwrap().map(|ptr| Transaction::new_from_ptr(self.env().clone(), ptr.0))
}) })

View File

@ -0,0 +1,389 @@
use crate::{
environment::EnvPtr,
error::{mdbx_result, Result},
CommitLatency,
};
use std::{
ptr,
sync::mpsc::{sync_channel, Receiver, SyncSender},
};
/// Wrapper around a raw MDBX transaction pointer so the pointer value can be sent through
/// channels to the transaction manager's background thread.
#[derive(Copy, Clone, Debug)]
pub(crate) struct TxnPtr(pub(crate) *mut ffi::MDBX_txn);
// SAFETY: only the pointer *value* crosses threads; the FFI calls that dereference it are
// executed on the transaction manager's single listener thread.
// NOTE(review): this relies on callers not issuing concurrent FFI calls on the same raw
// pointer outside the manager — confirm against the transaction code.
unsafe impl Send for TxnPtr {}
unsafe impl Sync for TxnPtr {}
/// Messages processed by the [TxnManager]'s background listener thread. Each variant
/// carries a `sender` on which the FFI call's result is returned to the requester.
pub(crate) enum TxnManagerMessage {
    /// Begin a new transaction (with `parent` as the parent for nested transactions) using
    /// the given open flags; replies with the new transaction pointer.
    Begin { parent: TxnPtr, flags: ffi::MDBX_txn_flags_t, sender: SyncSender<Result<TxnPtr>> },
    /// Abort the given transaction; replies with the FFI result.
    Abort { tx: TxnPtr, sender: SyncSender<Result<bool>> },
    /// Commit the given transaction; replies with the FFI result and the commit latency.
    Commit { tx: TxnPtr, sender: SyncSender<Result<(bool, CommitLatency)>> },
}
/// Manages transactions by doing two things:
/// - Opening, aborting, and committing transactions using [TxnManager::send_message] with the
///   corresponding [TxnManagerMessage]
/// - Aborting long-lived read transactions (if the `read-tx-timeouts` feature is enabled and
///   `TxnManager::with_max_read_transaction_duration` is called)
#[derive(Debug)]
pub(crate) struct TxnManager {
    /// Channel to the background listener thread that executes the MDBX FFI calls.
    sender: SyncSender<TxnManagerMessage>,
    /// Tracker for active/aborted read transactions; `None` unless a maximum read
    /// transaction duration has been configured.
    #[cfg(feature = "read-tx-timeouts")]
    read_transactions: Option<std::sync::Arc<read_transactions::ReadTransactions>>,
}
impl TxnManager {
/// Creates a new transaction manager for the given environment and spawns its background
/// message-listener thread. Read-transaction timeout tracking is disabled until
/// `with_max_read_transaction_duration` is called.
pub(crate) fn new(env: EnvPtr) -> Self {
    let (sender, receiver) = sync_channel(0);
    let manager = Self {
        sender,
        #[cfg(feature = "read-tx-timeouts")]
        read_transactions: None,
    };
    manager.start_message_listener(env, receiver);
    manager
}
/// Spawns a new thread with [std::thread::spawn] that listens to incoming [TxnManagerMessage]
/// messages, executes an FFI function, and returns the result on the provided channel.
///
/// - [TxnManagerMessage::Begin] opens a new transaction with [ffi::mdbx_txn_begin_ex]
/// - [TxnManagerMessage::Abort] aborts a transaction with [ffi::mdbx_txn_abort]
/// - [TxnManagerMessage::Commit] commits a transaction with [ffi::mdbx_txn_commit_ex]
fn start_message_listener(&self, env: EnvPtr, rx: Receiver<TxnManagerMessage>) {
    // The `read_transactions` field only exists when the `read-tx-timeouts` feature is
    // enabled, so the binding must be cfg-gated too (every use below already is); without
    // the attribute this fails to compile with the feature disabled.
    #[cfg(feature = "read-tx-timeouts")]
    let read_transactions = self.read_transactions.clone();
    std::thread::spawn(move || {
        // Move the environment pointer into the thread so the FFI calls have a handle.
        #[allow(clippy::redundant_locals)]
        let env = env;
        loop {
            match rx.recv() {
                Ok(msg) => match msg {
                    TxnManagerMessage::Begin { parent, flags, sender } => {
                        let mut txn: *mut ffi::MDBX_txn = ptr::null_mut();
                        sender
                            .send(
                                mdbx_result(unsafe {
                                    ffi::mdbx_txn_begin_ex(
                                        env.0,
                                        parent.0,
                                        flags,
                                        &mut txn,
                                        ptr::null_mut(),
                                    )
                                })
                                .map(|_| TxnPtr(txn)),
                            )
                            .unwrap();
                        // Read-only transactions opened through the manager must be
                        // registered so the timeout monitor can track them.
                        #[cfg(feature = "read-tx-timeouts")]
                        {
                            use crate::transaction::TransactionKind;
                            if flags == crate::transaction::RO::OPEN_FLAGS {
                                if let Some(read_transactions) = &read_transactions {
                                    read_transactions.add_active(txn);
                                }
                            }
                        }
                    }
                    TxnManagerMessage::Abort { tx, sender } => {
                        // Deregister before aborting so the monitor never touches a dead
                        // transaction pointer.
                        #[cfg(feature = "read-tx-timeouts")]
                        if let Some(read_transactions) = &read_transactions {
                            read_transactions.remove_active(tx.0);
                        }
                        sender.send(mdbx_result(unsafe { ffi::mdbx_txn_abort(tx.0) })).unwrap();
                    }
                    TxnManagerMessage::Commit { tx, sender } => {
                        // Deregister before committing, for the same reason as `Abort`.
                        #[cfg(feature = "read-tx-timeouts")]
                        if let Some(read_transactions) = &read_transactions {
                            read_transactions.remove_active(tx.0);
                        }
                        sender
                            .send({
                                let mut latency = CommitLatency::new();
                                mdbx_result(unsafe {
                                    ffi::mdbx_txn_commit_ex(tx.0, latency.mdb_commit_latency())
                                })
                                .map(|v| (v, latency))
                            })
                            .unwrap();
                    }
                },
                // All senders dropped: the manager (and environment) is gone, stop the loop.
                Err(_) => return,
            }
        }
    });
}
/// Sends a message to the background listener thread.
///
/// # Panics
///
/// Panics if the listener thread has exited and the channel is disconnected.
pub(crate) fn send_message(&self, message: TxnManagerMessage) {
    self.sender.send(message).unwrap()
}
}
#[cfg(feature = "read-tx-timeouts")]
mod read_transactions {
use crate::{error::mdbx_result, txn_manager::TxnManager, Error};
use dashmap::{DashMap, DashSet};
use std::{
sync::Arc,
time::{Duration, Instant},
};
use tracing::{error, trace, warn};
/// How often the monitor thread wakes up (at most) to scan for long-lived read transactions.
const READ_TRANSACTIONS_CHECK_INTERVAL: Duration = Duration::from_secs(5);
impl TxnManager {
/// Sets the maximum duration that a read transaction can be open.
pub(crate) fn with_max_read_transaction_duration(
mut self,
duration: Duration,
) -> TxnManager {
let read_transactions = Arc::new(ReadTransactions::new(duration));
read_transactions.clone().start_monitor();
self.read_transactions = Some(read_transactions);
self
}
/// Adds a new transaction to the list of active read transactions.
pub(crate) fn add_active_read_transaction(&self, ptr: *mut ffi::MDBX_txn) {
if let Some(read_transactions) = &self.read_transactions {
read_transactions.add_active(ptr);
}
}
/// Removes a transaction from the list of active read transactions.
pub(crate) fn remove_active_read_transaction(
&self,
ptr: *mut ffi::MDBX_txn,
) -> Option<(usize, Instant)> {
self.read_transactions.as_ref()?.remove_active(ptr)
}
/// Removes a transaction from the list of aborted read transactions.
pub(crate) fn remove_aborted_read_transaction(
&self,
ptr: *mut ffi::MDBX_txn,
) -> Option<usize> {
self.read_transactions.as_ref()?.remove_aborted(ptr)
}
}
#[derive(Debug, Default)]
pub(super) struct ReadTransactions {
    /// Maximum duration that a read transaction can be open until the
    /// [ReadTransactions::start_monitor] aborts it.
    max_duration: Duration,
    /// List of currently active read transactions.
    ///
    /// We store `usize` instead of a raw pointer as a key, because pointers are not
    /// comparable. The time of transaction opening is stored as a value.
    active: DashMap<usize, Instant>,
    /// List of read transactions aborted by the [ReadTransactions::start_monitor].
    /// We keep them until the user tries to use the transaction, so we're able to report a
    /// nice [Error::ReadTransactionAborted] error.
    ///
    /// We store `usize` instead of a raw pointer, because pointers are not comparable.
    aborted: DashSet<usize>,
}
impl ReadTransactions {
    /// Creates a new tracker with the given maximum read-transaction duration.
    pub(super) fn new(max_duration: Duration) -> Self {
        Self { max_duration, ..Default::default() }
    }
    /// Adds a new transaction to the list of active read transactions.
    pub(super) fn add_active(&self, ptr: *mut ffi::MDBX_txn) {
        // Key is the pointer address; value is the opening time read by the monitor.
        let _ = self.active.insert(ptr as usize, Instant::now());
    }
    /// Removes a transaction from the list of active read transactions.
    pub(super) fn remove_active(&self, ptr: *mut ffi::MDBX_txn) -> Option<(usize, Instant)> {
        self.active.remove(&(ptr as usize))
    }
    /// Adds a new transaction to the list of aborted read transactions.
    pub(super) fn add_aborted(&self, ptr: *mut ffi::MDBX_txn) {
        self.aborted.insert(ptr as usize);
    }
    /// Removes a transaction from the list of aborted read transactions.
    pub(super) fn remove_aborted(&self, ptr: *mut ffi::MDBX_txn) -> Option<usize> {
        self.aborted.remove(&(ptr as usize))
    }
    /// Spawns a new thread with [std::thread::spawn] that monitors the list of active read
    /// transactions and aborts those that are open for longer than
    /// `ReadTransactions.max_duration`.
    ///
    /// Aborted transaction pointers are placed into the list of aborted read transactions, and
    /// removed from this list by [crate::error::mdbx_result_with_tx_kind] when the user tries
    /// to use it.
    pub(super) fn start_monitor(self: Arc<Self>) {
        std::thread::spawn(move || {
            // Scratch buffer for transactions aborted in the current pass; reused across
            // iterations to avoid re-allocating.
            let mut aborted_active = Vec::new();
            loop {
                let now = Instant::now();
                // Longest open time among transactions NOT aborted this pass; drives the
                // sleep calculation at the bottom of the loop.
                let mut max_active_transaction_duration = None;
                // Iterate through active read transactions and abort those that are open for
                // longer than `self.max_duration`.
                for entry in self.active.iter() {
                    let (ptr, start) = entry.pair();
                    let duration = now - *start;
                    if duration > self.max_duration {
                        let ptr = *ptr as *mut ffi::MDBX_txn;
                        // Add the transaction to the list of aborted transactions, so further
                        // usages report the correct error when the transaction is closed.
                        // NOTE: this must happen before the abort, so the user can never
                        // observe an aborted txn that is in neither list.
                        self.add_aborted(ptr);
                        // Abort the transaction
                        let result = mdbx_result(unsafe { ffi::mdbx_txn_abort(ptr) });
                        // Add the transaction to `aborted_active`. We can't remove it instantly
                        // from the list of active transactions, because we iterate through it.
                        aborted_active.push((ptr, duration, result.err()));
                    } else {
                        max_active_transaction_duration = Some(
                            duration.max(max_active_transaction_duration.unwrap_or_default()),
                        );
                    }
                }
                // Walk through aborted transactions, and delete them from the list of active
                // transactions.
                for (ptr, open_duration, err) in aborted_active.iter().copied() {
                    // Try deleting the transaction from the list of active transactions.
                    let was_in_active = self.remove_active(ptr).is_some();
                    if let Some(err) = err {
                        // If there was an error when aborting the transaction, we need to
                        // remove it from the list of aborted transactions, because otherwise it
                        // will stay there forever.
                        self.remove_aborted(ptr);
                        if was_in_active && err != Error::BadSignature {
                            // If the transaction was in the list of active transactions and the
                            // error code is not `EBADSIGN`, then user didn't abort it.
                            error!(target: "libmdbx", ?err, ?open_duration, "Failed to abort the long-lived read transactions");
                        }
                    } else {
                        // Happy path, the transaction has been aborted by us with no errors.
                        warn!(target: "libmdbx", ?open_duration, "Long-lived read transactions has been aborted");
                    }
                }
                // Clear the list of aborted transactions, but not de-allocate the reserved
                // capacity to save on further pushes.
                aborted_active.clear();
                if !self.active.is_empty() || !self.aborted.is_empty() {
                    trace!(
                        target: "libmdbx",
                        elapsed = ?now.elapsed(),
                        active = ?self.active.iter().map(|entry| {
                            let (ptr, start) = entry.pair();
                            (*ptr, start.elapsed())
                        }).collect::<Vec<_>>(),
                        aborted = ?self.aborted.iter().map(|entry| *entry).collect::<Vec<_>>(),
                        "Read transactions"
                    );
                }
                // Sleep not more than `READ_TRANSACTIONS_CHECK_INTERVAL`, but at least until
                // the closest deadline of an active read transaction
                // (`max_active_transaction_duration` never exceeds `max_duration` here, so
                // the subtraction cannot underflow).
                let duration_until_closest_deadline =
                    self.max_duration - max_active_transaction_duration.unwrap_or_default();
                std::thread::sleep(
                    READ_TRANSACTIONS_CHECK_INTERVAL.min(duration_until_closest_deadline),
                );
            }
        });
    }
}
#[cfg(test)]
mod tests {
    use crate::{
        txn_manager::read_transactions::READ_TRANSACTIONS_CHECK_INTERVAL, Environment, Error,
        MaxReadTransactionDuration,
    };
    use std::{thread::sleep, time::Duration};
    use tempfile::tempdir;
    #[test]
    fn txn_manager_read_transactions_duration_set() {
        const MAX_DURATION: Duration = Duration::from_secs(1);
        let dir = tempdir().unwrap();
        let env = Environment::builder()
            .set_max_read_transaction_duration(MaxReadTransactionDuration::Set(MAX_DURATION))
            .open(dir.path())
            .unwrap();
        let read_transactions = env.txn_manager().read_transactions.as_ref().unwrap();
        // Create a read-only transaction, successfully use it, close it by dropping.
        {
            let tx = env.begin_ro_txn().unwrap();
            let tx_ptr = tx.txn() as usize;
            assert!(read_transactions.active.contains_key(&tx_ptr));
            tx.open_db(None).unwrap();
            drop(tx);
            // Dropping deregisters the transaction without marking it aborted.
            assert!(!read_transactions.active.contains_key(&tx_ptr));
            assert!(!read_transactions.aborted.contains(&tx_ptr));
        }
        // Create a read-only transaction, successfully use it, close it by committing.
        {
            let tx = env.begin_ro_txn().unwrap();
            let tx_ptr = tx.txn() as usize;
            assert!(read_transactions.active.contains_key(&tx_ptr));
            tx.open_db(None).unwrap();
            tx.commit().unwrap();
            assert!(!read_transactions.active.contains_key(&tx_ptr));
            assert!(!read_transactions.aborted.contains(&tx_ptr));
        }
        // Create a read-only transaction, wait until `MAX_DURATION` time is elapsed so the
        // manager kills it, use it and observe the `Error::ReadTransactionAborted` error.
        {
            let tx = env.begin_ro_txn().unwrap();
            let tx_ptr = tx.txn() as usize;
            assert!(read_transactions.active.contains_key(&tx_ptr));
            // Sleep long enough for the monitor to have scanned at least once after the
            // transaction's deadline passed.
            sleep(MAX_DURATION + READ_TRANSACTIONS_CHECK_INTERVAL);
            assert!(!read_transactions.active.contains_key(&tx_ptr));
            assert!(read_transactions.aborted.contains(&tx_ptr));
            // First use after the abort reports the dedicated error and clears the entry
            // from the aborted list.
            assert_eq!(tx.open_db(None).err(), Some(Error::ReadTransactionAborted));
            assert!(!read_transactions.active.contains_key(&tx_ptr));
            assert!(!read_transactions.aborted.contains(&tx_ptr));
        }
    }
    #[test]
    fn txn_manager_read_transactions_duration_unbounded() {
        let dir = tempdir().unwrap();
        let env = Environment::builder()
            .set_max_read_transaction_duration(MaxReadTransactionDuration::Unbounded)
            .open(dir.path())
            .unwrap();
        // Unbounded duration: no tracker is created at all.
        assert!(env.txn_manager().read_transactions.is_none());
        let tx = env.begin_ro_txn().unwrap();
        sleep(READ_TRANSACTIONS_CHECK_INTERVAL);
        // The transaction survives well past the check interval and commits cleanly.
        assert!(tx.commit().is_ok())
    }
}
}

View File

@ -10,7 +10,7 @@ use crate::{
TransactionsProvider, WithdrawalsProvider, TransactionsProvider, WithdrawalsProvider,
}; };
use reth_db::{database::Database, init_db, models::StoredBlockBodyIndices, DatabaseEnv}; use reth_db::{database::Database, init_db, models::StoredBlockBodyIndices, DatabaseEnv};
use reth_interfaces::{db::LogLevel, provider::ProviderResult, RethError, RethResult}; use reth_interfaces::{provider::ProviderResult, RethError, RethResult};
use reth_primitives::{ use reth_primitives::{
snapshot::HighestSnapshots, snapshot::HighestSnapshots,
stage::{StageCheckpoint, StageId}, stage::{StageCheckpoint, StageId},
@ -32,6 +32,7 @@ mod metrics;
mod provider; mod provider;
pub use provider::{DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW}; pub use provider::{DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW};
use reth_db::mdbx::DatabaseArguments;
/// A common provider that fetches data from a database. /// A common provider that fetches data from a database.
/// ///
@ -69,10 +70,10 @@ impl<DB> ProviderFactory<DB> {
pub fn new_with_database_path<P: AsRef<Path>>( pub fn new_with_database_path<P: AsRef<Path>>(
path: P, path: P,
chain_spec: Arc<ChainSpec>, chain_spec: Arc<ChainSpec>,
log_level: Option<LogLevel>, args: DatabaseArguments,
) -> RethResult<ProviderFactory<DatabaseEnv>> { ) -> RethResult<ProviderFactory<DatabaseEnv>> {
Ok(ProviderFactory::<DatabaseEnv> { Ok(ProviderFactory::<DatabaseEnv> {
db: init_db(path, log_level).map_err(|e| RethError::Custom(e.to_string()))?, db: init_db(path, args).map_err(|e| RethError::Custom(e.to_string()))?,
chain_spec, chain_spec,
snapshot_provider: None, snapshot_provider: None,
}) })
@ -556,7 +557,7 @@ mod tests {
let factory = ProviderFactory::<DatabaseEnv>::new_with_database_path( let factory = ProviderFactory::<DatabaseEnv>::new_with_database_path(
tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path(), tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path(),
Arc::new(chain_spec), Arc::new(chain_spec),
None, Default::default(),
) )
.unwrap(); .unwrap();

View File

@ -17,8 +17,8 @@ use std::path::Path;
fn main() -> eyre::Result<()> { fn main() -> eyre::Result<()> {
// Opens a RO handle to the database file. // Opens a RO handle to the database file.
// TODO: Should be able to do `ProviderFactory::new_with_db_path_ro(...)` instead of // TODO: Should be able to do `ProviderFactory::new_with_db_path_ro(...)` instead of
// doing in 2 steps. // doing in 2 steps.
let db = open_db_read_only(Path::new(&std::env::var("RETH_DB_PATH")?), None)?; let db = open_db_read_only(Path::new(&std::env::var("RETH_DB_PATH")?), Default::default())?;
// Instantiate a provider factory for Ethereum mainnet using the provided DB. // Instantiate a provider factory for Ethereum mainnet using the provided DB.
// TODO: Should the DB version include the spec so that you do not need to specify it here? // TODO: Should the DB version include the spec so that you do not need to specify it here?

View File

@ -35,7 +35,10 @@ pub mod myrpc_ext;
#[tokio::main] #[tokio::main]
async fn main() -> eyre::Result<()> { async fn main() -> eyre::Result<()> {
// 1. Setup the DB // 1. Setup the DB
let db = Arc::new(open_db_read_only(Path::new(&std::env::var("RETH_DB_PATH")?), None)?); let db = Arc::new(open_db_read_only(
Path::new(&std::env::var("RETH_DB_PATH")?),
Default::default(),
)?);
let spec = Arc::new(ChainSpecBuilder::mainnet().build()); let spec = Arc::new(ChainSpecBuilder::mainnet().build());
let factory = ProviderFactory::new(db.clone(), spec.clone()); let factory = ProviderFactory::new(db.clone(), spec.clone());