mirror of
https://github.com/hl-archive-node/nanoreth.git
synced 2025-12-06 10:59:55 +00:00
feat(storage, mdbx): transaction manager (#6126)
This commit is contained in:
2
Cargo.lock
generated
2
Cargo.lock
generated
@ -6256,6 +6256,7 @@ dependencies = [
|
||||
"bitflags 2.4.2",
|
||||
"byteorder",
|
||||
"criterion",
|
||||
"dashmap",
|
||||
"derive_more",
|
||||
"indexmap 2.1.0",
|
||||
"libc",
|
||||
@ -6267,6 +6268,7 @@ dependencies = [
|
||||
"reth-mdbx-sys",
|
||||
"tempfile",
|
||||
"thiserror",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
@ -4,6 +4,7 @@ use crate::dirs::{ChainPath, DataDirPath, MaybePlatformPath};
|
||||
use alloy_chains::Chain;
|
||||
use reth_db::{
|
||||
init_db,
|
||||
mdbx::DatabaseArguments,
|
||||
test_utils::{create_test_rw_db, TempDatabase},
|
||||
DatabaseEnv,
|
||||
};
|
||||
@ -54,7 +55,10 @@ impl DatabaseBuilder {
|
||||
let db_path = data_dir.db_path();
|
||||
|
||||
tracing::info!(target: "reth::cli", path = ?db_path, "Opening database");
|
||||
let db = Arc::new(init_db(db_path.clone(), log_level)?.with_metrics());
|
||||
let db = Arc::new(
|
||||
init_db(db_path.clone(), DatabaseArguments::default().log_level(log_level))?
|
||||
.with_metrics(),
|
||||
);
|
||||
Ok(DatabaseInstance::Real { db, data_dir })
|
||||
}
|
||||
}
|
||||
|
||||
@ -15,12 +15,12 @@ use crate::{
|
||||
dirs::{DataDirPath, PlatformPath},
|
||||
};
|
||||
use reth_db::{
|
||||
cursor::DbCursorRO, database::Database, open_db_read_only, table::Table, transaction::DbTx,
|
||||
AccountChangeSet, AccountHistory, AccountsTrie, BlockBodyIndices, BlockOmmers,
|
||||
BlockWithdrawals, Bytecodes, CanonicalHeaders, DatabaseEnv, HashedAccount, HashedStorage,
|
||||
HeaderNumbers, HeaderTD, Headers, PlainAccountState, PlainStorageState, PruneCheckpoints,
|
||||
Receipts, StorageChangeSet, StorageHistory, StoragesTrie, SyncStage, SyncStageProgress, Tables,
|
||||
TransactionBlock, Transactions, TxHashNumber, TxSenders,
|
||||
cursor::DbCursorRO, database::Database, mdbx::DatabaseArguments, open_db_read_only,
|
||||
table::Table, transaction::DbTx, AccountChangeSet, AccountHistory, AccountsTrie,
|
||||
BlockBodyIndices, BlockOmmers, BlockWithdrawals, Bytecodes, CanonicalHeaders, DatabaseEnv,
|
||||
HashedAccount, HashedStorage, HeaderNumbers, HeaderTD, Headers, PlainAccountState,
|
||||
PlainStorageState, PruneCheckpoints, Receipts, StorageChangeSet, StorageHistory, StoragesTrie,
|
||||
SyncStage, SyncStageProgress, Tables, TransactionBlock, Transactions, TxHashNumber, TxSenders,
|
||||
};
|
||||
use tracing::info;
|
||||
|
||||
@ -61,7 +61,10 @@ impl Command {
|
||||
pub fn execute(self, tool: &DbTool<'_, DatabaseEnv>) -> eyre::Result<()> {
|
||||
// open second db
|
||||
let second_db_path: PathBuf = self.secondary_datadir.join("db").into();
|
||||
let second_db = open_db_read_only(&second_db_path, self.second_db.log_level)?;
|
||||
let second_db = open_db_read_only(
|
||||
&second_db_path,
|
||||
DatabaseArguments::default().log_level(self.second_db.log_level),
|
||||
)?;
|
||||
|
||||
let tables = match self.table {
|
||||
Some(table) => vec![table],
|
||||
|
||||
@ -14,7 +14,9 @@ use eyre::WrapErr;
|
||||
use human_bytes::human_bytes;
|
||||
use reth_db::{
|
||||
database::Database,
|
||||
mdbx, open_db, open_db_read_only,
|
||||
mdbx,
|
||||
mdbx::DatabaseArguments,
|
||||
open_db, open_db_read_only,
|
||||
version::{get_db_version, DatabaseVersionError, DB_VERSION},
|
||||
Tables,
|
||||
};
|
||||
@ -102,7 +104,10 @@ impl Command {
|
||||
match self.command {
|
||||
// TODO: We'll need to add this on the DB trait.
|
||||
Subcommands::Stats { .. } => {
|
||||
let db = open_db_read_only(&db_path, self.db.log_level)?;
|
||||
let db = open_db_read_only(
|
||||
&db_path,
|
||||
DatabaseArguments::default().log_level(self.db.log_level),
|
||||
)?;
|
||||
let tool = DbTool::new(&db, self.chain.clone())?;
|
||||
let mut stats_table = ComfyTable::new();
|
||||
stats_table.load_preset(comfy_table::presets::ASCII_MARKDOWN);
|
||||
@ -186,17 +191,26 @@ impl Command {
|
||||
println!("{stats_table}");
|
||||
}
|
||||
Subcommands::List(command) => {
|
||||
let db = open_db_read_only(&db_path, self.db.log_level)?;
|
||||
let db = open_db_read_only(
|
||||
&db_path,
|
||||
DatabaseArguments::default().log_level(self.db.log_level),
|
||||
)?;
|
||||
let tool = DbTool::new(&db, self.chain.clone())?;
|
||||
command.execute(&tool)?;
|
||||
}
|
||||
Subcommands::Diff(command) => {
|
||||
let db = open_db_read_only(&db_path, self.db.log_level)?;
|
||||
let db = open_db_read_only(
|
||||
&db_path,
|
||||
DatabaseArguments::default().log_level(self.db.log_level),
|
||||
)?;
|
||||
let tool = DbTool::new(&db, self.chain.clone())?;
|
||||
command.execute(&tool)?;
|
||||
}
|
||||
Subcommands::Get(command) => {
|
||||
let db = open_db_read_only(&db_path, self.db.log_level)?;
|
||||
let db = open_db_read_only(
|
||||
&db_path,
|
||||
DatabaseArguments::default().log_level(self.db.log_level),
|
||||
)?;
|
||||
let tool = DbTool::new(&db, self.chain.clone())?;
|
||||
command.execute(&tool)?;
|
||||
}
|
||||
@ -216,12 +230,14 @@ impl Command {
|
||||
}
|
||||
}
|
||||
|
||||
let db = open_db(&db_path, self.db.log_level)?;
|
||||
let db =
|
||||
open_db(&db_path, DatabaseArguments::default().log_level(self.db.log_level))?;
|
||||
let mut tool = DbTool::new(&db, self.chain.clone())?;
|
||||
tool.drop(db_path)?;
|
||||
}
|
||||
Subcommands::Clear(command) => {
|
||||
let db = open_db(&db_path, self.db.log_level)?;
|
||||
let db =
|
||||
open_db(&db_path, DatabaseArguments::default().log_level(self.db.log_level))?;
|
||||
command.execute(&db)?;
|
||||
}
|
||||
Subcommands::Snapshot(command) => {
|
||||
|
||||
@ -3,7 +3,7 @@ use super::{
|
||||
Command,
|
||||
};
|
||||
use rand::{seq::SliceRandom, Rng};
|
||||
use reth_db::{open_db_read_only, snapshot::HeaderMask};
|
||||
use reth_db::{mdbx::DatabaseArguments, open_db_read_only, snapshot::HeaderMask};
|
||||
use reth_interfaces::db::LogLevel;
|
||||
use reth_primitives::{
|
||||
snapshot::{Compression, Filters, InclusionFilter, PerfectHashingFunction},
|
||||
@ -28,7 +28,9 @@ impl Command {
|
||||
inclusion_filter: InclusionFilter,
|
||||
phf: Option<PerfectHashingFunction>,
|
||||
) -> eyre::Result<()> {
|
||||
let factory = ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone());
|
||||
let db_args = DatabaseArguments::default().log_level(log_level);
|
||||
|
||||
let factory = ProviderFactory::new(open_db_read_only(db_path, db_args)?, chain.clone());
|
||||
let provider = factory.provider()?;
|
||||
let tip = provider.last_block_number()?;
|
||||
let block_range =
|
||||
@ -43,7 +45,7 @@ impl Command {
|
||||
let mut row_indexes = block_range.clone().collect::<Vec<_>>();
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
let tx_range = ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone())
|
||||
let tx_range = ProviderFactory::new(open_db_read_only(db_path, db_args)?, chain.clone())
|
||||
.provider()?
|
||||
.transaction_range_by_block_range(block_range.clone())?;
|
||||
|
||||
@ -61,7 +63,7 @@ impl Command {
|
||||
for bench_kind in [BenchKind::Walk, BenchKind::RandomAll] {
|
||||
bench(
|
||||
bench_kind,
|
||||
(open_db_read_only(db_path, log_level)?, chain.clone()),
|
||||
(open_db_read_only(db_path, db_args)?, chain.clone()),
|
||||
SnapshotSegment::Headers,
|
||||
filters,
|
||||
compression,
|
||||
@ -92,7 +94,7 @@ impl Command {
|
||||
let num = row_indexes[rng.gen_range(0..row_indexes.len())];
|
||||
bench(
|
||||
BenchKind::RandomOne,
|
||||
(open_db_read_only(db_path, log_level)?, chain.clone()),
|
||||
(open_db_read_only(db_path, db_args)?, chain.clone()),
|
||||
SnapshotSegment::Headers,
|
||||
filters,
|
||||
compression,
|
||||
@ -113,14 +115,14 @@ impl Command {
|
||||
{
|
||||
let num = row_indexes[rng.gen_range(0..row_indexes.len())] as u64;
|
||||
let header_hash =
|
||||
ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone())
|
||||
ProviderFactory::new(open_db_read_only(db_path, db_args)?, chain.clone())
|
||||
.header_by_number(num)?
|
||||
.ok_or(ProviderError::HeaderNotFound(num.into()))?
|
||||
.hash_slow();
|
||||
|
||||
bench(
|
||||
BenchKind::RandomHash,
|
||||
(open_db_read_only(db_path, log_level)?, chain.clone()),
|
||||
(open_db_read_only(db_path, db_args)?, chain.clone()),
|
||||
SnapshotSegment::Headers,
|
||||
filters,
|
||||
compression,
|
||||
|
||||
@ -2,7 +2,11 @@ use clap::{builder::RangedU64ValueParser, Parser};
|
||||
use human_bytes::human_bytes;
|
||||
use itertools::Itertools;
|
||||
use rayon::iter::{IntoParallelIterator, ParallelIterator};
|
||||
use reth_db::{database::Database, open_db_read_only, DatabaseEnv};
|
||||
use reth_db::{
|
||||
database::Database,
|
||||
mdbx::{DatabaseArguments, MaxReadTransactionDuration},
|
||||
open_db_read_only, DatabaseEnv,
|
||||
};
|
||||
use reth_interfaces::db::LogLevel;
|
||||
use reth_nippy_jar::{NippyJar, NippyJarCursor};
|
||||
use reth_primitives::{
|
||||
@ -89,7 +93,11 @@ impl Command {
|
||||
);
|
||||
|
||||
{
|
||||
let db = open_db_read_only(db_path, None)?;
|
||||
let db = open_db_read_only(
|
||||
db_path,
|
||||
DatabaseArguments::default()
|
||||
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)),
|
||||
)?;
|
||||
let factory = Arc::new(ProviderFactory::new(db, chain.clone()));
|
||||
|
||||
if !self.only_bench {
|
||||
|
||||
@ -14,6 +14,7 @@ use reth_provider::{
|
||||
TransactionsProvider, TransactionsProviderExt,
|
||||
};
|
||||
|
||||
use reth_db::mdbx::DatabaseArguments;
|
||||
use std::{
|
||||
path::{Path, PathBuf},
|
||||
sync::Arc,
|
||||
@ -29,7 +30,9 @@ impl Command {
|
||||
inclusion_filter: InclusionFilter,
|
||||
phf: Option<PerfectHashingFunction>,
|
||||
) -> eyre::Result<()> {
|
||||
let factory = ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone());
|
||||
let db_args = DatabaseArguments::default().log_level(log_level);
|
||||
|
||||
let factory = ProviderFactory::new(open_db_read_only(db_path, db_args)?, chain.clone());
|
||||
let provider = factory.provider()?;
|
||||
let tip = provider.last_block_number()?;
|
||||
let block_range =
|
||||
@ -43,7 +46,7 @@ impl Command {
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
let tx_range = ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone())
|
||||
let tx_range = ProviderFactory::new(open_db_read_only(db_path, db_args)?, chain.clone())
|
||||
.provider()?
|
||||
.transaction_range_by_block_range(block_range.clone())?;
|
||||
|
||||
@ -64,7 +67,7 @@ impl Command {
|
||||
for bench_kind in [BenchKind::Walk, BenchKind::RandomAll] {
|
||||
bench(
|
||||
bench_kind,
|
||||
(open_db_read_only(db_path, log_level)?, chain.clone()),
|
||||
(open_db_read_only(db_path, db_args)?, chain.clone()),
|
||||
SnapshotSegment::Receipts,
|
||||
filters,
|
||||
compression,
|
||||
@ -95,7 +98,7 @@ impl Command {
|
||||
let num = row_indexes[rng.gen_range(0..row_indexes.len())];
|
||||
bench(
|
||||
BenchKind::RandomOne,
|
||||
(open_db_read_only(db_path, log_level)?, chain.clone()),
|
||||
(open_db_read_only(db_path, db_args)?, chain.clone()),
|
||||
SnapshotSegment::Receipts,
|
||||
filters,
|
||||
compression,
|
||||
@ -115,15 +118,14 @@ impl Command {
|
||||
// BENCHMARK QUERYING A RANDOM RECEIPT BY HASH
|
||||
{
|
||||
let num = row_indexes[rng.gen_range(0..row_indexes.len())] as u64;
|
||||
let tx_hash =
|
||||
ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone())
|
||||
.transaction_by_id(num)?
|
||||
.ok_or(ProviderError::ReceiptNotFound(num.into()))?
|
||||
.hash();
|
||||
let tx_hash = ProviderFactory::new(open_db_read_only(db_path, db_args)?, chain.clone())
|
||||
.transaction_by_id(num)?
|
||||
.ok_or(ProviderError::ReceiptNotFound(num.into()))?
|
||||
.hash();
|
||||
|
||||
bench(
|
||||
BenchKind::RandomHash,
|
||||
(open_db_read_only(db_path, log_level)?, chain.clone()),
|
||||
(open_db_read_only(db_path, db_args)?, chain.clone()),
|
||||
SnapshotSegment::Receipts,
|
||||
filters,
|
||||
compression,
|
||||
|
||||
@ -14,6 +14,7 @@ use reth_provider::{
|
||||
TransactionsProvider, TransactionsProviderExt,
|
||||
};
|
||||
|
||||
use reth_db::mdbx::DatabaseArguments;
|
||||
use std::{
|
||||
path::{Path, PathBuf},
|
||||
sync::Arc,
|
||||
@ -29,7 +30,9 @@ impl Command {
|
||||
inclusion_filter: InclusionFilter,
|
||||
phf: Option<PerfectHashingFunction>,
|
||||
) -> eyre::Result<()> {
|
||||
let factory = ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone());
|
||||
let db_args = DatabaseArguments::default().log_level(log_level);
|
||||
|
||||
let factory = ProviderFactory::new(open_db_read_only(db_path, db_args)?, chain.clone());
|
||||
let provider = factory.provider()?;
|
||||
let tip = provider.last_block_number()?;
|
||||
let block_range =
|
||||
@ -61,7 +64,7 @@ impl Command {
|
||||
for bench_kind in [BenchKind::Walk, BenchKind::RandomAll] {
|
||||
bench(
|
||||
bench_kind,
|
||||
(open_db_read_only(db_path, log_level)?, chain.clone()),
|
||||
(open_db_read_only(db_path, db_args)?, chain.clone()),
|
||||
SnapshotSegment::Transactions,
|
||||
filters,
|
||||
compression,
|
||||
@ -93,7 +96,7 @@ impl Command {
|
||||
let num = row_indexes[rng.gen_range(0..row_indexes.len())];
|
||||
bench(
|
||||
BenchKind::RandomOne,
|
||||
(open_db_read_only(db_path, log_level)?, chain.clone()),
|
||||
(open_db_read_only(db_path, db_args)?, chain.clone()),
|
||||
SnapshotSegment::Transactions,
|
||||
filters,
|
||||
compression,
|
||||
@ -115,14 +118,14 @@ impl Command {
|
||||
{
|
||||
let num = row_indexes[rng.gen_range(0..row_indexes.len())] as u64;
|
||||
let transaction_hash =
|
||||
ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone())
|
||||
ProviderFactory::new(open_db_read_only(db_path, db_args)?, chain.clone())
|
||||
.transaction_by_id(num)?
|
||||
.ok_or(ProviderError::TransactionNotFound(num.into()))?
|
||||
.hash();
|
||||
|
||||
bench(
|
||||
BenchKind::RandomHash,
|
||||
(open_db_read_only(db_path, log_level)?, chain.clone()),
|
||||
(open_db_read_only(db_path, db_args)?, chain.clone()),
|
||||
SnapshotSegment::Transactions,
|
||||
filters,
|
||||
compression,
|
||||
|
||||
@ -11,7 +11,7 @@ use reth_beacon_consensus::BeaconConsensus;
|
||||
use reth_blockchain_tree::{
|
||||
BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals,
|
||||
};
|
||||
use reth_db::{init_db, DatabaseEnv};
|
||||
use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv};
|
||||
use reth_interfaces::{consensus::Consensus, RethResult};
|
||||
use reth_node_api::PayloadBuilderAttributes;
|
||||
use reth_payload_builder::database::CachedReads;
|
||||
@ -150,7 +150,8 @@ impl Command {
|
||||
fs::create_dir_all(&db_path)?;
|
||||
|
||||
// initialize the database
|
||||
let db = Arc::new(init_db(db_path, self.db.log_level)?);
|
||||
let db =
|
||||
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
|
||||
let provider_factory = ProviderFactory::new(Arc::clone(&db), Arc::clone(&self.chain));
|
||||
|
||||
let consensus: Arc<dyn Consensus> = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain)));
|
||||
|
||||
@ -16,7 +16,7 @@ use clap::Parser;
|
||||
use futures::{stream::select as stream_select, StreamExt};
|
||||
use reth_beacon_consensus::BeaconConsensus;
|
||||
use reth_config::Config;
|
||||
use reth_db::{database::Database, init_db, DatabaseEnv};
|
||||
use reth_db::{database::Database, init_db, mdbx::DatabaseArguments, DatabaseEnv};
|
||||
use reth_downloaders::{
|
||||
bodies::bodies::BodiesDownloaderBuilder,
|
||||
headers::reverse_headers::ReverseHeadersDownloaderBuilder,
|
||||
@ -204,7 +204,8 @@ impl Command {
|
||||
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
|
||||
let db_path = data_dir.db_path();
|
||||
fs::create_dir_all(&db_path)?;
|
||||
let db = Arc::new(init_db(db_path, self.db.log_level)?);
|
||||
let db =
|
||||
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
|
||||
let provider_factory = ProviderFactory::new(db.clone(), self.chain.clone());
|
||||
|
||||
debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis");
|
||||
|
||||
@ -13,7 +13,7 @@ use crate::{
|
||||
use backon::{ConstantBuilder, Retryable};
|
||||
use clap::Parser;
|
||||
use reth_config::Config;
|
||||
use reth_db::{init_db, DatabaseEnv};
|
||||
use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv};
|
||||
use reth_interfaces::executor::BlockValidationError;
|
||||
use reth_network::NetworkHandle;
|
||||
use reth_network_api::NetworkInfo;
|
||||
@ -112,7 +112,8 @@ impl Command {
|
||||
fs::create_dir_all(&db_path)?;
|
||||
|
||||
// initialize the database
|
||||
let db = Arc::new(init_db(db_path, self.db.log_level)?);
|
||||
let db =
|
||||
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
|
||||
let factory = ProviderFactory::new(&db, self.chain.clone());
|
||||
let provider = factory.provider()?;
|
||||
|
||||
|
||||
@ -14,7 +14,9 @@ use backon::{ConstantBuilder, Retryable};
|
||||
use clap::Parser;
|
||||
use reth_beacon_consensus::BeaconConsensus;
|
||||
use reth_config::Config;
|
||||
use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx, DatabaseEnv};
|
||||
use reth_db::{
|
||||
cursor::DbCursorRO, init_db, mdbx::DatabaseArguments, tables, transaction::DbTx, DatabaseEnv,
|
||||
};
|
||||
use reth_interfaces::{consensus::Consensus, p2p::full_block::FullBlockClient};
|
||||
use reth_network::NetworkHandle;
|
||||
use reth_network_api::NetworkInfo;
|
||||
@ -121,7 +123,8 @@ impl Command {
|
||||
fs::create_dir_all(&db_path)?;
|
||||
|
||||
// initialize the database
|
||||
let db = Arc::new(init_db(db_path, self.db.log_level)?);
|
||||
let db =
|
||||
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
|
||||
let factory = ProviderFactory::new(&db, self.chain.clone());
|
||||
let provider_rw = factory.provider_rw()?;
|
||||
|
||||
|
||||
@ -17,7 +17,7 @@ use reth_blockchain_tree::{
|
||||
BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals,
|
||||
};
|
||||
use reth_config::Config;
|
||||
use reth_db::{init_db, DatabaseEnv};
|
||||
use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv};
|
||||
use reth_interfaces::consensus::Consensus;
|
||||
use reth_network::NetworkHandle;
|
||||
use reth_network_api::NetworkInfo;
|
||||
@ -133,7 +133,8 @@ impl Command {
|
||||
fs::create_dir_all(&db_path)?;
|
||||
|
||||
// Initialize the database
|
||||
let db = Arc::new(init_db(db_path, self.db.log_level)?);
|
||||
let db =
|
||||
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
|
||||
let provider_factory = ProviderFactory::new(db.clone(), self.chain.clone());
|
||||
|
||||
let consensus: Arc<dyn Consensus> = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain)));
|
||||
|
||||
@ -10,7 +10,7 @@ use eyre::Context;
|
||||
use futures::{Stream, StreamExt};
|
||||
use reth_beacon_consensus::BeaconConsensus;
|
||||
use reth_config::Config;
|
||||
use reth_db::{database::Database, init_db};
|
||||
use reth_db::{database::Database, init_db, mdbx::DatabaseArguments};
|
||||
use reth_downloaders::{
|
||||
bodies::bodies::BodiesDownloaderBuilder, file_client::FileClient,
|
||||
headers::reverse_headers::ReverseHeadersDownloaderBuilder,
|
||||
@ -89,7 +89,8 @@ impl ImportCommand {
|
||||
let db_path = data_dir.db_path();
|
||||
|
||||
info!(target: "reth::cli", path = ?db_path, "Opening database");
|
||||
let db = Arc::new(init_db(db_path, self.db.log_level)?);
|
||||
let db =
|
||||
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
|
||||
info!(target: "reth::cli", "Database opened");
|
||||
let provider_factory = ProviderFactory::new(db.clone(), self.chain.clone());
|
||||
|
||||
|
||||
@ -1,19 +1,18 @@
|
||||
//! Command that initializes the node from a genesis file.
|
||||
|
||||
use crate::init::init_genesis;
|
||||
use clap::Parser;
|
||||
use reth_db::init_db;
|
||||
use reth_primitives::ChainSpec;
|
||||
use std::sync::Arc;
|
||||
use tracing::info;
|
||||
|
||||
use crate::{
|
||||
args::{
|
||||
utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
|
||||
DatabaseArgs,
|
||||
},
|
||||
dirs::{DataDirPath, MaybePlatformPath},
|
||||
init::init_genesis,
|
||||
};
|
||||
use clap::Parser;
|
||||
use reth_db::{init_db, mdbx::DatabaseArguments};
|
||||
use reth_primitives::ChainSpec;
|
||||
use std::sync::Arc;
|
||||
use tracing::info;
|
||||
|
||||
/// Initializes the database with the genesis block.
|
||||
#[derive(Debug, Parser)]
|
||||
@ -53,7 +52,8 @@ impl InitCommand {
|
||||
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
|
||||
let db_path = data_dir.db_path();
|
||||
info!(target: "reth::cli", path = ?db_path, "Opening database");
|
||||
let db = Arc::new(init_db(&db_path, self.db.log_level)?);
|
||||
let db =
|
||||
Arc::new(init_db(&db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
|
||||
info!(target: "reth::cli", "Database opened");
|
||||
|
||||
info!(target: "reth::cli", "Writing genesis block");
|
||||
|
||||
@ -12,7 +12,7 @@ use crate::{
|
||||
use backon::{ConstantBuilder, Retryable};
|
||||
use clap::{Parser, Subcommand};
|
||||
use reth_config::Config;
|
||||
use reth_db::open_db;
|
||||
use reth_db::{mdbx::DatabaseArguments, open_db};
|
||||
use reth_discv4::NatResolver;
|
||||
use reth_interfaces::p2p::bodies::client::BodiesClient;
|
||||
use reth_primitives::{BlockHashOrNumber, ChainSpec, NodeRecord};
|
||||
@ -100,7 +100,10 @@ impl Command {
|
||||
/// Execute `p2p` command
|
||||
pub async fn execute(&self) -> eyre::Result<()> {
|
||||
let tempdir = tempfile::TempDir::new()?;
|
||||
let noop_db = Arc::new(open_db(&tempdir.into_path(), self.db.log_level)?);
|
||||
let noop_db = Arc::new(open_db(
|
||||
&tempdir.into_path(),
|
||||
DatabaseArguments::default().log_level(self.db.log_level),
|
||||
)?);
|
||||
|
||||
// add network name to data dir
|
||||
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
|
||||
|
||||
@ -48,7 +48,7 @@ impl Command {
|
||||
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
|
||||
let db_path = data_dir.db_path();
|
||||
fs::create_dir_all(&db_path)?;
|
||||
let db = Arc::new(init_db(db_path, None)?);
|
||||
let db = Arc::new(init_db(db_path, Default::default())?);
|
||||
|
||||
debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis");
|
||||
init_genesis(db.clone(), self.chain.clone())?;
|
||||
|
||||
@ -10,7 +10,9 @@ use crate::{
|
||||
utils::DbTool,
|
||||
};
|
||||
use clap::Parser;
|
||||
use reth_db::{database::Database, open_db, tables, transaction::DbTxMut, DatabaseEnv};
|
||||
use reth_db::{
|
||||
database::Database, mdbx::DatabaseArguments, open_db, tables, transaction::DbTxMut, DatabaseEnv,
|
||||
};
|
||||
use reth_primitives::{fs, stage::StageId, ChainSpec};
|
||||
use std::sync::Arc;
|
||||
use tracing::info;
|
||||
@ -54,7 +56,8 @@ impl Command {
|
||||
let db_path = data_dir.db_path();
|
||||
fs::create_dir_all(&db_path)?;
|
||||
|
||||
let db = open_db(db_path.as_ref(), self.db.log_level)?;
|
||||
let db =
|
||||
open_db(db_path.as_ref(), DatabaseArguments::default().log_level(self.db.log_level))?;
|
||||
|
||||
let tool = DbTool::new(&db, self.chain.clone())?;
|
||||
|
||||
|
||||
@ -29,6 +29,7 @@ use execution::dump_execution_stage;
|
||||
|
||||
mod merkle;
|
||||
use merkle::dump_merkle_stage;
|
||||
use reth_db::mdbx::DatabaseArguments;
|
||||
|
||||
/// `reth dump-stage` command
|
||||
#[derive(Debug, Parser)]
|
||||
@ -101,7 +102,8 @@ impl Command {
|
||||
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
|
||||
let db_path = data_dir.db_path();
|
||||
info!(target: "reth::cli", path = ?db_path, "Opening database");
|
||||
let db = Arc::new(init_db(db_path, self.db.log_level)?);
|
||||
let db =
|
||||
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
|
||||
info!(target: "reth::cli", "Database opened");
|
||||
|
||||
let tool = DbTool::new(&db, self.chain.clone())?;
|
||||
@ -137,7 +139,7 @@ pub(crate) fn setup<DB: Database>(
|
||||
|
||||
info!(target: "reth::cli", ?output_db, "Creating separate db");
|
||||
|
||||
let output_db = init_db(output_db, None)?;
|
||||
let output_db = init_db(output_db, Default::default())?;
|
||||
|
||||
output_db.update(|tx| {
|
||||
tx.import_table_with_range::<tables::BlockBodyIndices, _>(
|
||||
|
||||
@ -15,7 +15,7 @@ use crate::{
|
||||
use clap::Parser;
|
||||
use reth_beacon_consensus::BeaconConsensus;
|
||||
use reth_config::Config;
|
||||
use reth_db::init_db;
|
||||
use reth_db::{init_db, mdbx::DatabaseArguments};
|
||||
use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder;
|
||||
|
||||
use reth_primitives::ChainSpec;
|
||||
@ -123,7 +123,8 @@ impl Command {
|
||||
let db_path = data_dir.db_path();
|
||||
|
||||
info!(target: "reth::cli", path = ?db_path, "Opening database");
|
||||
let db = Arc::new(init_db(db_path, self.db.log_level)?);
|
||||
let db =
|
||||
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
|
||||
info!(target: "reth::cli", "Database opened");
|
||||
|
||||
let factory = ProviderFactory::new(Arc::clone(&db), self.chain.clone());
|
||||
|
||||
@ -1,11 +1,5 @@
|
||||
//! Unwinding a certain block range
|
||||
|
||||
use clap::{Parser, Subcommand};
|
||||
use reth_db::{cursor::DbCursorRO, database::Database, open_db, tables, transaction::DbTx};
|
||||
use reth_primitives::{BlockHashOrNumber, ChainSpec};
|
||||
use reth_provider::{BlockExecutionWriter, ProviderFactory};
|
||||
use std::{ops::RangeInclusive, sync::Arc};
|
||||
|
||||
use crate::{
|
||||
args::{
|
||||
utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
|
||||
@ -13,6 +7,15 @@ use crate::{
|
||||
},
|
||||
dirs::{DataDirPath, MaybePlatformPath},
|
||||
};
|
||||
use clap::{Parser, Subcommand};
|
||||
use reth_db::{
|
||||
cursor::DbCursorRO, database::Database, mdbx::DatabaseArguments, open_db, tables,
|
||||
transaction::DbTx,
|
||||
};
|
||||
use reth_primitives::{BlockHashOrNumber, ChainSpec};
|
||||
use reth_provider::{BlockExecutionWriter, ProviderFactory};
|
||||
use std::{ops::RangeInclusive, sync::Arc};
|
||||
|
||||
/// `reth stage unwind` command
|
||||
#[derive(Debug, Parser)]
|
||||
pub struct Command {
|
||||
@ -56,7 +59,8 @@ impl Command {
|
||||
eyre::bail!("Database {db_path:?} does not exist.")
|
||||
}
|
||||
|
||||
let db = open_db(db_path.as_ref(), self.db.log_level)?;
|
||||
let db =
|
||||
open_db(db_path.as_ref(), DatabaseArguments::default().log_level(self.db.log_level))?;
|
||||
|
||||
let range = self.command.unwind_range(&db)?;
|
||||
|
||||
|
||||
@ -16,7 +16,7 @@ workspace = true
|
||||
reth-primitives.workspace = true
|
||||
reth-interfaces.workspace = true
|
||||
reth-codecs.workspace = true
|
||||
reth-libmdbx = { workspace = true, optional = true, features = ["return-borrowed"] }
|
||||
reth-libmdbx = { workspace = true, optional = true, features = ["return-borrowed", "read-tx-timeouts"] }
|
||||
reth-nippy-jar.workspace = true
|
||||
reth-tracing.workspace = true
|
||||
|
||||
|
||||
@ -12,7 +12,8 @@ use metrics::{gauge, Label};
|
||||
use once_cell::sync::Lazy;
|
||||
use reth_interfaces::db::LogLevel;
|
||||
use reth_libmdbx::{
|
||||
DatabaseFlags, Environment, EnvironmentFlags, Geometry, Mode, PageSize, SyncMode, RO, RW,
|
||||
DatabaseFlags, Environment, EnvironmentFlags, Geometry, MaxReadTransactionDuration, Mode,
|
||||
PageSize, SyncMode, RO, RW,
|
||||
};
|
||||
use reth_tracing::tracing::error;
|
||||
use std::{ops::Deref, path::Path};
|
||||
@ -54,6 +55,32 @@ pub enum DatabaseEnvKind {
|
||||
RW,
|
||||
}
|
||||
|
||||
/// Arguments for database initialization.
|
||||
#[derive(Debug, Default, Clone, Copy)]
|
||||
pub struct DatabaseArguments {
|
||||
/// Database log level. If [None], the default value is used.
|
||||
log_level: Option<LogLevel>,
|
||||
/// Maximum duration of a read transaction. If [None], the default value is used.
|
||||
max_read_transaction_duration: Option<MaxReadTransactionDuration>,
|
||||
}
|
||||
|
||||
impl DatabaseArguments {
|
||||
/// Set the log level.
|
||||
pub fn log_level(mut self, log_level: Option<LogLevel>) -> Self {
|
||||
self.log_level = log_level;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the maximum duration of a read transaction.
|
||||
pub fn max_read_transaction_duration(
|
||||
mut self,
|
||||
max_read_transaction_duration: Option<MaxReadTransactionDuration>,
|
||||
) -> Self {
|
||||
self.max_read_transaction_duration = max_read_transaction_duration;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrapper for the libmdbx environment: [Environment]
|
||||
#[derive(Debug)]
|
||||
pub struct DatabaseEnv {
|
||||
@ -164,7 +191,7 @@ impl DatabaseEnv {
|
||||
pub fn open(
|
||||
path: &Path,
|
||||
kind: DatabaseEnvKind,
|
||||
log_level: Option<LogLevel>,
|
||||
args: DatabaseArguments,
|
||||
) -> Result<DatabaseEnv, DatabaseError> {
|
||||
let mut inner_env = Environment::builder();
|
||||
|
||||
@ -250,7 +277,7 @@ impl DatabaseEnv {
|
||||
// https://github.com/paradigmxyz/reth/blob/fa2b9b685ed9787636d962f4366caf34a9186e66/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.c#L16017.
|
||||
inner_env.set_rp_augment_limit(256 * 1024);
|
||||
|
||||
if let Some(log_level) = log_level {
|
||||
if let Some(log_level) = args.log_level {
|
||||
// Levels higher than [LogLevel::Notice] require libmdbx built with `MDBX_DEBUG` option.
|
||||
let is_log_level_available = if cfg!(debug_assertions) {
|
||||
true
|
||||
@ -276,6 +303,10 @@ impl DatabaseEnv {
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(max_read_transaction_duration) = args.max_read_transaction_duration {
|
||||
inner_env.set_max_read_transaction_duration(max_read_transaction_duration);
|
||||
}
|
||||
|
||||
let env = DatabaseEnv {
|
||||
inner: inner_env.open(path).map_err(|e| DatabaseError::Open(e.into()))?,
|
||||
with_metrics: false,
|
||||
@ -346,7 +377,8 @@ mod tests {
|
||||
|
||||
/// Create database for testing with specified path
|
||||
fn create_test_db_with_path(kind: DatabaseEnvKind, path: &Path) -> DatabaseEnv {
|
||||
let env = DatabaseEnv::open(path, kind, None).expect(ERROR_DB_CREATION);
|
||||
let env =
|
||||
DatabaseEnv::open(path, kind, DatabaseArguments::default()).expect(ERROR_DB_CREATION);
|
||||
env.create_tables().expect(ERROR_TABLE_CREATION);
|
||||
env
|
||||
}
|
||||
@ -971,7 +1003,8 @@ mod tests {
|
||||
assert_eq!(result.expect(ERROR_RETURN_VALUE), 200);
|
||||
}
|
||||
|
||||
let env = DatabaseEnv::open(&path, DatabaseEnvKind::RO, None).expect(ERROR_DB_CREATION);
|
||||
let env = DatabaseEnv::open(&path, DatabaseEnvKind::RO, Default::default())
|
||||
.expect(ERROR_DB_CREATION);
|
||||
|
||||
// GET
|
||||
let result =
|
||||
|
||||
@ -87,13 +87,13 @@ pub use utils::is_database_empty;
|
||||
#[cfg(feature = "mdbx")]
|
||||
pub use mdbx::{DatabaseEnv, DatabaseEnvKind};
|
||||
|
||||
use crate::mdbx::DatabaseArguments;
|
||||
use eyre::WrapErr;
|
||||
use reth_interfaces::db::LogLevel;
|
||||
use std::path::Path;
|
||||
|
||||
/// Opens up an existing database or creates a new one at the specified path. Creates tables if
|
||||
/// necessary. Read/Write mode.
|
||||
pub fn init_db<P: AsRef<Path>>(path: P, log_level: Option<LogLevel>) -> eyre::Result<DatabaseEnv> {
|
||||
pub fn init_db<P: AsRef<Path>>(path: P, args: DatabaseArguments) -> eyre::Result<DatabaseEnv> {
|
||||
use crate::version::{check_db_version_file, create_db_version_file, DatabaseVersionError};
|
||||
|
||||
let rpath = path.as_ref();
|
||||
@ -110,7 +110,7 @@ pub fn init_db<P: AsRef<Path>>(path: P, log_level: Option<LogLevel>) -> eyre::Re
|
||||
}
|
||||
#[cfg(feature = "mdbx")]
|
||||
{
|
||||
let db = DatabaseEnv::open(rpath, DatabaseEnvKind::RW, log_level)?;
|
||||
let db = DatabaseEnv::open(rpath, DatabaseEnvKind::RW, args)?;
|
||||
db.create_tables()?;
|
||||
Ok(db)
|
||||
}
|
||||
@ -121,10 +121,10 @@ pub fn init_db<P: AsRef<Path>>(path: P, log_level: Option<LogLevel>) -> eyre::Re
|
||||
}
|
||||
|
||||
/// Opens up an existing database. Read only mode. It doesn't create it or create tables if missing.
|
||||
pub fn open_db_read_only(path: &Path, log_level: Option<LogLevel>) -> eyre::Result<DatabaseEnv> {
|
||||
pub fn open_db_read_only(path: &Path, args: DatabaseArguments) -> eyre::Result<DatabaseEnv> {
|
||||
#[cfg(feature = "mdbx")]
|
||||
{
|
||||
DatabaseEnv::open(path, DatabaseEnvKind::RO, log_level)
|
||||
DatabaseEnv::open(path, DatabaseEnvKind::RO, args)
|
||||
.with_context(|| format!("Could not open database at path: {}", path.display()))
|
||||
}
|
||||
#[cfg(not(feature = "mdbx"))]
|
||||
@ -135,10 +135,10 @@ pub fn open_db_read_only(path: &Path, log_level: Option<LogLevel>) -> eyre::Resu
|
||||
|
||||
/// Opens up an existing database. Read/Write mode with WriteMap enabled. It doesn't create it or
|
||||
/// create tables if missing.
|
||||
pub fn open_db(path: &Path, log_level: Option<LogLevel>) -> eyre::Result<DatabaseEnv> {
|
||||
pub fn open_db(path: &Path, args: DatabaseArguments) -> eyre::Result<DatabaseEnv> {
|
||||
#[cfg(feature = "mdbx")]
|
||||
{
|
||||
DatabaseEnv::open(path, DatabaseEnvKind::RW, log_level)
|
||||
DatabaseEnv::open(path, DatabaseEnvKind::RW, args)
|
||||
.with_context(|| format!("Could not open database at path: {}", path.display()))
|
||||
}
|
||||
#[cfg(not(feature = "mdbx"))]
|
||||
@ -155,6 +155,7 @@ pub mod test_utils {
|
||||
database::Database,
|
||||
database_metrics::{DatabaseMetadata, DatabaseMetadataValue, DatabaseMetrics},
|
||||
};
|
||||
use reth_libmdbx::MaxReadTransactionDuration;
|
||||
use reth_primitives::fs;
|
||||
use std::{path::PathBuf, sync::Arc};
|
||||
|
||||
@ -235,7 +236,12 @@ pub mod test_utils {
|
||||
let path = tempdir_path();
|
||||
let emsg = format!("{}: {:?}", ERROR_DB_CREATION, path);
|
||||
|
||||
let db = init_db(&path, None).expect(&emsg);
|
||||
let db = init_db(
|
||||
&path,
|
||||
DatabaseArguments::default()
|
||||
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)),
|
||||
)
|
||||
.expect(&emsg);
|
||||
|
||||
Arc::new(TempDatabase { db: Some(db), path })
|
||||
}
|
||||
@ -243,17 +249,25 @@ pub mod test_utils {
|
||||
/// Create read/write database for testing
|
||||
pub fn create_test_rw_db_with_path<P: AsRef<Path>>(path: P) -> Arc<TempDatabase<DatabaseEnv>> {
|
||||
let path = path.as_ref().to_path_buf();
|
||||
let db = init_db(path.as_path(), None).expect(ERROR_DB_CREATION);
|
||||
let db = init_db(
|
||||
path.as_path(),
|
||||
DatabaseArguments::default()
|
||||
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)),
|
||||
)
|
||||
.expect(ERROR_DB_CREATION);
|
||||
Arc::new(TempDatabase { db: Some(db), path })
|
||||
}
|
||||
|
||||
/// Create read only database for testing
|
||||
pub fn create_test_ro_db() -> Arc<TempDatabase<DatabaseEnv>> {
|
||||
let args = DatabaseArguments::default()
|
||||
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded));
|
||||
|
||||
let path = tempdir_path();
|
||||
{
|
||||
init_db(path.as_path(), None).expect(ERROR_DB_CREATION);
|
||||
init_db(path.as_path(), args).expect(ERROR_DB_CREATION);
|
||||
}
|
||||
let db = open_db_read_only(path.as_path(), None).expect(ERROR_DB_OPEN);
|
||||
let db = open_db_read_only(path.as_path(), args).expect(ERROR_DB_OPEN);
|
||||
Arc::new(TempDatabase { db: Some(db), path })
|
||||
}
|
||||
}
|
||||
@ -262,9 +276,11 @@ pub mod test_utils {
|
||||
mod tests {
|
||||
use crate::{
|
||||
init_db,
|
||||
mdbx::DatabaseArguments,
|
||||
version::{db_version_file_path, DatabaseVersionError},
|
||||
};
|
||||
use assert_matches::assert_matches;
|
||||
use reth_libmdbx::MaxReadTransactionDuration;
|
||||
use reth_primitives::fs;
|
||||
use tempfile::tempdir;
|
||||
|
||||
@ -272,22 +288,25 @@ mod tests {
|
||||
fn db_version() {
|
||||
let path = tempdir().unwrap();
|
||||
|
||||
let args = DatabaseArguments::default()
|
||||
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded));
|
||||
|
||||
// Database is empty
|
||||
{
|
||||
let db = init_db(&path, None);
|
||||
let db = init_db(&path, args);
|
||||
assert_matches!(db, Ok(_));
|
||||
}
|
||||
|
||||
// Database is not empty, current version is the same as in the file
|
||||
{
|
||||
let db = init_db(&path, None);
|
||||
let db = init_db(&path, args);
|
||||
assert_matches!(db, Ok(_));
|
||||
}
|
||||
|
||||
// Database is not empty, version file is malformed
|
||||
{
|
||||
fs::write(path.path().join(db_version_file_path(&path)), "invalid-version").unwrap();
|
||||
let db = init_db(&path, None);
|
||||
let db = init_db(&path, args);
|
||||
assert!(db.is_err());
|
||||
assert_matches!(
|
||||
db.unwrap_err().downcast_ref::<DatabaseVersionError>(),
|
||||
@ -298,7 +317,7 @@ mod tests {
|
||||
// Database is not empty, version file contains not matching version
|
||||
{
|
||||
fs::write(path.path().join(db_version_file_path(&path)), "0").unwrap();
|
||||
let db = init_db(&path, None);
|
||||
let db = init_db(&path, args);
|
||||
assert!(db.is_err());
|
||||
assert_matches!(
|
||||
db.unwrap_err().downcast_ref::<DatabaseVersionError>(),
|
||||
|
||||
@ -22,6 +22,8 @@ indexmap = "2"
|
||||
libc = "0.2"
|
||||
parking_lot.workspace = true
|
||||
thiserror.workspace = true
|
||||
dashmap = { version = "5.5.3", features = ["inline"], optional = true }
|
||||
tracing = { workspace = true, optional = true }
|
||||
|
||||
ffi = { package = "reth-mdbx-sys", path = "./mdbx-sys" }
|
||||
|
||||
@ -31,6 +33,7 @@ libffi = "3.2.0"
|
||||
[features]
|
||||
default = []
|
||||
return-borrowed = []
|
||||
read-tx-timeouts = ["dashmap", "dashmap/inline", "tracing"]
|
||||
|
||||
[dev-dependencies]
|
||||
pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] }
|
||||
|
||||
@ -41,7 +41,7 @@ impl<'tx> TableObject for Cow<'tx, [u8]> {
|
||||
|
||||
#[cfg(not(feature = "return-borrowed"))]
|
||||
{
|
||||
let is_dirty = (!K::ONLY_CLEAN) &&
|
||||
let is_dirty = (!K::IS_READ_ONLY) &&
|
||||
crate::error::mdbx_result(ffi::mdbx_is_dirty(_txn, data_val.iov_base))?;
|
||||
|
||||
Ok(if is_dirty { Cow::Owned(s.to_vec()) } else { Cow::Borrowed(s) })
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
use crate::{
|
||||
error::{mdbx_result, Error, Result},
|
||||
error::{mdbx_result, mdbx_result_with_tx_kind, Error, Result},
|
||||
flags::*,
|
||||
mdbx_try_optional,
|
||||
transaction::{TransactionKind, RW},
|
||||
@ -30,7 +30,11 @@ where
|
||||
pub(crate) fn new(txn: Transaction<K>, dbi: ffi::MDBX_dbi) -> Result<Self> {
|
||||
let mut cursor: *mut ffi::MDBX_cursor = ptr::null_mut();
|
||||
unsafe {
|
||||
mdbx_result(txn.txn_execute(|txn| ffi::mdbx_cursor_open(txn, dbi, &mut cursor)))?;
|
||||
mdbx_result_with_tx_kind::<K>(
|
||||
txn.txn_execute(|txn| ffi::mdbx_cursor_open(txn, dbi, &mut cursor)),
|
||||
txn.txn(),
|
||||
txn.env().txn_manager(),
|
||||
)?;
|
||||
}
|
||||
Ok(Self { txn, cursor })
|
||||
}
|
||||
@ -43,7 +47,7 @@ where
|
||||
|
||||
let s = Self { txn: other.txn.clone(), cursor };
|
||||
|
||||
mdbx_result(res)?;
|
||||
mdbx_result_with_tx_kind::<K>(res, s.txn.txn(), s.txn.env().txn_manager())?;
|
||||
|
||||
Ok(s)
|
||||
}
|
||||
@ -91,12 +95,11 @@ where
|
||||
let key_ptr = key_val.iov_base;
|
||||
let data_ptr = data_val.iov_base;
|
||||
self.txn.txn_execute(|txn| {
|
||||
let v = mdbx_result(ffi::mdbx_cursor_get(
|
||||
self.cursor,
|
||||
&mut key_val,
|
||||
&mut data_val,
|
||||
op,
|
||||
))?;
|
||||
let v = mdbx_result_with_tx_kind::<K>(
|
||||
ffi::mdbx_cursor_get(self.cursor, &mut key_val, &mut data_val, op),
|
||||
txn,
|
||||
self.txn.env().txn_manager(),
|
||||
)?;
|
||||
assert_ne!(data_ptr, data_val.iov_base);
|
||||
let key_out = {
|
||||
// MDBX wrote in new key
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
use crate::{
|
||||
error::{mdbx_result, Result},
|
||||
error::{mdbx_result_with_tx_kind, Result},
|
||||
transaction::TransactionKind,
|
||||
Environment, Transaction,
|
||||
};
|
||||
@ -30,9 +30,13 @@ impl Database {
|
||||
let c_name = name.map(|n| CString::new(n).unwrap());
|
||||
let name_ptr = if let Some(c_name) = &c_name { c_name.as_ptr() } else { ptr::null() };
|
||||
let mut dbi: ffi::MDBX_dbi = 0;
|
||||
mdbx_result(
|
||||
txn.txn_execute(|txn| unsafe { ffi::mdbx_dbi_open(txn, name_ptr, flags, &mut dbi) }),
|
||||
)?;
|
||||
txn.txn_execute(|txn_ptr| {
|
||||
mdbx_result_with_tx_kind::<K>(
|
||||
unsafe { ffi::mdbx_dbi_open(txn_ptr, name_ptr, flags, &mut dbi) },
|
||||
txn_ptr,
|
||||
txn.env().txn_manager(),
|
||||
)
|
||||
})?;
|
||||
Ok(Self::new_from_ptr(dbi, txn.env().clone()))
|
||||
}
|
||||
|
||||
|
||||
@ -2,8 +2,9 @@ use crate::{
|
||||
database::Database,
|
||||
error::{mdbx_result, Error, Result},
|
||||
flags::EnvironmentFlags,
|
||||
transaction::{CommitLatency, RO, RW},
|
||||
Mode, Transaction, TransactionKind,
|
||||
transaction::{RO, RW},
|
||||
txn_manager::{TxnManager, TxnManagerMessage, TxnPtr},
|
||||
Transaction, TransactionKind,
|
||||
};
|
||||
use byteorder::{ByteOrder, NativeEndian};
|
||||
use mem::size_of;
|
||||
@ -15,14 +16,15 @@ use std::{
|
||||
ops::{Bound, RangeBounds},
|
||||
path::Path,
|
||||
ptr,
|
||||
sync::{
|
||||
mpsc::{sync_channel, SyncSender},
|
||||
Arc,
|
||||
},
|
||||
sync::{mpsc::sync_channel, Arc},
|
||||
thread::sleep,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
/// The default maximum duration of a read transaction.
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
const DEFAULT_MAX_READ_TRANSACTION_DURATION: Duration = Duration::from_secs(5 * 60);
|
||||
|
||||
/// An environment supports multiple databases, all residing in the same shared-memory map.
|
||||
///
|
||||
/// Accessing the environment is thread-safe.
|
||||
@ -50,6 +52,8 @@ impl Environment {
|
||||
kind: Default::default(),
|
||||
#[cfg(not(windows))]
|
||||
handle_slow_readers: None,
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
max_read_transaction_duration: None,
|
||||
}
|
||||
}
|
||||
|
||||
@ -65,32 +69,22 @@ impl Environment {
|
||||
self.inner.env_kind
|
||||
}
|
||||
|
||||
/// Returns true if the environment was opened in [Mode::ReadWrite] mode.
|
||||
/// Returns true if the environment was opened in [crate::Mode::ReadWrite] mode.
|
||||
#[inline]
|
||||
pub fn is_read_write(&self) -> bool {
|
||||
self.inner.txn_manager.is_some()
|
||||
self.inner.env_kind.is_write_map()
|
||||
}
|
||||
|
||||
/// Returns true if the environment was opened in [Mode::ReadOnly] mode.
|
||||
/// Returns true if the environment was opened in [crate::Mode::ReadOnly] mode.
|
||||
#[inline]
|
||||
pub fn is_read_only(&self) -> bool {
|
||||
self.inner.txn_manager.is_none()
|
||||
!self.inner.env_kind.is_write_map()
|
||||
}
|
||||
|
||||
/// Returns the manager that handles transaction messages.
|
||||
///
|
||||
/// Requires [Mode::ReadWrite] and returns None otherwise.
|
||||
/// Returns the transaction manager.
|
||||
#[inline]
|
||||
pub(crate) fn txn_manager(&self) -> Option<&SyncSender<TxnManagerMessage>> {
|
||||
self.inner.txn_manager.as_ref()
|
||||
}
|
||||
|
||||
/// Returns the manager that handles transaction messages.
|
||||
///
|
||||
/// Requires [Mode::ReadWrite] and returns None otherwise.
|
||||
#[inline]
|
||||
pub(crate) fn ensure_txn_manager(&self) -> Result<&SyncSender<TxnManagerMessage>> {
|
||||
self.txn_manager().ok_or(Error::WriteTransactionUnsupportedInReadOnlyMode)
|
||||
pub(crate) fn txn_manager(&self) -> &TxnManager {
|
||||
&self.inner.txn_manager
|
||||
}
|
||||
|
||||
/// Create a read-only transaction for use with the environment.
|
||||
@ -102,16 +96,13 @@ impl Environment {
|
||||
/// Create a read-write transaction for use with the environment. This method will block while
|
||||
/// there are any other read-write transactions open on the environment.
|
||||
pub fn begin_rw_txn(&self) -> Result<Transaction<RW>> {
|
||||
let sender = self.ensure_txn_manager()?;
|
||||
let txn = loop {
|
||||
let (tx, rx) = sync_channel(0);
|
||||
sender
|
||||
.send(TxnManagerMessage::Begin {
|
||||
parent: TxnPtr(ptr::null_mut()),
|
||||
flags: RW::OPEN_FLAGS,
|
||||
sender: tx,
|
||||
})
|
||||
.unwrap();
|
||||
self.txn_manager().send_message(TxnManagerMessage::Begin {
|
||||
parent: TxnPtr(ptr::null_mut()),
|
||||
flags: RW::OPEN_FLAGS,
|
||||
sender: tx,
|
||||
});
|
||||
let res = rx.recv().unwrap();
|
||||
if let Err(Error::Busy) = &res {
|
||||
sleep(Duration::from_millis(250));
|
||||
@ -235,10 +226,8 @@ struct EnvironmentInner {
|
||||
env: *mut ffi::MDBX_env,
|
||||
/// Whether the environment was opened as WRITEMAP.
|
||||
env_kind: EnvironmentKind,
|
||||
/// the sender half of the transaction manager channel
|
||||
///
|
||||
/// Only set if the environment was opened in [Mode::ReadWrite] mode.
|
||||
txn_manager: Option<SyncSender<TxnManagerMessage>>,
|
||||
/// Transaction manager
|
||||
txn_manager: TxnManager,
|
||||
}
|
||||
|
||||
impl Drop for EnvironmentInner {
|
||||
@ -265,12 +254,12 @@ pub enum EnvironmentKind {
|
||||
Default,
|
||||
/// Open the environment as mdbx-WRITEMAP.
|
||||
/// Use a writeable memory map unless the environment is opened as MDBX_RDONLY
|
||||
/// ([Mode::ReadOnly]).
|
||||
/// ([crate::Mode::ReadOnly]).
|
||||
///
|
||||
/// All data will be mapped into memory in the read-write mode [Mode::ReadWrite]. This offers a
|
||||
/// significant performance benefit, since the data will be modified directly in mapped
|
||||
/// memory and then flushed to disk by single system call, without any memory management
|
||||
/// nor copying.
|
||||
/// All data will be mapped into memory in the read-write mode [crate::Mode::ReadWrite]. This
|
||||
/// offers a significant performance benefit, since the data will be modified directly in
|
||||
/// mapped memory and then flushed to disk by single system call, without any memory
|
||||
/// management nor copying.
|
||||
///
|
||||
/// This mode is incompatible with nested transactions.
|
||||
WriteMap,
|
||||
@ -292,22 +281,11 @@ impl EnvironmentKind {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub(crate) struct TxnPtr(pub(crate) *mut ffi::MDBX_txn);
|
||||
unsafe impl Send for TxnPtr {}
|
||||
unsafe impl Sync for TxnPtr {}
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub(crate) struct EnvPtr(pub(crate) *mut ffi::MDBX_env);
|
||||
unsafe impl Send for EnvPtr {}
|
||||
unsafe impl Sync for EnvPtr {}
|
||||
|
||||
pub(crate) enum TxnManagerMessage {
|
||||
Begin { parent: TxnPtr, flags: ffi::MDBX_txn_flags_t, sender: SyncSender<Result<TxnPtr>> },
|
||||
Abort { tx: TxnPtr, sender: SyncSender<Result<bool>> },
|
||||
Commit { tx: TxnPtr, sender: SyncSender<Result<(bool, CommitLatency)>> },
|
||||
}
|
||||
|
||||
/// Environment statistics.
|
||||
///
|
||||
/// Contains information about the size and layout of an MDBX environment or database.
|
||||
@ -597,6 +575,10 @@ pub struct EnvironmentBuilder {
|
||||
kind: EnvironmentKind,
|
||||
#[cfg(not(windows))]
|
||||
handle_slow_readers: Option<HandleSlowReadersCallback>,
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
/// The maximum duration of a read transaction. If [None], but the `read-tx-timeout` feature is
|
||||
/// enabled, the default value of [DEFAULT_MAX_READ_TRANSACTION_DURATION] is used.
|
||||
max_read_transaction_duration: Option<read_transactions::MaxReadTransactionDuration>,
|
||||
}
|
||||
|
||||
impl EnvironmentBuilder {
|
||||
@ -718,54 +700,24 @@ impl EnvironmentBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
let mut env = EnvironmentInner { env, txn_manager: None, env_kind: self.kind };
|
||||
#[cfg(not(feature = "read-tx-timeouts"))]
|
||||
let txn_manager = TxnManager::new(EnvPtr(env));
|
||||
|
||||
if let Mode::ReadWrite { .. } = self.flags.mode {
|
||||
let (tx, rx) = std::sync::mpsc::sync_channel(0);
|
||||
let e = EnvPtr(env.env);
|
||||
std::thread::spawn(move || loop {
|
||||
match rx.recv() {
|
||||
Ok(msg) => match msg {
|
||||
TxnManagerMessage::Begin { parent, flags, sender } => {
|
||||
#[allow(clippy::redundant_locals)]
|
||||
let e = e;
|
||||
let mut txn: *mut ffi::MDBX_txn = ptr::null_mut();
|
||||
sender
|
||||
.send(
|
||||
mdbx_result(unsafe {
|
||||
ffi::mdbx_txn_begin_ex(
|
||||
e.0,
|
||||
parent.0,
|
||||
flags,
|
||||
&mut txn,
|
||||
ptr::null_mut(),
|
||||
)
|
||||
})
|
||||
.map(|_| TxnPtr(txn)),
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
TxnManagerMessage::Abort { tx, sender } => {
|
||||
sender.send(mdbx_result(unsafe { ffi::mdbx_txn_abort(tx.0) })).unwrap();
|
||||
}
|
||||
TxnManagerMessage::Commit { tx, sender } => {
|
||||
sender
|
||||
.send({
|
||||
let mut latency = CommitLatency::new();
|
||||
mdbx_result(unsafe {
|
||||
ffi::mdbx_txn_commit_ex(tx.0, latency.mdb_commit_latency())
|
||||
})
|
||||
.map(|v| (v, latency))
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
},
|
||||
Err(_) => return,
|
||||
}
|
||||
});
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
let txn_manager = {
|
||||
let mut txn_manager = TxnManager::new(EnvPtr(env));
|
||||
if let crate::MaxReadTransactionDuration::Set(duration) = self
|
||||
.max_read_transaction_duration
|
||||
.unwrap_or(read_transactions::MaxReadTransactionDuration::Set(
|
||||
DEFAULT_MAX_READ_TRANSACTION_DURATION,
|
||||
))
|
||||
{
|
||||
txn_manager = txn_manager.with_max_read_transaction_duration(duration);
|
||||
};
|
||||
txn_manager
|
||||
};
|
||||
|
||||
env.txn_manager = Some(tx);
|
||||
}
|
||||
let env = EnvironmentInner { env, txn_manager, env_kind: self.kind };
|
||||
|
||||
Ok(Environment { inner: Arc::new(env) })
|
||||
}
|
||||
@ -861,16 +813,53 @@ impl EnvironmentBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
pub fn set_log_level(&mut self, log_level: ffi::MDBX_log_level_t) -> &mut Self {
|
||||
self.log_level = Some(log_level);
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the Handle-Slow-Readers callback. See [HandleSlowReadersCallback] for more information.
|
||||
#[cfg(not(windows))]
|
||||
pub fn set_handle_slow_readers(&mut self, hsr: HandleSlowReadersCallback) -> &mut Self {
|
||||
self.handle_slow_readers = Some(hsr);
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_log_level(&mut self, log_level: ffi::MDBX_log_level_t) -> &mut Self {
|
||||
self.log_level = Some(log_level);
|
||||
self
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
pub(crate) mod read_transactions {
|
||||
use crate::EnvironmentBuilder;
|
||||
use std::time::Duration;
|
||||
|
||||
/// The maximum duration of a read transaction.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
pub enum MaxReadTransactionDuration {
|
||||
/// The maximum duration of a read transaction is unbounded.
|
||||
Unbounded,
|
||||
/// The maximum duration of a read transaction is set to the given duration.
|
||||
Set(Duration),
|
||||
}
|
||||
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
impl MaxReadTransactionDuration {
|
||||
pub fn as_duration(&self) -> Option<Duration> {
|
||||
match self {
|
||||
MaxReadTransactionDuration::Unbounded => None,
|
||||
MaxReadTransactionDuration::Set(duration) => Some(*duration),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl EnvironmentBuilder {
|
||||
/// Set the maximum time a read-only transaction can be open.
|
||||
pub fn set_max_read_transaction_duration(
|
||||
&mut self,
|
||||
max_read_transaction_duration: MaxReadTransactionDuration,
|
||||
) -> &mut Self {
|
||||
self.max_read_transaction_duration = Some(max_read_transaction_duration);
|
||||
self
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
use crate::{txn_manager::TxnManager, TransactionKind};
|
||||
use libc::c_int;
|
||||
use std::result;
|
||||
|
||||
@ -5,7 +6,7 @@ use std::result;
|
||||
pub type Result<T> = result::Result<T, Error>;
|
||||
|
||||
/// An MDBX error kind.
|
||||
#[derive(Debug, thiserror::Error, Clone, PartialEq, Eq)]
|
||||
#[derive(Debug, thiserror::Error, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum Error {
|
||||
/// Key/data pair already exists.
|
||||
#[error("key/data pair already exists")]
|
||||
@ -117,6 +118,8 @@ pub enum Error {
|
||||
/// [Mode::ReadOnly](crate::flags::Mode::ReadOnly), write transactions can't be opened.
|
||||
#[error("write transactions are not supported in read-only mode")]
|
||||
WriteTransactionUnsupportedInReadOnlyMode,
|
||||
#[error("read transaction has been aborted by the transaction manager")]
|
||||
ReadTransactionAborted,
|
||||
/// Unknown error code.
|
||||
#[error("unknown error code")]
|
||||
Other(i32),
|
||||
@ -190,7 +193,7 @@ impl Error {
|
||||
Error::DecodeErrorLenDiff | Error::DecodeError => ffi::MDBX_EINVAL,
|
||||
Error::Access => ffi::MDBX_EACCESS,
|
||||
Error::TooLarge => ffi::MDBX_TOO_LARGE,
|
||||
Error::BadSignature => ffi::MDBX_EBADSIGN,
|
||||
Error::BadSignature | Error::ReadTransactionAborted => ffi::MDBX_EBADSIGN,
|
||||
Error::WriteTransactionUnsupportedInReadOnlyMode => ffi::MDBX_EACCESS,
|
||||
Error::NestedTransactionsUnsupportedWithWriteMap => ffi::MDBX_EACCESS,
|
||||
Error::Other(err_code) => *err_code,
|
||||
@ -213,6 +216,33 @@ pub(crate) fn mdbx_result(err_code: c_int) -> Result<bool> {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
#[inline]
|
||||
pub(crate) fn mdbx_result_with_tx_kind<K: TransactionKind>(
|
||||
err_code: c_int,
|
||||
txn: *mut ffi::MDBX_txn,
|
||||
txn_manager: &TxnManager,
|
||||
) -> Result<bool> {
|
||||
if K::IS_READ_ONLY &&
|
||||
err_code == ffi::MDBX_EBADSIGN &&
|
||||
txn_manager.remove_aborted_read_transaction(txn).is_some()
|
||||
{
|
||||
return Err(Error::ReadTransactionAborted);
|
||||
}
|
||||
|
||||
mdbx_result(err_code)
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "read-tx-timeouts"))]
|
||||
#[inline]
|
||||
pub(crate) fn mdbx_result_with_tx_kind<K: TransactionKind>(
|
||||
err_code: c_int,
|
||||
_txn: *mut ffi::MDBX_txn,
|
||||
_txn_manager: &TxnManager,
|
||||
) -> Result<bool> {
|
||||
mdbx_result(err_code)
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! mdbx_try_optional {
|
||||
($expr:expr) => {{
|
||||
|
||||
@ -23,6 +23,9 @@ pub mod ffi {
|
||||
pub use ffi::{MDBX_dbi as DBI, MDBX_log_level_t as LogLevel};
|
||||
}
|
||||
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
pub use crate::environment::read_transactions::MaxReadTransactionDuration;
|
||||
|
||||
mod codec;
|
||||
mod cursor;
|
||||
mod database;
|
||||
@ -30,6 +33,7 @@ mod environment;
|
||||
mod error;
|
||||
mod flags;
|
||||
mod transaction;
|
||||
mod txn_manager;
|
||||
|
||||
#[cfg(test)]
|
||||
mod test_utils {
|
||||
|
||||
@ -1,8 +1,9 @@
|
||||
use crate::{
|
||||
database::Database,
|
||||
environment::{Environment, TxnManagerMessage, TxnPtr},
|
||||
error::{mdbx_result, Result},
|
||||
environment::Environment,
|
||||
error::{mdbx_result, mdbx_result_with_tx_kind, Result},
|
||||
flags::{DatabaseFlags, WriteFlags},
|
||||
txn_manager::{TxnManagerMessage, TxnPtr},
|
||||
Cursor, Error, Stat, TableObject,
|
||||
};
|
||||
use ffi::{MDBX_txn_flags_t, MDBX_TXN_RDONLY, MDBX_TXN_READWRITE};
|
||||
@ -28,12 +29,10 @@ mod private {
|
||||
}
|
||||
|
||||
pub trait TransactionKind: private::Sealed + Send + Sync + Debug + 'static {
|
||||
#[doc(hidden)]
|
||||
const ONLY_CLEAN: bool;
|
||||
|
||||
#[doc(hidden)]
|
||||
const OPEN_FLAGS: MDBX_txn_flags_t;
|
||||
|
||||
/// Convenience flag for distinguishing between read-only and read-write transactions.
|
||||
#[doc(hidden)]
|
||||
const IS_READ_ONLY: bool;
|
||||
}
|
||||
@ -47,12 +46,10 @@ pub struct RO;
|
||||
pub struct RW;
|
||||
|
||||
impl TransactionKind for RO {
|
||||
const ONLY_CLEAN: bool = true;
|
||||
const OPEN_FLAGS: MDBX_txn_flags_t = MDBX_TXN_RDONLY;
|
||||
const IS_READ_ONLY: bool = true;
|
||||
}
|
||||
impl TransactionKind for RW {
|
||||
const ONLY_CLEAN: bool = false;
|
||||
const OPEN_FLAGS: MDBX_txn_flags_t = MDBX_TXN_READWRITE;
|
||||
const IS_READ_ONLY: bool = false;
|
||||
}
|
||||
@ -74,18 +71,27 @@ where
|
||||
pub(crate) fn new(env: Environment) -> Result<Self> {
|
||||
let mut txn: *mut ffi::MDBX_txn = ptr::null_mut();
|
||||
unsafe {
|
||||
mdbx_result(ffi::mdbx_txn_begin_ex(
|
||||
env.env_ptr(),
|
||||
ptr::null_mut(),
|
||||
K::OPEN_FLAGS,
|
||||
&mut txn,
|
||||
ptr::null_mut(),
|
||||
))?;
|
||||
mdbx_result_with_tx_kind::<K>(
|
||||
ffi::mdbx_txn_begin_ex(
|
||||
env.env_ptr(),
|
||||
ptr::null_mut(),
|
||||
K::OPEN_FLAGS,
|
||||
&mut txn,
|
||||
ptr::null_mut(),
|
||||
),
|
||||
txn,
|
||||
env.txn_manager(),
|
||||
)?;
|
||||
Ok(Self::new_from_ptr(env, txn))
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn new_from_ptr(env: Environment, txn: *mut ffi::MDBX_txn) -> Self {
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
if K::IS_READ_ONLY {
|
||||
env.txn_manager().add_active_read_transaction(txn)
|
||||
}
|
||||
|
||||
let inner = TransactionInner {
|
||||
txn: TransactionPtr::new(txn),
|
||||
primed_dbis: Mutex::new(IndexSet::new()),
|
||||
@ -93,6 +99,7 @@ where
|
||||
env,
|
||||
_marker: Default::default(),
|
||||
};
|
||||
|
||||
Self { inner: Arc::new(inner) }
|
||||
}
|
||||
|
||||
@ -179,22 +186,26 @@ where
|
||||
pub fn commit_and_rebind_open_dbs(self) -> Result<(bool, CommitLatency, Vec<Database>)> {
|
||||
let result = {
|
||||
let result = self.txn_execute(|txn| {
|
||||
if K::ONLY_CLEAN {
|
||||
if K::IS_READ_ONLY {
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
self.env().txn_manager().remove_active_read_transaction(txn);
|
||||
|
||||
let mut latency = CommitLatency::new();
|
||||
mdbx_result(unsafe {
|
||||
ffi::mdbx_txn_commit_ex(txn, latency.mdb_commit_latency())
|
||||
})
|
||||
mdbx_result_with_tx_kind::<K>(
|
||||
unsafe { ffi::mdbx_txn_commit_ex(txn, latency.mdb_commit_latency()) },
|
||||
txn,
|
||||
self.env().txn_manager(),
|
||||
)
|
||||
.map(|v| (v, latency))
|
||||
} else {
|
||||
let (sender, rx) = sync_channel(0);
|
||||
self.env()
|
||||
.ensure_txn_manager()
|
||||
.unwrap()
|
||||
.send(TxnManagerMessage::Commit { tx: TxnPtr(txn), sender })
|
||||
.unwrap();
|
||||
.txn_manager()
|
||||
.send_message(TxnManagerMessage::Commit { tx: TxnPtr(txn), sender });
|
||||
rx.recv().unwrap()
|
||||
}
|
||||
});
|
||||
|
||||
self.inner.set_committed();
|
||||
result
|
||||
};
|
||||
@ -231,9 +242,13 @@ where
|
||||
pub fn db_flags(&self, db: &Database) -> Result<DatabaseFlags> {
|
||||
let mut flags: c_uint = 0;
|
||||
unsafe {
|
||||
mdbx_result(self.txn_execute(|txn| {
|
||||
ffi::mdbx_dbi_flags_ex(txn, db.dbi(), &mut flags, ptr::null_mut())
|
||||
}))?;
|
||||
self.txn_execute(|txn| {
|
||||
mdbx_result_with_tx_kind::<K>(
|
||||
ffi::mdbx_dbi_flags_ex(txn, db.dbi(), &mut flags, ptr::null_mut()),
|
||||
txn,
|
||||
self.env().txn_manager(),
|
||||
)
|
||||
})?;
|
||||
}
|
||||
|
||||
// The types are not the same on Windows. Great!
|
||||
@ -250,9 +265,13 @@ where
|
||||
pub fn db_stat_with_dbi(&self, dbi: ffi::MDBX_dbi) -> Result<Stat> {
|
||||
unsafe {
|
||||
let mut stat = Stat::new();
|
||||
mdbx_result(self.txn_execute(|txn| {
|
||||
ffi::mdbx_dbi_stat(txn, dbi, stat.mdb_stat(), size_of::<Stat>())
|
||||
}))?;
|
||||
self.txn_execute(|txn| {
|
||||
mdbx_result_with_tx_kind::<K>(
|
||||
ffi::mdbx_dbi_stat(txn, dbi, stat.mdb_stat(), size_of::<Stat>()),
|
||||
txn,
|
||||
self.env().txn_manager(),
|
||||
)
|
||||
})?;
|
||||
Ok(stat)
|
||||
}
|
||||
}
|
||||
@ -330,17 +349,18 @@ where
|
||||
fn drop(&mut self) {
|
||||
self.txn_execute(|txn| {
|
||||
if !self.has_committed() {
|
||||
if K::ONLY_CLEAN {
|
||||
if K::IS_READ_ONLY {
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
self.env.txn_manager().remove_active_read_transaction(txn);
|
||||
|
||||
unsafe {
|
||||
ffi::mdbx_txn_abort(txn);
|
||||
}
|
||||
} else {
|
||||
let (sender, rx) = sync_channel(0);
|
||||
self.env
|
||||
.ensure_txn_manager()
|
||||
.unwrap()
|
||||
.send(TxnManagerMessage::Abort { tx: TxnPtr(txn), sender })
|
||||
.unwrap();
|
||||
.txn_manager()
|
||||
.send_message(TxnManagerMessage::Abort { tx: TxnPtr(txn), sender });
|
||||
rx.recv().unwrap().unwrap();
|
||||
}
|
||||
}
|
||||
@ -489,7 +509,11 @@ impl Transaction<RO> {
|
||||
/// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi
|
||||
/// BEFORE calling this function.
|
||||
pub unsafe fn close_db(&self, db: Database) -> Result<()> {
|
||||
mdbx_result(ffi::mdbx_dbi_close(self.env().env_ptr(), db.dbi()))?;
|
||||
mdbx_result_with_tx_kind::<RO>(
|
||||
ffi::mdbx_dbi_close(self.env().env_ptr(), db.dbi()),
|
||||
self.txn(),
|
||||
self.env().txn_manager(),
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -503,15 +527,11 @@ impl Transaction<RW> {
|
||||
}
|
||||
self.txn_execute(|txn| {
|
||||
let (tx, rx) = sync_channel(0);
|
||||
self.env()
|
||||
.ensure_txn_manager()
|
||||
.unwrap()
|
||||
.send(TxnManagerMessage::Begin {
|
||||
parent: TxnPtr(txn),
|
||||
flags: RW::OPEN_FLAGS,
|
||||
sender: tx,
|
||||
})
|
||||
.unwrap();
|
||||
self.env().txn_manager().send_message(TxnManagerMessage::Begin {
|
||||
parent: TxnPtr(txn),
|
||||
flags: RW::OPEN_FLAGS,
|
||||
sender: tx,
|
||||
});
|
||||
|
||||
rx.recv().unwrap().map(|ptr| Transaction::new_from_ptr(self.env().clone(), ptr.0))
|
||||
})
|
||||
|
||||
389
crates/storage/libmdbx-rs/src/txn_manager.rs
Normal file
389
crates/storage/libmdbx-rs/src/txn_manager.rs
Normal file
@ -0,0 +1,389 @@
|
||||
use crate::{
|
||||
environment::EnvPtr,
|
||||
error::{mdbx_result, Result},
|
||||
CommitLatency,
|
||||
};
|
||||
use std::{
|
||||
ptr,
|
||||
sync::mpsc::{sync_channel, Receiver, SyncSender},
|
||||
};
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub(crate) struct TxnPtr(pub(crate) *mut ffi::MDBX_txn);
|
||||
unsafe impl Send for TxnPtr {}
|
||||
unsafe impl Sync for TxnPtr {}
|
||||
|
||||
pub(crate) enum TxnManagerMessage {
|
||||
Begin { parent: TxnPtr, flags: ffi::MDBX_txn_flags_t, sender: SyncSender<Result<TxnPtr>> },
|
||||
Abort { tx: TxnPtr, sender: SyncSender<Result<bool>> },
|
||||
Commit { tx: TxnPtr, sender: SyncSender<Result<(bool, CommitLatency)>> },
|
||||
}
|
||||
|
||||
/// Manages transactions by doing two things:
|
||||
/// - Opening, aborting, and committing transactions using [TxnManager::send_message] with the
|
||||
/// corresponding [TxnManagerMessage]
|
||||
/// - Aborting long-lived read transactions (if the `read-tx-timeouts` feature is enabled and
|
||||
/// `TxnManager::with_max_read_transaction_duration` is called)
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct TxnManager {
|
||||
sender: SyncSender<TxnManagerMessage>,
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
read_transactions: Option<std::sync::Arc<read_transactions::ReadTransactions>>,
|
||||
}
|
||||
|
||||
impl TxnManager {
|
||||
pub(crate) fn new(env: EnvPtr) -> Self {
|
||||
let (tx, rx) = sync_channel(0);
|
||||
let txn_manager = Self {
|
||||
sender: tx,
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
read_transactions: None,
|
||||
};
|
||||
|
||||
txn_manager.start_message_listener(env, rx);
|
||||
|
||||
txn_manager
|
||||
}
|
||||
|
||||
/// Spawns a new thread with [std::thread::spawn] that listens to incoming [TxnManagerMessage]
|
||||
/// messages, executes an FFI function, and returns the result on the provided channel.
|
||||
///
|
||||
/// - [TxnManagerMessage::Begin] opens a new transaction with [ffi::mdbx_txn_begin_ex]
|
||||
/// - [TxnManagerMessage::Abort] aborts a transaction with [ffi::mdbx_txn_abort]
|
||||
/// - [TxnManagerMessage::Commit] commits a transaction with [ffi::mdbx_txn_commit_ex]
|
||||
fn start_message_listener(&self, env: EnvPtr, rx: Receiver<TxnManagerMessage>) {
|
||||
let read_transactions = self.read_transactions.clone();
|
||||
std::thread::spawn(move || {
|
||||
#[allow(clippy::redundant_locals)]
|
||||
let env = env;
|
||||
loop {
|
||||
match rx.recv() {
|
||||
Ok(msg) => match msg {
|
||||
TxnManagerMessage::Begin { parent, flags, sender } => {
|
||||
let mut txn: *mut ffi::MDBX_txn = ptr::null_mut();
|
||||
sender
|
||||
.send(
|
||||
mdbx_result(unsafe {
|
||||
ffi::mdbx_txn_begin_ex(
|
||||
env.0,
|
||||
parent.0,
|
||||
flags,
|
||||
&mut txn,
|
||||
ptr::null_mut(),
|
||||
)
|
||||
})
|
||||
.map(|_| TxnPtr(txn)),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
{
|
||||
use crate::transaction::TransactionKind;
|
||||
|
||||
if flags == crate::transaction::RO::OPEN_FLAGS {
|
||||
if let Some(read_transactions) = &read_transactions {
|
||||
read_transactions.add_active(txn);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
TxnManagerMessage::Abort { tx, sender } => {
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
if let Some(read_transactions) = &read_transactions {
|
||||
read_transactions.remove_active(tx.0);
|
||||
}
|
||||
|
||||
sender.send(mdbx_result(unsafe { ffi::mdbx_txn_abort(tx.0) })).unwrap();
|
||||
}
|
||||
TxnManagerMessage::Commit { tx, sender } => {
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
if let Some(read_transactions) = &read_transactions {
|
||||
read_transactions.remove_active(tx.0);
|
||||
}
|
||||
|
||||
sender
|
||||
.send({
|
||||
let mut latency = CommitLatency::new();
|
||||
mdbx_result(unsafe {
|
||||
ffi::mdbx_txn_commit_ex(tx.0, latency.mdb_commit_latency())
|
||||
})
|
||||
.map(|v| (v, latency))
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
},
|
||||
Err(_) => return,
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
pub(crate) fn send_message(&self, message: TxnManagerMessage) {
|
||||
self.sender.send(message).unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "read-tx-timeouts")]
|
||||
mod read_transactions {
|
||||
use crate::{error::mdbx_result, txn_manager::TxnManager, Error};
|
||||
use dashmap::{DashMap, DashSet};
|
||||
use std::{
|
||||
sync::Arc,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use tracing::{error, trace, warn};
|
||||
|
||||
const READ_TRANSACTIONS_CHECK_INTERVAL: Duration = Duration::from_secs(5);
|
||||
|
||||
impl TxnManager {
|
||||
/// Sets the maximum duration that a read transaction can be open.
|
||||
pub(crate) fn with_max_read_transaction_duration(
|
||||
mut self,
|
||||
duration: Duration,
|
||||
) -> TxnManager {
|
||||
let read_transactions = Arc::new(ReadTransactions::new(duration));
|
||||
read_transactions.clone().start_monitor();
|
||||
self.read_transactions = Some(read_transactions);
|
||||
|
||||
self
|
||||
}
|
||||
|
||||
/// Adds a new transaction to the list of active read transactions.
|
||||
pub(crate) fn add_active_read_transaction(&self, ptr: *mut ffi::MDBX_txn) {
|
||||
if let Some(read_transactions) = &self.read_transactions {
|
||||
read_transactions.add_active(ptr);
|
||||
}
|
||||
}
|
||||
|
||||
/// Removes a transaction from the list of active read transactions.
|
||||
pub(crate) fn remove_active_read_transaction(
|
||||
&self,
|
||||
ptr: *mut ffi::MDBX_txn,
|
||||
) -> Option<(usize, Instant)> {
|
||||
self.read_transactions.as_ref()?.remove_active(ptr)
|
||||
}
|
||||
|
||||
/// Removes a transaction from the list of aborted read transactions.
|
||||
pub(crate) fn remove_aborted_read_transaction(
|
||||
&self,
|
||||
ptr: *mut ffi::MDBX_txn,
|
||||
) -> Option<usize> {
|
||||
self.read_transactions.as_ref()?.remove_aborted(ptr)
|
||||
}
|
||||
}
|
||||
|
||||
    /// Bookkeeping for read transactions that are subject to the `read-tx-timeouts`
    /// abort policy.
    #[derive(Debug, Default)]
    pub(super) struct ReadTransactions {
        /// Maximum duration that a read transaction can be open until the
        /// [ReadTransactions::start_monitor] aborts it.
        max_duration: Duration,
        /// List of currently active read transactions.
        ///
        /// We store `usize` instead of a raw pointer as a key, because pointers are not
        /// comparable. The time of transaction opening is stored as a value.
        active: DashMap<usize, Instant>,
        /// List of read transactions aborted by the [ReadTransactions::start_monitor].
        /// We keep them until user tries to abort the transaction, so we're able to report a nice
        /// [Error::ReadTransactionAborted] error.
        ///
        /// We store `usize` instead of a raw pointer, because pointers are not comparable.
        aborted: DashSet<usize>,
    }
|
||||
|
||||
    impl ReadTransactions {
        /// Creates a tracker that allows read transactions to stay open for at most
        /// `max_duration`; the `active`/`aborted` collections start empty.
        pub(super) fn new(max_duration: Duration) -> Self {
            Self { max_duration, ..Default::default() }
        }

        /// Adds a new transaction to the list of active read transactions.
        pub(super) fn add_active(&self, ptr: *mut ffi::MDBX_txn) {
            // The pointer address doubles as the map key; any previous entry for a reused
            // address is overwritten with a fresh start time.
            let _ = self.active.insert(ptr as usize, Instant::now());
        }

        /// Removes a transaction from the list of active read transactions.
        ///
        /// Returns the removed `(key, open_instant)` pair, or `None` if it wasn't tracked.
        pub(super) fn remove_active(&self, ptr: *mut ffi::MDBX_txn) -> Option<(usize, Instant)> {
            self.active.remove(&(ptr as usize))
        }

        /// Adds a new transaction to the list of aborted read transactions.
        pub(super) fn add_aborted(&self, ptr: *mut ffi::MDBX_txn) {
            self.aborted.insert(ptr as usize);
        }

        /// Removes a transaction from the list of aborted read transactions.
        ///
        /// Returns the removed key, or `None` if it wasn't tracked as aborted.
        pub(super) fn remove_aborted(&self, ptr: *mut ffi::MDBX_txn) -> Option<usize> {
            self.aborted.remove(&(ptr as usize))
        }

        /// Spawns a new thread with [std::thread::spawn] that monitors the list of active read
        /// transactions and aborts those that are open for longer than
        /// `ReadTransactions.max_duration`.
        ///
        /// Aborted transaction pointers are placed into the list of aborted read transactions, and
        /// removed from this list by [crate::error::mdbx_result_with_tx_kind] when the user tries
        /// to use it.
        ///
        /// NOTE(review): the abort below can race with the owner committing/aborting the same
        /// transaction concurrently; the `Error::BadSignature` check relies on MDBX detecting a
        /// stale transaction signature in that case — confirm this is the intended contract.
        pub(super) fn start_monitor(self: Arc<Self>) {
            std::thread::spawn(move || {
                // Reused across iterations to avoid reallocating every pass.
                let mut aborted_active = Vec::new();

                loop {
                    let now = Instant::now();
                    // Longest lifetime among transactions that were NOT aborted this pass;
                    // used below to pick the next wake-up time.
                    let mut max_active_transaction_duration = None;

                    // Iterate through active read transactions and abort those that's open for
                    // longer than `self.max_duration`.
                    for entry in self.active.iter() {
                        let (ptr, start) = entry.pair();
                        let duration = now - *start;

                        if duration > self.max_duration {
                            let ptr = *ptr as *mut ffi::MDBX_txn;

                            // Add the transaction to the list of aborted transactions, so further
                            // usages report the correct error when the transaction is closed.
                            self.add_aborted(ptr);

                            // Abort the transaction
                            let result = mdbx_result(unsafe { ffi::mdbx_txn_abort(ptr) });

                            // Add the transaction to `aborted_active`. We can't remove it instantly
                            // from the list of active transactions, because we iterate through it.
                            aborted_active.push((ptr, duration, result.err()));
                        } else {
                            max_active_transaction_duration = Some(
                                duration.max(max_active_transaction_duration.unwrap_or_default()),
                            );
                        }
                    }

                    // Walk through aborted transactions, and delete them from the list of active
                    // transactions.
                    for (ptr, open_duration, err) in aborted_active.iter().copied() {
                        // Try deleting the transaction from the list of active transactions.
                        let was_in_active = self.remove_active(ptr).is_some();
                        if let Some(err) = err {
                            // If there was an error when aborting the transaction, we need to
                            // remove it from the list of aborted transactions, because otherwise it
                            // will stay there forever.
                            self.remove_aborted(ptr);
                            if was_in_active && err != Error::BadSignature {
                                // If the transaction was in the list of active transactions and the
                                // error code is not `EBADSIGN`, then user didn't abort it.
                                error!(target: "libmdbx", ?err, ?open_duration, "Failed to abort the long-lived read transactions");
                            }
                        } else {
                            // Happy path, the transaction has been aborted by us with no errors.
                            warn!(target: "libmdbx", ?open_duration, "Long-lived read transactions has been aborted");
                        }
                    }

                    // Clear the list of aborted transactions, but not de-allocate the reserved
                    // capacity to save on further pushes.
                    aborted_active.clear();

                    if !self.active.is_empty() || !self.aborted.is_empty() {
                        trace!(
                            target: "libmdbx",
                            elapsed = ?now.elapsed(),
                            active = ?self.active.iter().map(|entry| {
                                let (ptr, start) = entry.pair();
                                (*ptr, start.elapsed())
                            }).collect::<Vec<_>>(),
                            aborted = ?self.aborted.iter().map(|entry| *entry).collect::<Vec<_>>(),
                            "Read transactions"
                        );
                    }

                    // Sleep not more than `READ_TRANSACTIONS_CHECK_INTERVAL`, but at least until
                    // the closest deadline of an active read transaction.
                    // Subtraction cannot underflow: every duration folded into
                    // `max_active_transaction_duration` was `<= self.max_duration` above.
                    let duration_until_closest_deadline =
                        self.max_duration - max_active_transaction_duration.unwrap_or_default();
                    std::thread::sleep(
                        READ_TRANSACTIONS_CHECK_INTERVAL.min(duration_until_closest_deadline),
                    );
                }
            });
        }
    }
|
||||
|
||||
    #[cfg(test)]
    mod tests {
        use crate::{
            txn_manager::read_transactions::READ_TRANSACTIONS_CHECK_INTERVAL, Environment, Error,
            MaxReadTransactionDuration,
        };
        use std::{thread::sleep, time::Duration};
        use tempfile::tempdir;

        // Verifies the full lifecycle of read-transaction tracking when a max duration is set:
        // registration on open, deregistration on drop/commit, and forced abort on timeout.
        //
        // NOTE(review): the third case is timing-based (real `sleep`); it may be flaky on a
        // heavily loaded machine.
        #[test]
        fn txn_manager_read_transactions_duration_set() {
            const MAX_DURATION: Duration = Duration::from_secs(1);

            let dir = tempdir().unwrap();
            let env = Environment::builder()
                .set_max_read_transaction_duration(MaxReadTransactionDuration::Set(MAX_DURATION))
                .open(dir.path())
                .unwrap();

            let read_transactions = env.txn_manager().read_transactions.as_ref().unwrap();

            // Create a read-only transaction, successfully use it, close it by dropping.
            {
                let tx = env.begin_ro_txn().unwrap();
                let tx_ptr = tx.txn() as usize;
                assert!(read_transactions.active.contains_key(&tx_ptr));

                tx.open_db(None).unwrap();
                drop(tx);

                assert!(!read_transactions.active.contains_key(&tx_ptr));
                assert!(!read_transactions.aborted.contains(&tx_ptr));
            }

            // Create a read-only transaction, successfully use it, close it by committing.
            {
                let tx = env.begin_ro_txn().unwrap();
                let tx_ptr = tx.txn() as usize;
                assert!(read_transactions.active.contains_key(&tx_ptr));

                tx.open_db(None).unwrap();
                tx.commit().unwrap();

                assert!(!read_transactions.active.contains_key(&tx_ptr));
                assert!(!read_transactions.aborted.contains(&tx_ptr));
            }

            // Create a read-only transaction, wait until `MAX_DURATION` time is elapsed so the
            // manager kills it, use it and observe the `Error::ReadTransactionAborted` error.
            {
                let tx = env.begin_ro_txn().unwrap();
                let tx_ptr = tx.txn() as usize;
                assert!(read_transactions.active.contains_key(&tx_ptr));

                // Sleep long enough for at least one monitor pass past the deadline.
                sleep(MAX_DURATION + READ_TRANSACTIONS_CHECK_INTERVAL);

                assert!(!read_transactions.active.contains_key(&tx_ptr));
                assert!(read_transactions.aborted.contains(&tx_ptr));

                // First use after the forced abort reports the dedicated error and also
                // clears the `aborted` bookkeeping entry.
                assert_eq!(tx.open_db(None).err(), Some(Error::ReadTransactionAborted));
                assert!(!read_transactions.active.contains_key(&tx_ptr));
                assert!(!read_transactions.aborted.contains(&tx_ptr));
            }
        }

        // With an unbounded max duration no tracker is installed, and a read transaction
        // outliving the check interval must survive untouched.
        #[test]
        fn txn_manager_read_transactions_duration_unbounded() {
            let dir = tempdir().unwrap();
            let env = Environment::builder()
                .set_max_read_transaction_duration(MaxReadTransactionDuration::Unbounded)
                .open(dir.path())
                .unwrap();

            assert!(env.txn_manager().read_transactions.is_none());

            let tx = env.begin_ro_txn().unwrap();
            sleep(READ_TRANSACTIONS_CHECK_INTERVAL);
            assert!(tx.commit().is_ok())
        }
    }
|
||||
}
|
||||
@ -10,7 +10,7 @@ use crate::{
|
||||
TransactionsProvider, WithdrawalsProvider,
|
||||
};
|
||||
use reth_db::{database::Database, init_db, models::StoredBlockBodyIndices, DatabaseEnv};
|
||||
use reth_interfaces::{db::LogLevel, provider::ProviderResult, RethError, RethResult};
|
||||
use reth_interfaces::{provider::ProviderResult, RethError, RethResult};
|
||||
use reth_primitives::{
|
||||
snapshot::HighestSnapshots,
|
||||
stage::{StageCheckpoint, StageId},
|
||||
@ -32,6 +32,7 @@ mod metrics;
|
||||
mod provider;
|
||||
|
||||
pub use provider::{DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW};
|
||||
use reth_db::mdbx::DatabaseArguments;
|
||||
|
||||
/// A common provider that fetches data from a database.
|
||||
///
|
||||
@ -69,10 +70,10 @@ impl<DB> ProviderFactory<DB> {
|
||||
pub fn new_with_database_path<P: AsRef<Path>>(
|
||||
path: P,
|
||||
chain_spec: Arc<ChainSpec>,
|
||||
log_level: Option<LogLevel>,
|
||||
args: DatabaseArguments,
|
||||
) -> RethResult<ProviderFactory<DatabaseEnv>> {
|
||||
Ok(ProviderFactory::<DatabaseEnv> {
|
||||
db: init_db(path, log_level).map_err(|e| RethError::Custom(e.to_string()))?,
|
||||
db: init_db(path, args).map_err(|e| RethError::Custom(e.to_string()))?,
|
||||
chain_spec,
|
||||
snapshot_provider: None,
|
||||
})
|
||||
@ -556,7 +557,7 @@ mod tests {
|
||||
let factory = ProviderFactory::<DatabaseEnv>::new_with_database_path(
|
||||
tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path(),
|
||||
Arc::new(chain_spec),
|
||||
None,
|
||||
Default::default(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
|
||||
@ -17,8 +17,8 @@ use std::path::Path;
|
||||
fn main() -> eyre::Result<()> {
|
||||
// Opens a RO handle to the database file.
|
||||
// TODO: Should be able to do `ProviderFactory::new_with_db_path_ro(...)` instead of
|
||||
// doing in 2 steps.
|
||||
let db = open_db_read_only(Path::new(&std::env::var("RETH_DB_PATH")?), None)?;
|
||||
// doing in 2 steps.
|
||||
let db = open_db_read_only(Path::new(&std::env::var("RETH_DB_PATH")?), Default::default())?;
|
||||
|
||||
// Instantiate a provider factory for Ethereum mainnet using the provided DB.
|
||||
// TODO: Should the DB version include the spec so that you do not need to specify it here?
|
||||
|
||||
@ -35,7 +35,10 @@ pub mod myrpc_ext;
|
||||
#[tokio::main]
|
||||
async fn main() -> eyre::Result<()> {
|
||||
// 1. Setup the DB
|
||||
let db = Arc::new(open_db_read_only(Path::new(&std::env::var("RETH_DB_PATH")?), None)?);
|
||||
let db = Arc::new(open_db_read_only(
|
||||
Path::new(&std::env::var("RETH_DB_PATH")?),
|
||||
Default::default(),
|
||||
)?);
|
||||
let spec = Arc::new(ChainSpecBuilder::mainnet().build());
|
||||
let factory = ProviderFactory::new(db.clone(), spec.clone());
|
||||
|
||||
|
||||
Reference in New Issue
Block a user