feat(db): record client version history (#7119)

Author: Roman Krasiuk
Date: 2024-03-13 13:07:13 +01:00 (committed by GitHub)
Parent: 884fd71a01
Commit: 610731ced8
32 changed files with 366 additions and 119 deletions

Cargo.lock (generated)

@ -7029,6 +7029,7 @@ dependencies = [
"futures",
"jsonrpsee",
"reth",
"reth-db",
"reth-node-ethereum",
"tokio",
]


@ -5,13 +5,13 @@ use crate::{
};
use clap::Parser;
use reth_db::{
cursor::DbCursorRO, database::Database, mdbx::DatabaseArguments, open_db_read_only,
table::Table, transaction::DbTx, AccountChangeSets, AccountsHistory, AccountsTrie,
BlockBodyIndices, BlockOmmers, BlockWithdrawals, Bytecodes, CanonicalHeaders, DatabaseEnv,
HashedAccounts, HashedStorages, HeaderNumbers, HeaderTerminalDifficulties, Headers,
PlainAccountState, PlainStorageState, PruneCheckpoints, Receipts, StageCheckpointProgresses,
StageCheckpoints, StorageChangeSets, StoragesHistory, StoragesTrie, Tables, TransactionBlocks,
TransactionHashNumbers, TransactionSenders, Transactions,
cursor::DbCursorRO, database::Database, open_db_read_only, table::Table, transaction::DbTx,
AccountChangeSets, AccountsHistory, AccountsTrie, BlockBodyIndices, BlockOmmers,
BlockWithdrawals, Bytecodes, CanonicalHeaders, DatabaseEnv, HashedAccounts, HashedStorages,
HeaderNumbers, HeaderTerminalDifficulties, Headers, PlainAccountState, PlainStorageState,
PruneCheckpoints, Receipts, StageCheckpointProgresses, StageCheckpoints, StorageChangeSets,
StoragesHistory, StoragesTrie, Tables, TransactionBlocks, TransactionHashNumbers,
TransactionSenders, Transactions, VersionHistory,
};
use std::{
collections::HashMap,
@ -60,10 +60,7 @@ impl Command {
pub fn execute(self, tool: &DbTool<DatabaseEnv>) -> eyre::Result<()> {
// open second db
let second_db_path: PathBuf = self.secondary_datadir.join("db").into();
let second_db = open_db_read_only(
&second_db_path,
DatabaseArguments::default().log_level(self.second_db.log_level),
)?;
let second_db = open_db_read_only(&second_db_path, self.second_db.database_args())?;
let tables = match &self.table {
Some(table) => std::slice::from_ref(table),
@ -148,6 +145,9 @@ impl Command {
Tables::PruneCheckpoints => {
find_diffs::<PruneCheckpoints>(primary_tx, secondary_tx, output_dir)?
}
Tables::VersionHistory => {
find_diffs::<VersionHistory>(primary_tx, secondary_tx, output_dir)?
}
};
}


@ -93,12 +93,13 @@ impl TableViewer<()> for ListTableViewer<'_> {
let table_db = tx.inner.open_db(Some(self.args.table.name())).wrap_err("Could not open db.")?;
let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", stringify!($table)))?;
let total_entries = stats.entries();
if self.args.skip > total_entries - 1 {
let final_entry_idx = total_entries.saturating_sub(1);
if self.args.skip > final_entry_idx {
error!(
target: "reth::cli",
"Start index {start} is greater than the final entry index ({final_entry_idx}) in the table {table}",
start = self.args.skip,
final_entry_idx = total_entries - 1,
final_entry_idx = final_entry_idx,
table = self.args.table.name()
);
return Ok(())
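
A note on the bounds change above, with a minimal standalone illustration (plain std, nothing reth-specific): an empty table has total_entries == 0, so the old `total_entries - 1` would underflow the usize and panic in debug builds, while `saturating_sub(1)` clamps the final entry index to zero.

fn main() {
    let total_entries: usize = 0;
    // Old code: `total_entries - 1` panics in debug builds / wraps in release.
    // New code: clamps to 0, so the skip check compares against a valid index.
    assert_eq!(total_entries.saturating_sub(1), 0);
}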


@ -10,7 +10,6 @@ use crate::{
};
use clap::{Parser, Subcommand};
use reth_db::{
mdbx::DatabaseArguments,
open_db, open_db_read_only,
version::{get_db_version, DatabaseVersionError, DB_VERSION},
};
@ -96,13 +95,13 @@ impl Command {
// add network name to data dir
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
let db_path = data_dir.db_path();
let db_log_level = DatabaseArguments::default().log_level(self.db.log_level);
let db_args = self.db.database_args();
let static_files_path = data_dir.static_files_path();
match self.command {
// TODO: We'll need to add this on the DB trait.
Subcommands::Stats(command) => {
let db = open_db_read_only(&db_path, db_log_level)?;
let db = open_db_read_only(&db_path, db_args)?;
let provider_factory =
ProviderFactory::new(db, self.chain.clone(), static_files_path)?;
@ -110,7 +109,7 @@ impl Command {
command.execute(data_dir, &tool)?;
}
Subcommands::List(command) => {
let db = open_db_read_only(&db_path, db_log_level)?;
let db = open_db_read_only(&db_path, db_args)?;
let provider_factory =
ProviderFactory::new(db, self.chain.clone(), static_files_path)?;
@ -118,7 +117,7 @@ impl Command {
command.execute(&tool)?;
}
Subcommands::Diff(command) => {
let db = open_db_read_only(&db_path, db_log_level)?;
let db = open_db_read_only(&db_path, db_args)?;
let provider_factory =
ProviderFactory::new(db, self.chain.clone(), static_files_path)?;
@ -126,7 +125,7 @@ impl Command {
command.execute(&tool)?;
}
Subcommands::Get(command) => {
let db = open_db_read_only(&db_path, db_log_level)?;
let db = open_db_read_only(&db_path, db_args)?;
let provider_factory =
ProviderFactory::new(db, self.chain.clone(), static_files_path)?;
@ -149,7 +148,7 @@ impl Command {
}
}
let db = open_db(&db_path, db_log_level)?;
let db = open_db(&db_path, db_args)?;
let provider_factory =
ProviderFactory::new(db, self.chain.clone(), static_files_path.clone())?;
@ -157,14 +156,14 @@ impl Command {
tool.drop(db_path, static_files_path)?;
}
Subcommands::Clear(command) => {
let db = open_db(&db_path, db_log_level)?;
let db = open_db(&db_path, db_args)?;
let provider_factory =
ProviderFactory::new(db, self.chain.clone(), static_files_path)?;
command.execute(provider_factory)?;
}
Subcommands::CreateStaticFiles(command) => {
command.execute(data_dir, self.db.log_level, self.chain.clone())?;
command.execute(data_dir, self.db.database_args(), self.chain.clone())?;
}
Subcommands::Version => {
let local_db_version = match get_db_version(&db_path) {


@ -7,7 +7,6 @@ use reth_db::{
mdbx::{DatabaseArguments, MaxReadTransactionDuration},
open_db_read_only, DatabaseEnv,
};
use reth_interfaces::db::LogLevel;
use reth_nippy_jar::{NippyJar, NippyJarCursor};
use reth_node_core::dirs::{ChainPath, DataDirPath};
use reth_primitives::{
@ -83,7 +82,7 @@ impl Command {
pub fn execute(
self,
data_dir: ChainPath<DataDirPath>,
log_level: Option<LogLevel>,
db_args: DatabaseArguments,
chain: Arc<ChainSpec>,
) -> eyre::Result<()> {
let all_combinations = self
@ -98,9 +97,7 @@ impl Command {
let db = open_db_read_only(
data_dir.db_path().as_path(),
DatabaseArguments::default()
.log_level(log_level)
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)),
db_args.with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)),
)?;
let provider_factory =
Arc::new(ProviderFactory::new(db, chain.clone(), data_dir.static_files_path())?);


@ -18,7 +18,7 @@ use reth_beacon_consensus::BeaconConsensus;
use reth_blockchain_tree::{
BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals,
};
use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv};
use reth_db::{init_db, DatabaseEnv};
use reth_interfaces::{consensus::Consensus, RethResult};
use reth_node_api::PayloadBuilderAttributes;
#[cfg(not(feature = "optimism"))]
@ -157,8 +157,7 @@ impl Command {
fs::create_dir_all(&db_path)?;
// initialize the database
let db =
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
let db = Arc::new(init_db(db_path, self.db.database_args())?);
let provider_factory = ProviderFactory::new(
Arc::clone(&db),
Arc::clone(&self.chain),


@ -14,7 +14,7 @@ use clap::Parser;
use futures::{stream::select as stream_select, StreamExt};
use reth_beacon_consensus::BeaconConsensus;
use reth_config::Config;
use reth_db::{database::Database, init_db, mdbx::DatabaseArguments, DatabaseEnv};
use reth_db::{database::Database, init_db, DatabaseEnv};
use reth_downloaders::{
bodies::bodies::BodiesDownloaderBuilder,
headers::reverse_headers::ReverseHeadersDownloaderBuilder,
@ -208,8 +208,7 @@ impl Command {
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
let db_path = data_dir.db_path();
fs::create_dir_all(&db_path)?;
let db =
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
let db = Arc::new(init_db(db_path, self.db.database_args())?);
let provider_factory =
ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?;


@ -13,7 +13,7 @@ use crate::{
use backon::{ConstantBuilder, Retryable};
use clap::Parser;
use reth_config::Config;
use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv};
use reth_db::{init_db, DatabaseEnv};
use reth_interfaces::executor::BlockValidationError;
use reth_network::NetworkHandle;
use reth_network_api::NetworkInfo;
@ -116,8 +116,7 @@ impl Command {
fs::create_dir_all(&db_path)?;
// initialize the database
let db =
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
let db = Arc::new(init_db(db_path, self.db.database_args())?);
let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?;
let provider = factory.provider()?;


@ -14,9 +14,7 @@ use backon::{ConstantBuilder, Retryable};
use clap::Parser;
use reth_beacon_consensus::BeaconConsensus;
use reth_config::Config;
use reth_db::{
cursor::DbCursorRO, init_db, mdbx::DatabaseArguments, tables, transaction::DbTx, DatabaseEnv,
};
use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx, DatabaseEnv};
use reth_interfaces::{consensus::Consensus, p2p::full_block::FullBlockClient};
use reth_network::NetworkHandle;
use reth_network_api::NetworkInfo;
@ -127,8 +125,7 @@ impl Command {
fs::create_dir_all(&db_path)?;
// initialize the database
let db =
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
let db = Arc::new(init_db(db_path, self.db.database_args())?);
let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?;
let provider_rw = factory.provider_rw()?;


@ -15,7 +15,7 @@ use reth_blockchain_tree::{
BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals,
};
use reth_config::Config;
use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv};
use reth_db::{init_db, DatabaseEnv};
use reth_interfaces::consensus::Consensus;
use reth_network::NetworkHandle;
use reth_network_api::NetworkInfo;
@ -124,8 +124,7 @@ impl Command {
fs::create_dir_all(&db_path)?;
// Initialize the database
let db =
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
let db = Arc::new(init_db(db_path, self.db.database_args())?);
let provider_factory =
ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?;


@ -13,7 +13,7 @@ use eyre::Context;
use futures::{Stream, StreamExt};
use reth_beacon_consensus::BeaconConsensus;
use reth_config::Config;
use reth_db::{database::Database, init_db, mdbx::DatabaseArguments};
use reth_db::{database::Database, init_db};
use reth_downloaders::{
bodies::bodies::BodiesDownloaderBuilder, file_client::FileClient,
headers::reverse_headers::ReverseHeadersDownloaderBuilder,
@ -87,8 +87,7 @@ impl ImportCommand {
let db_path = data_dir.db_path();
info!(target: "reth::cli", path = ?db_path, "Opening database");
let db =
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
let db = Arc::new(init_db(db_path, self.db.database_args())?);
info!(target: "reth::cli", "Database opened");
let provider_factory =
ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?;


@ -8,7 +8,7 @@ use crate::{
dirs::{DataDirPath, MaybePlatformPath},
};
use clap::Parser;
use reth_db::{init_db, mdbx::DatabaseArguments};
use reth_db::init_db;
use reth_node_core::init::init_genesis;
use reth_primitives::ChainSpec;
use reth_provider::ProviderFactory;
@ -53,8 +53,7 @@ impl InitCommand {
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
let db_path = data_dir.db_path();
info!(target: "reth::cli", path = ?db_path, "Opening database");
let db =
Arc::new(init_db(&db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
let db = Arc::new(init_db(&db_path, self.db.database_args())?);
info!(target: "reth::cli", "Database opened");
let provider_factory =


@ -10,7 +10,7 @@ use crate::{
dirs::{DataDirPath, MaybePlatformPath},
};
use clap::{value_parser, Args, Parser};
use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv};
use reth_db::{init_db, DatabaseEnv};
use reth_node_builder::{InitState, NodeBuilder, WithLaunchContext};
use reth_node_core::node_config::NodeConfig;
use reth_primitives::ChainSpec;
@ -187,10 +187,7 @@ impl<Ext: clap::Args + fmt::Debug> NodeCommand<Ext> {
let db_path = data_dir.db_path();
tracing::info!(target: "reth::cli", path = ?db_path, "Opening database");
let database = Arc::new(
init_db(db_path.clone(), DatabaseArguments::default().log_level(db.log_level))?
.with_metrics(),
);
let database = Arc::new(init_db(db_path.clone(), self.db.database_args())?.with_metrics());
if with_unused_ports {
node_config = node_config.with_unused_ports();


@ -12,7 +12,7 @@ use crate::{
use backon::{ConstantBuilder, Retryable};
use clap::{Parser, Subcommand};
use reth_config::Config;
use reth_db::{mdbx::DatabaseArguments, open_db};
use reth_db::open_db;
use reth_discv4::NatResolver;
use reth_interfaces::p2p::bodies::client::BodiesClient;
use reth_primitives::{BlockHashOrNumber, ChainSpec, NodeRecord};
@ -100,10 +100,7 @@ impl Command {
/// Execute `p2p` command
pub async fn execute(&self) -> eyre::Result<()> {
let tempdir = tempfile::TempDir::new()?;
let noop_db = Arc::new(open_db(
&tempdir.into_path(),
DatabaseArguments::default().log_level(self.db.log_level),
)?);
let noop_db = Arc::new(open_db(&tempdir.into_path(), self.db.database_args())?);
// add network name to data dir
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);


@ -9,7 +9,7 @@ use reth_db::{
init_db, tables,
transaction::DbTx,
};
use reth_node_core::init::init_genesis;
use reth_node_core::{args::DatabaseArgs, init::init_genesis};
use reth_primitives::ChainSpec;
use reth_provider::{BlockNumReader, HeaderProvider, ProviderError, ProviderFactory};
use reth_trie::StateRoot;
@ -40,6 +40,10 @@ pub struct Command {
value_parser = genesis_value_parser
)]
chain: Arc<ChainSpec>,
/// All database related arguments
#[command(flatten)]
pub db: DatabaseArgs,
}
impl Command {
@ -48,7 +52,7 @@ impl Command {
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
let db_path = data_dir.db_path();
fs::create_dir_all(&db_path)?;
let db = Arc::new(init_db(db_path, Default::default())?);
let db = Arc::new(init_db(db_path, self.db.database_args())?);
let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?;


@ -11,8 +11,8 @@ use crate::{
use clap::Parser;
use itertools::Itertools;
use reth_db::{
database::Database, mdbx::DatabaseArguments, open_db, static_file::iter_static_files, tables,
transaction::DbTxMut, DatabaseEnv,
database::Database, open_db, static_file::iter_static_files, tables, transaction::DbTxMut,
DatabaseEnv,
};
use reth_node_core::init::{insert_genesis_header, insert_genesis_state};
use reth_primitives::{
@ -60,8 +60,7 @@ impl Command {
let db_path = data_dir.db_path();
fs::create_dir_all(&db_path)?;
let db =
open_db(db_path.as_ref(), DatabaseArguments::default().log_level(self.db.log_level))?;
let db = open_db(db_path.as_ref(), self.db.database_args())?;
let provider_factory =
ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?;
let static_file_provider = provider_factory.static_file_provider();


@ -11,8 +11,9 @@ use crate::args::{
};
use clap::Parser;
use reth_db::{
cursor::DbCursorRO, database::Database, init_db, table::TableImporter, tables,
transaction::DbTx, DatabaseEnv,
cursor::DbCursorRO, database::Database, init_db, mdbx::DatabaseArguments,
models::client_version::ClientVersion, table::TableImporter, tables, transaction::DbTx,
DatabaseEnv,
};
use reth_node_core::dirs::PlatformPath;
use reth_primitives::ChainSpec;
@ -31,7 +32,6 @@ use execution::dump_execution_stage;
mod merkle;
use merkle::dump_merkle_stage;
use reth_db::mdbx::DatabaseArguments;
/// `reth dump-stage` command
#[derive(Debug, Parser)]
@ -104,8 +104,7 @@ impl Command {
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
let db_path = data_dir.db_path();
info!(target: "reth::cli", path = ?db_path, "Opening database");
let db =
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
let db = Arc::new(init_db(db_path, self.db.database_args())?);
let provider_factory =
ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?;
@ -172,7 +171,7 @@ pub(crate) fn setup<DB: Database>(
info!(target: "reth::cli", ?output_db, "Creating separate db");
let output_datadir = init_db(output_db, Default::default())?;
let output_datadir = init_db(output_db, DatabaseArguments::new(ClientVersion::default()))?;
output_datadir.update(|tx| {
tx.import_table_with_range::<tables::BlockBodyIndices, _>(


@ -15,7 +15,7 @@ use crate::{
use clap::Parser;
use reth_beacon_consensus::BeaconConsensus;
use reth_config::Config;
use reth_db::{init_db, mdbx::DatabaseArguments};
use reth_db::init_db;
use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder;
use reth_node_ethereum::EthEvmConfig;
use reth_primitives::ChainSpec;
@ -127,8 +127,7 @@ impl Command {
let db_path = data_dir.db_path();
info!(target: "reth::cli", path = ?db_path, "Opening database");
let db =
Arc::new(init_db(db_path, DatabaseArguments::default().log_level(self.db.log_level))?);
let db = Arc::new(init_db(db_path, self.db.database_args())?);
info!(target: "reth::cli", "Database opened");
let factory = ProviderFactory::new(


@ -8,10 +8,7 @@ use crate::{
dirs::{DataDirPath, MaybePlatformPath},
};
use clap::{Parser, Subcommand};
use reth_db::{
cursor::DbCursorRO, database::Database, mdbx::DatabaseArguments, open_db, tables,
transaction::DbTx,
};
use reth_db::{cursor::DbCursorRO, database::Database, open_db, tables, transaction::DbTx};
use reth_primitives::{BlockHashOrNumber, ChainSpec};
use reth_provider::{BlockExecutionWriter, ProviderFactory};
use std::{ops::RangeInclusive, sync::Arc};
@ -59,8 +56,7 @@ impl Command {
eyre::bail!("Database {db_path:?} does not exist.")
}
let db =
open_db(db_path.as_ref(), DatabaseArguments::default().log_level(self.db.log_level))?;
let db = open_db(db_path.as_ref(), self.db.database_args())?;
let range = self.command.unwind_range(&db)?;


@ -3,6 +3,8 @@
use clap::Args;
use reth_interfaces::db::LogLevel;
use crate::version::default_client_version;
/// Parameters for database configuration
#[derive(Debug, Args, PartialEq, Default, Clone, Copy)]
#[command(next_help_heading = "Database")]
@ -12,6 +14,14 @@ pub struct DatabaseArgs {
pub log_level: Option<LogLevel>,
}
impl DatabaseArgs {
/// Returns default database arguments with configured log level and client version.
pub fn database_args(&self) -> reth_db::mdbx::DatabaseArguments {
reth_db::mdbx::DatabaseArguments::new(default_client_version())
.with_log_level(self.log_level)
}
}
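
A minimal sketch of the new helper in use (crate path taken from the imports elsewhere in this diff; `default_client_version()` is built from compile-time constants, so the recorded version is never empty):

use reth_node_core::args::DatabaseArgs;

fn main() {
    // Normally populated by clap through `#[command(flatten)] pub db: DatabaseArgs`.
    let db_args = DatabaseArgs::default();

    // Converts the CLI flags into mdbx `DatabaseArguments` that carry the client
    // version, so every command that opens the database records it on open.
    let args = db_args.database_args();
    assert!(!args.client_version().is_empty());
}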
#[cfg(test)]
mod tests {
use super::*;


@ -1,5 +1,16 @@
//! Version information for reth.
use reth_db::models::client_version::ClientVersion;
/// The latest version from Cargo.toml.
pub const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION");
/// The short SHA of the latest commit.
pub const VERGEN_GIT_SHA: &str = env!("VERGEN_GIT_SHA");
/// The build timestamp.
pub const VERGEN_BUILD_TIMESTAMP: &str = env!("VERGEN_BUILD_TIMESTAMP");
/// The short version information for reth.
///
/// - The latest version from Cargo.toml
@ -81,6 +92,15 @@ pub fn default_extradata() -> String {
format!("reth/v{}/{}", env!("CARGO_PKG_VERSION"), std::env::consts::OS)
}
/// The default client version accessing the database.
pub fn default_client_version() -> ClientVersion {
ClientVersion {
version: CARGO_PKG_VERSION.to_string(),
git_sha: VERGEN_GIT_SHA.to_string(),
build_timestamp: VERGEN_BUILD_TIMESTAMP.to_string(),
}
}
pub(crate) const fn build_profile_name() -> &'static str {
// Derived from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime
// We split on the path separator of the *host* machine, which may be different from


@ -1,10 +1,13 @@
//! Module that interacts with MDBX.
use crate::{
cursor::{DbCursorRO, DbCursorRW},
database::Database,
database_metrics::{DatabaseMetadata, DatabaseMetadataValue, DatabaseMetrics},
metrics::DatabaseEnvMetrics,
tables::{TableType, Tables},
models::client_version::ClientVersion,
tables::{self, TableType, Tables},
transaction::{DbTx, DbTxMut},
utils::default_page_size,
DatabaseError,
};
@ -16,7 +19,12 @@ use reth_libmdbx::{
PageSize, SyncMode, RO, RW,
};
use reth_tracing::tracing::error;
use std::{ops::Deref, path::Path, sync::Arc};
use std::{
ops::Deref,
path::Path,
sync::Arc,
time::{SystemTime, UNIX_EPOCH},
};
use tx::Tx;
pub mod cursor;
@ -42,9 +50,18 @@ pub enum DatabaseEnvKind {
RW,
}
impl DatabaseEnvKind {
/// Returns `true` if the environment is read-write.
pub fn is_rw(&self) -> bool {
matches!(self, Self::RW)
}
}
/// Arguments for database initialization.
#[derive(Debug, Default, Clone, Copy)]
#[derive(Clone, Debug)]
pub struct DatabaseArguments {
/// Client version that accesses the database.
client_version: ClientVersion,
/// Database log level. If [None], the default value is used.
log_level: Option<LogLevel>,
/// Maximum duration of a read transaction. If [None], the default value is used.
@ -73,14 +90,24 @@ pub struct DatabaseArguments {
}
impl DatabaseArguments {
/// Create new database arguments with given client version.
pub fn new(client_version: ClientVersion) -> Self {
Self {
client_version,
log_level: None,
max_read_transaction_duration: None,
exclusive: None,
}
}
/// Set the log level.
pub fn log_level(mut self, log_level: Option<LogLevel>) -> Self {
pub fn with_log_level(mut self, log_level: Option<LogLevel>) -> Self {
self.log_level = log_level;
self
}
/// Set the maximum duration of a read transaction.
pub fn max_read_transaction_duration(
pub fn with_max_read_transaction_duration(
mut self,
max_read_transaction_duration: Option<MaxReadTransactionDuration>,
) -> Self {
@ -89,10 +116,15 @@ impl DatabaseArguments {
}
/// Set the mdbx exclusive flag.
pub fn exclusive(mut self, exclusive: Option<bool>) -> Self {
pub fn with_exclusive(mut self, exclusive: Option<bool>) -> Self {
self.exclusive = exclusive;
self
}
/// Returns the client version if any.
pub fn client_version(&self) -> &ClientVersion {
&self.client_version
}
}
/// Wrapper for the libmdbx environment: [Environment]
@ -375,6 +407,27 @@ impl DatabaseEnv {
Ok(())
}
/// Records the client version that accesses the database with write privileges.
pub fn record_client_version(&self, version: ClientVersion) -> Result<(), DatabaseError> {
if version.is_empty() {
return Ok(())
}
let tx = self.tx_mut()?;
let mut version_cursor = tx.cursor_write::<tables::VersionHistory>()?;
let last_version = version_cursor.last()?.map(|(_, v)| v);
if Some(&version) != last_version.as_ref() {
version_cursor.upsert(
SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(),
version,
)?;
tx.commit()?;
}
Ok(())
}
}
impl Deref for DatabaseEnv {
@ -390,13 +443,12 @@ mod tests {
use super::*;
use crate::{
abstraction::table::{Encode, Table},
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW, ReverseWalker, Walker},
cursor::{DbDupCursorRO, DbDupCursorRW, ReverseWalker, Walker},
models::{AccountBeforeTx, ShardedKey},
tables::{
AccountsHistory, CanonicalHeaders, Headers, PlainAccountState, PlainStorageState,
},
test_utils::*,
transaction::{DbTx, DbTxMut},
AccountChangeSets,
};
use reth_interfaces::db::{DatabaseWriteError, DatabaseWriteOperation};
@ -415,8 +467,8 @@ mod tests {
/// Create database for testing with specified path
fn create_test_db_with_path(kind: DatabaseEnvKind, path: &Path) -> DatabaseEnv {
let env =
DatabaseEnv::open(path, kind, DatabaseArguments::default()).expect(ERROR_DB_CREATION);
let env = DatabaseEnv::open(path, kind, DatabaseArguments::new(ClientVersion::default()))
.expect(ERROR_DB_CREATION);
env.create_tables().expect(ERROR_TABLE_CREATION);
env
}
@ -1041,8 +1093,12 @@ mod tests {
assert_eq!(result.expect(ERROR_RETURN_VALUE), 200);
}
let env = DatabaseEnv::open(&path, DatabaseEnvKind::RO, Default::default())
.expect(ERROR_DB_CREATION);
let env = DatabaseEnv::open(
&path,
DatabaseEnvKind::RO,
DatabaseArguments::new(ClientVersion::default()),
)
.expect(ERROR_DB_CREATION);
// GET
let result =
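
A minimal sketch of the renamed builder API introduced above (paths as used elsewhere in this diff): `DatabaseArguments` now requires a `ClientVersion` up front, the setters gained a `with_` prefix, and the struct is `Clone` rather than `Copy` so the version can travel with it.

use reth_db::{
    mdbx::{DatabaseArguments, MaxReadTransactionDuration},
    models::client_version::ClientVersion,
};

fn main() {
    let args = DatabaseArguments::new(ClientVersion::default())
        .with_log_level(None)
        .with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded))
        .with_exclusive(Some(false));

    // A default ClientVersion has empty fields, so record_client_version would skip it.
    assert!(args.client_version().is_empty());
}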


@ -392,8 +392,8 @@ impl DbTxMut for Tx<RW> {
#[cfg(test)]
mod tests {
use crate::{
database::Database, mdbx::DatabaseArguments, tables, transaction::DbTx, DatabaseEnv,
DatabaseEnvKind,
database::Database, mdbx::DatabaseArguments, models::client_version::ClientVersion, tables,
transaction::DbTx, DatabaseEnv, DatabaseEnvKind,
};
use reth_interfaces::db::DatabaseError;
use reth_libmdbx::MaxReadTransactionDuration;
@ -405,8 +405,10 @@ mod tests {
const MAX_DURATION: Duration = Duration::from_secs(1);
let dir = tempdir().unwrap();
let args = DatabaseArguments::default()
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Set(MAX_DURATION)));
let args = DatabaseArguments::new(ClientVersion::default())
.with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Set(
MAX_DURATION,
)));
let db = DatabaseEnv::open(dir.path(), DatabaseEnvKind::RW, args).unwrap().with_metrics();
let mut tx = db.tx().unwrap();
@ -429,8 +431,10 @@ mod tests {
const MAX_DURATION: Duration = Duration::from_secs(1);
let dir = tempdir().unwrap();
let args = DatabaseArguments::default()
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Set(MAX_DURATION)));
let args = DatabaseArguments::new(ClientVersion::default())
.with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Set(
MAX_DURATION,
)));
let db = DatabaseEnv::open(dir.path(), DatabaseEnvKind::RW, args).unwrap().with_metrics();
let mut tx = db.tx().unwrap();


@ -111,8 +111,9 @@ pub fn init_db<P: AsRef<Path>>(path: P, args: DatabaseArguments) -> eyre::Result
}
#[cfg(feature = "mdbx")]
{
let db = DatabaseEnv::open(rpath, DatabaseEnvKind::RW, args)?;
let db = DatabaseEnv::open(rpath, DatabaseEnvKind::RW, args.clone())?;
db.create_tables()?;
db.record_client_version(args.client_version().clone())?;
Ok(db)
}
#[cfg(not(feature = "mdbx"))]
@ -139,8 +140,10 @@ pub fn open_db_read_only(path: &Path, args: DatabaseArguments) -> eyre::Result<D
pub fn open_db(path: &Path, args: DatabaseArguments) -> eyre::Result<DatabaseEnv> {
#[cfg(feature = "mdbx")]
{
DatabaseEnv::open(path, DatabaseEnvKind::RW, args)
.with_context(|| format!("Could not open database at path: {}", path.display()))
let db = DatabaseEnv::open(path, DatabaseEnvKind::RW, args.clone())
.with_context(|| format!("Could not open database at path: {}", path.display()))?;
db.record_client_version(args.client_version().clone())?;
Ok(db)
}
#[cfg(not(feature = "mdbx"))]
{
@ -155,6 +158,7 @@ pub mod test_utils {
use crate::{
database::Database,
database_metrics::{DatabaseMetadata, DatabaseMetadataValue, DatabaseMetrics},
models::client_version::ClientVersion,
};
use reth_libmdbx::MaxReadTransactionDuration;
use reth_primitives::fs;
@ -250,8 +254,8 @@ pub mod test_utils {
let db = init_db(
&path,
DatabaseArguments::default()
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)),
DatabaseArguments::new(ClientVersion::default())
.with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)),
)
.expect(&emsg);
@ -263,8 +267,8 @@ pub mod test_utils {
let path = path.as_ref().to_path_buf();
let db = init_db(
path.as_path(),
DatabaseArguments::default()
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)),
DatabaseArguments::new(ClientVersion::default())
.with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)),
)
.expect(ERROR_DB_CREATION);
Arc::new(TempDatabase { db: Some(db), path })
@ -272,12 +276,12 @@ pub mod test_utils {
/// Create read only database for testing
pub fn create_test_ro_db() -> Arc<TempDatabase<DatabaseEnv>> {
let args = DatabaseArguments::default()
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded));
let args = DatabaseArguments::new(ClientVersion::default())
.with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded));
let path = tempdir_path();
{
init_db(path.as_path(), args).expect(ERROR_DB_CREATION);
init_db(path.as_path(), args.clone()).expect(ERROR_DB_CREATION);
}
let db = open_db_read_only(path.as_path(), args).expect(ERROR_DB_OPEN);
Arc::new(TempDatabase { db: Some(db), path })
@ -286,9 +290,16 @@ pub mod test_utils {
#[cfg(test)]
mod tests {
use std::time::Duration;
use crate::{
cursor::DbCursorRO,
database::Database,
init_db,
mdbx::DatabaseArguments,
models::client_version::ClientVersion,
open_db, tables,
transaction::DbTx,
version::{db_version_file_path, DatabaseVersionError},
};
use assert_matches::assert_matches;
@ -300,25 +311,25 @@ mod tests {
fn db_version() {
let path = tempdir().unwrap();
let args = DatabaseArguments::default()
.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded));
let args = DatabaseArguments::new(ClientVersion::default())
.with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded));
// Database is empty
{
let db = init_db(&path, args);
let db = init_db(&path, args.clone());
assert_matches!(db, Ok(_));
}
// Database is not empty, current version is the same as in the file
{
let db = init_db(&path, args);
let db = init_db(&path, args.clone());
assert_matches!(db, Ok(_));
}
// Database is not empty, version file is malformed
{
fs::write(path.path().join(db_version_file_path(&path)), "invalid-version").unwrap();
let db = init_db(&path, args);
let db = init_db(&path, args.clone());
assert!(db.is_err());
assert_matches!(
db.unwrap_err().downcast_ref::<DatabaseVersionError>(),
@ -337,4 +348,86 @@ mod tests {
)
}
}
#[test]
fn db_client_version() {
let path = tempdir().unwrap();
// Empty client version is not recorded
{
let db = init_db(&path, DatabaseArguments::new(ClientVersion::default())).unwrap();
let tx = db.tx().unwrap();
let mut cursor = tx.cursor_read::<tables::VersionHistory>().unwrap();
assert_matches!(cursor.first(), Ok(None));
}
// Client version is recorded
let first_version = ClientVersion { version: String::from("v1"), ..Default::default() };
{
let db = init_db(&path, DatabaseArguments::new(first_version.clone())).unwrap();
let tx = db.tx().unwrap();
let mut cursor = tx.cursor_read::<tables::VersionHistory>().unwrap();
assert_eq!(
cursor
.walk_range(..)
.unwrap()
.map(|x| x.map(|(_, v)| v))
.collect::<Result<Vec<_>, _>>()
.unwrap(),
vec![first_version.clone()]
);
}
// Same client version is not duplicated.
{
let db = init_db(&path, DatabaseArguments::new(first_version.clone())).unwrap();
let tx = db.tx().unwrap();
let mut cursor = tx.cursor_read::<tables::VersionHistory>().unwrap();
assert_eq!(
cursor
.walk_range(..)
.unwrap()
.map(|x| x.map(|(_, v)| v))
.collect::<Result<Vec<_>, _>>()
.unwrap(),
vec![first_version.clone()]
);
}
// Different client version is recorded
std::thread::sleep(Duration::from_secs(1));
let second_version = ClientVersion { version: String::from("v2"), ..Default::default() };
{
let db = init_db(&path, DatabaseArguments::new(second_version.clone())).unwrap();
let tx = db.tx().unwrap();
let mut cursor = tx.cursor_read::<tables::VersionHistory>().unwrap();
assert_eq!(
cursor
.walk_range(..)
.unwrap()
.map(|x| x.map(|(_, v)| v))
.collect::<Result<Vec<_>, _>>()
.unwrap(),
vec![first_version.clone(), second_version.clone()]
);
}
// Different client version is recorded on db open.
std::thread::sleep(Duration::from_secs(1));
let third_version = ClientVersion { version: String::from("v3"), ..Default::default() };
{
let db = open_db(path.path(), DatabaseArguments::new(third_version.clone())).unwrap();
let tx = db.tx().unwrap();
let mut cursor = tx.cursor_read::<tables::VersionHistory>().unwrap();
assert_eq!(
cursor
.walk_range(..)
.unwrap()
.map(|x| x.map(|(_, v)| v))
.collect::<Result<Vec<_>, _>>()
.unwrap(),
vec![first_version, second_version, third_version]
);
}
}
}
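
An end-to-end sketch mirroring the tests above (tempfile and eyre as already used in this crate): `init_db` records the client version right after creating the tables, keyed by the unix timestamp in seconds at which it was written.

use reth_db::{
    cursor::DbCursorRO, database::Database, init_db, mdbx::DatabaseArguments,
    models::client_version::ClientVersion, tables, transaction::DbTx,
};

fn main() -> eyre::Result<()> {
    let dir = tempfile::tempdir()?;
    let version = ClientVersion { version: "v1".to_string(), ..Default::default() };
    let db = init_db(dir.path(), DatabaseArguments::new(version))?;

    // Read back the recorded history.
    let tx = db.tx()?;
    let mut cursor = tx.cursor_read::<tables::VersionHistory>()?;
    for entry in cursor.walk_range(..)? {
        let (unix_secs, recorded) = entry?;
        println!("{unix_secs}: {}", recorded.version);
    }
    Ok(())
}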


@ -1,4 +1,5 @@
use crate::{
models::client_version::ClientVersion,
table::{Compress, Decompress},
tables::models::*,
};
@ -48,7 +49,8 @@ impl_compression_for_compact!(
TransactionSignedNoHash,
CompactU256,
StageCheckpoint,
PruneCheckpoint
PruneCheckpoint,
ClientVersion
);
macro_rules! impl_compression_fixed_compact {
@ -118,3 +120,4 @@ macro_rules! add_wrapper_struct {
add_wrapper_struct!((U256, CompactU256));
add_wrapper_struct!((u64, CompactU64));
add_wrapper_struct!((ClientVersion, CompactClientVersion));


@ -31,6 +31,7 @@ use crate::{
models::{
accounts::{AccountBeforeTx, BlockNumberAddress},
blocks::{HeaderHash, StoredBlockOmmers},
client_version::ClientVersion,
storage_sharded_key::StorageShardedKey,
ShardedKey, StoredBlockBodyIndices, StoredBlockWithdrawals,
},
@ -372,6 +373,9 @@ tables! {
/// Stores the highest pruned block number and prune mode of each prune segment.
table PruneCheckpoints<Key = PruneSegment, Value = PruneCheckpoint>;
/// Stores the history of client versions that have accessed the database with write privileges, keyed by unix timestamp in seconds.
table VersionHistory<Key = u64, Value = ClientVersion>;
}
// Alias types.
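
Because the key of the table above is the unix timestamp in seconds, entries are chronological and the newest recorded version is simply the last row; this is also how `record_client_version` avoids writing duplicates. A small helper sketch (paths as used elsewhere in this diff):

use reth_db::{
    cursor::DbCursorRO, database::Database, models::client_version::ClientVersion, tables,
    transaction::DbTx, DatabaseEnv, DatabaseError,
};

/// Returns the most recently recorded client version, if any.
fn latest_client_version(db: &DatabaseEnv) -> Result<Option<ClientVersion>, DatabaseError> {
    let tx = db.tx()?;
    let mut cursor = tx.cursor_read::<tables::VersionHistory>()?;
    Ok(cursor.last()?.map(|(_, version)| version))
}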


@ -0,0 +1,47 @@
//! Client version model.
use reth_codecs::{derive_arbitrary, Compact};
use serde::{Deserialize, Serialize};
/// Client version that accessed the database.
#[derive_arbitrary(compact)]
#[derive(Clone, Eq, PartialEq, Debug, Default, Serialize, Deserialize)]
pub struct ClientVersion {
/// Client version
pub version: String,
/// The git commit sha
pub git_sha: String,
/// Build timestamp
pub build_timestamp: String,
}
impl ClientVersion {
/// Returns `true` if no version fields are set.
pub fn is_empty(&self) -> bool {
self.version.is_empty() && self.git_sha.is_empty() && self.build_timestamp.is_empty()
}
}
impl Compact for ClientVersion {
fn to_compact<B>(self, buf: &mut B) -> usize
where
B: bytes::BufMut + AsMut<[u8]>,
{
let Self { version, git_sha, build_timestamp } = self;
version.into_bytes().to_compact(buf);
git_sha.into_bytes().to_compact(buf);
build_timestamp.into_bytes().to_compact(buf)
}
fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
let (version, buf) = Vec::<u8>::from_compact(buf, len);
let (git_sha, buf) = Vec::<u8>::from_compact(buf, len);
let (build_timestamp, buf) = Vec::<u8>::from_compact(buf, len);
let client_version = Self {
version: unsafe { String::from_utf8_unchecked(version) },
git_sha: unsafe { String::from_utf8_unchecked(git_sha) },
build_timestamp: unsafe { String::from_utf8_unchecked(build_timestamp) },
};
(client_version, buf)
}
}
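
A hedged sketch of the Compact round-trip for the new model (fields are written in the order version, git_sha, build_timestamp; the unchecked UTF-8 conversion in `from_compact` is sound because the bytes come from valid `String`s in `to_compact`):

use reth_codecs::Compact;
use reth_db::models::client_version::ClientVersion;

fn main() {
    let version = ClientVersion {
        version: "v1".to_string(),
        git_sha: "610731c".to_string(),
        build_timestamp: "2024-03-13".to_string(),
    };

    let mut buf: Vec<u8> = Vec::new();
    version.clone().to_compact(&mut buf);

    let (decoded, _) = ClientVersion::from_compact(&buf, buf.len());
    assert_eq!(decoded, version);
}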


@ -12,6 +12,7 @@ use reth_primitives::{
pub mod accounts;
pub mod blocks;
pub mod client_version;
pub mod integer_list;
pub mod sharded_key;
pub mod storage_sharded_key;
@ -20,6 +21,8 @@ pub use accounts::*;
pub use blocks::*;
pub use sharded_key::ShardedKey;
use self::client_version::ClientVersion;
/// Macro that implements [`Encode`] and [`Decode`] for uint types.
macro_rules! impl_uints {
($($name:tt),+) => {
@ -155,3 +158,21 @@ impl Decode for PruneSegment {
Ok(Self::from_compact(buf, buf.len()).0)
}
}
impl Encode for ClientVersion {
type Encoded = Vec<u8>;
// Delegate to the Compact implementation
fn encode(self) -> Self::Encoded {
let mut buf = vec![];
self.to_compact(&mut buf);
buf
}
}
impl Decode for ClientVersion {
fn decode<B: AsRef<[u8]>>(value: B) -> Result<Self, DatabaseError> {
let buf = value.as_ref();
Ok(Self::from_compact(buf, buf.len()).0)
}
}


@ -613,6 +613,7 @@ mod tests {
use assert_matches::assert_matches;
use rand::Rng;
use reth_db::{
mdbx::DatabaseArguments,
tables,
test_utils::{create_test_static_files_dir, ERROR_TEMPDIR},
};
@ -663,7 +664,7 @@ mod tests {
let factory = ProviderFactory::new_with_database_path(
tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path(),
Arc::new(chain_spec),
Default::default(),
DatabaseArguments::new(Default::default()),
create_test_static_files_dir(),
)
.unwrap();


@ -1,4 +1,4 @@
use reth_db::open_db_read_only;
use reth_db::{mdbx::DatabaseArguments, models::client_version::ClientVersion, open_db_read_only};
use reth_primitives::{Address, ChainSpecBuilder, B256};
use reth_provider::{
AccountReader, BlockReader, BlockSource, HeaderProvider, ProviderFactory, ReceiptProvider,
@ -19,7 +19,10 @@ fn main() -> eyre::Result<()> {
// doing in 2 steps.
let db_path = std::env::var("RETH_DB_PATH")?;
let db_path = Path::new(&db_path);
let db = open_db_read_only(db_path.join("db").as_path(), Default::default())?;
let db = open_db_read_only(
db_path.join("db").as_path(),
DatabaseArguments::new(ClientVersion::default()),
)?;
// Instantiate a provider factory for Ethereum mainnet using the provided DB.
// TODO: Should the DB version include the spec so that you do not need to specify it here?


@ -9,6 +9,7 @@ license.workspace = true
futures.workspace = true
jsonrpsee.workspace = true
reth.workspace = true
reth-db.workspace = true
reth-node-ethereum.workspace = true
tokio = { workspace = true, features = ["full"] }
eyre.workspace = true


@ -17,6 +17,7 @@ use reth::{
providers::{providers::BlockchainProvider, ProviderFactory},
utils::db::open_db_read_only,
};
use reth_db::{mdbx::DatabaseArguments, models::client_version::ClientVersion};
// Bringing up the RPC
use reth::rpc::builder::{
RethRpcModule, RpcModuleBuilder, RpcServerConfig, TransportRpcModuleConfig,
@ -38,7 +39,10 @@ async fn main() -> eyre::Result<()> {
// 1. Setup the DB
let db_path = std::env::var("RETH_DB_PATH")?;
let db_path = Path::new(&db_path);
let db = Arc::new(open_db_read_only(db_path.join("db").as_path(), Default::default())?);
let db = Arc::new(open_db_read_only(
db_path.join("db").as_path(),
DatabaseArguments::new(ClientVersion::default()),
)?);
let spec = Arc::new(ChainSpecBuilder::mainnet().build());
let factory = ProviderFactory::new(db.clone(), spec.clone(), db_path.join("static_files"))?;