add doc_markdown clippy lint (#8552)
Co-authored-by: Alexey Shekhirin <a.shekhirin@gmail.com>
Co-authored-by: Matthias Seitz <matthias.seitz@outlook.de>
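The changes below are all one mechanical fix: `clippy::doc_markdown` is a pedantic Clippy lint that flags identifier-like words in rustdoc text (camelCase/PascalCase words, words containing underscores, paths with `::`) that are not wrapped in backticks. A minimal sketch of the lint in action follows; the `Example` trait is hypothetical, and whether the crate enables the lint with a crate attribute as shown or with a `[lints.clippy]` table in Cargo.toml is not visible in this diff.

// Enabling the lint crate-wide (one of several equivalent mechanisms):
#![warn(clippy::doc_markdown)]

// With the lint on, `DupSorted` in the doc text below triggers
// "warning: item in documentation is missing backticks", and the fix is
// exactly what every hunk in this commit applies:
//     /// Read Write Cursor over `DupSorted` table.
/// Read Write Cursor over DupSorted table.
pub trait Example {}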
@@ -26,7 +26,7 @@ criterion_main!(benches);
 /// * `put_sorted`: Table is preloaded with rows (same as batch size). Sorts during benchmark.
 /// * `put_unsorted`: Table is preloaded with rows (same as batch size).
 ///
-/// It does the above steps with different batches of rows. 10_000, 100_000, 1_000_000. In the
+/// It does the above steps with different batches of rows. `10_000`, `100_000`, `1_000_000`. In the
 /// end, the table statistics are shown (eg. number of pages, table size...)
 pub fn hash_keys(c: &mut Criterion) {
     let mut group = c.benchmark_group("Hash-Keys Table Insertion");
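Note that the lint is not limited to type names: any word containing an underscore reads as an identifier to `doc_markdown`, which is why the numeric literals in the hunk above also gain backticks. A small illustration with hypothetical items:

// Warns: "item in documentation is missing backticks" for `10_000`.
/// Inserts rows in batches of 10_000.
pub fn unticked() {}

// Lint-clean after the same fix the hunk above applies.
/// Inserts rows in batches of `10_000`.
pub fn ticked() {}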
@@ -13,7 +13,7 @@ use std::{path::Path, sync::Arc};
 #[allow(dead_code)]
 const BENCH_DB_PATH: &str = "/tmp/reth-benches";
 
-/// Used for RandomRead and RandomWrite benchmarks.
+/// Used for `RandomRead` and `RandomWrite` benchmarks.
 #[allow(dead_code)]
 const RANDOM_INDEXES: [usize; 10] = [23, 2, 42, 5, 3, 99, 54, 0, 33, 64];
 
@@ -120,7 +120,7 @@ pub trait DbCursorRW<T: Table> {
     fn delete_current(&mut self) -> Result<(), DatabaseError>;
 }
 
-/// Read Write Cursor over DupSorted table.
+/// Read Write Cursor over `DupSorted` table.
 pub trait DbDupCursorRW<T: DupSort> {
     /// Delete all duplicate entries for current key.
     fn delete_current_duplicates(&mut self) -> Result<(), DatabaseError>;
@@ -209,7 +209,7 @@ where
 }
 
 impl<'cursor, T: Table, CURSOR: DbCursorRO<T>> ReverseWalker<'cursor, T, CURSOR> {
-    /// construct ReverseWalker
+    /// construct `ReverseWalker`
     pub fn new(cursor: &'cursor mut CURSOR, start: IterPairResult<T>) -> Self {
         Self { cursor, start }
     }
@@ -300,7 +300,7 @@ impl<'cursor, T: Table, CURSOR: DbCursorRO<T>> Iterator for RangeWalker<'cursor,
 }
 
 impl<'cursor, T: Table, CURSOR: DbCursorRO<T>> RangeWalker<'cursor, T, CURSOR> {
-    /// construct RangeWalker
+    /// construct `RangeWalker`
     pub fn new(
         cursor: &'cursor mut CURSOR,
         start: IterPairResult<T>,
@@ -49,7 +49,7 @@ pub struct DatabaseMetadataValue {
 }
 
 impl DatabaseMetadataValue {
-    /// Creates a new [DatabaseMetadataValue] with the given freelist size.
+    /// Creates a new [`DatabaseMetadataValue`] with the given freelist size.
     pub const fn new(freelist_size: Option<usize>) -> Self {
         Self { freelist_size }
     }
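This hunk and several that follow adjust intra-doc links rather than plain words: [DatabaseMetadataValue] becomes [`DatabaseMetadataValue`]. Both forms resolve to the same item under rustdoc; the backticked form additionally renders in code font and satisfies the lint. A sketch using a hypothetical `Freelist` type:

// Flagged by doc_markdown: the link text is an unticked identifier.
/// Tracks the free pages described by [Freelist].
pub struct Unticked;

// Lint-clean: same link target, now rendered in code font.
/// Tracks the free pages described by [`Freelist`].
pub struct Ticked;

/// Hypothetical link target for this sketch.
pub struct Freelist;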
@@ -60,10 +60,10 @@ impl DatabaseMetadataValue {
     }
 }
 
-/// Includes a method to return a [DatabaseMetadataValue] type, which can be used to dynamically
+/// Includes a method to return a [`DatabaseMetadataValue`] type, which can be used to dynamically
 /// retrieve information about the database.
 pub trait DatabaseMetadata {
-    /// Returns a metadata type, [DatabaseMetadataValue] for the database.
+    /// Returns a metadata type, [`DatabaseMetadataValue`] for the database.
     fn metadata(&self) -> DatabaseMetadataValue;
 }
 
@@ -14,7 +14,7 @@ use crate::{
 use core::ops::Bound;
 use std::{collections::BTreeMap, ops::RangeBounds};
 
-/// Mock database used for testing with inner BTreeMap structure
+/// Mock database used for testing with inner `BTreeMap` structure
 // TODO
 #[derive(Clone, Debug, Default)]
 pub struct DatabaseMock {
@@ -98,7 +98,7 @@ pub trait Table: Send + Sync + Debug + 'static {
 /// Tuple with `T::Key` and `T::Value`.
 pub type TableRow<T> = (<T as Table>::Key, <T as Table>::Value);
 
-/// DupSort allows for keys to be repeated in the database.
+/// `DupSort` allows for keys to be repeated in the database.
 ///
 /// Upstream docs: <https://libmdbx.dqdkfa.ru/usage.html#autotoc_md48>
 pub trait DupSort: Table {
@@ -8,7 +8,7 @@ use crate::{
 pub trait DbTx: Send + Sync {
     /// Cursor type for this read-only transaction
     type Cursor<T: Table>: DbCursorRO<T> + Send + Sync;
-    /// DupCursor type for this read-only transaction
+    /// `DupCursor` type for this read-only transaction
     type DupCursor<T: DupSort>: DbDupCursorRO<T> + DbCursorRO<T> + Send + Sync;
 
     /// Get value
@@ -32,7 +32,7 @@ pub trait DbTx: Send + Sync {
 pub trait DbTxMut: Send + Sync {
     /// Read-Write Cursor type
     type CursorMut<T: Table>: DbCursorRW<T> + DbCursorRO<T> + Send + Sync;
-    /// Read-Write DupCursor type
+    /// Read-Write `DupCursor` type
     type DupCursorMut<T: DupSort>: DbDupCursorRW<T>
         + DbCursorRW<T>
         + DbDupCursorRO<T>
@@ -49,6 +49,6 @@ pub trait DbTxMut: Send + Sync {
     fn clear<T: Table>(&self) -> Result<(), DatabaseError>;
     /// Cursor mut
     fn cursor_write<T: Table>(&self) -> Result<Self::CursorMut<T>, DatabaseError>;
-    /// DupCursor mut.
+    /// `DupCursor` mut.
     fn cursor_dup_write<T: DupSort>(&self) -> Result<Self::DupCursorMut<T>, DatabaseError>;
 }
@@ -38,7 +38,7 @@ const TERABYTE: usize = GIGABYTE * 1024;
 const DEFAULT_MAX_READERS: u64 = 32_000;
 
 /// Space that a read-only transaction can occupy until the warning is emitted.
-/// See [reth_libmdbx::EnvironmentBuilder::set_handle_slow_readers] for more information.
+/// See [`reth_libmdbx::EnvironmentBuilder::set_handle_slow_readers`] for more information.
 #[cfg(not(windows))]
 const MAX_SAFE_READER_SPACE: usize = 10 * GIGABYTE;
 
@@ -32,7 +32,7 @@ pub struct Tx<K: TransactionKind> {
     pub inner: Transaction<K>,
 
     /// Handler for metrics with its own [Drop] implementation for cases when the transaction isn't
-    /// closed by [Tx::commit] or [Tx::abort], but we still need to report it in the metrics.
+    /// closed by [`Tx::commit`] or [`Tx::abort`], but we still need to report it in the metrics.
     ///
     /// If [Some], then metrics are reported.
     metrics_handler: Option<MetricsHandler<K>>,
@@ -186,13 +186,13 @@ struct MetricsHandler<K: TransactionKind> {
     /// Duration after which we emit the log about long-lived database transactions.
     long_transaction_duration: Duration,
     /// If `true`, the metric about transaction closing has already been recorded and we don't need
-    /// to do anything on [Drop::drop].
+    /// to do anything on [`Drop::drop`].
     close_recorded: bool,
     /// If `true`, the backtrace of transaction will be recorded and logged.
-    /// See [MetricsHandler::log_backtrace_on_long_read_transaction].
+    /// See [`MetricsHandler::log_backtrace_on_long_read_transaction`].
     record_backtrace: bool,
     /// If `true`, the backtrace of transaction has already been recorded and logged.
-    /// See [MetricsHandler::log_backtrace_on_long_read_transaction].
+    /// See [`MetricsHandler::log_backtrace_on_long_read_transaction`].
     backtrace_recorded: AtomicBool,
     env_metrics: Arc<DatabaseEnvMetrics>,
     _marker: PhantomData<K>,
@@ -233,11 +233,11 @@ impl<K: TransactionKind> MetricsHandler<K> {
     }
 
     /// Logs the backtrace of current call if the duration that the read transaction has been open
-    /// is more than [LONG_TRANSACTION_DURATION] and `record_backtrace == true`.
+    /// is more than [`LONG_TRANSACTION_DURATION`] and `record_backtrace == true`.
     /// The backtrace is recorded and logged just once, guaranteed by `backtrace_recorded` atomic.
     ///
-    /// NOTE: Backtrace is recorded using [Backtrace::force_capture], so `RUST_BACKTRACE` env var is
-    /// not needed.
+    /// NOTE: Backtrace is recorded using [`Backtrace::force_capture`], so `RUST_BACKTRACE` env var
+    /// is not needed.
     fn log_backtrace_on_long_read_transaction(&self) {
         if self.record_backtrace &&
             !self.backtrace_recorded.load(Ordering::Relaxed) &&
@@ -172,7 +172,7 @@ pub mod test_utils {
         }
     }
 
-    /// Create static_files path for testing
+    /// Create `static_files` path for testing
     pub fn create_test_static_files_dir() -> (TempDir, PathBuf) {
         let temp_dir = TempDir::with_prefix("reth-test-static-").expect(ERROR_TEMPDIR);
         let path = temp_dir.path().to_path_buf();
@@ -18,7 +18,7 @@ use sysinfo::System;
 pub struct StorageLock(Arc<StorageLockInner>);
 
 impl StorageLock {
-    /// Tries to acquires a write lock on the target directory, returning [StorageLockError] if
+    /// Tries to acquire a write lock on the target directory, returning [`StorageLockError`] if
     /// unsuccessful.
     pub fn try_acquire(path: &Path) -> Result<Self, StorageLockError> {
         let path = path.join("lock");
@@ -48,7 +48,7 @@ pub fn open_db_read_only(path: &Path, args: DatabaseArguments) -> eyre::Result<D
     }
 }
 
-/// Opens up an existing database. Read/Write mode with WriteMap enabled. It doesn't create it or
+/// Opens up an existing database. Read/Write mode with `WriteMap` enabled. It doesn't create it or
 /// create tables if missing.
 pub fn open_db(path: &Path, args: DatabaseArguments) -> eyre::Result<DatabaseEnv> {
     let db = DatabaseEnv::open(path, DatabaseEnvKind::RW, args.clone())
@@ -18,12 +18,12 @@ const LARGE_VALUE_THRESHOLD_BYTES: usize = 4096;
 /// Otherwise, metric recording will no-op.
 #[derive(Debug)]
 pub(crate) struct DatabaseEnvMetrics {
-    /// Caches OperationMetrics handles for each table and operation tuple.
+    /// Caches `OperationMetrics` handles for each table and operation tuple.
     operations: FxHashMap<(Tables, Operation), OperationMetrics>,
-    /// Caches TransactionMetrics handles for counters grouped by only transaction mode.
+    /// Caches `TransactionMetrics` handles for counters grouped by only transaction mode.
     /// Updated both at tx open and close.
     transactions: FxHashMap<TransactionMode, TransactionMetrics>,
-    /// Caches TransactionOutcomeMetrics handles for counters grouped by transaction mode and
+    /// Caches `TransactionOutcomeMetrics` handles for counters grouped by transaction mode and
     /// outcome. Can only be updated at tx close, as outcome is only known at that point.
     transaction_outcomes:
         FxHashMap<(TransactionMode, TransactionOutcome), TransactionOutcomeMetrics>,
@@ -336,8 +336,8 @@ impl TransactionOutcomeMetrics {
 pub(crate) struct OperationMetrics {
     /// Total number of database operations made
     calls_total: Counter,
-    /// The time it took to execute a database operation (put/upsert/insert/append/append_dup) with
-    /// value larger than [LARGE_VALUE_THRESHOLD_BYTES] bytes.
+    /// The time it took to execute a database operation (`put/upsert/insert/append/append_dup`)
+    /// with value larger than [`LARGE_VALUE_THRESHOLD_BYTES`] bytes.
     large_value_duration_seconds: Histogram,
 }
 
@@ -345,7 +345,7 @@ impl OperationMetrics {
     /// Record operation metric.
     ///
     /// The duration it took to execute the closure is recorded only if the provided `value_size` is
-    /// larger than [LARGE_VALUE_THRESHOLD_BYTES].
+    /// larger than [`LARGE_VALUE_THRESHOLD_BYTES`].
     pub(crate) fn record<R>(&self, value_size: Option<usize>, f: impl FnOnce() -> R) -> R {
         self.calls_total.increment(1);
 
@@ -37,7 +37,7 @@ add_segments!(Header, Receipt, Transaction);
 pub trait ColumnSelectorOne {
     /// First desired column value
     type FIRST: Decompress;
-    /// Mask to obtain desired values, should correspond to the order of columns in a static_file.
+    /// Mask to obtain desired values, should correspond to the order of columns in a `static_file`.
     const MASK: usize;
 }
 
@@ -47,7 +47,7 @@ pub trait ColumnSelectorTwo {
     type FIRST: Decompress;
     /// Second desired column value
     type SECOND: Decompress;
-    /// Mask to obtain desired values, should correspond to the order of columns in a static_file.
+    /// Mask to obtain desired values, should correspond to the order of columns in a `static_file`.
     const MASK: usize;
 }
 
@@ -59,7 +59,7 @@ pub trait ColumnSelectorThree {
     type SECOND: Decompress;
     /// Third desired column value
     type THIRD: Decompress;
-    /// Mask to obtain desired values, should correspond to the order of columns in a static_file.
+    /// Mask to obtain desired values, should correspond to the order of columns in a `static_file`.
     const MASK: usize;
 }
 
@@ -25,7 +25,7 @@ mod masks;
 type SortedStaticFiles =
     HashMap<StaticFileSegment, Vec<(SegmentRangeInclusive, Option<SegmentRangeInclusive>)>>;
 
-/// Given the static_files directory path, it returns a list over the existing static_files
+/// Given the `static_files` directory path, it returns a list over the existing `static_files`
 /// organized by [`StaticFileSegment`]. Each segment has a sorted list of block ranges and
 /// transaction ranges as presented in the file configuration.
 pub fn iter_static_files(path: impl AsRef<Path>) -> Result<SortedStaticFiles, NippyJarError> {
@@ -87,7 +87,7 @@ macro_rules! impl_compression_fixed_compact {
 
 impl_compression_fixed_compact!(B256, Address);
 
-/// Adds wrapper structs for some primitive types so they can use StructFlags from Compact, when
+/// Adds wrapper structs for some primitive types so they can use `StructFlags` from Compact, when
 /// used as pure table values.
 macro_rules! add_wrapper_struct {
     ($(($name:tt, $wrapper:tt)),+) => {
@@ -21,7 +21,7 @@ impl<T: Table> Table for RawTable<T> {
     type Value = RawValue<T::Value>;
 }
 
-/// Raw DupSort table that can be used to access any table and its data in raw mode.
+/// Raw `DupSort` table that can be used to access any table and its data in raw mode.
 /// This is useful for delayed decoding/encoding of data.
 #[derive(Default, Copy, Clone, Debug)]
 pub struct RawDupSort<T: DupSort> {
@@ -7,11 +7,11 @@ use std::{
 
 /// The name of the file that contains the version of the database.
 pub const DB_VERSION_FILE_NAME: &str = "database.version";
-/// The version of the database stored in the [DB_VERSION_FILE_NAME] file in the same directory as
+/// The version of the database stored in the [`DB_VERSION_FILE_NAME`] file in the same directory as
 /// database.
 pub const DB_VERSION: u64 = 2;
 
-/// Error when checking a database version using [check_db_version_file]
+/// Error when checking a database version using [`check_db_version_file`]
 #[derive(thiserror::Error, Debug)]
 pub enum DatabaseVersionError {
     /// Unable to determine the version of the database; the file is missing.
@@ -41,10 +41,10 @@ pub enum DatabaseVersionError {
     },
 }
 
-/// Checks the database version file with [DB_VERSION_FILE_NAME] name.
+/// Checks the database version file with [`DB_VERSION_FILE_NAME`] name.
 ///
-/// Returns [Ok] if file is found and has one line which equals to [DB_VERSION].
-/// Otherwise, returns different [DatabaseVersionError] error variants.
+/// Returns [Ok] if file is found and has one line which equals to [`DB_VERSION`].
+/// Otherwise, returns different [`DatabaseVersionError`] error variants.
 pub fn check_db_version_file<P: AsRef<Path>>(db_path: P) -> Result<(), DatabaseVersionError> {
     let version = get_db_version(db_path)?;
     if version != DB_VERSION {
@@ -54,10 +54,10 @@ pub fn check_db_version_file<P: AsRef<Path>>(db_path: P) -> Result<(), DatabaseV
     Ok(())
 }
 
-/// Returns the database version from file with [DB_VERSION_FILE_NAME] name.
+/// Returns the database version from file with [`DB_VERSION_FILE_NAME`] name.
 ///
 /// Returns [Ok] if file is found and contains a valid version.
-/// Otherwise, returns different [DatabaseVersionError] error variants.
+/// Otherwise, returns different [`DatabaseVersionError`] error variants.
 pub fn get_db_version<P: AsRef<Path>>(db_path: P) -> Result<u64, DatabaseVersionError> {
     let version_file_path = db_version_file_path(db_path);
     match fs::read_to_string(&version_file_path) {
@@ -69,7 +69,8 @@ pub fn get_db_version<P: AsRef<Path>>(db_path: P) -> Result<u64, DatabaseVersion
     }
 }
 
-/// Creates a database version file with [DB_VERSION_FILE_NAME] name containing [DB_VERSION] string.
+/// Creates a database version file with [`DB_VERSION_FILE_NAME`] name containing [`DB_VERSION`]
+/// string.
 ///
 /// This function will create a file if it does not exist,
 /// and will entirely replace its contents if it does.
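For the rare doc comment where backticks would be wrong, Clippy offers the usual escape hatches; neither appears in this diff, but they matter when adopting the lint. Individual items can opt out with an `allow` attribute, and recurring proper nouns can be whitelisted via the `doc-valid-idents` setting in `clippy.toml`:

// Per-item opt-out (hypothetical item): `FooBar` below would normally be
// flagged, but the attribute silences the lint for this item only.
#[allow(clippy::doc_markdown)]
/// Bridges to the legacy FooBar subsystem.
pub fn legacy_bridge() {}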