chore(db): move mod tables to db-api (#14540)

Co-authored-by: Matthias Seitz <matthias.seitz@outlook.de>
This commit is contained in:
DaniPopes
2025-02-17 21:53:39 +01:00
committed by GitHub
parent 336c3d1fac
commit 482f4557eb
106 changed files with 269 additions and 274 deletions

View File

@ -15,32 +15,23 @@ workspace = true
# reth
reth-db-api.workspace = true
reth-primitives = { workspace = true, features = ["reth-codec"] }
reth-primitives-traits = { workspace = true, features = ["reth-codec"] }
reth-fs-util.workspace = true
reth-storage-errors.workspace = true
reth-nippy-jar.workspace = true
reth-prune-types = { workspace = true, features = ["reth-codec", "serde"] }
reth-stages-types.workspace = true
reth-trie-common = { workspace = true, features = ["serde"] }
reth-tracing.workspace = true
# ethereum
alloy-primitives.workspace = true
alloy-consensus.workspace = true
# mdbx
reth-libmdbx = { workspace = true, optional = true, features = ["return-borrowed", "read-tx-timeouts"] }
eyre = { workspace = true, optional = true }
# codecs
serde = { workspace = true, default-features = false }
# metrics
reth-metrics = { workspace = true, optional = true }
metrics = { workspace = true, optional = true }
# misc
bytes.workspace = true
page_size = { version = "0.6.0", optional = true }
thiserror.workspace = true
tempfile = { workspace = true, optional = true }
@ -55,11 +46,13 @@ strum = { workspace = true, features = ["derive"], optional = true }
[dev-dependencies]
# reth libs with arbitrary
reth-primitives = { workspace = true, features = ["arbitrary"] }
reth-primitives-traits = { workspace = true, features = ["reth-codec"] }
serde_json.workspace = true
tempfile.workspace = true
test-fuzz.workspace = true
parking_lot.workspace = true
alloy-consensus.workspace = true
serde.workspace = true
pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] }
criterion.workspace = true
@ -85,23 +78,17 @@ test-utils = [
"arbitrary",
"parking_lot",
"reth-primitives/test-utils",
"reth-primitives-traits/test-utils",
"reth-db-api/test-utils",
"reth-nippy-jar/test-utils",
"reth-trie-common/test-utils",
"reth-prune-types/test-utils",
"reth-stages-types/test-utils",
"reth-primitives-traits/test-utils",
]
bench = []
bench = ["reth-db-api/bench"]
arbitrary = [
"reth-primitives/arbitrary",
"reth-db-api/arbitrary",
"reth-primitives-traits/arbitrary",
"reth-trie-common/arbitrary",
"alloy-primitives/arbitrary",
"reth-prune-types/arbitrary",
"reth-stages-types/arbitrary",
"alloy-consensus/arbitrary",
"reth-primitives-traits/arbitrary",
]
optimism = ["reth-db-api/optimism"]
op = [

View File

@ -6,11 +6,12 @@ use criterion::{
criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion,
};
use pprof::criterion::{Output, PProfProfiler};
use reth_db::{tables::*, test_utils::create_test_rw_db_with_path};
use reth_db::test_utils::create_test_rw_db_with_path;
use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW},
database::Database,
table::{Compress, Decode, Decompress, DupSort, Encode, Table},
tables::*,
transaction::{DbTx, DbTxMut},
};
use reth_fs_util as fs;

View File

@ -1,8 +1,8 @@
//! Cursor wrapper for libmdbx-sys.
use super::utils::*;
use crate::{
metrics::{DatabaseEnvMetrics, Operation},
tables::utils::*,
DatabaseError,
};
use reth_db_api::{

View File

@ -33,6 +33,8 @@ use tx::Tx;
pub mod cursor;
pub mod tx;
mod utils;
/// 1 KB in bytes
pub const KILOBYTE: usize = 1024;
/// 1 MB in bytes

View File

@ -1,9 +1,8 @@
//! Transaction wrapper for libmdbx-sys.
use super::cursor::Cursor;
use super::{cursor::Cursor, utils::*};
use crate::{
metrics::{DatabaseEnvMetrics, Operation, TransactionMode, TransactionOutcome},
tables::utils::decode_one,
DatabaseError,
};
use reth_db_api::{

View File

@ -1,7 +1,9 @@
//! Small database table utilities and helper functions.
use crate::DatabaseError;
use reth_db_api::table::{Decode, Decompress, Table, TableRow};
use crate::{
table::{Decode, Decompress, Table, TableRow},
DatabaseError,
};
use std::borrow::Cow;
/// Helper function to decode a `(key, value)` pair.

View File

@ -20,7 +20,6 @@ pub mod lockfile;
#[cfg(feature = "mdbx")]
mod metrics;
pub mod static_file;
pub mod tables;
#[cfg(feature = "mdbx")]
mod utils;
pub mod version;
@ -29,7 +28,6 @@ pub mod version;
pub mod mdbx;
pub use reth_storage_errors::db::{DatabaseError, DatabaseWriteOperation};
pub use tables::*;
#[cfg(feature = "mdbx")]
pub use utils::is_database_empty;

View File

@ -1,16 +0,0 @@
//! Curates the input coming from the fuzzer for certain types.
use reth_db_api::models::IntegerList;
use serde::{Deserialize, Serialize};
/// Makes sure that the list provided by the fuzzer is not empty and pre-sorted
#[derive(Debug, Clone, Deserialize, Serialize, Default)]
pub struct IntegerListInput(pub Vec<u64>);

impl From<IntegerListInput> for IntegerList {
    /// Sorts the fuzzer-provided numbers so they satisfy the pre-sorted
    /// invariant required by [`IntegerList::new_pre_sorted`].
    fn from(input: IntegerListInput) -> Self {
        let IntegerListInput(mut values) = input;
        values.sort_unstable();
        Self::new_pre_sorted(values)
    }
}

View File

@ -1,91 +0,0 @@
//! Implements fuzzing targets to be used by test-fuzz
mod inputs;
/// Fuzzer generates a random instance of the object and proceeds to encode and decode it. It then
/// makes sure that it matches the original object.
///
/// Some types like [`IntegerList`] might have some restrictions on how they're fuzzed. For example,
/// the list is assumed to be sorted before creating the object.
///
/// Each tuple argument is `(type, input type, encode trait, encode method, decode trait,
/// decode method)`, so the same body serves both `Encode`/`Decode` (keys) and
/// `Compress`/`Decompress` (values).
macro_rules! impl_fuzzer_with_input {
    ($(($name:tt, $input_type:tt, $encode:tt, $encode_method:tt, $decode:tt, $decode_method:tt)),+) => {
        $(
            /// Macro generated module to be used by test-fuzz and `bench` if it applies.
            #[allow(non_snake_case)]
            #[cfg(any(test, feature = "bench"))]
            pub mod $name {
                use reth_db_api::table;

                #[allow(unused_imports)]
                use reth_primitives_traits::*;

                #[allow(unused_imports)]
                use super::inputs::*;

                #[allow(unused_imports)]
                use reth_db_api::models::*;

                /// Encodes and decodes table types returning its encoded size and the decoded object.
                /// This method is used for benchmarking, so its parameter should be the actual type that is being tested.
                pub fn encode_and_decode(obj: $name) -> (usize, $name) {
                    let data = table::$encode::$encode_method(obj);
                    let size = data.len();
                    // Some `data` might be a fixed array.
                    (size, table::$decode::$decode_method(&data).expect("failed to decode"))
                }

                #[cfg(test)]
                #[allow(dead_code)]
                #[allow(missing_docs)]
                #[test_fuzz::test_fuzz]
                pub fn fuzz(obj: $input_type) {
                    // Convert the fuzzer input into the table type before round-tripping.
                    let obj: $name = obj.into();
                    assert!(encode_and_decode(obj.clone()).1 == obj );
                }

                #[test]
                #[allow(missing_docs)]
                pub fn test() {
                    fuzz($input_type::default())
                }
            }
        )+
    };
}
/// Fuzzer generates a random instance of the object and proceeds to encode and decode it. It then
/// makes sure that it matches the original object.
///
/// Shorthand for key types: round-trips `Encode::encode` / `Decode::decode`, using the type
/// itself as the fuzzer input.
macro_rules! impl_fuzzer_key {
    ($($name:tt),+) => {
        $(
            impl_fuzzer_with_input!(($name, $name, Encode, encode, Decode, decode));
        )+
    };
}
/// Fuzzer generates a random instance of the object and proceeds to compress and decompress it. It
/// then makes sure that it matches the original object.
///
/// Shorthand for value types whose fuzzer input is the type itself.
#[allow(unused_macros)]
macro_rules! impl_fuzzer_value {
    ($($name:tt),+) => {
        $(
            // `impl_fuzzer_value_with_input!` matches parenthesized `($name, $input)` tuples
            // (see its rule and the call site below); a bare `$name, $name` would fail to
            // match, so the pair must be wrapped in parentheses here.
            impl_fuzzer_value_with_input!(($name, $name));
        )+
    };
}
/// Fuzzer generates a random instance of the object and proceeds to compress and decompress it. It
/// then makes sure that it matches the original object. It supports being fed a different kind of
/// input, as long as it supports `Into<T>`.
macro_rules! impl_fuzzer_value_with_input {
    ($(($name:tt, $input:tt)),+) => {
        $(
            // Same round-trip machinery as keys, but via the value codec traits.
            impl_fuzzer_with_input!(($name, $input, Compress, compress, Decompress, decompress));
        )+
    };
}
// Round-trip key encoding for `BlockNumberAddress`.
impl_fuzzer_key!(BlockNumberAddress);
// Round-trip value compression for `IntegerList`; `IntegerListInput` guarantees sorted input.
impl_fuzzer_value_with_input!((IntegerList, IntegerListInput));

View File

@ -1,3 +0,0 @@
//! Integrates different codecs into `table::Encode` and `table::Decode`.
pub mod fuzz;

View File

@ -1,586 +0,0 @@
//! Tables and data models.
//!
//! # Overview
//!
//! This module defines the tables in reth, as well as some table-related abstractions:
//!
//! - [`codecs`] integrates different codecs into [`Encode`] and [`Decode`]
//! - [`models`](reth_db_api::models) defines the values written to tables
//!
//! # Database Tour
//!
//! TODO(onbjerg): Find appropriate format for this...
pub mod codecs;
mod raw;
pub use raw::{RawDupSort, RawKey, RawTable, RawValue, TableRawRow};
#[cfg(feature = "mdbx")]
pub(crate) mod utils;
use alloy_consensus::Header;
use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256};
use reth_db_api::{
models::{
accounts::BlockNumberAddress,
blocks::{HeaderHash, StoredBlockOmmers},
storage_sharded_key::StorageShardedKey,
AccountBeforeTx, ClientVersion, CompactU256, IntegerList, ShardedKey,
StoredBlockBodyIndices, StoredBlockWithdrawals,
},
table::{Decode, DupSort, Encode, Table, TableInfo},
};
use reth_primitives::{Receipt, StorageEntry, TransactionSigned};
use reth_primitives_traits::{Account, Bytecode};
use reth_prune_types::{PruneCheckpoint, PruneSegment};
use reth_stages_types::StageCheckpoint;
use reth_trie_common::{BranchNodeCompact, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey};
use serde::{Deserialize, Serialize};
use std::fmt;
/// Enum for the types of tables present in libmdbx.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum TableType {
    /// Plain key-value table.
    Table,
    /// Duplicate-key (`DUPSORT`) table, where one key can map to multiple sorted values.
    DupSort,
}
/// Operates on database tables in an abstract way: implement `TableViewer` and combine it with
/// the [`Tables`] enum to dispatch work on a table chosen at run time.
///
/// # Example
///
/// ```
/// use reth_db::{TableViewer, Tables};
/// use reth_db_api::table::{DupSort, Table};
///
/// struct MyTableViewer;
///
/// impl TableViewer<()> for MyTableViewer {
///     type Error = &'static str;
///
///     fn view<T: Table>(&self) -> Result<(), Self::Error> {
///         // operate on table in a generic way
///         Ok(())
///     }
///
///     fn view_dupsort<T: DupSort>(&self) -> Result<(), Self::Error> {
///         // operate on a dupsort table in a generic way
///         Ok(())
///     }
/// }
///
/// let viewer = MyTableViewer {};
///
/// let _ = Tables::Headers.view(&viewer);
/// let _ = Tables::Transactions.view(&viewer);
/// ```
pub trait TableViewer<R> {
    /// The error type returned by the viewer.
    type Error;

    /// Calls `view` with the correct table type, resolved from the run-time `table` value.
    fn view_rt(&self, table: Tables) -> Result<R, Self::Error> {
        table.view(self)
    }

    /// Operate on the table in a generic way.
    fn view<T: Table>(&self) -> Result<R, Self::Error>;

    /// Operate on the dupsort table in a generic way.
    ///
    /// By default, the `view` function is invoked unless overridden.
    fn view_dupsort<T: DupSort>(&self) -> Result<R, Self::Error> {
        self.view::<T>()
    }
}
/// General trait for defining the set of tables.
/// Used to initialize the database.
pub trait TableSet {
    /// Returns an iterator over the tables in this set.
    fn tables() -> Box<dyn Iterator<Item = Box<dyn TableInfo>>>;
}
/// Defines all the tables in the database.
///
/// For each `table` entry this generates: a marker type implementing
/// `reth_db_api::table::Table` (and `DupSort` when a `SubKey` is given), the run-time
/// [`Tables`] enum with name/dupsort accessors, the `table_names` string constants, and the
/// `tables_to_generic!` run-time-to-compile-time dispatch macro.
#[macro_export]
macro_rules! tables {
    // `@bool` — `true` iff any tokens (a subkey) were passed.
    (@bool) => { false };
    (@bool $($t:tt)+) => { true };

    // `@view` — dispatch to `view_dupsort` only when a subkey exists.
    (@view $name:ident $v:ident) => { $v.view::<$name>() };
    (@view $name:ident $v:ident $_subkey:ty) => { $v.view_dupsort::<$name>() };

    (@value_doc $key:ty, $value:ty) => {
        concat!("[`", stringify!($value), "`]")
    };
    // Don't generate links if we have generics
    (@value_doc $key:ty, $value:ty, $($generic:ident),*) => {
        concat!("`", stringify!($value), "`")
    };

    ($($(#[$attr:meta])* table $name:ident$(<$($generic:ident $(= $default:ty)?),*>)? { type Key = $key:ty; type Value = $value:ty; $(type SubKey = $subkey:ty;)? } )*) => {
        // Table marker types.
        $(
            $(#[$attr])*
            ///
            #[doc = concat!("Marker type representing a database table mapping [`", stringify!($key), "`] to ", tables!(@value_doc $key, $value, $($($generic),*)?), ".")]
            $(
                #[doc = concat!("\n\nThis table's `DUPSORT` subkey is [`", stringify!($subkey), "`].")]
            )?
            pub struct $name$(<$($generic $( = $default)?),*>)? {
                _private: std::marker::PhantomData<($($($generic,)*)?)>,
            }

            // Ideally this implementation wouldn't exist, but it is necessary to derive `Debug`
            // when a type is generic over `T: Table`. See: https://github.com/rust-lang/rust/issues/26925
            impl$(<$($generic),*>)? fmt::Debug for $name$(<$($generic),*>)? {
                fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
                    unreachable!("this type cannot be instantiated")
                }
            }

            impl$(<$($generic),*>)? reth_db_api::table::Table for $name$(<$($generic),*>)?
            where
                $value: reth_db_api::table::Value + 'static
                $($(,$generic: Send + Sync)*)?
            {
                const NAME: &'static str = table_names::$name;
                const DUPSORT: bool = tables!(@bool $($subkey)?);

                type Key = $key;
                type Value = $value;
            }

            $(
                impl DupSort for $name {
                    type SubKey = $subkey;
                }
            )?
        )*

        // Tables enum.
        // NOTE: the ordering of the enum does not matter, but it is assumed that the discriminants
        // start at 0 and increment by 1 for each variant (the default behavior).
        // See for example `reth_db::implementation::mdbx::tx::Tx::db_handles`.

        /// A table in the database.
        #[derive(Clone, Copy, PartialEq, Eq, Hash)]
        pub enum Tables {
            $(
                #[doc = concat!("The [`", stringify!($name), "`] database table.")]
                $name,
            )*
        }

        impl Tables {
            /// All the tables in the database.
            pub const ALL: &'static [Self] = &[$(Self::$name,)*];

            /// The number of tables in the database.
            pub const COUNT: usize = Self::ALL.len();

            /// Returns the name of the table as a string.
            pub const fn name(&self) -> &'static str {
                match self {
                    $(
                        Self::$name => table_names::$name,
                    )*
                }
            }

            /// Returns `true` if the table is a `DUPSORT` table.
            pub const fn is_dupsort(&self) -> bool {
                match self {
                    $(
                        Self::$name => tables!(@bool $($subkey)?),
                    )*
                }
            }

            /// The type of the given table in database.
            pub const fn table_type(&self) -> TableType {
                if self.is_dupsort() {
                    TableType::DupSort
                } else {
                    TableType::Table
                }
            }

            /// Allows to operate on specific table type
            pub fn view<T, R>(&self, visitor: &T) -> Result<R, T::Error>
            where
                T: ?Sized + TableViewer<R>,
            {
                match self {
                    $(
                        Self::$name => tables!(@view $name visitor $($subkey)?),
                    )*
                }
            }
        }

        impl fmt::Debug for Tables {
            #[inline]
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.write_str(self.name())
            }
        }

        impl fmt::Display for Tables {
            #[inline]
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                self.name().fmt(f)
            }
        }

        impl std::str::FromStr for Tables {
            type Err = String;

            fn from_str(s: &str) -> Result<Self, Self::Err> {
                match s {
                    $(
                        table_names::$name => Ok(Self::$name),
                    )*
                    s => Err(format!("unknown table: {s:?}")),
                }
            }
        }

        impl TableInfo for Tables {
            fn name(&self) -> &'static str {
                self.name()
            }

            fn is_dupsort(&self) -> bool {
                self.is_dupsort()
            }
        }

        impl TableSet for Tables {
            fn tables() -> Box<dyn Iterator<Item = Box<dyn TableInfo>>> {
                Box::new(Self::ALL.iter().map(|table| Box::new(*table) as Box<dyn TableInfo>))
            }
        }

        // Need constants to match on in the `FromStr` implementation.
        #[allow(non_upper_case_globals)]
        mod table_names {
            $(
                pub(super) const $name: &'static str = stringify!($name);
            )*
        }

        /// Maps a run-time [`Tables`] enum value to its corresponding compile-time [`Table`] type.
        ///
        /// This is a simpler alternative to [`TableViewer`].
        ///
        /// # Examples
        ///
        /// ```
        /// use reth_db::{Tables, tables_to_generic};
        /// use reth_db_api::table::Table;
        ///
        /// let table = Tables::Headers;
        /// let result = tables_to_generic!(table, |GenericTable| <GenericTable as Table>::NAME);
        /// assert_eq!(result, table.name());
        /// ```
        #[macro_export]
        macro_rules! tables_to_generic {
            ($table:expr, |$generic_name:ident| $e:expr) => {
                match $table {
                    $(
                        Tables::$name => {
                            use $crate::tables::$name as $generic_name;
                            $e
                        },
                    )*
                }
            };
        }
    };
}
tables! {
    /// Stores the header hashes belonging to the canonical chain.
    table CanonicalHeaders {
        type Key = BlockNumber;
        type Value = HeaderHash;
    }

    /// Stores the total difficulty from a block header.
    table HeaderTerminalDifficulties {
        type Key = BlockNumber;
        type Value = CompactU256;
    }

    /// Stores the block number corresponding to a header.
    table HeaderNumbers {
        type Key = BlockHash;
        type Value = BlockNumber;
    }

    /// Stores header bodies.
    table Headers<H = Header> {
        type Key = BlockNumber;
        type Value = H;
    }

    /// Stores block indices that contains indexes of transaction and the count of them.
    ///
    /// More information about stored indices can be found in the [`StoredBlockBodyIndices`] struct.
    table BlockBodyIndices {
        type Key = BlockNumber;
        type Value = StoredBlockBodyIndices;
    }

    /// Stores the uncles/ommers of the block.
    table BlockOmmers<H = Header> {
        type Key = BlockNumber;
        type Value = StoredBlockOmmers<H>;
    }

    /// Stores the block withdrawals.
    table BlockWithdrawals {
        type Key = BlockNumber;
        type Value = StoredBlockWithdrawals;
    }

    /// Canonical only: stores the transaction body for canonical transactions.
    table Transactions<T = TransactionSigned> {
        type Key = TxNumber;
        type Value = T;
    }

    /// Stores the mapping of the transaction hash to the transaction number.
    table TransactionHashNumbers {
        type Key = TxHash;
        type Value = TxNumber;
    }

    /// Stores the mapping of transaction number to the blocks number.
    ///
    /// The key is the highest transaction ID in the block.
    table TransactionBlocks {
        type Key = TxNumber;
        type Value = BlockNumber;
    }

    /// Canonical only: stores transaction receipts.
    table Receipts<R = Receipt> {
        type Key = TxNumber;
        type Value = R;
    }

    /// Stores all smart contract bytecodes.
    /// There will be multiple accounts that have same bytecode
    /// So we would need to introduce reference counter.
    /// This will be small optimization on state.
    table Bytecodes {
        type Key = B256;
        type Value = Bytecode;
    }

    /// Stores the current state of an [`Account`].
    table PlainAccountState {
        type Key = Address;
        type Value = Account;
    }

    /// Stores the current value of a storage key.
    table PlainStorageState {
        type Key = Address;
        type Value = StorageEntry;
        type SubKey = B256;
    }

    /// Stores pointers to block changeset with changes for each account key.
    ///
    /// Last shard key of the storage will contain `u64::MAX` `BlockNumber`,
    /// this allows us a small optimization on db access when change is in plain state.
    ///
    /// Imagine having shards as:
    /// * `Address | 100`
    /// * `Address | u64::MAX`
    ///
    /// What we need to find is number that is one greater than N. Db `seek` function allows us to fetch
    /// the shard that equal or more than asked. For example:
    /// * For N=50 we would get first shard.
    /// * for N=150 we would get second shard.
    /// * If max block number is 200 and we ask for N=250 we would fetch last shard and
    ///   know that needed entry is in `PlainAccountState`.
    /// * If there were no shard we would get `None` entry or entry of different storage key.
    ///
    /// Code example can be found in `reth_provider::HistoricalStateProviderRef`
    table AccountsHistory {
        type Key = ShardedKey<Address>;
        type Value = BlockNumberList;
    }

    /// Stores pointers to block number changeset with changes for each storage key.
    ///
    /// Last shard key of the storage will contain `u64::MAX` `BlockNumber`,
    /// this allows us a small optimization on db access when change is in plain state.
    ///
    /// Imagine having shards as:
    /// * `Address | StorageKey | 100`
    /// * `Address | StorageKey | u64::MAX`
    ///
    /// What we need to find is number that is one greater than N. Db `seek` function allows us to fetch
    /// the shard that equal or more than asked. For example:
    /// * For N=50 we would get first shard.
    /// * for N=150 we would get second shard.
    /// * If max block number is 200 and we ask for N=250 we would fetch last shard and
    ///   know that needed entry is in `PlainStorageState`.
    /// * If there were no shard we would get `None` entry or entry of different storage key.
    ///
    /// Code example can be found in `reth_provider::HistoricalStateProviderRef`
    table StoragesHistory {
        type Key = StorageShardedKey;
        type Value = BlockNumberList;
    }

    /// Stores the state of an account before a certain transaction changed it.
    /// Change on state can be: account is created, selfdestructed, touched while empty
    /// or changed balance,nonce.
    table AccountChangeSets {
        type Key = BlockNumber;
        type Value = AccountBeforeTx;
        type SubKey = Address;
    }

    /// Stores the state of a storage key before a certain transaction changed it.
    /// If [`StorageEntry::value`] is zero, this means storage was not existing
    /// and needs to be removed.
    table StorageChangeSets {
        type Key = BlockNumberAddress;
        type Value = StorageEntry;
        type SubKey = B256;
    }

    /// Stores the current state of an [`Account`] indexed with `keccak256Address`
    /// This table is in preparation for merklization and calculation of state root.
    /// We are saving whole account data as it is needed for partial update when
    /// part of storage is changed. Benefit for merklization is that hashed addresses are sorted.
    table HashedAccounts {
        type Key = B256;
        type Value = Account;
    }

    /// Stores the current storage values indexed with `keccak256Address` and
    /// hash of storage key `keccak256key`.
    /// This table is in preparation for merklization and calculation of state root.
    /// Benefit for merklization is that hashed addresses/keys are sorted.
    table HashedStorages {
        type Key = B256;
        type Value = StorageEntry;
        type SubKey = B256;
    }

    /// Stores the current state's Merkle Patricia Tree.
    table AccountsTrie {
        type Key = StoredNibbles;
        type Value = BranchNodeCompact;
    }

    /// From HashedAddress => NibblesSubKey => Intermediate value
    table StoragesTrie {
        type Key = B256;
        type Value = StorageTrieEntry;
        type SubKey = StoredNibblesSubKey;
    }

    /// Stores the transaction sender for each canonical transaction.
    /// It is needed to speed up execution stage and allows fetching signer without doing
    /// transaction signed recovery
    table TransactionSenders {
        type Key = TxNumber;
        type Value = Address;
    }

    /// Stores the highest synced block number and stage-specific checkpoint of each stage.
    table StageCheckpoints {
        type Key = StageId;
        type Value = StageCheckpoint;
    }

    /// Stores arbitrary data to keep track of a stage first-sync progress.
    table StageCheckpointProgresses {
        type Key = StageId;
        type Value = Vec<u8>;
    }

    /// Stores the highest pruned block number and prune mode of each prune segment.
    table PruneCheckpoints {
        type Key = PruneSegment;
        type Value = PruneCheckpoint;
    }

    /// Stores the history of client versions that have accessed the database with write privileges by unix timestamp in seconds.
    table VersionHistory {
        type Key = u64;
        type Value = ClientVersion;
    }

    /// Stores generic chain state info, like the last finalized block.
    table ChainState {
        type Key = ChainStateKey;
        type Value = BlockNumber;
    }
}
/// Keys for the `ChainState` table.
#[derive(Ord, Clone, Eq, PartialOrd, PartialEq, Debug, Deserialize, Serialize, Hash)]
pub enum ChainStateKey {
    /// Last finalized block key
    LastFinalizedBlock,
    /// Last safe block key
    LastSafeBlockBlock,
}
impl Encode for ChainStateKey {
    // Each variant fits in a single tag byte.
    type Encoded = [u8; 1];

    /// Encodes the key as a one-byte tag: `0` for finalized, `1` for safe.
    fn encode(self) -> Self::Encoded {
        let tag = match self {
            Self::LastFinalizedBlock => 0,
            Self::LastSafeBlockBlock => 1,
        };
        [tag]
    }
}
impl Decode for ChainStateKey {
    /// Decodes a one-byte tag back into the key; anything else is a decode error.
    fn decode(value: &[u8]) -> Result<Self, reth_db_api::DatabaseError> {
        if let [tag] = value {
            match tag {
                0 => return Ok(Self::LastFinalizedBlock),
                1 => return Ok(Self::LastSafeBlockBlock),
                _ => {}
            }
        }
        Err(reth_db_api::DatabaseError::Decode)
    }
}
// Alias types.

/// List with transaction numbers.
///
/// Used as the value of the history tables ([`AccountsHistory`], [`StoragesHistory`]).
pub type BlockNumberList = IntegerList;
/// Encoded stage id.
pub type StageId = String;
#[cfg(test)]
mod tests {
    use super::*;
    use std::str::FromStr;

    /// The `Debug`/`Display` output and `FromStr` parsing of every table must all
    /// round-trip through the table's canonical name.
    #[test]
    fn parse_table_from_str() {
        for &table in Tables::ALL {
            let name = table.name();
            assert_eq!(format!("{table:?}"), name);
            assert_eq!(table.to_string(), name);
            assert_eq!(Tables::from_str(name).unwrap(), table);
        }
    }
}

View File

@ -1,184 +0,0 @@
use crate::DatabaseError;
use reth_db_api::table::{Compress, Decode, Decompress, DupSort, Encode, Key, Table, Value};
use serde::{Deserialize, Serialize};
/// Tuple with `RawKey<T::Key>` and `RawValue<T::Value>`, i.e. one undecoded row of table `T`.
pub type TableRawRow<T> = (RawKey<<T as Table>::Key>, RawValue<<T as Table>::Value>);
/// Raw table that can be used to access any table and its data in raw mode.
/// This is useful for delayed decoding/encoding of data.
#[derive(Default, Copy, Clone, Debug)]
pub struct RawTable<T: Table> {
    // Zero-sized marker tying this wrapper to the underlying table type.
    phantom: std::marker::PhantomData<T>,
}

impl<T: Table> Table for RawTable<T> {
    // Shares the underlying table's name so it opens the same database table.
    const NAME: &'static str = T::NAME;
    const DUPSORT: bool = false;

    // Keys and values stay in their encoded/compressed byte form.
    type Key = RawKey<T::Key>;
    type Value = RawValue<T::Value>;
}
/// Raw `DupSort` table that can be used to access any table and its data in raw mode.
/// This is useful for delayed decoding/encoding of data.
#[derive(Default, Copy, Clone, Debug)]
pub struct RawDupSort<T: DupSort> {
    // Zero-sized marker tying this wrapper to the underlying dupsort table type.
    phantom: std::marker::PhantomData<T>,
}

impl<T: DupSort> Table for RawDupSort<T> {
    // Shares the underlying table's name so it opens the same database table.
    const NAME: &'static str = T::NAME;
    const DUPSORT: bool = true;

    // Keys and values stay in their encoded/compressed byte form.
    type Key = RawKey<T::Key>;
    type Value = RawValue<T::Value>;
}

impl<T: DupSort> DupSort for RawDupSort<T> {
    type SubKey = RawKey<T::SubKey>;
}
/// Raw table key: holds the already-encoded bytes of a `K`, deferring decoding to the caller.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct RawKey<K: Key> {
    /// Inner encoded key
    key: Vec<u8>,
    // Marker preserving which key type these bytes decode into.
    _phantom: std::marker::PhantomData<K>,
}
impl<K: Key> RawKey<K> {
    /// Encodes `key` and stores the resulting bytes.
    pub fn new(key: K) -> Self {
        let encoded = key.encode();
        Self { key: encoded.into(), _phantom: std::marker::PhantomData }
    }

    /// Wraps an already-encoded key without re-encoding it.
    pub const fn from_vec(vec: Vec<u8>) -> Self {
        Self { key: vec, _phantom: std::marker::PhantomData }
    }

    /// Decodes the stored bytes back into a `K`.
    pub fn key(&self) -> Result<K, DatabaseError> {
        K::decode(&self.key)
    }

    /// Borrows the encoded bytes exactly as stored in the database.
    pub const fn raw_key(&self) -> &Vec<u8> {
        &self.key
    }

    /// Unwraps the encoded bytes, consuming the wrapper.
    pub fn into_key(self) -> Vec<u8> {
        self.key
    }
}
// Encoding a typed key is just wrapping it.
impl<K: Key> From<K> for RawKey<K> {
    fn from(key: K) -> Self {
        Self::new(key)
    }
}

// Byte view is only provided when the "decoded" key type is itself raw bytes.
impl AsRef<[u8]> for RawKey<Vec<u8>> {
    fn as_ref(&self) -> &[u8] {
        &self.key
    }
}
// Encode
impl<K: Key> Encode for RawKey<K> {
    type Encoded = Vec<u8>;

    /// The key is stored pre-encoded, so encoding is simply unwrapping the bytes.
    fn encode(self) -> Self::Encoded {
        self.key
    }
}

// Decode
impl<K: Key> Decode for RawKey<K> {
    /// Copies the raw bytes; decoding into `K` is deferred until [`RawKey::key`] is called.
    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
        let key = value.to_vec();
        Ok(Self { key, _phantom: std::marker::PhantomData })
    }

    /// Takes ownership of the buffer directly — no copy needed.
    fn decode_owned(value: Vec<u8>) -> Result<Self, DatabaseError> {
        Ok(Self { key: value, _phantom: std::marker::PhantomData })
    }
}
/// Raw table value: holds the already-compressed bytes of a `V`, deferring decompression.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Serialize, Ord, Hash)]
pub struct RawValue<V: Value> {
    /// Inner compressed value
    value: Vec<u8>,
    // Marker preserving which value type these bytes decompress into; not serialized.
    #[serde(skip)]
    _phantom: std::marker::PhantomData<V>,
}
impl<V: Value> RawValue<V> {
    /// Compresses `value` and stores the resulting bytes.
    pub fn new(value: V) -> Self {
        let compressed = V::compress(value);
        Self { value: compressed.into(), _phantom: std::marker::PhantomData }
    }

    /// Wraps already-compressed bytes without re-compressing them.
    pub const fn from_vec(vec: Vec<u8>) -> Self {
        Self { value: vec, _phantom: std::marker::PhantomData }
    }

    /// Decompresses the stored bytes back into a `V`.
    pub fn value(&self) -> Result<V, DatabaseError> {
        V::decompress(&self.value)
    }

    /// Borrows the compressed bytes exactly as stored in the database.
    pub fn raw_value(&self) -> &[u8] {
        &self.value
    }

    /// Unwraps the compressed bytes, consuming the wrapper.
    pub fn into_value(self) -> Vec<u8> {
        self.value
    }
}
// Compressing a typed value is just wrapping it.
impl<V: Value> From<V> for RawValue<V> {
    fn from(value: V) -> Self {
        Self::new(value)
    }
}

// Byte view is only provided when the "decompressed" value type is itself raw bytes.
impl AsRef<[u8]> for RawValue<Vec<u8>> {
    fn as_ref(&self) -> &[u8] {
        &self.value
    }
}
impl<V: Value> Compress for RawValue<V> {
    type Compressed = Vec<u8>;

    fn uncompressable_ref(&self) -> Option<&[u8]> {
        // Already compressed
        Some(&self.value)
    }

    /// The bytes are stored pre-compressed, so compressing is simply unwrapping them.
    fn compress(self) -> Self::Compressed {
        self.value
    }

    /// Appends the stored bytes to `buf` without any further transformation.
    fn compress_to_buf<B: bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
        buf.put_slice(&self.value)
    }
}
impl<V: Value> Decompress for RawValue<V> {
    /// Copies the raw bytes; decompression into `V` is deferred until [`RawValue::value`].
    fn decompress(value: &[u8]) -> Result<Self, DatabaseError> {
        let value = value.to_vec();
        Ok(Self { value, _phantom: std::marker::PhantomData })
    }

    /// Takes ownership of the buffer directly — no copy needed.
    fn decompress_owned(value: Vec<u8>) -> Result<Self, DatabaseError> {
        Ok(Self { value, _phantom: std::marker::PhantomData })
    }
}