Mirror of https://github.com/hl-archive-node/nanoreth.git, synced 2025-12-06 19:09:54 +00:00
feat(pruner): shared deletion limit (#4880)
@@ -7,7 +7,7 @@ use crate::{
     header::Head,
     proofs::genesis_state_root,
     Address, BlockNumber, Chain, ForkFilter, ForkHash, ForkId, Genesis, Hardfork, Header,
-    PruneBatchSizes, SealedHeader, B256, EMPTY_OMMER_ROOT, U256,
+    SealedHeader, B256, EMPTY_OMMER_ROOT, U256,
 };
 use once_cell::sync::Lazy;
 use revm_primitives::{address, b256};
@@ -64,7 +64,7 @@ pub static MAINNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
             b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"),
         )),
         base_fee_params: BaseFeeParams::ethereum(),
-        prune_batch_sizes: PruneBatchSizes::mainnet(),
+        prune_delete_limit: 3500,
         snapshot_block_interval: 500_000,
     }
     .into()
@@ -107,7 +107,7 @@ pub static GOERLI: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
             b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"),
         )),
         base_fee_params: BaseFeeParams::ethereum(),
-        prune_batch_sizes: PruneBatchSizes::testnet(),
+        prune_delete_limit: 1700,
         snapshot_block_interval: 1_000_000,
     }
     .into()
@@ -154,7 +154,7 @@ pub static SEPOLIA: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
             b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"),
         )),
         base_fee_params: BaseFeeParams::ethereum(),
-        prune_batch_sizes: PruneBatchSizes::testnet(),
+        prune_delete_limit: 1700,
         snapshot_block_interval: 1_000_000,
     }
     .into()
@@ -196,7 +196,7 @@ pub static HOLESKY: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
             b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"),
        )),
         base_fee_params: BaseFeeParams::ethereum(),
-        prune_batch_sizes: PruneBatchSizes::testnet(),
+        prune_delete_limit: 1700,
         snapshot_block_interval: 1_000_000,
     }
     .into()
@@ -302,11 +302,11 @@ pub struct ChainSpec {
     /// The parameters that configure how a block's base fee is computed
     pub base_fee_params: BaseFeeParams,

-    /// The batch sizes for pruner, per block. In the actual pruner run it will be multiplied by
+    /// The delete limit for pruner, per block. In the actual pruner run it will be multiplied by
     /// the amount of blocks between pruner runs to account for the difference in amount of new
     /// data coming in.
     #[serde(default)]
-    pub prune_batch_sizes: PruneBatchSizes,
+    pub prune_delete_limit: usize,

     /// The block interval for creating snapshots. Each snapshot will have that much blocks in it.
     pub snapshot_block_interval: u64,
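The field doc above explains the scaling: the per-block limit is multiplied by the number of blocks between pruner runs. A minimal sketch of that arithmetic, using the mainnet spec from this diff (`blocks_since_last_run` is an illustrative name, not part of this change):

// With mainnet's shared limit of 3500 rows per block and, say, 5 blocks
// elapsed since the previous pruner run, a single run may delete at most
// 3500 * 5 = 17_500 rows.
let blocks_since_last_run: usize = 5;
let delete_limit = MAINNET.prune_delete_limit * blocks_since_last_run;
assert_eq!(delete_limit, 17_500);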
@@ -323,7 +323,7 @@ impl Default for ChainSpec {
             hardforks: Default::default(),
             deposit_contract: Default::default(),
             base_fee_params: BaseFeeParams::ethereum(),
-            prune_batch_sizes: Default::default(),
+            prune_delete_limit: MAINNET.prune_delete_limit,
             snapshot_block_interval: Default::default(),
         }
     }
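A quick sketch of the resulting default behavior: instead of carrying per-part batch sizes, the default spec now inherits mainnet's single shared limit (relies only on the statics shown in this diff):

let spec = ChainSpec::default();
// The default delete limit is taken from the MAINNET spec, i.e. 3500.
assert_eq!(spec.prune_delete_limit, MAINNET.prune_delete_limit);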
@@ -78,7 +78,7 @@ pub use net::{
 };
 pub use peer::{PeerId, WithPeerId};
 pub use prune::{
-    PruneBatchSizes, PruneCheckpoint, PruneMode, PruneModes, PrunePart, PrunePartError,
+    PruneCheckpoint, PruneMode, PruneModes, PrunePart, PrunePartError, PruneProgress,
     ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE,
 };
 pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts};
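With this re-export change, downstream crates pick up the new progress type from the crate root while `PruneBatchSizes` disappears from the public API. A sketch, assuming the crate is named `reth_primitives`:

// PruneProgress is now re-exported alongside the existing prune types.
use reth_primitives::{PruneMode, PruneModes, PruneProgress};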
@@ -1,83 +0,0 @@
-use paste::paste;
-use serde::{Deserialize, Serialize};
-
-/// Batch sizes for configuring the pruner.
-/// The batch size for each prune part should be both large enough to prune the data which was
-/// generated with each new block, and small enough to not generate an excessive load on the
-/// database due to deletion of too many rows at once.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
-pub struct PruneBatchSizes {
-    /// Maximum number of receipts to prune, per block.
-    receipts: usize,
-    /// Maximum number of transaction lookup entries to prune, per block.
-    transaction_lookup: usize,
-    /// Maximum number of transaction senders to prune, per block.
-    transaction_senders: usize,
-    /// Maximum number of account history entries to prune, per block.
-    /// Measured in the number of `AccountChangeSet` table rows.
-    account_history: usize,
-    /// Maximum number of storage history entries to prune, per block.
-    /// Measured in the number of `StorageChangeSet` table rows.
-    storage_history: usize,
-}
-
-macro_rules! impl_prune_batch_size_methods {
-    ($(($human_name:expr, $name:ident)),+) => {
-        paste! {
-            impl PruneBatchSizes {
-                $(
-                    #[doc = concat!("Maximum number of ", $human_name, " to prune, accounting for the block interval.")]
-                    pub fn $name(&self, block_interval: usize) -> usize {
-                        self.$name * block_interval
-                    }
-
-                    #[doc = concat!("Set the maximum number of ", $human_name, " to prune per block.")]
-                    pub fn [<with_ $name>](mut self, batch_size: usize) -> Self {
-                        self.$name = batch_size;
-                        self
-                    }
-                )+
-            }
-        }
-    };
-}
-
-impl_prune_batch_size_methods!(
-    ("receipts", receipts),
-    ("transaction lookup entries", transaction_lookup),
-    ("transaction senders", transaction_senders),
-    ("account history entries", account_history),
-    ("storage history entries", storage_history)
-);
-
-impl PruneBatchSizes {
-    /// Default prune batch sizes for Ethereum mainnet.
-    /// These settings are sufficient to prune more data than generated with each new block.
-    pub const fn mainnet() -> Self {
-        Self {
-            receipts: 250,
-            transaction_lookup: 250,
-            transaction_senders: 1000,
-            account_history: 1000,
-            storage_history: 1000,
-        }
-    }
-
-    /// Default prune batch sizes for Ethereum testnets.
-    /// These settings are sufficient to prune more data than generated with each new block.
-    pub const fn testnet() -> Self {
-        Self {
-            receipts: 100,
-            transaction_lookup: 100,
-            transaction_senders: 500,
-            account_history: 500,
-            storage_history: 500,
-        }
-    }
-}
-
-impl Default for PruneBatchSizes {
-    fn default() -> Self {
-        Self::mainnet()
-    }
-}
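For reference, the deleted `paste!` macro generated a getter (scaled by the block interval) and a builder-style setter per field. Its expansion for the `("receipts", receipts)` entry looked roughly like this (reconstructed sketch, not part of the diff):

impl PruneBatchSizes {
    /// Maximum number of receipts to prune, accounting for the block interval.
    pub fn receipts(&self, block_interval: usize) -> usize {
        self.receipts * block_interval
    }

    /// Set the maximum number of receipts to prune per block.
    pub fn with_receipts(mut self, batch_size: usize) -> Self {
        self.receipts = batch_size;
        self
    }
}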
@@ -1,11 +1,9 @@
-mod batch_sizes;
 mod checkpoint;
 mod mode;
 mod part;
 mod target;

 use crate::{Address, BlockNumber};
-pub use batch_sizes::PruneBatchSizes;
 pub use checkpoint::PruneCheckpoint;
 pub use mode::PruneMode;
 pub use part::{PrunePart, PrunePartError};
@@ -88,3 +86,31 @@ impl ReceiptsLogPruneConfig {
         Ok(lowest.map(|lowest| lowest.max(pruned_block)))
     }
 }
+
+/// Progress of pruning.
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+pub enum PruneProgress {
+    /// There is more data to prune.
+    HasMoreData,
+    /// Pruning has been finished.
+    Finished,
+}
+
+impl PruneProgress {
+    /// Creates new [PruneProgress] from `done` boolean value.
+    ///
+    /// If `done == true`, returns [PruneProgress::Finished], otherwise [PruneProgress::HasMoreData]
+    /// is returned.
+    pub fn from_done(done: bool) -> Self {
+        if done {
+            Self::Finished
+        } else {
+            Self::HasMoreData
+        }
+    }
+
+    /// Returns `true` if pruning has been finished.
+    pub fn is_finished(&self) -> bool {
+        matches!(self, Self::Finished)
+    }
+}
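A minimal usage sketch of the new type, using only the API added above:

let progress = PruneProgress::from_done(false);
assert_eq!(progress, PruneProgress::HasMoreData);
assert!(!progress.is_finished());
// Once a pruner run reports done == true, progress flips to Finished.
assert!(PruneProgress::from_done(true).is_finished());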