wip: lru changes (#7484)

Co-authored-by: Emilia Hane <elsaemiliaevahane@gmail.com>
This commit is contained in:
Waylon Jepsen
2024-05-16 19:31:47 -06:00
committed by GitHub
parent 0edf3509a9
commit 31b6bdd13c
11 changed files with 96 additions and 110 deletions

View File

@@ -21,11 +21,11 @@ reth-provider.workspace = true
reth-stages-api.workspace = true
reth-trie = { workspace = true, features = ["metrics"] }
reth-trie-parallel = { workspace = true, features = ["parallel"] }
+reth-network = { workspace = true }
reth-consensus.workspace = true
# common
parking_lot.workspace = true
-lru = "0.12"
tracing.workspace = true
tokio = { workspace = true, features = ["macros", "sync"] }

View File

@@ -1,10 +1,7 @@
use crate::metrics::BlockBufferMetrics;
-use lru::LruCache;
+use reth_network::cache::LruCache;
use reth_primitives::{BlockHash, BlockNumber, SealedBlockWithSenders};
-use std::{
-    collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet},
-    num::NonZeroUsize,
-};
+use std::collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet};
/// Contains the tree of pending blocks that cannot be executed due to missing parent.
/// It allows to store unconnected blocks for potential future inclusion.
@@ -32,19 +29,19 @@ pub struct BlockBuffer {
/// first in line for evicting if `max_blocks` limit is hit.
///
/// Used as counter of amount of blocks inside buffer.
-    pub(crate) lru: LruCache<BlockHash, ()>,
+    pub(crate) lru: LruCache<BlockHash>,
/// Various metrics for the block buffer.
pub(crate) metrics: BlockBufferMetrics,
}
impl BlockBuffer {
/// Create new buffer with max limit of blocks
-    pub fn new(limit: usize) -> Self {
+    pub fn new(limit: u32) -> Self {
Self {
blocks: Default::default(),
parent_to_child: Default::default(),
earliest_blocks: Default::default(),
-            lru: LruCache::new(NonZeroUsize::new(limit).unwrap()),
+            lru: LruCache::new(limit),
metrics: Default::default(),
}
}
@@ -76,7 +73,7 @@ impl BlockBuffer {
self.earliest_blocks.entry(block.number).or_default().insert(hash);
self.blocks.insert(hash, block);
-        if let Some((evicted_hash, _)) = self.lru.push(hash, ()).filter(|(b, _)| *b != hash) {
+        if let (_, Some(evicted_hash)) = self.lru.insert_and_get_evicted(hash) {
// evict the block if limit is hit
if let Some(evicted_block) = self.remove_block(&evicted_hash) {
// evict the block if limit is hit
@@ -85,7 +82,6 @@
}
self.metrics.blocks.set(self.blocks.len() as f64);
}
/// Removes the given block from the buffer and also all the children of the block.
///
/// This is used to get all the blocks that are dependent on the block that is included.
@@ -157,7 +153,7 @@ impl BlockBuffer {
let block = self.blocks.remove(hash)?;
self.remove_from_earliest_blocks(block.number, hash);
self.remove_from_parent(block.parent_hash, hash);
-        self.lru.pop(hash);
+        self.lru.remove(hash);
Some(block)
}

View File

@@ -10,7 +10,7 @@ pub struct BlockchainTreeConfig {
/// The number of blocks that can be re-orged (finalization windows)
max_reorg_depth: u64,
/// The number of unconnected blocks that we are buffering
-    max_unconnected_blocks: usize,
+    max_unconnected_blocks: u32,
/// Number of additional block hashes to save in blockchain tree. For `BLOCKHASH` EVM opcode we
/// need last 256 block hashes.
///
@@ -43,7 +43,7 @@ impl BlockchainTreeConfig {
max_reorg_depth: u64,
max_blocks_in_chain: u64,
num_of_additional_canonical_block_hashes: u64,
-        max_unconnected_blocks: usize,
+        max_unconnected_blocks: u32,
) -> Self {
if max_reorg_depth > max_blocks_in_chain {
panic!("Side chain size should be more than finalization window");
@@ -84,7 +84,7 @@
}
/// Return max number of unconnected blocks that we are buffering
-    pub fn max_unconnected_blocks(&self) -> usize {
+    pub fn max_unconnected_blocks(&self) -> u32 {
self.max_unconnected_blocks
}
}

View File

@@ -26,7 +26,7 @@ impl TreeState {
pub(crate) fn new(
last_finalized_block_number: BlockNumber,
last_canonical_hashes: impl IntoIterator<Item = (BlockNumber, BlockHash)>,
-        buffer_limit: usize,
+        buffer_limit: u32,
) -> Self {
Self {
block_chain_id_generator: 0,