chore!: rm legacy blockchain tree crate (#13726)

This commit is contained in:
Matthias Seitz
2025-01-08 13:51:51 +01:00
committed by GitHub
parent 2f94aeebed
commit 052a730e3c
17 changed files with 0 additions and 5511 deletions

38
Cargo.lock generated
View File

@ -6517,44 +6517,6 @@ dependencies = [
"tracing",
]
[[package]]
name = "reth-blockchain-tree"
version = "1.1.5"
dependencies = [
"alloy-consensus",
"alloy-eips",
"alloy-genesis",
"alloy-primitives",
"aquamarine",
"assert_matches",
"linked_hash_set",
"metrics",
"parking_lot",
"reth-blockchain-tree-api",
"reth-chainspec",
"reth-consensus",
"reth-db",
"reth-db-api",
"reth-evm",
"reth-evm-ethereum",
"reth-execution-errors",
"reth-execution-types",
"reth-metrics",
"reth-network",
"reth-node-types",
"reth-primitives",
"reth-provider",
"reth-revm",
"reth-stages-api",
"reth-storage-errors",
"reth-testing-utils",
"reth-trie",
"reth-trie-db",
"reth-trie-parallel",
"tokio",
"tracing",
]
[[package]]
name = "reth-blockchain-tree-api"
version = "1.1.5"

View File

@ -12,7 +12,6 @@ members = [
"bin/reth-bench/",
"bin/reth/",
"crates/blockchain-tree-api/",
"crates/blockchain-tree/",
"crates/chain-state/",
"crates/chainspec/",
"crates/cli/cli/",
@ -304,7 +303,6 @@ op-reth = { path = "crates/optimism/bin" }
reth = { path = "bin/reth" }
reth-basic-payload-builder = { path = "crates/payload/basic" }
reth-bench = { path = "bin/reth-bench" }
reth-blockchain-tree = { path = "crates/blockchain-tree" }
reth-blockchain-tree-api = { path = "crates/blockchain-tree-api" }
reth-chain-state = { path = "crates/chain-state" }
reth-chainspec = { path = "crates/chainspec", default-features = false }

View File

@ -1,89 +0,0 @@
[package]
name = "reth-blockchain-tree"
version.workspace = true
edition.workspace = true
rust-version.workspace = true
license.workspace = true
homepage.workspace = true
repository.workspace = true
[lints]
workspace = true
[dependencies]
# reth
reth-blockchain-tree-api.workspace = true
reth-primitives.workspace = true
reth-storage-errors.workspace = true
reth-execution-errors.workspace = true
reth-db.workspace = true
reth-db-api.workspace = true
reth-evm.workspace = true
reth-revm.workspace = true
reth-provider.workspace = true
reth-execution-types.workspace = true
reth-stages-api.workspace = true
reth-trie = { workspace = true, features = ["metrics"] }
reth-trie-db = { workspace = true, features = ["metrics"] }
reth-trie-parallel.workspace = true
reth-network.workspace = true
reth-consensus.workspace = true
reth-node-types.workspace = true
# ethereum
alloy-consensus.workspace = true
alloy-primitives.workspace = true
alloy-eips.workspace = true
# common
parking_lot.workspace = true
tracing.workspace = true
tokio = { workspace = true, features = ["macros", "sync"] }
# metrics
reth-metrics = { workspace = true, features = ["common"] }
metrics.workspace = true
# misc
aquamarine.workspace = true
linked_hash_set.workspace = true
[dev-dependencies]
reth-chainspec.workspace = true
reth-db = { workspace = true, features = ["test-utils"] }
reth-primitives = { workspace = true, features = ["test-utils"] }
reth-provider = { workspace = true, features = ["test-utils"] }
reth-evm = { workspace = true, features = ["test-utils"] }
reth-consensus = { workspace = true, features = ["test-utils"] }
reth-testing-utils.workspace = true
reth-revm.workspace = true
reth-evm-ethereum.workspace = true
reth-execution-types.workspace = true
parking_lot.workspace = true
assert_matches.workspace = true
alloy-genesis.workspace = true
alloy-consensus.workspace = true
[features]
test-utils = [
"reth-chainspec/test-utils",
"reth-consensus/test-utils",
"reth-evm/test-utils",
"reth-network/test-utils",
"reth-primitives/test-utils",
"reth-revm/test-utils",
"reth-stages-api/test-utils",
"reth-db/test-utils",
"reth-db-api/test-utils",
"reth-provider/test-utils",
"reth-trie-db/test-utils",
"reth-trie/test-utils",
"reth-trie-parallel/test-utils"
]
optimism = [
"reth-primitives/optimism",
"reth-provider/optimism",
"reth-execution-types/optimism",
"reth-db/optimism",
"reth-db-api/optimism"
]

View File

@ -1,21 +0,0 @@
flowchart BT
%% Overview of the legacy blockchain tree: a canonical chain (block0..block5),
%% two sidechains forking off block2 and block3, and two pending blocks
%% extending the canonical tip (block5). Each subgraph also carries a state
%% node; the classDef lines at the bottom assign one color per block category.
subgraph canonical chain
CanonState:::state
block0canon:::canon -->block1canon:::canon -->block2canon:::canon -->block3canon:::canon -->
block4canon:::canon --> block5canon:::canon
end
block5canon --> block6pending1:::pending
block5canon --> block6pending2:::pending
subgraph sidechain2
S2State:::state
block3canon --> block4s2:::sidechain --> block5s2:::sidechain
end
subgraph sidechain1
S1State:::state
block2canon --> block3s1:::sidechain --> block4s1:::sidechain --> block5s1:::sidechain -->
block6s1:::sidechain
end
classDef state fill:#1882C4
classDef canon fill:#8AC926
classDef pending fill:#FFCA3A
classDef sidechain fill:#FF595E
View File

@ -1,494 +0,0 @@
use crate::metrics::BlockBufferMetrics;
use alloy_consensus::BlockHeader;
use alloy_primitives::{BlockHash, BlockNumber};
use reth_network::cache::LruCache;
use reth_node_types::Block;
use reth_primitives::SealedBlockWithSenders;
use std::collections::{BTreeMap, HashMap, HashSet};
/// Contains the tree of pending blocks that cannot be executed due to missing parent.
/// It allows to store unconnected blocks for potential future inclusion.
///
/// The buffer has three main functionalities:
/// * [`BlockBuffer::insert_block`] for inserting blocks inside the buffer.
/// * [`BlockBuffer::remove_block_with_children`] for connecting blocks if the parent gets received
///   and inserted.
/// * [`BlockBuffer::remove_old_blocks`] to remove old blocks that precede the finalized number.
///
/// Note: Buffer is limited by number of blocks that it can contain and eviction of the block
/// is done by last recently used block.
#[derive(Debug)]
pub struct BlockBuffer<B: Block = reth_primitives::Block> {
    /// All blocks in the buffer stored by their block hash.
    pub(crate) blocks: HashMap<BlockHash, SealedBlockWithSenders<B>>,
    /// Map of any parent block hash (even the ones not currently in the buffer)
    /// to the buffered children.
    /// Allows connecting buffered blocks by parent.
    pub(crate) parent_to_child: HashMap<BlockHash, HashSet<BlockHash>>,
    /// `BTreeMap` tracking the earliest blocks by block number.
    /// Used for removal of old blocks that precede finalization.
    pub(crate) earliest_blocks: BTreeMap<BlockNumber, HashSet<BlockHash>>,
    /// LRU used for tracking the oldest inserted blocks, which are first in line
    /// for eviction if the `max_blocks` limit is hit.
    ///
    /// Also doubles as a counter of the number of blocks inside the buffer.
    pub(crate) lru: LruCache<BlockHash>,
    /// Various metrics for the block buffer.
    pub(crate) metrics: BlockBufferMetrics,
}
impl<B: Block> BlockBuffer<B> {
    /// Create a new buffer that can hold at most `limit` blocks; inserting beyond
    /// the limit evicts the least recently used block.
    pub fn new(limit: u32) -> Self {
        Self {
            blocks: Default::default(),
            parent_to_child: Default::default(),
            earliest_blocks: Default::default(),
            lru: LruCache::new(limit),
            metrics: Default::default(),
        }
    }

    /// Return reference to buffered blocks
    pub const fn blocks(&self) -> &HashMap<BlockHash, SealedBlockWithSenders<B>> {
        &self.blocks
    }

    /// Return reference to the requested block, if it is currently buffered.
    pub fn block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders<B>> {
        self.blocks.get(hash)
    }

    /// Return a reference to the lowest ancestor of the given block in the buffer.
    ///
    /// Walks parent links upwards for as long as the parent is itself buffered; the
    /// returned block is the first one whose parent is *not* in the buffer.
    pub fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders<B>> {
        let mut current_block = self.blocks.get(hash)?;
        while let Some(parent) = self.blocks.get(&current_block.parent_hash()) {
            current_block = parent;
        }
        Some(current_block)
    }

    /// Insert a correct block inside the buffer.
    ///
    /// The block is registered in all three indices (`blocks`, `parent_to_child`,
    /// `earliest_blocks`) and in the LRU; if the LRU is at capacity, the least
    /// recently used block is evicted from the buffer.
    pub fn insert_block(&mut self, block: SealedBlockWithSenders<B>) {
        let hash = block.hash();

        self.parent_to_child.entry(block.parent_hash()).or_default().insert(hash);
        self.earliest_blocks.entry(block.number()).or_default().insert(hash);
        self.blocks.insert(hash, block);

        if let (_, Some(evicted_hash)) = self.lru.insert_and_get_evicted(hash) {
            // evict the block if limit is hit
            if let Some(evicted_block) = self.remove_block(&evicted_hash) {
                // NOTE(review): `remove_block` already unlinks the block from
                // `parent_to_child`; this extra call is redundant but harmless.
                self.remove_from_parent(evicted_block.parent_hash(), &evicted_hash);
            }
        }
        self.metrics.blocks.set(self.blocks.len() as f64);
    }

    /// Removes the given block from the buffer and also all the children of the block.
    ///
    /// This is used to get all the blocks that are dependent on the block that is included.
    ///
    /// Note: that order of returned blocks is important and the blocks with lower block number
    /// in the chain will come first so that they can be executed in the correct order.
    pub fn remove_block_with_children(
        &mut self,
        parent_hash: &BlockHash,
    ) -> Vec<SealedBlockWithSenders<B>> {
        // The parent itself (if buffered) comes first, then its descendants.
        let removed = self
            .remove_block(parent_hash)
            .into_iter()
            .chain(self.remove_children(vec![*parent_hash]))
            .collect();
        self.metrics.blocks.set(self.blocks.len() as f64);
        removed
    }

    /// Discard all blocks that precede block number from the buffer.
    pub fn remove_old_blocks(&mut self, block_number: BlockNumber) {
        let mut block_hashes_to_remove = Vec::new();

        // discard all blocks that are before the finalized number.
        while let Some(entry) = self.earliest_blocks.first_entry() {
            if *entry.key() > block_number {
                break
            }
            let block_hashes = entry.remove();
            block_hashes_to_remove.extend(block_hashes);
        }

        // remove from other collections.
        for block_hash in &block_hashes_to_remove {
            // It's fine to call this even if a hash is already gone from some index:
            // `remove_block` only cleans up whatever is actually present.
            self.remove_block(block_hash);
        }

        self.remove_children(block_hashes_to_remove);
        self.metrics.blocks.set(self.blocks.len() as f64);
    }

    /// Remove the block from the `earliest_blocks` index, dropping the per-number
    /// set entirely once it becomes empty.
    fn remove_from_earliest_blocks(&mut self, number: BlockNumber, hash: &BlockHash) {
        if let Some(entry) = self.earliest_blocks.get_mut(&number) {
            entry.remove(hash);
            if entry.is_empty() {
                self.earliest_blocks.remove(&number);
            }
        }
    }

    /// Remove from parent child connection. This method does not remove children.
    fn remove_from_parent(&mut self, parent_hash: BlockHash, hash: &BlockHash) {
        // remove from parent to child connection, but only for this block parent.
        if let Some(entry) = self.parent_to_child.get_mut(&parent_hash) {
            entry.remove(hash);
            // if set is empty remove block entry.
            if entry.is_empty() {
                self.parent_to_child.remove(&parent_hash);
            }
        }
    }

    /// Removes block from inner collections.
    /// This method will only remove the block if it's present inside `self.blocks`.
    /// The block might be missing from other collections, the method will only ensure that it has
    /// been removed.
    fn remove_block(&mut self, hash: &BlockHash) -> Option<SealedBlockWithSenders<B>> {
        let block = self.blocks.remove(hash)?;
        self.remove_from_earliest_blocks(block.number(), hash);
        self.remove_from_parent(block.parent_hash(), hash);
        self.lru.remove(hash);
        Some(block)
    }

    /// Remove all children and their descendants for the given blocks and return them.
    fn remove_children(&mut self, parent_hashes: Vec<BlockHash>) -> Vec<SealedBlockWithSenders<B>> {
        // remove all parent child connection and all the child children blocks that are connected
        // to the discarded parent blocks.
        let mut remove_parent_children = parent_hashes;
        let mut removed_blocks = Vec::new();
        // Work-list traversal: pop a parent, remove all of its buffered children,
        // then queue those children so their own descendants get removed too.
        while let Some(parent_hash) = remove_parent_children.pop() {
            // get this child blocks children and add them to the remove list.
            if let Some(parent_children) = self.parent_to_child.remove(&parent_hash) {
                // remove child from buffer
                for child_hash in &parent_children {
                    if let Some(block) = self.remove_block(child_hash) {
                        removed_blocks.push(block);
                    }
                }
                remove_parent_children.extend(parent_children);
            }
        }
        removed_blocks
    }
}
#[cfg(test)]
mod tests {
    use crate::BlockBuffer;
    use alloy_eips::BlockNumHash;
    use alloy_primitives::BlockHash;
    use reth_primitives::SealedBlockWithSenders;
    use reth_testing_utils::generators::{self, random_block, BlockParams, Rng};
    use std::collections::HashMap;

    /// Create random block with specified number and parent hash.
    fn create_block<R: Rng>(rng: &mut R, number: u64, parent: BlockHash) -> SealedBlockWithSenders {
        let block =
            random_block(rng, number, BlockParams { parent: Some(parent), ..Default::default() });
        block.seal_with_senders().unwrap()
    }

    /// Assert that all buffer collections have the same data length.
    fn assert_buffer_lengths(buffer: &BlockBuffer, expected: usize) {
        assert_eq!(buffer.blocks.len(), expected);
        assert_eq!(buffer.lru.len(), expected);
        assert_eq!(
            buffer.parent_to_child.iter().fold(0, |acc, (_, hashes)| acc + hashes.len()),
            expected
        );
        assert_eq!(
            buffer.earliest_blocks.iter().fold(0, |acc, (_, hashes)| acc + hashes.len()),
            expected
        );
    }

    /// Assert that the block was removed from all buffer collections.
    fn assert_block_removal(buffer: &BlockBuffer, block: &SealedBlockWithSenders) {
        assert!(!buffer.blocks.contains_key(&block.hash()));
        assert!(buffer
            .parent_to_child
            .get(&block.parent_hash)
            .and_then(|p| p.get(&block.hash()))
            .is_none());
        assert!(buffer
            .earliest_blocks
            .get(&block.number)
            .and_then(|hashes| hashes.get(&block.hash()))
            .is_none());
    }

    #[test]
    fn simple_insertion() {
        let mut rng = generators::rng();
        let parent = rng.gen();
        let block1 = create_block(&mut rng, 10, parent);
        let mut buffer = BlockBuffer::new(3);

        buffer.insert_block(block1.clone());
        assert_buffer_lengths(&buffer, 1);
        assert_eq!(buffer.block(&block1.hash()), Some(&block1));
    }

    #[test]
    fn take_entire_chain_of_children() {
        let mut rng = generators::rng();

        // chain: block1 -> block2 -> block3; block4 hangs off an unrelated parent.
        let main_parent_hash = rng.gen();
        let block1 = create_block(&mut rng, 10, main_parent_hash);
        let block2 = create_block(&mut rng, 11, block1.hash());
        let block3 = create_block(&mut rng, 12, block2.hash());
        let parent4 = rng.gen();
        let block4 = create_block(&mut rng, 14, parent4);

        let mut buffer = BlockBuffer::new(5);

        buffer.insert_block(block1.clone());
        buffer.insert_block(block2.clone());
        buffer.insert_block(block3.clone());
        buffer.insert_block(block4.clone());

        assert_buffer_lengths(&buffer, 4);
        assert_eq!(buffer.block(&block4.hash()), Some(&block4));
        assert_eq!(buffer.block(&block2.hash()), Some(&block2));
        assert_eq!(buffer.block(&main_parent_hash), None);

        assert_eq!(buffer.lowest_ancestor(&block4.hash()), Some(&block4));
        assert_eq!(buffer.lowest_ancestor(&block3.hash()), Some(&block1));
        assert_eq!(buffer.lowest_ancestor(&block1.hash()), Some(&block1));
        // removing by the chain's parent returns the chain in ascending order;
        // the unrelated block4 stays behind.
        assert_eq!(
            buffer.remove_block_with_children(&main_parent_hash),
            vec![block1, block2, block3]
        );
        assert_buffer_lengths(&buffer, 1);
    }

    #[test]
    fn take_all_multi_level_children() {
        let mut rng = generators::rng();

        // block1 has two children (block2, block3); block4 is a grandchild via block2.
        let main_parent_hash = rng.gen();
        let block1 = create_block(&mut rng, 10, main_parent_hash);
        let block2 = create_block(&mut rng, 11, block1.hash());
        let block3 = create_block(&mut rng, 11, block1.hash());
        let block4 = create_block(&mut rng, 12, block2.hash());

        let mut buffer = BlockBuffer::new(5);

        buffer.insert_block(block1.clone());
        buffer.insert_block(block2.clone());
        buffer.insert_block(block3.clone());
        buffer.insert_block(block4.clone());

        assert_buffer_lengths(&buffer, 4);
        assert_eq!(
            buffer
                .remove_block_with_children(&main_parent_hash)
                .into_iter()
                .map(|b| (b.hash(), b))
                .collect::<HashMap<_, _>>(),
            HashMap::from([
                (block1.hash(), block1),
                (block2.hash(), block2),
                (block3.hash(), block3),
                (block4.hash(), block4)
            ])
        );
        assert_buffer_lengths(&buffer, 0);
    }

    #[test]
    fn take_block_with_children() {
        let mut rng = generators::rng();

        let main_parent = BlockNumHash::new(9, rng.gen());
        let block1 = create_block(&mut rng, 10, main_parent.hash);
        let block2 = create_block(&mut rng, 11, block1.hash());
        let block3 = create_block(&mut rng, 11, block1.hash());
        let block4 = create_block(&mut rng, 12, block2.hash());

        let mut buffer = BlockBuffer::new(5);

        buffer.insert_block(block1.clone());
        buffer.insert_block(block2.clone());
        buffer.insert_block(block3.clone());
        buffer.insert_block(block4.clone());

        assert_buffer_lengths(&buffer, 4);
        // removing by block1's own hash also returns block1 plus all descendants.
        assert_eq!(
            buffer
                .remove_block_with_children(&block1.hash())
                .into_iter()
                .map(|b| (b.hash(), b))
                .collect::<HashMap<_, _>>(),
            HashMap::from([
                (block1.hash(), block1),
                (block2.hash(), block2),
                (block3.hash(), block3),
                (block4.hash(), block4)
            ])
        );
        assert_buffer_lengths(&buffer, 0);
    }

    #[test]
    fn remove_chain_of_children() {
        let mut rng = generators::rng();

        let main_parent = BlockNumHash::new(9, rng.gen());
        let block1 = create_block(&mut rng, 10, main_parent.hash);
        let block2 = create_block(&mut rng, 11, block1.hash());
        let block3 = create_block(&mut rng, 12, block2.hash());
        let parent4 = rng.gen();
        let block4 = create_block(&mut rng, 14, parent4);

        let mut buffer = BlockBuffer::new(5);

        buffer.insert_block(block1.clone());
        buffer.insert_block(block2);
        buffer.insert_block(block3);
        buffer.insert_block(block4);

        assert_buffer_lengths(&buffer, 4);
        // block1 and its chained descendants go; only unrelated block4 remains.
        buffer.remove_old_blocks(block1.number);
        assert_buffer_lengths(&buffer, 1);
    }

    #[test]
    fn remove_all_multi_level_children() {
        let mut rng = generators::rng();

        let main_parent = BlockNumHash::new(9, rng.gen());
        let block1 = create_block(&mut rng, 10, main_parent.hash);
        let block2 = create_block(&mut rng, 11, block1.hash());
        let block3 = create_block(&mut rng, 11, block1.hash());
        let block4 = create_block(&mut rng, 12, block2.hash());

        let mut buffer = BlockBuffer::new(5);

        buffer.insert_block(block1.clone());
        buffer.insert_block(block2);
        buffer.insert_block(block3);
        buffer.insert_block(block4);

        assert_buffer_lengths(&buffer, 4);
        // every block descends from block1, so the whole buffer empties.
        buffer.remove_old_blocks(block1.number);
        assert_buffer_lengths(&buffer, 0);
    }

    #[test]
    fn remove_multi_chains() {
        let mut rng = generators::rng();

        let main_parent = BlockNumHash::new(9, rng.gen());
        let block1 = create_block(&mut rng, 10, main_parent.hash);
        let block1a = create_block(&mut rng, 10, main_parent.hash);
        let block2 = create_block(&mut rng, 11, block1.hash());
        let block2a = create_block(&mut rng, 11, block1.hash());
        let random_parent1 = rng.gen();
        let random_block1 = create_block(&mut rng, 10, random_parent1);
        let random_parent2 = rng.gen();
        let random_block2 = create_block(&mut rng, 11, random_parent2);
        let random_parent3 = rng.gen();
        let random_block3 = create_block(&mut rng, 12, random_parent3);

        let mut buffer = BlockBuffer::new(10);

        buffer.insert_block(block1.clone());
        buffer.insert_block(block1a.clone());
        buffer.insert_block(block2.clone());
        buffer.insert_block(block2a.clone());
        buffer.insert_block(random_block1.clone());
        buffer.insert_block(random_block2.clone());
        buffer.insert_block(random_block3.clone());

        // check that random blocks are their own ancestor, and that chains have proper ancestors
        assert_eq!(buffer.lowest_ancestor(&random_block1.hash()), Some(&random_block1));
        assert_eq!(buffer.lowest_ancestor(&random_block2.hash()), Some(&random_block2));
        assert_eq!(buffer.lowest_ancestor(&random_block3.hash()), Some(&random_block3));

        // descendants have ancestors
        assert_eq!(buffer.lowest_ancestor(&block2a.hash()), Some(&block1));
        assert_eq!(buffer.lowest_ancestor(&block2.hash()), Some(&block1));

        // roots are themselves
        assert_eq!(buffer.lowest_ancestor(&block1a.hash()), Some(&block1a));
        assert_eq!(buffer.lowest_ancestor(&block1.hash()), Some(&block1));

        assert_buffer_lengths(&buffer, 7);
        // everything at number <= 10 (and descendants thereof) is discarded;
        // only random_block2 (11) and random_block3 (12) survive.
        buffer.remove_old_blocks(10);
        assert_buffer_lengths(&buffer, 2);
    }

    #[test]
    fn evict_with_gap() {
        let mut rng = generators::rng();

        let main_parent = BlockNumHash::new(9, rng.gen());
        let block1 = create_block(&mut rng, 10, main_parent.hash);
        let block2 = create_block(&mut rng, 11, block1.hash());
        let block3 = create_block(&mut rng, 12, block2.hash());
        let parent4 = rng.gen();
        let block4 = create_block(&mut rng, 13, parent4);

        let mut buffer = BlockBuffer::new(3);

        buffer.insert_block(block1.clone());
        buffer.insert_block(block2.clone());
        buffer.insert_block(block3.clone());

        // pre-eviction block1 is the root
        assert_eq!(buffer.lowest_ancestor(&block3.hash()), Some(&block1));
        assert_eq!(buffer.lowest_ancestor(&block2.hash()), Some(&block1));
        assert_eq!(buffer.lowest_ancestor(&block1.hash()), Some(&block1));

        buffer.insert_block(block4.clone());
        assert_eq!(buffer.lowest_ancestor(&block4.hash()), Some(&block4));

        // block1 gets evicted
        assert_block_removal(&buffer, &block1);

        // check lowest ancestor results post eviction
        assert_eq!(buffer.lowest_ancestor(&block3.hash()), Some(&block2));
        assert_eq!(buffer.lowest_ancestor(&block2.hash()), Some(&block2));
        assert_eq!(buffer.lowest_ancestor(&block1.hash()), None);

        assert_buffer_lengths(&buffer, 3);
    }

    #[test]
    fn simple_eviction() {
        let mut rng = generators::rng();

        let main_parent = BlockNumHash::new(9, rng.gen());
        let block1 = create_block(&mut rng, 10, main_parent.hash);
        let block2 = create_block(&mut rng, 11, block1.hash());
        let block3 = create_block(&mut rng, 12, block2.hash());
        let parent4 = rng.gen();
        let block4 = create_block(&mut rng, 13, parent4);

        let mut buffer = BlockBuffer::new(3);

        buffer.insert_block(block1.clone());
        buffer.insert_block(block2);
        buffer.insert_block(block3);
        buffer.insert_block(block4);

        // block1 (the least recently used) gets evicted when block4 exceeds the limit
        assert_block_removal(&buffer, &block1);
        assert_buffer_lengths(&buffer, 3);
    }
}

View File

@ -1,620 +0,0 @@
//! Implementation of [`BlockIndices`] related to [`super::BlockchainTree`]
use super::state::SidechainId;
use crate::canonical_chain::CanonicalChain;
use alloy_eips::BlockNumHash;
use alloy_primitives::{BlockHash, BlockNumber};
use linked_hash_set::LinkedHashSet;
use reth_execution_types::Chain;
use reth_primitives::SealedBlockWithSenders;
use std::collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet};
/// Internal indices of the blocks and chains.
///
/// This is the main connection between blocks, chains and the canonical chain.
///
/// It contains a list of canonical block hashes, forks to child blocks, and a mapping of block hash
/// to chain ID.
#[derive(Debug, Clone)]
pub struct BlockIndices {
    /// Last finalized block.
    last_finalized_block: BlockNumber,
    /// Non-finalized canonical chain. Contains N number (depends on `finalization_depth`) of
    /// blocks. These blocks are found in `fork_to_child` but not inside `blocks_to_chain` or
    /// `number_to_block` as those are sidechain specific indices.
    canonical_chain: CanonicalChain,
    /// Index needed when discarding the chain, so we can remove connected chains from tree.
    ///
    /// This maintains insertion order for all child blocks, so
    /// [`BlockIndices::pending_block_num_hash`] returns always the same block: the first child
    /// block we inserted.
    ///
    /// NOTE: It contains just blocks that are forks as a key and not all blocks.
    fork_to_child: HashMap<BlockHash, LinkedHashSet<BlockHash>>,
    /// Utility index for Block number to block hash(s).
    ///
    /// This maps all blocks with same block number to their hash.
    ///
    /// Can be used for RPC fetch block(s) in chain by its number.
    ///
    /// Note: This is a bijection: at all times `blocks_to_chain` and this map contain the block
    /// hashes.
    block_number_to_block_hashes: BTreeMap<BlockNumber, HashSet<BlockHash>>,
    /// Block hashes to the sidechain IDs they belong to.
    blocks_to_chain: HashMap<BlockHash, SidechainId>,
}
impl BlockIndices {
/// Create new block indices structure
pub fn new(
last_finalized_block: BlockNumber,
canonical_chain: BTreeMap<BlockNumber, BlockHash>,
) -> Self {
Self {
last_finalized_block,
canonical_chain: CanonicalChain::new(canonical_chain),
fork_to_child: Default::default(),
blocks_to_chain: Default::default(),
block_number_to_block_hashes: Default::default(),
}
}
/// Return fork to child indices
pub const fn fork_to_child(&self) -> &HashMap<BlockHash, LinkedHashSet<BlockHash>> {
&self.fork_to_child
}
/// Return block to sidechain id
#[allow(dead_code)]
pub(crate) const fn blocks_to_chain(&self) -> &HashMap<BlockHash, SidechainId> {
&self.blocks_to_chain
}
/// Returns the hash and number of the pending block.
///
/// It is possible that multiple child blocks for the canonical tip exist.
/// This will always return the _first_ child we recorded for the canonical tip.
pub(crate) fn pending_block_num_hash(&self) -> Option<BlockNumHash> {
let canonical_tip = self.canonical_tip();
let hash = self.fork_to_child.get(&canonical_tip.hash)?.front().copied()?;
Some(BlockNumHash { number: canonical_tip.number + 1, hash })
}
/// Returns all pending block hashes.
///
/// Pending blocks are considered blocks that are extending the canonical tip by one block
/// number and have their parent hash set to the canonical tip.
pub fn pending_blocks(&self) -> (BlockNumber, Vec<BlockHash>) {
let canonical_tip = self.canonical_tip();
let pending_blocks = self
.fork_to_child
.get(&canonical_tip.hash)
.cloned()
.unwrap_or_default()
.into_iter()
.collect();
(canonical_tip.number + 1, pending_blocks)
}
/// Last finalized block
pub const fn last_finalized_block(&self) -> BlockNumber {
self.last_finalized_block
}
/// Insert non fork block.
pub(crate) fn insert_non_fork_block(
&mut self,
block_number: BlockNumber,
block_hash: BlockHash,
chain_id: SidechainId,
) {
self.block_number_to_block_hashes.entry(block_number).or_default().insert(block_hash);
self.blocks_to_chain.insert(block_hash, chain_id);
}
/// Insert block to chain and fork child indices of the new chain
pub(crate) fn insert_chain(&mut self, chain_id: SidechainId, chain: &Chain) {
for (number, block) in chain.blocks() {
// add block -> chain_id index
self.blocks_to_chain.insert(block.hash(), chain_id);
// add number -> block
self.block_number_to_block_hashes.entry(*number).or_default().insert(block.hash());
}
let first = chain.first();
// add parent block -> block index
self.fork_to_child.entry(first.parent_hash).or_default().insert_if_absent(first.hash());
}
/// Get the [`SidechainId`] for the given block hash if it exists.
pub(crate) fn get_side_chain_id(&self, block: &BlockHash) -> Option<SidechainId> {
self.blocks_to_chain.get(block).copied()
}
/// Update all block hashes. iterate over present and new list of canonical hashes and compare
/// them. Remove all mismatches, disconnect them and return all chains that needs to be
/// removed.
pub(crate) fn update_block_hashes(
&mut self,
hashes: BTreeMap<u64, BlockHash>,
) -> (BTreeSet<SidechainId>, Vec<BlockNumHash>) {
// set new canonical hashes.
self.canonical_chain.replace(hashes.clone());
let mut new_hashes = hashes.into_iter();
let mut old_hashes = self.canonical_chain().clone().into_iter();
let mut removed = Vec::new();
let mut added = Vec::new();
let mut new_hash = new_hashes.next();
let mut old_hash = old_hashes.next();
loop {
let Some(old_block_value) = old_hash else {
// end of old_hashes canonical chain. New chain has more blocks than old chain.
while let Some(new) = new_hash {
// add new blocks to added list.
added.push(new.into());
new_hash = new_hashes.next();
}
break
};
let Some(new_block_value) = new_hash else {
// Old canonical chain had more block than new chain.
// remove all present block.
// this is mostly not going to happen as reorg should make new chain in Tree.
while let Some(rem) = old_hash {
removed.push(rem);
old_hash = old_hashes.next();
}
break
};
// compare old and new canonical block number
match new_block_value.0.cmp(&old_block_value.0) {
std::cmp::Ordering::Less => {
// new chain has more past blocks than old chain
added.push(new_block_value.into());
new_hash = new_hashes.next();
}
std::cmp::Ordering::Equal => {
if new_block_value.1 != old_block_value.1 {
// remove block hash as it is different
removed.push(old_block_value);
added.push(new_block_value.into())
}
new_hash = new_hashes.next();
old_hash = old_hashes.next();
}
std::cmp::Ordering::Greater => {
// old chain has more past blocks than new chain
removed.push(old_block_value);
old_hash = old_hashes.next()
}
}
}
// remove children of removed blocks
(
removed.into_iter().fold(BTreeSet::new(), |mut fold, (number, hash)| {
fold.extend(self.remove_block(number, hash));
fold
}),
added,
)
}
/// Remove chain from indices and return dependent chains that need to be removed.
/// Does the cleaning of the tree and removing blocks from the chain.
pub(crate) fn remove_chain(&mut self, chain: &Chain) -> BTreeSet<SidechainId> {
chain
.blocks()
.iter()
.flat_map(|(block_number, block)| {
let block_hash = block.hash();
self.remove_block(*block_number, block_hash)
})
.collect()
}
/// Remove Blocks from indices.
fn remove_block(
&mut self,
block_number: BlockNumber,
block_hash: BlockHash,
) -> BTreeSet<SidechainId> {
// rm number -> block
if let btree_map::Entry::Occupied(mut entry) =
self.block_number_to_block_hashes.entry(block_number)
{
let set = entry.get_mut();
set.remove(&block_hash);
// remove set if empty
if set.is_empty() {
entry.remove();
}
}
// rm block -> chain_id
self.blocks_to_chain.remove(&block_hash);
// rm fork -> child
let removed_fork = self.fork_to_child.remove(&block_hash);
removed_fork
.map(|fork_blocks| {
fork_blocks
.into_iter()
.filter_map(|fork_child| self.blocks_to_chain.remove(&fork_child))
.collect()
})
.unwrap_or_default()
}
/// Remove all blocks from canonical list and insert new blocks to it.
///
/// It is assumed that blocks are interconnected and that they connect to canonical chain
pub fn canonicalize_blocks(&mut self, blocks: &BTreeMap<BlockNumber, SealedBlockWithSenders>) {
if blocks.is_empty() {
return
}
// Remove all blocks from canonical chain
let first_number = *blocks.first_key_value().unwrap().0;
// this will remove all blocks numbers that are going to be replaced.
self.canonical_chain.retain(|&number, _| number < first_number);
// remove them from block to chain_id index
blocks.iter().map(|(_, b)| (b.number, b.hash(), b.parent_hash)).for_each(
|(number, hash, parent_hash)| {
// rm block -> chain_id
self.blocks_to_chain.remove(&hash);
// rm number -> block
if let btree_map::Entry::Occupied(mut entry) =
self.block_number_to_block_hashes.entry(number)
{
let set = entry.get_mut();
set.remove(&hash);
// remove set if empty
if set.is_empty() {
entry.remove();
}
}
// rm fork block -> hash
if let hash_map::Entry::Occupied(mut entry) = self.fork_to_child.entry(parent_hash)
{
let set = entry.get_mut();
set.remove(&hash);
// remove set if empty
if set.is_empty() {
entry.remove();
}
}
},
);
// insert new canonical
self.canonical_chain.extend(blocks.iter().map(|(number, block)| (*number, block.hash())))
}
/// this is function that is going to remove N number of last canonical hashes.
///
/// NOTE: This is not safe standalone, as it will not disconnect
/// blocks that depend on unwinded canonical chain. And should be
/// used when canonical chain is reinserted inside Tree.
pub(crate) fn unwind_canonical_chain(&mut self, unwind_to: BlockNumber) {
// this will remove all blocks numbers that are going to be replaced.
self.canonical_chain.retain(|num, _| *num <= unwind_to);
}
/// Used for finalization of block.
///
/// Return list of chains for removal that depend on finalized canonical chain.
pub(crate) fn finalize_canonical_blocks(
&mut self,
finalized_block: BlockNumber,
num_of_additional_canonical_hashes_to_retain: u64,
) -> BTreeSet<SidechainId> {
// get finalized chains. blocks between [self.last_finalized,finalized_block).
// Dont remove finalized_block, as sidechain can point to it.
let finalized_blocks: Vec<BlockHash> = self
.canonical_chain
.iter()
.filter(|(number, _)| *number >= self.last_finalized_block && *number < finalized_block)
.map(|(_, hash)| hash)
.collect();
// remove unneeded canonical hashes.
let remove_until =
finalized_block.saturating_sub(num_of_additional_canonical_hashes_to_retain);
self.canonical_chain.retain(|&number, _| number >= remove_until);
let mut lose_chains = BTreeSet::new();
for block_hash in finalized_blocks {
// there is a fork block.
if let Some(fork_blocks) = self.fork_to_child.remove(&block_hash) {
lose_chains = fork_blocks.into_iter().fold(lose_chains, |mut fold, fork_child| {
if let Some(lose_chain) = self.blocks_to_chain.remove(&fork_child) {
fold.insert(lose_chain);
}
fold
});
}
}
// set last finalized block.
self.last_finalized_block = finalized_block;
lose_chains
}
/// Returns the block hash of the canonical block with the given number.
#[inline]
pub fn canonical_hash(&self, block_number: &BlockNumber) -> Option<BlockHash> {
self.canonical_chain.canonical_hash(block_number)
}
/// Returns the block number of the canonical block with the given hash.
#[inline]
pub fn canonical_number(&self, block_hash: &BlockHash) -> Option<BlockNumber> {
self.canonical_chain.canonical_number(block_hash)
}
/// Returns the canonical tip (highest tracked number/hash pair).
#[inline]
pub fn canonical_tip(&self) -> BlockNumHash {
    // Delegate through the accessor so all reads go through one entry point.
    self.canonical_chain().tip()
}
/// Canonical chain needed for execution of EVM. It should contain the last 256 block
/// hashes (required by the `BLOCKHASH` opcode).
///
/// Returns a shared reference to the in-memory canonical chain index.
#[inline]
pub(crate) const fn canonical_chain(&self) -> &CanonicalChain {
    &self.canonical_chain
}
}
#[cfg(test)]
mod tests {
    //! Unit tests for `BlockIndices`: pending-block lookup, non-fork block insertion,
    //! and chain-insertion bookkeeping.
    use super::*;
    use alloy_consensus::Header;
    use alloy_primitives::B256;
    use reth_primitives::{SealedBlock, SealedHeader};

    #[test]
    fn pending_block_num_hash_returns_none_if_no_fork() {
        // Create a new canonical chain with a single block (represented by its number and hash).
        let canonical_chain = BTreeMap::from([(0, B256::from_slice(&[1; 32]))]);
        let block_indices = BlockIndices::new(0, canonical_chain);

        // No fork to child blocks, so there is no pending block.
        assert_eq!(block_indices.pending_block_num_hash(), None);
    }

    #[test]
    fn pending_block_num_hash_works() {
        // Create a canonical chain with multiple blocks at heights 1, 2, and 3.
        let canonical_chain = BTreeMap::from([
            (1, B256::from_slice(&[1; 32])),
            (2, B256::from_slice(&[2; 32])),
            (3, B256::from_slice(&[3; 32])),
        ]);
        let mut block_indices = BlockIndices::new(3, canonical_chain);

        // Define the hash of the parent block (the block at height 3 in the canonical chain).
        let parent_hash = B256::from_slice(&[3; 32]);

        // Define the hashes of two child blocks that extend the canonical chain.
        // NOTE(review): these byte patterns collide with the canonical hashes above;
        // the lookup only consults `fork_to_child` insertion order, so the assertion is
        // unaffected, but distinct fixture bytes would be clearer.
        let child_hash_1 = B256::from_slice(&[2; 32]);
        let child_hash_2 = B256::from_slice(&[3; 32]);

        // Create a set to store both child block hashes.
        let mut child_set = LinkedHashSet::new();
        child_set.insert(child_hash_1);
        child_set.insert(child_hash_2);

        // Associate the parent block hash with its children in the fork_to_child mapping.
        block_indices.fork_to_child.insert(parent_hash, child_set);

        // Pending block should be the first child block.
        assert_eq!(
            block_indices.pending_block_num_hash(),
            Some(BlockNumHash { number: 4, hash: child_hash_1 })
        );
    }

    #[test]
    fn pending_blocks_returns_empty_if_no_fork() {
        // Create a canonical chain with a single block at height 10.
        let canonical_chain = BTreeMap::from([(10, B256::from_slice(&[1; 32]))]);
        let block_indices = BlockIndices::new(0, canonical_chain);

        // No child blocks are associated with the canonical tip.
        assert_eq!(block_indices.pending_blocks(), (11, Vec::new()));
    }

    #[test]
    fn pending_blocks_returns_multiple_children() {
        // Define the hash of the parent block (the block at height 5 in the canonical chain).
        let parent_hash = B256::from_slice(&[3; 32]);

        // Create a canonical chain with a block at height 5.
        let canonical_chain = BTreeMap::from([(5, parent_hash)]);
        let mut block_indices = BlockIndices::new(0, canonical_chain);

        // Define the hashes of two child blocks.
        let child_hash_1 = B256::from_slice(&[4; 32]);
        let child_hash_2 = B256::from_slice(&[5; 32]);

        // Create a set to store both child block hashes.
        let mut child_set = LinkedHashSet::new();
        child_set.insert(child_hash_1);
        child_set.insert(child_hash_2);

        // Associate the parent block hash with its children.
        block_indices.fork_to_child.insert(parent_hash, child_set);

        // Pending blocks should be the two child blocks.
        assert_eq!(block_indices.pending_blocks(), (6, vec![child_hash_1, child_hash_2]));
    }

    #[test]
    fn pending_blocks_with_multiple_forked_chains() {
        // Define hashes for parent blocks and child blocks.
        let parent_hash_1 = B256::from_slice(&[6; 32]);
        let parent_hash_2 = B256::from_slice(&[7; 32]);

        // Create a canonical chain with blocks at heights 1 and 2.
        let canonical_chain = BTreeMap::from([(1, parent_hash_1), (2, parent_hash_2)]);
        let mut block_indices = BlockIndices::new(2, canonical_chain);

        // Define hashes for child blocks.
        let child_hash_1 = B256::from_slice(&[8; 32]);
        let child_hash_2 = B256::from_slice(&[9; 32]);

        // Create sets to store child blocks for each parent block.
        let mut child_set_1 = LinkedHashSet::new();
        let mut child_set_2 = LinkedHashSet::new();
        child_set_1.insert(child_hash_1);
        child_set_2.insert(child_hash_2);

        // Associate parent block hashes with their child blocks.
        block_indices.fork_to_child.insert(parent_hash_1, child_set_1);
        block_indices.fork_to_child.insert(parent_hash_2, child_set_2);

        // Check that the pending blocks are only those extending the canonical tip.
        assert_eq!(block_indices.pending_blocks(), (3, vec![child_hash_2]));
    }

    #[test]
    fn insert_non_fork_block_adds_block_correctly() {
        // Create a new BlockIndices instance with an empty state.
        let mut block_indices = BlockIndices::new(0, BTreeMap::new());

        // Define test parameters.
        let block_number = 1;
        let block_hash = B256::from_slice(&[1; 32]);
        let chain_id = SidechainId::from(42);

        // Insert the block into the BlockIndices instance.
        block_indices.insert_non_fork_block(block_number, block_hash, chain_id);

        // Check that the block number to block hashes mapping includes the new block hash.
        assert_eq!(
            block_indices.block_number_to_block_hashes.get(&block_number),
            Some(&HashSet::from([block_hash]))
        );

        // Check that the block hash to chain ID mapping includes the new entry.
        assert_eq!(block_indices.blocks_to_chain.get(&block_hash), Some(&chain_id));
    }

    #[test]
    fn insert_non_fork_block_combined_tests() {
        // Create a new BlockIndices instance with an empty state.
        let mut block_indices = BlockIndices::new(0, BTreeMap::new());

        // Define test parameters.
        let block_number_1 = 2;
        let block_hash_1 = B256::from_slice(&[1; 32]);
        let block_hash_2 = B256::from_slice(&[2; 32]);
        let chain_id_1 = SidechainId::from(84);

        let block_number_2 = 4;
        let block_hash_3 = B256::from_slice(&[3; 32]);
        let chain_id_2 = SidechainId::from(200);

        // Insert multiple hashes for the same block number.
        block_indices.insert_non_fork_block(block_number_1, block_hash_1, chain_id_1);
        block_indices.insert_non_fork_block(block_number_1, block_hash_2, chain_id_1);

        // Insert blocks with different numbers.
        block_indices.insert_non_fork_block(block_number_2, block_hash_3, chain_id_2);

        // Block number 1 should have two block hashes associated with it.
        let mut expected_hashes_for_block_1 = HashSet::default();
        expected_hashes_for_block_1.insert(block_hash_1);
        expected_hashes_for_block_1.insert(block_hash_2);
        assert_eq!(
            block_indices.block_number_to_block_hashes.get(&block_number_1),
            Some(&expected_hashes_for_block_1)
        );

        // Check that the block hashes for block_number_1 are associated with the correct chain ID.
        assert_eq!(block_indices.blocks_to_chain.get(&block_hash_1), Some(&chain_id_1));
        assert_eq!(block_indices.blocks_to_chain.get(&block_hash_2), Some(&chain_id_1));

        // Block number 2 should have a single block hash associated with it.
        assert_eq!(
            block_indices.block_number_to_block_hashes.get(&block_number_2),
            Some(&HashSet::from([block_hash_3]))
        );

        // Block hash 3 should be associated with the correct chain ID.
        assert_eq!(block_indices.blocks_to_chain.get(&block_hash_3), Some(&chain_id_2));
    }

    #[test]
    fn insert_chain_validates_insertion() {
        // Create a new BlockIndices instance with an empty state.
        let mut block_indices = BlockIndices::new(0, BTreeMap::new());

        // Define test parameters.
        let chain_id = SidechainId::from(42);

        // Define some example blocks and their hashes.
        let block_hash_1 = B256::from_slice(&[1; 32]);
        let block_hash_2 = B256::from_slice(&[2; 32]);
        let parent_hash = B256::from_slice(&[0; 32]);

        // Define blocks with their numbers and parent hashes.
        let block_1 = SealedBlockWithSenders {
            block: SealedBlock::new(
                SealedHeader::new(
                    Header { parent_hash, number: 1, ..Default::default() },
                    block_hash_1,
                ),
                Default::default(),
            ),
            ..Default::default()
        };
        let block_2 = SealedBlockWithSenders {
            block: SealedBlock::new(
                SealedHeader::new(
                    Header { parent_hash: block_hash_1, number: 2, ..Default::default() },
                    block_hash_2,
                ),
                Default::default(),
            ),
            ..Default::default()
        };

        // Define a chain containing the blocks.
        let chain = Chain::new(vec![block_1, block_2], Default::default(), Default::default());

        // Insert the chain into the BlockIndices.
        block_indices.insert_chain(chain_id, &chain);

        // Check that the blocks are correctly mapped to the chain ID.
        assert_eq!(block_indices.blocks_to_chain.get(&block_hash_1), Some(&chain_id));
        assert_eq!(block_indices.blocks_to_chain.get(&block_hash_2), Some(&chain_id));

        // Check that block numbers map to their respective hashes.
        let mut expected_hashes_1 = HashSet::default();
        expected_hashes_1.insert(block_hash_1);
        assert_eq!(block_indices.block_number_to_block_hashes.get(&1), Some(&expected_hashes_1));

        let mut expected_hashes_2 = HashSet::default();
        expected_hashes_2.insert(block_hash_2);
        assert_eq!(block_indices.block_number_to_block_hashes.get(&2), Some(&expected_hashes_2));

        // Check that the fork_to_child mapping contains the correct parent-child relationship.
        // We take the first block of the chain.
        let mut expected_children = LinkedHashSet::new();
        expected_children.insert(block_hash_1);
        assert_eq!(block_indices.fork_to_child.get(&parent_hash), Some(&expected_children));
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -1,69 +0,0 @@
//! [`ExecutionDataProvider`] implementations used by the tree.
use alloy_eips::ForkBlock;
use alloy_primitives::{BlockHash, BlockNumber};
use reth_provider::{BlockExecutionForkProvider, ExecutionDataProvider, ExecutionOutcome};
use std::collections::BTreeMap;
/// Structure that combines references of required data to be an [`ExecutionDataProvider`].
///
/// Borrows all of its parts, so it is cheap to construct per lookup; lives only as long
/// as the tree state it references.
#[derive(Clone, Debug)]
pub struct BundleStateDataRef<'a> {
    /// The execution outcome after execution of one or more transactions and/or blocks.
    pub execution_outcome: &'a ExecutionOutcome,
    /// The blocks in the sidechain.
    pub sidechain_block_hashes: &'a BTreeMap<BlockNumber, BlockHash>,
    /// The blocks in the canonical chain.
    pub canonical_block_hashes: &'a BTreeMap<BlockNumber, BlockHash>,
    /// Canonical fork
    pub canonical_fork: ForkBlock,
}
impl ExecutionDataProvider for BundleStateDataRef<'_> {
    fn execution_outcome(&self) -> &ExecutionOutcome {
        self.execution_outcome
    }

    fn block_hash(&self, block_number: BlockNumber) -> Option<BlockHash> {
        // A sidechain block shadows the canonical block at the same height; fall back
        // to the canonical chain only when the sidechain has no entry.
        self.sidechain_block_hashes
            .get(&block_number)
            .or_else(|| self.canonical_block_hashes.get(&block_number))
            .copied()
    }
}
impl BlockExecutionForkProvider for BundleStateDataRef<'_> {
    /// Returns the canonical block this sidechain state forked from.
    fn canonical_fork(&self) -> ForkBlock {
        self.canonical_fork
    }
}
/// Structure that owns the relevant data needed to act as an [`ExecutionDataProvider`].
///
/// Owned counterpart of [`BundleStateDataRef`], for when the data must outlive the tree
/// state it was built from.
#[derive(Clone, Debug)]
pub struct ExecutionData {
    /// Execution outcome.
    pub execution_outcome: ExecutionOutcome,
    /// Parent block hashes needed for the EVM `BLOCKHASH` opcode.
    /// NOTE: it does not mean that all hashes are there but all until finalized are there.
    /// Other hashes can be obtained from provider
    pub parent_block_hashes: BTreeMap<BlockNumber, BlockHash>,
    /// Canonical block where state forked from.
    pub canonical_fork: ForkBlock,
}
impl ExecutionDataProvider for ExecutionData {
    fn execution_outcome(&self) -> &ExecutionOutcome {
        &self.execution_outcome
    }

    /// Looks up a parent block hash; only hashes up to the finalized block are stored here.
    fn block_hash(&self, block_number: BlockNumber) -> Option<BlockHash> {
        self.parent_block_hashes.get(&block_number).copied()
    }
}
impl BlockExecutionForkProvider for ExecutionData {
    /// Returns the canonical block this state forked from.
    fn canonical_fork(&self) -> ForkBlock {
        self.canonical_fork
    }
}

View File

@ -1,241 +0,0 @@
use alloy_eips::BlockNumHash;
use alloy_primitives::{BlockHash, BlockNumber};
use std::collections::BTreeMap;
/// This keeps track of (non-finalized) blocks of the canonical chain.
///
/// This is a wrapper type around an ordered set of block numbers and hashes that belong to the
/// canonical chain that is not yet finalized.
#[derive(Debug, Clone, Default)]
pub(crate) struct CanonicalChain {
    /// All blocks of the canonical chain in order of their block number.
    chain: BTreeMap<BlockNumber, BlockHash>,
}
impl CanonicalChain {
    /// Creates a new index from an ordered `number -> hash` map of canonical blocks.
    pub(crate) const fn new(chain: BTreeMap<BlockNumber, BlockHash>) -> Self {
        Self { chain }
    }

    /// Replaces the current chain with the given one.
    #[inline]
    pub(crate) fn replace(&mut self, chain: BTreeMap<BlockNumber, BlockHash>) {
        self.chain = chain;
    }

    /// Returns the block hash of the (non-finalized) canonical block with the given number.
    #[inline]
    pub(crate) fn canonical_hash(&self, number: &BlockNumber) -> Option<BlockHash> {
        self.chain.get(number).copied()
    }

    /// Returns the block number of the (non-finalized) canonical block with the given hash.
    ///
    /// Linear scan over the tracked blocks (the map is keyed by number, not hash).
    #[inline]
    pub(crate) fn canonical_number(&self, block_hash: &BlockHash) -> Option<BlockNumber> {
        self.chain.iter().find_map(|(number, hash)| (hash == block_hash).then_some(*number))
    }

    /// Extends all items from the given iterator to the chain.
    #[inline]
    pub(crate) fn extend(&mut self, blocks: impl Iterator<Item = (BlockNumber, BlockHash)>) {
        self.chain.extend(blocks)
    }

    /// Retains only the elements specified by the predicate.
    #[inline]
    pub(crate) fn retain<F>(&mut self, f: F)
    where
        F: FnMut(&BlockNumber, &mut BlockHash) -> bool,
    {
        self.chain.retain(f)
    }

    /// Returns a reference to the underlying `number -> hash` map.
    #[inline]
    pub(crate) const fn inner(&self) -> &BTreeMap<BlockNumber, BlockHash> {
        &self.chain
    }

    /// Returns the highest tracked block as a number/hash pair, or the default
    /// (zero number and hash) if the chain is empty.
    #[inline]
    pub(crate) fn tip(&self) -> BlockNumHash {
        self.chain
            .last_key_value()
            .map(|(&number, &hash)| BlockNumHash { number, hash })
            .unwrap_or_default()
    }

    /// Iterates over all tracked `(number, hash)` pairs in ascending block order.
    #[inline]
    pub(crate) fn iter(&self) -> impl Iterator<Item = (BlockNumber, BlockHash)> + '_ {
        self.chain.iter().map(|(&number, &hash)| (number, hash))
    }

    /// Consumes the chain and iterates over all `(number, hash)` pairs.
    // NOTE(review): inherent `into_iter` shadows the `IntoIterator` convention
    // (clippy::should_implement_trait); kept as-is since callers use it directly.
    #[inline]
    pub(crate) fn into_iter(self) -> impl Iterator<Item = (BlockNumber, BlockHash)> {
        self.chain.into_iter()
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests for `CanonicalChain` covering replace, lookups, extend, retain and tip.
    use super::*;

    #[test]
    fn test_replace_canonical_chain() {
        // Initialize a chain with some blocks
        let mut initial_chain = BTreeMap::new();
        initial_chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32]));
        initial_chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32]));

        let mut canonical_chain = CanonicalChain::new(initial_chain.clone());

        // Verify initial chain state
        assert_eq!(canonical_chain.chain.len(), 2);
        assert_eq!(
            canonical_chain.chain.get(&BlockNumber::from(1u64)),
            Some(&BlockHash::from([0x01; 32]))
        );

        // Replace with a new chain
        let mut new_chain = BTreeMap::new();
        new_chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32]));
        new_chain.insert(BlockNumber::from(4u64), BlockHash::from([0x04; 32]));
        new_chain.insert(BlockNumber::from(5u64), BlockHash::from([0x05; 32]));

        canonical_chain.replace(new_chain.clone());

        // Verify replaced chain state: old entries are gone, new ones are present.
        assert_eq!(canonical_chain.chain.len(), 3);
        assert!(!canonical_chain.chain.contains_key(&BlockNumber::from(1u64)));
        assert_eq!(
            canonical_chain.chain.get(&BlockNumber::from(3u64)),
            Some(&BlockHash::from([0x03; 32]))
        );
    }

    #[test]
    fn test_canonical_hash_canonical_chain() {
        // Initialize a chain with some blocks
        let mut chain = BTreeMap::new();
        chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32]));
        chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32]));
        chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32]));

        // Create an instance of a canonical chain
        let canonical_chain = CanonicalChain::new(chain.clone());

        // Check that the function returns the correct hash for a given block number
        let block_number = BlockNumber::from(2u64);
        let expected_hash = BlockHash::from([0x02; 32]);
        assert_eq!(canonical_chain.canonical_hash(&block_number), Some(expected_hash));

        // Check that a non-existent block returns None
        let non_existent_block = BlockNumber::from(5u64);
        assert_eq!(canonical_chain.canonical_hash(&non_existent_block), None);
    }

    #[test]
    fn test_canonical_number_canonical_chain() {
        // Initialize a chain with some blocks
        let mut chain = BTreeMap::new();
        chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32]));
        chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32]));
        chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32]));

        // Create an instance of a canonical chain
        let canonical_chain = CanonicalChain::new(chain.clone());

        // Check that the function returns the correct block number for a given block hash
        let block_hash = BlockHash::from([0x02; 32]);
        let expected_number = BlockNumber::from(2u64);
        assert_eq!(canonical_chain.canonical_number(&block_hash), Some(expected_number));

        // Check that a non-existent block hash returns None
        let non_existent_hash = BlockHash::from([0x05; 32]);
        assert_eq!(canonical_chain.canonical_number(&non_existent_hash), None);
    }

    #[test]
    fn test_extend_canonical_chain() {
        // Initialize an empty chain
        let mut canonical_chain = CanonicalChain::new(BTreeMap::new());

        // Create an iterator with some blocks
        let blocks = vec![
            (BlockNumber::from(1u64), BlockHash::from([0x01; 32])),
            (BlockNumber::from(2u64), BlockHash::from([0x02; 32])),
        ]
        .into_iter();

        // Extend the chain with the created blocks
        canonical_chain.extend(blocks);

        // Check if the blocks were added correctly
        assert_eq!(canonical_chain.chain.len(), 2);
        assert_eq!(
            canonical_chain.chain.get(&BlockNumber::from(1u64)),
            Some(&BlockHash::from([0x01; 32]))
        );
        assert_eq!(
            canonical_chain.chain.get(&BlockNumber::from(2u64)),
            Some(&BlockHash::from([0x02; 32]))
        );

        // Test extending with additional blocks again
        let more_blocks = vec![(BlockNumber::from(3u64), BlockHash::from([0x03; 32]))].into_iter();
        canonical_chain.extend(more_blocks);

        assert_eq!(canonical_chain.chain.len(), 3);
        assert_eq!(
            canonical_chain.chain.get(&BlockNumber::from(3u64)),
            Some(&BlockHash::from([0x03; 32]))
        );
    }

    #[test]
    fn test_retain_canonical_chain() {
        // Initialize a chain with some blocks
        let mut chain = BTreeMap::new();
        chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32]));
        chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32]));
        chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32]));

        // Create an instance of CanonicalChain
        let mut canonical_chain = CanonicalChain::new(chain);

        // Retain only blocks with even block numbers
        canonical_chain.retain(|number, _| number % 2 == 0);

        // Check if the chain only contains the block with number 2
        assert_eq!(canonical_chain.chain.len(), 1);
        assert_eq!(
            canonical_chain.chain.get(&BlockNumber::from(2u64)),
            Some(&BlockHash::from([0x02; 32]))
        );

        // Ensure that the blocks with odd numbers were removed
        assert_eq!(canonical_chain.chain.get(&BlockNumber::from(1u64)), None);
        assert_eq!(canonical_chain.chain.get(&BlockNumber::from(3u64)), None);
    }

    #[test]
    fn test_tip_canonical_chain() {
        // Initialize a chain with some blocks
        let mut chain = BTreeMap::new();
        chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32]));
        chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32]));
        chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32]));

        // Create an instance of a canonical chain
        let canonical_chain = CanonicalChain::new(chain);

        // Call the tip method and verify the returned value
        let tip = canonical_chain.tip();
        assert_eq!(tip.number, BlockNumber::from(3u64));
        assert_eq!(tip.hash, BlockHash::from([0x03; 32]));

        // Test with an empty chain: tip falls back to default number and hash.
        let empty_chain = CanonicalChain::new(BTreeMap::new());
        let empty_tip = empty_chain.tip();
        assert_eq!(empty_tip.number, BlockNumber::default());
        assert_eq!(empty_tip.hash, BlockHash::default());
    }
}

View File

@ -1,311 +0,0 @@
//! A chain in a [`BlockchainTree`][super::BlockchainTree].
//!
//! A [`Chain`] contains the state of accounts for the chain after execution of its constituent
//! blocks, as well as a list of the blocks the chain is composed of.
use super::externals::TreeExternals;
use crate::BundleStateDataRef;
use alloy_eips::ForkBlock;
use alloy_primitives::{BlockHash, BlockNumber};
use reth_blockchain_tree_api::{
error::{BlockchainTreeError, InsertBlockErrorKind},
BlockAttachment, BlockValidationKind,
};
use reth_consensus::{ConsensusError, PostExecutionInput};
use reth_evm::execute::{BlockExecutorProvider, Executor};
use reth_execution_errors::BlockExecutionError;
use reth_execution_types::{Chain, ExecutionOutcome};
use reth_primitives::{GotExpected, SealedBlockWithSenders, SealedHeader};
use reth_provider::{
providers::{BundleStateProvider, ConsistentDbView, TreeNodeTypes},
DBProvider, FullExecutionDataProvider, HashedPostStateProvider, ProviderError,
StateRootProvider, TryIntoHistoricalStateProvider,
};
use reth_revm::database::StateProviderDatabase;
use reth_trie::{updates::TrieUpdates, TrieInput};
use reth_trie_parallel::root::ParallelStateRoot;
use std::{
collections::BTreeMap,
ops::{Deref, DerefMut},
time::Instant,
};
/// A chain in the blockchain tree that has functionality to execute blocks and append them to
/// itself.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct AppendableChain {
    /// The wrapped chain: the executed blocks and their accumulated execution state.
    chain: Chain,
}

impl Deref for AppendableChain {
    type Target = Chain;

    /// Gives read access to the inner [`Chain`] via auto-deref.
    fn deref(&self) -> &Self::Target {
        &self.chain
    }
}

impl DerefMut for AppendableChain {
    /// Gives mutable access to the inner [`Chain`] via auto-deref.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.chain
    }
}
impl AppendableChain {
/// Create a new appendable chain from a given chain.
pub const fn new(chain: Chain) -> Self {
    Self { chain }
}

/// Consumes the wrapper and returns the inner [`Chain`].
pub fn into_inner(self) -> Chain {
    self.chain
}
/// Create a new chain that forks off of the canonical chain.
///
/// Validates and executes `block` on top of `parent_header`, reading ancestor state
/// exclusively from the canonical chain (there is no pre-existing sidechain state).
///
/// if [`BlockValidationKind::Exhaustive`] is specified, the method will verify the state root
/// of the block.
pub fn new_canonical_fork<N, E>(
    block: SealedBlockWithSenders,
    parent_header: &SealedHeader,
    canonical_block_hashes: &BTreeMap<BlockNumber, BlockHash>,
    canonical_fork: ForkBlock,
    externals: &TreeExternals<N, E>,
    block_attachment: BlockAttachment,
    block_validation_kind: BlockValidationKind,
) -> Result<Self, InsertBlockErrorKind>
where
    N: TreeNodeTypes,
    E: BlockExecutorProvider<Primitives = N::Primitives>,
{
    // A fresh fork starts with an empty outcome and no sidechain block hashes; all
    // `BLOCKHASH` lookups fall through to the canonical hashes.
    let execution_outcome = ExecutionOutcome::default();
    let empty = BTreeMap::new();

    let state_provider = BundleStateDataRef {
        execution_outcome: &execution_outcome,
        sidechain_block_hashes: &empty,
        canonical_block_hashes,
        canonical_fork,
    };

    let (bundle_state, trie_updates) = Self::validate_and_execute(
        block.clone(),
        parent_header,
        state_provider,
        externals,
        block_attachment,
        block_validation_kind,
    )?;

    // The new sidechain consists of exactly this one block and its execution state.
    Ok(Self::new(Chain::new(vec![block], bundle_state, trie_updates)))
}
/// Create a new chain that forks off of an existing sidechain.
///
/// This differs from [`AppendableChain::new_canonical_fork`] in that this starts a new fork.
///
/// Executes `block` on top of one of this chain's ancestors, then strips the inherited
/// receipts/reverts so the returned chain contains only the new block's state.
/// The receiver (`self`) is not modified.
pub(crate) fn new_chain_fork<N, E>(
    &self,
    block: SealedBlockWithSenders,
    side_chain_block_hashes: BTreeMap<BlockNumber, BlockHash>,
    canonical_block_hashes: &BTreeMap<BlockNumber, BlockHash>,
    canonical_fork: ForkBlock,
    externals: &TreeExternals<N, E>,
    block_validation_kind: BlockValidationKind,
) -> Result<Self, InsertBlockErrorKind>
where
    N: TreeNodeTypes,
    E: BlockExecutorProvider<Primitives = N::Primitives>,
{
    // A genesis block (number 0) cannot fork off anything.
    let parent_number =
        block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?;
    let parent = self.blocks().get(&parent_number).ok_or(
        BlockchainTreeError::BlockNumberNotFoundInChain { block_number: parent_number },
    )?;

    let mut execution_outcome = self.execution_outcome().clone();

    // Revert state to the state after execution of the parent block
    execution_outcome.revert_to(parent.number);

    // Revert changesets to get the state of the parent that we need to apply the change.
    let bundle_state_data = BundleStateDataRef {
        execution_outcome: &execution_outcome,
        sidechain_block_hashes: &side_chain_block_hashes,
        canonical_block_hashes,
        canonical_fork,
    };
    let (block_state, _) = Self::validate_and_execute(
        block.clone(),
        parent,
        bundle_state_data,
        externals,
        BlockAttachment::HistoricalFork,
        block_validation_kind,
    )?;
    // extending will also optimize few things, mostly related to selfdestruct and wiping of
    // storage.
    execution_outcome.extend(block_state);

    // remove all receipts and reverts (except the last one), as they belong to the chain we
    // forked from and not the new chain we are creating.
    let size = execution_outcome.receipts().len();
    execution_outcome.receipts_mut().drain(0..size - 1);
    execution_outcome.state_mut().take_n_reverts(size - 1);
    execution_outcome.set_first_block(block.number);

    // If all is okay, return new chain back. Present chain is not modified.
    Ok(Self::new(Chain::from_block(block, execution_outcome, None)))
}
/// Validate and execute the given block that _extends the canonical chain_, validating its
/// state root after execution if possible and requested.
///
/// Returns the block's [`ExecutionOutcome`] and, for canonical attachments with exhaustive
/// validation, the trie updates produced by the state root computation.
///
/// Note: State root validation is limited to blocks that extend the canonical chain and is
/// optional, see [`BlockValidationKind`]. So this function takes two parameters to determine
/// if the state can and should be validated.
/// - [`BlockAttachment`] represents if the block extends the canonical chain, and thus we can
///   cache the trie state updates.
/// - [`BlockValidationKind`] determines if the state root __should__ be validated.
fn validate_and_execute<EDP, N, E>(
    block: SealedBlockWithSenders,
    parent_block: &SealedHeader,
    bundle_state_data_provider: EDP,
    externals: &TreeExternals<N, E>,
    block_attachment: BlockAttachment,
    block_validation_kind: BlockValidationKind,
) -> Result<(ExecutionOutcome, Option<TrieUpdates>), BlockExecutionError>
where
    EDP: FullExecutionDataProvider,
    N: TreeNodeTypes,
    E: BlockExecutorProvider<Primitives = N::Primitives>,
{
    // some checks are done before blocks comes here.
    externals.consensus.validate_header_against_parent(&block, parent_block)?;

    // get the state provider.
    let canonical_fork = bundle_state_data_provider.canonical_fork();

    // SAFETY: For block execution and parallel state root computation below we open multiple
    // independent database transactions. Upon opening the database transaction the consistent
    // view will check a current tip in the database and throw an error if it doesn't match
    // the one recorded during initialization.
    // It is safe to use consistent view without any special error handling as long as
    // we guarantee that plain state cannot change during processing of new payload.
    // The usage has to be re-evaluated if that was ever to change.
    let consistent_view =
        ConsistentDbView::new_with_latest_tip(externals.provider_factory.clone())?;
    let state_provider = consistent_view
        .provider_ro()?
        // State root calculation can take a while, and we're sure no write transaction
        // will be open in parallel. See https://github.com/paradigmxyz/reth/issues/7509.
        .disable_long_read_transaction_safety()
        .try_into_history_at_block(canonical_fork.number)?;

    let provider = BundleStateProvider::new(state_provider, bundle_state_data_provider);

    let db = StateProviderDatabase::new(&provider);
    let executor = externals.executor_factory.executor(db);
    // Hash is captured before unsealing, as `unseal` consumes the sealed wrapper.
    let block_hash = block.hash();
    let block = block.unseal();

    let state = executor.execute(&block)?;
    externals.consensus.validate_block_post_execution(
        &block,
        PostExecutionInput::new(&state.receipts, &state.requests),
    )?;

    let initial_execution_outcome = ExecutionOutcome::from((state, block.number));

    // check state root if the block extends the canonical chain __and__ if state root
    // validation was requested.
    if block_validation_kind.is_exhaustive() {
        // calculate and check state root
        let start = Instant::now();
        let (state_root, trie_updates) = if block_attachment.is_canonical() {
            // Canonical attachment: fold the provider's accumulated outcome into this
            // block's outcome and compute the root in parallel, keeping the trie updates.
            let mut execution_outcome =
                provider.block_execution_data_provider.execution_outcome().clone();
            execution_outcome.extend(initial_execution_outcome.clone());
            ParallelStateRoot::new(
                consistent_view,
                TrieInput::from_state(provider.hashed_post_state(execution_outcome.state())),
            )
            .incremental_root_with_updates()
            .map(|(root, updates)| (root, Some(updates)))
            .map_err(ProviderError::from)?
        } else {
            // Historical fork: compute the root from this block's state only; trie
            // updates are not cached for non-canonical attachments.
            let hashed_state = provider.hashed_post_state(initial_execution_outcome.state());
            let state_root = provider.state_root(hashed_state)?;
            (state_root, None)
        };
        if block.state_root != state_root {
            return Err(ConsensusError::BodyStateRootDiff(
                GotExpected { got: state_root, expected: block.state_root }.into(),
            )
            .into())
        }

        tracing::debug!(
            target: "blockchain_tree::chain",
            number = block.number,
            hash = %block_hash,
            elapsed = ?start.elapsed(),
            "Validated state root"
        );

        Ok((initial_execution_outcome, trie_updates))
    } else {
        Ok((initial_execution_outcome, None))
    }
}
/// Validate and execute the given block, and append it to this chain.
///
/// This expects that the block's ancestors can be traced back to the `canonical_fork` (the
/// first parent block of the `block`'s chain that is in the canonical chain).
///
/// In other words, expects a gap less (side-) chain: [`canonical_fork..block`] in order to be
/// able to __execute__ the block.
///
/// CAUTION: This will only perform state root check if it's possible: if the `canonical_fork`
/// is the canonical head, or: state root check can't be performed if the given canonical is
/// __not__ the canonical head.
#[track_caller]
#[allow(clippy::too_many_arguments)]
pub(crate) fn append_block<N, E>(
    &mut self,
    block: SealedBlockWithSenders,
    side_chain_block_hashes: BTreeMap<BlockNumber, BlockHash>,
    canonical_block_hashes: &BTreeMap<BlockNumber, BlockHash>,
    externals: &TreeExternals<N, E>,
    canonical_fork: ForkBlock,
    block_attachment: BlockAttachment,
    block_validation_kind: BlockValidationKind,
) -> Result<(), InsertBlockErrorKind>
where
    N: TreeNodeTypes,
    E: BlockExecutorProvider<Primitives = N::Primitives>,
{
    // The new block must build on this chain's current tip.
    let parent_block = self.chain.tip();

    let bundle_state_data = BundleStateDataRef {
        execution_outcome: self.execution_outcome(),
        sidechain_block_hashes: &side_chain_block_hashes,
        canonical_block_hashes,
        canonical_fork,
    };

    let (block_state, _) = Self::validate_and_execute(
        block.clone(),
        parent_block,
        bundle_state_data,
        externals,
        block_attachment,
        block_validation_kind,
    )?;
    // extend the state.
    self.chain.append_block(block, block_state);

    Ok(())
}
}

View File

@ -1,91 +0,0 @@
//! Blockchain tree configuration
/// The configuration for the blockchain tree.
#[derive(Clone, Copy, Debug)]
pub struct BlockchainTreeConfig {
    /// Number of blocks after the last finalized block that we are storing.
    ///
    /// It should be more than the finalization window for the canonical chain.
    max_blocks_in_chain: u64,
    /// The number of blocks that can be re-orged (finalization windows)
    max_reorg_depth: u64,
    /// The number of unconnected blocks that we are buffering
    max_unconnected_blocks: u32,
    /// Number of additional block hashes to save in blockchain tree. For `BLOCKHASH` EVM opcode we
    /// need last 256 block hashes.
    ///
    /// The total number of block hashes retained in-memory will be
    /// `max(additional_canonical_block_hashes, max_reorg_depth)`, and for Ethereum that would
    /// be 256. It covers both number of blocks required for reorg, and number of blocks
    /// required for `BLOCKHASH` EVM opcode.
    num_of_additional_canonical_block_hashes: u64,
}

impl Default for BlockchainTreeConfig {
    fn default() -> Self {
        // The defaults for Ethereum mainnet
        Self {
            // Gasper allows reorgs of any length from 1 to 64.
            max_reorg_depth: 64,
            // This default is just an assumption. Has to be greater than the `max_reorg_depth`.
            max_blocks_in_chain: 65,
            // EVM requires that last 256 block hashes are available.
            num_of_additional_canonical_block_hashes: 256,
            // max unconnected blocks.
            max_unconnected_blocks: 200,
        }
    }
}

impl BlockchainTreeConfig {
    /// Create tree configuration.
    ///
    /// # Panics
    ///
    /// Panics if `max_reorg_depth` exceeds `max_blocks_in_chain`: a sidechain must be able
    /// to hold at least one full finalization window of blocks.
    pub fn new(
        max_reorg_depth: u64,
        max_blocks_in_chain: u64,
        num_of_additional_canonical_block_hashes: u64,
        max_unconnected_blocks: u32,
    ) -> Self {
        // Message now matches the checked condition (`>=`, not strictly greater).
        assert!(
            max_reorg_depth <= max_blocks_in_chain,
            "Side chain size (max_blocks_in_chain) should be at least the finalization window (max_reorg_depth)"
        );
        Self {
            max_blocks_in_chain,
            max_reorg_depth,
            num_of_additional_canonical_block_hashes,
            max_unconnected_blocks,
        }
    }

    /// Return the maximum reorg depth.
    pub const fn max_reorg_depth(&self) -> u64 {
        self.max_reorg_depth
    }

    /// Return the maximum number of blocks in one chain.
    pub const fn max_blocks_in_chain(&self) -> u64 {
        self.max_blocks_in_chain
    }

    /// Return number of additional canonical block hashes that we need to retain
    /// in order to have enough information for EVM execution.
    pub const fn num_of_additional_canonical_block_hashes(&self) -> u64 {
        self.num_of_additional_canonical_block_hashes
    }

    /// Return total number of canonical hashes that we need to retain in order to have enough
    /// information for reorg and EVM execution.
    ///
    /// It is calculated as the maximum of `max_reorg_depth` (which is the number of blocks required
    /// for the deepest reorg possible according to the consensus protocol) and
    /// `num_of_additional_canonical_block_hashes` (which is the number of block hashes needed to
    /// satisfy the `BLOCKHASH` opcode in the EVM. See [`crate::BundleStateDataRef`]).
    ///
    /// Now a `const fn` for consistency with the other getters; `Ord::max` is not callable
    /// in const context, hence the explicit branch.
    pub const fn num_of_canonical_hashes(&self) -> u64 {
        if self.max_reorg_depth > self.num_of_additional_canonical_block_hashes {
            self.max_reorg_depth
        } else {
            self.num_of_additional_canonical_block_hashes
        }
    }

    /// Return max number of unconnected blocks that we are buffering
    pub const fn max_unconnected_blocks(&self) -> u32 {
        self.max_unconnected_blocks
    }
}

View File

@ -1,106 +0,0 @@
//! Blockchain tree externals.
use alloy_primitives::{BlockHash, BlockNumber};
use reth_consensus::{ConsensusError, FullConsensus};
use reth_db::{static_file::BlockHashMask, tables};
use reth_db_api::{cursor::DbCursorRO, transaction::DbTx};
use reth_node_types::NodeTypesWithDB;
use reth_primitives::StaticFileSegment;
use reth_provider::{
providers::ProviderNodeTypes, ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory,
StaticFileProviderFactory, StatsReader,
};
use reth_storage_errors::provider::ProviderResult;
use std::{collections::BTreeMap, sync::Arc};
pub use reth_provider::providers::{NodeTypesForTree, TreeNodeTypes};
/// A container for external components.
///
/// This is a simple container for external components used throughout the blockchain tree
/// implementation:
///
/// - A handle to the database
/// - A handle to the consensus engine
/// - The executor factory to execute blocks with
///
/// Fields are `pub(crate)` so the tree internals can access them directly.
#[derive(Debug)]
pub struct TreeExternals<N: NodeTypesWithDB, E> {
    /// The provider factory, used to commit the canonical chain, or unwind it.
    pub(crate) provider_factory: ProviderFactory<N>,
    /// The consensus engine.
    pub(crate) consensus: Arc<dyn FullConsensus<Error = ConsensusError>>,
    /// The executor factory to execute blocks with.
    pub(crate) executor_factory: E,
}
// Note: previously two adjacent `impl` blocks with identical generic parameters and
// bounds; merged into one for clarity. All method bodies are unchanged.
impl<N: ProviderNodeTypes, E> TreeExternals<N, E> {
    /// Create new tree externals.
    pub fn new(
        provider_factory: ProviderFactory<N>,
        consensus: Arc<dyn FullConsensus<Error = ConsensusError>>,
        executor_factory: E,
    ) -> Self {
        Self { provider_factory, consensus, executor_factory }
    }

    /// Fetches the latest canonical block hashes by walking backwards from the head.
    ///
    /// Returns the hashes sorted by increasing block numbers
    pub(crate) fn fetch_latest_canonical_hashes(
        &self,
        num_hashes: usize,
    ) -> ProviderResult<BTreeMap<BlockNumber, BlockHash>> {
        // Fetch the latest canonical hashes from the database
        let mut hashes = self
            .provider_factory
            .provider()?
            .tx_ref()
            .cursor_read::<tables::CanonicalHeaders>()?
            .walk_back(None)?
            .take(num_hashes)
            .collect::<Result<BTreeMap<BlockNumber, BlockHash>, _>>()?;
        // Fetch the same number of latest canonical hashes from the static_files and merge them
        // with the database hashes. It is needed due to the fact that we're writing
        // directly to static_files in pipeline sync, but to the database in live sync,
        // which means that the latest canonical hashes in the static file might be more recent
        // than in the database, and vice versa, or even some ranges of the latest
        // `num_hashes` blocks may be in database, and some ranges in static_files.
        let static_file_provider = self.provider_factory.static_file_provider();
        let total_headers = static_file_provider.count_entries::<tables::Headers>()? as u64;
        if total_headers > 0 {
            let range =
                total_headers.saturating_sub(1).saturating_sub(num_hashes as u64)..total_headers;
            hashes.extend(range.clone().zip(static_file_provider.fetch_range_with_predicate(
                StaticFileSegment::Headers,
                range,
                |cursor, number| cursor.get_one::<BlockHashMask>(number.into()),
                |_| true,
            )?));
        }
        // We may have fetched more than `num_hashes` hashes, so we need to truncate the result to
        // the requested number.
        let hashes = hashes.into_iter().rev().take(num_hashes).collect();
        Ok(hashes)
    }

    /// Returns the last finalized block number known to the provider, if any.
    pub(crate) fn fetch_latest_finalized_block_number(
        &self,
    ) -> ProviderResult<Option<BlockNumber>> {
        self.provider_factory.provider()?.last_finalized_block_number()
    }

    /// Persists `block_number` as the last finalized block and commits the write transaction.
    pub(crate) fn save_finalized_block_number(
        &self,
        block_number: BlockNumber,
    ) -> ProviderResult<()> {
        let provider_rw = self.provider_factory.provider_rw()?;
        provider_rw.save_finalized_block_number(block_number)?;
        provider_rw.commit()?;
        Ok(())
    }
}

View File

@ -1,59 +0,0 @@
//! Implementation of a tree-like structure for blockchains.
//!
//! The [`BlockchainTree`] can validate, execute, and revert blocks in multiple competing
//! sidechains. This structure is used for Reth's sync mode at the tip instead of the pipeline, and
//! is the primary executor and validator of payloads sent from the consensus layer.
//!
//! Blocks and their resulting state transitions are kept in-memory until they are persisted.
//!
//! ## Feature Flags
//!
//! - `test-utils`: Export utilities for testing
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
/// Re-export of the blockchain tree API.
pub use reth_blockchain_tree_api::*;
pub mod blockchain_tree;
pub use blockchain_tree::BlockchainTree;
pub mod block_indices;
pub use block_indices::BlockIndices;
pub mod chain;
pub use chain::AppendableChain;
pub mod config;
pub use config::BlockchainTreeConfig;
pub mod externals;
pub use externals::TreeExternals;
pub mod shareable;
pub use shareable::ShareableBlockchainTree;
mod bundle;
pub use bundle::{BundleStateDataRef, ExecutionData};
/// Buffer of not executed blocks.
pub mod block_buffer;
mod canonical_chain;
/// Common blockchain tree metrics.
pub mod metrics;
pub use block_buffer::BlockBuffer;
/// Implementation of Tree traits that does nothing.
pub mod noop;
mod state;
use aquamarine as _;

View File

@ -1,153 +0,0 @@
use metrics::Histogram;
use reth_metrics::{
metrics::{Counter, Gauge},
Metrics,
};
use std::time::{Duration, Instant};
/// Metrics for the blockchain tree block buffer.
///
/// Reported under the `blockchain_tree.block_buffer` metric scope.
#[derive(Metrics)]
#[metrics(scope = "blockchain_tree.block_buffer")]
pub struct BlockBufferMetrics {
    /// Total blocks in the block buffer
    pub blocks: Gauge,
}
/// Records per-action durations while a chain is being made canonical, reporting each
/// duration to the corresponding histogram as soon as it is recorded.
#[derive(Debug)]
pub(crate) struct MakeCanonicalDurationsRecorder {
    /// Instant the recorder was created; all durations are measured relative to this.
    start: Instant,
    /// Per-action durations recorded so far, in recording order.
    pub(crate) actions: Vec<(MakeCanonicalAction, Duration)>,
    /// Total elapsed time as of the most recent `record_relative` call.
    latest: Option<Duration>,
    /// Histograms that recorded durations are reported to.
    current_metrics: MakeCanonicalMetrics,
}
impl Default for MakeCanonicalDurationsRecorder {
    /// Starts a fresh recorder anchored at the moment of construction.
    fn default() -> Self {
        let start = Instant::now();
        Self {
            start,
            actions: Vec::new(),
            latest: None,
            current_metrics: MakeCanonicalMetrics::default(),
        }
    }
}
impl MakeCanonicalDurationsRecorder {
    /// Records the duration since last record, saves it for future logging and instantly reports as
    /// a metric with `action` label.
    pub(crate) fn record_relative(&mut self, action: MakeCanonicalAction) {
        let total_elapsed = self.start.elapsed();
        // Duration attributed to this action alone: total elapsed minus what was
        // already attributed to earlier actions.
        let since_last = total_elapsed - self.latest.unwrap_or_default();
        self.current_metrics.record(action, since_last);
        self.actions.push((action, since_last));
        self.latest = Some(total_elapsed);
    }
}
/// Metrics for the entire blockchain tree.
///
/// Reported under the `blockchain_tree` metric scope.
#[derive(Metrics)]
#[metrics(scope = "blockchain_tree")]
pub struct TreeMetrics {
    /// Total number of sidechains (not including the canonical chain)
    pub sidechains: Gauge,
    /// The highest block number in the canonical chain
    pub canonical_chain_height: Gauge,
    /// The number of reorgs
    pub reorgs: Counter,
    /// The latest reorg depth
    pub latest_reorg_depth: Gauge,
    /// Longest sidechain height
    pub longest_sidechain_height: Gauge,
    /// The number of times cached trie updates were used for insert.
    pub trie_updates_insert_cached: Counter,
    /// The number of times trie updates were recomputed for insert.
    pub trie_updates_insert_recomputed: Counter,
}
/// Represents actions for making a canonical chain.
///
/// Each variant maps to its own histogram in [`MakeCanonicalMetrics`]; see
/// [`MakeCanonicalMetrics::record`].
#[derive(Debug, Copy, Clone)]
pub(crate) enum MakeCanonicalAction {
    /// Cloning old blocks for canonicalization.
    CloneOldBlocks,
    /// Finding the canonical header.
    FindCanonicalHeader,
    /// Splitting the chain for canonicalization.
    SplitChain,
    /// Splitting chain forks for canonicalization.
    SplitChainForks,
    /// Merging all chains for canonicalization.
    MergeAllChains,
    /// Updating the canonical index during canonicalization.
    UpdateCanonicalIndex,
    /// Retrieving (cached or recomputed) state trie updates
    RetrieveStateTrieUpdates,
    /// Committing the canonical chain to the database.
    CommitCanonicalChainToDatabase,
    /// Reverting the canonical chain from the database.
    RevertCanonicalChainFromDatabase,
    /// Inserting an old canonical chain.
    InsertOldCanonicalChain,
    /// Clearing trie updates of other children chains after fork choice update.
    ClearTrieUpdatesForOtherChildren,
}
/// Canonicalization metrics.
///
/// One histogram per [`MakeCanonicalAction`] variant, reported under the
/// `blockchain_tree.make_canonical` scope.
#[derive(Metrics)]
#[metrics(scope = "blockchain_tree.make_canonical")]
struct MakeCanonicalMetrics {
    /// Duration of the clone old blocks action.
    clone_old_blocks: Histogram,
    /// Duration of the find canonical header action.
    find_canonical_header: Histogram,
    /// Duration of the split chain action.
    split_chain: Histogram,
    /// Duration of the split chain forks action.
    split_chain_forks: Histogram,
    /// Duration of the merge all chains action.
    merge_all_chains: Histogram,
    /// Duration of the update canonical index action.
    update_canonical_index: Histogram,
    /// Duration of the retrieve state trie updates action.
    retrieve_state_trie_updates: Histogram,
    /// Duration of the commit canonical chain to database action.
    commit_canonical_chain_to_database: Histogram,
    /// Duration of the revert canonical chain from database action.
    revert_canonical_chain_from_database: Histogram,
    /// Duration of the insert old canonical chain action.
    insert_old_canonical_chain: Histogram,
    /// Duration of the clear trie updates of other children chains after fork choice update
    /// action.
    clear_trie_updates_for_other_children: Histogram,
}
impl MakeCanonicalMetrics {
    /// Records `duration` into the histogram that corresponds to `action`.
    pub(crate) fn record(&self, action: MakeCanonicalAction, duration: Duration) {
        // Select the matching histogram first, then record once.
        let histogram = match action {
            MakeCanonicalAction::CloneOldBlocks => &self.clone_old_blocks,
            MakeCanonicalAction::FindCanonicalHeader => &self.find_canonical_header,
            MakeCanonicalAction::SplitChain => &self.split_chain,
            MakeCanonicalAction::SplitChainForks => &self.split_chain_forks,
            MakeCanonicalAction::MergeAllChains => &self.merge_all_chains,
            MakeCanonicalAction::UpdateCanonicalIndex => &self.update_canonical_index,
            MakeCanonicalAction::RetrieveStateTrieUpdates => &self.retrieve_state_trie_updates,
            MakeCanonicalAction::CommitCanonicalChainToDatabase => {
                &self.commit_canonical_chain_to_database
            }
            MakeCanonicalAction::RevertCanonicalChainFromDatabase => {
                &self.revert_canonical_chain_from_database
            }
            MakeCanonicalAction::InsertOldCanonicalChain => &self.insert_old_canonical_chain,
            MakeCanonicalAction::ClearTrieUpdatesForOtherChildren => {
                &self.clear_trie_updates_for_other_children
            }
        };
        histogram.record(duration);
    }
}

View File

@ -1,140 +0,0 @@
use alloy_eips::BlockNumHash;
use alloy_primitives::{BlockHash, BlockNumber};
use reth_blockchain_tree_api::{
self,
error::{BlockchainTreeError, CanonicalError, InsertBlockError, ProviderError},
BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome,
InsertPayloadOk,
};
use reth_primitives::{EthPrimitives, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader};
use reth_provider::{
BlockchainTreePendingStateProvider, CanonStateNotificationSender, CanonStateNotifications,
CanonStateSubscriptions, FullExecutionDataProvider, NodePrimitivesProvider,
};
use reth_storage_errors::provider::ProviderResult;
use std::collections::BTreeMap;
/// A `BlockchainTree` that does nothing.
///
/// Caution: this is only intended for testing purposes, or for wiring components together.
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct NoopBlockchainTree {
    /// Broadcast channel for canon state changes notifications.
    /// Used by the `CanonStateSubscriptions` impl; `None` by default.
    pub canon_state_notification_sender: Option<CanonStateNotificationSender>,
}
impl NoopBlockchainTree {
    /// Create a new `NoopBlockchainTree` with a canon state notification sender.
    ///
    /// Subscribers obtained via `subscribe_to_canonical_state` will attach to this sender.
    pub const fn with_canon_state_notifications(
        canon_state_notification_sender: CanonStateNotificationSender,
    ) -> Self {
        Self { canon_state_notification_sender: Some(canon_state_notification_sender) }
    }
}
// All engine operations are no-ops that report success, except `insert_block` and
// `make_canonical`, which always fail with `BlockHashNotFoundInChain` because the noop
// tree never tracks any blocks.
impl BlockchainTreeEngine for NoopBlockchainTree {
    fn buffer_block(&self, _block: SealedBlockWithSenders) -> Result<(), InsertBlockError> {
        Ok(())
    }
    fn insert_block(
        &self,
        block: SealedBlockWithSenders,
        _validation_kind: BlockValidationKind,
    ) -> Result<InsertPayloadOk, InsertBlockError> {
        // Always rejects: no block can ever be found in an empty tree.
        Err(InsertBlockError::tree_error(
            BlockchainTreeError::BlockHashNotFoundInChain { block_hash: block.hash() },
            block.block,
        ))
    }
    fn finalize_block(&self, _finalized_block: BlockNumber) -> ProviderResult<()> {
        Ok(())
    }
    fn connect_buffered_blocks_to_canonical_hashes_and_finalize(
        &self,
        _last_finalized_block: BlockNumber,
    ) -> Result<(), CanonicalError> {
        Ok(())
    }
    fn update_block_hashes_and_clear_buffered(
        &self,
    ) -> Result<BTreeMap<BlockNumber, BlockHash>, CanonicalError> {
        Ok(BTreeMap::new())
    }
    fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> {
        Ok(())
    }
    fn make_canonical(&self, block_hash: BlockHash) -> Result<CanonicalOutcome, CanonicalError> {
        Err(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }.into())
    }
}
// Presents an empty tree: every lookup yields `None`, `is_canonical` is always `false`,
// and the canonical tip is the default num/hash pair.
impl BlockchainTreeViewer for NoopBlockchainTree {
    fn header_by_hash(&self, _hash: BlockHash) -> Option<SealedHeader> {
        None
    }
    fn block_by_hash(&self, _hash: BlockHash) -> Option<SealedBlock> {
        None
    }
    fn block_with_senders_by_hash(&self, _hash: BlockHash) -> Option<SealedBlockWithSenders> {
        None
    }
    fn buffered_header_by_hash(&self, _block_hash: BlockHash) -> Option<SealedHeader> {
        None
    }
    fn is_canonical(&self, _block_hash: BlockHash) -> Result<bool, ProviderError> {
        Ok(false)
    }
    fn lowest_buffered_ancestor(&self, _hash: BlockHash) -> Option<SealedBlockWithSenders> {
        None
    }
    fn canonical_tip(&self) -> BlockNumHash {
        Default::default()
    }
    fn pending_block_num_hash(&self) -> Option<BlockNumHash> {
        None
    }
    fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec<Receipt>)> {
        None
    }
    fn receipts_by_block_hash(&self, _block_hash: BlockHash) -> Option<Vec<Receipt>> {
        None
    }
}
impl BlockchainTreePendingStateProvider for NoopBlockchainTree {
    /// Always `None`: the noop tree holds no pending state.
    fn find_pending_state_provider(
        &self,
        _block_hash: BlockHash,
    ) -> Option<Box<dyn FullExecutionDataProvider>> {
        None
    }
}
// The noop tree is fixed to the Ethereum primitive types.
impl NodePrimitivesProvider for NoopBlockchainTree {
    type Primitives = EthPrimitives;
}
impl CanonStateSubscriptions for NoopBlockchainTree {
    /// Subscribes to the configured sender if one was provided; otherwise subscribes to a
    /// fresh single-slot channel whose sender is dropped immediately.
    fn subscribe_to_canonical_state(&self) -> CanonStateNotifications {
        match &self.canon_state_notification_sender {
            Some(sender) => sender.subscribe(),
            None => CanonStateNotificationSender::new(1).subscribe(),
        }
    }
}

View File

@ -1,205 +0,0 @@
//! Wrapper around `BlockchainTree` that allows for it to be shared.
use crate::externals::TreeNodeTypes;
use super::BlockchainTree;
use alloy_eips::BlockNumHash;
use alloy_primitives::{BlockHash, BlockNumber};
use parking_lot::RwLock;
use reth_blockchain_tree_api::{
error::{CanonicalError, InsertBlockError},
BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome,
InsertPayloadOk,
};
use reth_evm::execute::BlockExecutorProvider;
use reth_node_types::NodeTypesWithDB;
use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader};
use reth_provider::{
providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateNotifications,
CanonStateSubscriptions, FullExecutionDataProvider, NodePrimitivesProvider, ProviderError,
};
use reth_storage_errors::provider::ProviderResult;
use std::{collections::BTreeMap, sync::Arc};
use tracing::trace;
/// Shareable blockchain tree that is behind a `RwLock`.
#[derive(Clone, Debug)]
pub struct ShareableBlockchainTree<N: NodeTypesWithDB, E> {
    /// The wrapped `BlockchainTree`. Cloning this struct clones the `Arc`, so all clones
    /// share the same underlying tree.
    pub tree: Arc<RwLock<BlockchainTree<N, E>>>,
}
impl<N: NodeTypesWithDB, E> ShareableBlockchainTree<N, E> {
    /// Create a new shareable blockchain tree by wrapping `tree` in an `Arc<RwLock<_>>`.
    pub fn new(tree: BlockchainTree<N, E>) -> Self {
        Self { tree: Arc::new(RwLock::new(tree)) }
    }
}
// Every engine call takes the write lock, delegates to the inner tree, and then refreshes
// the chain metrics — `buffer_block` being the documented exception.
impl<N, E> BlockchainTreeEngine for ShareableBlockchainTree<N, E>
where
    N: TreeNodeTypes,
    E: BlockExecutorProvider<Primitives = N::Primitives>,
{
    fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> {
        let mut tree = self.tree.write();
        // Blockchain tree metrics shouldn't be updated here, see
        // `BlockchainTree::update_chains_metrics` documentation.
        tree.buffer_block(block)
    }
    fn insert_block(
        &self,
        block: SealedBlockWithSenders,
        validation_kind: BlockValidationKind,
    ) -> Result<InsertPayloadOk, InsertBlockError> {
        trace!(target: "blockchain_tree", hash = %block.hash(), number = block.number, parent_hash = %block.parent_hash, "Inserting block");
        let mut tree = self.tree.write();
        let res = tree.insert_block(block, validation_kind);
        // Metrics are refreshed regardless of whether the insert succeeded.
        tree.update_chains_metrics();
        res
    }
    fn finalize_block(&self, finalized_block: BlockNumber) -> ProviderResult<()> {
        trace!(target: "blockchain_tree", finalized_block, "Finalizing block");
        let mut tree = self.tree.write();
        tree.finalize_block(finalized_block)?;
        tree.update_chains_metrics();
        Ok(())
    }
    fn connect_buffered_blocks_to_canonical_hashes_and_finalize(
        &self,
        last_finalized_block: BlockNumber,
    ) -> Result<(), CanonicalError> {
        trace!(target: "blockchain_tree", last_finalized_block, "Connecting buffered blocks to canonical hashes and finalizing the tree");
        let mut tree = self.tree.write();
        let res =
            tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(last_finalized_block);
        // Metrics are refreshed even when the inner call failed.
        tree.update_chains_metrics();
        Ok(res?)
    }
    fn update_block_hashes_and_clear_buffered(
        &self,
    ) -> Result<BTreeMap<BlockNumber, BlockHash>, CanonicalError> {
        let mut tree = self.tree.write();
        let res = tree.update_block_hashes_and_clear_buffered();
        tree.update_chains_metrics();
        Ok(res?)
    }
    fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> {
        trace!(target: "blockchain_tree", "Connecting buffered blocks to canonical hashes");
        let mut tree = self.tree.write();
        let res = tree.connect_buffered_blocks_to_canonical_hashes();
        tree.update_chains_metrics();
        Ok(res?)
    }
    fn make_canonical(&self, block_hash: BlockHash) -> Result<CanonicalOutcome, CanonicalError> {
        trace!(target: "blockchain_tree", %block_hash, "Making block canonical");
        let mut tree = self.tree.write();
        let res = tree.make_canonical(block_hash);
        tree.update_chains_metrics();
        res
    }
}
// Viewer queries only take the read lock; results are cloned out of the tree so the lock
// is never held by callers.
impl<N, E> BlockchainTreeViewer for ShareableBlockchainTree<N, E>
where
    N: TreeNodeTypes,
    E: BlockExecutorProvider<Primitives = N::Primitives>,
{
    fn header_by_hash(&self, hash: BlockHash) -> Option<SealedHeader> {
        trace!(target: "blockchain_tree", ?hash, "Returning header by hash");
        self.tree.read().sidechain_block_by_hash(hash).map(|b| b.sealed_header().clone())
    }
    fn block_by_hash(&self, block_hash: BlockHash) -> Option<SealedBlock> {
        trace!(target: "blockchain_tree", ?block_hash, "Returning block by hash");
        self.tree.read().sidechain_block_by_hash(block_hash).cloned()
    }
    fn block_with_senders_by_hash(&self, block_hash: BlockHash) -> Option<SealedBlockWithSenders> {
        trace!(target: "blockchain_tree", ?block_hash, "Returning block by hash");
        self.tree.read().block_with_senders_by_hash(block_hash).cloned()
    }
    fn buffered_header_by_hash(&self, block_hash: BlockHash) -> Option<SealedHeader> {
        self.tree.read().get_buffered_block(&block_hash).map(|b| b.sealed_header().clone())
    }
    fn is_canonical(&self, hash: BlockHash) -> Result<bool, ProviderError> {
        trace!(target: "blockchain_tree", ?hash, "Checking if block is canonical");
        self.tree.read().is_block_hash_canonical(&hash)
    }
    fn lowest_buffered_ancestor(&self, hash: BlockHash) -> Option<SealedBlockWithSenders> {
        trace!(target: "blockchain_tree", ?hash, "Returning lowest buffered ancestor");
        self.tree.read().lowest_buffered_ancestor(&hash).cloned()
    }
    fn canonical_tip(&self) -> BlockNumHash {
        trace!(target: "blockchain_tree", "Returning canonical tip");
        self.tree.read().block_indices().canonical_tip()
    }
    fn pending_block_num_hash(&self) -> Option<BlockNumHash> {
        trace!(target: "blockchain_tree", "Returning first pending block");
        self.tree.read().block_indices().pending_block_num_hash()
    }
    fn pending_block(&self) -> Option<SealedBlock> {
        trace!(target: "blockchain_tree", "Returning first pending block");
        self.tree.read().pending_block().cloned()
    }
    fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec<Receipt>)> {
        // Single read-lock scope: fetch the pending block, then its receipts.
        let tree = self.tree.read();
        let pending_block = tree.pending_block()?.clone();
        let receipts =
            tree.receipts_by_block_hash(pending_block.hash())?.into_iter().cloned().collect();
        Some((pending_block, receipts))
    }
    fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option<Vec<Receipt>> {
        let tree = self.tree.read();
        Some(tree.receipts_by_block_hash(block_hash)?.into_iter().cloned().collect())
    }
}
impl<N, E> BlockchainTreePendingStateProvider for ShareableBlockchainTree<N, E>
where
    N: TreeNodeTypes,
    E: BlockExecutorProvider<Primitives = N::Primitives>,
{
    /// Looks up post-state data for `block_hash` in the inner tree (read lock only) and
    /// boxes it as a pending state provider.
    fn find_pending_state_provider(
        &self,
        block_hash: BlockHash,
    ) -> Option<Box<dyn FullExecutionDataProvider>> {
        trace!(target: "blockchain_tree", ?block_hash, "Finding pending state provider");
        self.tree
            .read()
            .post_state_data(block_hash)
            .map(|provider| Box::new(provider) as Box<dyn FullExecutionDataProvider>)
    }
}
// Forwards the node's primitive types. Only `E: Send + Sync` is required here — no
// executor bound, since this impl performs no execution.
impl<N, E> NodePrimitivesProvider for ShareableBlockchainTree<N, E>
where
    N: ProviderNodeTypes,
    E: Send + Sync,
{
    type Primitives = N::Primitives;
}
impl<N, E> CanonStateSubscriptions for ShareableBlockchainTree<N, E>
where
    N: TreeNodeTypes,
    E: Send + Sync,
{
    /// Delegates subscription to the inner tree; only the read lock is needed.
    fn subscribe_to_canonical_state(&self) -> CanonStateNotifications {
        trace!(target: "blockchain_tree", "Registered subscriber for canonical state");
        self.tree.read().subscribe_canon_state()
    }
}

View File

@ -1,430 +0,0 @@
//! Blockchain tree state.
use crate::{AppendableChain, BlockBuffer, BlockIndices};
use alloy_primitives::{BlockHash, BlockNumber};
use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders};
use std::collections::{BTreeMap, HashMap};
/// Container to hold the state of the blockchain tree.
#[derive(Debug)]
pub(crate) struct TreeState {
    /// Keeps track of new unique identifiers for chains
    /// (monotonically increasing; see `next_id`).
    block_chain_id_generator: u64,
    /// The tracked chains and their current data.
    pub(crate) chains: HashMap<SidechainId, AppendableChain>,
    /// Indices to block and their connection to the canonical chain.
    ///
    /// This gets modified by the tree itself and is read from engine API/RPC to access the pending
    /// block for example.
    pub(crate) block_indices: BlockIndices,
    /// Unconnected block buffer.
    pub(crate) buffered_blocks: BlockBuffer,
}
impl TreeState {
/// Initializes the tree state with the given last finalized block number and last canonical
/// hashes.
pub(crate) fn new(
last_finalized_block_number: BlockNumber,
last_canonical_hashes: impl IntoIterator<Item = (BlockNumber, BlockHash)>,
buffer_limit: u32,
) -> Self {
Self {
block_chain_id_generator: 0,
chains: Default::default(),
block_indices: BlockIndices::new(
last_finalized_block_number,
BTreeMap::from_iter(last_canonical_hashes),
),
buffered_blocks: BlockBuffer::new(buffer_limit),
}
}
/// Issues a new unique identifier for a new sidechain.
#[inline]
fn next_id(&mut self) -> SidechainId {
let id = self.block_chain_id_generator;
self.block_chain_id_generator += 1;
SidechainId(id)
}
/// Expose internal indices of the `BlockchainTree`.
#[inline]
pub(crate) const fn block_indices(&self) -> &BlockIndices {
&self.block_indices
}
/// Returns the block with matching hash from any side-chain.
///
/// Caution: This will not return blocks from the canonical chain.
#[inline]
pub(crate) fn block_by_hash(&self, block_hash: BlockHash) -> Option<&SealedBlock> {
self.block_with_senders_by_hash(block_hash).map(|block| &block.block)
}
/// Returns the block with matching hash from any side-chain.
///
/// Caution: This will not return blocks from the canonical chain.
#[inline]
pub(crate) fn block_with_senders_by_hash(
&self,
block_hash: BlockHash,
) -> Option<&SealedBlockWithSenders> {
let id = self.block_indices.get_side_chain_id(&block_hash)?;
let chain = self.chains.get(&id)?;
chain.block_with_senders(block_hash)
}
/// Returns the block's receipts with matching hash from any side-chain.
///
/// Caution: This will not return blocks from the canonical chain.
pub(crate) fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option<Vec<&Receipt>> {
let id = self.block_indices.get_side_chain_id(&block_hash)?;
let chain = self.chains.get(&id)?;
chain.receipts_by_block_hash(block_hash)
}
/// Insert a chain into the tree.
///
/// Inserts a chain into the tree and builds the block indices.
pub(crate) fn insert_chain(&mut self, chain: AppendableChain) -> Option<SidechainId> {
if chain.is_empty() {
return None
}
let chain_id = self.next_id();
self.block_indices.insert_chain(chain_id, &chain);
// add chain_id -> chain index
self.chains.insert(chain_id, chain);
Some(chain_id)
}
/// Checks the block buffer for the given block.
pub(crate) fn get_buffered_block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> {
self.buffered_blocks.block(hash)
}
/// Gets the lowest ancestor for the given block in the block buffer.
pub(crate) fn lowest_buffered_ancestor(
&self,
hash: &BlockHash,
) -> Option<&SealedBlockWithSenders> {
self.buffered_blocks.lowest_ancestor(hash)
}
}
/// The ID of a sidechain internally in a [`BlockchainTree`][super::BlockchainTree].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub(crate) struct SidechainId(u64);
impl From<SidechainId> for u64 {
    /// Extracts the raw numeric identifier.
    fn from(value: SidechainId) -> Self {
        let SidechainId(raw) = value;
        raw
    }
}
#[cfg(test)]
impl From<u64> for SidechainId {
    /// Test-only helper for constructing ids from raw numbers.
    fn from(value: u64) -> Self {
        Self(value)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::canonical_chain::CanonicalChain;
use alloy_primitives::B256;
use reth_execution_types::Chain;
use reth_provider::ExecutionOutcome;
#[test]
fn test_tree_state_initialization() {
// Set up some dummy data for initialization
let last_finalized_block_number = 10u64;
let last_canonical_hashes = vec![(9u64, B256::random()), (10u64, B256::random())];
let buffer_limit = 5;
// Initialize the tree state
let tree_state = TreeState::new(
last_finalized_block_number,
last_canonical_hashes.clone(),
buffer_limit,
);
// Verify the tree state after initialization
assert_eq!(tree_state.block_chain_id_generator, 0);
assert_eq!(tree_state.block_indices().last_finalized_block(), last_finalized_block_number);
assert_eq!(
*tree_state.block_indices.canonical_chain().inner(),
*CanonicalChain::new(last_canonical_hashes.into_iter().collect()).inner()
);
assert!(tree_state.chains.is_empty());
assert!(tree_state.buffered_blocks.lru.is_empty());
}
#[test]
fn test_tree_state_next_id() {
// Initialize the tree state
let mut tree_state = TreeState::new(0, vec![], 5);
// Generate a few sidechain IDs
let first_id = tree_state.next_id();
let second_id = tree_state.next_id();
// Verify the generated sidechain IDs and the updated generator state
assert_eq!(first_id, SidechainId(0));
assert_eq!(second_id, SidechainId(1));
assert_eq!(tree_state.block_chain_id_generator, 2);
}
#[test]
fn test_tree_state_insert_chain() {
// Initialize tree state
let mut tree_state = TreeState::new(0, vec![], 5);
// Create a chain with two blocks
let block: SealedBlockWithSenders = Default::default();
let block1_hash = B256::random();
let block2_hash = B256::random();
let mut block1 = block.clone();
let mut block2 = block;
block1.block.set_hash(block1_hash);
block1.block.set_block_number(9);
block2.block.set_hash(block2_hash);
block2.block.set_block_number(10);
let chain = AppendableChain::new(Chain::new(
[block1, block2],
Default::default(),
Default::default(),
));
// Insert the chain into the TreeState
let chain_id = tree_state.insert_chain(chain).unwrap();
// Verify the chain ID and that it was added to the chains collection
assert_eq!(chain_id, SidechainId(0));
assert!(tree_state.chains.contains_key(&chain_id));
// Ensure that the block indices are updated
assert_eq!(
tree_state.block_indices.get_side_chain_id(&block1_hash).unwrap(),
SidechainId(0)
);
assert_eq!(
tree_state.block_indices.get_side_chain_id(&block2_hash).unwrap(),
SidechainId(0)
);
// Ensure that the block chain ID generator was updated
assert_eq!(tree_state.block_chain_id_generator, 1);
// Create an empty chain
let chain_empty = AppendableChain::new(Chain::default());
// Insert the empty chain into the tree state
let chain_id = tree_state.insert_chain(chain_empty);
// Ensure that the empty chain was not inserted
assert!(chain_id.is_none());
// Nothing should have changed and no new chain should have been added
assert!(tree_state.chains.contains_key(&SidechainId(0)));
assert!(!tree_state.chains.contains_key(&SidechainId(1)));
assert_eq!(
tree_state.block_indices.get_side_chain_id(&block1_hash).unwrap(),
SidechainId(0)
);
assert_eq!(
tree_state.block_indices.get_side_chain_id(&block2_hash).unwrap(),
SidechainId(0)
);
assert_eq!(tree_state.block_chain_id_generator, 1);
}
#[test]
fn test_block_by_hash_side_chain() {
// Initialize a tree state with some dummy data
let mut tree_state = TreeState::new(0, vec![], 5);
// Create two side-chain blocks with random hashes
let block1_hash = B256::random();
let block2_hash = B256::random();
let mut block1: SealedBlockWithSenders = Default::default();
let mut block2: SealedBlockWithSenders = Default::default();
block1.block.set_hash(block1_hash);
block1.block.set_block_number(9);
block2.block.set_hash(block2_hash);
block2.block.set_block_number(10);
// Create an chain with these blocks
let chain = AppendableChain::new(Chain::new(
vec![block1.clone(), block2.clone()],
Default::default(),
Default::default(),
));
// Insert the side chain into the TreeState
tree_state.insert_chain(chain).unwrap();
// Retrieve the blocks by their hashes
let retrieved_block1 = tree_state.block_by_hash(block1_hash);
assert_eq!(*retrieved_block1.unwrap(), block1.block);
let retrieved_block2 = tree_state.block_by_hash(block2_hash);
assert_eq!(*retrieved_block2.unwrap(), block2.block);
// Test block_by_hash with a random hash that doesn't exist
let non_existent_hash = B256::random();
let result = tree_state.block_by_hash(non_existent_hash);
// Ensure that no block is found
assert!(result.is_none());
}
#[test]
fn test_block_with_senders_by_hash() {
// Initialize a tree state with some dummy data
let mut tree_state = TreeState::new(0, vec![], 5);
// Create two side-chain blocks with random hashes
let block1_hash = B256::random();
let block2_hash = B256::random();
let mut block1: SealedBlockWithSenders = Default::default();
let mut block2: SealedBlockWithSenders = Default::default();
block1.block.set_hash(block1_hash);
block1.block.set_block_number(9);
block2.block.set_hash(block2_hash);
block2.block.set_block_number(10);
// Create a chain with these blocks
let chain = AppendableChain::new(Chain::new(
vec![block1.clone(), block2.clone()],
Default::default(),
Default::default(),
));
// Insert the side chain into the TreeState
tree_state.insert_chain(chain).unwrap();
// Test to retrieve the blocks with senders by their hashes
let retrieved_block1 = tree_state.block_with_senders_by_hash(block1_hash);
assert_eq!(*retrieved_block1.unwrap(), block1);
let retrieved_block2 = tree_state.block_with_senders_by_hash(block2_hash);
assert_eq!(*retrieved_block2.unwrap(), block2);
// Test block_with_senders_by_hash with a random hash that doesn't exist
let non_existent_hash = B256::random();
let result = tree_state.block_with_senders_by_hash(non_existent_hash);
// Ensure that no block is found
assert!(result.is_none());
}
#[test]
fn test_get_buffered_block() {
// Initialize a tree state with some dummy data
let mut tree_state = TreeState::new(0, vec![], 5);
// Create a block with a random hash and add it to the buffer
let block_hash = B256::random();
let mut block: SealedBlockWithSenders = Default::default();
block.block.set_hash(block_hash);
// Add the block to the buffered blocks in the TreeState
tree_state.buffered_blocks.insert_block(block.clone());
// Test get_buffered_block to retrieve the block by its hash
let retrieved_block = tree_state.get_buffered_block(&block_hash);
assert_eq!(*retrieved_block.unwrap(), block);
// Test get_buffered_block with a non-existent hash
let non_existent_hash = B256::random();
let result = tree_state.get_buffered_block(&non_existent_hash);
// Ensure that no block is found
assert!(result.is_none());
}
#[test]
fn test_lowest_buffered_ancestor() {
// Initialize a tree state with some dummy data
let mut tree_state = TreeState::new(0, vec![], 5);
// Create blocks with random hashes and set up parent-child relationships
let ancestor_hash = B256::random();
let descendant_hash = B256::random();
let mut ancestor_block: SealedBlockWithSenders = Default::default();
let mut descendant_block: SealedBlockWithSenders = Default::default();
ancestor_block.block.set_hash(ancestor_hash);
descendant_block.block.set_hash(descendant_hash);
descendant_block.block.set_parent_hash(ancestor_hash);
// Insert the blocks into the buffer
tree_state.buffered_blocks.insert_block(ancestor_block.clone());
tree_state.buffered_blocks.insert_block(descendant_block.clone());
// Test lowest_buffered_ancestor for the descendant block
let lowest_ancestor = tree_state.lowest_buffered_ancestor(&descendant_hash);
assert!(lowest_ancestor.is_some());
assert_eq!(lowest_ancestor.unwrap().block.hash(), ancestor_hash);
// Test lowest_buffered_ancestor with a non-existent hash
let non_existent_hash = B256::random();
let result = tree_state.lowest_buffered_ancestor(&non_existent_hash);
// Ensure that no ancestor is found
assert!(result.is_none());
}
#[test]
fn test_receipts_by_block_hash() {
// Initialize a tree state with some dummy data
let mut tree_state = TreeState::new(0, vec![], 5);
// Create a block with a random hash and receipts
let block_hash = B256::random();
let receipt1 = Receipt::default();
let receipt2 = Receipt::default();
let mut block: SealedBlockWithSenders = Default::default();
block.block.set_hash(block_hash);
let receipts = vec![receipt1, receipt2];
// Create a chain with the block and its receipts
let chain = AppendableChain::new(Chain::new(
vec![block.clone()],
ExecutionOutcome { receipts: receipts.clone().into(), ..Default::default() },
Default::default(),
));
// Insert the chain into the TreeState
tree_state.insert_chain(chain).unwrap();
// Test receipts_by_block_hash for the inserted block
let retrieved_receipts = tree_state.receipts_by_block_hash(block_hash);
assert!(retrieved_receipts.is_some());
// Check if the correct receipts are returned
let receipts_ref: Vec<&Receipt> = receipts.iter().collect();
assert_eq!(retrieved_receipts.unwrap(), receipts_ref);
// Test receipts_by_block_hash with a non-existent block hash
let non_existent_hash = B256::random();
let result = tree_state.receipts_by_block_hash(non_existent_hash);
// Ensure that no receipts are found
assert!(result.is_none());
}
}