feat: BlockchainTree (#1212)

Co-authored-by: Dragan Rakita <draganrakita@192.168.1.4>
rakita
2023-03-14 19:17:14 +01:00
committed by GitHub
parent 06db495d96
commit 237fd5ce6e
54 changed files with 3409 additions and 367 deletions

Cargo.lock generated

@ -4598,9 +4598,11 @@ dependencies = [
name = "reth-executor"
version = "0.1.0"
dependencies = [
"aquamarine",
"async-trait",
"auto_impl",
"hash-db",
"parking_lot 0.12.1",
"plain_hasher",
"reth-db",
"reth-interfaces",


@ -7,7 +7,7 @@ use reth_net_nat::NatResolver;
use reth_network::NetworkConfigBuilder;
use reth_primitives::{ChainSpec, NodeRecord};
use reth_staged_sync::Config;
use std::path::PathBuf;
use std::{path::PathBuf, sync::Arc};
/// Parameters for configuring the network more granularly via CLI
#[derive(Debug, Args)]
@ -50,7 +50,11 @@ pub struct NetworkArgs {
impl NetworkArgs {
/// Build a [`NetworkConfigBuilder`] from a [`Config`] and a [`ChainSpec`], in addition to the
/// values in this option struct.
pub fn network_config(&self, config: &Config, chain_spec: ChainSpec) -> NetworkConfigBuilder {
pub fn network_config(
&self,
config: &Config,
chain_spec: Arc<ChainSpec>,
) -> NetworkConfigBuilder {
let peers_file = (!self.no_persist_peers).then_some(&self.peers_file);
let network_config_builder = config
.network_config(self.nat, peers_file.map(|f| f.as_ref().to_path_buf()))
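Since the spec is now passed as `Arc<ChainSpec>`, callers share one allocation across the network, executor and consensus components instead of deep-cloning the spec. A hedged sketch of the new calling convention (the `build_network` helper is hypothetical; `NetworkArgs`, `Config` and `NetworkConfigBuilder` come from the hunk above):

use std::sync::Arc;
use reth_network::NetworkConfigBuilder;
use reth_primitives::ChainSpec;
use reth_staged_sync::Config;

// Hypothetical helper living next to `NetworkArgs` in the same module.
fn build_network(args: &NetworkArgs, config: &Config, spec: Arc<ChainSpec>) -> NetworkConfigBuilder {
    // Cloning the Arc only bumps a reference count; the spec itself is shared.
    args.network_config(config, spec)
}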


@ -63,7 +63,7 @@ pub struct ImportCommand {
default_value = "mainnet",
value_parser = genesis_value_parser
)]
chain: ChainSpec,
chain: Arc<ChainSpec>,
/// The path to a block file for import.
///
@ -140,7 +140,7 @@ impl ImportCommand {
.build(file_client.clone(), consensus.clone(), db)
.into_task();
let factory = reth_executor::Factory::new(Arc::new(self.chain.clone()));
let factory = reth_executor::Factory::new(self.chain.clone());
let mut pipeline = Pipeline::builder()
.with_sync_state_updater(file_client)


@ -36,7 +36,7 @@ pub struct InitCommand {
default_value = "mainnet",
value_parser = genesis_value_parser
)]
chain: ChainSpec,
chain: Arc<ChainSpec>,
}
impl InitCommand {


@ -207,8 +207,8 @@ impl<'a, DB: Database> DbTool<'a, DB> {
let chain = random_block_range(0..len, Default::default(), 0..64);
self.db.update(|tx| {
chain.iter().try_for_each(|block| {
insert_canonical_block(tx, block, true)?;
chain.into_iter().try_for_each(|block| {
insert_canonical_block(tx, block, None, true)?;
Ok::<_, eyre::Error>(())
})
})??;


@ -90,7 +90,7 @@ pub struct Command {
default_value = "mainnet",
value_parser = genesis_value_parser
)]
chain: ChainSpec,
chain: Arc<ChainSpec>,
/// Enable Prometheus metrics.
///
@ -443,7 +443,7 @@ impl Command {
builder = builder.with_max_block(max_block)
}
let factory = reth_executor::Factory::new(Arc::new(self.chain.clone()));
let factory = reth_executor::Factory::new(self.chain.clone());
let pipeline = builder
.with_sync_state_updater(updater.clone())
.add_stages(


@ -39,7 +39,7 @@ pub struct Command {
default_value = "mainnet",
value_parser = chain_spec_value_parser
)]
chain: ChainSpec,
chain: Arc<ChainSpec>,
/// Disable the discovery service.
#[command(flatten)]


@ -54,7 +54,7 @@ pub struct Command {
default_value = "mainnet",
value_parser = chain_spec_value_parser
)]
chain: ChainSpec,
chain: Arc<ChainSpec>,
/// Enable Prometheus metrics.
///
@ -171,7 +171,7 @@ impl Command {
stage.execute(&mut tx, input).await?;
}
StageEnum::Execution => {
let factory = reth_executor::Factory::new(Arc::new(self.chain.clone()));
let factory = reth_executor::Factory::new(self.chain.clone());
let mut stage = ExecutionStage::new(factory, 10_000);
stage.commit_threshold = num_blocks;
if !self.skip_unwind {


@ -140,13 +140,13 @@ pub async fn run_test(path: PathBuf) -> eyre::Result<TestOutcome> {
// insert genesis
let header: SealedHeader = suite.genesis_block_header.into();
let genesis_block = SealedBlock { header, body: vec![], ommers: vec![], withdrawals: None };
reth_provider::insert_canonical_block(&tx, &genesis_block, has_block_reward)?;
reth_provider::insert_canonical_block(&tx, genesis_block, None, has_block_reward)?;
let mut last_block = None;
suite.blocks.iter().try_for_each(|block| -> eyre::Result<()> {
let decoded = SealedBlock::decode(&mut block.rlp.as_ref())?;
reth_provider::insert_canonical_block(&tx, &decoded, has_block_reward)?;
last_block = Some(decoded.number);
reth_provider::insert_canonical_block(&tx, decoded, None, has_block_reward)?;
Ok(())
})?;


@ -1,4 +1,6 @@
//! Consensus for the Ethereum network
use std::sync::Arc;
use crate::validation;
use reth_interfaces::consensus::{Consensus, ConsensusError, ForkchoiceState};
use reth_primitives::{ChainSpec, Hardfork, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT, U256};
@ -15,13 +17,13 @@ pub struct BeaconConsensus {
/// Watcher over the forkchoice state
forkchoice_state_rx: watch::Receiver<ForkchoiceState>,
/// Configuration
chain_spec: ChainSpec,
chain_spec: Arc<ChainSpec>,
}
impl BeaconConsensus {
/// Create a new instance of [BeaconConsensus]
pub fn new(
chain_spec: ChainSpec,
chain_spec: Arc<ChainSpec>,
forkchoice_state_rx: watch::Receiver<ForkchoiceState>,
) -> Self {
Self { chain_spec, forkchoice_state_rx }
@ -92,14 +94,14 @@ impl Consensus for BeaconConsensus {
#[cfg(test)]
mod test {
use super::BeaconConsensus;
use reth_interfaces::consensus::Consensus;
use reth_primitives::{ChainSpecBuilder, U256};
use super::BeaconConsensus;
use std::sync::Arc;
#[test]
fn test_has_block_reward_before_paris() {
let chain_spec = ChainSpecBuilder::mainnet().build();
let chain_spec = Arc::new(ChainSpecBuilder::mainnet().build());
let (consensus, _) = BeaconConsensus::builder().build(chain_spec);
assert!(consensus.has_block_reward(U256::ZERO, U256::ZERO));
}


@ -13,7 +13,7 @@ impl BeaconConsensusBuilder {
/// [watch::channel] for updating the forkchoice state.
pub fn build(
self,
chain_spec: ChainSpec,
chain_spec: Arc<ChainSpec>,
) -> (Arc<BeaconConsensus>, watch::Sender<ForkchoiceState>) {
let (forkchoice_state_tx, forkchoice_state_rx) = watch::channel(ForkchoiceState::default());
let inner = Arc::new(BeaconConsensus::new(chain_spec, forkchoice_state_rx));


@ -6,6 +6,12 @@ license = "MIT OR Apache-2.0"
repository = "https://github.com/paradigmxyz/reth"
readme = "README.md"
[package.metadata.cargo-udeps.ignore]
normal = [
# Used for diagrams in docs
"aquamarine",
]
[dependencies]
# reth
reth-primitives = { path = "../primitives" }
@ -26,6 +32,9 @@ auto_impl = "1.0"
tracing = "0.1.37"
tokio = { version = "1.21.2", features = ["sync"] }
# misc
aquamarine = "0.2.1" #docs
triehash = "0.8"
# See if these hashers can be replaced to simplify the libraries
plain_hasher = "0.2"
@ -38,3 +47,7 @@ sha3 = { version = "0.10", default-features = false }
[dev-dependencies]
reth-db = { path = "../storage/db", features = ["test-utils"] }
reth-interfaces = { path = "../interfaces", features = ["test-utils"] }
reth-primitives = { path = "../primitives", features = ["test-utils"] }
reth-provider = { path = "../storage/provider", features = ["test-utils"] }
parking_lot = "0.12"


@ -0,0 +1,312 @@
//! Implementation of [`BlockIndices`] related to [`super::BlockchainTree`]
use super::chain::{BlockChainId, Chain, ForkBlock};
use reth_primitives::{BlockHash, BlockNumber, SealedBlockWithSenders};
use std::collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet};
/// Internal indices of the blocks and chains. This is the main connection
/// between blocks, chains and the canonical chain.
///
/// It contains the list of canonical block hashes, a fork-to-child-blocks index,
/// and a block-hash-to-chain-id index.
pub struct BlockIndices {
/// Last finalized block.
last_finalized_block: BlockNumber,
/// For the EVM's "BLOCKHASH" opcode we require the last 256 block hashes, so we need to keep
/// at least `additional_canonical_block_hashes` + `max_reorg_depth` hashes; for Ethereum that
/// would be 256 + 64.
num_of_additional_canonical_block_hashes: u64,
/// Canonical chain. Contains a number of blocks (depending on `finalization_depth`).
/// These blocks are found in `fork_to_child` but not inside `blocks_to_chain` or
/// `number_to_block`, as those are chain-specific indices.
canonical_chain: BTreeMap<BlockNumber, BlockHash>,
/// Index needed when discarding a chain, so we can remove connected chains from the tree.
/// NOTE: Its keys are only blocks that are forks, not all blocks.
fork_to_child: HashMap<BlockHash, HashSet<BlockHash>>,
/// Block hashes and the side chains they belong to
blocks_to_chain: HashMap<BlockHash, BlockChainId>,
/// Utility index. Block number to block hashes. Can be used by
/// RPC to fetch all pending blocks in the chain by number.
index_number_to_block: HashMap<BlockNumber, HashSet<BlockHash>>,
}
impl BlockIndices {
/// Create new block indices structure
pub fn new(
last_finalized_block: BlockNumber,
num_of_additional_canonical_block_hashes: u64,
canonical_chain: BTreeMap<BlockNumber, BlockHash>,
) -> Self {
Self {
last_finalized_block,
num_of_additional_canonical_block_hashes,
fork_to_child: Default::default(),
canonical_chain,
blocks_to_chain: Default::default(),
index_number_to_block: Default::default(),
}
}
/// Return the number of additional canonical block hashes that we need
/// to keep in order to have enough information for EVM execution.
pub fn num_of_additional_canonical_block_hashes(&self) -> u64 {
self.num_of_additional_canonical_block_hashes
}
/// Return fork to child indices
pub fn fork_to_child(&self) -> &HashMap<BlockHash, HashSet<BlockHash>> {
&self.fork_to_child
}
/// Return block to chain id
pub fn blocks_to_chain(&self) -> &HashMap<BlockHash, BlockChainId> {
&self.blocks_to_chain
}
/// Returns `true` if the Tree knows the block hash.
pub fn contains_pending_block_hash(&self, block_hash: BlockHash) -> bool {
self.blocks_to_chain.contains_key(&block_hash)
}
/// Check if block hash belongs to canonical chain.
pub fn is_block_hash_canonical(&self, block_hash: &BlockHash) -> bool {
self.canonical_chain.range(self.last_finalized_block..).any(|(_, &h)| h == *block_hash)
}
/// Last finalized block
pub fn last_finalized_block(&self) -> BlockNumber {
self.last_finalized_block
}
/// Insert a non-fork block.
pub fn insert_non_fork_block(
&mut self,
block_number: BlockNumber,
block_hash: BlockHash,
chain_id: BlockChainId,
) {
self.index_number_to_block.entry(block_number).or_default().insert(block_hash);
self.blocks_to_chain.insert(block_hash, chain_id);
}
/// Insert the chain's blocks into the block-to-chain and fork-to-child indices
pub fn insert_chain(&mut self, chain_id: BlockChainId, chain: &Chain) {
for (number, block) in chain.blocks().iter() {
// add block -> chain_id index
self.blocks_to_chain.insert(block.hash(), chain_id);
// add number -> block
self.index_number_to_block.entry(*number).or_default().insert(block.hash());
}
let first = chain.first();
// add parent block -> block index
self.fork_to_child.entry(first.parent_hash).or_default().insert(first.hash());
}
/// Get the chain ID the block belongs to
pub fn get_blocks_chain_id(&self, block: &BlockHash) -> Option<BlockChainId> {
self.blocks_to_chain.get(block).cloned()
}
/// Update all block hashes. Iterate over the present and new lists of canonical
/// hashes and compare them. Remove all mismatches, disconnect them, and return
/// all chains that need to be removed.
pub fn update_block_hashes(
&mut self,
hashes: BTreeMap<u64, BlockHash>,
) -> BTreeSet<BlockChainId> {
let mut new_hashes = hashes.iter();
let mut old_hashes = self.canonical_chain().clone().into_iter();
let mut remove = Vec::new();
let mut new_hash = new_hashes.next();
let mut old_hash = old_hashes.next();
loop {
let Some(old_block_value) = old_hash else {
// end of old_hashes canonical chain. The new chain has more blocks than the old chain.
break
};
let Some(new_block_value) = new_hash else {
// The old canonical chain had more blocks than the new chain.
// Remove all remaining blocks.
// This is mostly not going to happen, as a reorg should create a new chain in the Tree.
while let Some(rem) = old_hash {
remove.push(rem);
old_hash = old_hashes.next();
}
break;
};
// compare old and new canonical block number
match new_block_value.0.cmp(&old_block_value.0) {
std::cmp::Ordering::Less => {
// new chain has more past blocks than old chain
new_hash = new_hashes.next();
}
std::cmp::Ordering::Equal => {
if *new_block_value.1 != old_block_value.1 {
// remove block hash as it is different
remove.push(old_block_value);
}
new_hash = new_hashes.next();
old_hash = old_hashes.next();
}
std::cmp::Ordering::Greater => {
// old chain has more past blocks than new chain
remove.push(old_block_value);
old_hash = old_hashes.next()
}
}
}
self.canonical_chain = hashes;
remove.into_iter().fold(BTreeSet::new(), |mut fold, (number, hash)| {
fold.extend(self.remove_block(number, hash));
fold
})
}
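// Editorial worked example (not part of the diff): with the old canonical
// chain {1: a, 2: b, 3: c} and new hashes {1: a, 2: b', 3: c', 4: d}, the
// loop above keeps block 1, flags the old entries for 2 and 3 as mismatches,
// and `remove_block` returns the ids of any side chains that forked off `b`
// or `c`; the new block 4 is simply adopted as the new canonical tip.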
/// Remove a chain from the indices and return the dependent chains that need to be removed.
/// Cleans the tree by removing the chain's blocks from the indices.
pub fn remove_chain(&mut self, chain: &Chain) -> BTreeSet<BlockChainId> {
let mut lose_chains = BTreeSet::new();
for (block_number, block) in chain.blocks().iter() {
let block_hash = block.hash();
lose_chains.extend(self.remove_block(*block_number, block_hash))
}
lose_chains
}
/// Remove a block from the indices.
fn remove_block(
&mut self,
block_number: BlockNumber,
block_hash: BlockHash,
) -> BTreeSet<BlockChainId> {
// rm number -> block
if let Entry::Occupied(mut entry) = self.index_number_to_block.entry(block_number) {
let set = entry.get_mut();
set.remove(&block_hash);
// remove set if empty
if set.is_empty() {
entry.remove();
}
}
// rm block -> chain_id
self.blocks_to_chain.remove(&block_hash);
// rm fork -> child
let removed_fork = self.fork_to_child.remove(&block_hash);
removed_fork
.map(|fork_blocks| {
fork_blocks
.into_iter()
.filter_map(|fork_child| self.blocks_to_chain.remove(&fork_child))
.collect()
})
.unwrap_or_default()
}
/// Remove all blocks from the canonical list and insert new blocks into it.
///
/// It is assumed that the blocks are interconnected and that they connect to the canonical chain.
pub fn canonicalize_blocks(&mut self, blocks: &BTreeMap<BlockNumber, SealedBlockWithSenders>) {
if blocks.is_empty() {
return
}
// Remove all blocks from canonical chain
let first_number = *blocks.first_key_value().unwrap().0;
// this will remove all blocks numbers that are going to be replaced.
self.canonical_chain.retain(|num, _| *num < first_number);
// remove them from block to chain_id index
blocks.iter().map(|(_, b)| (b.number, b.hash(), b.parent_hash)).for_each(
|(number, hash, parent_hash)| {
// rm block -> chain_id
self.blocks_to_chain.remove(&hash);
// rm number -> block
if let Entry::Occupied(mut entry) = self.index_number_to_block.entry(number) {
let set = entry.get_mut();
set.remove(&hash);
// remove set if empty
if set.is_empty() {
entry.remove();
}
}
// rm fork block -> hash
if let Entry::Occupied(mut entry) = self.fork_to_child.entry(parent_hash) {
let set = entry.get_mut();
set.remove(&hash);
// remove set if empty
if set.is_empty() {
entry.remove();
}
}
},
);
// insert new canonical
self.canonical_chain.extend(blocks.iter().map(|(number, block)| (*number, block.hash())))
}
/// Used for block finalization.
/// Returns a list of chains for removal that depend on the finalized canonical chain.
pub fn finalize_canonical_blocks(
&mut self,
finalized_block: BlockNumber,
) -> BTreeSet<BlockChainId> {
// get finalized chains: blocks between [self.last_finalized, finalized_block).
// Don't remove finalized_block, as a sidechain can point to it.
let finalized_blocks: Vec<BlockHash> = self
.canonical_chain
.iter()
.filter(|(&number, _)| number >= self.last_finalized_block && number < finalized_block)
.map(|(_, hash)| *hash)
.collect();
// remove unneeded canonical hashes.
let remove_until =
finalized_block.saturating_sub(self.num_of_additional_canonical_block_hashes);
self.canonical_chain.retain(|&number, _| number >= remove_until);
let mut lose_chains = BTreeSet::new();
for block_hash in finalized_blocks.into_iter() {
// there is a fork block.
if let Some(fork_blocks) = self.fork_to_child.remove(&block_hash) {
lose_chains = fork_blocks.into_iter().fold(lose_chains, |mut fold, fork_child| {
if let Some(lose_chain) = self.blocks_to_chain.remove(&fork_child) {
fold.insert(lose_chain);
}
fold
});
}
}
// set last finalized block.
self.last_finalized_block = finalized_block;
lose_chains
}
/// get canonical hash
pub fn canonical_hash(&self, block_number: &BlockNumber) -> Option<BlockHash> {
self.canonical_chain.get(block_number).cloned()
}
/// get canonical tip
pub fn canonical_tip(&self) -> ForkBlock {
let (&number, &hash) =
self.canonical_chain.last_key_value().expect("There is always the canonical chain");
ForkBlock { number, hash }
}
/// The canonical chain is needed for EVM execution. It should contain the last 256 block hashes.
pub fn canonical_chain(&self) -> &BTreeMap<BlockNumber, BlockHash> {
&self.canonical_chain
}
}
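A minimal usage sketch of the indices above (editorial, not part of the diff; the hashes are dummies, and it assumes `BlockIndices` is reachable at this module path from the crate root):

use reth_executor::blockchain_tree::block_indices::BlockIndices;
use reth_primitives::H256;
use std::collections::BTreeMap;

fn main() {
    // Canonical chain of three blocks with dummy hashes.
    let canonical = BTreeMap::from([
        (0u64, H256([0x00; 32])),
        (1u64, H256([0x01; 32])),
        (2u64, H256([0x02; 32])),
    ]);
    // Last finalized block 0; keep 256 extra hashes for the EVM BLOCKHASH opcode.
    let mut indices = BlockIndices::new(0, 256, canonical);
    assert!(indices.is_block_hash_canonical(&H256([0x02; 32])));
    assert_eq!(indices.canonical_tip().number, 2);
    // Finalizing block 2 prunes side chains that forked below it; none exist here.
    assert!(indices.finalize_canonical_blocks(2).is_empty());
}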


@ -0,0 +1,441 @@
//! Handles the substate and the list of blocks.
//! Has functions to split, branch and append the chain.
use crate::{
execution_result::ExecutionResult,
substate::{SubStateData, SubStateWithProvider},
};
use reth_interfaces::{consensus::Consensus, executor::Error as ExecError, Error};
use reth_primitives::{BlockHash, BlockNumber, SealedBlockWithSenders, SealedHeader, U256};
use reth_provider::{BlockExecutor, ExecutorFactory, StateProvider};
use std::collections::BTreeMap;
/// Internal to BlockchainTree chain identification.
pub(crate) type BlockChainId = u64;
/// A side chain that contains its state and connects to a block found in the canonical chain.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct Chain {
/// Chain substate. Updated state after executing all blocks in the chain.
substate: SubStateData,
/// Changesets for blocks and transactions. Will be used to update tables in the database.
changesets: Vec<ExecutionResult>,
/// Blocks in this chain
blocks: BTreeMap<BlockNumber, SealedBlockWithSenders>,
}
/// Contains the fork block number and hash.
#[derive(Clone, Copy)]
pub struct ForkBlock {
/// Block number of the block that the chain branches from
pub number: u64,
/// Block hash of the block that the chain branches from
pub hash: BlockHash,
}
impl ForkBlock {
/// Return the number hash tuple.
pub fn num_hash(&self) -> (BlockNumber, BlockHash) {
(self.number, self.hash)
}
}
impl Chain {
/// Return blocks found in chain
pub fn blocks(&self) -> &BTreeMap<BlockNumber, SealedBlockWithSenders> {
&self.blocks
}
/// Into inner components
pub fn into_inner(
self,
) -> (BTreeMap<BlockNumber, SealedBlockWithSenders>, Vec<ExecutionResult>, SubStateData) {
(self.blocks, self.changesets, self.substate)
}
/// Return execution results of blocks
pub fn changesets(&self) -> &Vec<ExecutionResult> {
&self.changesets
}
/// Return fork block number and hash.
pub fn fork_block(&self) -> ForkBlock {
let tip = self.first();
ForkBlock { number: tip.number.saturating_sub(1), hash: tip.parent_hash }
}
/// Fork block number
pub fn fork_block_number(&self) -> BlockNumber {
self.first().number.saturating_sub(1)
}
/// Fork block hash
pub fn fork_block_hash(&self) -> BlockHash {
self.first().parent_hash
}
/// First block in chain.
pub fn first(&self) -> &SealedBlockWithSenders {
self.blocks.first_key_value().expect("Chain has at least one block for first").1
}
/// Return the tip of the chain. The chain always has at least one block inside.
pub fn tip(&self) -> &SealedBlockWithSenders {
self.last()
}
/// Return the tip of the chain. The chain always has at least one block inside.
pub fn last(&self) -> &SealedBlockWithSenders {
self.blocks.last_key_value().expect("Chain has at least one block for last").1
}
/// Create new chain with given blocks and execution result.
pub fn new(blocks: Vec<(SealedBlockWithSenders, ExecutionResult)>) -> Self {
let (blocks, changesets): (Vec<_>, Vec<_>) = blocks.into_iter().unzip();
let blocks = blocks.into_iter().map(|b| (b.number, b)).collect::<BTreeMap<_, _>>();
let mut substate = SubStateData::default();
substate.apply(&changesets);
Self { substate, changesets, blocks }
}
/// Create a new chain that joins the canonical chain at the given block.
/// If the parent block is the tip, this marks a fork of the canonical chain.
pub fn new_canonical_fork<SP: StateProvider, C: Consensus, EF: ExecutorFactory>(
block: &SealedBlockWithSenders,
parent_header: &SealedHeader,
canonical_block_hashes: &BTreeMap<BlockNumber, BlockHash>,
provider: &SP,
consensus: &C,
factory: &EF,
) -> Result<Self, Error> {
// substate
let substate = SubStateData::default();
let empty = BTreeMap::new();
let substate_with_sp =
SubStateWithProvider::new(&substate, provider, &empty, canonical_block_hashes);
let changeset = Self::validate_and_execute(
block.clone(),
parent_header,
substate_with_sp,
consensus,
factory,
)?;
Ok(Self::new(vec![(block.clone(), changeset)]))
}
/// Create new chain that branches out from existing side chain.
pub fn new_chain_fork<SP: StateProvider, C: Consensus, EF: ExecutorFactory>(
&self,
block: SealedBlockWithSenders,
side_chain_block_hashes: BTreeMap<BlockNumber, BlockHash>,
canonical_block_hashes: &BTreeMap<BlockNumber, BlockHash>,
provider: &SP,
consensus: &C,
factory: &EF,
) -> Result<Self, Error> {
let parent_number = block.number - 1;
let parent = self
.blocks
.get(&parent_number)
.ok_or(ExecError::BlockNumberNotFoundInChain { block_number: parent_number })?;
// revert changesets
let revert_from = self.changesets.len() - (self.tip().number - parent.number) as usize;
let mut substate = self.substate.clone();
// Revert changesets to get the parent state on top of which the change is applied.
substate.revert(&self.changesets[revert_from..]);
let substate_with_sp = SubStateWithProvider::new(
&substate,
provider,
&side_chain_block_hashes,
canonical_block_hashes,
);
let changeset = Self::validate_and_execute(
block.clone(),
parent,
substate_with_sp,
consensus,
factory,
)?;
substate.apply_one(&changeset);
let chain = Self {
substate,
changesets: vec![changeset],
blocks: BTreeMap::from([(block.number, block)]),
};
// if all is okay, return the new chain. The present chain is not modified.
Ok(chain)
}
/// Validate and execute block and return execution result or error.
fn validate_and_execute<SP: StateProvider, C: Consensus, EF: ExecutorFactory>(
block: SealedBlockWithSenders,
parent_block: &SealedHeader,
substate: SubStateWithProvider<'_, SP>,
consensus: &C,
factory: &EF,
) -> Result<ExecutionResult, Error> {
consensus.validate_header(&block, U256::MAX)?;
consensus.pre_validate_header(&block, parent_block)?;
consensus.pre_validate_block(&block)?;
let (unseal, senders) = block.into_components();
let unseal = unseal.unseal();
let res = factory.with_sp(substate).execute_and_verify_receipt(
&unseal,
U256::MAX,
Some(senders),
)?;
Ok(res)
}
/// Append block to this chain
pub fn append_block<SP: StateProvider, C: Consensus, EF: ExecutorFactory>(
&mut self,
block: SealedBlockWithSenders,
side_chain_block_hashes: BTreeMap<BlockNumber, BlockHash>,
canonical_block_hashes: &BTreeMap<BlockNumber, BlockHash>,
provider: &SP,
consensus: &C,
factory: &EF,
) -> Result<(), Error> {
let (_, parent_block) = self.blocks.last_key_value().expect("Chain has at least one block");
let changeset = Self::validate_and_execute(
block.clone(),
parent_block,
SubStateWithProvider::new(
&self.substate,
provider,
&side_chain_block_hashes,
canonical_block_hashes,
),
consensus,
factory,
)?;
self.substate.apply_one(&changeset);
self.changesets.push(changeset);
self.blocks.insert(block.number, block);
Ok(())
}
/// Merge two chains into one by appending the received chain to the current one.
/// Takes the substate from the newer chain.
pub fn append_chain(&mut self, chain: Chain) -> Result<(), Error> {
let chain_tip = self.tip();
if chain_tip.hash != chain.fork_block_hash() {
return Err(ExecError::AppendChainDoesntConnect {
chain_tip: chain_tip.num_hash(),
other_chain_fork: chain.fork_block().num_hash(),
}
.into())
}
self.blocks.extend(chain.blocks.into_iter());
self.changesets.extend(chain.changesets.into_iter());
self.substate = chain.substate;
Ok(())
}
/// Split the chain at the given number or hash; the block with the given number will be
/// included in the first chain.
///
/// If the block hash is not found, ChainSplit::NoSplitPending is returned.
/// If the split would leave either chain empty, the corresponding NoSplit variant is returned.
///
/// The substate will only be found in the second chain. The first chain's substate is
/// invalid.
pub fn split(mut self, split_at: SplitAt) -> ChainSplit {
let chain_tip = *self.blocks.last_entry().expect("chain is never empty").key();
let block_number = match split_at {
SplitAt::Hash(block_hash) => {
let block_number = self.blocks.iter().find_map(|(num, block)| {
if block.hash() == block_hash {
Some(*num)
} else {
None
}
});
let Some(block_number) = block_number else { return ChainSplit::NoSplitPending(self)};
// If the block number is the same as the tip, the whole chain becomes canonical.
if block_number == chain_tip {
return ChainSplit::NoSplitCanonical(self)
}
block_number
}
SplitAt::Number(block_number) => {
if block_number >= chain_tip {
return ChainSplit::NoSplitCanonical(self)
}
if block_number < *self.blocks.first_entry().expect("chain is never empty").key() {
return ChainSplit::NoSplitPending(self)
}
block_number
}
};
let higher_number_blocks = self.blocks.split_off(&(block_number + 1));
let (first_changesets, second_changeset) = self.changesets.split_at(self.blocks.len());
ChainSplit::Split {
canonical: Chain {
substate: SubStateData::default(),
changesets: first_changesets.to_vec(),
blocks: self.blocks,
},
pending: Chain {
substate: self.substate,
changesets: second_changeset.to_vec(),
blocks: higher_number_blocks,
},
}
}
}
/// Used in splitting the chain.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SplitAt {
/// Split at block number.
Number(BlockNumber),
/// Split at block hash.
Hash(BlockHash),
}
/// Result of splitting the chain.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ChainSplit {
/// Chain is not split. The pending chain is returned.
/// The given split point is higher than the last block,
/// or, in the case of a split by hash, the hash is unknown.
NoSplitPending(Chain),
/// Chain is not split. The canonical chain is returned.
/// The given split point is lower than the first block.
NoSplitCanonical(Chain),
/// Chain is split in two.
/// The given split block is contained in the first chain.
Split {
/// The left chain contains the lower block numbers that become canonicalized.
/// Its substate is empty and not usable.
canonical: Chain,
/// The right chain contains the higher block numbers that are still pending.
/// The substate from the original chain is moved here.
pending: Chain,
},
}
#[cfg(test)]
mod tests {
use super::*;
use crate::substate::AccountSubState;
use reth_primitives::{H160, H256};
use reth_provider::execution_result::AccountInfoChangeSet;
#[test]
fn chain_append() {
let block = SealedBlockWithSenders::default();
let block1_hash = H256([0x01; 32]);
let block2_hash = H256([0x02; 32]);
let block3_hash = H256([0x03; 32]);
let block4_hash = H256([0x04; 32]);
let mut block1 = block.clone();
let mut block2 = block.clone();
let mut block3 = block.clone();
let mut block4 = block.clone();
block1.block.header.hash = block1_hash;
block2.block.header.hash = block2_hash;
block3.block.header.hash = block3_hash;
block4.block.header.hash = block4_hash;
block3.block.header.header.parent_hash = block2_hash;
let mut chain1 = Chain {
substate: Default::default(),
changesets: vec![],
blocks: BTreeMap::from([(1, block1.clone()), (2, block2.clone())]),
};
let chain2 = Chain {
substate: Default::default(),
changesets: vec![],
blocks: BTreeMap::from([(3, block3.clone()), (4, block4.clone())]),
};
assert_eq!(chain1.append_chain(chain2.clone()), Ok(()));
// chain1 got changed so this will fail
assert!(chain1.append_chain(chain2).is_err());
}
#[test]
fn test_number_split() {
let mut substate = SubStateData::default();
let mut account = AccountSubState::default();
account.info.nonce = 10;
substate.accounts.insert(H160([1; 20]), account);
let mut exec1 = ExecutionResult::default();
exec1.block_changesets.insert(H160([2; 20]), AccountInfoChangeSet::default());
let mut exec2 = ExecutionResult::default();
exec2.block_changesets.insert(H160([3; 20]), AccountInfoChangeSet::default());
let mut block1 = SealedBlockWithSenders::default();
let block1_hash = H256([15; 32]);
block1.hash = block1_hash;
block1.senders.push(H160([4; 20]));
let mut block2 = SealedBlockWithSenders::default();
let block2_hash = H256([16; 32]);
block2.hash = block2_hash;
block2.senders.push(H160([4; 20]));
let chain = Chain {
substate: substate.clone(),
changesets: vec![exec1.clone(), exec2.clone()],
blocks: BTreeMap::from([(1, block1.clone()), (2, block2.clone())]),
};
let chain_split1 = Chain {
substate: SubStateData::default(),
changesets: vec![exec1],
blocks: BTreeMap::from([(1, block1.clone())]),
};
let chain_split2 = Chain {
substate,
changesets: vec![exec2.clone()],
blocks: BTreeMap::from([(2, block2.clone())]),
};
// split in two
assert_eq!(
chain.clone().split(SplitAt::Hash(block1_hash)),
ChainSplit::Split { canonical: chain_split1.clone(), pending: chain_split2.clone() }
);
// split at unknown block hash
assert_eq!(
chain.clone().split(SplitAt::Hash(H256([100; 32]))),
ChainSplit::NoSplitPending(chain.clone())
);
// split at higher number
assert_eq!(
chain.clone().split(SplitAt::Number(10)),
ChainSplit::NoSplitCanonical(chain.clone())
);
// split at lower number
assert_eq!(
chain.clone().split(SplitAt::Number(0)),
ChainSplit::NoSplitPending(chain.clone())
);
}
}
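The split semantics above are typically consumed with a match; a small editorial sketch under the same assumptions as the tests (the `canonicalize_up_to` helper is hypothetical):

use reth_executor::blockchain_tree::chain::{Chain, ChainSplit, SplitAt};

// Split a side chain at `number`, returning (canonical part, pending part).
fn canonicalize_up_to(chain: Chain, number: u64) -> (Option<Chain>, Option<Chain>) {
    match chain.split(SplitAt::Number(number)) {
        // Blocks `..=number` become canonical; the rest stays pending.
        ChainSplit::Split { canonical, pending } => (Some(canonical), Some(pending)),
        // Split point at or above the tip: the whole chain is canonical.
        ChainSplit::NoSplitCanonical(chain) => (Some(chain), None),
        // Split point below the first block: nothing becomes canonical.
        ChainSplit::NoSplitPending(chain) => (None, Some(chain)),
    }
}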


@ -0,0 +1,918 @@
//! Implementation of [`BlockchainTree`]
pub mod block_indices;
pub mod chain;
use self::{
block_indices::BlockIndices,
chain::{ChainSplit, SplitAt},
};
use chain::{BlockChainId, Chain, ForkBlock};
use reth_db::{cursor::DbCursorRO, database::Database, tables, transaction::DbTx};
use reth_interfaces::{consensus::Consensus, executor::Error as ExecError, Error};
use reth_primitives::{BlockHash, BlockNumber, ChainSpec, SealedBlock, SealedBlockWithSenders};
use reth_provider::{
ExecutorFactory, HeaderProvider, ShareableDatabase, StateProvider, StateProviderFactory,
Transaction,
};
use std::{
collections::{BTreeMap, HashMap},
sync::Arc,
};
#[cfg_attr(doc, aquamarine::aquamarine)]
/// Tree of chains and their identifiers.
///
/// The mermaid flowchart represents all blocks that can appear in the blockchain.
/// Green blocks belong to the canonical chain and are saved inside the database; they are our
/// main chain. Pending blocks and sidechains are found in memory inside the [`BlockchainTree`].
/// Pending blocks and sidechains use the same mechanisms; the only difference is when they get
/// committed to the database. For pending blocks it is just an append operation, but sidechains
/// need to move the current canonical blocks into the BlockchainTree and flush the sidechain to
/// the database to become the canonical chain.
/// ```mermaid
/// flowchart BT
/// subgraph canonical chain
/// CanonState:::state
/// block0canon:::canon -->block1canon:::canon -->block2canon:::canon -->block3canon:::canon --> block4canon:::canon --> block5canon:::canon
/// end
/// block5canon --> block6pending1:::pending
/// block5canon --> block6pending2:::pending
/// subgraph sidechain2
/// S2State:::state
/// block3canon --> block4s2:::sidechain --> block5s2:::sidechain
/// end
/// subgraph sidechain1
/// S1State:::state
/// block2canon --> block3s1:::sidechain --> block4s1:::sidechain --> block5s1:::sidechain --> block6s1:::sidechain
/// end
/// classDef state fill:#1882C4
/// classDef canon fill:#8AC926
/// classDef pending fill:#FFCA3A
/// classDef sidechain fill:#FF595E
/// ```
///
///
/// Main functions:
/// * insert_block: Connect a block to a chain, execute it, and if valid insert the block into
/// the tree.
/// * finalize_block: Remove chains that join to the now finalized block, as those chains become
/// invalid.
/// * make_canonical: Check if we have the hash of the block that we want to make canonical and
/// commit it to the db. If we don't have the block, pipeline syncing should start to fetch the
/// blocks from p2p. Reorg the tables if the canonical chain changes.
pub struct BlockchainTree<DB: Database, C: Consensus, EF: ExecutorFactory> {
/// The tracked chains and their data
chains: HashMap<BlockChainId, Chain>,
/// Static blockchain id generator
block_chain_id_generator: u64,
/// Indices of blocks and their connections.
block_indices: BlockIndices,
/// Number of blocks after the finalized block that we are storing. It should be more than
/// the finalization window.
max_blocks_in_chain: u64,
/// Finalization window. The number of blocks that can be reorged
max_reorg_depth: u64,
/// Externals
externals: Externals<DB, C, EF>,
}
/// Container for external abstractions.
struct Externals<DB: Database, C: Consensus, EF: ExecutorFactory> {
/// Used to save sidechains, do reorgs, and push new blocks to the canonical chain inside the db.
db: DB,
/// Consensus checks
consensus: C,
/// Create executor to execute blocks.
executor_factory: EF,
/// Chain spec
chain_spec: Arc<ChainSpec>,
}
impl<DB: Database, C: Consensus, EF: ExecutorFactory> Externals<DB, C, EF> {
/// Return the shareable database helper structure.
fn sharable_db(&self) -> ShareableDatabase<&DB> {
ShareableDatabase::new(&self.db, self.chain_spec.clone())
}
}
/// Helper structure that wraps chains and indices to search for a block hash across the chains.
pub struct BlockHashes<'a> {
/// Chains
pub chains: &'a mut HashMap<BlockChainId, Chain>,
/// Indices
pub indices: &'a BlockIndices,
}
impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF> {
/// New blockchain tree
pub fn new(
db: DB,
consensus: C,
executor_factory: EF,
chain_spec: Arc<ChainSpec>,
max_reorg_depth: u64,
max_blocks_in_chain: u64,
num_of_additional_canonical_block_hashes: u64,
) -> Result<Self, Error> {
if max_reorg_depth > max_blocks_in_chain {
panic!("Side chain size should be more than the finalization window");
}
let last_canonical_hashes = db
.tx()?
.cursor_read::<tables::CanonicalHeaders>()?
.walk_back(None)?
.take((max_reorg_depth + num_of_additional_canonical_block_hashes) as usize)
.collect::<Result<Vec<(BlockNumber, BlockHash)>, _>>()?;
// TODO(rakita) save last finalized block inside database but for now just take
// tip-max_reorg_depth
// task: https://github.com/paradigmxyz/reth/issues/1712
let (last_finalized_block_number, _) =
if last_canonical_hashes.len() > max_reorg_depth as usize {
last_canonical_hashes[max_reorg_depth as usize]
} else {
// it is in reverse order from tip to N
last_canonical_hashes.last().cloned().unwrap_or_default()
};
let externals = Externals { db, consensus, executor_factory, chain_spec };
Ok(Self {
externals,
block_chain_id_generator: 0,
chains: Default::default(),
block_indices: BlockIndices::new(
last_finalized_block_number,
num_of_additional_canonical_block_hashes,
BTreeMap::from_iter(last_canonical_hashes.into_iter()),
),
max_blocks_in_chain,
max_reorg_depth,
})
}
/// Fork a side chain, or append the block if its parent is the tip of the chain
fn fork_side_chain(
&mut self,
block: SealedBlockWithSenders,
chain_id: BlockChainId,
) -> Result<(), Error> {
let block_hashes = self.all_chain_hashes(chain_id);
// get canonical fork.
let canonical_fork =
self.canonical_fork(chain_id).ok_or(ExecError::BlockChainIdConsistency { chain_id })?;
// get chain that block needs to join to.
let parent_chain = self
.chains
.get_mut(&chain_id)
.ok_or(ExecError::BlockChainIdConsistency { chain_id })?;
let chain_tip = parent_chain.tip().hash();
let canonical_block_hashes = self.block_indices.canonical_chain();
// get canonical tip
let (_, canonical_tip_hash) =
canonical_block_hashes.last_key_value().map(|(i, j)| (*i, *j)).unwrap_or_default();
let db = self.externals.sharable_db();
let provider = if canonical_fork.hash == canonical_tip_hash {
Box::new(db.latest()?) as Box<dyn StateProvider>
} else {
Box::new(db.history_by_block_number(canonical_fork.number)?) as Box<dyn StateProvider>
};
// append the block if it is continuing the chain.
if chain_tip == block.parent_hash {
let block_hash = block.hash();
let block_number = block.number;
parent_chain.append_block(
block,
block_hashes,
canonical_block_hashes,
&provider,
&self.externals.consensus,
&self.externals.executor_factory,
)?;
drop(provider);
self.block_indices.insert_non_fork_block(block_number, block_hash, chain_id)
} else {
let chain = parent_chain.new_chain_fork(
block,
block_hashes,
canonical_block_hashes,
&provider,
&self.externals.consensus,
&self.externals.executor_factory,
)?;
// release the lifetime with a drop
drop(provider);
self.insert_chain(chain);
}
Ok(())
}
/// Fork canonical chain by creating new chain
pub fn fork_canonical_chain(&mut self, block: SealedBlockWithSenders) -> Result<(), Error> {
let canonical_block_hashes = self.block_indices.canonical_chain();
let (_, canonical_tip) =
canonical_block_hashes.last_key_value().map(|(i, j)| (*i, *j)).unwrap_or_default();
// create state provider
let db = self.externals.sharable_db();
let parent_header = db
.header(&block.parent_hash)?
.ok_or(ExecError::CanonicalChain { block_hash: block.parent_hash })?;
let provider = if block.parent_hash == canonical_tip {
Box::new(db.latest()?) as Box<dyn StateProvider>
} else {
Box::new(db.history_by_block_number(block.number - 1)?) as Box<dyn StateProvider>
};
let parent_header = parent_header.seal(block.parent_hash);
let chain = Chain::new_canonical_fork(
&block,
&parent_header,
canonical_block_hashes,
&provider,
&self.externals.consensus,
&self.externals.executor_factory,
)?;
drop(provider);
self.insert_chain(chain);
Ok(())
}
/// Get all block hashes from a chain that are not canonical. This is a one-time operation per
/// block. The reason this is not cached is to save memory.
fn all_chain_hashes(&self, chain_id: BlockChainId) -> BTreeMap<BlockNumber, BlockHash> {
// find chain and iterate over it,
let mut chain_id = chain_id;
let mut hashes = BTreeMap::new();
loop {
let Some(chain) = self.chains.get(&chain_id) else { return hashes };
hashes.extend(chain.blocks().values().map(|b| (b.number, b.hash())));
let fork_block = chain.fork_block_hash();
if let Some(next_chain_id) = self.block_indices.get_blocks_chain_id(&fork_block) {
chain_id = next_chain_id;
} else {
// if there is no fork block that points to another chain, break the loop.
// it means that this fork joins the canonical chain.
break
}
}
hashes
}
/// Getting the canonical fork tells us what kind of provider we should execute the block on:
/// the latest state provider or a history state provider.
/// Returns None if chain_id is not known.
fn canonical_fork(&self, chain_id: BlockChainId) -> Option<ForkBlock> {
let mut chain_id = chain_id;
let mut fork;
loop {
// chain fork block
fork = self.chains.get(&chain_id)?.fork_block();
// get fork block chain
if let Some(fork_chain_id) = self.block_indices.get_blocks_chain_id(&fork.hash) {
chain_id = fork_chain_id;
continue
}
break
}
if self.block_indices.canonical_hash(&fork.number) == Some(fork.hash) {
Some(fork)
} else {
None
}
}
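// Editorial example (not part of the diff): if side chain C2 forks off side
// chain C1, and C1 forks off canonical block B, `canonical_fork(C2)` walks
// C2 -> C1 -> B and returns B's fork block; if the walk ends on a hash that
// is not canonical, it returns None.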
/// Insert a chain into the tree and tie the blocks to it.
/// Helper function that handles indexing and inserting.
fn insert_chain(&mut self, chain: Chain) -> BlockChainId {
let chain_id = self.block_chain_id_generator;
self.block_chain_id_generator += 1;
self.block_indices.insert_chain(chain_id, &chain);
// add chain_id -> chain index
self.chains.insert(chain_id, chain);
chain_id
}
/// Insert a block into the tree: recover the transaction signers and
/// internally call [`BlockchainTree::insert_block_with_senders`].
pub fn insert_block(&mut self, block: SealedBlock) -> Result<bool, Error> {
let block = block.seal_with_senders().ok_or(ExecError::SenderRecoveryError)?;
self.insert_block_with_senders(&block)
}
/// Insert a block with senders into the tree.
/// Returns `true` if:
/// 1. It is part of the blockchain tree
/// 2. It is part of the canonical chain
/// 3. Its parent is part of the blockchain tree and we can fork at the parent
/// 4. Its parent is part of the canonical chain and we can fork at the parent
/// Otherwise it will return `false`, indicating that neither the block nor its parent
/// is part of the chain or any sidechains. This means that if the block becomes canonical
/// we need to fetch the missing blocks over p2p.
pub fn insert_block_with_senders(
&mut self,
block: &SealedBlockWithSenders,
) -> Result<bool, Error> {
// check if block number is inside the pending block window
let last_finalized_block = self.block_indices.last_finalized_block();
if block.number <= last_finalized_block {
return Err(ExecError::PendingBlockIsFinalized {
block_number: block.number,
block_hash: block.hash(),
last_finalized: last_finalized_block,
}
.into())
}
// we will not even try to insert blocks that are too far in the future.
if block.number > last_finalized_block + self.max_blocks_in_chain {
return Err(ExecError::PendingBlockIsInFuture {
block_number: block.number,
block_hash: block.hash(),
last_finalized: last_finalized_block,
}
.into())
}
// check if block is already inside Tree
if self.block_indices.contains_pending_block_hash(block.hash()) {
// block is known, return that it is inserted
return Ok(true)
}
// check if block is part of canonical chain
if self.block_indices.canonical_hash(&block.number) == Some(block.hash()) {
// block is part of canonical chain
return Ok(true)
}
// check if block parent can be found in Tree
if let Some(parent_chain) = self.block_indices.get_blocks_chain_id(&block.parent_hash) {
self.fork_side_chain(block.clone(), parent_chain)?;
// TODO save pending block to database
// https://github.com/paradigmxyz/reth/issues/1713
return Ok(true)
}
// if not found, check if the parent can be found inside canonical chain.
if Some(block.parent_hash) == self.block_indices.canonical_hash(&(block.number - 1)) {
// create new chain that points to that block
self.fork_canonical_chain(block.clone())?;
// TODO save pending block to database
// https://github.com/paradigmxyz/reth/issues/1713
return Ok(true)
}
// NOTE: Block doesn't have a parent, and if we receive this block in `make_canonical`
// function this could be a trigger to initiate p2p syncing, as we are missing the
// parent.
Ok(false)
}
/// Finalize blocks up to the given block number and remove the dependent chains from the tree.
pub fn finalize_block(&mut self, finalized_block: BlockNumber) {
let mut remove_chains = self.block_indices.finalize_canonical_blocks(finalized_block);
while let Some(chain_id) = remove_chains.pop_first() {
if let Some(chain) = self.chains.remove(&chain_id) {
remove_chains.extend(self.block_indices.remove_chain(&chain));
}
}
}
/// Update canonical hashes. Reads the last N canonical blocks from the database and updates all indices.
pub fn update_canonical_hashes(
&mut self,
last_finalized_block: BlockNumber,
) -> Result<(), Error> {
self.finalize_block(last_finalized_block);
let num_of_canonical_hashes =
self.max_reorg_depth + self.block_indices.num_of_additional_canonical_block_hashes();
let last_canonical_hashes = self
.externals
.db
.tx()?
.cursor_read::<tables::CanonicalHeaders>()?
.walk_back(None)?
.take(num_of_canonical_hashes as usize)
.collect::<Result<BTreeMap<BlockNumber, BlockHash>, _>>()?;
let mut remove_chains = self.block_indices.update_block_hashes(last_canonical_hashes);
// remove all chains that got discarded
while let Some(chain_id) = remove_chains.pop_first() {
if let Some(chain) = self.chains.remove(&chain_id) {
remove_chains.extend(self.block_indices.remove_chain(&chain));
}
}
Ok(())
}
/// Split a chain and return the canonical part of it. The pending part is reinserted into the
/// tree with the same chain_id.
fn split_chain(&mut self, chain_id: BlockChainId, chain: Chain, split_at: SplitAt) -> Chain {
match chain.split(split_at) {
ChainSplit::Split { canonical, pending } => {
// the rest of the split chain is inserted back with the same chain_id.
self.block_indices.insert_chain(chain_id, &pending);
self.chains.insert(chain_id, pending);
canonical
}
ChainSplit::NoSplitCanonical(canonical) => canonical,
ChainSplit::NoSplitPending(_) => {
panic!("Should not happen as block indices guarantee structure of blocks")
}
}
}
/// Make the block and its parents canonical. Unwind chains to the database if necessary.
///
/// If the block is already part of the canonical chain, return Ok.
pub fn make_canonical(&mut self, block_hash: &BlockHash) -> Result<(), Error> {
let chain_id = if let Some(chain_id) = self.block_indices.get_blocks_chain_id(block_hash) {
chain_id
} else {
// If block is already canonical don't return error.
if self.block_indices.is_block_hash_canonical(block_hash) {
return Ok(())
}
return Err(ExecError::BlockHashNotFoundInChain { block_hash: *block_hash }.into())
};
let chain = self.chains.remove(&chain_id).expect("To be present");
// we are splitting the chain as there is a possibility that only part of the chain gets canonicalized.
let canonical = self.split_chain(chain_id, chain, SplitAt::Hash(*block_hash));
let mut block_fork = canonical.fork_block();
let mut block_fork_number = canonical.fork_block_number();
let mut chains_to_promote = vec![canonical];
// loop while fork blocks are found in Tree.
while let Some(chain_id) = self.block_indices.get_blocks_chain_id(&block_fork.hash) {
let chain = self.chains.remove(&chain_id).expect("Fork chain to be present");
block_fork = chain.fork_block();
let canonical = self.split_chain(chain_id, chain, SplitAt::Number(block_fork_number));
block_fork_number = canonical.fork_block_number();
chains_to_promote.push(canonical);
}
let old_tip = self.block_indices.canonical_tip();
// Merge all chains into one chain.
let mut new_canon_chain = chains_to_promote.pop().expect("There is at least one block");
for chain in chains_to_promote.into_iter().rev() {
new_canon_chain.append_chain(chain).expect("We have just built the chain.");
}
// update canonical index
self.block_indices.canonicalize_blocks(new_canon_chain.blocks());
// if joins to the tip
if new_canon_chain.fork_block_hash() == old_tip.hash {
// append to database
self.commit_canonical(new_canon_chain)?;
} else {
// it forks to canonical block that is not the tip.
let canon_fork = new_canon_chain.fork_block();
// sanity check
if self.block_indices.canonical_hash(&canon_fork.number) != Some(canon_fork.hash) {
unreachable!("all chains should point to canonical chain.");
}
// revert `N` blocks from the current canonical chain and put them inside the BlockchainTree.
// This is the main reorg operation on the tables.
let old_canon_chain = self.revert_canonical(canon_fork.number)?;
self.commit_canonical(new_canon_chain)?;
// TODO we can potentially merge now reverted canonical chain with
// one of the chain from the tree. Low priority.
// insert old canonical chain to BlockchainTree.
self.insert_chain(old_canon_chain);
}
Ok(())
}
/// Commit the chain for it to become canonical. Assumes we are appending to the db tip.
fn commit_canonical(&mut self, chain: Chain) -> Result<(), Error> {
let mut tx = Transaction::new(&self.externals.db)?;
let new_tip = chain.tip().number;
let (blocks, changesets, _) = chain.into_inner();
for item in blocks.into_iter().zip(changesets.into_iter()) {
let ((_, block), changeset) = item;
tx.insert_block(block, self.externals.chain_spec.as_ref(), changeset)
.map_err(|e| ExecError::CanonicalCommit { inner: e.to_string() })?;
}
// update pipeline progress.
tx.update_pipeline_stages(new_tip)
.map_err(|e| ExecError::PipelineStatusUpdate { inner: e.to_string() })?;
tx.commit()?;
Ok(())
}
/// Revert canonical blocks from the database and insert them into the pending table.
/// The revert is non-inclusive: `revert_until` stays in the db.
/// Returns the chain that represents the reverted canonical blocks.
fn revert_canonical(&mut self, revert_until: BlockNumber) -> Result<Chain, Error> {
// read data that is needed for new sidechain
let mut tx = Transaction::new(&self.externals.db)?;
// read blocks and execution results from the database, and remove traces of the blocks from the tables.
let blocks_and_execution = tx
.take_block_and_execution_range(
self.externals.chain_spec.as_ref(),
(revert_until + 1)..,
)
.map_err(|e| ExecError::CanonicalRevert { inner: e.to_string() })?;
// update pipeline progress.
tx.update_pipeline_stages(revert_until)
.map_err(|e| ExecError::PipelineStatusUpdate { inner: e.to_string() })?;
tx.commit()?;
let chain = Chain::new(blocks_and_execution);
Ok(chain)
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use super::*;
use parking_lot::Mutex;
use reth_db::{
mdbx::{test_utils::create_test_rw_db, Env, WriteMap},
transaction::DbTxMut,
};
use reth_interfaces::test_utils::TestConsensus;
use reth_primitives::{hex_literal::hex, proofs::EMPTY_ROOT, ChainSpecBuilder, H256, MAINNET};
use reth_provider::{
execution_result::ExecutionResult, insert_block, test_utils::blocks::BlockChainTestData,
BlockExecutor,
};
struct TestFactory {
exec_result: Arc<Mutex<Vec<ExecutionResult>>>,
chain_spec: Arc<ChainSpec>,
}
impl TestFactory {
fn new(chain_spec: Arc<ChainSpec>) -> Self {
Self { exec_result: Arc::new(Mutex::new(Vec::new())), chain_spec }
}
fn extend(&self, exec_res: Vec<ExecutionResult>) {
self.exec_result.lock().extend(exec_res.into_iter());
}
}
struct TestExecutor(Option<ExecutionResult>);
impl<SP: StateProvider> BlockExecutor<SP> for TestExecutor {
fn execute(
&mut self,
_block: &reth_primitives::Block,
_total_difficulty: reth_primitives::U256,
_senders: Option<Vec<reth_primitives::Address>>,
) -> Result<ExecutionResult, ExecError> {
self.0.clone().ok_or(ExecError::VerificationFailed)
}
fn execute_and_verify_receipt(
&mut self,
_block: &reth_primitives::Block,
_total_difficulty: reth_primitives::U256,
_senders: Option<Vec<reth_primitives::Address>>,
) -> Result<ExecutionResult, ExecError> {
self.0.clone().ok_or(ExecError::VerificationFailed)
}
}
impl ExecutorFactory for TestFactory {
type Executor<T: StateProvider> = TestExecutor;
fn with_sp<SP: StateProvider>(&self, _sp: SP) -> Self::Executor<SP> {
let exec_res = self.exec_result.lock().pop();
TestExecutor(exec_res)
}
fn chain_spec(&self) -> &ChainSpec {
self.chain_spec.as_ref()
}
}
type TestExternals = (Arc<Env<WriteMap>>, TestConsensus, TestFactory, Arc<ChainSpec>);
fn externals(exec_res: Vec<ExecutionResult>) -> TestExternals {
let db = create_test_rw_db();
let consensus = TestConsensus::default();
let chain_spec = Arc::new(
ChainSpecBuilder::default()
.chain(MAINNET.chain)
.genesis(MAINNET.genesis.clone())
.shanghai_activated()
.build(),
);
let executor_factory = TestFactory::new(chain_spec.clone());
executor_factory.extend(exec_res);
(db, consensus, executor_factory, chain_spec)
}
fn setup(mut genesis: SealedBlock, externals: &TestExternals) {
// insert genesis to db.
genesis.header.header.number = 10;
genesis.header.header.state_root = EMPTY_ROOT;
let tx_mut = externals.0.tx_mut().unwrap();
insert_block(&tx_mut, genesis.clone(), None, false, Some((0, 0))).unwrap();
// insert first 10 blocks
for i in 0..10 {
tx_mut.put::<tables::CanonicalHeaders>(i, H256([100 + i as u8; 32])).unwrap();
}
tx_mut.commit().unwrap();
}
/// Test data structure that will check tree internals
#[derive(Default, Debug)]
struct TreeTester {
/// Number of chains
chain_num: Option<usize>,
/// Check block to chain index
block_to_chain: Option<HashMap<BlockHash, BlockChainId>>,
/// Check fork to child index
fork_to_child: Option<HashMap<BlockHash, HashSet<BlockHash>>>,
}
impl TreeTester {
fn with_chain_num(mut self, chain_num: usize) -> Self {
self.chain_num = Some(chain_num);
self
}
fn with_block_to_chain(mut self, block_to_chain: HashMap<BlockHash, BlockChainId>) -> Self {
self.block_to_chain = Some(block_to_chain);
self
}
fn with_fork_to_child(
mut self,
fork_to_child: HashMap<BlockHash, HashSet<BlockHash>>,
) -> Self {
self.fork_to_child = Some(fork_to_child);
self
}
fn assert<DB: Database, C: Consensus, EF: ExecutorFactory>(
self,
tree: &BlockchainTree<DB, C, EF>,
) {
if let Some(chain_num) = self.chain_num {
assert_eq!(tree.chains.len(), chain_num);
}
if let Some(block_to_chain) = self.block_to_chain {
assert_eq!(*tree.block_indices.blocks_to_chain(), block_to_chain);
}
if let Some(fork_to_child) = self.fork_to_child {
assert_eq!(*tree.block_indices.fork_to_child(), fork_to_child);
}
}
}
#[test]
fn sanity_path() {
let data = BlockChainTestData::default();
let (mut block1, exec1) = data.blocks[0].clone();
block1.number = 11;
block1.state_root =
H256(hex!("5d035ccb3e75a9057452ff060b773b213ec1fc353426174068edfc3971a0b6bd"));
let (mut block2, exec2) = data.blocks[1].clone();
block2.number = 12;
block2.state_root =
H256(hex!("90101a13dd059fa5cca99ed93d1dc23657f63626c5b8f993a2ccbdf7446b64f8"));
// test pops execution results from the vector, so order is from last to first.
let externals = externals(vec![exec2.clone(), exec1.clone(), exec2.clone(), exec1.clone()]);
// last finalized block would be number 9.
setup(data.genesis, &externals);
// make tree
let (db, consensus, exec_factory, chain_spec) = externals;
let mut tree =
BlockchainTree::new(db, consensus, exec_factory, chain_spec, 1, 2, 3).unwrap();
// genesis block 10 is already canonical
assert_eq!(tree.make_canonical(&H256::zero()), Ok(()));
// insert block2 hits max chain size
assert_eq!(
tree.insert_block_with_senders(&block2),
Err(ExecError::PendingBlockIsInFuture {
block_number: block2.number,
block_hash: block2.hash(),
last_finalized: 9,
}
.into())
);
// make genesis block 10 as finalized
tree.finalize_block(10);
// block 2 parent is not known.
assert_eq!(tree.insert_block_with_senders(&block2), Ok(false));
// insert block1
assert_eq!(tree.insert_block_with_senders(&block1), Ok(true));
// already inserted block will return true.
assert_eq!(tree.insert_block_with_senders(&block1), Ok(true));
// insert block2
assert_eq!(tree.insert_block_with_senders(&block2), Ok(true));
// Trie state:
// b2 (pending block)
// |
// |
// b1 (pending block)
// /
// /
// g1 (canonical blocks)
// |
TreeTester::default()
.with_chain_num(1)
.with_block_to_chain(HashMap::from([(block1.hash, 0), (block2.hash, 0)]))
.with_fork_to_child(HashMap::from([(block1.parent_hash, HashSet::from([block1.hash]))]))
.assert(&tree);
// make block1 canonical
assert_eq!(tree.make_canonical(&block1.hash()), Ok(()));
// make block2 canonical
assert_eq!(tree.make_canonical(&block2.hash()), Ok(()));
// Trie state:
// b2 (canonical block)
// |
// |
// b1 (canonical block)
// |
// |
// g1 (canonical blocks)
// |
TreeTester::default()
.with_chain_num(0)
.with_block_to_chain(HashMap::from([]))
.with_fork_to_child(HashMap::from([]))
.assert(&tree);
let mut block1a = block1.clone();
let block1a_hash = H256([0x33; 32]);
block1a.hash = block1a_hash;
let mut block2a = block2.clone();
let block2a_hash = H256([0x34; 32]);
block2a.hash = block2a_hash;
// reinsert two blocks that point to canonical chain
assert_eq!(tree.insert_block_with_senders(&block1a), Ok(true));
TreeTester::default()
.with_chain_num(1)
.with_block_to_chain(HashMap::from([(block1a_hash, 1)]))
.with_fork_to_child(HashMap::from([(
block1.parent_hash,
HashSet::from([block1a_hash]),
)]))
.assert(&tree);
assert_eq!(tree.insert_block_with_senders(&block2a), Ok(true));
// Trie state:
// b2 b2a (side chain)
// | /
// | /
// b1 b1a (side chain)
// | /
// |/
// g1 (10)
// |
TreeTester::default()
.with_chain_num(2)
.with_block_to_chain(HashMap::from([(block1a_hash, 1), (block2a_hash, 2)]))
.with_fork_to_child(HashMap::from([
(block1.parent_hash, HashSet::from([block1a_hash])),
(block1.hash(), HashSet::from([block2a_hash])),
]))
.assert(&tree);
// make b2a canonical
assert_eq!(tree.make_canonical(&block2a_hash), Ok(()));
// Trie state:
// b2a b2 (side chain)
// | /
// | /
// b1 b1a (side chain)
// | /
// |/
// g1 (10)
// |
TreeTester::default()
.with_chain_num(2)
.with_block_to_chain(HashMap::from([(block1a_hash, 1), (block2.hash, 3)]))
.with_fork_to_child(HashMap::from([
(block1.parent_hash, HashSet::from([block1a_hash])),
(block1.hash(), HashSet::from([block2.hash])),
]))
.assert(&tree);
assert_eq!(tree.make_canonical(&block1a_hash), Ok(()));
// Trie state:
// b2a b2 (side chain)
// | /
// | /
// b1a b1 (side chain)
// | /
// |/
// g1 (10)
// |
TreeTester::default()
.with_chain_num(2)
.with_block_to_chain(HashMap::from([
(block1.hash, 4),
(block2a_hash, 4),
(block2.hash, 3),
]))
.with_fork_to_child(HashMap::from([
(block1.parent_hash, HashSet::from([block1.hash])),
(block1.hash(), HashSet::from([block2.hash])),
]))
.assert(&tree);
// make b2 canonical
assert_eq!(tree.make_canonical(&block2.hash()), Ok(()));
// Trie state:
// b2 b2a (side chain)
// | /
// | /
// b1 b1a (side chain)
// | /
// |/
// g1 (10)
// |
TreeTester::default()
.with_chain_num(2)
.with_block_to_chain(HashMap::from([(block1a_hash, 5), (block2a_hash, 4)]))
.with_fork_to_child(HashMap::from([
(block1.parent_hash, HashSet::from([block1a_hash])),
(block1.hash(), HashSet::from([block2a_hash])),
]))
.assert(&tree);
// finalize b1 that would make b1a removed from tree
tree.finalize_block(11);
// Trie state:
// b2 b2a (side chain)
// | /
// | /
// b1 (canon)
// |
// g1 (10)
// |
TreeTester::default()
.with_chain_num(1)
.with_block_to_chain(HashMap::from([(block2a_hash, 4)]))
.with_fork_to_child(HashMap::from([(block1.hash(), HashSet::from([block2a_hash]))]))
.assert(&tree);
// update canonical block to b2, this would make b2a be removed
assert_eq!(tree.update_canonical_hashes(12), Ok(()));
// Tree state:
// b2 (canon)
// |
// b1 (canon)
// |
// g1 (10)
// |
TreeTester::default()
.with_chain_num(0)
.with_block_to_chain(HashMap::from([]))
.with_fork_to_child(HashMap::from([]))
.assert(&tree);
}
}

View File

@ -8,9 +8,11 @@
//! The Reth executor executes transactions in blocks of data.
pub mod eth_dao_fork;
pub mod substate;
/// Execution result types.
pub use reth_provider::execution_result;
pub mod blockchain_tree;
/// Executor
pub mod executor;

View File

@ -0,0 +1,306 @@
//! Substate for blockchain trees
use reth_interfaces::{provider::ProviderError, Result};
use reth_primitives::{Account, Address, BlockHash, BlockNumber, Bytecode, Bytes, H256, U256};
use reth_provider::{AccountProvider, BlockHashProvider, StateProvider};
use std::collections::{hash_map::Entry, BTreeMap, HashMap};
use crate::execution_result::{AccountInfoChangeSet, ExecutionResult};
/// In-memory backend, storing all state values in maps.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct SubStateData {
/// Account info, where a missing account means it does not exist. Non-existing
/// state is needed for pre-TANGERINE forks. `code` is always `None`; bytecode is
/// stored in `bytecodes`.
pub accounts: HashMap<Address, AccountSubState>,
/// New bytecodes
pub bytecodes: HashMap<H256, (u32, Bytecode)>,
}
impl SubStateData {
/// Apply changesets to substate.
pub fn apply(&mut self, changesets: &[ExecutionResult]) {
for changeset in changesets {
self.apply_one(changeset)
}
}
/// Apply one changeset to substate.
pub fn apply_one(&mut self, changeset: &ExecutionResult) {
for tx_changeset in changeset.tx_changesets.iter() {
// apply accounts
for (address, account_change) in tx_changeset.changeset.iter() {
// apply account change
self.apply_account(address, &account_change.account);
// apply its storage changes
self.apply_storage(address, &account_change.storage);
}
// apply bytecodes
for (hash, bytecode) in tx_changeset.new_bytecodes.iter() {
self.bytecodes.entry(*hash).or_insert((0, Bytecode(bytecode.clone()))).0 += 1;
}
}
// apply block reward
for (address, change) in changeset.block_changesets.iter() {
self.apply_account(address, change)
}
}
/// Apply account changeset to substate
fn apply_account(&mut self, address: &Address, change: &AccountInfoChangeSet) {
match change {
AccountInfoChangeSet::Created { new } => match self.accounts.entry(*address) {
Entry::Vacant(entry) => {
entry.insert(AccountSubState::created_account(*new));
}
Entry::Occupied(mut entry) => {
let account = entry.get_mut();
// increment counter
account.inc_storage_counter();
account.info = *new;
}
},
AccountInfoChangeSet::Destroyed { .. } => {
// set selfdestructed account
let account = self.accounts.entry(*address).or_default();
account.inc_storage_counter();
account.info = Default::default();
account.storage.clear();
}
AccountInfoChangeSet::Changed { new, .. } => {
// applying a change moves the account to its new state
self.accounts.entry(*address).or_default().info = *new;
}
AccountInfoChangeSet::NoChange { is_empty } => {
if *is_empty {
self.accounts.entry(*address).or_default();
}
}
}
}
/// Apply storage changeset to substate
fn apply_storage(&mut self, address: &Address, storage: &BTreeMap<U256, (U256, U256)>) {
if let Entry::Occupied(mut entry) = self.accounts.entry(*address) {
let account_storage = &mut entry.get_mut().storage;
for (key, (_, new_value)) in storage {
let key = H256(key.to_be_bytes());
account_storage.insert(key, *new_value);
}
}
}
/// Revert the substate to the old state. Changesets are reverted in reverse order.
pub fn revert(&mut self, changesets: &[ExecutionResult]) {
for changeset in changesets.iter().rev() {
// revert block changeset
for (address, change) in changeset.block_changesets.iter() {
self.revert_account(address, change)
}
for tx_changeset in changeset.tx_changesets.iter() {
// revert bytecodes
for (hash, _) in tx_changeset.new_bytecodes.iter() {
match self.bytecodes.entry(*hash) {
Entry::Vacant(_) => panic!("Bytecode should be present"),
Entry::Occupied(mut entry) => {
let (cnt, _) = entry.get_mut();
*cnt -= 1;
if *cnt == 0 {
entry.remove_entry();
}
}
}
}
// revert accounts
for (address, account_change) in tx_changeset.changeset.iter() {
// revert account
self.revert_account(address, &account_change.account);
// revert its storage
self.revert_storage(address, &account_change.storage);
}
}
}
}
/// Revert storage
fn revert_storage(&mut self, address: &Address, storage: &BTreeMap<U256, (U256, U256)>) {
if let Entry::Occupied(mut entry) = self.accounts.entry(*address) {
let account_storage = &mut entry.get_mut().storage;
for (key, (old_value, _)) in storage {
let key = H256(key.to_be_bytes());
account_storage.insert(key, *old_value);
}
}
}
/// Revert account
fn revert_account(&mut self, address: &Address, change: &AccountInfoChangeSet) {
match change {
AccountInfoChangeSet::Created { .. } => {
match self.accounts.entry(*address) {
Entry::Vacant(_) => {
// We inserted this account in apply fn.
panic!("It should be present, something is broken");
}
Entry::Occupied(mut entry) => {
let val = entry.get_mut();
if val.decr_storage_counter() {
// remove account that we didn't change from substate
entry.remove_entry();
return
}
val.info = Account::default();
val.storage.clear();
}
};
}
AccountInfoChangeSet::Destroyed { old } => match self.accounts.entry(*address) {
Entry::Vacant(_) => {
// We inserted this account in apply fn.
panic!("It should be present, something is broken");
}
Entry::Occupied(mut entry) => {
let val = entry.get_mut();
// Contrary to `Created`, we do not remove this account, as we don't know
// whether a `Changed` changeset also touched it.
val.decr_storage_counter();
val.info = *old;
}
},
AccountInfoChangeSet::Changed { old, .. } => {
self.accounts.entry(*address).or_default().info = *old;
}
AccountInfoChangeSet::NoChange { is_empty: _ } => {
// do nothing
}
}
}
}
/// Account changes in substate
#[derive(Debug, Clone, Default, Eq, PartialEq)]
pub struct AccountSubState {
/// New account state
pub info: Account,
/// If the account is selfdestructed or newly created, storage is cleared
/// and we don't need to ask the provider for data. The counter tracks how many
/// applied changesets cleared the storage, so reverts know when the clear no
/// longer applies.
pub storage_is_clear: Option<u32>,
/// storage slots
pub storage: HashMap<H256, U256>,
}
impl AccountSubState {
/// Increment the storage counter to mark that this account's storage was cleared.
pub fn inc_storage_counter(&mut self) {
self.storage_is_clear = Some(self.storage_is_clear.unwrap_or_default() + 1);
}
/// Decrement the storage counter when a changeset that cleared storage is reverted.
/// Returns `true` if the last clearing changeset was reverted, meaning the account
/// entry can be removed from the substate.
pub fn decr_storage_counter(&mut self) -> bool {
let Some(cnt) = self.storage_is_clear else { return false };
if cnt == 1 {
self.storage_is_clear = None;
return true
}
// more than one clearing changeset is still applied; just decrement
self.storage_is_clear = Some(cnt - 1);
false
}
/// Create the substate entry for a newly created account.
pub fn created_account(info: Account) -> Self {
Self { info, storage_is_clear: Some(1), storage: HashMap::new() }
}
/// Should we ask the provider for storage data
pub fn ask_provider(&self) -> bool {
self.storage_is_clear.is_none()
}
}
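// Counter semantics, illustratively: two applied clearing changesets put the
// counter at `Some(2)`; reverting one brings it back to `Some(1)` (storage is
// still considered clear), and reverting the last returns `true` from
// `decr_storage_counter`, so the provider must be consulted again.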
/// Wrapper around a substate and a provider. It decouples the database (which can
/// be latest or historical state) from the substate changes applied on top of it.
pub struct SubStateWithProvider<'a, SP: StateProvider> {
/// Substate
substate: &'a SubStateData,
/// Provider
provider: SP,
/// Side-chain block hashes
sidechain_block_hashes: &'a BTreeMap<BlockNumber, BlockHash>,
/// Last N canonical block hashes.
canonical_block_hashes: &'a BTreeMap<BlockNumber, BlockHash>,
}
impl<'a, SP: StateProvider> SubStateWithProvider<'a, SP> {
/// Create new substate with provider
pub fn new(
substate: &'a SubStateData,
provider: SP,
sidechain_block_hashes: &'a BTreeMap<BlockNumber, BlockHash>,
canonical_block_hashes: &'a BTreeMap<BlockNumber, BlockHash>,
) -> Self {
Self { substate, provider, sidechain_block_hashes, canonical_block_hashes }
}
}
/* Implement StateProvider traits */
impl<'a, SP: StateProvider> BlockHashProvider for SubStateWithProvider<'a, SP> {
fn block_hash(&self, number: U256) -> Result<Option<H256>> {
// All block numbers fit inside a u64, and revm ensures only the last 256 block numbers are requested.
let block_number = number.as_limbs()[0];
if let Some(sidechain_block_hash) = self.sidechain_block_hashes.get(&block_number).cloned()
{
return Ok(Some(sidechain_block_hash))
}
Ok(Some(
self.canonical_block_hashes
.get(&block_number)
.cloned()
.ok_or(ProviderError::BlockchainTreeBlockHash { block_number })?,
))
}
}
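// Lookup order: a number present in `sidechain_block_hashes` is answered from
// the side chain first; otherwise the canonical map is consulted, and a miss
// surfaces as `ProviderError::BlockchainTreeBlockHash`.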
impl<'a, SP: StateProvider> AccountProvider for SubStateWithProvider<'a, SP> {
fn basic_account(&self, address: Address) -> Result<Option<Account>> {
if let Some(account) = self.substate.accounts.get(&address).map(|acc| acc.info) {
return Ok(Some(account))
}
self.provider.basic_account(address)
}
}
impl<'a, SP: StateProvider> StateProvider for SubStateWithProvider<'a, SP> {
fn storage(
&self,
account: Address,
storage_key: reth_primitives::StorageKey,
) -> Result<Option<reth_primitives::StorageValue>> {
if let Some(substate_account) = self.substate.accounts.get(&account) {
if let Some(storage) = substate_account.storage.get(&storage_key) {
return Ok(Some(*storage))
}
if !substate_account.ask_provider() {
return Ok(Some(U256::ZERO))
}
}
self.provider.storage(account, storage_key)
}
/// Get account and storage proofs.
fn proof(
&self,
_address: Address,
_keys: &[H256],
) -> Result<(Vec<Bytes>, H256, Vec<Vec<Bytes>>)> {
Err(ProviderError::HistoryStateRoot.into())
}
fn bytecode_by_hash(&self, code_hash: H256) -> Result<Option<Bytecode>> {
if let Some((_, bytecode)) = self.substate.bytecodes.get(&code_hash).cloned() {
return Ok(Some(bytecode))
}
self.provider.bytecode_by_hash(code_hash)
}
}
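// Illustrative sketch (hypothetical test, not from this commit): a minimal
// round-trip check, assuming the `ExecutionResult` and `Account` shapes used in
// this commit's test data. Applying a changeset and then reverting it should
// leave the substate unchanged.
#[cfg(test)]
mod substate_round_trip {
use super::*;
use reth_primitives::H160;
#[test]
fn apply_then_revert_is_identity() {
let address = H160([0x11; 20]);
let changeset = ExecutionResult {
tx_changesets: vec![],
block_changesets: BTreeMap::from([(
address,
AccountInfoChangeSet::Created {
new: Account { nonce: 1, balance: U256::from(10), bytecode_hash: None },
},
)]),
};
let mut substate = SubStateData::default();
let empty = substate.clone();
substate.apply_one(&changeset);
assert!(substate.accounts.contains_key(&address));
// reverting the same changeset removes the created account again
substate.revert(&[changeset]);
assert_eq!(substate, empty);
}
}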

View File

@ -1,4 +1,4 @@
use reth_primitives::{Bloom, H256};
use reth_primitives::{BlockHash, BlockNumber, Bloom, H256};
use thiserror::Error;
/// BlockExecutor Errors
@ -34,4 +34,34 @@ pub enum Error {
BlockGasUsed { got: u64, expected: u64 },
#[error("Provider error")]
ProviderError,
#[error("BlockChainId can't be found in BlockchainTree with internal index {chain_id}")]
BlockChainIdConsistency { chain_id: u64 },
#[error(
"Appending chain on fork (other_chain_fork:?) is not possible as the tip is {chain_tip:?}"
)]
AppendChainDoesntConnect { chain_tip: (u64, H256), other_chain_fork: (u64, H256) },
#[error("Canonical chain header #{block_hash} can't be found ")]
CanonicalChain { block_hash: BlockHash },
#[error("Can't insert #{block_number} {block_hash} as last finalized block number is {last_finalized}")]
PendingBlockIsFinalized {
block_hash: BlockHash,
block_number: BlockNumber,
last_finalized: BlockNumber,
},
#[error("Can't insert block #{block_number} {block_hash} to far in future, as last finalized block number is {last_finalized}")]
PendingBlockIsInFuture {
block_hash: BlockHash,
block_number: BlockNumber,
last_finalized: BlockNumber,
},
#[error("Block number #{block_number} not found in blockchain tree chain")]
BlockNumberNotFoundInChain { block_number: BlockNumber },
#[error("Block hash {block_hash} not found in blockchain tree chain")]
BlockHashNotFoundInChain { block_hash: BlockHash },
#[error("Transaction error on revert: {inner:?}")]
CanonicalRevert { inner: String },
#[error("Transaction error on commit: {inner:?}")]
CanonicalCommit { inner: String },
#[error("Transaction error on pipeline status update: {inner:?}")]
PipelineStatusUpdate { inner: String },
}

View File

@ -68,12 +68,23 @@ pub enum ProviderError {
/// Reached the end of the transaction sender table.
#[error("Got to the end of the transaction sender table")]
EndOfTransactionSenderTable,
/// Missing block hash in BlockchainTree
#[error("Missing block hash for block #{block_number:?} in blockchain tree")]
BlockchainTreeBlockHash { block_number: BlockNumber },
/// Some error occurred while interacting with the state tree.
#[error("Unknown error occurred while interacting with the state tree.")]
StateTree,
#[error("Unknown error occurred while interacting with the state trie.")]
StateTrie,
#[error("History state root, can't be calculated")]
HistoryStateRoot,
/// Thrown when required header-related data was not found.
#[error("requested data not found")]
HeaderNotFound,
/// Mismatch of sender and transaction
#[error("Mismatch of sender and transaction id {tx_id}")]
MismatchOfTransactionAndSenderId { tx_id: TxNumber },
/// Block body has the wrong transaction count
#[error("Stored block indices do not match the transaction count")]
BlockBodyTransactionCount,
/// Thrown when the cache service task dropped
#[error("cache service task stopped")]
CacheServiceUnavailable,

View File

@ -15,6 +15,7 @@ use secp256k1::{SecretKey, SECP256K1};
use std::{
collections::HashSet,
net::{Ipv4Addr, SocketAddr, SocketAddrV4},
sync::Arc,
};
/// reexports for convenience
@ -54,7 +55,7 @@ pub struct NetworkConfig<C> {
/// How to configure the [SessionManager](crate::session::SessionManager).
pub sessions_config: SessionsConfig,
/// The chain spec
pub chain_spec: ChainSpec,
pub chain_spec: Arc<ChainSpec>,
/// The [`ForkFilter`] to use at launch for authenticating sessions.
///
/// See also <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2124.md#stale-software-examples>
@ -139,7 +140,7 @@ pub struct NetworkConfigBuilder {
/// How to configure the sessions manager
sessions_config: Option<SessionsConfig>,
/// The network's chain spec
chain_spec: ChainSpec,
chain_spec: Arc<ChainSpec>,
/// The default mode of the network.
network_mode: NetworkMode,
/// The executor to use for spawning tasks.
@ -165,7 +166,7 @@ impl NetworkConfigBuilder {
listener_addr: None,
peers_config: None,
sessions_config: None,
chain_spec: MAINNET.clone(),
chain_spec: Arc::new(MAINNET.clone()),
network_mode: Default::default(),
executor: None,
hello_message: None,
@ -179,7 +180,7 @@ impl NetworkConfigBuilder {
}
/// Sets the chain spec.
pub fn chain_spec(mut self, chain_spec: ChainSpec) -> Self {
pub fn chain_spec(mut self, chain_spec: Arc<ChainSpec>) -> Self {
self.chain_spec = chain_spec;
self
}
@ -417,6 +418,7 @@ mod tests {
// remove any `next` fields we would have by removing all hardforks
chain_spec.hardforks = BTreeMap::new();
let chain_spec = Arc::new(chain_spec);
// check that the forkid is initialized with the genesis and no other forks
let genesis_fork_hash = ForkHash::from(chain_spec.genesis_hash());

View File

@ -90,6 +90,7 @@ arbitrary = [
"dep:proptest",
"dep:proptest-derive",
]
test-utils = []
[[bench]]
name = "recover_ecdsa_crit"

View File

@ -1,4 +1,4 @@
use crate::{Header, SealedHeader, TransactionSigned, Withdrawal, H256};
use crate::{Address, Header, SealedHeader, TransactionSigned, Withdrawal, H256};
use ethers_core::types::BlockNumber;
use reth_codecs::derive_arbitrary;
use reth_rlp::{Decodable, DecodeError, Encodable, RlpDecodable, RlpEncodable};
@ -12,10 +12,10 @@ use std::{fmt, fmt::Formatter, ops::Deref, str::FromStr};
/// Ethereum full block.
///
/// Withdrawals can be optionally included at the end of the RLP encoded message.
#[derive_arbitrary(rlp, 25)]
#[derive(
Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, RlpEncodable, RlpDecodable,
)]
#[derive_arbitrary(rlp, 25)]
#[rlp(trailing)]
pub struct Block {
/// Block header.
@ -28,6 +28,18 @@ pub struct Block {
pub withdrawals: Option<Vec<Withdrawal>>,
}
impl Block {
/// Create a `SealedBlock`, computing the header and ommer hashes.
pub fn seal_slow(self) -> SealedBlock {
SealedBlock {
header: self.header.seal_slow(),
body: self.body,
ommers: self.ommers.into_iter().map(|o| o.seal_slow()).collect(),
withdrawals: self.withdrawals,
}
}
}
impl Deref for Block {
type Target = Header;
fn deref(&self) -> &Self::Target {
@ -65,6 +77,17 @@ impl SealedBlock {
(self.header, self.body, self.ommers)
}
/// Expensive operation that recovers the transaction signers. See [SealedBlockWithSenders].
pub fn senders(&self) -> Option<Vec<Address>> {
self.body.iter().map(|tx| tx.recover_signer()).collect::<Option<Vec<Address>>>()
}
/// Pair this sealed block with its recovered transaction senders.
pub fn seal_with_senders(self) -> Option<SealedBlockWithSenders> {
let senders = self.senders()?;
Some(SealedBlockWithSenders { block: self, senders })
}
/// Unseal the block
pub fn unseal(self) -> Block {
Block {
@ -83,6 +106,52 @@ impl Deref for SealedBlock {
}
}
#[cfg(any(test, feature = "test-utils"))]
impl std::ops::DerefMut for SealedBlock {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.header
}
}
/// Sealed block with senders recovered from transactions.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct SealedBlockWithSenders {
/// Sealed block
pub block: SealedBlock,
/// List of senders that match the transactions in the block.
pub senders: Vec<Address>,
}
impl SealedBlockWithSenders {
/// New sealed block with senders. Returns `None` if the number of transactions and senders differ.
pub fn new(block: SealedBlock, senders: Vec<Address>) -> Option<Self> {
if block.body.len() != senders.len() {
None
} else {
Some(Self { block, senders })
}
}
/// Split the structure into its components.
pub fn into_components(self) -> (SealedBlock, Vec<Address>) {
(self.block, self.senders)
}
}
impl Deref for SealedBlockWithSenders {
type Target = SealedBlock;
fn deref(&self) -> &Self::Target {
&self.block
}
}
#[cfg(any(test, feature = "test-utils"))]
impl std::ops::DerefMut for SealedBlockWithSenders {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.block
}
}
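// Illustrative sketch (hypothetical test, not from this commit): sealing a block
// and pairing it with its recovered senders. An empty body trivially recovers an
// empty sender list, and `Deref` chains down to the header fields.
#[cfg(test)]
mod seal_with_senders_sketch {
use super::*;
#[test]
fn seal_and_recover_senders() {
let sealed = Block::default().seal_slow();
let with_senders = sealed.seal_with_senders().expect("no transactions to recover");
assert!(with_senders.senders.is_empty());
assert_eq!(with_senders.number, 0);
}
}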
/// Either a block hash _or_ a block number
#[derive_arbitrary(rlp)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]

View File

@ -8,7 +8,7 @@ use ethers_core::types::{Block, H256 as EthersH256, H64};
use reth_codecs::{add_arbitrary_tests, derive_arbitrary, main_codec, Compact};
use reth_rlp::{length_of_length, Decodable, Encodable, EMPTY_STRING_CODE};
use serde::{Deserialize, Serialize};
use std::ops::Deref;
use std::ops::{Deref, DerefMut};
/// Describes the current head block.
///
@ -284,9 +284,9 @@ impl Decodable for Header {
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct SealedHeader {
/// Locked Header fields.
header: Header,
pub header: Header,
/// Locked Header hash.
hash: BlockHash,
pub hash: BlockHash,
}
impl SealedHeader {
@ -404,6 +404,12 @@ impl Deref for SealedHeader {
}
}
impl DerefMut for SealedHeader {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.header
}
}
/// Represents the direction for a headers request depending on the `reverse` field of the request.
/// > The response must contain a number of block headers, of rising number when reverse is 0,
/// > falling when 1

View File

@ -38,7 +38,9 @@ pub use proofs::ProofCheckpoint;
pub use account::{Account, Bytecode};
pub use bits::H512;
pub use block::{Block, BlockHashOrNumber, BlockId, BlockNumberOrTag, SealedBlock};
pub use block::{
Block, BlockHashOrNumber, BlockId, BlockNumberOrTag, SealedBlock, SealedBlockWithSenders,
};
pub use bloom::Bloom;
pub use chain::{
AllGenesisFormats, Chain, ChainInfo, ChainSpec, ChainSpecBuilder, ForkCondition, GOERLI,
@ -124,3 +126,6 @@ pub fn keccak256(data: impl AsRef<[u8]>) -> H256 {
hasher.finalize(&mut buf);
buf.into()
}
#[cfg(any(test, feature = "arbitrary"))]
pub use arbitrary;

View File

@ -871,6 +871,11 @@ impl TransactionSignedEcRecovered {
self.signed_transaction
}
/// Dissolve `Self` into its components.
pub fn to_components(self) -> (TransactionSigned, Address) {
(self.signed_transaction, self.signer)
}
/// Create [`TransactionSignedEcRecovered`] from [`TransactionSigned`] and [`Address`] of the
/// signer.
pub fn from_signed_transaction(signed_transaction: TransactionSigned, signer: Address) -> Self {

View File

@ -53,13 +53,13 @@ impl<Client: HeaderProvider + BlockProvider + StateProviderFactory + EvmEnvProvi
/// Create new instance of [EngineApi].
pub fn new(
client: Client,
chain_spec: ChainSpec,
chain_spec: Arc<ChainSpec>,
message_rx: mpsc::UnboundedReceiver<EngineApiMessage>,
forkchoice_state_tx: watch::Sender<ForkchoiceState>,
) -> Self {
Self {
client,
chain_spec: Arc::new(chain_spec),
chain_spec,
message_rx: UnboundedReceiverStream::new(message_rx),
forkchoice_state_tx,
}

View File

@ -1,10 +1,10 @@
use reth_primitives::{AllGenesisFormats, ChainSpec, GOERLI, MAINNET, SEPOLIA};
use std::path::PathBuf;
use std::{path::PathBuf, sync::Arc};
/// Clap value parser for [ChainSpec]s that takes either a built-in chainspec or the path
/// to a custom one.
pub fn chain_spec_value_parser(s: &str) -> Result<ChainSpec, eyre::Error> {
Ok(match s {
pub fn chain_spec_value_parser(s: &str) -> Result<Arc<ChainSpec>, eyre::Error> {
Ok(Arc::new(match s {
"mainnet" => MAINNET.clone(),
"goerli" => GOERLI.clone(),
"sepolia" => SEPOLIA.clone(),
@ -12,13 +12,13 @@ pub fn chain_spec_value_parser(s: &str) -> Result<ChainSpec, eyre::Error> {
let raw = std::fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned()))?;
serde_json::from_str(&raw)?
}
})
}))
}
/// Clap value parser for [ChainSpec]s that takes either a built-in genesis format or the path
/// to a custom one.
pub fn genesis_value_parser(s: &str) -> Result<ChainSpec, eyre::Error> {
Ok(match s {
pub fn genesis_value_parser(s: &str) -> Result<Arc<ChainSpec>, eyre::Error> {
Ok(Arc::new(match s {
"mainnet" => MAINNET.clone(),
"goerli" => GOERLI.clone(),
"sepolia" => SEPOLIA.clone(),
@ -27,7 +27,7 @@ pub fn genesis_value_parser(s: &str) -> Result<ChainSpec, eyre::Error> {
let genesis: AllGenesisFormats = serde_json::from_str(&raw)?;
genesis.into()
}
})
}))
}
#[cfg(test)]

View File

@ -37,7 +37,7 @@ pub enum InitDatabaseError {
#[allow(clippy::field_reassign_with_default)]
pub fn init_genesis<DB: Database>(
db: Arc<DB>,
chain: ChainSpec,
chain: Arc<ChainSpec>,
) -> Result<H256, InitDatabaseError> {
let genesis = chain.genesis();
@ -85,6 +85,8 @@ pub fn init_genesis<DB: Database>(
#[cfg(test)]
mod tests {
use std::sync::Arc;
use super::{init_genesis, InitDatabaseError};
use reth_db::mdbx::test_utils::create_test_rw_db;
use reth_primitives::{
@ -94,7 +96,7 @@ mod tests {
#[test]
fn success_init_genesis_mainnet() {
let db = create_test_rw_db();
let genesis_hash = init_genesis(db, MAINNET.clone()).unwrap();
let genesis_hash = init_genesis(db, Arc::new(MAINNET.clone())).unwrap();
// actual, expected
assert_eq!(genesis_hash, MAINNET_GENESIS);
@ -103,7 +105,7 @@ mod tests {
#[test]
fn success_init_genesis_goerli() {
let db = create_test_rw_db();
let genesis_hash = init_genesis(db, GOERLI.clone()).unwrap();
let genesis_hash = init_genesis(db, Arc::new(GOERLI.clone())).unwrap();
// actual, expected
assert_eq!(genesis_hash, GOERLI_GENESIS);
@ -112,7 +114,7 @@ mod tests {
#[test]
fn success_init_genesis_sepolia() {
let db = create_test_rw_db();
let genesis_hash = init_genesis(db, SEPOLIA.clone()).unwrap();
let genesis_hash = init_genesis(db, Arc::new(SEPOLIA.clone())).unwrap();
// actual, expected
assert_eq!(genesis_hash, SEPOLIA_GENESIS);
@ -121,10 +123,10 @@ mod tests {
#[test]
fn fail_init_inconsistent_db() {
let db = create_test_rw_db();
init_genesis(db.clone(), SEPOLIA.clone()).unwrap();
init_genesis(db.clone(), Arc::new(SEPOLIA.clone())).unwrap();
// Try to init db with a different genesis block
let genesis_hash = init_genesis(db, MAINNET.clone());
let genesis_hash = init_genesis(db, Arc::new(MAINNET.clone()));
assert_eq!(
genesis_hash.unwrap_err(),

View File

@ -57,7 +57,7 @@ async fn can_peer_with_geth() {
assert_eq!(geth_peer_id, peer_id);
}
async fn init_geth() -> (CliqueGethInstance, ChainSpec) {
async fn init_geth() -> (CliqueGethInstance, Arc<ChainSpec>) {
// first create a signer that we will fund so we can make transactions
let chain_id = 13337u64;
let data_dir = tempfile::tempdir().expect("should be able to create temp geth datadir");
@ -123,5 +123,5 @@ async fn init_geth() -> (CliqueGethInstance, ChainSpec) {
let block = clique.provider.get_block_number().await.unwrap();
assert!(block > U64::zero());
(clique, chainspec)
(clique, Arc::new(chainspec))
}

View File

@ -35,7 +35,7 @@ metrics = "0.20.1"
# misc
serde = { version = "1.0", optional = true }
thiserror = "1.0.37"
aquamarine = "0.2.1"
aquamarine = "0.2.1" #docs
itertools = "0.10.5"
rayon = "1.6.0"
num-traits = "0.2.15"

View File

@ -324,8 +324,8 @@ mod tests {
let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap();
let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice();
let block = SealedBlock::decode(&mut block_rlp).unwrap();
insert_canonical_block(tx.deref_mut(), &genesis, true).unwrap();
insert_canonical_block(tx.deref_mut(), &block, true).unwrap();
insert_canonical_block(tx.deref_mut(), genesis, None, true).unwrap();
insert_canonical_block(tx.deref_mut(), block.clone(), None, true).unwrap();
tx.commit().unwrap();
// insert pre state
@ -413,8 +413,8 @@ mod tests {
let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap();
let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice();
let block = SealedBlock::decode(&mut block_rlp).unwrap();
insert_canonical_block(tx.deref_mut(), &genesis, true).unwrap();
insert_canonical_block(tx.deref_mut(), &block, true).unwrap();
insert_canonical_block(tx.deref_mut(), genesis, None, true).unwrap();
insert_canonical_block(tx.deref_mut(), block.clone(), None, true).unwrap();
tx.commit().unwrap();
// variables
@ -480,8 +480,8 @@ mod tests {
let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap();
let mut block_rlp = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice();
let block = SealedBlock::decode(&mut block_rlp).unwrap();
insert_canonical_block(tx.deref_mut(), &genesis, true).unwrap();
insert_canonical_block(tx.deref_mut(), &block, true).unwrap();
insert_canonical_block(tx.deref_mut(), genesis, None, true).unwrap();
insert_canonical_block(tx.deref_mut(), block.clone(), None, true).unwrap();
tx.commit().unwrap();
// variables

View File

@ -5,7 +5,7 @@ use reth_db::{
tables,
transaction::{DbTx, DbTxMut},
};
use reth_primitives::{keccak256, Account, Address};
use reth_primitives::keccak256;
use reth_provider::Transaction;
use std::{collections::BTreeMap, fmt::Debug, ops::Range};
use tracing::*;
@ -63,12 +63,12 @@ impl AccountHashingStage {
pub fn seed<DB: Database>(
tx: &mut Transaction<'_, DB>,
opts: SeedOpts,
) -> Result<Vec<(Address, Account)>, StageError> {
) -> Result<Vec<(reth_primitives::Address, reth_primitives::Account)>, StageError> {
use reth_db::models::AccountBeforeTx;
use reth_interfaces::test_utils::generators::{
random_block_range, random_eoa_account_range,
};
use reth_primitives::{H256, U256};
use reth_primitives::{Account, H256, U256};
use reth_provider::insert_canonical_block;
let blocks = random_block_range(opts.blocks, H256::zero(), opts.txs);
@ -76,7 +76,7 @@ impl AccountHashingStage {
let transitions = std::cmp::min(opts.transitions, num_transitions);
for block in blocks {
insert_canonical_block(&**tx, &block, true).unwrap();
insert_canonical_block(&**tx, block, None, true).unwrap();
}
let mut accounts = random_eoa_account_range(opts.accounts);
{
@ -203,37 +203,8 @@ impl<DB: Database> Stage<DB> for AccountHashingStage {
let from_transition_rev = tx.get_block_transition(input.unwind_to)?;
let to_transition_rev = tx.get_block_transition(input.stage_progress)?;
let mut hashed_accounts = tx.cursor_write::<tables::HashedAccount>()?;
// Aggregate all transition changesets and make a list of accounts that have been changed.
tx.cursor_read::<tables::AccountChangeSet>()?
.walk_range(from_transition_rev..to_transition_rev)?
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.rev()
// fold all account to get the old balance/nonces and account that needs to be removed
.fold(
BTreeMap::new(),
|mut accounts: BTreeMap<Address, Option<Account>>, (_, account_before)| {
accounts.insert(account_before.address, account_before.info);
accounts
},
)
.into_iter()
// hash addresses and collect it inside sorted BTreeMap.
// We are doing keccak only once per address.
.map(|(address, account)| (keccak256(address), account))
.collect::<BTreeMap<_, _>>()
.into_iter()
// Apply values to HashedState (if Account is None remove it);
.try_for_each(|(hashed_address, account)| -> Result<(), StageError> {
if let Some(account) = account {
hashed_accounts.upsert(hashed_address, account)?;
} else if hashed_accounts.seek_exact(hashed_address)?.is_some() {
hashed_accounts.delete_current()?;
}
Ok(())
})?;
tx.unwind_account_hashing(from_transition_rev..to_transition_rev)?;
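// `unwind_account_hashing` replaces the inline logic removed above: it folds the
// account changesets in the transition range back to their pre-state (hashing
// each address once) and upserts or deletes the `HashedAccount` entries.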
Ok(UnwindOutput { stage_progress: input.unwind_to })
}
@ -283,6 +254,7 @@ mod tests {
ExecInput, ExecOutput, UnwindInput,
};
use reth_db::{cursor::DbCursorRO, tables, transaction::DbTx};
use reth_primitives::Address;
pub(crate) struct AccountHashingTestRunner {
pub(crate) tx: TestTransaction,

View File

@ -1,13 +1,13 @@
use crate::{ExecInput, ExecOutput, Stage, StageError, StageId, UnwindInput, UnwindOutput};
use num_traits::Zero;
use reth_db::{
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO},
cursor::DbDupCursorRO,
database::Database,
models::TransitionIdAddress,
tables,
transaction::{DbTx, DbTxMut},
};
use reth_primitives::{keccak256, Address, StorageEntry, H256, U256};
use reth_primitives::{keccak256, Address, StorageEntry};
use reth_provider::Transaction;
use std::{collections::BTreeMap, fmt::Debug};
use tracing::*;
@ -154,47 +154,10 @@ impl<DB: Database> Stage<DB> for StorageHashingStage {
let from_transition_rev = tx.get_block_transition(input.unwind_to)?;
let to_transition_rev = tx.get_block_transition(input.stage_progress)?;
let mut hashed_storage = tx.cursor_dup_write::<tables::HashedStorage>()?;
// Aggregate all transition changesets and make list of accounts that have been changed.
tx.cursor_read::<tables::StorageChangeSet>()?
.walk_range(
tx.unwind_storage_hashing(
TransitionIdAddress((from_transition_rev, Address::zero()))..
TransitionIdAddress((to_transition_rev, Address::zero())),
)?
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.rev()
// fold all account to get the old balance/nonces and account that needs to be removed
.fold(
BTreeMap::new(),
|mut accounts: BTreeMap<(Address, H256), U256>,
(TransitionIdAddress((_, address)), storage_entry)| {
accounts.insert((address, storage_entry.key), storage_entry.value);
accounts
},
)
.into_iter()
// hash addresses and collect it inside sorted BTreeMap.
// We are doing keccak only once per address.
.map(|((address, key), value)| ((keccak256(address), keccak256(key)), value))
.collect::<BTreeMap<_, _>>()
.into_iter()
// Apply values to HashedStorage (if Value is zero just remove it);
.try_for_each(|((hashed_address, key), value)| -> Result<(), StageError> {
if hashed_storage
.seek_by_key_subkey(hashed_address, key)?
.filter(|entry| entry.key == key)
.is_some()
{
hashed_storage.delete_current()?;
}
if value != U256::ZERO {
hashed_storage.upsert(hashed_address, StorageEntry { key, value })?;
}
Ok(())
})?;
)?;
Ok(UnwindOutput { stage_progress: input.unwind_to })
}
@ -209,7 +172,7 @@ mod tests {
};
use assert_matches::assert_matches;
use reth_db::{
cursor::DbCursorRW,
cursor::{DbCursorRO, DbCursorRW},
mdbx::{tx::Tx, WriteMap, RW},
models::{StoredBlockBody, TransitionIdAddress},
};

View File

@ -1,16 +1,7 @@
use crate::{ExecInput, ExecOutput, Stage, StageError, StageId, UnwindInput, UnwindOutput};
use reth_db::{
cursor::{DbCursorRO, DbCursorRW},
database::{Database, DatabaseGAT},
models::ShardedKey,
tables,
transaction::{DbTx, DbTxMut, DbTxMutGAT},
TransitionList,
};
use reth_db::database::Database;
use reth_provider::Transaction;
use reth_primitives::{Address, TransitionId};
use std::{collections::BTreeMap, fmt::Debug};
use std::fmt::Debug;
use tracing::*;
/// The [`StageId`] of the account history indexing stage.
@ -18,7 +9,7 @@ pub const INDEX_ACCOUNT_HISTORY: StageId = StageId("IndexAccountHistory");
/// Stage that indexes the account history from the changesets generated by the
/// [`ExecutionStage`][crate::stages::ExecutionStage]. For more information
/// on index sharding take a look at [`tables::AccountHistory`]
/// on index sharding take a look at [`reth_db::tables::AccountHistory`]
#[derive(Debug)]
pub struct IndexAccountHistoryStage {
/// Number of blocks after which the control
@ -75,84 +66,25 @@ impl<DB: Database> Stage<DB> for IndexAccountHistoryStage {
let from_transition_rev = tx.get_block_transition(input.unwind_to)?;
let to_transition_rev = tx.get_block_transition(input.stage_progress)?;
let mut cursor = tx.cursor_write::<tables::AccountHistory>()?;
tx.unwind_account_history_indices(from_transition_rev..to_transition_rev)?;
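// `unwind_account_history_indices` re-shards `AccountHistory`: shards that fall
// entirely past the unwind point are dropped and the boundary shard is truncated
// to its still-valid items, mirroring the standalone helper removed below.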
let account_changeset = tx
.cursor_read::<tables::AccountChangeSet>()?
.walk(Some(from_transition_rev))?
.take_while(|res| res.as_ref().map(|(k, _)| *k < to_transition_rev).unwrap_or_default())
.collect::<Result<Vec<_>, _>>()?;
let last_indices = account_changeset
.into_iter()
// reverse so we can get lowest transition id where we need to unwind account.
.rev()
// fold all account and get last transition index
.fold(BTreeMap::new(), |mut accounts: BTreeMap<Address, u64>, (index, account)| {
// we just need address and lowest transition id.
accounts.insert(account.address, index);
accounts
});
// try to unwind the index
for (address, rem_index) in last_indices {
let shard_part = unwind_account_history_shards::<DB>(&mut cursor, address, rem_index)?;
// check last shard_part, if present, items needs to be reinserted.
if !shard_part.is_empty() {
// there are items in list
tx.put::<tables::AccountHistory>(
ShardedKey::new(address, u64::MAX),
TransitionList::new(shard_part)
.expect("There is at least one element in list and it is sorted."),
)?;
}
}
// from HistoryIndex higher than that number.
Ok(UnwindOutput { stage_progress: input.unwind_to })
}
}
/// Unwind all history shards. For the boundary shard, remove it from the database and
/// return the last part of the shard with still-valid items. If all full shards were
/// removed, the returned list is empty.
pub fn unwind_account_history_shards<DB: Database>(
cursor: &mut <<DB as DatabaseGAT<'_>>::TXMut as DbTxMutGAT<'_>>::CursorMut<
tables::AccountHistory,
>,
address: Address,
transition_id: TransitionId,
) -> Result<Vec<usize>, StageError> {
let mut item = cursor.seek_exact(ShardedKey::new(address, u64::MAX))?;
while let Some((sharded_key, list)) = item {
// there is no more shard for address
if sharded_key.key != address {
break
}
cursor.delete_current()?;
// check first item and if it is more and eq than `transition_id` delete current
// item.
let first = list.iter(0).next().expect("List can't empty");
if first >= transition_id as usize {
item = cursor.prev()?;
continue
} else if transition_id <= sharded_key.highest_transition_id {
// if first element is in scope whole list would be removed.
// so at least this first element is present.
return Ok(list.iter(0).take_while(|i| *i < transition_id as usize).collect::<Vec<_>>())
} else {
let new_list = list.iter(0).collect::<Vec<_>>();
return Ok(new_list)
}
}
Ok(Vec::new())
}
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use super::*;
use crate::test_utils::{TestTransaction, PREV_STAGE_ID};
use reth_db::models::{sharded_key::NUM_OF_INDICES_IN_SHARD, AccountBeforeTx};
use reth_db::{
models::{sharded_key::NUM_OF_INDICES_IN_SHARD, AccountBeforeTx, ShardedKey},
tables,
transaction::DbTxMut,
TransitionList,
};
use reth_primitives::{hex_literal::hex, H160};
const ADDRESS: H160 = H160(hex!("0000000000000000000000000000000000000001"));

View File

@ -1,15 +1,8 @@
use crate::{ExecInput, ExecOutput, Stage, StageError, StageId, UnwindInput, UnwindOutput};
use reth_db::{
cursor::{DbCursorRO, DbCursorRW},
database::{Database, DatabaseGAT},
models::storage_sharded_key::StorageShardedKey,
tables,
transaction::{DbTx, DbTxMut, DbTxMutGAT},
TransitionList,
};
use reth_primitives::{Address, TransitionId, H256};
use reth_db::{database::Database, models::TransitionIdAddress};
use reth_primitives::Address;
use reth_provider::Transaction;
use std::{collections::BTreeMap, fmt::Debug};
use std::fmt::Debug;
use tracing::*;
/// The [`StageId`] of the storage history indexing stage.
@ -17,7 +10,7 @@ pub const INDEX_STORAGE_HISTORY: StageId = StageId("IndexStorageHistory");
/// Stage that indexes the storage history from the changesets generated by the
/// [`ExecutionStage`][crate::stages::ExecutionStage]. For more information
/// on index sharding take a look at [`tables::StorageHistory`].
/// on index sharding take a look at [`reth_db::tables::StorageHistory`].
#[derive(Debug)]
pub struct IndexStorageHistoryStage {
/// Number of blocks after which the control
@ -74,94 +67,32 @@ impl<DB: Database> Stage<DB> for IndexStorageHistoryStage {
let from_transition_rev = tx.get_block_transition(input.unwind_to)?;
let to_transition_rev = tx.get_block_transition(input.stage_progress)?;
let mut cursor = tx.cursor_write::<tables::StorageHistory>()?;
let storage_changesets = tx
.cursor_read::<tables::StorageChangeSet>()?
.walk(Some((from_transition_rev, Address::zero()).into()))?
.take_while(|res| {
res.as_ref().map(|(k, _)| k.transition_id() < to_transition_rev).unwrap_or_default()
})
.collect::<Result<Vec<_>, _>>()?;
let last_indices = storage_changesets
.into_iter()
// reverse so we can get lowest transition id where we need to unwind account.
.rev()
// fold all storages and get last transition index
.fold(
BTreeMap::new(),
|mut accounts: BTreeMap<(Address, H256), u64>, (index, storage)| {
// we just need address and lowest transition id.
accounts.insert((index.address(), storage.key), index.transition_id());
accounts
},
);
for ((address, storage_key), rem_index) in last_indices {
let shard_part =
unwind_storage_history_shards::<DB>(&mut cursor, address, storage_key, rem_index)?;
// check last shard_part, if present, items needs to be reinserted.
if !shard_part.is_empty() {
// there are items in list
tx.put::<tables::StorageHistory>(
StorageShardedKey::new(address, storage_key, u64::MAX),
TransitionList::new(shard_part)
.expect("There is at least one element in list and it is sorted."),
tx.unwind_storage_history_indices(
TransitionIdAddress((from_transition_rev, Address::zero()))..
TransitionIdAddress((to_transition_rev, Address::zero())),
)?;
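// As with the account variant, this drops full `StorageHistory` shards past the
// unwind point and truncates the boundary shard, replacing the
// `unwind_storage_history_shards` helper removed below.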
}
}
Ok(UnwindOutput { stage_progress: input.unwind_to })
}
}
/// Unwind all history shards. For the boundary shard, remove it from the database and
/// return the last part of the shard with still-valid items. If all full shards were
/// removed, the returned list is empty; this does not mean no shards are left, only
/// that there are no split shards.
pub fn unwind_storage_history_shards<DB: Database>(
cursor: &mut <<DB as DatabaseGAT<'_>>::TXMut as DbTxMutGAT<'_>>::CursorMut<
tables::StorageHistory,
>,
address: Address,
storage_key: H256,
transition_id: TransitionId,
) -> Result<Vec<usize>, StageError> {
let mut item = cursor.seek_exact(StorageShardedKey::new(address, storage_key, u64::MAX))?;
while let Some((storage_sharded_key, list)) = item {
// there is no more shard for address
if storage_sharded_key.address != address ||
storage_sharded_key.sharded_key.key != storage_key
{
// there is no more shard for address and storage_key.
break
}
cursor.delete_current()?;
// check first item and if it is more and eq than `transition_id` delete current
// item.
let first = list.iter(0).next().expect("List can't empty");
if first >= transition_id as usize {
item = cursor.prev()?;
continue
} else if transition_id <= storage_sharded_key.sharded_key.highest_transition_id {
// if first element is in scope whole list would be removed.
// so at least this first element is present.
return Ok(list.iter(0).take_while(|i| *i < transition_id as usize).collect::<Vec<_>>())
} else {
return Ok(list.iter(0).collect::<Vec<_>>())
}
}
Ok(Vec::new())
}
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use super::*;
use crate::test_utils::{TestTransaction, PREV_STAGE_ID};
use reth_db::models::{
storage_sharded_key::NUM_OF_INDICES_IN_SHARD, ShardedKey, TransitionIdAddress,
use reth_db::{
models::{
storage_sharded_key::{StorageShardedKey, NUM_OF_INDICES_IN_SHARD},
ShardedKey, TransitionIdAddress,
},
tables,
transaction::DbTxMut,
TransitionList,
};
use reth_primitives::{hex_literal::hex, StorageEntry, H160, U256};
use reth_primitives::{hex_literal::hex, StorageEntry, H160, H256, U256};
const ADDRESS: H160 = H160(hex!("0000000000000000000000000000000000000001"));
const STORAGE_KEY: H256 =

View File

@ -111,9 +111,9 @@ impl<DB: Database> Stage<DB> for TransactionLookupStage {
) -> Result<UnwindOutput, StageError> {
info!(target: "sync::stages::transaction_lookup", to_block = input.unwind_to, "Unwinding");
// Cursors to unwind tx hash to number
let mut body_cursor = tx.cursor_write::<tables::BlockBodies>()?;
let mut body_cursor = tx.cursor_read::<tables::BlockBodies>()?;
let mut tx_hash_number_cursor = tx.cursor_write::<tables::TxHashNumber>()?;
let mut transaction_cursor = tx.cursor_write::<tables::Transactions>()?;
let mut transaction_cursor = tx.cursor_read::<tables::Transactions>()?;
let mut rev_walker = body_cursor.walk_back(None)?;
while let Some((number, body)) = rev_walker.next().transpose()? {
if number <= input.unwind_to {

View File

@ -1,4 +1,5 @@
use reth_db::{
common::KeyValue,
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO},
mdbx::{
test_utils::{create_test_db, create_test_db_with_path},
@ -88,9 +89,8 @@ impl TestTransaction {
})
}
#[allow(clippy::type_complexity)]
/// Return the full table as a `Vec`
pub fn table<T: Table>(&self) -> Result<Vec<(T::Key, T::Value)>, DbError>
pub fn table<T: Table>(&self) -> Result<Vec<KeyValue<T>>, DbError>
where
T::Key: Default + Ord,
{

View File

@ -1,7 +1,9 @@
/// Alias type for a table's key-value pair.
pub type KeyValue<T> = (<T as Table>::Key, <T as Table>::Value);
/// Alias type for a `(key, value)` result coming from a cursor.
pub type PairResult<T> = Result<Option<(<T as Table>::Key, <T as Table>::Value)>, Error>;
pub type PairResult<T> = Result<Option<KeyValue<T>>, Error>;
/// Alias type for a `(key, value)` result coming from an iterator.
pub type IterPairResult<T> = Option<Result<(<T as Table>::Key, <T as Table>::Value), Error>>;
pub type IterPairResult<T> = Option<Result<KeyValue<T>, Error>>;
/// Alias type for a value result coming from a cursor without its key.
pub type ValueOnlyResult<T> = Result<Option<<T as Table>::Value>, Error>;

View File

@ -159,6 +159,15 @@ impl<'cursor, 'tx, T: Table, CURSOR: DbCursorRO<'tx, T>> Walker<'cursor, 'tx, T,
}
}
impl<'cursor, 'tx, T: Table, CURSOR: DbCursorRW<'tx, T> + DbCursorRO<'tx, T>>
Walker<'cursor, 'tx, T, CURSOR>
{
/// Delete the current item the walker points to.
pub fn delete_current(&mut self) -> Result<(), Error> {
self.cursor.delete_current()
}
}
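// Illustrative pruning pattern enabled by `delete_current` (hypothetical table
// and bound):
//
//     let mut walker = cursor.walk(None)?;
//     while let Some((key, _value)) = walker.next().transpose()? {
//         if key <= unwind_to {
//             walker.delete_current()?;
//         }
//     }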
/// Provides a reverse iterator to `Cursor` when handling `Table`.
/// Also check [`Walker`]
pub struct ReverseWalker<'cursor, 'tx, T: Table, CURSOR: DbCursorRO<'tx, T>> {
@ -183,6 +192,15 @@ impl<'cursor, 'tx, T: Table, CURSOR: DbCursorRO<'tx, T>> ReverseWalker<'cursor,
}
}
impl<'cursor, 'tx, T: Table, CURSOR: DbCursorRW<'tx, T> + DbCursorRO<'tx, T>>
ReverseWalker<'cursor, 'tx, T, CURSOR>
{
/// Delete the current item the walker points to.
pub fn delete_current(&mut self) -> Result<(), Error> {
self.cursor.delete_current()
}
}
impl<'cursor, 'tx, T: Table, CURSOR: DbCursorRO<'tx, T>> std::iter::Iterator
for ReverseWalker<'cursor, 'tx, T, CURSOR>
{
@ -268,6 +286,15 @@ impl<'cursor, 'tx, T: Table, CURSOR: DbCursorRO<'tx, T>> RangeWalker<'cursor, 't
}
}
impl<'cursor, 'tx, T: Table, CURSOR: DbCursorRW<'tx, T> + DbCursorRO<'tx, T>>
RangeWalker<'cursor, 'tx, T, CURSOR>
{
/// Delete the current item the walker points to.
pub fn delete_current(&mut self) -> Result<(), Error> {
self.cursor.delete_current()
}
}
/// Provides an iterator to `Cursor` when handling a `DupSort` table.
///
/// Reason why we have two lifetimes is to distinguish between `'cursor` lifetime
@ -282,6 +309,15 @@ pub struct DupWalker<'cursor, 'tx, T: DupSort, CURSOR: DbDupCursorRO<'tx, T>> {
pub _tx_phantom: PhantomData<&'tx T>,
}
impl<'cursor, 'tx, T: DupSort, CURSOR: DbCursorRW<'tx, T> + DbDupCursorRO<'tx, T>>
DupWalker<'cursor, 'tx, T, CURSOR>
{
/// Delete the current item the walker points to.
pub fn delete_current(&mut self) -> Result<(), Error> {
self.cursor.delete_current()
}
}
impl<'cursor, 'tx, T: DupSort, CURSOR: DbDupCursorRO<'tx, T>> std::iter::Iterator
for DupWalker<'cursor, 'tx, T, CURSOR>
{

View File

@ -43,7 +43,7 @@ pub trait Database: for<'a> DatabaseGAT<'a> {
/// the end of the execution.
fn update<T, F>(&self, f: F) -> Result<T, Error>
where
F: Fn(&<Self as DatabaseGAT<'_>>::TXMut) -> T,
F: FnOnce(&<Self as DatabaseGAT<'_>>::TXMut) -> T,
{
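// Relaxing `Fn` to `FnOnce` lets callers move captured state into the closure,
// e.g. inserting an owned block exactly once within a single update.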
let tx = self.tx_mut()?;

View File

@ -3,6 +3,7 @@
use std::{borrow::Cow, collections::Bound, marker::PhantomData, ops::RangeBounds};
use crate::{
common::{PairResult, ValueOnlyResult},
cursor::{
DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW, DupWalker, RangeWalker,
ReverseWalker, Walker,
@ -13,13 +14,6 @@ use crate::{
};
use reth_libmdbx::{self, Error as MDBXError, TransactionKind, WriteFlags, RO, RW};
/// Alias type for a `(key, value)` result coming from a cursor.
pub type PairResult<T> = Result<Option<(<T as Table>::Key, <T as Table>::Value)>, Error>;
/// Alias type for a `(key, value)` result coming from an iterator.
pub type IterPairResult<T> = Option<Result<(<T as Table>::Key, <T as Table>::Value), Error>>;
/// Alias type for a value result coming from a cursor without its key.
pub type ValueOnlyResult<T> = Result<Option<<T as Table>::Value>, Error>;
/// Read only Cursor.
pub type CursorRO<'tx, T> = Cursor<'tx, RO, T>;
/// Read write cursor.

View File

@ -23,7 +23,7 @@ pub type NumTransactions = u64;
pub struct StoredBlockBody {
/// The id of the first transaction in this block
pub start_tx_id: TxNumber,
/// The total number of transactions
/// The total number of transactions in the block
pub tx_count: NumTransactions,
}
@ -40,10 +40,20 @@ impl StoredBlockBody {
self.start_tx_id.saturating_add(self.tx_count).saturating_sub(1)
}
/// First transaction index.
pub fn first_tx_index(&self) -> TxNumber {
self.start_tx_id
}
/// Return whether the block is empty
pub fn is_empty(&self) -> bool {
self.tx_count == 0
}
/// Return the number of transactions in the block
pub fn tx_count(&self) -> NumTransactions {
self.tx_count
}
}
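// Index arithmetic, illustratively: a body with `start_tx_id = 10` and
// `tx_count = 3` covers transaction ids 10, 11 and 12, so `first_tx_index()`
// is 10, `last_tx_index()` is 12, and `is_empty()` is false.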
/// The storage representation of a block's ommers.

View File

@ -33,6 +33,7 @@ parking_lot = { version = "0.12", optional = true }
[dev-dependencies]
reth-db = { path = "../db", features = ["test-utils"] }
reth-primitives = { path = "../../primitives", features = ["arbitrary"] }
parking_lot = "0.12"
proptest = { version = "1.0" }
assert_matches = "1.5"

View File

@ -7,7 +7,7 @@ use std::collections::BTreeMap;
/// Execution result containing a vector of transaction changesets
/// and the block reward, if present
#[derive(Debug)]
#[derive(Debug, Default, Eq, PartialEq, Clone)]
pub struct ExecutionResult {
/// Transaction changeset containing [Receipt], changed [Accounts][Account] and Storages.
pub tx_changesets: Vec<TransactionChangeSet>,
@ -20,7 +20,7 @@ pub struct ExecutionResult {
/// transaction [Receipt] every change to state ([Account], Storage, [Bytecode])
/// that this transaction made and its old values
/// so that history account table can be updated.
#[derive(Debug, Clone)]
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct TransactionChangeSet {
/// Transaction receipt
pub receipt: Receipt,
@ -33,7 +33,9 @@ pub struct TransactionChangeSet {
/// Contains old/new account changes
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum AccountInfoChangeSet {
/// The account is newly created.
/// The account is newly created. An account can be created just by sending it balance.
///
/// The revert of this changeset is an empty account.
Created {
/// The newly created account.
new: Account,
@ -41,11 +43,15 @@ pub enum AccountInfoChangeSet {
/// An account was deleted (selfdestructed) or we have touched
/// an empty account and we need to remove/destroy it.
/// (Look at state clearing [EIP-158](https://eips.ethereum.org/EIPS/eip-158))
///
/// The revert of this changeset is the old account.
Destroyed {
/// The account that was destroyed.
old: Account,
},
/// The account was changed.
///
/// The revert of this changeset is the old account.
Changed {
/// The account after the change.
new: Account,
@ -54,12 +60,34 @@ pub enum AccountInfoChangeSet {
},
/// Nothing was changed for the account (nonce/balance).
NoChange {
/// Useful to clear existing empty accounts pre-EIP-161.
/// Used to clear existing empty accounts pre-EIP-161.
is_empty: bool,
},
}
impl Default for AccountInfoChangeSet {
fn default() -> Self {
AccountInfoChangeSet::NoChange { is_empty: false }
}
}
impl AccountInfoChangeSet {
/// Create a new account info changeset from the old and new account states.
pub fn new(old: Option<Account>, new: Option<Account>) -> Self {
match (old, new) {
(Some(old), Some(new)) => {
if new != old {
Self::Changed { new, old }
} else {
Self::NoChange { is_empty: new.is_empty() }
}
}
(None, Some(new)) => Self::Created { new },
(Some(old), None) => Self::Destroyed { old },
(None, None) => Self::NoChange { is_empty: false },
}
}
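// Variant mapping at a glance (illustrative):
//   (None,    Some(a)) -> Created { new: a }
//   (Some(a), None)    -> Destroyed { old: a }
//   (Some(a), Some(b)) -> Changed { old: a, new: b } when a != b
//   otherwise          -> NoChange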
/// Apply the changes from the changeset to a database transaction.
pub fn apply_to_db<'a, TX: DbTxMut<'a>>(
self,
@ -108,7 +136,7 @@ impl AccountInfoChangeSet {
}
/// Diff change set that is needed for creating history index and updating current world state.
#[derive(Debug, Clone)]
#[derive(Debug, Default, Eq, PartialEq, Clone)]
pub struct AccountChangeSet {
/// Old and new account info change.
pub account: AccountInfoChangeSet,

View File

@ -40,8 +40,8 @@ pub struct ShareableDatabase<DB> {
impl<DB> ShareableDatabase<DB> {
/// Create a new database provider
pub fn new(db: DB, chain_spec: ChainSpec) -> Self {
Self { db, chain_spec: Arc::new(chain_spec) }
pub fn new(db: DB, chain_spec: Arc<ChainSpec>) -> Self {
Self { db, chain_spec }
}
}
@ -366,6 +366,8 @@ impl<DB: Database> StateProviderFactory for ShareableDatabase<DB> {
#[cfg(test)]
mod tests {
use std::sync::Arc;
use super::ShareableDatabase;
use crate::{BlockIdProvider, StateProviderFactory};
use reth_db::mdbx::{test_utils::create_test_db, EnvKind, WriteMap};
@ -375,7 +377,7 @@ mod tests {
fn common_history_provider() {
let chain_spec = ChainSpecBuilder::mainnet().build();
let db = create_test_db::<WriteMap>(EnvKind::RW);
let provider = ShareableDatabase::new(db, chain_spec);
let provider = ShareableDatabase::new(db, Arc::new(chain_spec));
let _ = provider.latest();
}
@ -383,7 +385,7 @@ mod tests {
fn default_chain_info() {
let chain_spec = ChainSpecBuilder::mainnet().build();
let db = create_test_db::<WriteMap>(EnvKind::RW);
let provider = ShareableDatabase::new(db, chain_spec);
let provider = ShareableDatabase::new(db, Arc::new(chain_spec));
let chain_info = provider.chain_info().expect("should be ok");
assert_eq!(chain_info.best_number, 0);

View File

@ -126,7 +126,7 @@ impl<'a, 'b, TX: DbTx<'a>> StateProvider for HistoricalStateProviderRef<'a, 'b,
_address: Address,
_keys: &[H256],
) -> Result<(Vec<Bytes>, H256, Vec<Vec<Bytes>>)> {
todo!("this should retrieve past state info and generate proof")
Err(ProviderError::HistoryStateRoot.into())
}
}

View File

@ -77,7 +77,7 @@ impl<'a, 'b, TX: DbTx<'a>> StateProvider for LatestStateProviderRef<'a, 'b, TX>
let (account_proof, storage_root) = loader
.generate_acount_proof(root, hashed_address)
.map_err(|_| ProviderError::StateTree)?;
.map_err(|_| ProviderError::StateTrie)?;
let account_proof = account_proof.into_iter().map(Bytes::from).collect();
let storage_proof = if storage_root == KECCAK_EMPTY {
@ -87,7 +87,7 @@ impl<'a, 'b, TX: DbTx<'a>> StateProvider for LatestStateProviderRef<'a, 'b, TX>
let hashed_keys: Vec<H256> = keys.iter().map(keccak256).collect();
loader
.generate_storage_proofs(storage_root, hashed_address, &hashed_keys)
.map_err(|_| ProviderError::StateTree)?
.map_err(|_| ProviderError::StateTrie)?
.into_iter()
.map(|v| v.into_iter().map(Bytes::from).collect())
.collect()

View File

@ -0,0 +1,146 @@
//! Dummy blocks and data for tests
use crate::{
execution_result::{
AccountChangeSet, AccountInfoChangeSet, ExecutionResult, TransactionChangeSet,
},
Transaction,
};
use reth_db::{database::Database, models::StoredBlockBody, tables};
use reth_primitives::{
hex_literal::hex, proofs::EMPTY_ROOT, Account, Header, Receipt, SealedBlock,
SealedBlockWithSenders, Withdrawal, H160, H256, U256,
};
use reth_rlp::Decodable;
use std::collections::BTreeMap;
/// Assert genesis block
pub fn assert_genesis_block<DB: Database>(tx: &Transaction<'_, DB>, g: SealedBlock) {
let n = g.number;
let h = H256::zero();
// check the genesis contents of all tables
assert_eq!(tx.table::<tables::Headers>().unwrap(), vec![(g.number, g.header.clone().unseal())]);
assert_eq!(tx.table::<tables::HeaderNumbers>().unwrap(), vec![(h, n)]);
assert_eq!(tx.table::<tables::CanonicalHeaders>().unwrap(), vec![(n, h)]);
assert_eq!(tx.table::<tables::HeaderTD>().unwrap(), vec![(n, g.difficulty.into())]);
assert_eq!(tx.table::<tables::BlockBodies>().unwrap(), vec![(0, StoredBlockBody::default())]);
assert_eq!(tx.table::<tables::BlockOmmers>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::BlockWithdrawals>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::Transactions>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::TxHashNumber>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::Receipts>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::PlainAccountState>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::PlainStorageState>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::AccountHistory>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::StorageHistory>().unwrap(), vec![]);
// TODO check after this gets done: https://github.com/paradigmxyz/reth/issues/1588
// Bytecodes are not reverted assert_eq!(tx.table::<tables::Bytecodes>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::BlockTransitionIndex>().unwrap(), vec![(n, 0)]);
assert_eq!(tx.table::<tables::TxTransitionIndex>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::AccountChangeSet>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::StorageChangeSet>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::HashedAccount>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::HashedStorage>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::AccountsTrie>().unwrap(), vec![(EMPTY_ROOT, vec![0x80])]);
assert_eq!(tx.table::<tables::StoragesTrie>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::TxSenders>().unwrap(), vec![]);
// SyncStage is not updated in tests
}
/// Test chain with genesis, blocks, and execution results
/// that have correct changesets.
pub struct BlockChainTestData {
/// Genesis
pub genesis: SealedBlock,
/// Blocks with its execution result
pub blocks: Vec<(SealedBlockWithSenders, ExecutionResult)>,
}
impl Default for BlockChainTestData {
fn default() -> Self {
Self { genesis: genesis(), blocks: vec![block1(), block2()] }
}
}
/// Genesis block
pub fn genesis() -> SealedBlock {
SealedBlock {
header: Header { number: 0, difficulty: U256::from(1), ..Default::default() }
.seal(H256::zero()),
body: vec![],
ommers: vec![],
withdrawals: Some(vec![]),
}
}
/// Block one that points to genesis
fn block1() -> (SealedBlockWithSenders, ExecutionResult) {
let mut block_rlp = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice();
let mut block = SealedBlock::decode(&mut block_rlp).unwrap();
block.withdrawals = Some(vec![Withdrawal::default()]);
let mut header = block.header.clone().unseal();
header.number = 1;
header.state_root =
H256(hex!("5d035ccb3e75a9057452ff060b773b213ec1fc353426174068edfc3971a0b6bd"));
header.parent_hash = H256::zero();
block.header = header.seal_slow();
let mut account_changeset = AccountChangeSet {
account: AccountInfoChangeSet::Created {
new: Account { nonce: 1, balance: U256::from(10), bytecode_hash: None },
},
..Default::default()
};
account_changeset.storage.insert(U256::from(5), (U256::ZERO, U256::from(10)));
let exec_res = ExecutionResult {
tx_changesets: vec![TransactionChangeSet {
receipt: Receipt::default(), /* receipts are not saved. */
changeset: BTreeMap::from([(H160([0x60; 20]), account_changeset.clone())]),
new_bytecodes: BTreeMap::from([]),
}],
block_changesets: BTreeMap::from([(H160([0x61; 20]), account_changeset.account)]),
};
(SealedBlockWithSenders { block, senders: vec![H160([0x30; 20])] }, exec_res)
}
/// Block two that points to block 1
fn block2() -> (SealedBlockWithSenders, ExecutionResult) {
let mut block_rlp = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice();
let mut block = SealedBlock::decode(&mut block_rlp).unwrap();
block.withdrawals = Some(vec![Withdrawal::default()]);
let mut header = block.header.clone().unseal();
header.number = 2;
header.state_root =
H256(hex!("90101a13dd059fa5cca99ed93d1dc23657f63626c5b8f993a2ccbdf7446b64f8"));
// parent_hash points to block1 hash
header.parent_hash =
H256(hex!("d846db2ab174c492cfe985c18fa75b154e20572bc33bb1c67cf5d2995791bdb7"));
block.header = header.seal_slow();
let mut account_changeset = AccountChangeSet::default();
// storage will be moved
let info_changeset = AccountInfoChangeSet::Changed {
old: Account { nonce: 1, balance: U256::from(10), bytecode_hash: None },
new: Account { nonce: 2, balance: U256::from(15), bytecode_hash: None },
};
account_changeset.account = info_changeset;
account_changeset.storage.insert(U256::from(5), (U256::from(10), U256::from(15)));
let block_changeset = AccountInfoChangeSet::Changed {
old: Account { nonce: 2, balance: U256::from(15), bytecode_hash: None },
new: Account { nonce: 3, balance: U256::from(20), bytecode_hash: None },
};
let exec_res = ExecutionResult {
tx_changesets: vec![TransactionChangeSet {
receipt: Receipt::default(), /* receipts are not saved. */
changeset: BTreeMap::from([(H160([0x60; 20]), account_changeset.clone())]),
new_bytecodes: BTreeMap::from([]),
}],
block_changesets: BTreeMap::from([(H160([0x60; 20]), block_changeset)]),
};
(SealedBlockWithSenders { block, senders: vec![H160([0x31; 20])] }, exec_res)
}
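// A minimal sketch (illustrative, not part of this commit) of how these fixtures
// are typically consumed; `tx` and `chain_spec` are assumed to be set up as in
// the `insert_get_take` test elsewhere in this commit.
//
//     let data = BlockChainTestData::default();
//     let (block1, exec_res1) = data.blocks[0].clone();
//     insert_canonical_block(tx.deref_mut(), data.genesis.clone(), None, false)?;
//     tx.insert_block(block1, &chain_spec, exec_res1)?;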

View File

@ -1,3 +1,4 @@
pub mod blocks;
mod mock;
mod noop;

View File

@ -3,7 +3,7 @@ use reth_interfaces::Result;
use reth_primitives::{Account, Address};
/// Account provider
#[auto_impl(&)]
#[auto_impl(&,Box)]
pub trait AccountProvider: Send + Sync {
/// Get basic account information.
fn basic_account(&self, address: Address) -> Result<Option<Account>>;

View File

@ -3,7 +3,7 @@ use reth_interfaces::Result;
use reth_primitives::{H256, U256};
/// Client trait for fetching block hashes by number.
#[auto_impl(&, Arc)]
#[auto_impl(&, Arc, Box)]
pub trait BlockHashProvider: Send + Sync {
/// Get the hash of the block with the given number. Returns `None` if no block with this number
/// exists.

View File

@ -1,5 +1,6 @@
use itertools::Itertools;
use itertools::{izip, Itertools};
use reth_db::{
common::KeyValue,
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO},
database::{Database, DatabaseGAT},
models::{
@ -9,22 +10,24 @@ use reth_db::{
},
table::Table,
tables,
transaction::{DbTx, DbTxMut},
transaction::{DbTx, DbTxMut, DbTxMutGAT},
TransitionList,
};
use reth_interfaces::{db::Error as DbError, provider::ProviderError};
use reth_primitives::{
keccak256, Account, Address, BlockHash, BlockNumber, Bytecode, ChainSpec, Hardfork, Header,
SealedBlock, StorageEntry, TransitionId, TxNumber, H256, U256,
keccak256, proofs::EMPTY_ROOT, Account, Address, BlockHash, BlockNumber, Bytecode, ChainSpec,
Hardfork, Header, Receipt, SealedBlock, SealedBlockWithSenders, StorageEntry,
TransactionSignedEcRecovered, TransitionId, TxNumber, H256, U256,
};
use reth_tracing::tracing::{info, trace};
use std::{
collections::{BTreeMap, BTreeSet},
collections::{btree_map::Entry, BTreeMap, BTreeSet},
fmt::Debug,
ops::{Deref, DerefMut},
ops::{Bound, Deref, DerefMut, Range, RangeBounds},
};
use crate::{
execution_result::{AccountInfoChangeSet, TransactionChangeSet},
insert_canonical_block,
trie::{DBTrieLoader, TrieError},
};
@ -274,19 +277,261 @@ impl<'this, DB> Transaction<'this, DB>
where
DB: Database,
{
/// Get the requested blocks' transactions with recovered signers
pub fn get_block_transaction_range(
&self,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<(BlockNumber, Vec<TransactionSignedEcRecovered>)>, TransactionError> {
self.get_take_block_transaction_range::<false>(range)
}
/// Take the requested blocks' transactions with recovered signers, removing them from the database
pub fn take_block_transaction_range(
&self,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<(BlockNumber, Vec<TransactionSignedEcRecovered>)>, TransactionError> {
self.get_take_block_transaction_range::<true>(range)
}
/// Return a range of sealed blocks with recovered senders
pub fn get_block_range(
&self,
chain_spec: &ChainSpec,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<SealedBlockWithSenders>, TransactionError> {
self.get_take_block_range::<false>(chain_spec, range)
}
/// Take a range of sealed blocks with recovered senders, removing them from the database
pub fn take_block_range(
&self,
chain_spec: &ChainSpec,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<SealedBlockWithSenders>, TransactionError> {
self.get_take_block_range::<true>(chain_spec, range)
}
/// Traverse changesets and plain state and recreate the execution results.
///
/// Return results from the database.
pub fn get_block_execution_result_range(
&self,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<ExecutionResult>, TransactionError> {
self.get_take_block_execution_result_range::<false>(range)
}
/// Traverse changesets and plain state and recreate the execution results.
///
/// Get results and remove them from the database.
pub fn take_block_execution_result_range(
&self,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<ExecutionResult>, TransactionError> {
self.get_take_block_execution_result_range::<true>(range)
}
/// Get a range of blocks and their execution results
pub fn get_block_and_execution_range(
&self,
chain_spec: &ChainSpec,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<(SealedBlockWithSenders, ExecutionResult)>, TransactionError> {
self.get_take_block_and_execution_range::<false>(chain_spec, range)
}
/// Take a range of blocks and their execution results, removing them from the database
pub fn take_block_and_execution_range(
&self,
chain_spec: &ChainSpec,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<(SealedBlockWithSenders, ExecutionResult)>, TransactionError> {
self.get_take_block_and_execution_range::<true>(chain_spec, range)
}
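// Illustrative contrast between the get and take variants (assumes `tx` over a
// populated database, as in the tests in this commit): `get_*` leaves the chain
// intact, while `take_*` unwinds hashes, history indices, and plain state and
// removes the blocks, so a second call over the same range returns an empty Vec.
//
//     let peeked = tx.get_block_and_execution_range(&chain_spec, 1..=2)?;
//     let removed = tx.take_block_and_execution_range(&chain_spec, 1..=2)?;
//     assert_eq!(peeked, removed);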
/// Unwind and clear account hashing
pub fn unwind_account_hashing(
&self,
range: Range<TransitionId>,
) -> Result<(), TransactionError> {
let mut hashed_accounts = self.cursor_write::<tables::HashedAccount>()?;
// Aggregate all transition changesets and make a list of accounts that have been changed.
self.cursor_read::<tables::AccountChangeSet>()?
.walk_range(range)?
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.rev()
// fold all accounts to get the old balances/nonces and the accounts that need to be removed
.fold(
BTreeMap::new(),
|mut accounts: BTreeMap<Address, Option<Account>>, (_, account_before)| {
accounts.insert(account_before.address, account_before.info);
accounts
},
)
.into_iter()
// hash addresses and collect them inside a sorted BTreeMap.
// We compute keccak only once per address.
.map(|(address, account)| (keccak256(address), account))
.collect::<BTreeMap<_, _>>()
.into_iter()
// Apply values to HashedState (if the account is None, remove it).
.try_for_each(|(hashed_address, account)| -> Result<(), TransactionError> {
if let Some(account) = account {
hashed_accounts.upsert(hashed_address, account)?;
} else if hashed_accounts.seek_exact(hashed_address)?.is_some() {
hashed_accounts.delete_current()?;
}
Ok(())
})?;
Ok(())
}
/// Unwind and clear storage hashing
pub fn unwind_storage_hashing(
&self,
range: Range<TransitionIdAddress>,
) -> Result<(), TransactionError> {
let mut hashed_storage = self.cursor_dup_write::<tables::HashedStorage>()?;
// Aggregate all transition changesets and make a list of storage entries that have been changed.
self.cursor_read::<tables::StorageChangeSet>()?
.walk_range(range)?
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.rev()
// fold all storage entries to get the old values and the entries that need to be removed
.fold(
BTreeMap::new(),
|mut accounts: BTreeMap<(Address, H256), U256>,
(TransitionIdAddress((_, address)), storage_entry)| {
accounts.insert((address, storage_entry.key), storage_entry.value);
accounts
},
)
.into_iter()
// hash addresses and storage keys and collect them inside a sorted BTreeMap.
// We compute keccak only once per (address, key) pair.
.map(|((address, key), value)| ((keccak256(address), keccak256(key)), value))
.collect::<BTreeMap<_, _>>()
.into_iter()
// Apply values to HashedStorage (if the value is zero, just remove it).
.try_for_each(|((hashed_address, key), value)| -> Result<(), TransactionError> {
if hashed_storage
.seek_by_key_subkey(hashed_address, key)?
.filter(|entry| entry.key == key)
.is_some()
{
hashed_storage.delete_current()?;
}
if value != U256::ZERO {
hashed_storage.upsert(hashed_address, StorageEntry { key, value })?;
}
Ok(())
})?;
Ok(())
}
/// Unwind and clear account history indices
pub fn unwind_account_history_indices(
&self,
range: Range<TransitionId>,
) -> Result<(), TransactionError> {
let mut cursor = self.cursor_write::<tables::AccountHistory>()?;
let account_changeset = self
.cursor_read::<tables::AccountChangeSet>()?
.walk_range(range)?
.collect::<Result<Vec<_>, _>>()?;
let last_indices = account_changeset
.into_iter()
// reverse so we can get the lowest transition id where we need to unwind the account.
.rev()
// fold all accounts and get the last transition index
.fold(BTreeMap::new(), |mut accounts: BTreeMap<Address, u64>, (index, account)| {
// we just need the address and the lowest transition id.
accounts.insert(account.address, index);
accounts
});
// try to unwind the index
for (address, rem_index) in last_indices {
let shard_part = unwind_account_history_shards::<DB>(&mut cursor, address, rem_index)?;
// check the last shard_part; if present, its items need to be reinserted.
if !shard_part.is_empty() {
// there are items in list
self.put::<tables::AccountHistory>(
ShardedKey::new(address, u64::MAX),
TransitionList::new(shard_part)
.expect("There is at least one element in list and it is sorted."),
)?;
}
}
Ok(())
}
/// Unwind and clear storage history indices
pub fn unwind_storage_history_indices(
&self,
range: Range<TransitionIdAddress>,
) -> Result<(), TransactionError> {
let mut cursor = self.cursor_write::<tables::StorageHistory>()?;
let storage_changesets = self
.cursor_read::<tables::StorageChangeSet>()?
.walk_range(range)?
.collect::<Result<Vec<_>, _>>()?;
let last_indices = storage_changesets
.into_iter()
// reverse so we can get the lowest transition id where we need to unwind the storage.
.rev()
// fold all storages and get the last transition index
.fold(
BTreeMap::new(),
|mut accounts: BTreeMap<(Address, H256), u64>, (index, storage)| {
// we just need the (address, storage key) pair and the lowest transition id.
accounts.insert((index.address(), storage.key), index.transition_id());
accounts
},
);
for ((address, storage_key), rem_index) in last_indices {
let shard_part =
unwind_storage_history_shards::<DB>(&mut cursor, address, storage_key, rem_index)?;
// check the last shard_part; if present, its items need to be reinserted.
if !shard_part.is_empty() {
// there are items in list
self.put::<tables::StorageHistory>(
StorageShardedKey::new(address, storage_key, u64::MAX),
TransitionList::new(shard_part)
.expect("There is at least one element in list and it is sorted."),
)?;
}
}
Ok(())
}
/// Insert full block and make it canonical
///
/// This is an atomic operation; the transaction will do a single commit at the end of the function.
pub fn insert_block(
&mut self,
block: &SealedBlock,
block: SealedBlockWithSenders,
chain_spec: &ChainSpec,
changeset: ExecutionResult,
) -> Result<(), TransactionError> {
// Header, Body, SenderRecovery, TD, TxLookup stages
let (from, to) = insert_canonical_block(self.deref_mut(), block, false).unwrap();
let (block, senders) = block.into_components();
let block_number = block.number;
let block_state_root = block.state_root;
let block_hash = block.hash();
let parent_block_number = block.number.saturating_sub(1);
let parent_block_number = block.number - 1;
let (from, to) =
insert_canonical_block(self.deref_mut(), block, Some(senders), false).unwrap();
// execution stage
self.insert_execution_result(vec![changeset], chain_spec, parent_block_number)?;
@ -310,12 +555,12 @@ where
let current_root = self.get_header(parent_block_number)?.state_root;
let mut loader = DBTrieLoader::new(self.deref_mut());
let root = loader.update_root(current_root, from..to).and_then(|e| e.root())?;
if root != block.state_root {
if root != block_state_root {
return Err(TransactionError::StateTrieRootMismatch {
got: root,
expected: block.state_root,
block_number: block.number,
block_hash: block.hash(),
expected: block_state_root,
block_number,
block_hash,
})
}
}
@ -332,8 +577,488 @@ where
self.insert_storage_history_index(indices)?;
}
// commit block to database
self.commit()?;
Ok(())
}
/// Return list of entries from table
///
/// If TAKE is true, the cursor is opened as a write cursor and all walked values are deleted from the db.
#[inline]
pub fn get_or_take<T: Table, const TAKE: bool>(
&self,
range: impl RangeBounds<T::Key>,
) -> Result<Vec<KeyValue<T>>, DbError> {
if TAKE {
let mut cursor_write = self.cursor_write::<T>()?;
let mut walker = cursor_write.walk_range(range)?;
let mut items = Vec::new();
while let Some(i) = walker.next().transpose()? {
walker.delete_current()?;
items.push(i)
}
Ok(items)
} else {
self.cursor_read::<T>()?.walk_range(range)?.collect::<Result<Vec<_>, _>>()
}
}
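// Usage sketch (illustrative, not part of this commit): the const generic
// selects between reading and draining the walked range; rows are `KeyValue<T>`.
//
//     let kept = self.get_or_take::<tables::Headers, false>(0..=10)?;  // rows stay
//     let taken = self.get_or_take::<tables::Headers, true>(0..=10)?;  // rows deleted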
/// Get the requested blocks' transactions with recovered signers
fn get_take_block_transaction_range<const TAKE: bool>(
&self,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<(BlockNumber, Vec<TransactionSignedEcRecovered>)>, TransactionError> {
// Just read the block tx ids from the table, as they are needed to get execution results.
let block_bodies = self.get_or_take::<tables::BlockBodies, false>(range)?;
if block_bodies.is_empty() {
return Ok(Vec::new())
}
// iterate over and get all transactions and signers
let first_transaction =
block_bodies.first().expect("If we have headers").1.first_tx_index();
let last_transaction = block_bodies.last().expect("Not empty").1.last_tx_index();
let transactions =
self.get_or_take::<tables::Transactions, TAKE>(first_transaction..=last_transaction)?;
let senders =
self.get_or_take::<tables::TxSenders, TAKE>(first_transaction..=last_transaction)?;
if TAKE {
// rm TxHashNumber
let mut tx_hash_cursor = self.cursor_write::<tables::TxHashNumber>()?;
for (_, tx) in transactions.iter() {
if tx_hash_cursor.seek_exact(tx.hash())?.is_some() {
tx_hash_cursor.delete_current()?;
}
}
// rm TxTransitionIndex
self.get_or_take::<tables::TxTransitionIndex, TAKE>(
first_transaction..=last_transaction,
)?;
}
// Merge transactions into blocks
let mut block_tx = Vec::new();
let mut senders = senders.into_iter();
let mut transactions = transactions.into_iter();
for (block_number, block_body) in block_bodies {
let mut one_block_tx = Vec::new();
for _ in block_body.tx_id_range() {
let tx = transactions.next();
let sender = senders.next();
let recovered = match (tx, sender) {
(Some((tx_id, tx)), Some((sender_tx_id, sender))) => {
if tx_id != sender_tx_id {
Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id })
} else {
Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender))
}
}
(Some((tx_id, _)), _) | (_, Some((tx_id, _))) => {
Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id })
}
(None, None) => Err(ProviderError::BlockBodyTransactionCount),
}?;
one_block_tx.push(recovered)
}
block_tx.push((block_number, one_block_tx));
}
Ok(block_tx)
}
/// Return or remove a range of sealed blocks with recovered senders
fn get_take_block_range<const TAKE: bool>(
&self,
chain_spec: &ChainSpec,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<SealedBlockWithSenders>, TransactionError> {
// For blocks we need: headers, bodies, ommers, withdrawals, transactions, and signers
let block_headers = self.get_or_take::<tables::Headers, TAKE>(range.clone())?;
if block_headers.is_empty() {
return Ok(Vec::new())
}
let block_header_hashes =
self.get_or_take::<tables::CanonicalHeaders, TAKE>(range.clone())?;
let block_ommers = self.get_or_take::<tables::BlockOmmers, TAKE>(range.clone())?;
let block_withdrawals =
self.get_or_take::<tables::BlockWithdrawals, TAKE>(range.clone())?;
let block_tx = self.get_take_block_transaction_range::<TAKE>(range.clone())?;
if TAKE {
// rm HeaderTD
self.get_or_take::<tables::HeaderTD, TAKE>(range)?;
// rm HeaderNumbers
let mut header_number_cursor = self.cursor_write::<tables::HeaderNumbers>()?;
for (_, hash) in block_header_hashes.iter() {
if header_number_cursor.seek_exact(*hash)?.is_some() {
header_number_cursor.delete_current()?;
}
}
}
// merge everything into blocks
let block_header_iter = block_headers.into_iter();
let block_header_hashes_iter = block_header_hashes.into_iter();
let block_tx_iter = block_tx.into_iter();
// may be missing from the tables
let mut block_ommers_iter = block_ommers.into_iter();
let mut block_withdrawals_iter = block_withdrawals.into_iter();
let mut block_ommers = block_ommers_iter.next();
let mut block_withdrawals = block_withdrawals_iter.next();
let mut blocks = Vec::new();
for ((main_block_number, header), (_, header_hash), (_, tx)) in izip!(
block_header_iter.into_iter(),
block_header_hashes_iter.into_iter(),
block_tx_iter.into_iter()
) {
let header = header.seal(header_hash);
let (body, senders) = tx.into_iter().map(|tx| tx.to_components()).unzip();
// Ommers can be missing
let mut ommers = Vec::new();
if let Some((block_number, _)) = block_ommers.as_ref() {
if *block_number == main_block_number {
// Seal ommers as they don't have a hash.
ommers = block_ommers
.take()
.unwrap()
.1
.ommers
.into_iter()
.map(|h| h.seal_slow())
.collect();
block_ommers = block_ommers_iter.next();
}
};
// withdrawals can be missing
let shanghai_is_active =
chain_spec.fork(Hardfork::Shanghai).active_at_block(main_block_number);
let mut withdrawals = Some(Vec::new());
if shanghai_is_active {
if let Some((block_number, _)) = block_withdrawals.as_ref() {
if *block_number == main_block_number {
withdrawals = Some(block_withdrawals.take().unwrap().1.withdrawals);
block_withdrawals = block_withdrawals_iter.next();
}
}
} else {
withdrawals = None
}
blocks.push(SealedBlockWithSenders {
block: SealedBlock { header, body, ommers, withdrawals },
senders,
})
}
Ok(blocks)
}
/// Traverse changesets and plain state and recreate the execution results.
fn get_take_block_execution_result_range<const TAKE: bool>(
&self,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<ExecutionResult>, TransactionError> {
let block_transition =
self.get_or_take::<tables::BlockTransitionIndex, TAKE>(range.clone())?;
if block_transition.is_empty() {
return Ok(Vec::new())
}
// get block transitions
let first_block_number =
block_transition.first().expect("Check for empty is already done").0;
// get block transition of parent block.
let from = self.get_block_transition(first_block_number.saturating_sub(1))?;
let to = block_transition.last().expect("Check for empty is already done").1;
// NOTE: Just get the block bodies, don't remove them;
// they are the connection point between the bodies getter and the execution result getter.
let block_bodies = self.get_or_take::<tables::BlockBodies, false>(range)?;
// get saved previous values
let from_storage: TransitionIdAddress = (from, Address::zero()).into();
let to_storage: TransitionIdAddress = (to, Address::zero()).into();
let storage_changeset =
self.get_or_take::<tables::StorageChangeSet, TAKE>(from_storage..to_storage)?;
let account_changeset = self.get_or_take::<tables::AccountChangeSet, TAKE>(from..to)?;
// iterate over previous values and get the plain state values to create changesets.
// The double Option around Account represents whether the account state is known (first Option)
// and whether the account was removed (second Option).
type LocalPlainState = BTreeMap<Address, (Option<Option<Account>>, BTreeMap<H256, U256>)>;
type Changesets = BTreeMap<
TransitionId,
BTreeMap<Address, (AccountInfoChangeSet, BTreeMap<H256, (U256, U256)>)>,
>;
let mut local_plain_state: LocalPlainState = BTreeMap::new();
// iterate in reverse and get the plain state.
// Bundle execution changesets to their particular transaction and block
let mut all_changesets: Changesets = BTreeMap::new();
let mut plain_accounts_cursor = self.cursor_write::<tables::PlainAccountState>()?;
let mut plain_storage_cursor = self.cursor_dup_write::<tables::PlainStorageState>()?;
// add account changeset changes
for (transition_id, account_before) in account_changeset.into_iter().rev() {
let new_info = match local_plain_state.entry(account_before.address) {
Entry::Vacant(entry) => {
let new_account =
plain_accounts_cursor.seek(account_before.address)?.map(|(_s, i)| i);
entry.insert((Some(account_before.info), BTreeMap::new()));
new_account
}
Entry::Occupied(mut entry) => {
let new_account =
std::mem::replace(&mut entry.get_mut().0, Some(account_before.info));
new_account.expect("As we are stacking account first, account would always be Some(Some) or Some(None)")
}
};
let account_info_changeset = AccountInfoChangeSet::new(account_before.info, new_info);
// insert the changeset for the transition id. Multiple accounts for the same transition id are
// not possible.
all_changesets
.entry(transition_id)
.or_default()
.entry(account_before.address)
.or_default()
.0 = account_info_changeset
}
// add storage changeset changes
for (transition_and_address, storage_entry) in storage_changeset.into_iter().rev() {
let TransitionIdAddress((transition_id, address)) = transition_and_address;
let new_storage =
match local_plain_state.entry(address).or_default().1.entry(storage_entry.key) {
Entry::Vacant(entry) => {
let new_storage = plain_storage_cursor
.seek_by_key_subkey(address, storage_entry.key)?
.filter(|storage| storage.key == storage_entry.key)
.unwrap_or_default();
entry.insert(storage_entry.value);
new_storage.value
}
Entry::Occupied(mut entry) => {
std::mem::replace(entry.get_mut(), storage_entry.value)
}
};
all_changesets
.entry(transition_id)
.or_default()
.entry(address)
.or_default()
.1
.insert(storage_entry.key, (storage_entry.value, new_storage));
}
if TAKE {
// iterate over the local plain state and revert all accounts and storages.
for (address, (account, storage)) in local_plain_state.into_iter() {
// revert account
if let Some(account) = account {
plain_accounts_cursor.seek_exact(address)?;
if let Some(account) = account {
plain_accounts_cursor.upsert(address, account)?;
} else {
plain_accounts_cursor.delete_current()?;
}
}
// revert storages
for (storage_key, storage_value) in storage.into_iter() {
let storage_entry = StorageEntry { key: storage_key, value: storage_value };
// delete previous value
if plain_storage_cursor
.seek_by_key_subkey(address, storage_key)?
.filter(|s| s.key == storage_key)
.is_some()
{
plain_storage_cursor.delete_current()?
}
// insert value if needed
if storage_value != U256::ZERO {
plain_storage_cursor.insert(address, storage_entry)?;
}
}
}
}
// NOTE: Some storage changesets can be empty;
// all account changesets have at least the beneficiary fee transfer.
// iterate over block bodies and create ExecutionResults
let mut block_exec_results = Vec::new();
let mut changeset_iter = all_changesets.into_iter();
let mut block_transition_iter = block_transition.into_iter();
let mut next_transition_id = from;
let mut next_changeset = changeset_iter.next().unwrap_or_default();
// the loop breaks once we are past the last block.
for (_, block_body) in block_bodies.into_iter() {
let mut block_exec_res = ExecutionResult::default();
for _ in 0..block_body.tx_count {
// take the changeset only if it belongs to the current transition id
let changeset = if next_transition_id == next_changeset.0 {
let changeset = next_changeset
.1
.into_iter()
.map(|(address, (account, storage))| {
(
address,
AccountChangeSet {
account,
storage: storage
.into_iter()
.map(|(key, val)| (U256::from_be_bytes(key.0), val))
.collect(),
wipe_storage: false, /* it is always false as all storage
* changesets for selfdestruct are
* already accounted for. */
},
)
})
.collect();
next_changeset = changeset_iter.next().unwrap_or_default();
changeset
} else {
BTreeMap::new()
};
next_transition_id += 1;
block_exec_res.tx_changesets.push(TransactionChangeSet {
receipt: Receipt::default(), /* TODO(receipt) when they are saved, load them
* from db */
changeset,
new_bytecodes: Default::default(), /* TODO(bytecode), bytecode is not cleared
* so it is the same as previous. */
});
}
let Some((_, block_transition)) = block_transition_iter.next() else { break };
// if the block transition points to `next_transition_id + 1` it means that there is a block
// changeset.
if block_transition == next_transition_id + 1 {
// the remaining changeset at this transition id is the block-level changeset
if next_transition_id == next_changeset.0 {
// take block changeset
block_exec_res.block_changesets = next_changeset
.1
.into_iter()
.map(|(address, (account, _))| (address, account))
.collect();
next_changeset = changeset_iter.next().unwrap_or_default();
}
next_transition_id += 1;
}
block_exec_results.push(block_exec_res)
}
Ok(block_exec_results)
}
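// Worked example (hypothetical numbers) of the transition bookkeeping above:
// if the parent block's transition is 10, a block with two transactions
// consumes transition ids 10 and 11 for its tx changesets. If its
// BlockTransitionIndex entry is 13 (== next_transition_id + 1 after the txs,
// i.e. 12 + 1), transition 12 holds the block-level changeset (block reward /
// withdrawals), which is drained into `block_changesets`.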
/// Get or take a range of blocks with their execution results.
pub fn get_take_block_and_execution_range<const TAKE: bool>(
&self,
chain_spec: &ChainSpec,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<(SealedBlockWithSenders, ExecutionResult)>, TransactionError> {
if TAKE {
let (from_transition, parent_number, parent_state_root) = match range.start_bound() {
Bound::Included(n) => {
let parent_number = n.saturating_sub(1);
let transition = self.get_block_transition(parent_number)?;
let parent = self.get_header(parent_number)?;
(transition, parent_number, parent.state_root)
}
Bound::Excluded(n) => {
let transition = self.get_block_transition(*n)?;
let parent = self.get_header(*n)?;
(transition, *n, parent.state_root)
}
Bound::Unbounded => (0, 0, EMPTY_ROOT),
};
let to_transition = match range.end_bound() {
Bound::Included(n) => self.get_block_transition(*n)?,
Bound::Excluded(n) => self.get_block_transition(n.saturating_sub(1))?,
Bound::Unbounded => TransitionId::MAX,
};
let transition_range = from_transition..to_transition;
let zero = Address::zero();
let transition_storage_range =
(from_transition, zero).into()..(to_transition, zero).into();
self.unwind_account_hashing(transition_range.clone())?;
self.unwind_account_history_indices(transition_range.clone())?;
self.unwind_storage_hashing(transition_storage_range.clone())?;
self.unwind_storage_history_indices(transition_storage_range)?;
// recompute the merkle trie root
let new_state_root;
{
let (tip_number, _) =
self.cursor_read::<tables::CanonicalHeaders>()?.last()?.unwrap_or_default();
let current_root = self.get_header(tip_number)?.state_root;
let mut loader = DBTrieLoader::new(self.deref());
new_state_root =
loader.update_root(current_root, transition_range).and_then(|e| e.root())?;
}
// the state root should always be correct as we are reverting state,
// but for the sake of double verification we check it again.
if new_state_root != parent_state_root {
let parent_hash = self.get_block_hash(parent_number)?;
return Err(TransactionError::StateTrieRootMismatch {
got: new_state_root,
expected: parent_state_root,
block_number: parent_number,
block_hash: parent_hash,
})
}
}
// get blocks
let blocks = self.get_take_block_range::<TAKE>(chain_spec, range.clone())?;
// get execution res
let execution_res = self.get_take_block_execution_result_range::<TAKE>(range.clone())?;
// combine them
let blocks_with_exec_result: Vec<_> =
blocks.into_iter().zip(execution_res.into_iter()).collect();
// remove block bodies; they are needed by both the block range getter and the execution results
// getter, which is why they are deleted afterwards.
if TAKE {
// rm block bodies
self.get_or_take::<tables::BlockBodies, TAKE>(range)?;
}
// return them
Ok(blocks_with_exec_result)
}
/// Update all pipeline sync stage progress.
pub fn update_pipeline_stages(
&self,
block_number: BlockNumber,
) -> Result<(), TransactionError> {
// iterate over all sync stages and update their progress
let mut cursor = self.cursor_write::<tables::SyncStage>()?;
while let Some((stage_name, _)) = cursor.next()? {
cursor.upsert(stage_name, block_number)?
}
Ok(())
}
@ -741,6 +1466,90 @@ where
}
Ok(())
}
/// Return full table as Vec
pub fn table<T: Table>(&self) -> Result<Vec<KeyValue<T>>, DbError>
where
T::Key: Default + Ord,
{
self.cursor_read::<T>()?.walk(Some(T::Key::default()))?.collect::<Result<Vec<_>, DbError>>()
}
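// Example (illustrative, assumes a test database): dump a whole table for
// assertions, as `assert_genesis_block` does in the test utilities:
//
//     let headers = tx.table::<tables::Headers>()?;
//     assert_eq!(headers.len(), 1);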
}
/// Unwind all history shards. For the boundary shard, remove it from the database and
/// return the last part of the shard with still-valid items. If all full shards were removed,
/// the returned list will be empty.
fn unwind_account_history_shards<DB: Database>(
cursor: &mut <<DB as DatabaseGAT<'_>>::TXMut as DbTxMutGAT<'_>>::CursorMut<
tables::AccountHistory,
>,
address: Address,
transition_id: TransitionId,
) -> Result<Vec<usize>, TransactionError> {
let mut item = cursor.seek_exact(ShardedKey::new(address, u64::MAX))?;
while let Some((sharded_key, list)) = item {
// there is no more shard for address
if sharded_key.key != address {
break
}
cursor.delete_current()?;
// check the first item; if it is greater than or equal to `transition_id`, delete the current
// item.
let first = list.iter(0).next().expect("List can't empty");
if first >= transition_id as usize {
item = cursor.prev()?;
continue
} else if transition_id <= sharded_key.highest_transition_id {
// `transition_id` falls inside this shard, so only the items below it survive;
// the first element is guaranteed to be among them.
return Ok(list.iter(0).take_while(|i| *i < transition_id as usize).collect::<Vec<_>>())
} else {
let new_list = list.iter(0).collect::<Vec<_>>();
return Ok(new_list)
}
}
Ok(Vec::new())
}
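// Shard layout sketch (illustrative): history for an address is stored as one
// or more `ShardedKey(address, highest_transition_id)` entries, with the live
// shard keyed by `u64::MAX`. Unwinding to transition id 15 over shards
// `[..=10] [..=20] [u64::MAX]` deletes the last two shards and returns the
// items of the `[..=20]` shard that are `< 15`; the caller reinserts them
// under `u64::MAX`.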
/// Unwind all history shards. For the boundary shard, remove it from the database and
/// return the last part of the shard with still-valid items. If all full shards were removed,
/// the returned list will be empty; this does not mean that no shards are left, only that no
/// shard had to be split.
fn unwind_storage_history_shards<DB: Database>(
cursor: &mut <<DB as DatabaseGAT<'_>>::TXMut as DbTxMutGAT<'_>>::CursorMut<
tables::StorageHistory,
>,
address: Address,
storage_key: H256,
transition_id: TransitionId,
) -> Result<Vec<usize>, TransactionError> {
let mut item = cursor.seek_exact(StorageShardedKey::new(address, storage_key, u64::MAX))?;
while let Some((storage_sharded_key, list)) = item {
// there is no more shard for address
if storage_sharded_key.address != address ||
storage_sharded_key.sharded_key.key != storage_key
{
// there is no more shard for address and storage_key.
break
}
cursor.delete_current()?;
// check the first item; if it is greater than or equal to `transition_id`, delete the current
// item.
let first = list.iter(0).next().expect("List can't empty");
if first >= transition_id as usize {
item = cursor.prev()?;
continue
} else if transition_id <= storage_sharded_key.sharded_key.highest_transition_id {
// `transition_id` falls inside this shard, so only the items below it survive;
// the first element is guaranteed to be among them.
return Ok(list.iter(0).take_while(|i| *i < transition_id as usize).collect::<Vec<_>>())
} else {
return Ok(list.iter(0).collect::<Vec<_>>())
}
}
Ok(Vec::new())
}
/// An error that can occur when using the transaction container
@ -756,7 +1565,7 @@ pub enum TransactionError {
#[error("Merkle trie calculation error: {0}")]
MerkleTrie(#[from] TrieError),
/// Root mismatch
#[error("Merkle trie root mismatch on block: #{block_number:?} {block_hash:?}. got: {got:?} expected:{got:?}")]
#[error("Merkle trie root mismatch on block: #{block_number:?} {block_hash:?}. got: {got:?} expected:{expected:?}")]
StateTrieRootMismatch {
/// Expected root
expected: H256,
@ -768,3 +1577,69 @@ pub enum TransactionError {
block_hash: BlockHash,
},
}
#[cfg(test)]
mod test {
use crate::{insert_canonical_block, test_utils::blocks::*, Transaction};
use reth_db::{mdbx::test_utils::create_test_rw_db, tables, transaction::DbTxMut};
use reth_primitives::{proofs::EMPTY_ROOT, ChainSpecBuilder, MAINNET};
use std::ops::DerefMut;
#[test]
fn insert_get_take() {
let db = create_test_rw_db();
// setup
let mut tx = Transaction::new(db.as_ref()).unwrap();
let chain_spec = ChainSpecBuilder::default()
.chain(MAINNET.chain)
.genesis(MAINNET.genesis.clone())
.shanghai_activated()
.build();
let data = BlockChainTestData::default();
let genesis = data.genesis.clone();
let (block1, exec_res1) = data.blocks[0].clone();
let (block2, exec_res2) = data.blocks[1].clone();
insert_canonical_block(tx.deref_mut(), data.genesis.clone(), None, false).unwrap();
tx.put::<tables::AccountsTrie>(EMPTY_ROOT, vec![0x80]).unwrap();
assert_genesis_block(&tx, data.genesis.clone());
tx.insert_block(block1.clone(), &chain_spec, exec_res1.clone()).unwrap();
// get one block
let get = tx.get_block_and_execution_range(&chain_spec, 1..=1).unwrap();
assert_eq!(get, vec![(block1.clone(), exec_res1.clone())]);
// take one block
let take = tx.take_block_and_execution_range(&chain_spec, 1..=1).unwrap();
assert_eq!(take, vec![(block1.clone(), exec_res1.clone())]);
assert_genesis_block(&tx, genesis.clone());
tx.insert_block(block1.clone(), &chain_spec, exec_res1.clone()).unwrap();
tx.insert_block(block2.clone(), &chain_spec, exec_res2.clone()).unwrap();
// get second block
let get = tx.get_block_and_execution_range(&chain_spec, 2..=2).unwrap();
assert_eq!(get, vec![(block2.clone(), exec_res2.clone())]);
// get two blocks
let get = tx.get_block_and_execution_range(&chain_spec, 1..=2).unwrap();
assert_eq!(
get,
vec![(block1.clone(), exec_res1.clone()), (block2.clone(), exec_res2.clone())]
);
// take two blocks
let get = tx.take_block_and_execution_range(&chain_spec, 1..=2).unwrap();
assert_eq!(
get,
vec![(block1.clone(), exec_res1.clone()), (block2.clone(), exec_res2.clone())]
);
// assert genesis state
assert_genesis_block(&tx, genesis);
}
}

View File

@ -32,7 +32,9 @@ pub enum TrieError {
InternalError(#[from] cita_trie::TrieError),
/// The database doesn't contain the root of the trie.
#[error("The root node wasn't found in the DB")]
MissingRoot(H256),
MissingAccountRoot(H256),
#[error("The storage root node wasn't found in the DB")]
MissingStorageRoot(H256),
/// Error returned by the database.
#[error("{0:?}")]
DatabaseError(#[from] reth_db::Error),
@ -112,7 +114,7 @@ where
if root == EMPTY_ROOT {
return Self::new(tx)
}
tx.get::<tables::AccountsTrie>(root)?.ok_or(TrieError::MissingRoot(root))?;
tx.get::<tables::AccountsTrie>(root)?.ok_or(TrieError::MissingAccountRoot(root))?;
Ok(Self { tx })
}
}
@ -204,7 +206,7 @@ where
tx.cursor_dup_read::<tables::StoragesTrie>()?
.seek_by_key_subkey(key, root)?
.filter(|entry| entry.hash == root)
.ok_or(TrieError::MissingRoot(root))?;
.ok_or(TrieError::MissingStorageRoot(root))?;
Ok(Self { tx, key })
}
}
@ -254,7 +256,7 @@ where
impl<'tx, 'itx, TX: DbTx<'itx>> HashDatabase<'tx, 'itx, TX> {
/// Instantiates a new Database for the accounts trie, with an existing root
fn from_root(tx: &'tx TX, root: H256) -> Result<Self, TrieError> {
tx.get::<tables::AccountsTrie>(root)?.ok_or(TrieError::MissingRoot(root))?;
tx.get::<tables::AccountsTrie>(root)?.ok_or(TrieError::MissingAccountRoot(root))?;
Ok(Self { tx, _p: Default::default() })
}
}
@ -307,7 +309,7 @@ impl<'tx, 'itx, TX: DbTx<'itx>> DupHashDatabase<'tx, 'itx, TX> {
fn from_root(tx: &'tx TX, key: H256, root: H256) -> Result<Self, TrieError> {
tx.cursor_dup_read::<tables::StoragesTrie>()?
.seek_by_key_subkey(key, root)?
.ok_or(TrieError::MissingRoot(root))?;
.ok_or(TrieError::MissingStorageRoot(root))?;
Ok(Self { tx, key, _p: Default::default() })
}
}

View File

@ -4,7 +4,7 @@ use reth_db::{
transaction::{DbTx, DbTxMut},
};
use reth_interfaces::{provider::ProviderError, Result};
use reth_primitives::{SealedBlock, TransitionId, U256};
use reth_primitives::{Address, SealedBlock, TransitionId};
/// Insert block data into corresponding tables. Used mainly for testing & internal tooling.
///
@ -18,10 +18,12 @@ use reth_primitives::{SealedBlock, TransitionId, U256};
/// Return [TransitionId] `(from,to)`
pub fn insert_block<'a, TX: DbTxMut<'a> + DbTx<'a>>(
tx: &TX,
block: &SealedBlock,
block: SealedBlock,
senders: Option<Vec<Address>>,
has_block_reward: bool,
parent_tx_num_transition_id: Option<(u64, u64)>,
) -> Result<(TransitionId, TransitionId)> {
let block_number = block.number;
tx.put::<tables::CanonicalHeaders>(block.number, block.hash())?;
// Put header with canonical hashes.
tx.put::<tables::Headers>(block.number, block.header.as_ref().clone())?;
@ -29,7 +31,7 @@ pub fn insert_block<'a, TX: DbTxMut<'a> + DbTx<'a>>(
// total difficulty
let ttd = if block.number == 0 {
U256::ZERO
block.difficulty
} else {
let parent_block_number = block.number - 1;
let parent_ttd = tx.get::<tables::HeaderTD>(parent_block_number)?.unwrap_or_default();
@ -68,11 +70,24 @@ pub fn insert_block<'a, TX: DbTxMut<'a> + DbTx<'a>>(
StoredBlockBody { start_tx_id: current_tx_id, tx_count: block.body.len() as u64 },
)?;
for transaction in block.body.iter() {
let rec_tx = transaction.clone().into_ecrecovered().unwrap();
let hash = rec_tx.hash();
tx.put::<tables::TxSenders>(current_tx_id, rec_tx.signer())?;
tx.put::<tables::Transactions>(current_tx_id, rec_tx.into())?;
let senders_len = senders.as_ref().map(|s| s.len());
let tx_iter = if Some(block.body.len()) == senders_len {
block.body.into_iter().zip(senders.unwrap().into_iter()).collect::<Vec<(_, _)>>()
} else {
block
.body
.into_iter()
.map(|tx| {
// fall back to recovering the signer from the signature; a failed
// recovery yields the zero address
let signer = tx.recover_signer();
(tx, signer.unwrap_or_default())
})
.collect::<Vec<(_, _)>>()
};
for (transaction, sender) in tx_iter {
let hash = transaction.hash();
tx.put::<tables::TxSenders>(current_tx_id, sender)?;
tx.put::<tables::Transactions>(current_tx_id, transaction)?;
tx.put::<tables::TxTransitionIndex>(current_tx_id, transition_id)?;
tx.put::<tables::TxHashNumber>(hash, current_tx_id)?;
transition_id += 1;
@ -80,11 +95,11 @@ pub fn insert_block<'a, TX: DbTxMut<'a> + DbTx<'a>>(
}
let mut has_withdrawals = false;
if let Some(withdrawals) = block.withdrawals.clone() {
if let Some(withdrawals) = block.withdrawals {
if !withdrawals.is_empty() {
has_withdrawals = true;
tx.put::<tables::BlockWithdrawals>(
block.number,
block_number,
StoredBlockWithdrawals { withdrawals },
)?;
}
@ -93,7 +108,7 @@ pub fn insert_block<'a, TX: DbTxMut<'a> + DbTx<'a>>(
if has_block_reward || has_withdrawals {
transition_id += 1;
}
tx.put::<tables::BlockTransitionIndex>(block.number, transition_id)?;
tx.put::<tables::BlockTransitionIndex>(block_number, transition_id)?;
let to_transition = transition_id;
Ok((from_transition, to_transition))
@ -103,8 +118,9 @@ pub fn insert_block<'a, TX: DbTxMut<'a> + DbTx<'a>>(
/// parent block in database.
pub fn insert_canonical_block<'a, TX: DbTxMut<'a> + DbTx<'a>>(
tx: &TX,
block: &SealedBlock,
block: SealedBlock,
senders: Option<Vec<Address>>,
has_block_reward: bool,
) -> Result<(TransitionId, TransitionId)> {
insert_block(tx, block, has_block_reward, None)
insert_block(tx, block, senders, has_block_reward, None)
}
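// Minimal usage sketch (mirroring the `insert_get_take` test in this commit):
// insert a genesis block with no known senders, letting them be recovered from
// the transaction signatures:
//
//     insert_canonical_block(tx.deref_mut(), genesis, None, false)?;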