Mirror of https://github.com/hl-archive-node/nanoreth.git, synced 2025-12-06 10:59:55 +00:00
Nit: replace block and sender with RecoveredBlock in ExecutedBlock (#13804)
Co-authored-by: Matthias Seitz <matthias.seitz@outlook.de>
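In short, the commit collapses the separate sealed-block and senders fields of ExecutedBlock into a single recovered block. Below is a condensed before/after sketch assembled only from the hunks that follow; it is not a compilable excerpt on its own (the reth types involved are defined elsewhere), and everything the diff does not touch is elided.

// Before: the sealed block and its senders were stored side by side.
pub struct ExecutedBlock<N: NodePrimitives = EthPrimitives> {
    pub block: Arc<SealedBlock<N::Block>>,
    pub senders: Arc<Vec<Address>>,
    pub execution_output: Arc<ExecutionOutcome<N::Receipt>>,
    pub hashed_state: Arc<HashedPostState>,
    pub trie: Arc<TrieUpdates>,
}

// After: block and senders travel together in one RecoveredBlock.
pub struct ExecutedBlock<N: NodePrimitives = EthPrimitives> {
    pub recovered_block: Arc<RecoveredBlock<N::Block>>,
    pub execution_output: Arc<ExecutionOutcome<N::Receipt>>,
    pub hashed_state: Arc<HashedPostState>,
    pub trie: Arc<TrieUpdates>,
}

Call sites follow suit: executed.block(), executed.senders(), and executed.clone_recovered_block() become executed.recovered_block() (with recovered_block().sealed_block() where a SealedBlock is still needed), which accounts for most of the mechanical churn in the hunks below.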
@@ -6,7 +6,7 @@ use crate::{
 };
 use alloy_consensus::{transaction::TransactionMeta, BlockHeader};
 use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber, BlockNumHash};
-use alloy_primitives::{map::HashMap, Address, TxHash, B256};
+use alloy_primitives::{map::HashMap, TxHash, B256};
 use parking_lot::RwLock;
 use reth_chainspec::ChainInfo;
 use reth_execution_types::{Chain, ExecutionOutcome};
@@ -181,7 +181,7 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {
 ) -> Self {
 let in_memory_state = InMemoryState::new(blocks, numbers, pending);
 let header = in_memory_state.head_state().map_or_else(SealedHeader::default, |state| {
-state.block_ref().block().clone_sealed_header()
+state.block_ref().recovered_block().clone_sealed_header()
 });
 let chain_info_tracker = ChainInfoTracker::new(header, finalized, safe);
 let (canon_state_notification_sender, _) =
@@ -228,7 +228,8 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {

 /// Returns the header corresponding to the given hash.
 pub fn header_by_hash(&self, hash: B256) -> Option<SealedHeader<N::BlockHeader>> {
-self.state_by_hash(hash).map(|block| block.block_ref().block.clone_sealed_header())
+self.state_by_hash(hash)
+.map(|block| block.block_ref().recovered_block().clone_sealed_header())
 }

 /// Clears all entries in the in memory state.
@@ -241,7 +242,7 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {
 /// Note: This assumes that the parent block of the pending block is canonical.
 pub fn set_pending_block(&self, pending: ExecutedBlock<N>) {
 // fetch the state of the pending block's parent block
-let parent = self.state_by_hash(pending.block().parent_hash());
+let parent = self.state_by_hash(pending.recovered_block().parent_hash());
 let pending = BlockState::with_parent(pending, parent);
 self.inner.in_memory_state.pending.send_modify(|p| {
 p.replace(pending);
@@ -264,15 +265,15 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {

 // we first remove the blocks from the reorged chain
 for block in reorged {
-let hash = block.block().hash();
-let number = block.block().number();
+let hash = block.recovered_block().hash();
+let number = block.recovered_block().number();
 blocks.remove(&hash);
 numbers.remove(&number);
 }

 // insert the new blocks
 for block in new_blocks {
-let parent = blocks.get(&block.block().parent_hash()).cloned();
+let parent = blocks.get(&block.recovered_block().parent_hash()).cloned();
 let block_state = BlockState::with_parent(block, parent);
 let hash = block_state.hash();
 let number = block_state.number();
@@ -332,16 +333,16 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {
 // height)
 let mut old_blocks = blocks
 .drain()
-.filter(|(_, b)| b.block_ref().block().number() > persisted_height)
+.filter(|(_, b)| b.block_ref().recovered_block().number() > persisted_height)
 .map(|(_, b)| b.block.clone())
 .collect::<Vec<_>>();

 // sort the blocks by number so we can insert them back in natural order (low -> high)
-old_blocks.sort_unstable_by_key(|block| block.block().number());
+old_blocks.sort_unstable_by_key(|block| block.recovered_block().number());

 // re-insert the blocks in natural order and connect them to their parent blocks
 for block in old_blocks {
-let parent = blocks.get(&block.block().parent_hash()).cloned();
+let parent = blocks.get(&block.recovered_block().parent_hash()).cloned();
 let block_state = BlockState::with_parent(block, parent);
 let hash = block_state.hash();
 let number = block_state.number();
@@ -354,7 +355,7 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {
 // also shift the pending state if it exists
 self.inner.in_memory_state.pending.send_modify(|p| {
 if let Some(p) = p.as_mut() {
-p.parent = blocks.get(&p.block_ref().block.parent_hash()).cloned();
+p.parent = blocks.get(&p.block_ref().recovered_block().parent_hash()).cloned();
 }
 });
 }
@ -461,7 +462,7 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {
|
||||
|
||||
/// Returns the `SealedHeader` corresponding to the pending state.
|
||||
pub fn pending_sealed_header(&self) -> Option<SealedHeader<N::BlockHeader>> {
|
||||
self.pending_state().map(|h| h.block_ref().block().clone_sealed_header())
|
||||
self.pending_state().map(|h| h.block_ref().recovered_block().clone_sealed_header())
|
||||
}
|
||||
|
||||
/// Returns the `Header` corresponding to the pending state.
|
||||
@ -471,7 +472,8 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {
|
||||
|
||||
/// Returns the `SealedBlock` corresponding to the pending state.
|
||||
pub fn pending_block(&self) -> Option<SealedBlock<N::Block>> {
|
||||
self.pending_state().map(|block_state| block_state.block_ref().block().clone())
|
||||
self.pending_state()
|
||||
.map(|block_state| block_state.block_ref().recovered_block().sealed_block().clone())
|
||||
}
|
||||
|
||||
/// Returns the `RecoveredBlock` corresponding to the pending state.
|
||||
@ -479,15 +481,17 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {
|
||||
where
|
||||
N::SignedTx: SignedTransaction,
|
||||
{
|
||||
self.pending_state()
|
||||
.and_then(|block_state| block_state.block_ref().block().clone().try_recover().ok())
|
||||
self.pending_state().map(|block_state| block_state.block_ref().recovered_block().clone())
|
||||
}
|
||||
|
||||
/// Returns a tuple with the `SealedBlock` corresponding to the pending
|
||||
/// state and a vector of its `Receipt`s.
|
||||
pub fn pending_block_and_receipts(&self) -> Option<PendingBlockAndReceipts<N>> {
|
||||
self.pending_state().map(|block_state| {
|
||||
(block_state.block_ref().block().clone(), block_state.executed_block_receipts())
|
||||
(
|
||||
block_state.block_ref().recovered_block().sealed_block().clone(),
|
||||
block_state.executed_block_receipts(),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
@ -547,7 +551,7 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {
|
||||
for block_state in self.canonical_chain() {
|
||||
if let Some(tx) = block_state
|
||||
.block_ref()
|
||||
.block()
|
||||
.recovered_block()
|
||||
.body()
|
||||
.transactions()
|
||||
.iter()
|
||||
@ -571,7 +575,7 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {
|
||||
for block_state in self.canonical_chain() {
|
||||
if let Some((index, tx)) = block_state
|
||||
.block_ref()
|
||||
.block()
|
||||
.recovered_block()
|
||||
.body()
|
||||
.transactions()
|
||||
.iter()
|
||||
@ -582,10 +586,10 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {
|
||||
tx_hash,
|
||||
index: index as u64,
|
||||
block_hash: block_state.hash(),
|
||||
block_number: block_state.block_ref().block.number(),
|
||||
base_fee: block_state.block_ref().block.base_fee_per_gas(),
|
||||
timestamp: block_state.block_ref().block.timestamp(),
|
||||
excess_blob_gas: block_state.block_ref().block.excess_blob_gas(),
|
||||
block_number: block_state.block_ref().recovered_block().number(),
|
||||
base_fee: block_state.block_ref().recovered_block().base_fee_per_gas(),
|
||||
timestamp: block_state.block_ref().recovered_block().timestamp(),
|
||||
excess_blob_gas: block_state.block_ref().recovered_block().excess_blob_gas(),
|
||||
};
|
||||
return Some((tx.clone(), meta))
|
||||
}
|
||||
@ -621,7 +625,7 @@ impl<N: NodePrimitives> BlockState<N> {
|
||||
if let Some(parent) = &self.parent {
|
||||
parent.anchor()
|
||||
} else {
|
||||
self.block.block().parent_num_hash()
|
||||
self.block.recovered_block().parent_num_hash()
|
||||
}
|
||||
}
|
||||
|
||||
@ -635,27 +639,20 @@ impl<N: NodePrimitives> BlockState<N> {
|
||||
&self.block
|
||||
}
|
||||
|
||||
/// Returns a clone of the block with recovered senders for the state.
|
||||
pub fn clone_recovered_block(&self) -> RecoveredBlock<N::Block> {
|
||||
let block = self.block.block().clone();
|
||||
let senders = self.block.senders().clone();
|
||||
RecoveredBlock::new_sealed(block, senders)
|
||||
}
|
||||
|
||||
/// Returns the hash of executed block that determines the state.
|
||||
pub fn hash(&self) -> B256 {
|
||||
self.block.block().hash()
|
||||
self.block.recovered_block().hash()
|
||||
}
|
||||
|
||||
/// Returns the block number of executed block that determines the state.
|
||||
pub fn number(&self) -> u64 {
|
||||
self.block.block().number()
|
||||
self.block.recovered_block().number()
|
||||
}
|
||||
|
||||
/// Returns the state root after applying the executed block that determines
|
||||
/// the state.
|
||||
pub fn state_root(&self) -> B256 {
|
||||
self.block.block().state_root()
|
||||
self.block.recovered_block().state_root()
|
||||
}
|
||||
|
||||
/// Returns the `Receipts` of executed block that determines the state.
|
||||
@ -748,7 +745,7 @@ impl<N: NodePrimitives> BlockState<N> {
|
||||
self.chain().find_map(|block_state| {
|
||||
block_state
|
||||
.block_ref()
|
||||
.block()
|
||||
.recovered_block()
|
||||
.body()
|
||||
.transactions()
|
||||
.iter()
|
||||
@ -768,7 +765,7 @@ impl<N: NodePrimitives> BlockState<N> {
|
||||
self.chain().find_map(|block_state| {
|
||||
block_state
|
||||
.block_ref()
|
||||
.block()
|
||||
.recovered_block()
|
||||
.body()
|
||||
.transactions()
|
||||
.iter()
|
||||
@ -779,10 +776,13 @@ impl<N: NodePrimitives> BlockState<N> {
|
||||
tx_hash,
|
||||
index: index as u64,
|
||||
block_hash: block_state.hash(),
|
||||
block_number: block_state.block_ref().block.number(),
|
||||
base_fee: block_state.block_ref().block.base_fee_per_gas(),
|
||||
timestamp: block_state.block_ref().block.timestamp(),
|
||||
excess_blob_gas: block_state.block_ref().block.excess_blob_gas(),
|
||||
block_number: block_state.block_ref().recovered_block().number(),
|
||||
base_fee: block_state.block_ref().recovered_block().base_fee_per_gas(),
|
||||
timestamp: block_state.block_ref().recovered_block().timestamp(),
|
||||
excess_blob_gas: block_state
|
||||
.block_ref()
|
||||
.recovered_block()
|
||||
.excess_blob_gas(),
|
||||
};
|
||||
(tx.clone(), meta)
|
||||
})
|
||||
@@ -793,10 +793,8 @@ impl<N: NodePrimitives> BlockState<N> {
 /// Represents an executed block stored in-memory.
 #[derive(Clone, Debug, PartialEq, Eq, Default)]
 pub struct ExecutedBlock<N: NodePrimitives = EthPrimitives> {
-/// Sealed block the rest of fields refer to.
-pub block: Arc<SealedBlock<N::Block>>,
-/// Block's senders.
-pub senders: Arc<Vec<Address>>,
+/// Recovered Block
+pub recovered_block: Arc<RecoveredBlock<N::Block>>,
 /// Block's execution outcome.
 pub execution_output: Arc<ExecutionOutcome<N::Receipt>>,
 /// Block's hashed state.
@@ -808,30 +806,17 @@ pub struct ExecutedBlock<N: NodePrimitives = EthPrimitives> {
 impl<N: NodePrimitives> ExecutedBlock<N> {
 /// [`ExecutedBlock`] constructor.
 pub const fn new(
-block: Arc<SealedBlock<N::Block>>,
-senders: Arc<Vec<Address>>,
+recovered_block: Arc<RecoveredBlock<N::Block>>,
 execution_output: Arc<ExecutionOutcome<N::Receipt>>,
 hashed_state: Arc<HashedPostState>,
 trie: Arc<TrieUpdates>,
 ) -> Self {
-Self { block, senders, execution_output, hashed_state, trie }
+Self { recovered_block, execution_output, hashed_state, trie }
 }

-/// Returns a reference to the executed block.
-pub fn block(&self) -> &SealedBlock<N::Block> {
-&self.block
-}
-
-/// Returns a reference to the block's senders
-pub fn senders(&self) -> &Vec<Address> {
-&self.senders
-}
-
-/// Returns a [`RecoveredBlock`]
-///
-/// Note: this clones the block and senders.
-pub fn clone_recovered_block(&self) -> RecoveredBlock<N::Block> {
-RecoveredBlock::new_sealed((*self.block).clone(), (*self.senders).clone())
+/// Returns a reference to [`RecoveredBlock`]
+pub fn recovered_block(&self) -> &RecoveredBlock<N::Block> {
+&self.recovered_block
 }

 /// Returns a reference to the block's execution outcome
@ -890,7 +875,7 @@ impl<N: NodePrimitives<SignedTx: SignedTransaction>> NewCanonicalChain<N> {
|
||||
Self::Commit { new } => {
|
||||
let new = Arc::new(new.iter().fold(Chain::default(), |mut chain, exec| {
|
||||
chain.append_block(
|
||||
exec.clone_recovered_block(),
|
||||
exec.recovered_block().clone(),
|
||||
exec.execution_outcome().clone(),
|
||||
);
|
||||
chain
|
||||
@ -900,14 +885,14 @@ impl<N: NodePrimitives<SignedTx: SignedTransaction>> NewCanonicalChain<N> {
|
||||
Self::Reorg { new, old } => {
|
||||
let new = Arc::new(new.iter().fold(Chain::default(), |mut chain, exec| {
|
||||
chain.append_block(
|
||||
exec.clone_recovered_block(),
|
||||
exec.recovered_block().clone(),
|
||||
exec.execution_outcome().clone(),
|
||||
);
|
||||
chain
|
||||
}));
|
||||
let old = Arc::new(old.iter().fold(Chain::default(), |mut chain, exec| {
|
||||
chain.append_block(
|
||||
exec.clone_recovered_block(),
|
||||
exec.recovered_block().clone(),
|
||||
exec.execution_outcome().clone(),
|
||||
);
|
||||
chain
|
||||
@@ -924,7 +909,7 @@ impl<N: NodePrimitives<SignedTx: SignedTransaction>> NewCanonicalChain<N> {
 pub fn tip(&self) -> &SealedBlock<N::Block> {
 match self {
 Self::Commit { new } | Self::Reorg { new, .. } => {
-new.last().expect("non empty blocks").block()
+new.last().expect("non empty blocks").recovered_block()
 }
 }
 }
@ -935,7 +920,9 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::test_utils::TestBlockBuilder;
|
||||
use alloy_eips::eip7685::Requests;
|
||||
use alloy_primitives::{map::B256HashMap, BlockNumber, Bytes, StorageKey, StorageValue};
|
||||
use alloy_primitives::{
|
||||
map::B256HashMap, Address, BlockNumber, Bytes, StorageKey, StorageValue,
|
||||
};
|
||||
use rand::Rng;
|
||||
use reth_errors::ProviderResult;
|
||||
use reth_primitives::{Account, Bytecode, EthPrimitives, Receipt};
|
||||
@ -1168,8 +1155,8 @@ mod tests {
|
||||
let result = in_memory_state.pending_state();
|
||||
assert!(result.is_some());
|
||||
let actual_pending_state = result.unwrap();
|
||||
assert_eq!(actual_pending_state.block.block().hash(), pending_hash);
|
||||
assert_eq!(actual_pending_state.block.block().number, pending_number);
|
||||
assert_eq!(actual_pending_state.block.recovered_block().hash(), pending_hash);
|
||||
assert_eq!(actual_pending_state.block.recovered_block().number, pending_number);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -1210,7 +1197,7 @@ mod tests {
|
||||
|
||||
let state = BlockState::new(block.clone());
|
||||
|
||||
assert_eq!(state.hash(), block.block.hash());
|
||||
assert_eq!(state.hash(), block.recovered_block().hash());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -1232,7 +1219,7 @@ mod tests {
|
||||
|
||||
let state = BlockState::new(block.clone());
|
||||
|
||||
assert_eq!(state.state_root(), block.block().state_root);
|
||||
assert_eq!(state.state_root(), block.recovered_block().state_root);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -1255,18 +1242,24 @@ mod tests {
|
||||
let block2 = test_block_builder.get_executed_block_with_number(0, B256::random());
|
||||
let chain = NewCanonicalChain::Commit { new: vec![block1.clone()] };
|
||||
state.update_chain(chain);
|
||||
assert_eq!(state.head_state().unwrap().block_ref().block().hash(), block1.block().hash());
|
||||
assert_eq!(
|
||||
state.state_by_number(0).unwrap().block_ref().block().hash(),
|
||||
block1.block().hash()
|
||||
state.head_state().unwrap().block_ref().recovered_block().hash(),
|
||||
block1.recovered_block().hash()
|
||||
);
|
||||
assert_eq!(
|
||||
state.state_by_number(0).unwrap().block_ref().recovered_block().hash(),
|
||||
block1.recovered_block().hash()
|
||||
);
|
||||
|
||||
let chain = NewCanonicalChain::Reorg { new: vec![block2.clone()], old: vec![block1] };
|
||||
state.update_chain(chain);
|
||||
assert_eq!(state.head_state().unwrap().block_ref().block().hash(), block2.block().hash());
|
||||
assert_eq!(
|
||||
state.state_by_number(0).unwrap().block_ref().block().hash(),
|
||||
block2.block().hash()
|
||||
state.head_state().unwrap().block_ref().recovered_block().hash(),
|
||||
block2.recovered_block().hash()
|
||||
);
|
||||
assert_eq!(
|
||||
state.state_by_number(0).unwrap().block_ref().recovered_block().hash(),
|
||||
block2.recovered_block().hash()
|
||||
);
|
||||
|
||||
assert_eq!(state.inner.in_memory_state.block_count(), 1);
|
||||
@ -1281,7 +1274,8 @@ mod tests {
|
||||
let block1 = test_block_builder.get_executed_block_with_number(0, B256::random());
|
||||
|
||||
// Second block with parent hash of the first block
|
||||
let block2 = test_block_builder.get_executed_block_with_number(1, block1.block().hash());
|
||||
let block2 =
|
||||
test_block_builder.get_executed_block_with_number(1, block1.recovered_block().hash());
|
||||
|
||||
// Commit the two blocks
|
||||
let chain = NewCanonicalChain::Commit { new: vec![block1.clone(), block2.clone()] };
|
||||
@ -1300,69 +1294,75 @@ mod tests {
|
||||
);
|
||||
|
||||
// Check the pending block
|
||||
assert_eq!(state.pending_block().unwrap(), block2.block().clone());
|
||||
assert_eq!(state.pending_block().unwrap(), block2.recovered_block().sealed_block().clone());
|
||||
|
||||
// Check the pending block number and hash
|
||||
assert_eq!(
|
||||
state.pending_block_num_hash().unwrap(),
|
||||
BlockNumHash { number: 1, hash: block2.block().hash() }
|
||||
BlockNumHash { number: 1, hash: block2.recovered_block().hash() }
|
||||
);
|
||||
|
||||
// Check the pending header
|
||||
assert_eq!(state.pending_header().unwrap(), block2.block().header().clone());
|
||||
assert_eq!(state.pending_header().unwrap(), block2.recovered_block().header().clone());
|
||||
|
||||
// Check the pending sealed header
|
||||
assert_eq!(state.pending_sealed_header().unwrap(), block2.block().clone_sealed_header());
|
||||
|
||||
// Check the pending block with senders
|
||||
assert_eq!(
|
||||
state.pending_recovered_block().unwrap(),
|
||||
block2.block().clone().try_recover().unwrap()
|
||||
state.pending_sealed_header().unwrap(),
|
||||
block2.recovered_block().clone_sealed_header()
|
||||
);
|
||||
|
||||
// Check the pending block with senders
|
||||
assert_eq!(state.pending_recovered_block().unwrap(), block2.recovered_block().clone());
|
||||
|
||||
// Check the pending block and receipts
|
||||
assert_eq!(state.pending_block_and_receipts().unwrap(), (block2.block().clone(), vec![]));
|
||||
assert_eq!(
|
||||
state.pending_block_and_receipts().unwrap(),
|
||||
(block2.recovered_block().sealed_block().clone(), vec![])
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_canonical_in_memory_state_state_provider() {
|
||||
let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default();
|
||||
let block1 = test_block_builder.get_executed_block_with_number(1, B256::random());
|
||||
let block2 = test_block_builder.get_executed_block_with_number(2, block1.block().hash());
|
||||
let block3 = test_block_builder.get_executed_block_with_number(3, block2.block().hash());
|
||||
let block2 =
|
||||
test_block_builder.get_executed_block_with_number(2, block1.recovered_block().hash());
|
||||
let block3 =
|
||||
test_block_builder.get_executed_block_with_number(3, block2.recovered_block().hash());
|
||||
|
||||
let state1 = Arc::new(BlockState::new(block1.clone()));
|
||||
let state2 = Arc::new(BlockState::with_parent(block2.clone(), Some(state1.clone())));
|
||||
let state3 = Arc::new(BlockState::with_parent(block3.clone(), Some(state2.clone())));
|
||||
|
||||
let mut blocks = HashMap::default();
|
||||
blocks.insert(block1.block().hash(), state1);
|
||||
blocks.insert(block2.block().hash(), state2);
|
||||
blocks.insert(block3.block().hash(), state3);
|
||||
blocks.insert(block1.recovered_block().hash(), state1);
|
||||
blocks.insert(block2.recovered_block().hash(), state2);
|
||||
blocks.insert(block3.recovered_block().hash(), state3);
|
||||
|
||||
let mut numbers = BTreeMap::new();
|
||||
numbers.insert(1, block1.block().hash());
|
||||
numbers.insert(2, block2.block().hash());
|
||||
numbers.insert(3, block3.block().hash());
|
||||
numbers.insert(1, block1.recovered_block().hash());
|
||||
numbers.insert(2, block2.recovered_block().hash());
|
||||
numbers.insert(3, block3.recovered_block().hash());
|
||||
|
||||
let canonical_state = CanonicalInMemoryState::new(blocks, numbers, None, None, None);
|
||||
|
||||
let historical: StateProviderBox = Box::new(MockStateProvider);
|
||||
|
||||
let overlay_provider = canonical_state.state_provider(block3.block().hash(), historical);
|
||||
let overlay_provider =
|
||||
canonical_state.state_provider(block3.recovered_block().hash(), historical);
|
||||
|
||||
assert_eq!(overlay_provider.in_memory.len(), 3);
|
||||
assert_eq!(overlay_provider.in_memory[0].block().number, 3);
|
||||
assert_eq!(overlay_provider.in_memory[1].block().number, 2);
|
||||
assert_eq!(overlay_provider.in_memory[2].block().number, 1);
|
||||
assert_eq!(overlay_provider.in_memory[0].recovered_block().number, 3);
|
||||
assert_eq!(overlay_provider.in_memory[1].recovered_block().number, 2);
|
||||
assert_eq!(overlay_provider.in_memory[2].recovered_block().number, 1);
|
||||
|
||||
assert_eq!(
|
||||
overlay_provider.in_memory[0].block().parent_hash,
|
||||
overlay_provider.in_memory[1].block().hash()
|
||||
overlay_provider.in_memory[0].recovered_block().parent_hash,
|
||||
overlay_provider.in_memory[1].recovered_block().hash()
|
||||
);
|
||||
assert_eq!(
|
||||
overlay_provider.in_memory[1].block().parent_hash,
|
||||
overlay_provider.in_memory[2].block().hash()
|
||||
overlay_provider.in_memory[1].recovered_block().parent_hash,
|
||||
overlay_provider.in_memory[2].recovered_block().hash()
|
||||
);
|
||||
|
||||
let unknown_hash = B256::random();
|
||||
@ -1381,7 +1381,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_canonical_in_memory_state_canonical_chain_single_block() {
|
||||
let block = TestBlockBuilder::eth().get_executed_block_with_number(1, B256::random());
|
||||
let hash = block.block().hash();
|
||||
let hash = block.recovered_block().hash();
|
||||
let mut blocks = HashMap::default();
|
||||
blocks.insert(hash, Arc::new(BlockState::new(block)));
|
||||
let mut numbers = BTreeMap::new();
|
||||
@ -1403,7 +1403,7 @@ mod tests {
|
||||
|
||||
for i in 1..=3 {
|
||||
let block = block_builder.get_executed_block_with_number(i, parent_hash);
|
||||
let hash = block.block().hash();
|
||||
let hash = block.recovered_block().hash();
|
||||
state.update_blocks(Some(block), None);
|
||||
parent_hash = hash;
|
||||
}
|
||||
@ -1425,7 +1425,7 @@ mod tests {
|
||||
|
||||
for i in 1..=2 {
|
||||
let block = block_builder.get_executed_block_with_number(i, parent_hash);
|
||||
let hash = block.block().hash();
|
||||
let hash = block.recovered_block().hash();
|
||||
state.update_blocks(Some(block), None);
|
||||
parent_hash = hash;
|
||||
}
|
||||
@ -1446,14 +1446,14 @@ mod tests {
|
||||
|
||||
let parents = chain[3].parent_state_chain();
|
||||
assert_eq!(parents.len(), 3);
|
||||
assert_eq!(parents[0].block().block.number, 3);
|
||||
assert_eq!(parents[1].block().block.number, 2);
|
||||
assert_eq!(parents[2].block().block.number, 1);
|
||||
assert_eq!(parents[0].block().recovered_block().number, 3);
|
||||
assert_eq!(parents[1].block().recovered_block().number, 2);
|
||||
assert_eq!(parents[2].block().recovered_block().number, 1);
|
||||
|
||||
let parents = chain[2].parent_state_chain();
|
||||
assert_eq!(parents.len(), 2);
|
||||
assert_eq!(parents[0].block().block.number, 2);
|
||||
assert_eq!(parents[1].block().block.number, 1);
|
||||
assert_eq!(parents[0].block().recovered_block().number, 2);
|
||||
assert_eq!(parents[1].block().recovered_block().number, 1);
|
||||
|
||||
let parents = chain[0].parent_state_chain();
|
||||
assert_eq!(parents.len(), 0);
|
||||
@ -1465,15 +1465,15 @@ mod tests {
|
||||
let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default();
|
||||
let single_block =
|
||||
create_mock_state(&mut test_block_builder, single_block_number, B256::random());
|
||||
let single_block_hash = single_block.block().block.hash();
|
||||
let single_block_hash = single_block.block().recovered_block().hash();
|
||||
|
||||
let parents = single_block.parent_state_chain();
|
||||
assert_eq!(parents.len(), 0);
|
||||
|
||||
let block_state_chain = single_block.chain().collect::<Vec<_>>();
|
||||
assert_eq!(block_state_chain.len(), 1);
|
||||
assert_eq!(block_state_chain[0].block().block.number, single_block_number);
|
||||
assert_eq!(block_state_chain[0].block().block.hash(), single_block_hash);
|
||||
assert_eq!(block_state_chain[0].block().recovered_block().number, single_block_number);
|
||||
assert_eq!(block_state_chain[0].block().recovered_block().hash(), single_block_hash);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -1483,18 +1483,18 @@ mod tests {
|
||||
|
||||
let block_state_chain = chain[2].chain().collect::<Vec<_>>();
|
||||
assert_eq!(block_state_chain.len(), 3);
|
||||
assert_eq!(block_state_chain[0].block().block.number, 3);
|
||||
assert_eq!(block_state_chain[1].block().block.number, 2);
|
||||
assert_eq!(block_state_chain[2].block().block.number, 1);
|
||||
assert_eq!(block_state_chain[0].block().recovered_block().number, 3);
|
||||
assert_eq!(block_state_chain[1].block().recovered_block().number, 2);
|
||||
assert_eq!(block_state_chain[2].block().recovered_block().number, 1);
|
||||
|
||||
let block_state_chain = chain[1].chain().collect::<Vec<_>>();
|
||||
assert_eq!(block_state_chain.len(), 2);
|
||||
assert_eq!(block_state_chain[0].block().block.number, 2);
|
||||
assert_eq!(block_state_chain[1].block().block.number, 1);
|
||||
assert_eq!(block_state_chain[0].block().recovered_block().number, 2);
|
||||
assert_eq!(block_state_chain[1].block().recovered_block().number, 1);
|
||||
|
||||
let block_state_chain = chain[0].chain().collect::<Vec<_>>();
|
||||
assert_eq!(block_state_chain.len(), 1);
|
||||
assert_eq!(block_state_chain[0].block().block.number, 1);
|
||||
assert_eq!(block_state_chain[0].block().recovered_block().number, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -1502,10 +1502,14 @@ mod tests {
|
||||
// Generate 4 blocks
|
||||
let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default();
|
||||
let block0 = test_block_builder.get_executed_block_with_number(0, B256::random());
|
||||
let block1 = test_block_builder.get_executed_block_with_number(1, block0.block.hash());
|
||||
let block1a = test_block_builder.get_executed_block_with_number(1, block0.block.hash());
|
||||
let block2 = test_block_builder.get_executed_block_with_number(2, block1.block.hash());
|
||||
let block2a = test_block_builder.get_executed_block_with_number(2, block1.block.hash());
|
||||
let block1 =
|
||||
test_block_builder.get_executed_block_with_number(1, block0.recovered_block.hash());
|
||||
let block1a =
|
||||
test_block_builder.get_executed_block_with_number(1, block0.recovered_block.hash());
|
||||
let block2 =
|
||||
test_block_builder.get_executed_block_with_number(2, block1.recovered_block.hash());
|
||||
let block2a =
|
||||
test_block_builder.get_executed_block_with_number(2, block1.recovered_block.hash());
|
||||
|
||||
let sample_execution_outcome = ExecutionOutcome {
|
||||
receipts: Receipts::from_iter([vec![], vec![]]),
|
||||
@ -1520,7 +1524,7 @@ mod tests {
|
||||
chain_commit.to_chain_notification(),
|
||||
CanonStateNotification::Commit {
|
||||
new: Arc::new(Chain::new(
|
||||
vec![block0.clone_recovered_block(), block1.clone_recovered_block()],
|
||||
vec![block0.recovered_block().clone(), block1.recovered_block().clone()],
|
||||
sample_execution_outcome.clone(),
|
||||
None
|
||||
))
|
||||
@ -1537,12 +1541,12 @@ mod tests {
|
||||
chain_reorg.to_chain_notification(),
|
||||
CanonStateNotification::Reorg {
|
||||
old: Arc::new(Chain::new(
|
||||
vec![block1.clone_recovered_block(), block2.clone_recovered_block()],
|
||||
vec![block1.recovered_block().clone(), block2.recovered_block().clone()],
|
||||
sample_execution_outcome.clone(),
|
||||
None
|
||||
)),
|
||||
new: Arc::new(Chain::new(
|
||||
vec![block1a.clone_recovered_block(), block2a.clone_recovered_block()],
|
||||
vec![block1a.recovered_block().clone(), block2a.recovered_block().clone()],
|
||||
sample_execution_outcome,
|
||||
None
|
||||
))
|
||||
|
||||
@ -65,8 +65,8 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> {
|
||||
impl<N: NodePrimitives> BlockHashReader for MemoryOverlayStateProviderRef<'_, N> {
|
||||
fn block_hash(&self, number: BlockNumber) -> ProviderResult<Option<B256>> {
|
||||
for block in &self.in_memory {
|
||||
if block.block.number() == number {
|
||||
return Ok(Some(block.block.hash()));
|
||||
if block.recovered_block().number() == number {
|
||||
return Ok(Some(block.recovered_block().hash()));
|
||||
}
|
||||
}
|
||||
|
||||
@ -82,9 +82,9 @@ impl<N: NodePrimitives> BlockHashReader for MemoryOverlayStateProviderRef<'_, N>
|
||||
let mut earliest_block_number = None;
|
||||
let mut in_memory_hashes = Vec::new();
|
||||
for block in &self.in_memory {
|
||||
if range.contains(&block.block.number()) {
|
||||
in_memory_hashes.insert(0, block.block.hash());
|
||||
earliest_block_number = Some(block.block.number());
|
||||
if range.contains(&block.recovered_block().number()) {
|
||||
in_memory_hashes.insert(0, block.recovered_block().hash());
|
||||
earliest_block_number = Some(block.recovered_block().number());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -210,8 +210,7 @@ impl<N: NodePrimitives> TestBlockBuilder<N> {

 let (block, senders) = block_with_senders.split_sealed();
 ExecutedBlock::new(
-Arc::new(block),
-Arc::new(senders),
+Arc::new(RecoveredBlock::new_sealed(block, senders)),
 Arc::new(ExecutionOutcome::new(
 BundleState::default(),
 receipts,
@ -251,7 +250,7 @@ impl<N: NodePrimitives> TestBlockBuilder<N> {
|
||||
range.map(move |number| {
|
||||
let current_parent_hash = parent_hash;
|
||||
let block = self.get_executed_block_with_number(number, current_parent_hash);
|
||||
parent_hash = block.block.hash();
|
||||
parent_hash = block.recovered_block().hash();
|
||||
block
|
||||
})
|
||||
}
|
||||
|
||||
@ -253,7 +253,7 @@ impl<T: EngineTypes, N: NodePrimitives> Display for EngineApiRequest<T, N> {
|
||||
match self {
|
||||
Self::Beacon(msg) => msg.fmt(f),
|
||||
Self::InsertExecutedBlock(block) => {
|
||||
write!(f, "InsertExecutedBlock({:?})", block.block().num_hash())
|
||||
write!(f, "InsertExecutedBlock({:?})", block.recovered_block().num_hash())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -142,11 +142,11 @@ where
|
||||
&self,
|
||||
blocks: Vec<ExecutedBlock<N::Primitives>>,
|
||||
) -> Result<Option<BlockNumHash>, PersistenceError> {
|
||||
debug!(target: "engine::persistence", first=?blocks.first().map(|b| b.block.num_hash()), last=?blocks.last().map(|b| b.block.num_hash()), "Saving range of blocks");
|
||||
debug!(target: "engine::persistence", first=?blocks.first().map(|b| b.recovered_block.num_hash()), last=?blocks.last().map(|b| b.recovered_block.num_hash()), "Saving range of blocks");
|
||||
let start_time = Instant::now();
|
||||
let last_block_hash_num = blocks.last().map(|block| BlockNumHash {
|
||||
hash: block.block().hash(),
|
||||
number: block.block().header().number(),
|
||||
hash: block.recovered_block().hash(),
|
||||
number: block.recovered_block().header().number(),
|
||||
});
|
||||
|
||||
if last_block_hash_num.is_some() {
|
||||
@ -339,7 +339,7 @@ mod tests {
|
||||
let mut test_block_builder = TestBlockBuilder::eth();
|
||||
let executed =
|
||||
test_block_builder.get_executed_block_with_number(block_number, B256::random());
|
||||
let block_hash = executed.block().hash();
|
||||
let block_hash = executed.recovered_block().hash();
|
||||
|
||||
let blocks = vec![executed];
|
||||
let (tx, rx) = oneshot::channel();
|
||||
@ -363,7 +363,7 @@ mod tests {
|
||||
|
||||
let mut test_block_builder = TestBlockBuilder::eth();
|
||||
let blocks = test_block_builder.get_executed_blocks(0..5).collect::<Vec<_>>();
|
||||
let last_hash = blocks.last().unwrap().block().hash();
|
||||
let last_hash = blocks.last().unwrap().recovered_block().hash();
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
persistence_handle.save_blocks(blocks, tx).unwrap();
|
||||
@ -380,7 +380,7 @@ mod tests {
|
||||
let mut test_block_builder = TestBlockBuilder::eth();
|
||||
for range in ranges {
|
||||
let blocks = test_block_builder.get_executed_blocks(range).collect::<Vec<_>>();
|
||||
let last_hash = blocks.last().unwrap().block().hash();
|
||||
let last_hash = blocks.last().unwrap().recovered_block().hash();
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
persistence_handle.save_blocks(blocks, tx).unwrap();
|
||||
|
||||
@ -147,7 +147,7 @@ impl<N: NodePrimitives> TreeState<N> {
|
||||
|
||||
/// Returns the block by hash.
|
||||
fn block_by_hash(&self, hash: B256) -> Option<Arc<SealedBlock<N::Block>>> {
|
||||
self.blocks_by_hash.get(&hash).map(|b| b.block.clone())
|
||||
self.blocks_by_hash.get(&hash).map(|b| Arc::new(b.recovered_block().sealed_block().clone()))
|
||||
}
|
||||
|
||||
/// Returns all available blocks for the given hash that lead back to the canonical chain, from
|
||||
@ -156,10 +156,10 @@ impl<N: NodePrimitives> TreeState<N> {
|
||||
/// Returns `None` if the block for the given hash is not found.
|
||||
fn blocks_by_hash(&self, hash: B256) -> Option<(B256, Vec<ExecutedBlock<N>>)> {
|
||||
let block = self.blocks_by_hash.get(&hash).cloned()?;
|
||||
let mut parent_hash = block.block().parent_hash();
|
||||
let mut parent_hash = block.recovered_block().parent_hash();
|
||||
let mut blocks = vec![block];
|
||||
while let Some(executed) = self.blocks_by_hash.get(&parent_hash) {
|
||||
parent_hash = executed.block.parent_hash();
|
||||
parent_hash = executed.recovered_block().parent_hash();
|
||||
blocks.push(executed.clone());
|
||||
}
|
||||
|
||||
@ -168,9 +168,9 @@ impl<N: NodePrimitives> TreeState<N> {
|
||||
|
||||
/// Insert executed block into the state.
|
||||
fn insert_executed(&mut self, executed: ExecutedBlock<N>) {
|
||||
let hash = executed.block.hash();
|
||||
let parent_hash = executed.block.parent_hash();
|
||||
let block_number = executed.block.number();
|
||||
let hash = executed.recovered_block().hash();
|
||||
let parent_hash = executed.recovered_block().parent_hash();
|
||||
let block_number = executed.recovered_block().number();
|
||||
|
||||
if self.blocks_by_hash.contains_key(&hash) {
|
||||
return;
|
||||
@ -202,7 +202,7 @@ impl<N: NodePrimitives> TreeState<N> {
|
||||
let executed = self.blocks_by_hash.remove(&hash)?;
|
||||
|
||||
// Remove this block from collection of children of its parent block.
|
||||
let parent_entry = self.parent_to_child.entry(executed.block.parent_hash());
|
||||
let parent_entry = self.parent_to_child.entry(executed.recovered_block().parent_hash());
|
||||
if let hash_map::Entry::Occupied(mut entry) = parent_entry {
|
||||
entry.get_mut().remove(&hash);
|
||||
|
||||
@ -215,10 +215,11 @@ impl<N: NodePrimitives> TreeState<N> {
|
||||
let children = self.parent_to_child.remove(&hash).unwrap_or_default();
|
||||
|
||||
// Remove this block from `blocks_by_number`.
|
||||
let block_number_entry = self.blocks_by_number.entry(executed.block.number());
|
||||
let block_number_entry = self.blocks_by_number.entry(executed.recovered_block().number());
|
||||
if let btree_map::Entry::Occupied(mut entry) = block_number_entry {
|
||||
// We have to find the index of the block since it exists in a vec
|
||||
if let Some(index) = entry.get().iter().position(|b| b.block.hash() == hash) {
|
||||
if let Some(index) = entry.get().iter().position(|b| b.recovered_block().hash() == hash)
|
||||
{
|
||||
entry.get_mut().swap_remove(index);
|
||||
|
||||
// If there are no blocks left then remove the entry for this block
|
||||
@ -239,7 +240,7 @@ impl<N: NodePrimitives> TreeState<N> {
|
||||
}
|
||||
|
||||
while let Some(executed) = self.blocks_by_hash.get(¤t_block) {
|
||||
current_block = executed.block.parent_hash();
|
||||
current_block = executed.recovered_block().parent_hash();
|
||||
if current_block == hash {
|
||||
return true
|
||||
}
|
||||
@ -267,14 +268,16 @@ impl<N: NodePrimitives> TreeState<N> {
|
||||
// upper bound
|
||||
let mut current_block = self.current_canonical_head.hash;
|
||||
while let Some(executed) = self.blocks_by_hash.get(¤t_block) {
|
||||
current_block = executed.block.parent_hash();
|
||||
if executed.block.number() <= upper_bound {
|
||||
debug!(target: "engine::tree", num_hash=?executed.block.num_hash(), "Attempting to remove block walking back from the head");
|
||||
if let Some((removed, _)) = self.remove_by_hash(executed.block.hash()) {
|
||||
debug!(target: "engine::tree", num_hash=?removed.block.num_hash(), "Removed block walking back from the head");
|
||||
current_block = executed.recovered_block().parent_hash();
|
||||
if executed.recovered_block().number() <= upper_bound {
|
||||
debug!(target: "engine::tree", num_hash=?executed.recovered_block().num_hash(), "Attempting to remove block walking back from the head");
|
||||
if let Some((removed, _)) = self.remove_by_hash(executed.recovered_block().hash()) {
|
||||
debug!(target: "engine::tree", num_hash=?removed.recovered_block().num_hash(), "Removed block walking back from the head");
|
||||
// finally, move the trie updates
|
||||
self.persisted_trie_updates
|
||||
.insert(removed.block.hash(), (removed.block.number(), removed.trie));
|
||||
self.persisted_trie_updates.insert(
|
||||
removed.recovered_block().hash(),
|
||||
(removed.recovered_block().number(), removed.trie),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -297,11 +300,11 @@ impl<N: NodePrimitives> TreeState<N> {
|
||||
let blocks_to_remove = self
|
||||
.blocks_by_number
|
||||
.range((Bound::Unbounded, Bound::Excluded(finalized_num)))
|
||||
.flat_map(|(_, blocks)| blocks.iter().map(|b| b.block.hash()))
|
||||
.flat_map(|(_, blocks)| blocks.iter().map(|b| b.recovered_block().hash()))
|
||||
.collect::<Vec<_>>();
|
||||
for hash in blocks_to_remove {
|
||||
if let Some((removed, _)) = self.remove_by_hash(hash) {
|
||||
debug!(target: "engine::tree", num_hash=?removed.block.num_hash(), "Removed finalized sidechain block");
|
||||
debug!(target: "engine::tree", num_hash=?removed.recovered_block().num_hash(), "Removed finalized sidechain block");
|
||||
}
|
||||
}
|
||||
|
||||
@ -318,17 +321,19 @@ impl<N: NodePrimitives> TreeState<N> {
|
||||
|
||||
// re-insert the finalized hash if we removed it
|
||||
if let Some(position) =
|
||||
blocks_to_remove.iter().position(|b| b.block.hash() == finalized_hash)
|
||||
blocks_to_remove.iter().position(|b| b.recovered_block().hash() == finalized_hash)
|
||||
{
|
||||
let finalized_block = blocks_to_remove.swap_remove(position);
|
||||
self.blocks_by_number.insert(finalized_num, vec![finalized_block]);
|
||||
}
|
||||
|
||||
let mut blocks_to_remove =
|
||||
blocks_to_remove.into_iter().map(|e| e.block.hash()).collect::<VecDeque<_>>();
|
||||
let mut blocks_to_remove = blocks_to_remove
|
||||
.into_iter()
|
||||
.map(|e| e.recovered_block().hash())
|
||||
.collect::<VecDeque<_>>();
|
||||
while let Some(block) = blocks_to_remove.pop_front() {
|
||||
if let Some((removed, children)) = self.remove_by_hash(block) {
|
||||
debug!(target: "engine::tree", num_hash=?removed.block.num_hash(), "Removed finalized sidechain child block");
|
||||
debug!(target: "engine::tree", num_hash=?removed.recovered_block().num_hash(), "Removed finalized sidechain child block");
|
||||
blocks_to_remove.extend(children);
|
||||
}
|
||||
}
|
||||
@ -900,11 +905,11 @@ where
|
||||
return Ok(None)
|
||||
};
|
||||
|
||||
let new_head_number = new_head_block.block.number();
|
||||
let new_head_number = new_head_block.recovered_block().number();
|
||||
let mut current_canonical_number = self.state.tree_state.current_canonical_head.number;
|
||||
|
||||
let mut new_chain = vec![new_head_block.clone()];
|
||||
let mut current_hash = new_head_block.block.parent_hash();
|
||||
let mut current_hash = new_head_block.recovered_block().parent_hash();
|
||||
let mut current_number = new_head_number - 1;
|
||||
|
||||
// Walk back the new chain until we reach a block we know about
|
||||
@ -913,7 +918,7 @@ where
|
||||
// that are _above_ the current canonical head.
|
||||
while current_number > current_canonical_number {
|
||||
if let Some(block) = self.executed_block_by_hash(current_hash)? {
|
||||
current_hash = block.block.parent_hash();
|
||||
current_hash = block.recovered_block().parent_hash();
|
||||
current_number -= 1;
|
||||
new_chain.push(block);
|
||||
} else {
|
||||
@ -942,7 +947,7 @@ where
|
||||
while current_canonical_number > current_number {
|
||||
if let Some(block) = self.executed_block_by_hash(old_hash)? {
|
||||
old_chain.push(block.clone());
|
||||
old_hash = block.block.parent_hash();
|
||||
old_hash = block.recovered_block().parent_hash();
|
||||
current_canonical_number -= 1;
|
||||
} else {
|
||||
// This shouldn't happen as we're walking back the canonical chain
|
||||
@ -958,7 +963,7 @@ where
|
||||
// a common ancestor (fork block) is reached.
|
||||
while old_hash != current_hash {
|
||||
if let Some(block) = self.executed_block_by_hash(old_hash)? {
|
||||
old_hash = block.block.parent_hash();
|
||||
old_hash = block.recovered_block().parent_hash();
|
||||
old_chain.push(block);
|
||||
} else {
|
||||
// This shouldn't happen as we're walking back the canonical chain
|
||||
@ -967,7 +972,7 @@ where
|
||||
}
|
||||
|
||||
if let Some(block) = self.executed_block_by_hash(current_hash)? {
|
||||
current_hash = block.block.parent_hash();
|
||||
current_hash = block.recovered_block().parent_hash();
|
||||
new_chain.push(block);
|
||||
} else {
|
||||
// This shouldn't happen as we've already walked this path
|
||||
@ -1203,7 +1208,7 @@ where
|
||||
if blocks_to_persist.is_empty() {
|
||||
debug!(target: "engine::tree", "Returned empty set of blocks to persist");
|
||||
} else {
|
||||
debug!(target: "engine::tree", blocks = ?blocks_to_persist.iter().map(|block| block.block.num_hash()).collect::<Vec<_>>(), "Persisting blocks");
|
||||
debug!(target: "engine::tree", blocks = ?blocks_to_persist.iter().map(|block| block.recovered_block().num_hash()).collect::<Vec<_>>(), "Persisting blocks");
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self.persistence.save_blocks(blocks_to_persist, tx);
|
||||
self.persistence_state.start(rx);
|
||||
@ -1262,9 +1267,9 @@ where
|
||||
FromEngine::Request(request) => {
|
||||
match request {
|
||||
EngineApiRequest::InsertExecutedBlock(block) => {
|
||||
debug!(target: "engine::tree", block=?block.block().num_hash(), "inserting already executed block");
|
||||
debug!(target: "engine::tree", block=?block.recovered_block().num_hash(), "inserting already executed block");
|
||||
let now = Instant::now();
|
||||
let sealed_block = block.block.clone();
|
||||
let sealed_block = Arc::new(block.recovered_block().sealed_block().clone());
|
||||
self.state.tree_state.insert_executed(block);
|
||||
self.metrics.engine.inserted_already_executed_blocks.increment(1);
|
||||
|
||||
@ -1544,15 +1549,15 @@ where
|
||||
|
||||
debug!(target: "engine::tree", ?last_persisted_number, ?canonical_head_number, ?target_number, ?current_hash, "Returning canonical blocks to persist");
|
||||
while let Some(block) = self.state.tree_state.blocks_by_hash.get(¤t_hash) {
|
||||
if block.block.number() <= last_persisted_number {
|
||||
if block.recovered_block().number() <= last_persisted_number {
|
||||
break;
|
||||
}
|
||||
|
||||
if block.block.number() <= target_number {
|
||||
if block.recovered_block().number() <= target_number {
|
||||
blocks_to_persist.push(block.clone());
|
||||
}
|
||||
|
||||
current_hash = block.block.parent_hash();
|
||||
current_hash = block.recovered_block().parent_hash();
|
||||
}
|
||||
|
||||
// reverse the order so that the oldest block comes first
|
||||
@@ -1610,8 +1615,7 @@ where
 let hashed_state = self.provider.hashed_post_state(execution_output.state());

 Ok(Some(ExecutedBlock {
-block: Arc::new(block),
-senders: Arc::new(senders),
+recovered_block: Arc::new(RecoveredBlock::new_sealed(block, senders)),
 trie: updates.clone(),
 execution_output: Arc::new(execution_output),
 hashed_state: Arc::new(hashed_state),
@ -2003,7 +2007,7 @@ where
|
||||
let NewCanonicalChain::Reorg { new, old: _ } = chain_update else { return None };
|
||||
|
||||
let BlockNumHash { number: new_num, hash: new_hash } =
|
||||
new.first().map(|block| block.block.num_hash())?;
|
||||
new.first().map(|block| block.recovered_block().num_hash())?;
|
||||
|
||||
match new_num.cmp(&self.persistence_state.last_persisted_block.number) {
|
||||
Ordering::Greater => {
|
||||
@ -2045,8 +2049,8 @@ where
|
||||
|
||||
// reinsert any missing reorged blocks
|
||||
if let NewCanonicalChain::Reorg { new, old } = &chain_update {
|
||||
let new_first = new.first().map(|first| first.block.num_hash());
|
||||
let old_first = old.first().map(|first| first.block.num_hash());
|
||||
let new_first = new.first().map(|first| first.recovered_block().num_hash());
|
||||
let old_first = old.first().map(|first| first.recovered_block().num_hash());
|
||||
trace!(target: "engine::tree", ?new_first, ?old_first, "Reorg detected, new and old first blocks");
|
||||
|
||||
self.update_reorg_metrics(old.len());
|
||||
@ -2080,8 +2084,13 @@ where
|
||||
/// This reinserts any blocks in the new chain that do not already exist in the tree
|
||||
fn reinsert_reorged_blocks(&mut self, new_chain: Vec<ExecutedBlock<N>>) {
|
||||
for block in new_chain {
|
||||
if self.state.tree_state.executed_block_by_hash(block.block.hash()).is_none() {
|
||||
trace!(target: "engine::tree", num=?block.block.number(), hash=?block.block.hash(), "Reinserting block into tree state");
|
||||
if self
|
||||
.state
|
||||
.tree_state
|
||||
.executed_block_by_hash(block.recovered_block().hash())
|
||||
.is_none()
|
||||
{
|
||||
trace!(target: "engine::tree", num=?block.recovered_block().number(), hash=?block.recovered_block().hash(), "Reinserting block into tree state");
|
||||
self.state.tree_state.insert_executed(block);
|
||||
}
|
||||
}
|
||||
@@ -2464,15 +2473,18 @@ where
 debug!(target: "engine::tree", ?root_elapsed, block=?sealed_block.num_hash(), "Calculated state root");

 let executed: ExecutedBlock<N> = ExecutedBlock {
-block: sealed_block.clone(),
-senders: Arc::new(block.senders().to_vec()),
+recovered_block: Arc::new(RecoveredBlock::new_sealed(
+sealed_block.as_ref().clone(),
+block.senders().to_vec(),
+)),
 execution_output: Arc::new(ExecutionOutcome::from((output, block_number))),
 hashed_state: Arc::new(hashed_state),
 trie: Arc::new(trie_output),
 };

-if self.state.tree_state.canonical_block_hash() == executed.block().parent_hash() {
-debug!(target: "engine::tree", pending = ?executed.block().num_hash() ,"updating pending block");
+if self.state.tree_state.canonical_block_hash() == executed.recovered_block().parent_hash()
+{
+debug!(target: "engine::tree", pending = ?executed.recovered_block().num_hash() ,"updating pending block");
 // if the parent is the canonical head, we can insert the block as the pending block
 self.canonical_in_memory_state.set_pending_block(executed.clone());
 }
@ -2988,7 +3000,7 @@ mod tests {
|
||||
let mut parent_hash = B256::ZERO;
|
||||
|
||||
for block in &blocks {
|
||||
let sealed_block = block.block();
|
||||
let sealed_block = block.recovered_block();
|
||||
let hash = sealed_block.hash();
|
||||
let number = sealed_block.number;
|
||||
blocks_by_hash.insert(hash, block.clone());
|
||||
@ -3002,7 +3014,7 @@ mod tests {
|
||||
self.tree.state.tree_state = TreeState {
|
||||
blocks_by_hash,
|
||||
blocks_by_number,
|
||||
current_canonical_head: blocks.last().unwrap().block().num_hash(),
|
||||
current_canonical_head: blocks.last().unwrap().recovered_block().num_hash(),
|
||||
parent_to_child,
|
||||
persisted_trie_updates: HashMap::default(),
|
||||
};
|
||||
@ -3013,12 +3025,11 @@ mod tests {
|
||||
CanonicalInMemoryState::new(state_by_hash, hash_by_number, pending, None, None);
|
||||
|
||||
self.blocks = blocks.clone();
|
||||
self.persist_blocks(
|
||||
blocks
|
||||
.into_iter()
|
||||
.map(|b| RecoveredBlock::new_sealed(b.block().clone(), b.senders().clone()))
|
||||
.collect(),
|
||||
);
|
||||
|
||||
let recovered_blocks =
|
||||
blocks.iter().map(|b| b.recovered_block().clone()).collect::<Vec<_>>();
|
||||
|
||||
self.persist_blocks(recovered_blocks);
|
||||
|
||||
self
|
||||
}
|
||||
@ -3311,7 +3322,7 @@ mod tests {
|
||||
let test_harness = TestHarness::new(MAINNET.clone()).with_blocks(blocks.clone());
|
||||
|
||||
for executed_block in blocks {
|
||||
let sealed_block = executed_block.block();
|
||||
let sealed_block = executed_block.recovered_block();
|
||||
|
||||
let expected_state = BlockState::new(executed_block.clone());
|
||||
|
||||
@ -3441,21 +3452,21 @@ mod tests {
|
||||
tree_state.insert_executed(blocks[1].clone());
|
||||
|
||||
assert_eq!(
|
||||
tree_state.parent_to_child.get(&blocks[0].block.hash()),
|
||||
Some(&HashSet::from_iter([blocks[1].block.hash()]))
|
||||
tree_state.parent_to_child.get(&blocks[0].recovered_block().hash()),
|
||||
Some(&HashSet::from_iter([blocks[1].recovered_block().hash()]))
|
||||
);
|
||||
|
||||
assert!(!tree_state.parent_to_child.contains_key(&blocks[1].block.hash()));
|
||||
assert!(!tree_state.parent_to_child.contains_key(&blocks[1].recovered_block().hash()));
|
||||
|
||||
tree_state.insert_executed(blocks[2].clone());
|
||||
|
||||
assert_eq!(
|
||||
tree_state.parent_to_child.get(&blocks[1].block.hash()),
|
||||
Some(&HashSet::from_iter([blocks[2].block.hash()]))
|
||||
tree_state.parent_to_child.get(&blocks[1].recovered_block().hash()),
|
||||
Some(&HashSet::from_iter([blocks[2].recovered_block().hash()]))
|
||||
);
|
||||
assert!(tree_state.parent_to_child.contains_key(&blocks[1].block.hash()));
|
||||
assert!(tree_state.parent_to_child.contains_key(&blocks[1].recovered_block().hash()));
|
||||
|
||||
assert!(!tree_state.parent_to_child.contains_key(&blocks[2].block.hash()));
|
||||
assert!(!tree_state.parent_to_child.contains_key(&blocks[2].recovered_block().hash()));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@ -3469,12 +3480,12 @@ mod tests {
|
||||
}
|
||||
assert_eq!(tree_state.blocks_by_hash.len(), 5);
|
||||
|
||||
let fork_block_3 =
|
||||
test_block_builder.get_executed_block_with_number(3, blocks[1].block.hash());
|
||||
let fork_block_4 =
|
||||
test_block_builder.get_executed_block_with_number(4, fork_block_3.block.hash());
|
||||
let fork_block_5 =
|
||||
test_block_builder.get_executed_block_with_number(5, fork_block_4.block.hash());
|
||||
let fork_block_3 = test_block_builder
|
||||
.get_executed_block_with_number(3, blocks[1].recovered_block().hash());
|
||||
let fork_block_4 = test_block_builder
|
||||
.get_executed_block_with_number(4, fork_block_3.recovered_block().hash());
|
||||
let fork_block_5 = test_block_builder
|
||||
.get_executed_block_with_number(5, fork_block_4.recovered_block().hash());
|
||||
|
||||
tree_state.insert_executed(fork_block_3.clone());
|
||||
tree_state.insert_executed(fork_block_4.clone());
|
||||
@ -3482,16 +3493,16 @@ mod tests {
|
||||
|
||||
assert_eq!(tree_state.blocks_by_hash.len(), 8);
|
||||
assert_eq!(tree_state.blocks_by_number[&3].len(), 2); // two blocks at height 3 (original and fork)
|
||||
assert_eq!(tree_state.parent_to_child[&blocks[1].block.hash()].len(), 2); // block 2 should have two children
|
||||
assert_eq!(tree_state.parent_to_child[&blocks[1].recovered_block().hash()].len(), 2); // block 2 should have two children
|
||||
|
||||
// verify that we can insert the same block again without issues
|
||||
tree_state.insert_executed(fork_block_4.clone());
|
||||
assert_eq!(tree_state.blocks_by_hash.len(), 8);
|
||||
|
||||
assert!(tree_state.parent_to_child[&fork_block_3.block.hash()]
|
||||
.contains(&fork_block_4.block.hash()));
|
||||
assert!(tree_state.parent_to_child[&fork_block_4.block.hash()]
|
||||
.contains(&fork_block_5.block.hash()));
|
||||
assert!(tree_state.parent_to_child[&fork_block_3.recovered_block().hash()]
|
||||
.contains(&fork_block_4.recovered_block().hash()));
|
||||
assert!(tree_state.parent_to_child[&fork_block_4.recovered_block().hash()]
|
||||
.contains(&fork_block_5.recovered_block().hash()));
|
||||
|
||||
assert_eq!(tree_state.blocks_by_number[&4].len(), 2);
|
||||
assert_eq!(tree_state.blocks_by_number[&5].len(), 2);
|
||||
@ -3510,40 +3521,40 @@ mod tests {
|
||||
let last = blocks.last().unwrap();
|
||||
|
||||
// set the canonical head
|
||||
tree_state.set_canonical_head(last.block.num_hash());
|
||||
tree_state.set_canonical_head(last.recovered_block().num_hash());
|
||||
|
||||
// inclusive bound, so we should remove anything up to and including 2
|
||||
tree_state.remove_until(
|
||||
BlockNumHash::new(2, blocks[1].block.hash()),
|
||||
BlockNumHash::new(2, blocks[1].recovered_block().hash()),
|
||||
start_num_hash.hash,
|
||||
Some(blocks[1].block.num_hash()),
|
||||
Some(blocks[1].recovered_block().num_hash()),
|
||||
);
|
||||
|
||||
assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].block.hash()));
|
||||
assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].block.hash()));
|
||||
assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].recovered_block().hash()));
|
||||
assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].recovered_block().hash()));
|
||||
assert!(!tree_state.blocks_by_number.contains_key(&1));
|
||||
assert!(!tree_state.blocks_by_number.contains_key(&2));
|
||||
|
||||
assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].block.hash()));
|
||||
assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].block.hash()));
|
||||
assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].block.hash()));
|
||||
assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].recovered_block().hash()));
|
||||
assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].recovered_block().hash()));
|
||||
assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].recovered_block().hash()));
|
||||
assert!(tree_state.blocks_by_number.contains_key(&3));
|
||||
assert!(tree_state.blocks_by_number.contains_key(&4));
|
||||
assert!(tree_state.blocks_by_number.contains_key(&5));
|
||||
|
||||
assert!(!tree_state.parent_to_child.contains_key(&blocks[0].block.hash()));
|
||||
assert!(!tree_state.parent_to_child.contains_key(&blocks[1].block.hash()));
|
||||
assert!(tree_state.parent_to_child.contains_key(&blocks[2].block.hash()));
|
||||
assert!(tree_state.parent_to_child.contains_key(&blocks[3].block.hash()));
|
||||
assert!(!tree_state.parent_to_child.contains_key(&blocks[4].block.hash()));
|
||||
assert!(!tree_state.parent_to_child.contains_key(&blocks[0].recovered_block().hash()));
|
||||
assert!(!tree_state.parent_to_child.contains_key(&blocks[1].recovered_block().hash()));
|
||||
assert!(tree_state.parent_to_child.contains_key(&blocks[2].recovered_block().hash()));
|
||||
assert!(tree_state.parent_to_child.contains_key(&blocks[3].recovered_block().hash()));
|
||||
assert!(!tree_state.parent_to_child.contains_key(&blocks[4].recovered_block().hash()));
|
||||
|
||||
assert_eq!(
|
||||
tree_state.parent_to_child.get(&blocks[2].block.hash()),
|
||||
Some(&HashSet::from_iter([blocks[3].block.hash()]))
|
||||
tree_state.parent_to_child.get(&blocks[2].recovered_block().hash()),
|
||||
Some(&HashSet::from_iter([blocks[3].recovered_block().hash()]))
|
||||
);
|
||||
assert_eq!(
|
||||
tree_state.parent_to_child.get(&blocks[3].block.hash()),
|
||||
Some(&HashSet::from_iter([blocks[4].block.hash()]))
|
||||
tree_state.parent_to_child.get(&blocks[3].recovered_block().hash()),
|
||||
Some(&HashSet::from_iter([blocks[4].recovered_block().hash()]))
|
||||
);
|
||||
}
@ -3560,40 +3571,40 @@ mod tests {
let last = blocks.last().unwrap();

// set the canonical head
tree_state.set_canonical_head(last.block.num_hash());
tree_state.set_canonical_head(last.recovered_block().num_hash());

// we should still remove everything up to and including 2
tree_state.remove_until(
BlockNumHash::new(2, blocks[1].block.hash()),
BlockNumHash::new(2, blocks[1].recovered_block().hash()),
start_num_hash.hash,
None,
);

assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].block.hash()));
assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].block.hash()));
assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].recovered_block().hash()));
assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].recovered_block().hash()));
assert!(!tree_state.blocks_by_number.contains_key(&1));
assert!(!tree_state.blocks_by_number.contains_key(&2));

assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].block.hash()));
assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].block.hash()));
assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].block.hash()));
assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].recovered_block().hash()));
assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].recovered_block().hash()));
assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].recovered_block().hash()));
assert!(tree_state.blocks_by_number.contains_key(&3));
assert!(tree_state.blocks_by_number.contains_key(&4));
assert!(tree_state.blocks_by_number.contains_key(&5));

assert!(!tree_state.parent_to_child.contains_key(&blocks[0].block.hash()));
assert!(!tree_state.parent_to_child.contains_key(&blocks[1].block.hash()));
assert!(tree_state.parent_to_child.contains_key(&blocks[2].block.hash()));
assert!(tree_state.parent_to_child.contains_key(&blocks[3].block.hash()));
assert!(!tree_state.parent_to_child.contains_key(&blocks[4].block.hash()));
assert!(!tree_state.parent_to_child.contains_key(&blocks[0].recovered_block().hash()));
assert!(!tree_state.parent_to_child.contains_key(&blocks[1].recovered_block().hash()));
assert!(tree_state.parent_to_child.contains_key(&blocks[2].recovered_block().hash()));
assert!(tree_state.parent_to_child.contains_key(&blocks[3].recovered_block().hash()));
assert!(!tree_state.parent_to_child.contains_key(&blocks[4].recovered_block().hash()));

assert_eq!(
tree_state.parent_to_child.get(&blocks[2].block.hash()),
Some(&HashSet::from_iter([blocks[3].block.hash()]))
tree_state.parent_to_child.get(&blocks[2].recovered_block().hash()),
Some(&HashSet::from_iter([blocks[3].recovered_block().hash()]))
);
assert_eq!(
tree_state.parent_to_child.get(&blocks[3].block.hash()),
Some(&HashSet::from_iter([blocks[4].block.hash()]))
tree_state.parent_to_child.get(&blocks[3].recovered_block().hash()),
Some(&HashSet::from_iter([blocks[4].recovered_block().hash()]))
);
}

@ -3610,40 +3621,40 @@ mod tests {
let last = blocks.last().unwrap();

// set the canonical head
tree_state.set_canonical_head(last.block.num_hash());
tree_state.set_canonical_head(last.recovered_block().num_hash());

// we have no forks so we should still remove anything up to and including 2
tree_state.remove_until(
BlockNumHash::new(2, blocks[1].block.hash()),
BlockNumHash::new(2, blocks[1].recovered_block().hash()),
start_num_hash.hash,
Some(blocks[0].block.num_hash()),
Some(blocks[0].recovered_block().num_hash()),
);

assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].block.hash()));
assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].block.hash()));
assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].recovered_block().hash()));
assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].recovered_block().hash()));
assert!(!tree_state.blocks_by_number.contains_key(&1));
assert!(!tree_state.blocks_by_number.contains_key(&2));

assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].block.hash()));
assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].block.hash()));
assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].block.hash()));
assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].recovered_block().hash()));
assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].recovered_block().hash()));
assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].recovered_block().hash()));
assert!(tree_state.blocks_by_number.contains_key(&3));
assert!(tree_state.blocks_by_number.contains_key(&4));
assert!(tree_state.blocks_by_number.contains_key(&5));

assert!(!tree_state.parent_to_child.contains_key(&blocks[0].block.hash()));
assert!(!tree_state.parent_to_child.contains_key(&blocks[1].block.hash()));
assert!(tree_state.parent_to_child.contains_key(&blocks[2].block.hash()));
assert!(tree_state.parent_to_child.contains_key(&blocks[3].block.hash()));
assert!(!tree_state.parent_to_child.contains_key(&blocks[4].block.hash()));
assert!(!tree_state.parent_to_child.contains_key(&blocks[0].recovered_block().hash()));
assert!(!tree_state.parent_to_child.contains_key(&blocks[1].recovered_block().hash()));
assert!(tree_state.parent_to_child.contains_key(&blocks[2].recovered_block().hash()));
assert!(tree_state.parent_to_child.contains_key(&blocks[3].recovered_block().hash()));
assert!(!tree_state.parent_to_child.contains_key(&blocks[4].recovered_block().hash()));

assert_eq!(
tree_state.parent_to_child.get(&blocks[2].block.hash()),
Some(&HashSet::from_iter([blocks[3].block.hash()]))
tree_state.parent_to_child.get(&blocks[2].recovered_block().hash()),
Some(&HashSet::from_iter([blocks[3].recovered_block().hash()]))
);
assert_eq!(
tree_state.parent_to_child.get(&blocks[3].block.hash()),
Some(&HashSet::from_iter([blocks[4].block.hash()]))
tree_state.parent_to_child.get(&blocks[3].recovered_block().hash()),
Some(&HashSet::from_iter([blocks[4].recovered_block().hash()]))
);
}

@ -3660,40 +3671,44 @@ mod tests {
}

// set block 3 as the current canonical head
test_harness.tree.state.tree_state.set_canonical_head(blocks[2].block.num_hash());
test_harness
.tree
.state
.tree_state
.set_canonical_head(blocks[2].recovered_block().num_hash());

// create a fork from block 2
let fork_block_3 =
test_block_builder.get_executed_block_with_number(3, blocks[1].block.hash());
let fork_block_4 =
test_block_builder.get_executed_block_with_number(4, fork_block_3.block.hash());
let fork_block_5 =
test_block_builder.get_executed_block_with_number(5, fork_block_4.block.hash());
let fork_block_3 = test_block_builder
.get_executed_block_with_number(3, blocks[1].recovered_block().hash());
let fork_block_4 = test_block_builder
.get_executed_block_with_number(4, fork_block_3.recovered_block().hash());
let fork_block_5 = test_block_builder
.get_executed_block_with_number(5, fork_block_4.recovered_block().hash());

test_harness.tree.state.tree_state.insert_executed(fork_block_3.clone());
test_harness.tree.state.tree_state.insert_executed(fork_block_4.clone());
test_harness.tree.state.tree_state.insert_executed(fork_block_5.clone());

// normal (non-reorg) case
let result = test_harness.tree.on_new_head(blocks[4].block.hash()).unwrap();
let result = test_harness.tree.on_new_head(blocks[4].recovered_block().hash()).unwrap();
assert!(matches!(result, Some(NewCanonicalChain::Commit { .. })));
if let Some(NewCanonicalChain::Commit { new }) = result {
assert_eq!(new.len(), 2);
assert_eq!(new[0].block.hash(), blocks[3].block.hash());
assert_eq!(new[1].block.hash(), blocks[4].block.hash());
assert_eq!(new[0].recovered_block().hash(), blocks[3].recovered_block().hash());
assert_eq!(new[1].recovered_block().hash(), blocks[4].recovered_block().hash());
}

// reorg case
let result = test_harness.tree.on_new_head(fork_block_5.block.hash()).unwrap();
let result = test_harness.tree.on_new_head(fork_block_5.recovered_block().hash()).unwrap();
assert!(matches!(result, Some(NewCanonicalChain::Reorg { .. })));
if let Some(NewCanonicalChain::Reorg { new, old }) = result {
assert_eq!(new.len(), 3);
assert_eq!(new[0].block.hash(), fork_block_3.block.hash());
assert_eq!(new[1].block.hash(), fork_block_4.block.hash());
assert_eq!(new[2].block.hash(), fork_block_5.block.hash());
assert_eq!(new[0].recovered_block().hash(), fork_block_3.recovered_block().hash());
assert_eq!(new[1].recovered_block().hash(), fork_block_4.recovered_block().hash());
assert_eq!(new[2].recovered_block().hash(), fork_block_5.recovered_block().hash());

assert_eq!(old.len(), 1);
assert_eq!(old[0].block.hash(), blocks[2].block.hash());
assert_eq!(old[0].recovered_block().hash(), blocks[2].recovered_block().hash());
}
}
@ -3712,7 +3727,7 @@ mod tests {
}

// set last block as the current canonical head
let last_block = blocks.last().unwrap().block.clone();
let last_block = blocks.last().unwrap().recovered_block().clone();

test_harness.tree.state.tree_state.set_canonical_head(last_block.num_hash());

@ -3722,8 +3737,7 @@ mod tests {

for block in &chain_a {
test_harness.tree.state.tree_state.insert_executed(ExecutedBlock {
block: Arc::new(block.clone_sealed_block()),
senders: Arc::new(block.senders().to_vec()),
recovered_block: Arc::new(block.clone()),
execution_output: Arc::new(ExecutionOutcome::default()),
hashed_state: Arc::new(HashedPostState::default()),
trie: Arc::new(TrieUpdates::default()),
@ -3733,8 +3747,7 @@ mod tests {

for block in &chain_b {
test_harness.tree.state.tree_state.insert_executed(ExecutedBlock {
block: Arc::new(block.clone_sealed_block()),
senders: Arc::new(block.senders().to_vec()),
recovered_block: Arc::new(block.clone()),
execution_output: Arc::new(ExecutionOutcome::default()),
hashed_state: Arc::new(HashedPostState::default()),
trie: Arc::new(TrieUpdates::default()),
@ -3752,12 +3765,12 @@ mod tests {
if let Some(NewCanonicalChain::Reorg { new, old }) = result {
assert_eq!(new.len(), expected_new.len());
for (index, block) in expected_new.iter().enumerate() {
assert_eq!(new[index].block.hash(), block.hash());
assert_eq!(new[index].recovered_block().hash(), block.hash());
}

assert_eq!(old.len(), chain_a.len());
for (index, block) in chain_a.iter().enumerate() {
assert_eq!(old[index].block.hash(), block.hash());
assert_eq!(old[index].recovered_block().hash(), block.hash());
}
}

@ -3798,12 +3811,12 @@ mod tests {
for (i, item) in
blocks_to_persist.iter().enumerate().take(expected_blocks_to_persist_length)
{
assert_eq!(item.block.number, last_persisted_block_number + i as u64 + 1);
assert_eq!(item.recovered_block().number, last_persisted_block_number + i as u64 + 1);
}

// make sure only canonical blocks are included
let fork_block = test_block_builder.get_executed_block_with_number(4, B256::random());
let fork_block_hash = fork_block.block.hash();
let fork_block_hash = fork_block.recovered_block().hash();
test_harness.tree.state.tree_state.insert_executed(fork_block);

assert!(test_harness.tree.state.tree_state.block_by_hash(fork_block_hash).is_some());
@ -3812,12 +3825,11 @@ mod tests {
assert_eq!(blocks_to_persist.len(), expected_blocks_to_persist_length);

// check that the fork block is not included in the blocks to persist
assert!(!blocks_to_persist.iter().any(|b| b.block.hash() == fork_block_hash));
assert!(!blocks_to_persist.iter().any(|b| b.recovered_block().hash() == fork_block_hash));

// check that the original block 4 is still included
assert!(blocks_to_persist
.iter()
.any(|b| b.block.number == 4 && b.block.hash() == blocks[4].block.hash()));
assert!(blocks_to_persist.iter().any(|b| b.recovered_block().number == 4 &&
b.recovered_block().hash() == blocks[4].recovered_block().hash()));
}

#[tokio::test]
@ -3831,7 +3843,7 @@ mod tests {
test_harness = test_harness.with_blocks(blocks);

let missing_block = test_block_builder
.generate_random_block(6, test_harness.blocks.last().unwrap().block().hash());
.generate_random_block(6, test_harness.blocks.last().unwrap().recovered_block().hash());

test_harness.fcu_to(missing_block.hash(), PayloadStatusEnum::Syncing).await;

@ -3855,11 +3867,11 @@ mod tests {
test_harness = test_harness.with_blocks(base_chain.clone());

test_harness
.fcu_to(base_chain.last().unwrap().block().hash(), ForkchoiceStatus::Valid)
.fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid)
.await;

// extend main chain
let main_chain = test_harness.block_builder.create_fork(base_chain[0].block(), 3);
let main_chain = test_harness.block_builder.create_fork(base_chain[0].recovered_block(), 3);

test_harness.insert_chain(main_chain).await;
}
@ -3872,7 +3884,7 @@ mod tests {
let main_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..5).collect();
test_harness = test_harness.with_blocks(main_chain.clone());

let fork_chain = test_harness.block_builder.create_fork(main_chain[2].block(), 3);
let fork_chain = test_harness.block_builder.create_fork(main_chain[2].recovered_block(), 3);
let fork_chain_last_hash = fork_chain.last().unwrap().hash();

// add fork blocks to the tree
@ -3905,13 +3917,13 @@ mod tests {
test_harness = test_harness.with_blocks(base_chain.clone());

test_harness
.fcu_to(base_chain.last().unwrap().block().hash(), ForkchoiceStatus::Valid)
.fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid)
.await;

// extend main chain with enough blocks to trigger pipeline run but don't insert them
let main_chain = test_harness
.block_builder
.create_fork(base_chain[0].block(), MIN_BLOCKS_FOR_PIPELINE_RUN + 10);
.create_fork(base_chain[0].recovered_block(), MIN_BLOCKS_FOR_PIPELINE_RUN + 10);

let main_chain_last_hash = main_chain.last().unwrap().hash();
test_harness.send_fcu(main_chain_last_hash, ForkchoiceStatus::Syncing).await;
@ -3972,14 +3984,14 @@ mod tests {

// fcu to the tip of base chain
test_harness
.fcu_to(base_chain.last().unwrap().block().hash(), ForkchoiceStatus::Valid)
.fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid)
.await;

// create main chain, extension of base chain, with enough blocks to
// trigger backfill sync
let main_chain = test_harness
.block_builder
.create_fork(base_chain[0].block(), MIN_BLOCKS_FOR_PIPELINE_RUN + 10);
.create_fork(base_chain[0].recovered_block(), MIN_BLOCKS_FOR_PIPELINE_RUN + 10);

let main_chain_last = main_chain.last().unwrap();
let main_chain_last_hash = main_chain_last.hash();
@ -4099,11 +4111,12 @@ mod tests {

// fcu to the tip of base chain
test_harness
.fcu_to(base_chain.last().unwrap().block().hash(), ForkchoiceStatus::Valid)
.fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid)
.await;

// create main chain, extension of base chain
let main_chain = test_harness.block_builder.create_fork(base_chain[0].block(), 10);
let main_chain =
test_harness.block_builder.create_fork(base_chain[0].recovered_block(), 10);
// determine target in the middle of main hain
let target = main_chain.get(5).unwrap();
let target_hash = target.hash();
@ -4138,7 +4151,7 @@ mod tests {
let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect();
test_harness = test_harness.with_blocks(base_chain.clone());

let old_head = base_chain.first().unwrap().block();
let old_head = base_chain.first().unwrap().recovered_block();

// extend base chain
let extension_chain = test_harness.block_builder.create_fork(old_head, 5);
@ -4198,7 +4211,7 @@ mod tests {
// side chain consisting of two blocks, the last will be inserted first
// so that we force it to be buffered
let side_chain =
test_harness.block_builder.create_fork(base_chain.last().unwrap().block(), 2);
test_harness.block_builder.create_fork(base_chain.last().unwrap().recovered_block(), 2);

// buffer last block of side chain
let buffered_block = side_chain.last().unwrap();
@ -4236,7 +4249,7 @@ mod tests {
let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect();
test_harness = test_harness.with_blocks(base_chain.clone());

let old_head = base_chain.first().unwrap().block();
let old_head = base_chain.first().unwrap().recovered_block();

// extend base chain
let extension_chain = test_harness.block_builder.create_fork(old_head, 5);
@ -4300,8 +4313,9 @@ mod tests {
test_harness = test_harness.with_blocks(base_chain.clone());

// create a side chain with an invalid block
let side_chain =
test_harness.block_builder.create_fork(base_chain.last().unwrap().block(), 15);
let side_chain = test_harness
.block_builder
.create_fork(base_chain.last().unwrap().recovered_block(), 15);
let invalid_index = 9;

test_harness.setup_range_insertion_for_invalid_chain(side_chain.clone(), invalid_index);
@ -30,7 +30,8 @@ use reth_payload_builder_primitives::PayloadBuilderError;
use reth_payload_primitives::PayloadBuilderAttributes;
use reth_primitives::{
proofs::{self},
Block, BlockBody, EthereumHardforks, InvalidTransactionError, Receipt, TransactionSigned,
Block, BlockBody, EthereumHardforks, InvalidTransactionError, Receipt, RecoveredBlock,
TransactionSigned,
};
use reth_primitives_traits::Block as _;
use reth_revm::database::StateProviderDatabase;
@ -478,8 +479,10 @@ where

// create the executed block data
let executed = ExecutedBlock {
block: sealed_block.clone(),
senders: Arc::new(executed_senders),
recovered_block: Arc::new(RecoveredBlock::new_sealed(
sealed_block.as_ref().clone(),
executed_senders,
)),
execution_output: Arc::new(execution_outcome),
hashed_state: Arc::new(hashed_state),
trie: Arc::new(trie_output),

@ -344,7 +344,7 @@ where
tokio::select! {
payload = built_payloads.select_next_some() => {
if let Some(executed_block) = payload.executed_block() {
debug!(target: "reth::cli", block=?executed_block.block().num_hash(), "inserting built payload");
debug!(target: "reth::cli", block=?executed_block.recovered_block().num_hash(), "inserting built payload");
if let Either::Right(eth_service) = &mut engine_service {
eth_service.orchestrator_mut().handler_mut().handler_mut().on_event(EngineApiRequest::InsertExecutedBlock(executed_block).into());
}

@ -25,7 +25,8 @@ use reth_payload_builder_primitives::PayloadBuilderError;
use reth_payload_primitives::PayloadBuilderAttributes;
use reth_payload_util::{NoopPayloadTransactions, PayloadTransactions};
use reth_primitives::{
proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, SealedHeader, TxType,
proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, RecoveredBlock,
SealedHeader, TxType,
};
use reth_primitives_traits::block::Block as _;
use reth_provider::{
@ -431,8 +432,10 @@ where

// create the executed block data
let executed: ExecutedBlock<OpPrimitives> = ExecutedBlock {
block: sealed_block.clone(),
senders: Arc::new(info.executed_senders),
recovered_block: Arc::new(RecoveredBlock::new_sealed(
sealed_block.as_ref().clone(),
info.executed_senders,
)),
execution_output: Arc::new(execution_outcome),
hashed_state: Arc::new(hashed_state),
trie: Arc::new(trie_output),

@ -792,7 +792,7 @@ mod tests {
use reth_db_api::{cursor::DbCursorRO, transaction::DbTx};
use reth_errors::ProviderError;
use reth_execution_types::{Chain, ExecutionOutcome};
use reth_primitives::{EthPrimitives, Receipt, SealedBlock, StaticFileSegment};
use reth_primitives::{EthPrimitives, Receipt, RecoveredBlock, SealedBlock, StaticFileSegment};
use reth_primitives_traits::{BlockBody, SignedTransaction};
use reth_storage_api::{
BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader,
@ -925,8 +925,7 @@ mod tests {
ExecutionOutcome { receipts: block_receipts.into(), ..Default::default() };

ExecutedBlock::new(
Arc::new(block.clone()),
Arc::new(senders),
Arc::new(RecoveredBlock::new_sealed(block.clone(), senders)),
execution_outcome.into(),
Default::default(),
Default::default(),
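
The hunks above converge on one construction pattern: recover the senders once, wrap them together with the sealed block via RecoveredBlock::new_sealed, and hand that single value to ExecutedBlock. A short sketch of that pattern under the same assumptions as the test code above (EthPrimitives as the node primitives; empty execution output, hashed state and trie updates; the helper name is illustrative, not part of the commit):

    use std::sync::Arc;

    use alloy_primitives::Address;
    use reth_chain_state::ExecutedBlock;
    use reth_primitives::{EthPrimitives, RecoveredBlock, SealedBlock};

    /// Builds an ExecutedBlock with empty execution artifacts from a sealed block
    /// and its already-recovered senders, mirroring the test setup in this diff.
    fn empty_executed(block: SealedBlock, senders: Vec<Address>) -> ExecutedBlock<EthPrimitives> {
        ExecutedBlock::new(
            Arc::new(RecoveredBlock::new_sealed(block, senders)),
            Default::default(),
            Default::default(),
            Default::default(),
        )
    }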
@ -987,10 +986,10 @@ mod tests {
if state.anchor().number + 1 == block_number {
let mut lowest_memory_block =
state.parent_state_chain().last().expect("qed").block();
let num_hash = lowest_memory_block.block().num_hash();
let num_hash = lowest_memory_block.recovered_block().num_hash();

let mut execution_output = (*lowest_memory_block.execution_output).clone();
execution_output.first_block = lowest_memory_block.block().number;
execution_output.first_block = lowest_memory_block.recovered_block().number;
lowest_memory_block.execution_output = Arc::new(execution_output);

// Push to disk
@ -1055,8 +1054,10 @@ mod tests {
first_in_mem_block.senders().expect("failed to recover senders");
let chain = NewCanonicalChain::Commit {
new: vec![ExecutedBlock::new(
Arc::new(first_in_mem_block.clone()),
Arc::new(in_memory_block_senders),
Arc::new(RecoveredBlock::new_sealed(
first_in_mem_block.clone(),
in_memory_block_senders,
)),
Default::default(),
Default::default(),
Default::default(),
@ -1089,8 +1090,10 @@ mod tests {

// Insert the last block into the pending state
provider.canonical_in_memory_state.set_pending_block(ExecutedBlock {
block: Arc::new(last_in_mem_block.clone()),
senders: Default::default(),
recovered_block: Arc::new(RecoveredBlock::new_sealed(
last_in_mem_block.clone(),
Default::default(),
)),
execution_output: Default::default(),
hashed_state: Default::default(),
trie: Default::default(),
@ -1145,8 +1148,10 @@ mod tests {
first_in_mem_block.senders().expect("failed to recover senders");
let chain = NewCanonicalChain::Commit {
new: vec![ExecutedBlock::new(
Arc::new(first_in_mem_block.clone()),
Arc::new(in_memory_block_senders),
Arc::new(RecoveredBlock::new_sealed(
first_in_mem_block.clone(),
in_memory_block_senders,
)),
Default::default(),
Default::default(),
Default::default(),
@ -1197,8 +1202,10 @@ mod tests {

// Set the block as pending
provider.canonical_in_memory_state.set_pending_block(ExecutedBlock {
block: Arc::new(block.clone()),
senders: Default::default(),
recovered_block: Arc::new(RecoveredBlock::new_sealed(
block.clone(),
block.senders().unwrap(),
)),
execution_output: Default::default(),
hashed_state: Default::default(),
trie: Default::default(),
@ -1278,8 +1285,10 @@ mod tests {
first_in_mem_block.senders().expect("failed to recover senders");
let chain = NewCanonicalChain::Commit {
new: vec![ExecutedBlock::new(
Arc::new(first_in_mem_block.clone()),
Arc::new(in_memory_block_senders),
Arc::new(RecoveredBlock::new_sealed(
first_in_mem_block.clone(),
in_memory_block_senders,
)),
Default::default(),
Default::default(),
Default::default(),
@ -1841,8 +1850,7 @@ mod tests {
.map(|block| {
let senders = block.senders().expect("failed to recover senders");
ExecutedBlock::new(
Arc::new(block.clone()),
Arc::new(senders),
Arc::new(RecoveredBlock::new_sealed(block.clone(), senders)),
Arc::new(ExecutionOutcome {
bundle: BundleState::new(
in_memory_state.into_iter().map(|(address, (account, _))| {
@ -1977,8 +1985,10 @@ mod tests {
// adding a pending block to state can test pending() and pending_state_by_hash() function
let pending_block = database_blocks[database_blocks.len() - 1].clone();
only_database_provider.canonical_in_memory_state.set_pending_block(ExecutedBlock {
block: Arc::new(pending_block.clone()),
senders: Default::default(),
recovered_block: Arc::new(RecoveredBlock::new_sealed(
pending_block.clone(),
Default::default(),
)),
execution_output: Default::default(),
hashed_state: Default::default(),
trie: Default::default(),
@ -2098,8 +2108,10 @@ mod tests {
// Set the pending block in memory
let pending_block = in_memory_blocks.last().unwrap();
provider.canonical_in_memory_state.set_pending_block(ExecutedBlock {
block: Arc::new(pending_block.clone()),
senders: Default::default(),
recovered_block: Arc::new(RecoveredBlock::new_sealed(
pending_block.clone(),
Default::default(),
)),
execution_output: Default::default(),
hashed_state: Default::default(),
trie: Default::default(),
@ -442,7 +442,7 @@ impl<N: ProviderNodeTypes> ConsistentProvider<N> {
let (start, end) = self.convert_range_bounds(range, || {
in_mem_chain
.iter()
.map(|b| b.block_ref().block().body().transactions().len() as u64)
.map(|b| b.block_ref().recovered_block().body().transactions().len() as u64)
.sum::<u64>() +
last_block_body_index.last_tx_num()
});
@ -474,7 +474,8 @@ impl<N: ProviderNodeTypes> ConsistentProvider<N> {

// Iterate from the lowest block to the highest in-memory chain
for block_state in in_mem_chain.iter().rev() {
let block_tx_count = block_state.block_ref().block().body().transactions().len();
let block_tx_count =
block_state.block_ref().recovered_block().body().transactions().len();
let remaining = (tx_range.end() - tx_range.start() + 1) as usize;

// If the transaction range start is equal or higher than the next block first
@ -546,7 +547,7 @@ impl<N: ProviderNodeTypes> ConsistentProvider<N> {
// Iterate from the lowest block to the highest
for block_state in in_mem_chain.iter().rev() {
let executed_block = block_state.block_ref();
let block = executed_block.block();
let block = executed_block.recovered_block();

for tx_index in 0..block.body().transactions().len() {
match id {
@ -629,7 +630,7 @@ impl<N: ProviderNodeTypes> HeaderProvider for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_block(
(*block_hash).into(),
|db_provider| db_provider.header(block_hash),
|block_state| Ok(Some(block_state.block_ref().block().header().clone())),
|block_state| Ok(Some(block_state.block_ref().recovered_block().clone_header())),
)
}

@ -637,7 +638,7 @@ impl<N: ProviderNodeTypes> HeaderProvider for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_block(
num.into(),
|db_provider| db_provider.header_by_number(num),
|block_state| Ok(Some(block_state.block_ref().block().header().clone())),
|block_state| Ok(Some(block_state.block_ref().recovered_block().clone_header())),
)
}

@ -679,7 +680,7 @@ impl<N: ProviderNodeTypes> HeaderProvider for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_block_range_while(
range,
|db_provider, range, _| db_provider.headers_range(range),
|block_state, _| Some(block_state.block_ref().block().header().clone()),
|block_state, _| Some(block_state.block_ref().recovered_block().header().clone()),
|_| true,
)
}
@ -691,7 +692,7 @@ impl<N: ProviderNodeTypes> HeaderProvider for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_block(
number.into(),
|db_provider| db_provider.sealed_header(number),
|block_state| Ok(Some(block_state.block_ref().block().clone_sealed_header())),
|block_state| Ok(Some(block_state.block_ref().recovered_block().clone_sealed_header())),
)
}

@ -702,7 +703,7 @@ impl<N: ProviderNodeTypes> HeaderProvider for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_block_range_while(
range,
|db_provider, range, _| db_provider.sealed_headers_range(range),
|block_state, _| Some(block_state.block_ref().block().clone_sealed_header()),
|block_state, _| Some(block_state.block_ref().recovered_block().clone_sealed_header()),
|_| true,
)
}
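
The provider closures above stop going through block_state.block_ref().block() and instead pull headers from the recovered block. A compact sketch of the same header access, with the function name and the EthPrimitives parameterization as illustrative assumptions:

    use reth_chain_state::ExecutedBlock;
    use reth_primitives::{EthPrimitives, SealedHeader};

    /// Clones the sealed header out of an executed block via its RecoveredBlock,
    /// as the in-memory header lookups above now do.
    fn sealed_header_of(executed: &ExecutedBlock<EthPrimitives>) -> SealedHeader {
        executed.recovered_block().clone_sealed_header()
    }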
@ -716,7 +717,7 @@ impl<N: ProviderNodeTypes> HeaderProvider for ConsistentProvider<N> {
range,
|db_provider, range, predicate| db_provider.sealed_headers_while(range, predicate),
|block_state, predicate| {
let header = block_state.block_ref().block().sealed_header();
let header = block_state.block_ref().recovered_block().sealed_header();
predicate(header).then(|| header.clone())
},
predicate,
@ -802,7 +803,7 @@ impl<N: ProviderNodeTypes> BlockReader for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_block(
hash.into(),
|db_provider| db_provider.find_block_by_hash(hash, source),
|block_state| Ok(Some(block_state.block_ref().block().clone_block())),
|block_state| Ok(Some(block_state.block_ref().recovered_block().clone_block())),
)
}
BlockSource::Pending => {
@ -815,7 +816,7 @@ impl<N: ProviderNodeTypes> BlockReader for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_block(
id,
|db_provider| db_provider.block(id),
|block_state| Ok(Some(block_state.block_ref().block().clone_block())),
|block_state| Ok(Some(block_state.block_ref().recovered_block().clone_block())),
)
}

@ -847,7 +848,7 @@ impl<N: ProviderNodeTypes> BlockReader for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_block(
id,
|db_provider| db_provider.block_with_senders(id, transaction_kind),
|block_state| Ok(Some(block_state.clone_recovered_block())),
|block_state| Ok(Some(block_state.block().recovered_block().clone())),
)
}

@ -859,7 +860,7 @@ impl<N: ProviderNodeTypes> BlockReader for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_block(
id,
|db_provider| db_provider.sealed_block_with_senders(id, transaction_kind),
|block_state| Ok(Some(block_state.clone_recovered_block())),
|block_state| Ok(Some(block_state.block().recovered_block().clone())),
)
}

@ -867,7 +868,7 @@ impl<N: ProviderNodeTypes> BlockReader for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_block_range_while(
range,
|db_provider, range, _| db_provider.block_range(range),
|block_state, _| Some(block_state.block_ref().block().clone_block()),
|block_state, _| Some(block_state.block_ref().recovered_block().clone_block()),
|_| true,
)
}
@ -879,7 +880,7 @@ impl<N: ProviderNodeTypes> BlockReader for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_block_range_while(
range,
|db_provider, range, _| db_provider.block_with_senders_range(range),
|block_state, _| Some(block_state.clone_recovered_block()),
|block_state, _| Some(block_state.block().recovered_block().clone()),
|_| true,
)
}
@ -891,7 +892,7 @@ impl<N: ProviderNodeTypes> BlockReader for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_block_range_while(
range,
|db_provider, range, _| db_provider.sealed_block_with_senders_range(range),
|block_state, _| Some(block_state.clone_recovered_block()),
|block_state, _| Some(block_state.block().recovered_block().clone()),
|_| true,
)
}
@ -913,7 +914,13 @@ impl<N: ProviderNodeTypes> TransactionsProvider for ConsistentProvider<N> {
id.into(),
|provider| provider.transaction_by_id(id),
|tx_index, _, block_state| {
Ok(block_state.block_ref().block().body().transactions().get(tx_index).cloned())
Ok(block_state
.block_ref()
.recovered_block()
.body()
.transactions()
.get(tx_index)
.cloned())
},
)
}
@ -926,7 +933,13 @@ impl<N: ProviderNodeTypes> TransactionsProvider for ConsistentProvider<N> {
id.into(),
|provider| provider.transaction_by_id_unhashed(id),
|tx_index, _, block_state| {
Ok(block_state.block_ref().block().body().transactions().get(tx_index).cloned())
Ok(block_state
.block_ref()
.recovered_block()
.body()
.transactions()
.get(tx_index)
.cloned())
},
)
}
@ -956,7 +969,7 @@ impl<N: ProviderNodeTypes> TransactionsProvider for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_tx(
id.into(),
|provider| provider.transaction_block(id),
|_, _, block_state| Ok(Some(block_state.block_ref().block().number())),
|_, _, block_state| Ok(Some(block_state.block_ref().recovered_block().number())),
)
}

@ -967,7 +980,9 @@ impl<N: ProviderNodeTypes> TransactionsProvider for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_block(
id,
|provider| provider.transactions_by_block(id),
|block_state| Ok(Some(block_state.block_ref().block().body().transactions().to_vec())),
|block_state| {
Ok(Some(block_state.block_ref().recovered_block().body().transactions().to_vec()))
},
)
}

@ -978,7 +993,9 @@ impl<N: ProviderNodeTypes> TransactionsProvider for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_block_range_while(
range,
|db_provider, range, _| db_provider.transactions_by_block_range(range),
|block_state, _| Some(block_state.block_ref().block().body().transactions().to_vec()),
|block_state, _| {
Some(block_state.block_ref().recovered_block().body().transactions().to_vec())
},
|_| true,
)
}
@ -991,7 +1008,8 @@ impl<N: ProviderNodeTypes> TransactionsProvider for ConsistentProvider<N> {
range,
|db_provider, db_range| db_provider.transactions_by_tx_range(db_range),
|index_range, block_state| {
Ok(block_state.block_ref().block().body().transactions()[index_range].to_vec())
Ok(block_state.block_ref().recovered_block().body().transactions()[index_range]
.to_vec())
},
)
}
@ -1003,7 +1021,9 @@ impl<N: ProviderNodeTypes> TransactionsProvider for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_tx_range(
range,
|db_provider, db_range| db_provider.senders_by_tx_range(db_range),
|index_range, block_state| Ok(block_state.block_ref().senders[index_range].to_vec()),
|index_range, block_state| {
Ok(block_state.block_ref().recovered_block.senders()[index_range].to_vec())
},
)
}

@ -1011,7 +1031,9 @@ impl<N: ProviderNodeTypes> TransactionsProvider for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_tx(
id.into(),
|provider| provider.transaction_sender(id),
|tx_index, _, block_state| Ok(block_state.block_ref().senders.get(tx_index).copied()),
|tx_index, _, block_state| {
Ok(block_state.block_ref().recovered_block.senders().get(tx_index).copied())
},
)
}
}
@ -1032,7 +1054,7 @@ impl<N: ProviderNodeTypes> ReceiptProvider for ConsistentProvider<N> {
fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult<Option<Self::Receipt>> {
for block_state in self.head_block.iter().flat_map(|b| b.chain()) {
let executed_block = block_state.block_ref();
let block = executed_block.block();
let block = executed_block.recovered_block();
let receipts = block_state.executed_block_receipts();

// assuming 1:1 correspondence between transactions and receipts
@ -1124,7 +1146,9 @@ impl<N: ProviderNodeTypes> WithdrawalsProvider for ConsistentProvider<N> {
self.get_in_memory_or_storage_by_block(
id,
|db_provider| db_provider.withdrawals_by_block(id, timestamp),
|block_state| Ok(block_state.block_ref().block().body().withdrawals().cloned()),
|block_state| {
Ok(block_state.block_ref().recovered_block().body().withdrawals().cloned())
},
)
}
}
@ -1139,7 +1163,7 @@ impl<N: ProviderNodeTypes> OmmersProvider for ConsistentProvider<N> {
return Ok(Some(Vec::new()))
}

Ok(block_state.block_ref().block().body().ommers().map(|o| o.to_vec()))
Ok(block_state.block_ref().recovered_block().body().ommers().map(|o| o.to_vec()))
},
)
}
@ -1167,8 +1191,9 @@ impl<N: ProviderNodeTypes> BlockBodyIndicesProvider for ConsistentProvider<N> {

// Iterate from the lowest block in memory until our target block
for state in block_state.chain().collect::<Vec<_>>().into_iter().rev() {
let block_tx_count = state.block_ref().block.body().transactions().len() as u64;
if state.block_ref().block().number() == number {
let block_tx_count =
state.block_ref().recovered_block().body().transactions().len() as u64;
if state.block_ref().recovered_block().number() == number {
stored_indices.tx_count = block_tx_count;
} else {
stored_indices.first_tx_num += block_tx_count;
@ -1450,7 +1475,7 @@ mod tests {
use reth_chain_state::{ExecutedBlock, NewCanonicalChain};
use reth_db::models::AccountBeforeTx;
use reth_execution_types::ExecutionOutcome;
use reth_primitives::SealedBlock;
use reth_primitives::{RecoveredBlock, SealedBlock};
use reth_storage_api::{BlockReader, BlockSource, ChangeSetReader};
use reth_testing_utils::generators::{
self, random_block_range, random_changeset_range, random_eoa_accounts, BlockRangeParams,
@ -1550,8 +1575,10 @@ mod tests {
first_in_mem_block.senders().expect("failed to recover senders");
let chain = NewCanonicalChain::Commit {
new: vec![ExecutedBlock::new(
Arc::new(first_in_mem_block.clone()),
Arc::new(in_memory_block_senders),
Arc::new(RecoveredBlock::new_sealed(
first_in_mem_block.clone(),
in_memory_block_senders,
)),
Default::default(),
Default::default(),
Default::default(),
@ -1590,8 +1617,10 @@ mod tests {

// Insert the last block into the pending state
provider.canonical_in_memory_state.set_pending_block(ExecutedBlock {
block: Arc::new(last_in_mem_block.clone()),
senders: Default::default(),
recovered_block: Arc::new(RecoveredBlock::new_sealed(
last_in_mem_block.clone(),
Default::default(),
)),
execution_output: Default::default(),
hashed_state: Default::default(),
trie: Default::default(),
@ -1654,8 +1683,10 @@ mod tests {
first_in_mem_block.senders().expect("failed to recover senders");
let chain = NewCanonicalChain::Commit {
new: vec![ExecutedBlock::new(
Arc::new(first_in_mem_block.clone()),
Arc::new(in_memory_block_senders),
Arc::new(RecoveredBlock::new_sealed(
first_in_mem_block.clone(),
in_memory_block_senders,
)),
Default::default(),
Default::default(),
Default::default(),
@ -1758,8 +1789,7 @@ mod tests {
.map(|block| {
let senders = block.senders().expect("failed to recover senders");
ExecutedBlock::new(
Arc::new(block.clone()),
Arc::new(senders),
Arc::new(RecoveredBlock::new_sealed(block.clone(), senders)),
Arc::new(ExecutionOutcome {
bundle: BundleState::new(
in_memory_state.into_iter().map(|(address, (account, _))| {

@ -143,9 +143,9 @@ where
}

// NOTE: checked non-empty above
let first_block = blocks.first().unwrap().block();
let first_block = blocks.first().unwrap().recovered_block();

let last_block = blocks.last().unwrap().block();
let last_block = blocks.last().unwrap().recovered_block();
let first_number = first_block.number();
let last_block_number = last_block.number();

@ -160,11 +160,9 @@ where
// * trie updates (cannot naively extend, need helper)
// * indices (already done basically)
// Insert the blocks
for ExecutedBlock { block, senders, execution_output, hashed_state, trie } in blocks {
let sealed_block = Arc::unwrap_or_clone(block)
.try_with_senders_unchecked(Arc::unwrap_or_clone(senders))
.unwrap();
self.database().insert_block(sealed_block, StorageLocation::Both)?;
for ExecutedBlock { recovered_block, execution_output, hashed_state, trie } in blocks {
self.database()
.insert_block(Arc::unwrap_or_clone(recovered_block), StorageLocation::Both)?;

// Write state and changesets to the database.
// Must be written after blocks because of the receipt lookup.
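
Finally, the storage writer above no longer re-attaches senders before persisting: it destructures the new field set and passes the RecoveredBlock to insert_block directly. A sketch of taking ownership of that block from an ExecutedBlock in the same spirit (the helper name and concrete types are assumptions for illustration; Arc::unwrap_or_clone is the same call used above):

    use std::sync::Arc;

    use reth_chain_state::ExecutedBlock;
    use reth_primitives::{Block, EthPrimitives, RecoveredBlock};

    /// Consumes an ExecutedBlock and returns its recovered block, cloning only if
    /// the Arc is still shared; the execution artifacts are dropped here.
    fn take_recovered(executed: ExecutedBlock<EthPrimitives>) -> RecoveredBlock<Block> {
        let ExecutedBlock { recovered_block, execution_output, hashed_state, trie } = executed;
        drop((execution_output, hashed_state, trie));
        Arc::unwrap_or_clone(recovered_block)
    }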