refactor: make sender recovery explicit in provider (#5776)

This commit is contained in:
Bjerg
2023-12-15 15:05:52 +02:00
committed by GitHub
parent faa9a22a71
commit 3f7760d852
12 changed files with 157 additions and 142 deletions

View File

@ -195,7 +195,13 @@ impl Command {
let provider_rw = factory.provider_rw()?;
// Insert block, state and hashes
provider_rw.insert_block(block.clone(), None, None)?;
provider_rw.insert_block(
block
.clone()
.try_seal_with_senders()
.map_err(|_| BlockValidationError::SenderRecoveryError)?,
None,
)?;
block_state.write_to_db(provider_rw.tx_ref(), OriginalValuesKnown::No)?;
let storage_lists = provider_rw.changed_storages_with_range(block.number..=block.number)?;
let storages = provider_rw.plain_state_storages(storage_lists)?;

View File

@ -177,10 +177,10 @@ impl Command {
Ok(senders) => senders,
Err(err) => {
warn!(target: "reth::cli", "Error sealing block with senders: {err:?}. Skipping...");
continue
continue;
}
};
provider_rw.insert_block(sealed_block.block, Some(sealed_block.senders), None)?;
provider_rw.insert_block(sealed_block, None)?;
}
// Check if any of hashing or merkle stages aren't on the same block number as
@ -277,7 +277,7 @@ impl Command {
let clean_result = merkle_stage.execute(&provider_rw, clean_input);
assert!(clean_result.is_ok(), "Clean state root calculation failed");
if clean_result.unwrap().done {
break
break;
}
}
@ -343,7 +343,7 @@ impl Command {
clean.1.nibbles.len() > self.skip_node_depth.unwrap_or_default()
{
first_mismatched_storage = Some((incremental, clean));
break
break;
}
}
(Some(incremental), None) => {

View File

@ -161,33 +161,35 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
if block.number <= last_finalized_block {
// check if block is canonical
if self.is_block_hash_canonical(&block.hash)? {
return Ok(Some(BlockStatus::Valid))
return Ok(Some(BlockStatus::Valid));
}
// check if block is inside database
if self.externals.provider_factory.provider()?.block_number(block.hash)?.is_some() {
return Ok(Some(BlockStatus::Valid))
return Ok(Some(BlockStatus::Valid));
}
return Err(BlockchainTreeError::PendingBlockIsFinalized {
last_finalized: last_finalized_block,
}
.into())
.into());
}
// check if block is part of canonical chain
if self.is_block_hash_canonical(&block.hash)? {
return Ok(Some(BlockStatus::Valid))
return Ok(Some(BlockStatus::Valid));
}
// is block inside chain
if let Some(status) = self.is_block_inside_chain(&block) {
return Ok(Some(status))
return Ok(Some(status));
}
// check if block is disconnected
if let Some(block) = self.state.buffered_blocks.block(block) {
return Ok(Some(BlockStatus::Disconnected { missing_ancestor: block.parent_num_hash() }))
return Ok(Some(BlockStatus::Disconnected {
missing_ancestor: block.parent_num_hash(),
}));
}
Ok(None)
@ -278,7 +280,7 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
// get canonical fork.
let canonical_fork = self.canonical_fork(chain_id)?;
return Some(BundleStateData { state, parent_block_hashed, canonical_fork })
return Some(BundleStateData { state, parent_block_hashed, canonical_fork });
}
// check if there is canonical block
@ -288,7 +290,7 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
canonical_fork: ForkBlock { number: canonical_number, hash: block_hash },
state: BundleStateWithReceipts::default(),
parent_block_hashed: self.canonical_chain().inner().clone(),
})
});
}
None
@ -311,7 +313,7 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
// check if block parent can be found in any side chain.
if let Some(chain_id) = self.block_indices().get_blocks_chain_id(&parent.hash) {
// found parent in side tree, try to insert there
return self.try_insert_block_into_side_chain(block, chain_id, block_validation_kind)
return self.try_insert_block_into_side_chain(block, chain_id, block_validation_kind);
}
// if not found, check if the parent can be found inside canonical chain.
@ -319,7 +321,7 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
.is_block_hash_canonical(&parent.hash)
.map_err(|err| InsertBlockError::new(block.block.clone(), err.into()))?
{
return self.try_append_canonical_chain(block, block_validation_kind)
return self.try_append_canonical_chain(block, block_validation_kind);
}
// this is another check to ensure that if the block points to a canonical block its block
@ -335,7 +337,7 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
block_number: block.number,
},
block.block,
))
));
}
}
@ -412,7 +414,7 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
return Err(InsertBlockError::execution_error(
BlockValidationError::BlockPreMerge { hash: block.hash }.into(),
block.block,
))
));
}
let parent_header = provider
@ -575,7 +577,7 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
} else {
// if there is no fork block that points to other chains, break the loop.
// it means that this fork joins the canonical block.
break
break;
}
}
hashes
@ -596,9 +598,9 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
// get fork block chain
if let Some(fork_chain_id) = self.block_indices().get_blocks_chain_id(&fork.hash) {
chain_id = fork_chain_id;
continue
continue;
}
break
break;
}
(self.block_indices().canonical_hash(&fork.number) == Some(fork.hash)).then_some(fork)
}
@ -705,7 +707,7 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
pub fn buffer_block(&mut self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> {
// validate block consensus rules
if let Err(err) = self.validate_block(&block) {
return Err(InsertBlockError::consensus_error(err, block.block))
return Err(InsertBlockError::consensus_error(err, block.block));
}
self.state.buffered_blocks.insert_block(block);
@ -722,17 +724,17 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
?block,
"Failed to validate total difficulty for block {}: {e:?}", block.header.hash
);
return Err(e)
return Err(e);
}
if let Err(e) = self.externals.consensus.validate_header(block) {
error!(?block, "Failed to validate header {}: {e:?}", block.header.hash);
return Err(e)
return Err(e);
}
if let Err(e) = self.externals.consensus.validate_block(block) {
error!(?block, "Failed to validate block {}: {e:?}", block.header.hash);
return Err(e)
return Err(e);
}
Ok(())
@ -753,7 +755,7 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
Some(BlockStatus::Valid)
} else {
Some(BlockStatus::Accepted)
}
};
}
None
}
@ -794,7 +796,7 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
// validate block consensus rules
if let Err(err) = self.validate_block(&block) {
return Err(InsertBlockError::consensus_error(err, block.block))
return Err(InsertBlockError::consensus_error(err, block.block));
}
Ok(InsertPayloadOk::Inserted(
@ -963,7 +965,7 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
}
if header.is_none() && self.is_block_hash_inside_chain(*hash) {
return Ok(None)
return Ok(None);
}
if header.is_none() {
@ -1018,9 +1020,9 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
return Err(CanonicalError::from(BlockValidationError::BlockPreMerge {
hash: *block_hash,
})
.into())
.into());
}
return Ok(CanonicalOutcome::AlreadyCanonical { header })
return Ok(CanonicalOutcome::AlreadyCanonical { header });
}
let Some(chain_id) = self.block_indices().get_blocks_chain_id(block_hash) else {
@ -1028,7 +1030,7 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
return Err(CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain {
block_hash: *block_hash,
})
.into())
.into());
};
let chain = self.state.chains.remove(&chain_id).expect("To be present");
@ -1190,7 +1192,7 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
block_number: tip.number,
block_hash: tip.hash,
},
))))
))));
}
let (blocks, state) = chain.into_inner();
@ -1214,7 +1216,7 @@ impl<DB: Database, EF: ExecutorFactory> BlockchainTree<DB, EF> {
pub fn unwind(&mut self, unwind_to: BlockNumber) -> RethResult<()> {
// nothing to be done if unwind_to is higher than the tip
if self.block_indices().canonical_tip().number <= unwind_to {
return Ok(())
return Ok(());
}
// revert `N` blocks from current canonical chain and put them inside BlockchainTree
let old_canon_chain = self.revert_canonical_from_database(unwind_to)?;
@ -1343,7 +1345,12 @@ mod tests {
genesis.header.header.state_root = EMPTY_ROOT_HASH;
let provider = factory.provider_rw().unwrap();
provider.insert_block(genesis, None, None).unwrap();
provider
.insert_block(
genesis.try_seal_with_senders().expect("invalid tx signature in genesis"),
None,
)
.unwrap();
// insert first 10 blocks
for i in 0..10 {
@ -1454,8 +1461,9 @@ mod tests {
let provider_rw = provider_factory.provider_rw().unwrap();
provider_rw
.insert_block(
SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default()),
Some(Vec::new()),
SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default())
.try_seal_with_senders()
.unwrap(),
None,
)
.unwrap();

View File

@ -355,7 +355,7 @@ where
inconsistent_stage_checkpoint = stage_checkpoint,
"Pipeline sync progress is inconsistent"
);
return Ok(self.blockchain.block_hash(first_stage_checkpoint)?)
return Ok(self.blockchain.block_hash(first_stage_checkpoint)?);
}
}
@ -431,7 +431,7 @@ where
Ok(None) => {
// we don't have the block yet and the distance exceeds the allowed
// threshold
return Some(state.finalized_block_hash)
return Some(state.finalized_block_hash);
}
Ok(Some(_)) => {
// we're fully synced to the finalized block
@ -472,7 +472,7 @@ where
) -> Option<B256> {
// check pre merge block error
if insert_err.map(|err| err.is_block_pre_merge()).unwrap_or_default() {
return Some(B256::ZERO)
return Some(B256::ZERO);
}
// If this is sent from new payload then the parent hash could be in a side chain, and is
@ -487,7 +487,7 @@ where
// we need to check if the parent block is the last POW block, if so then the payload is
// the first POS. The engine API spec mandates a zero hash to be returned: <https://github.com/ethereum/execution-apis/blob/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine/paris.md#engine_newpayloadv1>
if parent_header.difficulty != U256::ZERO {
return Some(B256::ZERO)
return Some(B256::ZERO);
}
// parent is canonical POS block
@ -571,11 +571,11 @@ where
// FCU resulted in a fatal error from which we can't recover
let err = err.clone();
let _ = tx.send(Err(error));
return OnForkchoiceUpdateOutcome::Fatal(err)
return OnForkchoiceUpdateOutcome::Fatal(err);
}
}
let _ = tx.send(Err(error));
return OnForkchoiceUpdateOutcome::Processed
return OnForkchoiceUpdateOutcome::Processed;
}
};
@ -600,7 +600,7 @@ where
if self.sync.has_reached_max_block(tip_number) {
// Terminate the sync early if it's reached the maximum user
// configured block.
return OnForkchoiceUpdateOutcome::ReachedMaxBlock
return OnForkchoiceUpdateOutcome::ReachedMaxBlock;
}
}
ForkchoiceStatus::Syncing => {
@ -629,21 +629,21 @@ where
) -> RethResult<OnForkChoiceUpdated> {
trace!(target: "consensus::engine", ?state, "Received new forkchoice state update");
if state.head_block_hash.is_zero() {
return Ok(OnForkChoiceUpdated::invalid_state())
return Ok(OnForkChoiceUpdated::invalid_state());
}
// check if the new head hash is connected to any ancestor that we previously marked as
// invalid
let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash);
if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu) {
return Ok(OnForkChoiceUpdated::with_invalid(status))
return Ok(OnForkChoiceUpdated::with_invalid(status));
}
if self.sync.is_pipeline_active() {
// We can only process new forkchoice updates if the pipeline is idle, since it requires
// exclusive access to the database
trace!(target: "consensus::engine", "Pipeline is syncing, skipping forkchoice update");
return Ok(OnForkChoiceUpdated::syncing())
return Ok(OnForkChoiceUpdated::syncing());
}
if let Some(hook) = self.hooks.active_db_write_hook() {
@ -655,7 +655,7 @@ where
"Hook is in progress, skipping forkchoice update. \
This may affect the performance of your node as a validator."
);
return Ok(OnForkChoiceUpdated::syncing())
return Ok(OnForkChoiceUpdated::syncing());
}
let start = Instant::now();
@ -724,7 +724,7 @@ where
// attributes
if let Some(invalid_fcu_response) = self.ensure_consistent_state(state)? {
trace!(target: "consensus::engine", ?state, head=?state.head_block_hash, "Forkchoice state is inconsistent, returning invalid response");
return Ok(invalid_fcu_response)
return Ok(invalid_fcu_response);
}
// the CL requested to build a new payload on top of this new VALID head
@ -735,7 +735,7 @@ where
);
trace!(target: "consensus::engine", status = ?payload_response, ?state, "Returning forkchoice status");
return Ok(payload_response)
return Ok(payload_response);
}
PayloadStatus::new(PayloadStatusEnum::Valid, Some(state.head_block_hash))
@ -744,7 +744,7 @@ where
if let RethError::Canonical(ref err) = error {
if err.is_fatal() {
tracing::error!(target: "consensus::engine", ?err, "Encountered fatal error");
return Err(error)
return Err(error);
}
}
@ -756,7 +756,7 @@ where
self.ensure_consistent_state_with_status(state, &status)?
{
trace!(target: "consensus::engine", ?status, ?state, "Forkchoice state is inconsistent, returning invalid response");
return Ok(invalid_fcu_response)
return Ok(invalid_fcu_response);
}
trace!(target: "consensus::engine", ?status, ?state, "Returning forkchoice status");
@ -812,7 +812,7 @@ where
// we likely do not have the finalized or safe blocks, and would return an incorrect
// INVALID status instead.
if status.is_valid() {
return self.ensure_consistent_state(state)
return self.ensure_consistent_state(state);
}
Ok(None)
@ -838,7 +838,7 @@ where
if !state.finalized_block_hash.is_zero() &&
!self.blockchain.is_canonical(state.finalized_block_hash)?
{
return Ok(Some(OnForkChoiceUpdated::invalid_state()))
return Ok(Some(OnForkChoiceUpdated::invalid_state()));
}
// Finalized block is consistent, so update it in the canon chain tracker.
@ -852,7 +852,7 @@ where
if !state.safe_block_hash.is_zero() &&
!self.blockchain.is_canonical(state.safe_block_hash)?
{
return Ok(Some(OnForkChoiceUpdated::invalid_state()))
return Ok(Some(OnForkChoiceUpdated::invalid_state()));
}
// Safe block is consistent, so update it in the canon chain tracker.
@ -913,7 +913,7 @@ where
if !safe_block_hash.is_zero() {
if self.blockchain.safe_block_hash()? == Some(safe_block_hash) {
// nothing to update
return Ok(())
return Ok(());
}
let safe =
@ -933,7 +933,7 @@ where
if !finalized_block_hash.is_zero() {
if self.blockchain.finalized_block_hash()? == Some(finalized_block_hash) {
// nothing to update
return Ok(())
return Ok(());
}
let finalized = self
@ -967,7 +967,7 @@ where
if let Some(invalid_ancestor) = self.check_invalid_ancestor(state.head_block_hash) {
warn!(target: "consensus::engine", ?error, ?state, ?invalid_ancestor, head=?state.head_block_hash, "Failed to canonicalize the head hash, head is also considered invalid");
debug!(target: "consensus::engine", head=?state.head_block_hash, current_error=?error, "Head was previously marked as invalid");
return invalid_ancestor
return invalid_ancestor;
}
#[allow(clippy::single_match)]
@ -979,7 +979,7 @@ where
return PayloadStatus::from_status(PayloadStatusEnum::Invalid {
validation_error: error.to_string(),
})
.with_latest_valid_hash(B256::ZERO)
.with_latest_valid_hash(B256::ZERO);
}
RethError::Canonical(CanonicalError::BlockchainTree(
BlockchainTreeError::BlockHashNotFoundInChain { .. },
@ -1063,7 +1063,7 @@ where
// begin a payload build process. In such an event, the forkchoiceState update MUST NOT
// be rolled back.
if attrs.timestamp <= head.timestamp {
return OnForkChoiceUpdated::invalid_payload_attributes()
return OnForkChoiceUpdated::invalid_payload_attributes();
}
// 8. Client software MUST begin a payload build process building on top of
@ -1130,7 +1130,7 @@ where
if let Some(status) =
self.check_invalid_ancestor_with_head(lowest_buffered_ancestor, block.hash)
{
return Ok(status)
return Ok(status);
}
let res = if self.sync.is_pipeline_idle() {
@ -1215,7 +1215,7 @@ where
}
let status = PayloadStatusEnum::from(error);
return Err(PayloadStatus::new(status, latest_valid_hash))
return Err(PayloadStatus::new(status, latest_valid_hash));
}
};
@ -1270,7 +1270,7 @@ where
let latest_valid_hash =
self.latest_valid_hash_for_invalid_payload(parent_hash, None);
let status = PayloadStatusEnum::from(PayloadError::InvalidVersionedHashes);
return Err(PayloadStatus::new(status, latest_valid_hash))
return Err(PayloadStatus::new(status, latest_valid_hash));
}
// we can use `zip` safely here because we already compared their length
@ -1282,7 +1282,7 @@ where
let latest_valid_hash =
self.latest_valid_hash_for_invalid_payload(parent_hash, None);
let status = PayloadStatusEnum::from(PayloadError::InvalidVersionedHashes);
return Err(PayloadStatus::new(status, latest_valid_hash))
return Err(PayloadStatus::new(status, latest_valid_hash));
}
}
} else if !block_versioned_hashes.is_empty() {
@ -1290,7 +1290,7 @@ where
// provided in the new payload call, so the payload is invalid
let latest_valid_hash = self.latest_valid_hash_for_invalid_payload(parent_hash, None);
let status = PayloadStatusEnum::from(PayloadError::InvalidVersionedHashes);
return Err(PayloadStatus::new(status, latest_valid_hash))
return Err(PayloadStatus::new(status, latest_valid_hash));
}
Ok(())
@ -1346,7 +1346,7 @@ where
if let Some(status) =
self.check_invalid_ancestor_with_head(block.parent_hash, block.hash)
{
return Ok(status)
return Ok(status);
}
// not known to be invalid, but we don't know anything else
@ -1445,7 +1445,7 @@ where
// check if the block's parent is already marked as invalid
if self.check_invalid_ancestor_with_head(block.parent_hash, block.hash).is_some() {
// can skip this invalid block
return
return;
}
match self
@ -1511,7 +1511,7 @@ where
// threshold
self.sync.set_pipeline_sync_target(target);
// we can exit early here because the pipeline will take care of syncing
return
return;
}
// continue downloading the missing parent
@ -1614,7 +1614,7 @@ where
}
EngineSyncEvent::PipelineTaskDropped => {
error!(target: "consensus::engine", "Failed to receive spawned pipeline");
return Some(Err(BeaconConsensusEngineError::PipelineChannelClosed))
return Some(Err(BeaconConsensusEngineError::PipelineChannelClosed));
}
EngineSyncEvent::PipelineFinished { result, reached_max_block } => {
return self.on_pipeline_finished(result, reached_max_block)
@ -1644,7 +1644,7 @@ where
if reached_max_block {
// Terminate the sync early if it's reached the maximum user
// configured block.
return Some(Ok(()))
return Some(Ok(()));
}
if let ControlFlow::Unwind { bad_block, .. } = ctrl {
@ -1652,7 +1652,7 @@ where
// update the `invalid_headers` cache with the new invalid headers
self.invalid_headers.insert(*bad_block);
return None
return None;
}
// update the canon chain if continuous is enabled
@ -1670,7 +1670,7 @@ where
},
Err(error) => {
error!(target: "consensus::engine", ?error, "Error getting canonical header for continuous sync");
return Some(Err(RethError::Provider(error).into()))
return Some(Err(RethError::Provider(error).into()));
}
};
self.blockchain.set_canonical_head(max_header);
@ -1682,7 +1682,7 @@ where
// This is only possible if the node was run with `debug.tip`
// argument and without CL.
warn!(target: "consensus::engine", "No fork choice state available");
return None
return None;
}
};
@ -1752,7 +1752,7 @@ where
}
Err(error) => {
error!(target: "consensus::engine", ?error, "Error restoring blockchain tree state");
return Some(Err(error.into()))
return Some(Err(error.into()));
}
};
}
@ -1790,7 +1790,7 @@ where
self.blockchain.connect_buffered_blocks_to_canonical_hashes()
{
error!(target: "consensus::engine", ?error, "Error connecting buffered blocks to canonical hashes on hook result");
return Err(error.into())
return Err(error.into());
}
}
}
@ -1843,7 +1843,7 @@ where
},
)? {
this.on_hook_result(result)?;
continue
continue;
}
// Process one incoming message from the CL. We don't drain the messages right away,
@ -1858,11 +1858,11 @@ where
OnForkchoiceUpdateOutcome::Processed => {}
OnForkchoiceUpdateOutcome::ReachedMaxBlock => {
// reached the max block, we can terminate the future
return Poll::Ready(Ok(()))
return Poll::Ready(Ok(()));
}
OnForkchoiceUpdateOutcome::Fatal(err) => {
// fatal error, we can terminate the future
return Poll::Ready(Err(RethError::Execution(err).into()))
return Poll::Ready(Err(RethError::Execution(err).into()));
}
}
}
@ -1878,23 +1878,23 @@ where
this.listeners.push_listener(tx);
}
}
continue
continue;
}
// Both running hook with db write access and engine messages are pending,
// proceed to other polls
break
break;
}
// process sync events if any
match this.sync.poll(cx) {
Poll::Ready(sync_event) => {
if let Some(res) = this.on_sync_event(sync_event) {
return Poll::Ready(res)
return Poll::Ready(res);
}
// this could have taken a while, so we start the next cycle to handle any new
// engine messages
continue 'main
continue 'main;
}
Poll::Pending => {
// no more sync events to process
@ -1922,13 +1922,13 @@ where
// ensure we're polling until pending while also checking for new engine
// messages before polling the next hook
continue 'main
continue 'main;
}
}
// incoming engine messages and sync events are drained, so we can yield back
// control
return Poll::Pending
return Poll::Pending;
}
}
}
@ -2040,7 +2040,7 @@ mod tests {
result,
Err(BeaconConsensusEngineError::Pipeline(n)) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed))
);
break
break;
}
Err(TryRecvError::Empty) => {
let _ = env
@ -2133,7 +2133,14 @@ mod tests {
let factory = ProviderFactory::new(db, chain);
let provider = factory.provider_rw().unwrap();
blocks
.try_for_each(|b| provider.insert_block(b.clone(), None, None).map(|_| ()))
.try_for_each(|b| {
provider
.insert_block(
b.clone().try_seal_with_senders().expect("invalid tx signature in block"),
None,
)
.map(|_| ())
})
.expect("failed to insert");
provider.commit().unwrap();
}

View File

@ -492,6 +492,7 @@ mod tests {
use alloy_rlp::Decodable;
use assert_matches::assert_matches;
use reth_db::{models::AccountBeforeTx, test_utils::create_test_rw_db};
use reth_interfaces::executor::BlockValidationError;
use reth_primitives::{
address, hex_literal::hex, keccak256, stage::StageUnitCheckpoint, Account, Bytecode,
ChainSpecBuilder, PruneModes, SealedBlock, StorageEntry, B256, MAINNET, U256,
@ -551,8 +552,16 @@ mod tests {
let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap();
let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice();
let block = SealedBlock::decode(&mut block_rlp).unwrap();
provider.insert_block(genesis, None, None).unwrap();
provider.insert_block(block.clone(), None, None).unwrap();
provider
.insert_block(
genesis
.try_seal_with_senders()
.map_err(|_| BlockValidationError::SenderRecoveryError)
.unwrap(),
None,
)
.unwrap();
provider.insert_block(block.clone().try_seal_with_senders().unwrap(), None).unwrap();
provider.commit().unwrap();
let previous_stage_checkpoint = ExecutionCheckpoint {
@ -587,8 +596,8 @@ mod tests {
let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap();
let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice();
let block = SealedBlock::decode(&mut block_rlp).unwrap();
provider.insert_block(genesis, None, None).unwrap();
provider.insert_block(block.clone(), None, None).unwrap();
provider.insert_block(genesis.try_seal_with_senders().unwrap(), None).unwrap();
provider.insert_block(block.clone().try_seal_with_senders().unwrap(), None).unwrap();
provider.commit().unwrap();
let previous_stage_checkpoint = ExecutionCheckpoint {
@ -623,8 +632,8 @@ mod tests {
let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap();
let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice();
let block = SealedBlock::decode(&mut block_rlp).unwrap();
provider.insert_block(genesis, None, None).unwrap();
provider.insert_block(block.clone(), None, None).unwrap();
provider.insert_block(genesis.try_seal_with_senders().unwrap(), None).unwrap();
provider.insert_block(block.clone().try_seal_with_senders().unwrap(), None).unwrap();
provider.commit().unwrap();
let previous_checkpoint = StageCheckpoint { block_number: 1, stage_checkpoint: None };
@ -653,8 +662,8 @@ mod tests {
let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap();
let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice();
let block = SealedBlock::decode(&mut block_rlp).unwrap();
provider.insert_block(genesis, None, None).unwrap();
provider.insert_block(block.clone(), None, None).unwrap();
provider.insert_block(genesis.try_seal_with_senders().unwrap(), None).unwrap();
provider.insert_block(block.clone().try_seal_with_senders().unwrap(), None).unwrap();
provider.commit().unwrap();
// insert pre state
@ -759,8 +768,8 @@ mod tests {
let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap();
let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice();
let block = SealedBlock::decode(&mut block_rlp).unwrap();
provider.insert_block(genesis, None, None).unwrap();
provider.insert_block(block.clone(), None, None).unwrap();
provider.insert_block(genesis.try_seal_with_senders().unwrap(), None).unwrap();
provider.insert_block(block.clone().try_seal_with_senders().unwrap(), None).unwrap();
provider.commit().unwrap();
// variables
@ -831,8 +840,8 @@ mod tests {
let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap();
let mut block_rlp = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice();
let block = SealedBlock::decode(&mut block_rlp).unwrap();
provider.insert_block(genesis, None, None).unwrap();
provider.insert_block(block.clone(), None, None).unwrap();
provider.insert_block(genesis.try_seal_with_senders().unwrap(), None).unwrap();
provider.insert_block(block.clone().try_seal_with_senders().unwrap(), None).unwrap();
provider.commit().unwrap();
// variables

View File

@ -95,7 +95,7 @@ impl AccountHashingStage {
let blocks = random_block_range(&mut rng, opts.blocks.clone(), B256::ZERO, opts.txs);
for block in blocks {
provider.insert_block(block, None, None).unwrap();
provider.insert_block(block.try_seal_with_senders().unwrap(), None).unwrap();
}
let mut accounts = random_eoa_account_range(&mut rng, opts.accounts);
{
@ -138,7 +138,7 @@ impl<DB: Database> Stage<DB> for AccountHashingStage {
input: ExecInput,
) -> Result<ExecOutput, StageError> {
if input.target_reached() {
return Ok(ExecOutput::done(input.checkpoint()))
return Ok(ExecOutput::done(input.checkpoint()));
}
let (from_block, to_block) = input.next_block_range().into_inner();
@ -238,7 +238,7 @@ impl<DB: Database> Stage<DB> for AccountHashingStage {
},
);
return Ok(ExecOutput { checkpoint, done: false })
return Ok(ExecOutput { checkpoint, done: false });
}
} else {
// Aggregate all transition changesets and make a list of accounts that have been
@ -549,7 +549,7 @@ mod tests {
let start_block = input.next_block();
let end_block = output.checkpoint.block_number;
if start_block > end_block {
return Ok(())
return Ok(());
}
}
self.check_hashed_accounts()

View File

@ -77,8 +77,8 @@ mod tests {
let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap();
let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice();
let block = SealedBlock::decode(&mut block_rlp).unwrap();
provider_rw.insert_block(genesis, None, None).unwrap();
provider_rw.insert_block(block.clone(), None, None).unwrap();
provider_rw.insert_block(genesis.try_seal_with_senders().unwrap(), None).unwrap();
provider_rw.insert_block(block.clone().try_seal_with_senders().unwrap(), None).unwrap();
// Fill with bogus blocks to respect PruneMode distance.
let mut head = block.hash;
@ -86,7 +86,7 @@ mod tests {
for block_number in 2..=tip {
let nblock = random_block(&mut rng, block_number, Some(head), Some(0), Some(0));
head = nblock.hash;
provider_rw.insert_block(nblock, None, None).unwrap();
provider_rw.insert_block(nblock.try_seal_with_senders().unwrap(), None).unwrap();
}
provider_rw.commit().unwrap();

View File

@ -59,7 +59,6 @@ pub(crate) enum Action {
InsertBlockBodyIndices,
InsertTransactionBlock,
RecoverSigners,
GetNextTxNum,
GetParentTD,
}
@ -86,7 +85,6 @@ impl Action {
Action::InsertBlockWithdrawals => "insert block withdrawals",
Action::InsertBlockBodyIndices => "insert block body indices",
Action::InsertTransactionBlock => "insert transaction block",
Action::RecoverSigners => "recover signers",
Action::GetNextTxNum => "get next tx num",
Action::GetParentTD => "get parent TD",
}

View File

@ -141,7 +141,7 @@ impl<DB: Database> ProviderFactory<DB> {
if block_number == provider.best_block_number().unwrap_or_default() &&
block_number == provider.last_block_number().unwrap_or_default()
{
return Ok(Box::new(LatestStateProvider::new(provider.into_tx())))
return Ok(Box::new(LatestStateProvider::new(provider.into_tx())));
}
// +1 as the changeset that we want is the one that was applied after this block.
@ -566,7 +566,10 @@ mod tests {
{
let provider = factory.provider_rw().unwrap();
assert_matches!(provider.insert_block(block.clone(), None, None), Ok(_));
assert_matches!(
provider.insert_block(block.clone().try_seal_with_senders().unwrap(), None),
Ok(_)
);
assert_matches!(
provider.transaction_sender(0), Ok(Some(sender))
if sender == block.body[0].recover_signer().unwrap()
@ -578,8 +581,7 @@ mod tests {
let provider = factory.provider_rw().unwrap();
assert_matches!(
provider.insert_block(
block.clone(),
None,
block.clone().try_seal_with_senders().unwrap(),
Some(&PruneModes {
sender_recovery: Some(PruneMode::Full),
transaction_lookup: Some(PruneMode::Full),
@ -604,7 +606,10 @@ mod tests {
for range in tx_ranges {
let provider = factory.provider_rw().unwrap();
assert_matches!(provider.insert_block(block.clone(), None, None), Ok(_));
assert_matches!(
provider.insert_block(block.clone().try_seal_with_senders().unwrap(), None),
Ok(_)
);
let senders = provider.get_or_take::<tables::TxSenders, true>(range.clone());
assert_eq!(

View File

@ -2150,8 +2150,7 @@ impl<TX: DbTxMut + DbTx> BlockExecutionWriter for DatabaseProvider<TX> {
impl<TX: DbTxMut + DbTx> BlockWriter for DatabaseProvider<TX> {
fn insert_block(
&self,
block: SealedBlock,
senders: Option<Vec<Address>>,
block: SealedBlockWithSenders,
prune_modes: Option<&PruneModes>,
) -> ProviderResult<StoredBlockBodyIndices> {
let block_number = block.number;
@ -2185,7 +2184,7 @@ impl<TX: DbTxMut + DbTx> BlockWriter for DatabaseProvider<TX> {
if !block.ommers.is_empty() {
self.tx.put::<tables::BlockOmmers>(
block_number,
StoredBlockOmmers { ommers: block.ommers },
StoredBlockOmmers { ommers: block.block.ommers },
)?;
durations_recorder.record_relative(metrics::Action::InsertBlockOmmers);
}
@ -2199,29 +2198,14 @@ impl<TX: DbTxMut + DbTx> BlockWriter for DatabaseProvider<TX> {
durations_recorder.record_relative(metrics::Action::GetNextTxNum);
let first_tx_num = next_tx_num;
let tx_count = block.body.len() as u64;
let tx_count = block.block.body.len() as u64;
// Ensures we have all the senders for the block's transactions.
let senders = match senders {
Some(senders) if block.body.len() == senders.len() => {
// senders have the correct length as transactions in the block
senders
}
_ => {
// recover senders from transactions
let senders = TransactionSigned::recover_signers(&block.body, block.body.len())
.ok_or(ProviderError::SenderRecoveryError)?;
durations_recorder.record_relative(metrics::Action::RecoverSigners);
debug_assert_eq!(senders.len(), block.body.len(), "missing one or more senders");
senders
}
};
let mut tx_senders_elapsed = Duration::default();
let mut transactions_elapsed = Duration::default();
let mut tx_hash_numbers_elapsed = Duration::default();
for (transaction, sender) in block.body.into_iter().zip(senders) {
for (transaction, sender) in block.block.body.into_iter().zip(block.senders.iter()) {
let hash = transaction.hash();
if prune_modes
@ -2230,7 +2214,7 @@ impl<TX: DbTxMut + DbTx> BlockWriter for DatabaseProvider<TX> {
.is_none()
{
let start = Instant::now();
self.tx.put::<tables::TxSenders>(next_tx_num, sender)?;
self.tx.put::<tables::TxSenders>(next_tx_num, *sender)?;
tx_senders_elapsed += start.elapsed();
}
@ -2266,7 +2250,7 @@ impl<TX: DbTxMut + DbTx> BlockWriter for DatabaseProvider<TX> {
durations_recorder
.record_duration(metrics::Action::InsertTxHashNumbers, tx_hash_numbers_elapsed);
if let Some(withdrawals) = block.withdrawals {
if let Some(withdrawals) = block.block.withdrawals {
if !withdrawals.is_empty() {
self.tx.put::<tables::BlockWithdrawals>(
block_number,
@ -2317,8 +2301,7 @@ impl<TX: DbTxMut + DbTx> BlockWriter for DatabaseProvider<TX> {
// Insert the blocks
for block in blocks {
let (block, senders) = block.into_components();
self.insert_block(block, Some(senders), prune_modes)?;
self.insert_block(block, prune_modes)?;
durations_recorder.record_relative(metrics::Action::InsertBlock);
}

View File

@ -6,9 +6,8 @@ use auto_impl::auto_impl;
use reth_db::models::StoredBlockBodyIndices;
use reth_interfaces::provider::ProviderResult;
use reth_primitives::{
Address, Block, BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, BlockWithSenders,
ChainSpec, Header, PruneModes, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader,
B256,
Block, BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, BlockWithSenders, ChainSpec,
Header, PruneModes, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, B256,
};
use reth_trie::{hashed_cursor::HashedPostState, updates::TrieUpdates};
use std::ops::RangeInclusive;
@ -293,8 +292,7 @@ pub trait BlockWriter: Send + Sync {
/// transition in the block.
fn insert_block(
&self,
block: SealedBlock,
senders: Option<Vec<Address>>,
block: SealedBlockWithSenders,
prune_modes: Option<&PruneModes>,
) -> ProviderResult<StoredBlockBodyIndices>;

View File

@ -87,8 +87,9 @@ impl Case for BlockchainTestCase {
SealedBlock::new(
case.genesis_block_header.clone().into(),
BlockBody::default(),
),
None,
)
.try_seal_with_senders()
.unwrap(),
None,
)
.map_err(|err| Error::RethError(err.into()))?;
@ -98,7 +99,7 @@ impl Case for BlockchainTestCase {
let last_block = case.blocks.iter().try_fold(None, |_, block| {
let decoded = SealedBlock::decode(&mut block.rlp.as_ref())?;
provider
.insert_block(decoded.clone(), None, None)
.insert_block(decoded.clone().try_seal_with_senders().unwrap(), None)
.map_err(|err| Error::RethError(err.into()))?;
Ok::<Option<SealedBlock>, Error>(Some(decoded))
})?;