refactor(consensus, evm): move post-execution validation to consensus (#8321)

Author: Alexey Shekhirin
Date: 2024-05-22 18:20:14 +01:00 (committed by GitHub)
Parent: 90713300bf
Commit: f45ca74772
52 changed files with 424 additions and 346 deletions

Cargo.lock (generated)
View File

@ -6973,20 +6973,22 @@ name = "reth-evm-ethereum"
version = "0.2.0-beta.7"
dependencies = [
"alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)",
"reth-ethereum-consensus",
"reth-evm",
"reth-interfaces",
"reth-primitives",
"reth-revm",
"revm-primitives",
"tracing",
]
[[package]]
name = "reth-evm-optimism"
version = "0.2.0-beta.7"
dependencies = [
"reth-consensus-common",
"reth-evm",
"reth-interfaces",
"reth-optimism-consensus",
"reth-primitives",
"reth-provider",
"reth-revm",

View File

@ -298,7 +298,7 @@ impl Command {
consensus.validate_header_with_total_difficulty(block, U256::MAX)?;
consensus.validate_header(block)?;
consensus.validate_block(block)?;
consensus.validate_block_pre_execution(block)?;
let senders = block.senders().expect("sender recovery failed");
let block_with_senders =

View File

@ -197,7 +197,7 @@ impl Command {
)),
PruneModes::none(),
);
executor.execute_one((&sealed_block.clone().unseal(), td).into())?;
executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?;
let BatchBlockExecutionOutput { bundle, receipts, first_block } = executor.finalize();
BundleStateWithReceipts::new(bundle, receipts, first_block).write_to_storage(
provider_rw.tx_ref(),

View File

@ -730,7 +730,7 @@ where
return Err(e)
}
if let Err(e) = self.externals.consensus.validate_block(block) {
if let Err(e) = self.externals.consensus.validate_block_pre_execution(block) {
error!(?block, "Failed to validate block {}: {e}", block.header.hash());
return Err(e)
}

View File

@ -210,8 +210,11 @@ impl AppendableChain {
let executor = externals.executor_factory.executor(db);
let block_hash = block.hash();
let block = block.unseal();
let state = executor.execute((&block, U256::MAX).into())?;
let BlockExecutionOutput { state, receipts, .. } = state;
externals.consensus.validate_block_post_execution(&block, &receipts)?;
let bundle_state = BundleStateWithReceipts::new(
state,
Receipts::from_block_receipt(receipts),

View File

@ -57,7 +57,7 @@ impl Config {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
format!("reth config file extension must be '{EXTENSION}'"),
));
))
}
confy::store_path(path, self).map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
}

View File

@ -22,8 +22,9 @@ use reth_interfaces::executor::{BlockExecutionError, BlockValidationError};
use reth_primitives::{
constants::{EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT},
eip4844::calculate_excess_blob_gas,
proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Header,
Receipts, SealedBlock, SealedHeader, TransactionSigned, Withdrawals, B256, U256,
proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders,
ChainSpec, Header, Receipt, Receipts, SealedBlock, SealedHeader, TransactionSigned,
Withdrawals, B256, U256,
};
use reth_provider::{
BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotificationSender, StateProviderFactory,
@ -84,7 +85,15 @@ impl Consensus for AutoSealConsensus {
Ok(())
}
fn validate_block(&self, _block: &SealedBlock) -> Result<(), ConsensusError> {
fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> {
Ok(())
}
fn validate_block_post_execution(
&self,
_block: &BlockWithSenders,
_receipts: &[Receipt],
) -> Result<(), ConsensusError> {
Ok(())
}
}
@ -361,7 +370,7 @@ impl StorageInner {
let header =
self.build_header_template(&transactions, &ommers, withdrawals.as_ref(), chain_spec);
let mut block = Block {
let block = Block {
header,
body: transactions,
ommers: ommers.clone(),
@ -376,27 +385,7 @@ impl StorageInner {
provider.latest().map_err(BlockExecutionError::LatestBlock)?,
);
// TODO(mattsse): At this point we don't know certain fields of the header, so we first
// execute it and then update the header this can be improved by changing the executor
// input, for now we intercept the errors and retry
loop {
match executor.executor(&mut db).execute((&block, U256::ZERO).into()) {
Err(BlockExecutionError::Validation(BlockValidationError::BlockGasUsed {
gas,
..
})) => {
block.block.header.gas_used = gas.got;
}
Err(BlockExecutionError::Validation(BlockValidationError::ReceiptRootDiff(
err,
))) => {
block.block.header.receipts_root = err.got;
}
_ => break,
};
}
// now execute the block
// execute the block
let BlockExecutionOutput { state, receipts, .. } =
executor.executor(&mut db).execute((&block, U256::ZERO).into())?;
let bundle_state = BundleStateWithReceipts::new(

View File

@ -61,7 +61,7 @@ pub fn validate_header_standalone(
/// - Compares the transactions root in the block header to the block body
/// - Pre-execution transaction validation
/// - (Optionally) Compares the receipts root in the block header to the block body
pub fn validate_block_standalone(
pub fn validate_block_pre_execution(
block: &SealedBlock,
chain_spec: &ChainSpec,
) -> Result<(), ConsensusError> {
@ -366,13 +366,13 @@ mod tests {
// Single withdrawal
let block = create_block_with_withdrawals(&[1]);
assert_eq!(validate_block_standalone(&block, &chain_spec), Ok(()));
assert_eq!(validate_block_pre_execution(&block, &chain_spec), Ok(()));
// Multiple increasing withdrawals
let block = create_block_with_withdrawals(&[1, 2, 3]);
assert_eq!(validate_block_standalone(&block, &chain_spec), Ok(()));
assert_eq!(validate_block_pre_execution(&block, &chain_spec), Ok(()));
let block = create_block_with_withdrawals(&[5, 6, 7, 8, 9]);
assert_eq!(validate_block_standalone(&block, &chain_spec), Ok(()));
assert_eq!(validate_block_pre_execution(&block, &chain_spec), Ok(()));
let (_, parent) = mock_block();
// Withdrawal index should be the last withdrawal index + 1
@ -428,7 +428,7 @@ mod tests {
// validate blob, it should fail blob gas used validation
assert_eq!(
validate_block_standalone(&block, &chain_spec),
validate_block_pre_execution(&block, &chain_spec),
Err(ConsensusError::BlobGasUsedDiff(GotExpected {
got: 1,
expected: expected_blob_gas_used

View File

@ -18,4 +18,4 @@ auto_impl.workspace = true
thiserror.workspace = true
[features]
test-utils = []
test-utils = []

View File

@ -9,8 +9,8 @@
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
use reth_primitives::{
BlockHash, BlockNumber, GotExpected, GotExpectedBoxed, Header, HeaderValidationError,
InvalidTransactionError, SealedBlock, SealedHeader, B256, U256,
BlockHash, BlockNumber, BlockWithSenders, Bloom, GotExpected, GotExpectedBoxed, Header,
HeaderValidationError, InvalidTransactionError, Receipt, SealedBlock, SealedHeader, B256, U256,
};
use std::fmt::Debug;
@ -83,7 +83,19 @@ pub trait Consensus: Debug + Send + Sync {
/// **This should not be called for the genesis block**.
///
/// Note: validating blocks does not include other validations of the Consensus
fn validate_block(&self, block: &SealedBlock) -> Result<(), ConsensusError>;
fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError>;
/// Validate a block considering world state, i.e. things that can not be checked before
/// execution.
///
/// See the Yellow Paper sections 4.3.2 "Holistic Validity".
///
/// Note: validating blocks does not include other validations of the Consensus
fn validate_block_post_execution(
&self,
block: &BlockWithSenders,
receipts: &[Receipt],
) -> Result<(), ConsensusError>;
}
/// Consensus Errors
@ -98,6 +110,15 @@ pub enum ConsensusError {
gas_limit: u64,
},
/// Error when block gas used doesn't match expected value
#[error("block gas used mismatch: {gas}; gas spent by each transaction: {gas_spent_by_tx:?}")]
BlockGasUsed {
/// The gas diff.
gas: GotExpected<u64>,
/// Gas spent by each transaction
gas_spent_by_tx: Vec<(u64, u64)>,
},
/// Error when the hash of block ommer is different from the expected hash.
#[error("mismatched block ommer hash: {0}")]
BodyOmmersHashDiff(GotExpectedBoxed<B256>),
@ -111,6 +132,14 @@ pub enum ConsensusError {
#[error("mismatched block transaction root: {0}")]
BodyTransactionRootDiff(GotExpectedBoxed<B256>),
/// Error when the receipt root in the block is different from the expected receipt root.
#[error("receipt root mismatch: {0}")]
BodyReceiptRootDiff(GotExpectedBoxed<B256>),
/// Error when header bloom filter is different from the expected bloom filter.
#[error("header bloom filter mismatch: {0}")]
BodyBloomLogDiff(GotExpectedBoxed<Bloom>),
/// Error when the withdrawals root in the block is different from the expected withdrawals
/// root.
#[error("mismatched block withdrawals root: {0}")]

View File

@ -1,5 +1,5 @@
use crate::{Consensus, ConsensusError};
use reth_primitives::{Header, SealedBlock, SealedHeader, U256};
use reth_primitives::{BlockWithSenders, Header, Receipt, SealedBlock, SealedHeader, U256};
use std::sync::atomic::{AtomicBool, Ordering};
/// Consensus engine implementation for testing
@ -60,7 +60,19 @@ impl Consensus for TestConsensus {
}
}
fn validate_block(&self, _block: &SealedBlock) -> Result<(), ConsensusError> {
fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> {
if self.fail_validation() {
Err(ConsensusError::BaseFeeMissing)
} else {
Ok(())
}
}
fn validate_block_post_execution(
&self,
_block: &BlockWithSenders,
_receipts: &[Receipt],
) -> Result<(), ConsensusError> {
if self.fail_validation() {
Err(ConsensusError::BaseFeeMissing)
} else {

View File

@ -50,9 +50,9 @@ impl<E: EngineTypes + 'static> PayloadTestContext<E> {
let payload = self.payload_builder.best_payload(payload_id).await.unwrap().unwrap();
if payload.block().body.is_empty() {
tokio::time::sleep(std::time::Duration::from_millis(20)).await;
continue;
continue
}
break;
break
}
}

View File

@ -17,4 +17,4 @@ reth-primitives.workspace = true
reth-consensus.workspace = true
[features]
optimism = ["reth-primitives/optimism"]
optimism = ["reth-primitives/optimism"]

View File

@ -9,12 +9,18 @@
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
use reth_consensus::{Consensus, ConsensusError};
use reth_consensus_common::validation;
use reth_consensus_common::validation::{
validate_block_pre_execution, validate_header_extradata, validate_header_standalone,
};
use reth_primitives::{
Chain, ChainSpec, Hardfork, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256,
BlockWithSenders, Chain, ChainSpec, Hardfork, Header, Receipt, SealedBlock, SealedHeader,
EMPTY_OMMER_ROOT_HASH, U256,
};
use std::{sync::Arc, time::SystemTime};
mod validation;
pub use validation::validate_block_post_execution;
/// Ethereum beacon consensus
///
/// This consensus engine does basic checks as outlined in the execution specs.
@ -33,7 +39,7 @@ impl EthBeaconConsensus {
impl Consensus for EthBeaconConsensus {
fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> {
validation::validate_header_standalone(header, &self.chain_spec)?;
validate_header_standalone(header, &self.chain_spec)?;
Ok(())
}
@ -87,7 +93,7 @@ impl Consensus for EthBeaconConsensus {
// is greater than its parent timestamp.
// validate header extradata for all networks post merge
validation::validate_header_extradata(header)?;
validate_header_extradata(header)?;
// mixHash is used instead of difficulty inside EVM
// https://eips.ethereum.org/EIPS/eip-4399#using-mixhash-field-instead-of-difficulty
@ -111,14 +117,22 @@ impl Consensus for EthBeaconConsensus {
// * If the network is goerli pre-merge, ignore the extradata check, since we do not
// support clique. Same goes for OP blocks below Bedrock.
if self.chain_spec.chain != Chain::goerli() && !self.chain_spec.is_optimism() {
validation::validate_header_extradata(header)?;
validate_header_extradata(header)?;
}
}
Ok(())
}
fn validate_block(&self, block: &SealedBlock) -> Result<(), ConsensusError> {
validation::validate_block_standalone(block, &self.chain_spec)
fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> {
validate_block_pre_execution(block, &self.chain_spec)
}
fn validate_block_post_execution(
&self,
block: &BlockWithSenders,
receipts: &[Receipt],
) -> Result<(), ConsensusError> {
validate_block_post_execution(block, &self.chain_spec, receipts)
}
}

View File

@ -0,0 +1,82 @@
use reth_consensus::ConsensusError;
use reth_primitives::{
gas_spent_by_transactions, BlockWithSenders, Bloom, ChainSpec, GotExpected, Receipt,
ReceiptWithBloom, B256,
};
/// Validate a block with regard to execution results:
///
/// - Compares the receipts root in the block header to the block body
/// - Compares the gas used in the block header to the actual gas usage after execution
pub fn validate_block_post_execution(
block: &BlockWithSenders,
chain_spec: &ChainSpec,
receipts: &[Receipt],
) -> Result<(), ConsensusError> {
// Before Byzantium, receipts contained state root that would mean that expensive
// operation as hashing that is required for state root got calculated in every
// transaction This was replaced with is_success flag.
// See more about EIP here: https://eips.ethereum.org/EIPS/eip-658
if chain_spec.is_byzantium_active_at_block(block.header.number) {
verify_receipts(block.header.receipts_root, block.header.logs_bloom, receipts.iter())?;
}
// Check if gas used matches the value set in header.
let cumulative_gas_used =
receipts.last().map(|receipt| receipt.cumulative_gas_used).unwrap_or(0);
if block.gas_used != cumulative_gas_used {
return Err(ConsensusError::BlockGasUsed {
gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used },
gas_spent_by_tx: gas_spent_by_transactions(receipts),
})
}
Ok(())
}
/// Calculate the receipts root, and compare it against against the expected receipts root and logs
/// bloom.
fn verify_receipts<'a>(
expected_receipts_root: B256,
expected_logs_bloom: Bloom,
receipts: impl Iterator<Item = &'a Receipt> + Clone,
) -> Result<(), ConsensusError> {
// Calculate receipts root.
let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::<Vec<ReceiptWithBloom>>();
let receipts_root = reth_primitives::proofs::calculate_receipt_root(&receipts_with_bloom);
// Create header log bloom.
let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom);
compare_receipts_root_and_logs_bloom(
receipts_root,
logs_bloom,
expected_receipts_root,
expected_logs_bloom,
)?;
Ok(())
}
/// Compare the calculated receipts root with the expected receipts root, also compare
/// the calculated logs bloom with the expected logs bloom.
fn compare_receipts_root_and_logs_bloom(
calculated_receipts_root: B256,
calculated_logs_bloom: Bloom,
expected_receipts_root: B256,
expected_logs_bloom: Bloom,
) -> Result<(), ConsensusError> {
if calculated_receipts_root != expected_receipts_root {
return Err(ConsensusError::BodyReceiptRootDiff(
GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(),
))
}
if calculated_logs_bloom != expected_logs_bloom {
return Err(ConsensusError::BodyBloomLogDiff(
GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(),
))
}
Ok(())
}
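For context, the gas check above only compares the header's gas_used against the cumulative gas of the final receipt; a tiny illustration with made-up numbers:

// Hypothetical cumulative_gas_used values from a block's receipts, in
// transaction order; the last entry is the block's total gas used.
let cumulative_gas: Vec<u64> = vec![21_000, 42_000, 63_500];
let total = cumulative_gas.last().copied().unwrap_or(0);
assert_eq!(total, 63_500); // must equal block.header.gas_used, else ConsensusError::BlockGasUsed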

View File

@ -16,13 +16,11 @@ reth-evm.workspace = true
reth-primitives.workspace = true
reth-revm.workspace = true
reth-interfaces.workspace = true
reth-ethereum-consensus.workspace = true
# Ethereum
revm-primitives.workspace = true
# misc
tracing.workspace = true
[dev-dependencies]
reth-revm = { workspace = true, features = ["test-utils"] }
alloy-eips.workspace = true

View File

@ -2,9 +2,9 @@
use crate::{
dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS},
verify::verify_receipts,
EthEvmConfig,
};
use reth_ethereum_consensus::validate_block_post_execution;
use reth_evm::{
execute::{
BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput,
@ -17,8 +17,8 @@ use reth_interfaces::{
provider::ProviderError,
};
use reth_primitives::{
BlockNumber, BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt,
Receipts, Withdrawals, MAINNET, U256,
BlockNumber, BlockWithSenders, ChainSpec, Hardfork, Header, PruneModes, Receipt, Withdrawals,
MAINNET, U256,
};
use reth_revm::{
batch::{BlockBatchRecord, BlockExecutorStats},
@ -31,7 +31,6 @@ use revm_primitives::{
BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState,
};
use std::sync::Arc;
use tracing::debug;
/// Provides executors to execute regular ethereum blocks
#[derive(Debug, Clone)]
@ -187,16 +186,6 @@ where
}
drop(evm);
// Check if gas used matches the value set in header.
if block.gas_used != cumulative_gas_used {
let receipts = Receipts::from_block_receipt(receipts);
return Err(BlockValidationError::BlockGasUsed {
gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used },
gas_spent_by_tx: receipts.gas_spent_by_tx()?,
}
.into())
}
Ok((receipts, cumulative_gas_used))
}
}
@ -260,8 +249,8 @@ where
///
/// Returns the receipts of the transactions in the block and the total gas used.
///
/// Returns an error if execution fails or receipt verification fails.
fn execute_and_verify(
/// Returns an error if execution fails.
fn execute_without_verification(
&mut self,
block: &BlockWithSenders,
total_difficulty: U256,
@ -280,21 +269,6 @@ where
// 3. apply post execution changes
self.post_execution(block, total_difficulty)?;
// Before Byzantium, receipts contained state root that would mean that expensive
// operation as hashing that is required for state root got calculated in every
// transaction This was replaced with is_success flag.
// See more about EIP here: https://eips.ethereum.org/EIPS/eip-658
if self.chain_spec().is_byzantium_active_at_block(block.header.number) {
if let Err(error) = verify_receipts(
block.header.receipts_root,
block.header.logs_bloom,
receipts.iter(),
) {
debug!(target: "evm", %error, ?receipts, "receipts verification failed");
return Err(error)
};
}
Ok((receipts, gas_used))
}
@ -363,7 +337,7 @@ where
/// State changes are committed to the database.
fn execute(mut self, input: Self::Input<'_>) -> Result<Self::Output, Self::Error> {
let BlockExecutionInput { block, total_difficulty } = input;
let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?;
let (receipts, gas_used) = self.execute_without_verification(block, total_difficulty)?;
// NOTE: we need to merge keep the reverts for the bundle retention
self.state.merge_transitions(BundleRetention::Reverts);
@ -403,9 +377,12 @@ where
type Output = BatchBlockExecutionOutput;
type Error = BlockExecutionError;
fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> {
fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> {
let BlockExecutionInput { block, total_difficulty } = input;
let (receipts, _gas_used) = self.executor.execute_and_verify(block, total_difficulty)?;
let (receipts, _gas_used) =
self.executor.execute_without_verification(block, total_difficulty)?;
validate_block_post_execution(block, self.executor.chain_spec(), &receipts)?;
// prepare the state according to the prune mode
let retention = self.batch_record.bundle_retention(block.number);
@ -523,7 +500,7 @@ mod tests {
// Now execute a block with the fixed header, ensure that it does not fail
executor
.execute_and_verify(
.execute_without_verification(
&BlockWithSenders {
block: Block {
header: header.clone(),
@ -634,7 +611,7 @@ mod tests {
// attempt to execute an empty block with parent beacon block root, this should not fail
executor
.execute_and_verify(
.execute_without_verification(
&BlockWithSenders {
block: Block { header, body: vec![], ommers: vec![], withdrawals: None },
senders: vec![],
@ -672,7 +649,7 @@ mod tests {
// attempt to execute the genesis block with non-zero parent beacon block root, expect err
header.parent_beacon_block_root = Some(B256::with_last_byte(0x69));
let _err = executor
.execute_one(
.execute_and_verify_one(
(
&BlockWithSenders {
block: Block {
@ -698,7 +675,7 @@ mod tests {
// now try to process the genesis block again, this time ensuring that a system contract
// call does not occur
executor
.execute_one(
.execute_and_verify_one(
(
&BlockWithSenders {
block: Block { header, body: vec![], ommers: vec![], withdrawals: None },
@ -752,7 +729,7 @@ mod tests {
// Now execute a block with the fixed header, ensure that it does not fail
executor
.execute_one(
.execute_and_verify_one(
(
&BlockWithSenders {
block: Block {

View File

@ -16,7 +16,6 @@ use reth_primitives::{
};
use reth_revm::{Database, EvmBuilder};
pub mod execute;
pub mod verify;
/// Ethereum DAO hardfork state change data.
pub mod dao_fork;

View File

@ -1,53 +0,0 @@
//! Helpers for verifying the receipts.
use reth_interfaces::executor::{BlockExecutionError, BlockValidationError};
use reth_primitives::{Bloom, GotExpected, Receipt, ReceiptWithBloom, B256};
/// Calculate the receipts root, and compare it against against the expected receipts root and logs
/// bloom.
pub fn verify_receipts<'a>(
expected_receipts_root: B256,
expected_logs_bloom: Bloom,
receipts: impl Iterator<Item = &'a Receipt> + Clone,
) -> Result<(), BlockExecutionError> {
// Calculate receipts root.
let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::<Vec<ReceiptWithBloom>>();
let receipts_root = reth_primitives::proofs::calculate_receipt_root(&receipts_with_bloom);
// Create header log bloom.
let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom);
compare_receipts_root_and_logs_bloom(
receipts_root,
logs_bloom,
expected_receipts_root,
expected_logs_bloom,
)?;
Ok(())
}
/// Compare the calculated receipts root with the expected receipts root, also compare
/// the calculated logs bloom with the expected logs bloom.
pub fn compare_receipts_root_and_logs_bloom(
calculated_receipts_root: B256,
calculated_logs_bloom: Bloom,
expected_receipts_root: B256,
expected_logs_bloom: Bloom,
) -> Result<(), BlockExecutionError> {
if calculated_receipts_root != expected_receipts_root {
return Err(BlockValidationError::ReceiptRootDiff(
GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(),
)
.into())
}
if calculated_logs_bloom != expected_logs_bloom {
return Err(BlockValidationError::BloomLogDiff(
GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(),
)
.into())
}
Ok(())
}

View File

@ -89,10 +89,10 @@ where
type Output = BatchBlockExecutionOutput;
type Error = BlockExecutionError;
fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> {
fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> {
match self {
Either::Left(a) => a.execute_one(input),
Either::Right(b) => b.execute_one(input),
Either::Left(a) => a.execute_and_verify_one(input),
Either::Right(b) => b.execute_and_verify_one(input),
}
}

View File

@ -5,8 +5,10 @@ use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt, Receip
use revm::db::BundleState;
use revm_primitives::db::Database;
/// A general purpose executor trait that executes on an input (e.g. blocks) and produces an output
/// A general purpose executor trait that executes an input (e.g. block) and produces an output
/// (e.g. state changes and receipts).
///
/// This executor does not validate the output, see [BatchExecutor] for that.
pub trait Executor<DB> {
/// The input type for the executor.
type Input<'a>;
@ -17,12 +19,17 @@ pub trait Executor<DB> {
/// Consumes the type and executes the block.
///
/// Returns the output of the block execution.
/// # Note
/// Execution happens without any validation of the output. To validate the output, use the
/// [BatchExecutor].
///
/// # Returns
/// The output of the block execution.
fn execute(self, input: Self::Input<'_>) -> Result<Self::Output, Self::Error>;
}
/// A general purpose executor that can execute multiple inputs in sequence and keep track of the
/// state over the entire batch.
/// A general purpose executor that can execute multiple inputs in sequence, validate the outputs,
/// and keep track of the state over the entire batch.
pub trait BatchExecutor<DB> {
/// The input type for the executor.
type Input<'a>;
@ -31,27 +38,34 @@ pub trait BatchExecutor<DB> {
/// The error type returned by the executor.
type Error;
/// Executes the next block in the batch and update the state internally.
fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error>;
/// Executes the next block in the batch, verifies the output and updates the state internally.
fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error>;
/// Executes multiple inputs in the batch and update the state internally.
fn execute_many<'a, I>(&mut self, inputs: I) -> Result<(), Self::Error>
/// Executes multiple inputs in the batch, verifies the output, and updates the state
/// internally.
///
/// This method is a convenience function for calling [`BatchExecutor::execute_and_verify_one`]
/// for each input.
fn execute_and_verify_many<'a, I>(&mut self, inputs: I) -> Result<(), Self::Error>
where
I: IntoIterator<Item = Self::Input<'a>>,
{
for input in inputs {
self.execute_one(input)?;
self.execute_and_verify_one(input)?;
}
Ok(())
}
/// Executes the entire batch and return the final state.
fn execute_batch<'a, I>(mut self, batch: I) -> Result<Self::Output, Self::Error>
/// Executes the entire batch, verifies the output, and returns the final state.
///
/// This method is a convenience function for calling [`BatchExecutor::execute_and_verify_many`]
/// and [`BatchExecutor::finalize`].
fn execute_and_verify_batch<'a, I>(mut self, batch: I) -> Result<Self::Output, Self::Error>
where
I: IntoIterator<Item = Self::Input<'a>>,
Self: Sized,
{
self.execute_many(batch)?;
self.execute_and_verify_many(batch)?;
Ok(self.finalize())
}
@ -222,7 +236,7 @@ mod tests {
type Output = BatchBlockExecutionOutput;
type Error = BlockExecutionError;
fn execute_one(&mut self, _input: Self::Input<'_>) -> Result<(), Self::Error> {
fn execute_and_verify_one(&mut self, _input: Self::Input<'_>) -> Result<(), Self::Error> {
Ok(())
}
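From the caller's side, the renamed batch API reads as below; a hedged fragment in which `batch_executor`, `blocks`, and `total_difficulty` are assumed bindings, not part of this diff:

// Each input pairs a recovered block with the total difficulty, exactly as in
// the debug command above; every block is validated as it is executed, and the
// accumulated state comes back from the final `finalize()` call inside
// `execute_and_verify_batch`.
let inputs = blocks.iter().map(|block| (block, total_difficulty).into());
let BatchBlockExecutionOutput { bundle, receipts, first_block } =
    batch_executor.execute_and_verify_batch(inputs)?;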

View File

@ -64,7 +64,7 @@ impl<DB> BatchExecutor<DB> for MockExecutorProvider {
type Output = BatchBlockExecutionOutput;
type Error = BlockExecutionError;
fn execute_one(&mut self, _: Self::Input<'_>) -> Result<(), Self::Error> {
fn execute_and_verify_one(&mut self, _: Self::Input<'_>) -> Result<(), Self::Error> {
Ok(())
}

View File

@ -297,7 +297,7 @@ impl InsertBlockErrorKind {
// other execution errors that are considered internal errors
InsertBlockErrorKind::Execution(err) => {
match err {
BlockExecutionError::Validation(_) => {
BlockExecutionError::Validation(_) | BlockExecutionError::Consensus(_) => {
// this is caused by an invalid block
true
}

View File

@ -1,8 +1,6 @@
use crate::{provider::ProviderError, trie::StateRootError};
use reth_primitives::{
revm_primitives::EVMError, BlockNumHash, Bloom, GotExpected, GotExpectedBoxed,
PruneSegmentError, B256,
};
use reth_consensus::ConsensusError;
use reth_primitives::{revm_primitives::EVMError, BlockNumHash, PruneSegmentError, B256};
use thiserror::Error;
/// Transaction validation errors
@ -23,12 +21,6 @@ pub enum BlockValidationError {
/// Error when incrementing balance in post execution
#[error("incrementing balance in post execution failed")]
IncrementBalanceFailed,
/// Error when receipt root doesn't match expected value
#[error("receipt root mismatch: {0}")]
ReceiptRootDiff(GotExpectedBoxed<B256>),
/// Error when header bloom filter doesn't match expected value
#[error("header bloom filter mismatch: {0}")]
BloomLogDiff(GotExpectedBoxed<Bloom>),
/// Error when the state root does not match the expected value.
#[error(transparent)]
StateRoot(#[from] StateRootError),
@ -40,14 +32,6 @@ pub enum BlockValidationError {
/// The available block gas
block_available_gas: u64,
},
/// Error when block gas used doesn't match expected value
#[error("block gas used mismatch: {gas}; gas spent by each transaction: {gas_spent_by_tx:?}")]
BlockGasUsed {
/// The gas diff.
gas: GotExpected<u64>,
/// Gas spent by each transaction
gas_spent_by_tx: Vec<(u64, u64)>,
},
/// Error for pre-merge block
#[error("block {hash} is pre merge")]
BlockPreMerge {
@ -88,6 +72,9 @@ pub enum BlockExecutionError {
/// Pruning error, transparently wrapping `PruneSegmentError`
#[error(transparent)]
Pruning(#[from] PruneSegmentError),
/// Consensus error, transparently wrapping `ConsensusError`
#[error(transparent)]
Consensus(#[from] ConsensusError),
/// Transaction error on revert with inner details
#[error("transaction error on revert: {inner}")]
CanonicalRevert {

View File

@ -2266,7 +2266,7 @@ mod tests {
assert!(service.pending_pings.contains_key(&node.id));
assert_eq!(service.pending_pings.len(), num_inserted);
if num_inserted == MAX_NODES_PING {
break;
break
}
}
}

View File

@ -95,7 +95,7 @@ where
max_non_empty: u64,
) -> DownloadResult<Option<Vec<SealedHeader>>> {
if range.is_empty() || max_non_empty == 0 {
return Ok(None);
return Ok(None)
}
// Collect headers while
@ -144,7 +144,7 @@ where
// if we're only connected to a few peers, we keep it low
if num_peers < *self.concurrent_requests_range.start() {
return max_requests;
return max_requests
}
max_requests.min(*self.concurrent_requests_range.end())
@ -238,7 +238,7 @@ where
.skip_while(|b| b.block_number() < expected)
.take_while(|b| self.download_range.contains(&b.block_number()))
.collect()
});
})
}
// Drop buffered response since we passed that range
@ -257,7 +257,7 @@ where
self.queued_bodies.shrink_to_fit();
self.metrics.total_flushed.increment(next_batch.len() as u64);
self.metrics.queued_blocks.set(self.queued_bodies.len() as f64);
return Some(next_batch);
return Some(next_batch)
}
None
}
@ -354,13 +354,13 @@ where
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
if this.is_terminated() {
return Poll::Ready(None);
return Poll::Ready(None)
}
// Submit new requests and poll any in progress
loop {
// Yield next batch if ready
if let Some(next_batch) = this.try_split_next_batch() {
return Poll::Ready(Some(Ok(next_batch)));
return Poll::Ready(Some(Ok(next_batch)))
}
// Poll requests
@ -373,7 +373,7 @@ where
Err(error) => {
tracing::debug!(target: "downloaders::bodies", %error, "Request failed");
this.clear();
return Poll::Ready(Some(Err(error)));
return Poll::Ready(Some(Err(error)))
}
};
}
@ -396,7 +396,7 @@ where
Err(error) => {
tracing::error!(target: "downloaders::bodies", %error, "Failed to download from next request");
this.clear();
return Poll::Ready(Some(Err(error)));
return Poll::Ready(Some(Err(error)))
}
};
}
@ -409,21 +409,21 @@ where
this.buffered_responses.shrink_to_fit();
if !new_request_submitted {
break;
break
}
}
// All requests are handled, stream is finished
if this.in_progress_queue.is_empty() {
if this.queued_bodies.is_empty() {
return Poll::Ready(None);
return Poll::Ready(None)
}
let batch_size = this.stream_batch_size.min(this.queued_bodies.len());
let next_batch = this.queued_bodies.drain(..batch_size).collect::<Vec<_>>();
this.queued_bodies.shrink_to_fit();
this.metrics.total_flushed.increment(next_batch.len() as u64);
this.metrics.queued_blocks.set(this.queued_bodies.len() as f64);
return Poll::Ready(Some(Ok(next_batch)));
return Poll::Ready(Some(Ok(next_batch)))
}
Poll::Pending

View File

@ -180,7 +180,7 @@ where
let block = SealedBlock::new(next_header, next_body);
if let Err(error) = self.consensus.validate_block(&block) {
if let Err(error) = self.consensus.validate_block_pre_execution(&block) {
// Body is invalid, put the header back and return an error
let hash = block.hash();
let number = block.number;

View File

@ -479,9 +479,9 @@ where
if let Err(disconnect_err) =
this.inner.conn.start_disconnect(DisconnectReason::DisconnectRequested)
{
return Poll::Ready(Some(Err(disconnect_err.into())));
return Poll::Ready(Some(Err(disconnect_err.into())))
}
return Poll::Ready(Some(Err(err.into())));
return Poll::Ready(Some(Err(err.into())))
}
Poll::Pending => {
conn_ready = false;

View File

@ -20,7 +20,7 @@ impl PruningArgs {
/// Returns pruning configuration.
pub fn prune_config(&self, chain_spec: &ChainSpec) -> Option<PruneConfig> {
if !self.full {
return None;
return None
}
Some(PruneConfig {
block_interval: 5,

View File

@ -373,7 +373,7 @@ fn parse_accounts(
while let Ok(n) = reader.read_line(&mut line) {
if n == 0 {
break;
break
}
let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?;

View File

@ -411,7 +411,7 @@ impl NodeConfig {
// try to look up the header in the database
if let Some(header) = header {
info!(target: "reth::cli", ?tip, "Successfully looked up tip block in the database");
return Ok(header.number);
return Ok(header.number)
}
Ok(self.fetch_tip_from_network(client, tip.into()).await?.number)
@ -434,7 +434,7 @@ impl NodeConfig {
match get_single_header(&client, tip).await {
Ok(tip_header) => {
info!(target: "reth::cli", ?tip, "Successfully fetched tip");
return Ok(tip_header);
return Ok(tip_header)
}
Err(error) => {
fetch_failures += 1;

View File

@ -2,7 +2,7 @@
//! blocks from the network.
use eyre::Result;
use reth_consensus_common::validation::validate_block_standalone;
use reth_consensus_common::validation::validate_block_pre_execution;
use reth_fs_util as fs;
use reth_interfaces::p2p::{
bodies::client::BodiesClient,
@ -121,7 +121,7 @@ where
withdrawals: block.withdrawals,
};
validate_block_standalone(&block, &chain_spec)?;
validate_block_pre_execution(&block, &chain_spec)?;
Ok(block)
}

View File

@ -20,4 +20,4 @@ reth-consensus.workspace = true
[features]
optimism = [
"reth-primitives/optimism",
]
]

View File

@ -10,10 +10,18 @@
#![cfg(feature = "optimism")]
use reth_consensus::{Consensus, ConsensusError};
use reth_consensus_common::{validation, validation::validate_header_extradata};
use reth_primitives::{ChainSpec, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256};
use reth_consensus_common::validation::{
validate_block_pre_execution, validate_header_extradata, validate_header_standalone,
};
use reth_primitives::{
BlockWithSenders, ChainSpec, Header, Receipt, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH,
U256,
};
use std::{sync::Arc, time::SystemTime};
mod validation;
pub use validation::validate_block_post_execution;
/// Optimism consensus implementation.
///
/// Provides basic checks as outlined in the execution specs.
@ -37,7 +45,7 @@ impl OptimismBeaconConsensus {
impl Consensus for OptimismBeaconConsensus {
fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> {
validation::validate_header_standalone(header, &self.chain_spec)?;
validate_header_standalone(header, &self.chain_spec)?;
Ok(())
}
@ -96,7 +104,15 @@ impl Consensus for OptimismBeaconConsensus {
Ok(())
}
fn validate_block(&self, block: &SealedBlock) -> Result<(), ConsensusError> {
validation::validate_block_standalone(block, &self.chain_spec)
fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> {
validate_block_pre_execution(block, &self.chain_spec)
}
fn validate_block_post_execution(
&self,
block: &BlockWithSenders,
receipts: &[Receipt],
) -> Result<(), ConsensusError> {
validate_block_post_execution(block, &self.chain_spec, receipts)
}
}

View File

@ -0,0 +1,90 @@
use reth_consensus::ConsensusError;
use reth_primitives::{
gas_spent_by_transactions, proofs::calculate_receipt_root_optimism, BlockWithSenders, Bloom,
ChainSpec, GotExpected, Receipt, ReceiptWithBloom, B256,
};
/// Validate a block with regard to execution results:
///
/// - Compares the receipts root in the block header to the block body
/// - Compares the gas used in the block header to the actual gas usage after execution
pub fn validate_block_post_execution(
block: &BlockWithSenders,
chain_spec: &ChainSpec,
receipts: &[Receipt],
) -> Result<(), ConsensusError> {
// Before Byzantium, receipts contained state root that would mean that expensive
// operation as hashing that is required for state root got calculated in every
// transaction This was replaced with is_success flag.
// See more about EIP here: https://eips.ethereum.org/EIPS/eip-658
if chain_spec.is_byzantium_active_at_block(block.header.number) {
verify_receipts(
block.header.receipts_root,
block.header.logs_bloom,
receipts.iter(),
chain_spec,
block.timestamp,
)?;
}
// Check if gas used matches the value set in header.
let cumulative_gas_used =
receipts.last().map(|receipt| receipt.cumulative_gas_used).unwrap_or(0);
if block.gas_used != cumulative_gas_used {
return Err(ConsensusError::BlockGasUsed {
gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used },
gas_spent_by_tx: gas_spent_by_transactions(receipts),
})
}
Ok(())
}
/// Verify the calculated receipts root against the expected receipts root.
fn verify_receipts<'a>(
expected_receipts_root: B256,
expected_logs_bloom: Bloom,
receipts: impl Iterator<Item = &'a Receipt> + Clone,
chain_spec: &ChainSpec,
timestamp: u64,
) -> Result<(), ConsensusError> {
// Calculate receipts root.
let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::<Vec<ReceiptWithBloom>>();
let receipts_root =
calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp);
// Create header log bloom.
let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom);
compare_receipts_root_and_logs_bloom(
receipts_root,
logs_bloom,
expected_receipts_root,
expected_logs_bloom,
)?;
Ok(())
}
/// Compare the calculated receipts root with the expected receipts root, also compare
/// the calculated logs bloom with the expected logs bloom.
fn compare_receipts_root_and_logs_bloom(
calculated_receipts_root: B256,
calculated_logs_bloom: Bloom,
expected_receipts_root: B256,
expected_logs_bloom: Bloom,
) -> Result<(), ConsensusError> {
if calculated_receipts_root != expected_receipts_root {
return Err(ConsensusError::BodyReceiptRootDiff(
GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(),
))
}
if calculated_logs_bloom != expected_logs_bloom {
return Err(ConsensusError::BodyBloomLogDiff(
GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(),
))
}
Ok(())
}

View File

@ -17,8 +17,12 @@ reth-primitives.workspace = true
reth-revm.workspace = true
reth-interfaces.workspace = true
reth-provider.workspace = true
reth-consensus-common.workspace = true
# Optimism
reth-optimism-consensus.workspace = true
# revm
revm.workspace = true
revm-primitives.workspace = true
@ -35,4 +39,5 @@ optimism = [
"reth-provider/optimism",
"reth-interfaces/optimism",
"revm-primitives/optimism",
"reth-optimism-consensus/optimism",
]

View File

@ -1,9 +1,6 @@
//! Optimism block executor.
use crate::{
l1::ensure_create2_deployer, verify::verify_receipts, OptimismBlockExecutionError,
OptimismEvmConfig,
};
use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig};
use reth_evm::{
execute::{
BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput,
@ -15,9 +12,10 @@ use reth_interfaces::{
executor::{BlockExecutionError, BlockValidationError},
provider::ProviderError,
};
use reth_optimism_consensus::validate_block_post_execution;
use reth_primitives::{
BlockNumber, BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt,
Receipts, TxType, Withdrawals, U256,
BlockNumber, BlockWithSenders, ChainSpec, Hardfork, Header, PruneModes, Receipt, Receipts,
TxType, Withdrawals, U256,
};
use reth_revm::{
batch::{BlockBatchRecord, BlockExecutorStats},
@ -30,7 +28,7 @@ use revm_primitives::{
BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState,
};
use std::sync::Arc;
use tracing::{debug, trace};
use tracing::trace;
/// Provides executors to execute regular ethereum blocks
#[derive(Debug, Clone)]
@ -157,12 +155,12 @@ where
transaction_gas_limit: transaction.gas_limit(),
block_available_gas,
}
.into());
.into())
}
// An optimism block should never contain blob transactions.
if matches!(transaction.tx_type(), TxType::Eip4844) {
return Err(OptimismBlockExecutionError::BlobTransactionRejected.into());
return Err(OptimismBlockExecutionError::BlobTransactionRejected.into())
}
// Cache the depositor account prior to the state transition for the deposit nonce.
@ -221,16 +219,6 @@ where
}
drop(evm);
// Check if gas used matches the value set in header.
if block.gas_used != cumulative_gas_used {
let receipts = Receipts::from_block_receipt(receipts);
return Err(BlockValidationError::BlockGasUsed {
gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used },
gas_spent_by_tx: receipts.gas_spent_by_tx()?,
}
.into());
}
Ok((receipts, cumulative_gas_used))
}
}
@ -292,8 +280,8 @@ where
///
/// Returns the receipts of the transactions in the block and the total gas used.
///
/// Returns an error if execution fails or receipt verification fails.
fn execute_and_verify(
/// Returns an error if execution fails.
fn execute_without_verification(
&mut self,
block: &BlockWithSenders,
total_difficulty: U256,
@ -312,23 +300,6 @@ where
// 3. apply post execution changes
self.post_execution(block, total_difficulty)?;
// Before Byzantium, receipts contained state root that would mean that expensive
// operation as hashing that is required for state root got calculated in every
// transaction This was replaced with is_success flag.
// See more about EIP here: https://eips.ethereum.org/EIPS/eip-658
if self.chain_spec().is_byzantium_active_at_block(block.header.number) {
if let Err(error) = verify_receipts(
block.header.receipts_root,
block.header.logs_bloom,
receipts.iter(),
self.chain_spec(),
block.timestamp,
) {
debug!(target: "evm", %error, ?receipts, "receipts verification failed");
return Err(error);
};
}
Ok((receipts, gas_used))
}
@ -383,7 +354,7 @@ where
/// State changes are committed to the database.
fn execute(mut self, input: Self::Input<'_>) -> Result<Self::Output, Self::Error> {
let BlockExecutionInput { block, total_difficulty } = input;
let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?;
let (receipts, gas_used) = self.execute_without_verification(block, total_difficulty)?;
// NOTE: we need to merge keep the reverts for the bundle retention
self.state.merge_transitions(BundleRetention::Reverts);
@ -426,9 +397,12 @@ where
type Output = BatchBlockExecutionOutput;
type Error = BlockExecutionError;
fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> {
fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> {
let BlockExecutionInput { block, total_difficulty } = input;
let (receipts, _gas_used) = self.executor.execute_and_verify(block, total_difficulty)?;
let (receipts, _gas_used) =
self.executor.execute_without_verification(block, total_difficulty)?;
validate_block_post_execution(block, self.executor.chain_spec(), &receipts)?;
// prepare the state according to the prune mode
let retention = self.batch_record.bundle_retention(block.number);
@ -557,7 +531,7 @@ mod tests {
// Attempt to execute a block with one deposit and one non-deposit transaction
executor
.execute_one(
.execute_and_verify_one(
(
&BlockWithSenders {
block: Block {
@ -638,7 +612,7 @@ mod tests {
// attempt to execute an empty block with parent beacon block root, this should not fail
executor
.execute_one(
.execute_and_verify_one(
(
&BlockWithSenders {
block: Block {

View File

@ -23,7 +23,6 @@ pub mod l1;
pub use l1::*;
mod error;
pub mod verify;
pub use error::OptimismBlockExecutionError;
/// Optimism-related EVM configuration.

View File

@ -1,58 +0,0 @@
//! Helpers for verifying the receipts.
use reth_interfaces::executor::{BlockExecutionError, BlockValidationError};
use reth_primitives::{
proofs::calculate_receipt_root_optimism, Bloom, ChainSpec, GotExpected, Receipt,
ReceiptWithBloom, B256,
};
/// Verify the calculated receipts root against the expected receipts root.
pub fn verify_receipts<'a>(
expected_receipts_root: B256,
expected_logs_bloom: Bloom,
receipts: impl Iterator<Item = &'a Receipt> + Clone,
chain_spec: &ChainSpec,
timestamp: u64,
) -> Result<(), BlockExecutionError> {
// Calculate receipts root.
let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::<Vec<ReceiptWithBloom>>();
let receipts_root =
calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp);
// Create header log bloom.
let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom);
compare_receipts_root_and_logs_bloom(
receipts_root,
logs_bloom,
expected_receipts_root,
expected_logs_bloom,
)?;
Ok(())
}
/// Compare the calculated receipts root with the expected receipts root, also compare
/// the calculated logs bloom with the expected logs bloom.
pub fn compare_receipts_root_and_logs_bloom(
calculated_receipts_root: B256,
calculated_logs_bloom: Bloom,
expected_receipts_root: B256,
expected_logs_bloom: Bloom,
) -> Result<(), BlockExecutionError> {
if calculated_receipts_root != expected_receipts_root {
return Err(BlockValidationError::ReceiptRootDiff(
GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(),
)
.into())
}
if calculated_logs_bloom != expected_logs_bloom {
return Err(BlockValidationError::BloomLogDiff(
GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(),
)
.into())
}
Ok(())
}

View File

@ -155,7 +155,7 @@ impl ExecutionPayloadValidator {
let shanghai_active = self.is_shanghai_active_at_timestamp(sealed_block.timestamp);
if !shanghai_active && sealed_block.withdrawals.is_some() {
// shanghai not active but withdrawals present
return Err(PayloadError::PreShanghaiBlockWithWitdrawals);
return Err(PayloadError::PreShanghaiBlockWithWitdrawals)
}
// EIP-4844 checks

View File

@ -115,7 +115,7 @@ impl TryFrom<alloy_rpc_types::Transaction> for Transaction {
return Err(ConversionError::Eip2718Error(
RlpError::Custom("EIP-1559 fields are present in a legacy transaction")
.into(),
));
))
}
Ok(Transaction::Legacy(TxLegacy {
chain_id: tx.chain_id,

View File

@ -69,7 +69,7 @@ impl ReusableDecompressor {
reserved_upper_bound = true;
if let Some(upper_bound) = Decompressor::upper_bound(src) {
if let Some(additional) = upper_bound.checked_sub(self.buf.capacity()) {
break 'b additional;
break 'b additional
}
}
}

View File

@ -82,7 +82,9 @@ pub use prune::{
PrunePurpose, PruneSegment, PruneSegmentError, ReceiptsLogPruneConfig,
MINIMUM_PRUNING_DISTANCE,
};
pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts};
pub use receipt::{
gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts,
};
pub use static_file::StaticFileSegment;
pub use storage::StorageEntry;

View File

@ -1,6 +1,6 @@
#[cfg(feature = "zstd-codec")]
use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR};
use crate::{logs_bloom, Bloom, Bytes, PruneSegmentError, TxType, B256};
use crate::{logs_bloom, Bloom, Bytes, TxType, B256};
use alloy_primitives::Log;
use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodable};
use bytes::{Buf, BufMut};
@ -117,22 +117,6 @@ impl Receipts {
timestamp,
))
}
/// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used).
pub fn gas_spent_by_tx(&self) -> Result<Vec<(u64, u64)>, PruneSegmentError> {
let Some(block_r) = self.last() else {
return Ok(vec![]);
};
let mut out = Vec::with_capacity(block_r.len());
for (id, tx_r) in block_r.iter().enumerate() {
if let Some(receipt) = tx_r.as_ref() {
out.push((id as u64, receipt.cumulative_gas_used));
} else {
return Err(PruneSegmentError::ReceiptsPruned);
}
}
Ok(out)
}
}
impl Deref for Receipts {
@ -203,6 +187,17 @@ impl ReceiptWithBloom {
}
}
/// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used).
pub fn gas_spent_by_transactions<T: Deref<Target = Receipt>>(
receipts: impl IntoIterator<Item = T>,
) -> Vec<(u64, u64)> {
receipts
.into_iter()
.enumerate()
.map(|(id, receipt)| (id as u64, receipt.deref().cumulative_gas_used))
.collect()
}
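Given a block's receipts (a hypothetical `receipts: &[Receipt]` binding), the new free function replaces the pruning-aware `Receipts::gas_spent_by_tx` removed above:

// Pairs each transaction index with that receipt's cumulative_gas_used; the
// consensus BlockGasUsed error uses this to report where gas usage diverged.
let gas_spent: Vec<(u64, u64)> = gas_spent_by_transactions(receipts.iter());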
#[cfg(any(test, feature = "arbitrary"))]
impl proptest::arbitrary::Arbitrary for Receipt {
type Parameters = ();
@ -312,7 +307,7 @@ impl ReceiptWithBloom {
let b = &mut &**buf;
let rlp_head = alloy_rlp::Header::decode(b)?;
if !rlp_head.list {
return Err(alloy_rlp::Error::UnexpectedString);
return Err(alloy_rlp::Error::UnexpectedString)
}
let started_len = b.len();
@ -357,7 +352,7 @@ impl ReceiptWithBloom {
return Err(alloy_rlp::Error::ListLengthMismatch {
expected: rlp_head.payload_length,
got: consumed,
});
})
}
*buf = *b;
Ok(this)
@ -510,7 +505,7 @@ impl<'a> ReceiptWithBloomEncoder<'a> {
fn encode_inner(&self, out: &mut dyn BufMut, with_header: bool) {
if matches!(self.receipt.tx_type, TxType::Legacy) {
self.encode_fields(out);
return;
return
}
let mut payload = Vec::new();

View File

@ -1424,7 +1424,7 @@ impl Decodable for TransactionSigned {
/// header if the first byte is less than `0xf7`.
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
if buf.is_empty() {
return Err(RlpError::InputTooShort);
return Err(RlpError::InputTooShort)
}
// decode header

View File

@ -151,7 +151,7 @@ where
return Some(batch_response_error(
Id::Null,
reject_too_big_request(max_request_body_size as u32),
));
))
}
// Single request or notification

View File

@ -871,7 +871,7 @@ mod tests {
// and you might want to do something smarter if it's
// critical that "the most recent item" must be sent when it is produced.
if sink.send(notif).await.is_err() {
break Ok(());
break Ok(())
}
closed = c;

View File

@ -755,7 +755,7 @@ mod u256_numeric_string {
match val {
serde_json::Value::String(s) => {
if let Ok(val) = s.parse::<u128>() {
return Ok(U256::from(val));
return Ok(U256::from(val))
}
U256::from_str(&s).map_err(de::Error::custom)
}

View File

@ -129,7 +129,7 @@ where
if tx_len != receipts.len() {
return Err(internal_rpc_err(
"the number of transactions does not match the number of receipts",
));
))
}
// make sure the block is full

View File

@ -240,9 +240,11 @@ where
// Execute the block
let execute_start = Instant::now();
executor.execute_one((&block, td).into()).map_err(|error| StageError::Block {
block: Box::new(block.header.clone().seal_slow()),
error: BlockErrorKind::Execution(error),
executor.execute_and_verify_one((&block, td).into()).map_err(|error| {
StageError::Block {
block: Box::new(block.header.clone().seal_slow()),
error: BlockErrorKind::Execution(error),
}
})?;
execution_duration += execute_start.elapsed();

View File

@ -534,7 +534,7 @@ impl DataReader {
let offset_end = index + self.offset_size as usize;
if offset_end > self.offset_mmap.len() {
return Err(NippyJarError::OffsetOutOfBounds { index });
return Err(NippyJarError::OffsetOutOfBounds { index })
}
buffer[..self.offset_size as usize].copy_from_slice(&self.offset_mmap[index..offset_end]);

View File

@ -522,7 +522,7 @@ impl StaticFileProviderRW {
if self.prune_on_commit.is_some() {
return Err(ProviderError::NippyJar(
"Pruning should be comitted before appending or pruning more data".to_string(),
));
))
}
Ok(())
}