mirror of https://github.com/hl-archive-node/nanoreth.git
synced 2025-12-06 10:59:55 +00:00
perf: Skip state root calculation
@@ -195,6 +195,7 @@ pub enum StateRootMessage {
 }
 
 /// Message about completion of proof calculation for a specific state update
+#[allow(dead_code)]
 #[derive(Debug)]
 pub struct ProofCalculated {
     /// The index of this proof in the sequence of state updates
@@ -255,7 +256,7 @@ impl ProofSequencer {
 
         // return early if we don't have the next expected proof
         if !self.pending_proofs.contains_key(&self.next_to_deliver) {
-            return Vec::new()
+            return Vec::new();
         }
 
         let mut consecutive_proofs = Vec::with_capacity(self.pending_proofs.len());
@@ -390,7 +391,7 @@ where
                 sequence_number: input.proof_sequence_number,
                 state: input.hashed_state_update,
             });
-            return
+            return;
         }
 
         if self.inflight >= self.max_concurrent {
@@ -480,10 +481,13 @@ where
 #[metrics(scope = "tree.root")]
 struct StateRootTaskMetrics {
     /// Histogram of proof calculation durations.
+    #[allow(unused)]
     pub proof_calculation_duration_histogram: Histogram,
     /// Histogram of proof calculation account targets.
+    #[allow(unused)]
     pub proof_calculation_account_targets_histogram: Histogram,
     /// Histogram of proof calculation storage targets.
+    #[allow(unused)]
     pub proof_calculation_storage_targets_histogram: Histogram,
 
     /// Histogram of sparse trie update durations.
@@ -492,10 +496,13 @@ struct StateRootTaskMetrics {
     pub sparse_trie_final_update_duration_histogram: Histogram,
 
     /// Histogram of state updates received.
+    #[allow(unused)]
     pub state_updates_received_histogram: Histogram,
     /// Histogram of proofs processed.
+    #[allow(unused)]
     pub proofs_processed_histogram: Histogram,
     /// Histogram of state root update iterations.
+    #[allow(unused)]
     pub state_root_iterations_histogram: Histogram,
 
     /// Histogram of the number of updated state nodes.
@@ -531,6 +538,7 @@ pub struct StateRootTask<Factory> {
     /// Task configuration.
     config: StateRootConfig<Factory>,
     /// Receiver for state root related messages.
+    #[allow(unused)]
     rx: Receiver<StateRootMessage>,
     /// Sender for state root related messages.
     tx: Sender<StateRootMessage>,
@@ -539,6 +547,7 @@ pub struct StateRootTask<Factory> {
     /// Proof sequencing handler.
     proof_sequencer: ProofSequencer,
     /// Reference to the shared thread pool for parallel proof generation.
+    #[allow(unused)]
     thread_pool: Arc<rayon::ThreadPool>,
     /// Manages calculation of multiproofs.
     multiproof_manager: MultiproofManager<Factory>,
@@ -591,12 +600,6 @@ where
 
     /// Spawns the state root task and returns a handle to await its result.
     pub fn spawn(self) -> StateRootHandle {
-        let sparse_trie_tx = Self::spawn_sparse_trie(
-            self.thread_pool.clone(),
-            self.config.clone(),
-            self.metrics.clone(),
-            self.tx.clone(),
-        );
         let (tx, rx) = mpsc::sync_channel(1);
         std::thread::Builder::new()
             .name("State Root Task".to_string())
@@ -605,8 +608,11 @@ where
 
                 self.observe_config();
 
-                let result = self.run(sparse_trie_tx);
-                let _ = tx.send(result);
+                let _ = tx.send(Ok(StateRootComputeOutcome {
+                    state_root: (B256::default(), Default::default()),
+                    total_time: Duration::default(),
+                    time_from_last_update: Duration::default(),
+                }));
             })
             .expect("failed to spawn state root thread");
 
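This hunk is the core of the change: the spawned "State Root Task" thread no longer drives the proof pipeline via `self.run(sparse_trie_tx)` and forwards its result; it immediately sends back a zeroed `StateRootComputeOutcome`. Below is a minimal, self-contained sketch of the new behaviour, assuming simplified stand-ins for `B256`, the trie update set, and the outcome struct (the real reth types are richer):

use std::{sync::mpsc, thread, time::Duration};

// Simplified stand-ins for alloy_primitives::B256 and reth's trie update set.
#[derive(Debug, Default)]
struct B256([u8; 32]);
#[derive(Debug, Default)]
struct TrieUpdates;

#[derive(Debug)]
struct StateRootComputeOutcome {
    state_root: (B256, TrieUpdates),
    total_time: Duration,
    time_from_last_update: Duration,
}

fn main() {
    let (tx, rx) = mpsc::sync_channel(1);
    thread::Builder::new()
        .name("State Root Task".to_string())
        .spawn(move || {
            // Before this commit: `let result = self.run(sparse_trie_tx);` followed
            // by `tx.send(result)`. Now the computation is skipped entirely and a
            // placeholder outcome is sent instead.
            let _ = tx.send(Ok::<_, ()>(StateRootComputeOutcome {
                state_root: (B256::default(), TrieUpdates::default()),
                total_time: Duration::default(),
                time_from_last_update: Duration::default(),
            }));
        })
        .expect("failed to spawn state root thread");

    // Whoever awaits the handle now always observes the zero root and zero timings.
    println!("{:?}", rx.recv().unwrap());
}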
@@ -614,6 +620,7 @@ where
     }
 
     /// Logs and records in metrics the state root config parameters.
+    #[allow(unused)]
    fn observe_config(&self) {
         let nodes_sorted_account_nodes = self.config.nodes_sorted.account_nodes.len();
         let nodes_sorted_removed_nodes = self.config.nodes_sorted.removed_nodes.len();
@@ -659,6 +666,7 @@ where
     }
 
     /// Spawn long running sparse trie task that forwards the final result upon completion.
+    #[allow(unused)]
     fn spawn_sparse_trie(
         thread_pool: Arc<rayon::ThreadPool>,
         config: StateRootConfig<Factory>,
@@ -682,6 +690,7 @@ where
     }
 
     /// Handles request for proof prefetch.
+    #[allow(unused)]
     fn on_prefetch_proof(&mut self, targets: MultiProofTargets) {
         let proof_targets = self.get_prefetch_proof_targets(targets);
         extend_multi_proof_targets_ref(&mut self.fetched_proof_targets, &proof_targets);
@@ -697,6 +706,7 @@ where
     }
 
     /// Calls `get_proof_targets` with existing proof targets for prefetching.
+    #[allow(unused)]
     fn get_prefetch_proof_targets(&self, mut targets: MultiProofTargets) -> MultiProofTargets {
         // Here we want to filter out any targets that are already fetched
         //
@@ -726,7 +736,7 @@ where
             let Some(fetched_storage) = self.fetched_proof_targets.get(hashed_address) else {
                 // this means the account has not been fetched yet, so we must fetch everything
                 // associated with this account
-                continue
+                continue;
             };
 
             let prev_target_storage_len = target_storage.len();
@@ -749,6 +759,7 @@ where
     /// Handles state updates.
     ///
     /// Returns proof targets derived from the state update.
+    #[allow(unused)]
     fn on_state_update(
         &mut self,
         source: StateChangeSource,
@@ -279,8 +279,6 @@ where
         // Reset the checkpoint
         self.save_execution_checkpoint(provider, None)?;
 
-        validate_state_root(trie_root, SealedHeader::seal_slow(target_block), to_block)?;
-
         Ok(ExecOutput {
             checkpoint: StageCheckpoint::new(to_block)
                 .with_entities_stage_checkpoint(entities_checkpoint),
@@ -327,13 +325,6 @@ where
         let (block_root, updates) = StateRoot::incremental_root_with_updates(tx, range)
             .map_err(|e| StageError::Fatal(Box::new(e)))?;
 
-        // Validate the calculated state root
-        let target = provider
-            .header_by_number(input.unwind_to)?
-            .ok_or_else(|| ProviderError::HeaderNotFound(input.unwind_to.into()))?;
-
-        validate_state_root(block_root, SealedHeader::seal_slow(target), input.unwind_to)?;
-
         // Validation passed, apply unwind changes to the database.
         provider.write_trie_updates(&updates)?;
 
@@ -344,26 +335,6 @@ where
     }
 }
 
-/// Check that the computed state root matches the root in the expected header.
-#[inline]
-fn validate_state_root<H: BlockHeader + Sealable + Debug>(
-    got: B256,
-    expected: SealedHeader<H>,
-    target_block: BlockNumber,
-) -> Result<(), StageError> {
-    if got == expected.state_root() {
-        Ok(())
-    } else {
-        error!(target: "sync::stages::merkle", ?target_block, ?got, ?expected, "Failed to verify block state root! {INVALID_STATE_ROOT_ERROR_MESSAGE}");
-        Err(StageError::Block {
-            error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff(
-                GotExpected { got, expected: expected.state_root() }.into(),
-            )),
-            block: Box::new(expected.block_with_parent()),
-        })
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
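The three hunks above drop both call sites of `validate_state_root` and then the helper itself, so this stage no longer compares the root it computes against the header's state root in either `execute` or `unwind`. For reference, the removed guard reduced to an equality check that raised a `ConsensusError::BodyStateRootDiff` wrapped in `StageError::Block` on mismatch; a condensed, self-contained sketch with simplified error types:

// Condensed from the deleted helper; B256 and the error type are simplified stand-ins.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
struct B256([u8; 32]);

#[derive(Debug)]
struct RootMismatch {
    got: B256,
    expected: B256,
}

// The removed validate_state_root boiled down to this comparison plus error reporting.
fn validate_state_root(got: B256, expected: B256) -> Result<(), RootMismatch> {
    if got == expected {
        Ok(())
    } else {
        Err(RootMismatch { got, expected })
    }
}

fn main() {
    // After this commit the stage never performs this check: it proceeds (and, in
    // unwind, writes the trie updates) regardless of whether the roots would match.
    assert!(validate_state_root(B256::default(), B256::default()).is_ok());
    assert!(validate_state_root(B256([1; 32]), B256::default()).is_err());
}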
@@ -547,60 +547,6 @@ where
     Ok(())
 }
 
-/// Computes the state root (from scratch) based on the accounts and storages present in the
-/// database.
-fn compute_state_root<Provider>(provider: &Provider) -> eyre::Result<B256>
-where
-    Provider: DBProvider<Tx: DbTxMut> + TrieWriter,
-{
-    trace!(target: "reth::cli", "Computing state root");
-
-    let tx = provider.tx_ref();
-    let mut intermediate_state: Option<IntermediateStateRootState> = None;
-    let mut total_flushed_updates = 0;
-
-    loop {
-        match StateRootComputer::from_tx(tx)
-            .with_intermediate_state(intermediate_state)
-            .root_with_progress()?
-        {
-            StateRootProgress::Progress(state, _, updates) => {
-                let updated_len = provider.write_trie_updates(&updates)?;
-                total_flushed_updates += updated_len;
-
-                trace!(target: "reth::cli",
-                    last_account_key = %state.last_account_key,
-                    updated_len,
-                    total_flushed_updates,
-                    "Flushing trie updates"
-                );
-
-                intermediate_state = Some(*state);
-
-                if total_flushed_updates % SOFT_LIMIT_COUNT_FLUSHED_UPDATES == 0 {
-                    info!(target: "reth::cli",
-                        total_flushed_updates,
-                        "Flushing trie updates"
-                    );
-                }
-            }
-            StateRootProgress::Complete(root, _, updates) => {
-                let updated_len = provider.write_trie_updates(&updates)?;
-                total_flushed_updates += updated_len;
-
-                trace!(target: "reth::cli",
-                    %root,
-                    updated_len,
-                    total_flushed_updates,
-                    "State root has been computed"
-                );
-
-                return Ok(root)
-            }
-        }
-    }
-}
-
 /// Type to deserialize state root from state dump file.
 #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
 struct StateRoot {
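The final hunk deletes the `compute_state_root` helper, which rebuilt the state root from scratch and flushed intermediate trie updates to the database after each `StateRootProgress::Progress` step until `Complete` was returned. A rough, self-contained sketch of that progress-loop pattern; the driver below is a toy stand-in for `StateRootComputer::root_with_progress`, and the chunk counts are invented for illustration:

// Toy stand-ins for reth's StateRootProgress and intermediate state.
#[derive(Debug, Clone, Copy, Default)]
struct B256([u8; 32]);

enum StateRootProgress {
    // (intermediate state to resume from, number of trie updates produced)
    Progress(u64, usize),
    // (final root, number of trie updates produced)
    Complete(B256, usize),
}

// Pretend each call advances one chunk and completes after `total` chunks.
fn root_with_progress(resume_from: Option<u64>, total: u64) -> StateRootProgress {
    let next = resume_from.map_or(1, |chunk| chunk + 1);
    if next < total {
        StateRootProgress::Progress(next, 100)
    } else {
        StateRootProgress::Complete(B256::default(), 100)
    }
}

fn main() {
    let mut intermediate_state = None;
    let mut total_flushed_updates = 0usize;
    let root = loop {
        match root_with_progress(intermediate_state, 3) {
            StateRootProgress::Progress(state, updated_len) => {
                // The deleted helper wrote the partial trie updates here and logged progress.
                total_flushed_updates += updated_len;
                intermediate_state = Some(state);
            }
            StateRootProgress::Complete(root, updated_len) => {
                total_flushed_updates += updated_len;
                break root;
            }
        }
    };
    println!("root = {root:?}, flushed {total_flushed_updates} updates");
}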