diff --git a/.gitignore b/.gitignore
index 96bb78b92..3184275d0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,4 +29,7 @@ data/
 proptest-regressions/
 
 # Release artifacts
-dist/
\ No newline at end of file
+dist/
+
+# VSCode
+.vscode
\ No newline at end of file
diff --git a/Cargo.lock b/Cargo.lock
index 93eb30716..222fe94c2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5428,6 +5428,7 @@ dependencies = [
  "reth-db",
  "reth-discv4",
  "reth-downloaders",
+ "reth-interfaces",
  "reth-net-nat",
  "reth-network",
  "reth-network-api",
diff --git a/bin/reth/src/chain/import.rs b/bin/reth/src/chain/import.rs
index 6233eca86..ca4904fcc 100644
--- a/bin/reth/src/chain/import.rs
+++ b/bin/reth/src/chain/import.rs
@@ -177,7 +177,7 @@ impl ImportCommand {
                     },
                 )),
             )
-            .build(db);
+            .build(db, self.chain.clone());
 
         let events = pipeline.events().map(Into::into);
 
diff --git a/bin/reth/src/db/mod.rs b/bin/reth/src/db/mod.rs
index 30b0b2ec4..eff705a16 100644
--- a/bin/reth/src/db/mod.rs
+++ b/bin/reth/src/db/mod.rs
@@ -100,7 +100,7 @@ impl Command {
             reth_db::mdbx::EnvKind::RW,
         )?;
 
-        let mut tool = DbTool::new(&db)?;
+        let mut tool = DbTool::new(&db, self.chain.clone())?;
 
         match self.command {
             // TODO: We'll need to add this on the DB trait.
diff --git a/bin/reth/src/debug_cmd/execution.rs b/bin/reth/src/debug_cmd/execution.rs
index 0e1db2ab9..bb2a1cecf 100644
--- a/bin/reth/src/debug_cmd/execution.rs
+++ b/bin/reth/src/debug_cmd/execution.rs
@@ -1,6 +1,6 @@
 //! Command for debugging execution.
 use crate::{
-    args::{get_secret_key, NetworkArgs},
+    args::{get_secret_key, utils::genesis_value_parser, NetworkArgs},
     dirs::{DataDirPath, MaybePlatformPath},
     node::events,
     runner::CliContext,
@@ -26,17 +26,15 @@ use reth_interfaces::{
 use reth_network::NetworkHandle;
 use reth_network_api::NetworkInfo;
 use reth_primitives::{stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, H256};
-use reth_provider::{providers::get_stage_checkpoint, ShareableDatabase, Transaction};
+use reth_provider::{providers::get_stage_checkpoint, ShareableDatabase};
 use reth_staged_sync::utils::init::{init_db, init_genesis};
-
-use crate::args::utils::genesis_value_parser;
 use reth_stages::{
     sets::DefaultStages,
     stages::{
         ExecutionStage, ExecutionStageThresholds, HeaderSyncMode, SenderRecoveryStage,
         TotalDifficultyStage,
     },
-    Pipeline, StageSet,
+    Pipeline, PipelineError, StageSet,
 };
 use reth_tasks::TaskExecutor;
 use std::{
@@ -146,7 +144,7 @@ impl Command {
                     ExecutionStageThresholds { max_blocks: None, max_changes: None },
                 )),
             )
-            .build(db);
+            .build(db, self.chain.clone());
 
         Ok(pipeline)
     }
@@ -252,6 +250,8 @@ impl Command {
         }
 
         let mut current_max_block = latest_block_number;
+        let shareable_db = ShareableDatabase::new(&db, self.chain.clone());
+
         while current_max_block < self.to {
             let next_block = current_max_block + 1;
             let target_block = self.to.min(current_max_block + self.interval);
@@ -266,8 +266,10 @@ impl Command {
 
             // Unwind the pipeline without committing.
             {
-                let tx = Transaction::new(db.as_ref())?;
-                tx.take_block_and_execution_range(&self.chain, next_block..=target_block)?;
+                shareable_db
+                    .provider_rw()
+                    .map_err(PipelineError::Interface)?
+                    .take_block_and_execution_range(&self.chain, next_block..=target_block)?;
             }
 
             // Update latest block
diff --git a/bin/reth/src/debug_cmd/merkle.rs b/bin/reth/src/debug_cmd/merkle.rs
index 193f4f331..c05fc6c54 100644
--- a/bin/reth/src/debug_cmd/merkle.rs
+++ b/bin/reth/src/debug_cmd/merkle.rs
@@ -9,14 +9,14 @@ use reth_primitives::{
     stage::{StageCheckpoint, StageId},
     ChainSpec,
 };
-use reth_provider::Transaction;
+use reth_provider::ShareableDatabase;
 use reth_staged_sync::utils::init::init_db;
 use reth_stages::{
     stages::{
         AccountHashingStage, ExecutionStage, ExecutionStageThresholds, MerkleStage,
         StorageHashingStage,
     },
-    ExecInput, Stage,
+    ExecInput, PipelineError, Stage,
 };
 use std::sync::Arc;
@@ -68,10 +68,11 @@ impl Command {
         std::fs::create_dir_all(&db_path)?;
         let db = Arc::new(init_db(db_path)?);
 
-        let mut tx = Transaction::new(db.as_ref())?;
+        let shareable_db = ShareableDatabase::new(&db, self.chain.clone());
+        let mut provider_rw = shareable_db.provider_rw().map_err(PipelineError::Interface)?;
 
         let execution_checkpoint_block =
-            tx.get_stage_checkpoint(StageId::Execution)?.unwrap_or_default().block_number;
+            provider_rw.get_stage_checkpoint(StageId::Execution)?.unwrap_or_default().block_number;
         assert!(execution_checkpoint_block < self.to, "Nothing to run");
 
         // Check if any of hashing or merkle stages aren't on the same block number as
@@ -79,7 +80,7 @@ impl Command {
         let should_reset_stages =
             [StageId::AccountHashing, StageId::StorageHashing, StageId::MerkleExecute]
                 .into_iter()
-                .map(|stage_id| tx.get_stage_checkpoint(stage_id))
+                .map(|stage_id| provider_rw.get_stage_checkpoint(stage_id))
                 .collect::<Result<Vec<_>, _>>()?
                 .into_iter()
                 .map(Option::unwrap_or_default)
@@ -109,7 +110,7 @@ impl Command {
 
             execution_stage
                 .execute(
-                    &mut tx,
+                    &mut provider_rw,
                     ExecInput {
                         target: Some(block),
                         checkpoint: block.checked_sub(1).map(StageCheckpoint::new),
@@ -121,7 +122,7 @@ impl Command {
             while !account_hashing_done {
                 let output = account_hashing_stage
                     .execute(
-                        &mut tx,
+                        &mut provider_rw,
                         ExecInput {
                             target: Some(block),
                             checkpoint: progress.map(StageCheckpoint::new),
@@ -135,7 +136,7 @@ impl Command {
             while !storage_hashing_done {
                 let output = storage_hashing_stage
                     .execute(
-                        &mut tx,
+                        &mut provider_rw,
                         ExecInput {
                             target: Some(block),
                             checkpoint: progress.map(StageCheckpoint::new),
@@ -147,7 +148,7 @@ impl Command {
             let incremental_result = merkle_stage
                 .execute(
-                    &mut tx,
+                    &mut provider_rw,
                     ExecInput {
                         target: Some(block),
                         checkpoint: progress.map(StageCheckpoint::new),
@@ -157,29 +158,33 @@ impl Command {
 
             if incremental_result.is_err() {
                 tracing::warn!(target: "reth::cli", block, "Incremental calculation failed, retrying from scratch");
-                let incremental_account_trie = tx
+                let incremental_account_trie = provider_rw
+                    .tx_ref()
                     .cursor_read::<tables::AccountsTrie>()?
                     .walk_range(..)?
                     .collect::<Result<Vec<_>, _>>()?;
-                let incremental_storage_trie = tx
+                let incremental_storage_trie = provider_rw
+                    .tx_ref()
                     .cursor_dup_read::<tables::StoragesTrie>()?
                     .walk_range(..)?
                     .collect::<Result<Vec<_>, _>>()?;
 
                 let clean_input = ExecInput { target: Some(block), checkpoint: None };
                 loop {
-                    let clean_result = merkle_stage.execute(&mut tx, clean_input).await;
+                    let clean_result = merkle_stage.execute(&mut provider_rw, clean_input).await;
                     assert!(clean_result.is_ok(), "Clean state root calculation failed");
                     if clean_result.unwrap().done {
                         break
                     }
                 }
 
-                let clean_account_trie = tx
+                let clean_account_trie = provider_rw
+                    .tx_ref()
                     .cursor_read::<tables::AccountsTrie>()?
                     .walk_range(..)?
                     .collect::<Result<Vec<_>, _>>()?;
-                let clean_storage_trie = tx
+                let clean_storage_trie = provider_rw
+                    .tx_ref()
                     .cursor_dup_read::<tables::StoragesTrie>()?
                     .walk_range(..)?
                     .collect::<Result<Vec<_>, _>>()?;
diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs
index 8877e81ab..17163ed09 100644
--- a/bin/reth/src/node/mod.rs
+++ b/bin/reth/src/node/mod.rs
@@ -689,7 +689,7 @@ impl Command {
                     },
                 )),
             )
-            .build(db);
+            .build(db, self.chain.clone());
 
         Ok(pipeline)
     }
diff --git a/bin/reth/src/stage/drop.rs b/bin/reth/src/stage/drop.rs
index 482f17c3c..8b4bf2836 100644
--- a/bin/reth/src/stage/drop.rs
+++ b/bin/reth/src/stage/drop.rs
@@ -59,7 +59,7 @@ impl Command {
         let db = Env::<WriteMap>::open(db_path.as_ref(), reth_db::mdbx::EnvKind::RW)?;
 
-        let tool = DbTool::new(&db)?;
+        let tool = DbTool::new(&db, self.chain.clone())?;
 
         tool.db.update(|tx| {
             match &self.stage {
diff --git a/bin/reth/src/stage/dump/execution.rs b/bin/reth/src/stage/dump/execution.rs
index c4c313541..8af0e225e 100644
--- a/bin/reth/src/stage/dump/execution.rs
+++ b/bin/reth/src/stage/dump/execution.rs
@@ -4,10 +4,11 @@ use eyre::Result;
 use reth_db::{
     cursor::DbCursorRO, database::Database, table::TableImporter, tables, transaction::DbTx,
 };
-use reth_primitives::{stage::StageCheckpoint, MAINNET};
-use reth_provider::Transaction;
+use reth_primitives::{stage::StageCheckpoint, ChainSpec};
+use reth_provider::ShareableDatabase;
+use reth_revm::Factory;
 use reth_stages::{stages::ExecutionStage, Stage, UnwindInput};
-use std::{ops::DerefMut, path::PathBuf};
+use std::{path::PathBuf, sync::Arc};
 use tracing::info;
 
 pub(crate) async fn dump_execution_stage(
@@ -17,14 +18,14 @@
     output_db: &PathBuf,
     should_run: bool,
 ) -> Result<()> {
-    let (output_db, tip_block_number) = setup::<DB>(from, to, output_db, db_tool)?;
+    let (output_db, tip_block_number) = setup(from, to, output_db, db_tool)?;
 
-    import_tables_with_range::<DB>(&output_db, db_tool, from, to)?;
+    import_tables_with_range(&output_db, db_tool, from, to)?;
 
-    unwind_and_copy::<DB>(db_tool, from, tip_block_number, &output_db).await?;
+    unwind_and_copy(db_tool, from, tip_block_number, &output_db).await?;
 
     if should_run {
-        dry_run(output_db, to, from).await?;
+        dry_run(db_tool.chain.clone(), output_db, to, from).await?;
     }
 
     Ok(())
@@ -93,13 +94,14 @@ async fn unwind_and_copy<DB: Database>(
     tip_block_number: u64,
     output_db: &reth_db::mdbx::Env<WriteMap>,
 ) -> eyre::Result<()> {
-    let mut unwind_tx = Transaction::new(db_tool.db)?;
+    let shareable_db = ShareableDatabase::new(db_tool.db, db_tool.chain.clone());
+    let mut provider = shareable_db.provider_rw()?;
 
-    let mut exec_stage = ExecutionStage::new_with_factory(reth_revm::Factory::new(MAINNET.clone()));
+    let mut exec_stage = ExecutionStage::new_with_factory(Factory::new(db_tool.chain.clone()));
 
     exec_stage
         .unwind(
-            &mut unwind_tx,
+            &mut provider,
             UnwindInput {
                 unwind_to: from,
                 checkpoint: StageCheckpoint::new(tip_block_number),
@@ -108,31 +110,32 @@
         )
         .await?;
 
-    let unwind_inner_tx = unwind_tx.deref_mut();
+    let unwind_inner_tx = provider.into_tx();
 
-    output_db.update(|tx| tx.import_dupsort::<tables::PlainStorageState, _>(unwind_inner_tx))??;
-    output_db.update(|tx| tx.import_table::<tables::PlainAccountState, _>(unwind_inner_tx))??;
-    output_db.update(|tx| tx.import_table::<tables::Bytecodes, _>(unwind_inner_tx))??;
-
-    unwind_tx.drop()?;
+    output_db
+        .update(|tx| tx.import_dupsort::<tables::PlainStorageState, _>(&unwind_inner_tx))??;
+    output_db.update(|tx| tx.import_table::<tables::PlainAccountState, _>(&unwind_inner_tx))??;
+    output_db.update(|tx| tx.import_table::<tables::Bytecodes, _>(&unwind_inner_tx))??;
 
     Ok(())
 }
 
 /// Try to re-execute the stage without committing
-async fn dry_run(
-    output_db: reth_db::mdbx::Env<WriteMap>,
+async fn dry_run<DB: Database>(
+    chain: Arc<ChainSpec>,
+    output_db: DB,
     to: u64,
     from: u64,
 ) -> eyre::Result<()> {
     info!(target: "reth::cli", "Executing stage. [dry-run]");
 
-    let mut tx = Transaction::new(&output_db)?;
-    let mut exec_stage = ExecutionStage::new_with_factory(reth_revm::Factory::new(MAINNET.clone()));
+    let shareable_db = ShareableDatabase::new(&output_db, chain.clone());
+    let mut provider = shareable_db.provider_rw()?;
+    let mut exec_stage = ExecutionStage::new_with_factory(Factory::new(chain.clone()));
 
     exec_stage
         .execute(
-            &mut tx,
+            &mut provider,
             reth_stages::ExecInput {
                 target: Some(to),
                 checkpoint: Some(StageCheckpoint::new(from)),
@@ -140,8 +143,6 @@
         )
         .await?;
 
-    tx.drop()?;
-
     info!(target: "reth::cli", "Success.");
 
     Ok(())
diff --git a/bin/reth/src/stage/dump/hashing_account.rs b/bin/reth/src/stage/dump/hashing_account.rs
index 642fa525a..d63a14cc8 100644
--- a/bin/reth/src/stage/dump/hashing_account.rs
+++ b/bin/reth/src/stage/dump/hashing_account.rs
@@ -2,10 +2,10 @@ use super::setup;
 use crate::utils::DbTool;
 use eyre::Result;
 use reth_db::{database::Database, table::TableImporter, tables};
-use reth_primitives::{stage::StageCheckpoint, BlockNumber};
-use reth_provider::Transaction;
+use reth_primitives::{stage::StageCheckpoint, BlockNumber, ChainSpec};
+use reth_provider::ShareableDatabase;
 use reth_stages::{stages::AccountHashingStage, Stage, UnwindInput};
-use std::{ops::DerefMut, path::PathBuf};
+use std::{path::PathBuf, sync::Arc};
 use tracing::info;
 
 pub(crate) async fn dump_hashing_account_stage(
@@ -15,17 +15,17 @@
     output_db: &PathBuf,
     should_run: bool,
 ) -> Result<()> {
-    let (output_db, tip_block_number) = setup::<DB>(from, to, output_db, db_tool)?;
+    let (output_db, tip_block_number) = setup(from, to, output_db, db_tool)?;
 
     // Import relevant AccountChangeSets
     output_db.update(|tx| {
         tx.import_table_with_range::<tables::AccountChangeSet, _>(&db_tool.db.tx()?, Some(from), to)
     })??;
 
-    unwind_and_copy::<DB>(db_tool, from, tip_block_number, &output_db).await?;
+    unwind_and_copy(db_tool, from, tip_block_number, &output_db).await?;
 
     if should_run {
-        dry_run(output_db, to, from).await?;
+        dry_run(db_tool.chain.clone(), output_db, to, from).await?;
     }
 
     Ok(())
@@ -38,12 +38,13 @@ async fn unwind_and_copy<DB: Database>(
     tip_block_number: u64,
     output_db: &reth_db::mdbx::Env<WriteMap>,
 ) -> eyre::Result<()> {
-    let mut unwind_tx = Transaction::new(db_tool.db)?;
+    let shareable_db = ShareableDatabase::new(db_tool.db, db_tool.chain.clone());
+    let mut provider = shareable_db.provider_rw()?;
     let mut exec_stage = AccountHashingStage::default();
 
     exec_stage
         .unwind(
-            &mut unwind_tx,
+            &mut provider,
             UnwindInput {
                 unwind_to: from,
                 checkpoint: StageCheckpoint::new(tip_block_number),
@@ -51,24 +52,24 @@
             },
         )
         .await?;
-    let unwind_inner_tx = unwind_tx.deref_mut();
+    let unwind_inner_tx = provider.into_tx();
 
-    output_db.update(|tx| tx.import_table::<tables::HashedAccount, _>(unwind_inner_tx))??;
-
-    unwind_tx.drop()?;
+    output_db.update(|tx| tx.import_table::<tables::HashedAccount, _>(&unwind_inner_tx))??;
 
     Ok(())
 }
 
 /// Try to re-execute the stage straightaway
-async fn dry_run(
-    output_db: reth_db::mdbx::Env<WriteMap>,
+async fn dry_run<DB: Database>(
+    chain: Arc<ChainSpec>,
+    output_db: DB,
     to: u64,
     from: u64,
 ) -> eyre::Result<()> {
     info!(target: "reth::cli", "Executing stage.");
 
-    let mut tx = Transaction::new(&output_db)?;
+    let shareable_db = ShareableDatabase::new(&output_db, chain);
+    let mut provider = shareable_db.provider_rw()?;
 
     let mut exec_stage = AccountHashingStage {
         clean_threshold: 1, // Forces hashing from scratch
         ..Default::default()
@@ -78,7 +79,7 @@
     while !exec_output {
         exec_output = exec_stage
             .execute(
-                &mut tx,
+                &mut provider,
                 reth_stages::ExecInput {
                     target: Some(to),
                     checkpoint: Some(StageCheckpoint::new(from)),
@@ -88,8 +89,6 @@
             .done;
     }
 
-    tx.drop()?;
-
     info!(target: "reth::cli", "Success.");
 
     Ok(())
diff --git a/bin/reth/src/stage/dump/hashing_storage.rs b/bin/reth/src/stage/dump/hashing_storage.rs
index 6529541be..6e717544c 100644
--- a/bin/reth/src/stage/dump/hashing_storage.rs
+++ b/bin/reth/src/stage/dump/hashing_storage.rs
@@ -2,10 +2,10 @@ use super::setup;
 use crate::utils::DbTool;
 use eyre::Result;
 use reth_db::{database::Database, table::TableImporter, tables};
-use reth_primitives::stage::StageCheckpoint;
-use reth_provider::Transaction;
+use reth_primitives::{stage::StageCheckpoint, ChainSpec};
+use reth_provider::ShareableDatabase;
 use reth_stages::{stages::StorageHashingStage, Stage, UnwindInput};
-use std::{ops::DerefMut, path::PathBuf};
+use std::{path::PathBuf, sync::Arc};
 use tracing::info;
 
 pub(crate) async fn dump_hashing_storage_stage(
@@ -15,12 +15,12 @@
     output_db: &PathBuf,
     should_run: bool,
 ) -> Result<()> {
-    let (output_db, tip_block_number) = setup::<DB>(from, to, output_db, db_tool)?;
+    let (output_db, tip_block_number) = setup(from, to, output_db, db_tool)?;
 
-    unwind_and_copy::<DB>(db_tool, from, tip_block_number, &output_db).await?;
+    unwind_and_copy(db_tool, from, tip_block_number, &output_db).await?;
 
     if should_run {
-        dry_run(output_db, to, from).await?;
+        dry_run(db_tool.chain.clone(), output_db, to, from).await?;
     }
 
     Ok(())
@@ -33,12 +33,14 @@ async fn unwind_and_copy<DB: Database>(
     tip_block_number: u64,
     output_db: &reth_db::mdbx::Env<WriteMap>,
 ) -> eyre::Result<()> {
-    let mut unwind_tx = Transaction::new(db_tool.db)?;
+    let shareable_db = ShareableDatabase::new(db_tool.db, db_tool.chain.clone());
+    let mut provider = shareable_db.provider_rw()?;
+
     let mut exec_stage = StorageHashingStage::default();
 
     exec_stage
         .unwind(
-            &mut unwind_tx,
+            &mut provider,
             UnwindInput {
                 unwind_to: from,
                 checkpoint: StageCheckpoint::new(tip_block_number),
@@ -46,26 +48,27 @@
             },
         )
         .await?;
-    let unwind_inner_tx = unwind_tx.deref_mut();
+    let unwind_inner_tx = provider.into_tx();
 
     // TODO optimize we can actually just get the entries we need for both these tables
-    output_db.update(|tx| tx.import_dupsort::<tables::PlainStorageState, _>(unwind_inner_tx))??;
-    output_db.update(|tx| tx.import_dupsort::<tables::HashedStorage, _>(unwind_inner_tx))??;
-
-    unwind_tx.drop()?;
+    output_db
+        .update(|tx| tx.import_dupsort::<tables::PlainStorageState, _>(&unwind_inner_tx))??;
+    output_db.update(|tx| tx.import_dupsort::<tables::HashedStorage, _>(&unwind_inner_tx))??;
 
     Ok(())
 }
 
 /// Try to re-execute the stage straightaway
-async fn dry_run(
-    output_db: reth_db::mdbx::Env<WriteMap>,
+async fn dry_run<DB: Database>(
+    chain: Arc<ChainSpec>,
+    output_db: DB,
     to: u64,
     from: u64,
 ) -> eyre::Result<()> {
     info!(target: "reth::cli", "Executing stage.");
 
-    let mut tx = Transaction::new(&output_db)?;
+    let shareable_db = ShareableDatabase::new(&output_db, chain);
+    let mut provider = shareable_db.provider_rw()?;
 
     let mut exec_stage = StorageHashingStage {
         clean_threshold: 1, // Forces hashing from scratch
         ..Default::default()
     };
@@ -75,7 +78,7 @@
     while !exec_output {
         exec_output = exec_stage
             .execute(
-                &mut tx,
+                &mut provider,
                 reth_stages::ExecInput {
                     target: Some(to),
                     checkpoint: Some(StageCheckpoint::new(from)),
@@ -85,8 +88,6 @@
             .done;
     }
 
-    tx.drop()?;
-
     info!(target: "reth::cli", "Success.");
 
     Ok(())
 }
diff --git a/bin/reth/src/stage/dump/merkle.rs b/bin/reth/src/stage/dump/merkle.rs
index 385afd3a2..3eb38283b 100644
--- a/bin/reth/src/stage/dump/merkle.rs
+++ b/bin/reth/src/stage/dump/merkle.rs
@@ -2,8 +2,8 @@ use super::setup;
 use crate::utils::DbTool;
 use eyre::Result;
 use reth_db::{database::Database, table::TableImporter, tables};
-use reth_primitives::{stage::StageCheckpoint, BlockNumber, MAINNET};
-use reth_provider::Transaction;
+use reth_primitives::{stage::StageCheckpoint, BlockNumber, ChainSpec};
+use reth_provider::ShareableDatabase;
 use reth_stages::{
     stages::{
         AccountHashingStage, ExecutionStage, ExecutionStageThresholds, MerkleStage,
@@ -11,7 +11,7 @@ use reth_stages::{
     },
     Stage, UnwindInput,
 };
-use std::{ops::DerefMut, path::PathBuf};
+use std::{path::PathBuf, sync::Arc};
 use tracing::info;
 
 pub(crate) async fn dump_merkle_stage(
@@ -21,7 +21,7 @@
     output_db: &PathBuf,
     should_run: bool,
 ) -> Result<()> {
-    let (output_db, tip_block_number) = setup::<DB>(from, to, output_db, db_tool)?;
+    let (output_db, tip_block_number) = setup(from, to, output_db, db_tool)?;
 
     output_db.update(|tx| {
         tx.import_table_with_range::<tables::Headers, _>(&db_tool.db.tx()?, Some(from), to)
     })??;
 
     output_db.update(|tx| {
         tx.import_table_with_range::<tables::AccountChangeSet, _>(&db_tool.db.tx()?, Some(from), to)
     })??;
 
-    unwind_and_copy::<DB>(db_tool, (from, to), tip_block_number, &output_db).await?;
+    unwind_and_copy(db_tool, (from, to), tip_block_number, &output_db).await?;
 
     if should_run {
-        dry_run(output_db, to, from).await?;
+        dry_run(db_tool.chain.clone(), output_db, to, from).await?;
     }
 
     Ok(())
@@ -48,7 +48,9 @@ async fn unwind_and_copy<DB: Database>(
     output_db: &reth_db::mdbx::Env<WriteMap>,
 ) -> eyre::Result<()> {
     let (from, to) = range;
-    let mut unwind_tx = Transaction::new(db_tool.db)?;
+    let shareable_db = ShareableDatabase::new(db_tool.db, db_tool.chain.clone());
+    let mut provider = shareable_db.provider_rw()?;
+
     let unwind = UnwindInput {
         unwind_to: from,
         checkpoint: StageCheckpoint::new(tip_block_number),
@@ -58,20 +60,21 @@
         reth_stages::ExecInput { target: Some(to), checkpoint: Some(StageCheckpoint::new(from)) };
 
     // Unwind hashes all the way to FROM
-    StorageHashingStage::default().unwind(&mut unwind_tx, unwind).await.unwrap();
-    AccountHashingStage::default().unwind(&mut unwind_tx, unwind).await.unwrap();
-    MerkleStage::default_unwind().unwind(&mut unwind_tx, unwind).await?;
+    StorageHashingStage::default().unwind(&mut provider, unwind).await.unwrap();
+    AccountHashingStage::default().unwind(&mut provider, unwind).await.unwrap();
+
+    MerkleStage::default_unwind().unwind(&mut provider, unwind).await?;
 
     // Bring Plainstate to TO (hashing stage execution requires it)
     let mut exec_stage = ExecutionStage::new(
-        reth_revm::Factory::new(MAINNET.clone()),
+        reth_revm::Factory::new(db_tool.chain.clone()),
         ExecutionStageThresholds { max_blocks: Some(u64::MAX), max_changes: None },
     );
 
     exec_stage
         .unwind(
-            &mut unwind_tx,
+            &mut provider,
             UnwindInput {
                 unwind_to: to,
                 checkpoint: StageCheckpoint::new(tip_block_number),
@@ -81,47 +84,48 @@
         .await?;
 
     // Bring hashes to TO
+
     AccountHashingStage { clean_threshold: u64::MAX, commit_threshold: u64::MAX }
-        .execute(&mut unwind_tx, execute_input)
+        .execute(&mut provider, execute_input)
         .await
         .unwrap();
     StorageHashingStage { clean_threshold: u64::MAX, commit_threshold: u64::MAX }
-        .execute(&mut unwind_tx, execute_input)
+        .execute(&mut provider, execute_input)
         .await
         .unwrap();
 
-    let unwind_inner_tx = unwind_tx.deref_mut();
+    let unwind_inner_tx = provider.into_tx();
 
     // TODO optimize we can actually just get the entries we need
-    output_db.update(|tx| tx.import_dupsort::<tables::StorageChangeSet, _>(unwind_inner_tx))??;
+    output_db.update(|tx| tx.import_dupsort::<tables::StorageChangeSet, _>(&unwind_inner_tx))??;
 
-    output_db.update(|tx| tx.import_table::<tables::HashedAccount, _>(unwind_inner_tx))??;
-    output_db.update(|tx| tx.import_dupsort::<tables::HashedStorage, _>(unwind_inner_tx))??;
-    output_db.update(|tx| tx.import_table::<tables::AccountsTrie, _>(unwind_inner_tx))??;
-    output_db.update(|tx| tx.import_dupsort::<tables::StoragesTrie, _>(unwind_inner_tx))??;
-
-    unwind_tx.drop()?;
+    output_db.update(|tx| tx.import_table::<tables::HashedAccount, _>(&unwind_inner_tx))??;
+    output_db.update(|tx| tx.import_dupsort::<tables::HashedStorage, _>(&unwind_inner_tx))??;
+    output_db.update(|tx| tx.import_table::<tables::AccountsTrie, _>(&unwind_inner_tx))??;
+    output_db.update(|tx| tx.import_dupsort::<tables::StoragesTrie, _>(&unwind_inner_tx))??;
 
     Ok(())
 }
 
 /// Try to re-execute the stage straightaway
-async fn dry_run(
-    output_db: reth_db::mdbx::Env<WriteMap>,
+async fn dry_run<DB: Database>(
+    chain: Arc<ChainSpec>,
+    output_db: DB,
     to: u64,
     from: u64,
 ) -> eyre::Result<()> {
     info!(target: "reth::cli", "Executing stage.");
-
-    let mut tx = Transaction::new(&output_db)?;
+    let shareable_db = ShareableDatabase::new(&output_db, chain);
+    let mut provider = shareable_db.provider_rw()?;
 
     let mut exec_output = false;
     while !exec_output {
         exec_output = MerkleStage::Execution {
-            clean_threshold: u64::MAX, /* Forces updating the root instead of calculating from
+            clean_threshold: u64::MAX, /* Forces updating the root instead of calculating
+                                        * from
                                         * scratch */
         }
         .execute(
-            &mut tx,
+            &mut provider,
             reth_stages::ExecInput {
                 target: Some(to),
                 checkpoint: Some(StageCheckpoint::new(from)),
@@ -131,8 +135,6 @@
             .done;
     }
 
-    tx.drop()?;
-
     info!(target: "reth::cli", "Success.");
 
     Ok(())
 }
diff --git a/bin/reth/src/stage/dump/mod.rs b/bin/reth/src/stage/dump/mod.rs
index 116a777ce..c749eb68e 100644
--- a/bin/reth/src/stage/dump/mod.rs
+++ b/bin/reth/src/stage/dump/mod.rs
@@ -106,7 +106,7 @@ impl Command {
             reth_db::mdbx::EnvKind::RW,
         )?;
 
-        let mut tool = DbTool::new(&db)?;
+        let mut tool = DbTool::new(&db, self.chain.clone())?;
 
         match &self.command {
             Stages::Execution(StageCommand { output_db, from, to, dry_run, .. }) => {
diff --git a/bin/reth/src/stage/run.rs b/bin/reth/src/stage/run.rs
index 9f4291830..4bfcea361 100644
--- a/bin/reth/src/stage/run.rs
+++ b/bin/reth/src/stage/run.rs
@@ -12,7 +12,7 @@ use reth_beacon_consensus::BeaconConsensus;
 use reth_config::Config;
 use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder;
 use reth_primitives::ChainSpec;
-use reth_provider::{providers::get_stage_checkpoint, ShareableDatabase, Transaction};
+use reth_provider::{providers::get_stage_checkpoint, ShareableDatabase};
 use reth_staged_sync::utils::init::init_db;
 use reth_stages::{
     stages::{
@@ -20,9 +20,9 @@ use reth_stages::{
         IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage,
         StorageHashingStage, TransactionLookupStage,
     },
-    ExecInput, ExecOutput, Stage, UnwindInput,
+    ExecInput, ExecOutput, PipelineError, Stage, UnwindInput,
 };
-use std::{any::Any, net::SocketAddr, ops::Deref, path::PathBuf, sync::Arc};
+use std::{any::Any, net::SocketAddr, path::PathBuf, sync::Arc};
 use tracing::*;
 
 /// `reth stage` command
@@ -120,7 +120,8 @@ impl Command {
         info!(target: "reth::cli", path = ?db_path, "Opening database");
         let db = Arc::new(init_db(db_path)?);
 
-        let mut tx = Transaction::new(db.as_ref())?;
+        let shareable_db = ShareableDatabase::new(&db, self.chain.clone());
+        let mut provider_rw = shareable_db.provider_rw().map_err(PipelineError::Interface)?;
 
         if let Some(listen_addr) = self.metrics {
             info!(target: "reth::cli", "Starting metrics endpoint at {}", listen_addr);
@@ -214,7 +215,8 @@ impl Command {
             assert!(exec_stage.type_id() == unwind_stage.type_id());
         }
 
-        let checkpoint = get_stage_checkpoint(tx.deref(), exec_stage.id())?.unwrap_or_default();
+        let checkpoint =
+            get_stage_checkpoint(provider_rw.tx_ref(), exec_stage.id())?.unwrap_or_default();
 
         let unwind_stage = unwind_stage.as_mut().unwrap_or(&mut exec_stage);
 
@@ -226,7 +228,7 @@ impl Command {
         if !self.skip_unwind {
             while unwind.checkpoint.block_number > self.from {
-                let unwind_output = unwind_stage.unwind(&mut tx, unwind).await?;
+                let unwind_output = unwind_stage.unwind(&mut provider_rw, unwind).await?;
                 unwind.checkpoint = unwind_output.checkpoint;
             }
         }
@@ -237,12 +239,13 @@ impl Command {
         };
 
         while let ExecOutput { checkpoint: stage_progress, done: false } =
-            exec_stage.execute(&mut tx, input).await?
+            exec_stage.execute(&mut provider_rw, input).await?
         {
             input.checkpoint = Some(stage_progress);
 
             if self.commit {
-                tx.commit()?;
+                provider_rw.commit()?;
+                provider_rw = shareable_db.provider_rw().map_err(PipelineError::Interface)?;
             }
         }
 
diff --git a/bin/reth/src/stage/unwind.rs b/bin/reth/src/stage/unwind.rs
index 42904bac0..12ba4e7ed 100644
--- a/bin/reth/src/stage/unwind.rs
+++ b/bin/reth/src/stage/unwind.rs
@@ -1,20 +1,21 @@
 //! Unwinding a certain block range
-use crate::dirs::{DataDirPath, MaybePlatformPath};
+use crate::{
+    args::utils::genesis_value_parser,
+    dirs::{DataDirPath, MaybePlatformPath},
+};
 use clap::{Parser, Subcommand};
 use reth_db::{
+    cursor::DbCursorRO,
     database::Database,
     mdbx::{Env, WriteMap},
     tables,
     transaction::DbTx,
 };
 use reth_primitives::{BlockHashOrNumber, ChainSpec};
-use reth_provider::Transaction;
+use reth_provider::ShareableDatabase;
 use std::{ops::RangeInclusive, sync::Arc};
 
-use crate::args::utils::genesis_value_parser;
-use reth_db::cursor::DbCursorRO;
-
 /// `reth stage unwind` command
 #[derive(Debug, Parser)]
 pub struct Command {
@@ -68,13 +69,14 @@ impl Command {
             eyre::bail!("Cannot unwind genesis block")
         }
 
-        let mut tx = Transaction::new(&db)?;
+        let shareable_db = ShareableDatabase::new(&db, self.chain.clone());
+        let provider = shareable_db.provider_rw()?;
 
-        let blocks_and_execution = tx
+        let blocks_and_execution = provider
             .take_block_and_execution_range(&self.chain, range)
             .map_err(|err| eyre::eyre!("Transaction error on unwind: {err:?}"))?;
 
-        tx.commit()?;
+        provider.commit()?;
 
         println!("Unwound {} blocks", blocks_and_execution.len());
 
diff --git a/bin/reth/src/utils.rs b/bin/reth/src/utils.rs
index 87fb8fda7..fc659cec3 100644
--- a/bin/reth/src/utils.rs
+++ b/bin/reth/src/utils.rs
@@ -11,10 +11,11 @@ use reth_interfaces::p2p::{
     headers::client::{HeadersClient, HeadersRequest},
     priority::Priority,
 };
-use reth_primitives::{BlockHashOrNumber, HeadersDirection, SealedHeader};
+use reth_primitives::{BlockHashOrNumber, ChainSpec, HeadersDirection, SealedHeader};
 use std::{
     env::VarError,
     path::{Path, PathBuf},
+    sync::Arc,
 };
 use tracing::info;
 
@@ -58,12 +59,13 @@ where
 /// Wrapper over DB that implements many useful DB queries.
 pub struct DbTool<'a, DB: Database> {
     pub(crate) db: &'a DB,
+    pub(crate) chain: Arc<ChainSpec>,
 }
 
 impl<'a, DB: Database> DbTool<'a, DB> {
     /// Takes a DB where the tables have already been created.
-    pub(crate) fn new(db: &'a DB) -> eyre::Result<Self> {
-        Ok(Self { db })
+    pub(crate) fn new(db: &'a DB, chain: Arc<ChainSpec>) -> eyre::Result<Self> {
+        Ok(Self { db, chain })
     }
 
     /// Grabs the contents of the table within a certain index range and places the
diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs
index f3a428f00..034275b33 100644
--- a/crates/blockchain-tree/src/blockchain_tree.rs
+++ b/crates/blockchain-tree/src/blockchain_tree.rs
@@ -22,7 +22,7 @@ use reth_provider::{
     chain::{ChainSplit, SplitAt},
     post_state::PostState,
     BlockNumProvider, CanonStateNotification, CanonStateNotificationSender,
-    CanonStateNotifications, Chain, ExecutorFactory, HeaderProvider, Transaction,
+    CanonStateNotifications, Chain, DatabaseProvider, ExecutorFactory, HeaderProvider,
 };
 use std::{
     collections::{BTreeMap, HashMap},
@@ -993,14 +993,18 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF>
     /// Canonicalize the given chain and commit it to the database.
     fn commit_canonical(&mut self, chain: Chain) -> Result<(), Error> {
-        let mut tx = Transaction::new(&self.externals.db)?;
+        let mut provider = DatabaseProvider::new_rw(
+            self.externals.db.tx_mut()?,
+            self.externals.chain_spec.clone(),
+        );
 
         let (blocks, state) = chain.into_inner();
 
-        tx.append_blocks_with_post_state(blocks.into_blocks().collect(), state)
+        provider
+            .append_blocks_with_post_state(blocks.into_blocks().collect(), state)
             .map_err(|e| BlockExecutionError::CanonicalCommit { inner: e.to_string() })?;
 
-        tx.commit()?;
+        provider.commit()?;
 
         Ok(())
     }
@@ -1030,17 +1034,20 @@
     fn revert_canonical(&mut self, revert_until: BlockNumber) -> Result<Option<Chain>, Error> {
         // read data that is needed for new sidechain
-        let mut tx = Transaction::new(&self.externals.db)?;
+        let provider = DatabaseProvider::new_rw(
+            self.externals.db.tx_mut()?,
+            self.externals.chain_spec.clone(),
+        );
 
-        let tip = tx.tip_number()?;
+        let tip = provider.last_block_number()?;
         let revert_range = (revert_until + 1)..=tip;
         info!(target: "blockchain_tree", "Unwinding canonical chain blocks: {:?}", revert_range);
         // read block and execution result from database. and remove traces of block from tables.
-        let blocks_and_execution = tx
+        let blocks_and_execution = provider
             .take_block_and_execution_range(self.externals.chain_spec.as_ref(), revert_range)
             .map_err(|e| BlockExecutionError::CanonicalRevert { inner: e.to_string() })?;
-        tx.commit()?;
+        provider.commit()?;
 
         if blocks_and_execution.is_empty() {
             Ok(None)
diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs
index 54d9f992b..70f555555 100644
--- a/crates/consensus/beacon/src/engine/mod.rs
+++ b/crates/consensus/beacon/src/engine/mod.rs
@@ -1287,7 +1287,6 @@ mod tests {
     use reth_primitives::{stage::StageCheckpoint, ChainSpec, ChainSpecBuilder, H256, MAINNET};
     use reth_provider::{
         providers::BlockchainProvider, test_utils::TestExecutorFactory, ShareableDatabase,
-        Transaction,
     };
     use reth_stages::{test_utils::TestStages, ExecOutput, PipelineError, StageError};
     use reth_tasks::TokioTaskExecutor;
@@ -1384,7 +1383,7 @@ mod tests {
         let pipeline = Pipeline::builder()
             .add_stages(TestStages::new(pipeline_exec_outputs, Default::default()))
             .with_tip_sender(tip_tx)
-            .build(db.clone());
+            .build(db.clone(), chain_spec.clone());
 
         // Setup blockchain tree
         let externals =
@@ -1436,7 +1435,7 @@ mod tests {
                 .build(),
         );
         let (consensus_engine, env) = setup_consensus_engine(
-            chain_spec,
+            chain_spec.clone(),
             VecDeque::from([Err(StageError::ChannelClosed)]),
             Vec::default(),
         );
@@ -1465,7 +1464,7 @@ mod tests {
                 .build(),
         );
         let (consensus_engine, env) = setup_consensus_engine(
-            chain_spec,
+            chain_spec.clone(),
             VecDeque::from([Err(StageError::ChannelClosed)]),
             Vec::default(),
         );
@@ -1505,7 +1504,7 @@ mod tests {
                 .build(),
         );
         let (consensus_engine, env) = setup_consensus_engine(
-            chain_spec,
+            chain_spec.clone(),
             VecDeque::from([
                 Ok(ExecOutput { checkpoint: StageCheckpoint::new(1), done: true }),
                 Err(StageError::ChannelClosed),
            ]),
@@ -1538,7 +1537,7 @@ mod tests {
                 .build(),
         );
         let (mut consensus_engine, env) = setup_consensus_engine(
-            chain_spec,
+            chain_spec.clone(),
             VecDeque::from([Ok(ExecOutput {
                 checkpoint: StageCheckpoint::new(max_block),
                 done: true,
            })]),
@@ -1557,12 +1556,15 @@
         assert_matches!(rx.await, Ok(Ok(())));
     }
 
-    fn insert_blocks<'a, DB: Database>(db: &DB, mut blocks: impl Iterator<Item = &'a SealedBlock>) {
-        let mut transaction = Transaction::new(db).unwrap();
-        blocks
-            .try_for_each(|b| transaction.insert_block(b.clone(), None))
-            .expect("failed to insert");
-        transaction.commit().unwrap();
+    fn insert_blocks<'a, DB: Database>(
+        db: &DB,
+        chain: Arc<ChainSpec>,
+        mut blocks: impl Iterator<Item = &'a SealedBlock>,
+    ) {
+        let factory = ShareableDatabase::new(db, chain);
+        let mut provider = factory.provider_rw().unwrap();
+        blocks.try_for_each(|b| provider.insert_block(b.clone(), None)).expect("failed to insert");
+        provider.commit().unwrap();
     }
 
     mod fork_choice_updated {
@@ -1581,7 +1583,7 @@
                 .build(),
         );
         let (consensus_engine, env) = setup_consensus_engine(
-            chain_spec,
+            chain_spec.clone(),
             VecDeque::from([Ok(ExecOutput {
                 done: true,
                 checkpoint: StageCheckpoint::new(0),
            })]),
@@ -1612,7 +1614,7 @@
                 .build(),
         );
         let (consensus_engine, env) = setup_consensus_engine(
-            chain_spec,
+            chain_spec.clone(),
             VecDeque::from([Ok(ExecOutput {
                 done: true,
                 checkpoint: StageCheckpoint::new(0),
            })]),
 
         let genesis = random_block(0, None, None, Some(0));
         let block1 = random_block(1, Some(genesis.hash), None, Some(0));
-        insert_blocks(env.db.as_ref(), [&genesis, &block1].into_iter());
+        insert_blocks(env.db.as_ref(), chain_spec.clone(), [&genesis, &block1].into_iter());
         env.db
             .update(|tx| {
                 tx.put::<tables::SyncStage>(
@@ -1660,7 +1662,7 @@
                 .build(),
         );
         let (consensus_engine, env) = setup_consensus_engine(
-            chain_spec,
+            chain_spec.clone(),
             VecDeque::from([
                 Ok(ExecOutput { done: true, checkpoint: StageCheckpoint::new(0) }),
                 Ok(ExecOutput { done: true, checkpoint: StageCheckpoint::new(0) }),
            ]),
 
         let genesis = random_block(0, None, None, Some(0));
         let block1 = random_block(1, Some(genesis.hash), None, Some(0));
 
-        insert_blocks(env.db.as_ref(), [&genesis, &block1].into_iter());
+        insert_blocks(env.db.as_ref(), chain_spec.clone(), [&genesis, &block1].into_iter());
 
         let mut engine_rx = spawn_consensus_engine(consensus_engine);
 
@@ -1686,7 +1688,7 @@
             let invalid_rx = env.send_forkchoice_updated(next_forkchoice_state).await;
 
             // Insert next head immediately after sending forkchoice update
-            insert_blocks(env.db.as_ref(), [&next_head].into_iter());
+            insert_blocks(env.db.as_ref(), chain_spec.clone(), [&next_head].into_iter());
 
             let expected_result = ForkchoiceUpdated::from_status(PayloadStatusEnum::Syncing);
             assert_matches!(invalid_rx, Ok(result) => assert_eq!(result, expected_result));
@@ -1709,7 +1711,7 @@
                 .build(),
         );
         let (consensus_engine, env) = setup_consensus_engine(
-            chain_spec,
+            chain_spec.clone(),
             VecDeque::from([Ok(ExecOutput {
                 done: true,
                 checkpoint: StageCheckpoint::new(0),
            })]),
 
         let genesis = random_block(0, None, None, Some(0));
         let block1 = random_block(1, Some(genesis.hash), None, Some(0));
 
-        insert_blocks(env.db.as_ref(), [&genesis, &block1].into_iter());
+        insert_blocks(env.db.as_ref(), chain_spec.clone(), [&genesis, &block1].into_iter());
 
         let engine = spawn_consensus_engine(consensus_engine);
 
@@ -1746,7 +1748,7 @@
                 .build(),
         );
         let (consensus_engine, env) = setup_consensus_engine(
-            chain_spec,
+            chain_spec.clone(),
             VecDeque::from([
                 Ok(ExecOutput { done: true, checkpoint: StageCheckpoint::new(0) }),
                 Ok(ExecOutput { done: true, checkpoint: StageCheckpoint::new(0) }),
            ]),
 
         let mut block3 = random_block(1, Some(genesis.hash), None, Some(0));
         block3.header.difficulty = U256::from(1);
 
-        insert_blocks(env.db.as_ref(), [&genesis, &block1, &block2, &block3].into_iter());
+        insert_blocks(
+            env.db.as_ref(),
+            chain_spec.clone(),
+            [&genesis, &block1, &block2, &block3].into_iter(),
+        );
 
        let _engine = spawn_consensus_engine(consensus_engine);
@@ -1795,7 +1801,7 @@
                 .build(),
         );
         let (consensus_engine, env) = setup_consensus_engine(
-            chain_spec,
+            chain_spec.clone(),
             VecDeque::from([
                 Ok(ExecOutput { done: true, checkpoint: StageCheckpoint::new(0) }),
                 Ok(ExecOutput { done: true, checkpoint: StageCheckpoint::new(0) }),
            ]),
@@ -1806,7 +1812,7 @@
 
         let genesis = random_block(0, None, None, Some(0));
         let block1 = random_block(1, Some(genesis.hash), None, Some(0));
 
-        insert_blocks(env.db.as_ref(), [&genesis, &block1].into_iter());
+        insert_blocks(env.db.as_ref(), chain_spec.clone(), [&genesis, &block1].into_iter());
 
         let _engine = spawn_consensus_engine(consensus_engine);
 
@@ -1842,7 +1848,7 @@
                 .build(),
         );
         let (consensus_engine, env) = setup_consensus_engine(
-            chain_spec,
+            chain_spec.clone(),
             VecDeque::from([Ok(ExecOutput {
                 done: true,
                 checkpoint: StageCheckpoint::new(0),
            })]),
@@ -1875,7 +1881,7 @@
                 .build(),
         );
         let (consensus_engine, env) = setup_consensus_engine(
-            chain_spec,
+            chain_spec.clone(),
             VecDeque::from([Ok(ExecOutput {
                 done: true,
                 checkpoint: StageCheckpoint::new(0),
            })]),
@@ -1886,7 +1892,11 @@
 
         let genesis = random_block(0, None, None, Some(0));
         let block1 = random_block(1, Some(genesis.hash), None, Some(0));
         let block2 = random_block(2, Some(block1.hash), None, Some(0));
-        insert_blocks(env.db.as_ref(), [&genesis, &block1, &block2].into_iter());
+        insert_blocks(
+            env.db.as_ref(),
+            chain_spec.clone(),
+            [&genesis, &block1, &block2].into_iter(),
+        );
 
         let mut engine_rx = spawn_consensus_engine(consensus_engine);
 
@@ -1921,7 +1931,7 @@
                 .build(),
         );
         let (consensus_engine, env) = setup_consensus_engine(
-            chain_spec,
+            chain_spec.clone(),
             VecDeque::from([Ok(ExecOutput {
                 done: true,
                 checkpoint: StageCheckpoint::new(0),
            })]),
@@ -1931,7 +1941,7 @@
 
         let genesis = random_block(0, None, None, Some(0));
 
-        insert_blocks(env.db.as_ref(), [&genesis].into_iter());
+        insert_blocks(env.db.as_ref(), chain_spec.clone(), [&genesis].into_iter());
 
         let mut engine_rx = spawn_consensus_engine(consensus_engine);
 
@@ -1978,7 +1988,7 @@
                 .build(),
         );
         let (consensus_engine, env) = setup_consensus_engine(
-            chain_spec,
+            chain_spec.clone(),
             VecDeque::from([Ok(ExecOutput {
                 done: true,
                 checkpoint: StageCheckpoint::new(0),
            })]),
@@ -1986,7 +1996,11 @@
             Vec::from([exec_result2]),
         );
 
-        insert_blocks(env.db.as_ref(), [&data.genesis, &block1].into_iter());
+        insert_blocks(
+            env.db.as_ref(),
+            chain_spec.clone(),
+            [&data.genesis, &block1].into_iter(),
+        );
 
         let mut engine_rx = spawn_consensus_engine(consensus_engine);
 
diff --git a/crates/staged-sync/Cargo.toml b/crates/staged-sync/Cargo.toml
index 70d5e2ed5..b224a4a9d 100644
--- a/crates/staged-sync/Cargo.toml
+++ b/crates/staged-sync/Cargo.toml
@@ -19,6 +19,7 @@ reth-primitives = { path = "../../crates/primitives" }
 reth-provider = { path = "../../crates/storage/provider", features = ["test-utils"] }
 reth-net-nat = { path = "../../crates/net/nat" }
 reth-stages = { path = "../stages" }
+reth-interfaces = { path = "../interfaces" }
 
 # io
 serde = "1.0"
diff --git a/crates/staged-sync/src/utils/init.rs b/crates/staged-sync/src/utils/init.rs
index e0a2342ac..323858049 100644
--- a/crates/staged-sync/src/utils/init.rs
+++ b/crates/staged-sync/src/utils/init.rs
@@ -6,7 +6,7 @@ use reth_db::{
     transaction::{DbTx, DbTxMut},
 };
 use reth_primitives::{stage::StageId, Account, Bytecode, ChainSpec, H256, U256};
-use reth_provider::{PostState, Transaction, TransactionError};
+use reth_provider::{DatabaseProviderRW, PostState, ShareableDatabase, TransactionError};
 use std::{path::Path, sync::Arc};
 use tracing::debug;
 
@@ -39,6 +39,10 @@ pub enum InitDatabaseError {
     /// Low-level database error.
     #[error(transparent)]
     DBError(#[from] reth_db::DatabaseError),
+
+    /// Internal error.
+    #[error(transparent)]
+    InternalError(#[from] reth_interfaces::Error),
 }
 
 /// Write the genesis block if it has not already been written
@@ -66,11 +70,11 @@ pub fn init_genesis<DB: Database>(
     drop(tx);
 
     debug!("Writing genesis block.");
-    let tx = db.tx_mut()?;
 
     // use transaction to insert genesis header
-    let transaction = Transaction::new_raw(&db, tx);
-    insert_genesis_hashes(transaction, genesis)?;
+    let shareable_db = ShareableDatabase::new(&db, chain.clone());
+    let provider_rw = shareable_db.provider_rw()?;
+    insert_genesis_hashes(provider_rw, genesis)?;
 
     // Insert header
     let tx = db.tx_mut()?;
@@ -123,20 +127,21 @@ pub fn insert_genesis_state<DB: Database>(
 
 /// Inserts hashes for the genesis state.
 pub fn insert_genesis_hashes<DB: Database>(
-    mut transaction: Transaction<'_, DB>,
+    provider: DatabaseProviderRW<'_, &DB>,
     genesis: &reth_primitives::Genesis,
 ) -> Result<(), InitDatabaseError> {
     // insert and hash accounts to hashing table
     let alloc_accounts =
         genesis.alloc.clone().into_iter().map(|(addr, account)| (addr, Some(account.into())));
-    transaction.insert_account_for_hashing(alloc_accounts)?;
+    provider.insert_account_for_hashing(alloc_accounts)?;
 
     let alloc_storage = genesis.alloc.clone().into_iter().filter_map(|(addr, account)| {
         // only return Some if there is storage
         account.storage.map(|storage| (addr, storage.into_iter().map(|(k, v)| (k, v.into()))))
     });
-    transaction.insert_storage_for_hashing(alloc_storage)?;
-    transaction.commit()?;
+    provider.insert_storage_for_hashing(alloc_storage)?;
+    provider.commit()?;
+
     Ok(())
 }
diff --git a/crates/stages/benches/criterion.rs b/crates/stages/benches/criterion.rs
index aa5a000fc..fb252ed2f 100644
--- a/crates/stages/benches/criterion.rs
+++ b/crates/stages/benches/criterion.rs
@@ -5,7 +5,8 @@ use criterion::{
 use pprof::criterion::{Output, PProfProfiler};
 use reth_db::mdbx::{Env, WriteMap};
 use reth_interfaces::test_utils::TestConsensus;
-use reth_primitives::stage::StageCheckpoint;
+use reth_primitives::{stage::StageCheckpoint, MAINNET};
+use reth_provider::ShareableDatabase;
 use reth_stages::{
     stages::{MerkleStage, SenderRecoveryStage, TotalDifficultyStage, TransactionLookupStage},
     test_utils::TestTransaction,
@@ -135,9 +136,10 @@ fn measure_stage_with_path(
             },
             |_| async {
                 let mut stage = stage.clone();
-                let mut db_tx = tx.inner();
-                stage.execute(&mut db_tx, input).await.unwrap();
-                db_tx.commit().unwrap();
+                let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone());
+                let mut provider = factory.provider_rw().unwrap();
+                stage.execute(&mut provider, input).await.unwrap();
+                provider.commit().unwrap();
             },
         )
     });
diff --git a/crates/stages/benches/setup/account_hashing.rs b/crates/stages/benches/setup/account_hashing.rs
index 893e2c931..fa3492adf 100644
--- a/crates/stages/benches/setup/account_hashing.rs
+++ b/crates/stages/benches/setup/account_hashing.rs
@@ -63,8 +63,9 @@ fn generate_testdata_db(num_blocks: u64) -> (PathBuf, StageRange) {
         std::fs::create_dir_all(&path).unwrap();
         println!("Account Hashing testdata not found, generating to {:?}", path.display());
         let tx = TestTransaction::new(&path);
-        let mut tx = tx.inner();
-        let _accounts = AccountHashingStage::seed(&mut tx, opts);
+        let mut provider = tx.inner();
+        let _accounts = AccountHashingStage::seed(&mut provider, opts);
+        provider.commit().expect("failed to commit");
     }
     (path, (ExecInput { target: Some(num_blocks), ..Default::default() }, UnwindInput::default()))
 }
diff --git a/crates/stages/benches/setup/mod.rs b/crates/stages/benches/setup/mod.rs
index ef7ce811a..5639a0983 100644
--- a/crates/stages/benches/setup/mod.rs
+++ b/crates/stages/benches/setup/mod.rs
@@ -9,7 +9,8 @@ use reth_interfaces::test_utils::generators::{
     random_block_range, random_contract_account_range, random_eoa_account_range,
     random_transition_range,
 };
-use reth_primitives::{Account, Address, SealedBlock, H256};
+use reth_primitives::{Account, Address, SealedBlock, H256, MAINNET};
+use reth_provider::ShareableDatabase;
 use reth_stages::{
     stages::{AccountHashingStage, StorageHashingStage},
     test_utils::TestTransaction,
@@ -18,7 +19,6 @@ use reth_stages::{
 use reth_trie::StateRoot;
 use std::{
     collections::BTreeMap,
-    ops::Deref,
     path::{Path, PathBuf},
 };
 
@@ -38,11 +38,12 @@ pub(crate) fn stage_unwind<S: Clone + Stage<Env<WriteMap>>>(
     tokio::runtime::Runtime::new().unwrap().block_on(async {
         let mut stage = stage.clone();
-        let mut db_tx = tx.inner();
+        let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone());
+        let mut provider = factory.provider_rw().unwrap();
 
         // Clear previous run
         stage
-            .unwind(&mut db_tx, unwind)
+            .unwind(&mut provider, unwind)
             .await
             .map_err(|e| {
                 format!(
@@ -52,7 +53,7 @@ pub(crate) fn stage_unwind<S: Clone + Stage<Env<WriteMap>>>(
             })
             .unwrap();
 
-        db_tx.commit().unwrap();
+        provider.commit().unwrap();
     });
 }
 
@@ -65,18 +66,19 @@ pub(crate) fn unwind_hashes<S: Clone + Stage<Env<WriteMap>>>(
     tokio::runtime::Runtime::new().unwrap().block_on(async {
         let mut stage = stage.clone();
-        let mut db_tx = tx.inner();
+        let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone());
+        let mut provider = factory.provider_rw().unwrap();
 
-        StorageHashingStage::default().unwind(&mut db_tx, unwind).await.unwrap();
-        AccountHashingStage::default().unwind(&mut db_tx, unwind).await.unwrap();
+        StorageHashingStage::default().unwind(&mut provider, unwind).await.unwrap();
+        AccountHashingStage::default().unwind(&mut provider, unwind).await.unwrap();
 
         // Clear previous run
-        stage.unwind(&mut db_tx, unwind).await.unwrap();
+        stage.unwind(&mut provider, unwind).await.unwrap();
 
-        AccountHashingStage::default().execute(&mut db_tx, input).await.unwrap();
-        StorageHashingStage::default().execute(&mut db_tx, input).await.unwrap();
+        AccountHashingStage::default().execute(&mut provider, input).await.unwrap();
+        StorageHashingStage::default().execute(&mut provider, input).await.unwrap();
 
-        db_tx.commit().unwrap();
+        provider.commit().unwrap();
     });
 }
 
@@ -121,7 +123,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> PathBuf {
         tx.insert_accounts_and_storages(start_state.clone()).unwrap();
 
         // make first block after genesis have valid state root
-        let (root, updates) = StateRoot::new(tx.inner().deref()).root_with_updates().unwrap();
+        let (root, updates) = StateRoot::new(tx.inner().tx_ref()).root_with_updates().unwrap();
         let second_block = blocks.get_mut(1).unwrap();
         let cloned_second = second_block.clone();
         let mut updated_header = cloned_second.header.unseal();
@@ -142,8 +144,8 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> PathBuf {
 
         // make last block have valid state root
         let root = {
-            let mut tx_mut = tx.inner();
-            let root = StateRoot::new(tx_mut.deref()).root().unwrap();
+            let tx_mut = tx.inner();
+            let root = StateRoot::new(tx_mut.tx_ref()).root().unwrap();
             tx_mut.commit().unwrap();
             root
         };
diff --git a/crates/stages/src/error.rs b/crates/stages/src/error.rs
index dd2ca86c9..b05b091db 100644
--- a/crates/stages/src/error.rs
+++ b/crates/stages/src/error.rs
@@ -66,6 +66,9 @@ pub enum StageError {
     /// rely on external downloaders
     #[error("Invalid download response: {0}")]
     Download(#[from] DownloadError),
+    /// Internal error
+    #[error(transparent)]
+    Internal(#[from] reth_interfaces::Error),
     /// The stage encountered a recoverable error.
     ///
     /// These types of errors are caught by the [Pipeline][crate::Pipeline] and trigger a restart
@@ -104,6 +107,9 @@ pub enum PipelineError {
     /// The pipeline encountered a database error.
     #[error("A database error occurred.")]
     Database(#[from] DbError),
+    /// The pipeline encountered an irrecoverable error in one of the stages.
+    #[error("An interface error occurred.")]
+    Interface(#[from] reth_interfaces::Error),
     /// The pipeline encountered an error while trying to send an event.
     #[error("The pipeline encountered an error while trying to send an event.")]
     Channel(#[from] SendError<PipelineEvent>),
diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs
index 80da43b24..14b31668e 100644
--- a/crates/stages/src/lib.rs
+++ b/crates/stages/src/lib.rs
@@ -20,7 +20,7 @@
 //!
 //! ```
 //! # use std::sync::Arc;
-//! use reth_db::mdbx::test_utils::create_test_rw_db;
+//! # use reth_db::mdbx::test_utils::create_test_rw_db;
 //! # use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder;
 //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder;
 //! # use reth_interfaces::consensus::Consensus;
@@ -51,7 +51,7 @@
 //! .add_stages(
 //!     DefaultStages::new(HeaderSyncMode::Tip(tip_rx), consensus, headers_downloader, bodies_downloader, factory)
 //! )
-//! .build(db);
+//! .build(db, MAINNET.clone());
 //! ```
 mod error;
 mod pipeline;
diff --git a/crates/stages/src/pipeline/builder.rs b/crates/stages/src/pipeline/builder.rs
index 6994ebe57..3cedb35b0 100644
--- a/crates/stages/src/pipeline/builder.rs
+++ b/crates/stages/src/pipeline/builder.rs
@@ -1,6 +1,8 @@
+use std::sync::Arc;
+
 use crate::{pipeline::BoxedStage, Pipeline, Stage, StageSet};
 use reth_db::database::Database;
-use reth_primitives::{stage::StageId, BlockNumber, H256};
+use reth_primitives::{stage::StageId, BlockNumber, ChainSpec, H256};
 use tokio::sync::watch;
 
 /// Builds a [`Pipeline`].
@@ -61,10 +63,11 @@ where
     /// Builds the final [`Pipeline`] using the given database.
     ///
     /// Note: it's expected that this is either an [Arc](std::sync::Arc) or an Arc wrapper type.
-    pub fn build(self, db: DB) -> Pipeline<DB> {
+    pub fn build(self, db: DB, chain_spec: Arc<ChainSpec>) -> Pipeline<DB> {
         let Self { stages, max_block, tip_tx } = self;
         Pipeline {
             db,
+            chain_spec,
             stages,
             max_block,
             tip_tx,
diff --git a/crates/stages/src/pipeline/mod.rs b/crates/stages/src/pipeline/mod.rs
index 2fd3611bd..6586365e8 100644
--- a/crates/stages/src/pipeline/mod.rs
+++ b/crates/stages/src/pipeline/mod.rs
@@ -4,10 +4,10 @@ use reth_db::database::Database;
 use reth_interfaces::executor::BlockExecutionError;
 use reth_primitives::{
     constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH, listener::EventListeners, stage::StageId,
-    BlockNumber, H256,
+    BlockNumber, ChainSpec, H256,
 };
-use reth_provider::{providers::get_stage_checkpoint, Transaction};
-use std::pin::Pin;
+use reth_provider::{providers::get_stage_checkpoint, ShareableDatabase};
+use std::{pin::Pin, sync::Arc};
 use tokio::sync::watch;
 use tokio_stream::wrappers::UnboundedReceiverStream;
 use tracing::*;
@@ -93,6 +93,8 @@ pub type PipelineWithResult<DB> = (Pipeline<DB>, Result<ControlFlow, PipelineError>);
 pub struct Pipeline<DB: Database> {
     /// The Database
     db: DB,
+    /// Chain spec
+    chain_spec: Arc<ChainSpec>,
     /// All configured stages in the order they will be executed.
     stages: Vec<BoxedStage<DB>>,
     /// The maximum block number to sync to.
@@ -245,14 +247,15 @@
         // Unwind stages in reverse order of execution
         let unwind_pipeline = self.stages.iter_mut().rev();
 
-        let mut tx = Transaction::new(&self.db)?;
+        let shareable_db = ShareableDatabase::new(&self.db, self.chain_spec.clone());
+        let mut provider_rw = shareable_db.provider_rw().map_err(PipelineError::Interface)?;
 
         for stage in unwind_pipeline {
             let stage_id = stage.id();
             let span = info_span!("Unwinding", stage = %stage_id);
             let _enter = span.enter();
 
-            let mut checkpoint = tx.get_stage_checkpoint(stage_id)?.unwrap_or_default();
+            let mut checkpoint = provider_rw.get_stage_checkpoint(stage_id)?.unwrap_or_default();
             if checkpoint.block_number < to {
                 debug!(target: "sync::pipeline", from = %checkpoint, %to, "Unwind point too far for stage");
                 self.listeners.notify(PipelineEvent::Skipped { stage_id });
@@ -264,7 +267,7 @@
             let input = UnwindInput { checkpoint, unwind_to: to, bad_block };
             self.listeners.notify(PipelineEvent::Unwinding { stage_id, input });
 
-            let output = stage.unwind(&mut tx, input).await;
+            let output = stage.unwind(&mut provider_rw, input).await;
             match output {
                 Ok(unwind_output) => {
                     checkpoint = unwind_output.checkpoint;
@@ -282,12 +285,14 @@
                         // doesn't change when we unwind.
                         None,
                     );
 
-                    tx.save_stage_checkpoint(stage_id, checkpoint)?;
+                    provider_rw.save_stage_checkpoint(stage_id, checkpoint)?;
 
                     self.listeners
                         .notify(PipelineEvent::Unwound { stage_id, result: unwind_output });
 
-                    tx.commit()?;
+                    provider_rw.commit()?;
+                    provider_rw =
+                        shareable_db.provider_rw().map_err(PipelineError::Interface)?;
                 }
                 Err(err) => {
                     self.listeners.notify(PipelineEvent::Error { stage_id });
@@ -312,10 +317,11 @@
         let mut made_progress = false;
         let target = self.max_block.or(previous_stage);
 
-        loop {
-            let mut tx = Transaction::new(&self.db)?;
+        let shareable_db = ShareableDatabase::new(&self.db, self.chain_spec.clone());
+        let mut provider_rw = shareable_db.provider_rw().map_err(PipelineError::Interface)?;
 
-            let prev_checkpoint = tx.get_stage_checkpoint(stage_id)?;
+        loop {
+            let prev_checkpoint = provider_rw.get_stage_checkpoint(stage_id)?;
 
             let stage_reached_max_block = prev_checkpoint
                 .zip(self.max_block)
@@ -343,7 +349,10 @@
                 checkpoint: prev_checkpoint,
             });
 
-            match stage.execute(&mut tx, ExecInput { target, checkpoint: prev_checkpoint }).await {
+            match stage
+                .execute(&mut provider_rw, ExecInput { target, checkpoint: prev_checkpoint })
+                .await
+            {
                 Ok(out @ ExecOutput { checkpoint, done }) => {
                     made_progress |=
                         checkpoint.block_number != prev_checkpoint.unwrap_or_default().block_number;
@@ -356,7 +365,7 @@
                         "Stage committed progress"
                     );
                     self.metrics.stage_checkpoint(stage_id, checkpoint, target);
-                    tx.save_stage_checkpoint(stage_id, checkpoint)?;
+                    provider_rw.save_stage_checkpoint(stage_id, checkpoint)?;
 
                     self.listeners.notify(PipelineEvent::Ran {
                         pipeline_position: stage_index + 1,
@@ -366,7 +375,8 @@
                     });
 
                     // TODO: Make the commit interval configurable
-                    tx.commit()?;
+                    provider_rw.commit()?;
+                    provider_rw = shareable_db.provider_rw().map_err(PipelineError::Interface)?;
 
                     if done {
                         let stage_progress = checkpoint.block_number;
@@ -466,7 +476,7 @@ mod tests {
     use reth_interfaces::{
         consensus, provider::ProviderError, test_utils::generators::random_header,
     };
-    use reth_primitives::stage::StageCheckpoint;
+    use reth_primitives::{stage::StageCheckpoint, MAINNET};
     use tokio_stream::StreamExt;
 
     #[test]
@@ -511,7 +521,7 @@
                     .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true })),
             )
             .with_max_block(10)
-            .build(db);
+            .build(db, MAINNET.clone());
         let events = pipeline.events();
 
         // Run pipeline
@@ -573,7 +583,7 @@
                     .add_unwind(Ok(UnwindOutput { checkpoint: StageCheckpoint::new(1) })),
             )
             .with_max_block(10)
-            .build(db);
+            .build(db, MAINNET.clone());
         let events = pipeline.events();
 
         // Run pipeline
@@ -683,7 +693,7 @@
                     .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true })),
             )
             .with_max_block(10)
-            .build(db);
+            .build(db, MAINNET.clone());
         let events = pipeline.events();
 
         // Run pipeline
@@ -776,7 +786,7 @@
                     .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true })),
             )
             .with_max_block(10)
-            .build(db);
+            .build(db, MAINNET.clone());
         let events = pipeline.events();
 
         // Run pipeline
@@ -859,7 +869,7 @@
                     .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true })),
             )
             .with_max_block(10)
-            .build(db);
+            .build(db, MAINNET.clone());
 
         let result = pipeline.run().await;
         assert_matches!(result, Ok(()));
@@ -869,7 +879,7 @@
             .add_stage(TestStage::new(StageId::Other("Fatal")).add_exec(Err(
                 StageError::DatabaseIntegrity(ProviderError::BlockBodyIndicesNotFound(5)),
             )))
-            .build(db);
+            .build(db, MAINNET.clone());
 
         let result = pipeline.run().await;
         assert_matches!(
             result,
diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs
index 15c927bf2..ee4858eaa 100644
--- a/crates/stages/src/sets.rs
+++ b/crates/stages/src/sets.rs
@@ -20,7 +20,7 @@
 //! # let db = create_test_rw_db();
 //! // Build a pipeline with all offline stages.
 //! # let pipeline =
-//! Pipeline::builder().add_stages(OfflineStages::new(factory)).build(db);
+//! Pipeline::builder().add_stages(OfflineStages::new(factory)).build(db, MAINNET.clone());
 //! ```
 //!
 //! ```ignore
diff --git a/crates/stages/src/stage.rs b/crates/stages/src/stage.rs
index 72b4da1fc..a7de48499 100644
--- a/crates/stages/src/stage.rs
+++ b/crates/stages/src/stage.rs
@@ -5,7 +5,7 @@ use reth_primitives::{
     stage::{StageCheckpoint, StageId},
     BlockNumber, TxNumber,
 };
-use reth_provider::{ProviderError, Transaction};
+use reth_provider::{DatabaseProviderRW, ProviderError};
 use std::{
     cmp::{max, min},
     ops::RangeInclusive,
@@ -75,11 +75,12 @@ impl ExecInput {
     /// the number of transactions exceeds the threshold.
     pub fn next_block_range_with_transaction_threshold(
         &self,
-        tx: &Transaction<'_, DB>,
+        provider: &DatabaseProviderRW<'_, DB>,
         tx_threshold: u64,
     ) -> Result<(RangeInclusive<BlockNumber>, RangeInclusive<TxNumber>, bool), StageError> {
         let start_block = self.next_block();
-        let start_block_body = tx
+        let start_block_body = provider
+            .tx_ref()
             .get::<tables::BlockBodyIndices>(start_block)?
             .ok_or(ProviderError::BlockBodyIndicesNotFound(start_block))?;
 
@@ -88,7 +89,8 @@
         let first_tx_number = start_block_body.first_tx_num();
         let mut last_tx_number = start_block_body.last_tx_num();
         let mut end_block_number = start_block;
-        let mut body_indices_cursor = tx.cursor_read::<tables::BlockBodyIndices>()?;
+        let mut body_indices_cursor =
+            provider.tx_ref().cursor_read::<tables::BlockBodyIndices>()?;
         for entry in body_indices_cursor.walk_range(start_block..=target_block)? {
             let (block, body) = entry?;
             last_tx_number = body.last_tx_num();
@@ -171,8 +173,7 @@ pub struct UnwindOutput {
 ///
 /// Stages are executed as part of a pipeline where they are executed serially.
 ///
-/// Stages receive [`Transaction`] which manages the lifecycle of a transaction,
-/// such as when to commit / reopen a new one etc.
+/// Stages receive [`DatabaseProviderRW`].
 #[async_trait]
 pub trait Stage<DB: Database>: Send + Sync {
     /// Get the ID of the stage.
@@ -183,14 +184,14 @@
     /// Execute the stage.
     async fn execute(
         &mut self,
-        tx: &mut Transaction<'_, DB>,
+        provider: &mut DatabaseProviderRW<'_, &DB>,
         input: ExecInput,
     ) -> Result<ExecOutput, StageError>;
 
     /// Unwind the stage.
     async fn unwind(
         &mut self,
-        tx: &mut Transaction<'_, DB>,
+        provider: &mut DatabaseProviderRW<'_, &DB>,
         input: UnwindInput,
     ) -> Result<UnwindOutput, StageError>;
 }
diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs
index 7ce550495..0108f7828 100644
--- a/crates/stages/src/stages/bodies.rs
+++ b/crates/stages/src/stages/bodies.rs
@@ -13,8 +13,8 @@ use reth_interfaces::{
     p2p::bodies::{downloader::BodyDownloader, response::BlockResponse},
 };
 use reth_primitives::stage::{EntitiesCheckpoint, StageCheckpoint, StageId};
-use reth_provider::Transaction;
-use std::{ops::Deref, sync::Arc};
+use reth_provider::DatabaseProviderRW;
+use std::sync::Arc;
 use tracing::*;
 
 // TODO(onbjerg): Metrics and events (gradual status for e.g. CLI)
@@ -67,7 +67,7 @@ impl<DB: Database, D: BodyDownloader> Stage<DB> for BodyStage<D> {
     /// header, limited by the stage's batch size.
async fn execute( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: ExecInput, ) -> Result { if input.target_reached() { @@ -80,6 +80,7 @@ impl Stage for BodyStage { let (from_block, to_block) = range.into_inner(); // Cursors used to write bodies, ommers and transactions + let tx = provider.tx_ref(); let mut block_indices_cursor = tx.cursor_write::()?; let mut tx_cursor = tx.cursor_write::()?; let mut tx_block_cursor = tx.cursor_write::()?; @@ -154,7 +155,7 @@ impl Stage for BodyStage { let done = highest_block == to_block; Ok(ExecOutput { checkpoint: StageCheckpoint::new(highest_block) - .with_entities_stage_checkpoint(stage_checkpoint(tx)?), + .with_entities_stage_checkpoint(stage_checkpoint(provider)?), done, }) } @@ -162,9 +163,10 @@ impl Stage for BodyStage { /// Unwind the stage. async fn unwind( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: UnwindInput, ) -> Result { + let tx = provider.tx_ref(); // Cursors to unwind bodies, ommers let mut body_cursor = tx.cursor_write::()?; let mut transaction_cursor = tx.cursor_write::()?; @@ -210,7 +212,7 @@ impl Stage for BodyStage { Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) - .with_entities_stage_checkpoint(stage_checkpoint(tx)?), + .with_entities_stage_checkpoint(stage_checkpoint(provider)?), }) } } @@ -219,11 +221,11 @@ impl Stage for BodyStage { // beforehand how many bytes we need to download. So the good solution would be to measure the // progress in gas as a proxy to size. Execution stage uses a similar approach. fn stage_checkpoint( - tx: &Transaction<'_, DB>, + provider: &DatabaseProviderRW<'_, DB>, ) -> Result { Ok(EntitiesCheckpoint { - processed: tx.deref().entries::()? as u64, - total: tx.deref().entries::()? as u64, + processed: provider.tx_ref().entries::()? as u64, + total: provider.tx_ref().entries::()? as u64, }) } diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 0a9d08f9b..f6b40ee05 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -19,7 +19,8 @@ use reth_primitives::{ Block, BlockNumber, BlockWithSenders, Header, TransactionSigned, U256, }; use reth_provider::{ - post_state::PostState, BlockExecutor, ExecutorFactory, LatestStateProviderRef, Transaction, + post_state::PostState, BlockExecutor, BlockProvider, DatabaseProviderRW, ExecutorFactory, + HeaderProvider, LatestStateProviderRef, ProviderError, WithdrawalsProvider, }; use std::{ops::RangeInclusive, time::Instant}; use tracing::*; @@ -83,22 +84,26 @@ impl ExecutionStage { Self::new(executor_factory, ExecutionStageThresholds::default()) } - // TODO: This should be in the block provider trait once we consolidate - // SharedDatabase/Transaction + // TODO(joshie): This should be in the block provider trait once we consolidate fn read_block_with_senders( - tx: &Transaction<'_, DB>, + provider: &DatabaseProviderRW<'_, &DB>, block_number: BlockNumber, ) -> Result<(BlockWithSenders, U256), StageError> { - let header = tx.get_header(block_number)?; - let td = tx.get_td(block_number)?; - let ommers = tx.get::(block_number)?.unwrap_or_default().ommers; - let withdrawals = tx.get::(block_number)?.map(|v| v.withdrawals); + let header = provider + .header_by_number(block_number)? + .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; + let td = provider + .header_td_by_number(block_number)? 
+ .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; + let ommers = provider.ommers(block_number.into())?.unwrap_or_default(); + let withdrawals = provider.withdrawals_by_block(block_number.into(), header.timestamp)?; // Get the block body - let body = tx.get::(block_number)?.unwrap(); + let body = provider.block_body_indices(block_number)?; let tx_range = body.tx_num_range(); // Get the transactions in the body + let tx = provider.tx_ref(); let (transactions, senders) = if tx_range.is_empty() { (Vec::new(), Vec::new()) } else { @@ -135,7 +140,7 @@ impl ExecutionStage { /// Execute the stage. pub fn execute_inner( &self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: ExecInput, ) -> Result { if input.target_reached() { @@ -146,17 +151,18 @@ impl ExecutionStage { let max_block = input.target(); // Build executor - let mut executor = self.executor_factory.with_sp(LatestStateProviderRef::new(&**tx)); + let mut executor = + self.executor_factory.with_sp(LatestStateProviderRef::new(provider.tx_ref())); // Progress tracking let mut stage_progress = start_block; let mut stage_checkpoint = - execution_checkpoint(tx, start_block, max_block, input.checkpoint())?; + execution_checkpoint(provider, start_block, max_block, input.checkpoint())?; // Execute block range let mut state = PostState::default(); for block_number in start_block..=max_block { - let (block, td) = Self::read_block_with_senders(tx, block_number)?; + let (block, td) = Self::read_block_with_senders(provider, block_number)?; // Configure the executor to use the current state. trace!(target: "sync::stages::execution", number = block_number, txs = block.body.len(), "Executing block"); @@ -190,7 +196,7 @@ impl ExecutionStage { // Write remaining changes trace!(target: "sync::stages::execution", accounts = state.accounts().len(), "Writing updated state to database"); let start = Instant::now(); - state.write_to_db(&**tx)?; + state.write_to_db(provider.tx_ref())?; trace!(target: "sync::stages::execution", took = ?start.elapsed(), "Wrote state"); let done = stage_progress == max_block; @@ -203,7 +209,7 @@ impl ExecutionStage { } fn execution_checkpoint( - tx: &Transaction<'_, DB>, + provider: &DatabaseProviderRW<'_, &DB>, start_block: BlockNumber, max_block: BlockNumber, checkpoint: StageCheckpoint, @@ -225,7 +231,7 @@ fn execution_checkpoint( block_range: CheckpointBlockRange { from: start_block, to: max_block }, progress: EntitiesCheckpoint { processed, - total: total + calculate_gas_used_from_headers(tx, start_block..=max_block)?, + total: total + calculate_gas_used_from_headers(provider, start_block..=max_block)?, }, }, // If checkpoint block range ends on the same block as our range, we take the previously @@ -242,7 +248,7 @@ fn execution_checkpoint( // to be processed not including the checkpoint range. Some(ExecutionCheckpoint { progress: EntitiesCheckpoint { processed, .. }, .. }) => { let after_checkpoint_block_number = - calculate_gas_used_from_headers(tx, checkpoint.block_number + 1..=max_block)?; + calculate_gas_used_from_headers(provider, checkpoint.block_number + 1..=max_block)?; ExecutionCheckpoint { block_range: CheckpointBlockRange { from: start_block, to: max_block }, @@ -255,14 +261,14 @@ fn execution_checkpoint( // Otherwise, we recalculate the whole stage checkpoint including the amount of gas // already processed, if there's any. 
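// Worked example for the fallback arm below, with illustrative numbers
// start_block = 100 and max_block = 200:
//     processed = sum of gas_used over headers 0..=99
//     total     = processed + sum of gas_used over headers 100..=200
// i.e. progress is measured in gas rather than block count, consistent with
// the EntitiesCheckpoint built by the other two arms.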
_ => { - let processed = calculate_gas_used_from_headers(tx, 0..=start_block - 1)?; + let processed = calculate_gas_used_from_headers(provider, 0..=start_block - 1)?; ExecutionCheckpoint { block_range: CheckpointBlockRange { from: start_block, to: max_block }, progress: EntitiesCheckpoint { processed, total: processed + - calculate_gas_used_from_headers(tx, start_block..=max_block)?, + calculate_gas_used_from_headers(provider, start_block..=max_block)?, }, } } @@ -270,13 +276,13 @@ fn execution_checkpoint( } fn calculate_gas_used_from_headers( - tx: &Transaction<'_, DB>, + provider: &DatabaseProviderRW<'_, &DB>, range: RangeInclusive, ) -> Result { let mut gas_total = 0; let start = Instant::now(); - for entry in tx.cursor_read::()?.walk_range(range.clone())? { + for entry in provider.tx_ref().cursor_read::()?.walk_range(range.clone())? { let (_, Header { gas_used, .. }) = entry?; gas_total += gas_used; } @@ -304,7 +310,7 @@ impl Stage for ExecutionStage { /// Execute the stage async fn execute( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: ExecInput, ) -> Result { // For Ethereum transactions that reaches the max call depth (1024) revm can use more stack @@ -321,7 +327,7 @@ impl Stage for ExecutionStage { .stack_size(BIG_STACK_SIZE) .spawn_scoped(scope, || { // execute and store output to results - self.execute_inner(tx, input) + self.execute_inner(provider, input) }) .expect("Expects that thread name is not null"); handle.join().expect("Expects for thread to not panic") @@ -331,9 +337,10 @@ impl Stage for ExecutionStage { /// Unwind the stage. async fn unwind( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: UnwindInput, ) -> Result { + let tx = provider.tx_ref(); // Acquire changeset cursors let mut account_changeset = tx.cursor_dup_write::()?; let mut storage_changeset = tx.cursor_dup_write::()?; @@ -382,7 +389,7 @@ impl Stage for ExecutionStage { } // Discard unwinded changesets - tx.unwind_table_by_num::(unwind_to)?; + provider.unwind_table_by_num::(unwind_to)?; let mut rev_storage_changeset_walker = storage_changeset.walk_back(None)?; while let Some((key, _)) = rev_storage_changeset_walker.next().transpose()? 
{ @@ -394,7 +401,7 @@ impl Stage for ExecutionStage { } // Look up the start index for the transaction range - let first_tx_num = tx.block_body_indices(*range.start())?.first_tx_num(); + let first_tx_num = provider.block_body_indices(*range.start())?.first_tx_num(); let mut stage_checkpoint = input.checkpoint.execution_stage_checkpoint(); @@ -461,15 +468,12 @@ mod tests { }; use reth_primitives::{ hex_literal::hex, keccak256, stage::StageUnitCheckpoint, Account, Bytecode, - ChainSpecBuilder, SealedBlock, StorageEntry, H160, H256, U256, + ChainSpecBuilder, SealedBlock, StorageEntry, H160, H256, MAINNET, U256, }; - use reth_provider::insert_canonical_block; + use reth_provider::{insert_canonical_block, ShareableDatabase}; use reth_revm::Factory; use reth_rlp::Decodable; - use std::{ - ops::{Deref, DerefMut}, - sync::Arc, - }; + use std::sync::Arc; fn stage() -> ExecutionStage { let factory = @@ -483,7 +487,8 @@ mod tests { #[test] fn execution_checkpoint_matches() { let state_db = create_test_db::(EnvKind::RW); - let tx = Transaction::new(state_db.as_ref()).unwrap(); + let db = ShareableDatabase::new(state_db.as_ref(), MAINNET.clone()); + let tx = db.provider_rw().unwrap(); let previous_stage_checkpoint = ExecutionCheckpoint { block_range: CheckpointBlockRange { from: 0, to: 0 }, @@ -507,15 +512,16 @@ mod tests { #[test] fn execution_checkpoint_precedes() { let state_db = create_test_db::(EnvKind::RW); - let mut tx = Transaction::new(state_db.as_ref()).unwrap(); + let db = ShareableDatabase::new(state_db.as_ref(), MAINNET.clone()); + let mut provider = db.provider_rw().unwrap(); let mut genesis_rlp = hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - insert_canonical_block(tx.deref_mut(), genesis, None).unwrap(); - insert_canonical_block(tx.deref_mut(), block.clone(), None).unwrap(); - tx.commit().unwrap(); + insert_canonical_block(provider.tx_mut(), genesis, None).unwrap(); + insert_canonical_block(provider.tx_mut(), block.clone(), None).unwrap(); + provider.commit().unwrap(); let previous_stage_checkpoint = ExecutionCheckpoint { block_range: CheckpointBlockRange { from: 0, to: 0 }, @@ -526,7 +532,8 @@ mod tests { stage_checkpoint: Some(StageUnitCheckpoint::Execution(previous_stage_checkpoint)), }; - let stage_checkpoint = execution_checkpoint(&tx, 1, 1, previous_checkpoint); + let provider = db.provider_rw().unwrap(); + let stage_checkpoint = execution_checkpoint(&provider, 1, 1, previous_checkpoint); assert_matches!(stage_checkpoint, Ok(ExecutionCheckpoint { block_range: CheckpointBlockRange { from: 1, to: 1 }, @@ -541,15 +548,16 @@ mod tests { #[test] fn execution_checkpoint_recalculate_full_previous_some() { let state_db = create_test_db::(EnvKind::RW); - let mut tx = Transaction::new(state_db.as_ref()).unwrap(); + let db = ShareableDatabase::new(state_db.as_ref(), MAINNET.clone()); + let mut provider = db.provider_rw().unwrap(); let mut genesis_rlp = 
hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - insert_canonical_block(tx.deref_mut(), genesis, None).unwrap(); - insert_canonical_block(tx.deref_mut(), block.clone(), None).unwrap(); - tx.commit().unwrap(); + insert_canonical_block(provider.tx_mut(), genesis, None).unwrap(); + insert_canonical_block(provider.tx_mut(), block.clone(), None).unwrap(); + provider.commit().unwrap(); let previous_stage_checkpoint = ExecutionCheckpoint { block_range: CheckpointBlockRange { from: 0, to: 0 }, @@ -560,7 +568,8 @@ mod tests { stage_checkpoint: Some(StageUnitCheckpoint::Execution(previous_stage_checkpoint)), }; - let stage_checkpoint = execution_checkpoint(&tx, 1, 1, previous_checkpoint); + let provider = db.provider_rw().unwrap(); + let stage_checkpoint = execution_checkpoint(&provider, 1, 1, previous_checkpoint); assert_matches!(stage_checkpoint, Ok(ExecutionCheckpoint { block_range: CheckpointBlockRange { from: 1, to: 1 }, @@ -575,19 +584,21 @@ mod tests { #[test] fn execution_checkpoint_recalculate_full_previous_none() { let state_db = create_test_db::(EnvKind::RW); - let mut tx = Transaction::new(state_db.as_ref()).unwrap(); + let db = 
ShareableDatabase::new(state_db.as_ref(), MAINNET.clone()); + let mut provider = db.provider_rw().unwrap(); let mut genesis_rlp = hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - insert_canonical_block(tx.deref_mut(), genesis, None).unwrap(); - insert_canonical_block(tx.deref_mut(), block.clone(), None).unwrap(); - tx.commit().unwrap(); + insert_canonical_block(provider.tx_mut(), genesis, None).unwrap(); + insert_canonical_block(provider.tx_mut(), block.clone(), None).unwrap(); + provider.commit().unwrap(); let previous_checkpoint = StageCheckpoint { block_number: 1, stage_checkpoint: None }; - let stage_checkpoint = execution_checkpoint(&tx, 1, 1, previous_checkpoint); + let provider = db.provider_rw().unwrap(); + let stage_checkpoint = execution_checkpoint(&provider, 1, 1, previous_checkpoint); assert_matches!(stage_checkpoint, Ok(ExecutionCheckpoint { block_range: CheckpointBlockRange { from: 1, to: 1 }, @@ -603,7 +614,8 @@ mod tests { // TODO cleanup the setup after https://github.com/paradigmxyz/reth/issues/332 // is merged as it has similar framework let state_db = create_test_db::(EnvKind::RW); - let mut tx = 
Transaction::new(state_db.as_ref()).unwrap(); + let db = ShareableDatabase::new(state_db.as_ref(), MAINNET.clone()); + let mut provider = db.provider_rw().unwrap(); let input = ExecInput { target: Some(1), /// The progress of this stage the last time it was executed. @@ -613,12 +625,13 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - insert_canonical_block(tx.deref_mut(), genesis, None).unwrap(); - insert_canonical_block(tx.deref_mut(), block.clone(), None).unwrap(); - tx.commit().unwrap(); + insert_canonical_block(provider.tx_mut(), genesis, None).unwrap(); + insert_canonical_block(provider.tx_mut(), block.clone(), None).unwrap(); + provider.commit().unwrap(); // insert pre state - let db_tx = tx.deref_mut(); + let mut provider = db.provider_rw().unwrap(); + let db_tx = provider.tx_mut(); let acc1 = H160(hex!("1000000000000000000000000000000000000000")); let acc2 = H160(hex!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b")); let code = hex!("5a465a905090036002900360015500"); @@ -637,11 +650,12 @@ mod tests { ) .unwrap(); db_tx.put::(code_hash, Bytecode::new_raw(code.to_vec().into())).unwrap(); - tx.commit().unwrap(); + provider.commit().unwrap(); + let mut provider = db.provider_rw().unwrap(); let mut execution_stage = stage(); - let output = execution_stage.execute(&mut tx, input).await.unwrap(); - tx.commit().unwrap(); + let output = execution_stage.execute(&mut provider, input).await.unwrap(); + provider.commit().unwrap(); assert_matches!(output, ExecOutput { checkpoint: StageCheckpoint { block_number: 1, @@ -658,7 +672,8 @@ mod tests { }, done: true } if processed == total && total == block.gas_used); - let tx = tx.deref_mut(); + let mut provider = db.provider_rw().unwrap(); + let tx = provider.tx_mut(); // check post state let account1 = H160(hex!("1000000000000000000000000000000000000000")); let account1_info = @@ -707,7 +722,8 @@ mod tests { // is merged as it has similar framework let state_db = create_test_db::(EnvKind::RW); - let mut tx = Transaction::new(state_db.as_ref()).unwrap(); + let db = ShareableDatabase::new(state_db.as_ref(), MAINNET.clone()); + let mut provider = db.provider_rw().unwrap(); let input = ExecInput { target: Some(1), /// The 
progress of this stage the last time it was executed. @@ -717,16 +733,17 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - insert_canonical_block(tx.deref_mut(), genesis, None).unwrap(); - insert_canonical_block(tx.deref_mut(), block.clone(), None).unwrap(); - tx.commit().unwrap(); + insert_canonical_block(provider.tx_mut(), genesis, None).unwrap(); + insert_canonical_block(provider.tx_mut(), block.clone(), None).unwrap(); + provider.commit().unwrap(); // variables let code = hex!("5a465a905090036002900360015500"); let balance = U256::from(0x3635c9adc5dea00000u128); let code_hash = keccak256(code); // pre state - let db_tx = tx.deref_mut(); + let mut provider = db.provider_rw().unwrap(); + let db_tx = provider.tx_mut(); let acc1 = H160(hex!("1000000000000000000000000000000000000000")); let acc1_info = Account { nonce: 0, balance: U256::ZERO, bytecode_hash: Some(code_hash) }; let acc2 = H160(hex!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b")); @@ -735,17 +752,19 @@ mod tests { db_tx.put::(acc1, acc1_info).unwrap(); db_tx.put::(acc2, acc2_info).unwrap(); db_tx.put::(code_hash, Bytecode::new_raw(code.to_vec().into())).unwrap(); - tx.commit().unwrap(); + provider.commit().unwrap(); // execute + let mut provider = db.provider_rw().unwrap(); let mut execution_stage = stage(); - let result = execution_stage.execute(&mut tx, input).await.unwrap(); - tx.commit().unwrap(); + let result = execution_stage.execute(&mut provider, input).await.unwrap(); + provider.commit().unwrap(); + let mut provider = db.provider_rw().unwrap(); let mut stage = stage(); let result = stage .unwind( - &mut tx, + &mut provider, UnwindInput { checkpoint: result.checkpoint, unwind_to: 0, bad_block: None }, ) .await @@ -768,7 +787,7 @@ mod tests { } if total == block.gas_used); // assert unwind stage - let db_tx = tx.deref(); + let db_tx = provider.tx_ref(); assert_eq!( db_tx.get::(acc1), Ok(Some(acc1_info)), @@ -793,7 +812,8 @@ mod tests { #[tokio::test] async fn test_selfdestruct() { let test_tx = TestTransaction::default(); - let mut tx = test_tx.inner(); + let factory = ShareableDatabase::new(test_tx.tx.as_ref(), MAINNET.clone()); + let mut provider = factory.provider_rw().unwrap(); let input = 
ExecInput { target: Some(1), /// The progress of this stage the last time it was executed. @@ -803,9 +823,9 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - insert_canonical_block(tx.deref_mut(), genesis, None).unwrap(); - insert_canonical_block(tx.deref_mut(), block.clone(), None).unwrap(); - tx.commit().unwrap(); + insert_canonical_block(provider.tx_mut(), genesis, None).unwrap(); + insert_canonical_block(provider.tx_mut(), block.clone(), None).unwrap(); + provider.commit().unwrap(); // variables let caller_address = H160(hex!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b")); @@ -817,50 +837,60 @@ mod tests { let code_hash = keccak256(code); // pre state - let db_tx = tx.deref_mut(); let caller_info = Account { nonce: 0, balance, bytecode_hash: None }; let destroyed_info = Account { nonce: 0, balance: U256::ZERO, bytecode_hash: Some(code_hash) }; // set account - db_tx.put::(caller_address, caller_info).unwrap(); - db_tx.put::(destroyed_address, destroyed_info).unwrap(); - db_tx.put::(code_hash, Bytecode::new_raw(code.to_vec().into())).unwrap(); + let provider = factory.provider_rw().unwrap(); + provider.tx_ref().put::(caller_address, caller_info).unwrap(); + provider + .tx_ref() + .put::(destroyed_address, destroyed_info) + .unwrap(); + provider + .tx_ref() + .put::(code_hash, Bytecode::new_raw(code.to_vec().into())) + .unwrap(); // set storage to check when account gets destroyed. 
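// Two plain-state slots are seeded under `destroyed_address` so the
// assertions further down can check that executing the SELFDESTRUCT block
// removes both the account and all of its storage; the writes now go
// through `provider.tx_ref()` rather than the old transaction handle.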
- db_tx + provider + .tx_ref() .put::( destroyed_address, StorageEntry { key: H256::zero(), value: U256::ZERO }, ) .unwrap(); - db_tx + provider + .tx_ref() .put::( destroyed_address, StorageEntry { key: H256::from_low_u64_be(1), value: U256::from(1u64) }, ) .unwrap(); - tx.commit().unwrap(); + provider.commit().unwrap(); // execute + let mut provider = factory.provider_rw().unwrap(); let mut execution_stage = stage(); - let _ = execution_stage.execute(&mut tx, input).await.unwrap(); - tx.commit().unwrap(); + let _ = execution_stage.execute(&mut provider, input).await.unwrap(); + provider.commit().unwrap(); // assert unwind stage + let provider = factory.provider_rw().unwrap(); assert_eq!( - tx.deref().get::(destroyed_address), + provider.tx_ref().get::(destroyed_address), Ok(None), "Account was destroyed" ); assert_eq!( - tx.deref().get::(destroyed_address), + provider.tx_ref().get::(destroyed_address), Ok(None), "There is storage for destroyed account" ); // drops tx so that it returns write privilege to test_tx - drop(tx); + drop(provider); let plain_accounts = test_tx.table::().unwrap(); let plain_storage = test_tx.table::().unwrap(); diff --git a/crates/stages/src/stages/finish.rs b/crates/stages/src/stages/finish.rs index e5f4f9218..bae21c8c7 100644 --- a/crates/stages/src/stages/finish.rs +++ b/crates/stages/src/stages/finish.rs @@ -1,7 +1,7 @@ use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_db::database::Database; use reth_primitives::stage::{StageCheckpoint, StageId}; -use reth_provider::Transaction; +use reth_provider::DatabaseProviderRW; /// The finish stage. /// @@ -18,7 +18,7 @@ impl Stage for FinishStage { async fn execute( &mut self, - _tx: &mut Transaction<'_, DB>, + _provider: &mut DatabaseProviderRW<'_, &DB>, input: ExecInput, ) -> Result { Ok(ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true }) @@ -26,7 +26,7 @@ impl Stage for FinishStage { async fn unwind( &mut self, - _tx: &mut Transaction<'_, DB>, + _provider: &mut DatabaseProviderRW<'_, &DB>, input: UnwindInput, ) -> Result { Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) }) diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index 0ac772502..ab56a3398 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -16,11 +16,11 @@ use reth_primitives::{ StageId, }, }; -use reth_provider::Transaction; +use reth_provider::{AccountExtProvider, DatabaseProviderRW}; use std::{ cmp::max, fmt::Debug, - ops::{Deref, Range, RangeInclusive}, + ops::{Range, RangeInclusive}, }; use tokio::sync::mpsc; use tracing::*; @@ -79,7 +79,7 @@ impl AccountHashingStage { /// Proceeds to go to the `BlockTransitionIndex` end, go back `transitions` and change the /// account state in the `AccountChangeSet` table. 
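///
/// `seed` no longer commits internally after this change, so callers commit
/// via the provider themselves. A sketch of the intended call site, mirroring
/// the test runner below (names and ranges are illustrative):
///
/// ```ignore
/// let mut provider = shareable_db.provider_rw()?;
/// let accounts = AccountHashingStage::seed(
///     &mut provider,
///     SeedOpts { blocks: 1..=100, accounts: 0..10, txs: 0..3 },
/// )?;
/// provider.commit()?;
/// ```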
pub fn seed( - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, DB>, opts: SeedOpts, ) -> Result, StageError> { use reth_db::models::AccountBeforeTx; @@ -92,18 +92,20 @@ impl AccountHashingStage { let blocks = random_block_range(opts.blocks.clone(), H256::zero(), opts.txs); for block in blocks { - insert_canonical_block(&**tx, block, None).unwrap(); + insert_canonical_block(provider.tx_ref(), block, None).unwrap(); } let mut accounts = random_eoa_account_range(opts.accounts); { // Account State generator - let mut account_cursor = tx.cursor_write::()?; + let mut account_cursor = + provider.tx_ref().cursor_write::()?; accounts.sort_by(|a, b| a.0.cmp(&b.0)); for (addr, acc) in accounts.iter() { account_cursor.append(*addr, *acc)?; } - let mut acc_changeset_cursor = tx.cursor_write::()?; + let mut acc_changeset_cursor = + provider.tx_ref().cursor_write::()?; for (t, (addr, acc)) in (opts.blocks).zip(&accounts) { let Account { nonce, balance, .. } = acc; let prev_acc = Account { @@ -116,8 +118,6 @@ impl AccountHashingStage { } } - tx.commit()?; - Ok(accounts) } } @@ -132,7 +132,7 @@ impl Stage for AccountHashingStage { /// Execute the stage. async fn execute( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: ExecInput, ) -> Result { if input.target_reached() { @@ -146,6 +146,7 @@ impl Stage for AccountHashingStage { // AccountHashing table. Also, if we start from genesis, we need to hash from scratch, as // genesis accounts are not in changeset. if to_block - from_block > self.clean_threshold || from_block == 1 { + let tx = provider.tx_ref(); let stage_checkpoint = input .checkpoint .and_then(|checkpoint| checkpoint.account_hashing_stage_checkpoint()); @@ -231,7 +232,7 @@ impl Stage for AccountHashingStage { AccountHashingCheckpoint { address: Some(next_address.key().unwrap()), block_range: CheckpointBlockRange { from: from_block, to: to_block }, - progress: stage_checkpoint_progress(tx)?, + progress: stage_checkpoint_progress(provider)?, }, ); @@ -240,20 +241,20 @@ impl Stage for AccountHashingStage { } else { // Aggregate all transition changesets and make a list of accounts that have been // changed. - let lists = tx.get_addresses_of_changed_accounts(from_block..=to_block)?; + let lists = provider.changed_accounts_with_range(from_block..=to_block)?; // Iterate over plain state and get newest value. // Assumption we are okay to make is that plainstate represent // `previous_stage_progress` state. - let accounts = tx.get_plainstate_accounts(lists)?; + let accounts = provider.basic_accounts(lists)?; // Insert and hash accounts to hashing table - tx.insert_account_for_hashing(accounts.into_iter())?; + provider.insert_account_for_hashing(accounts.into_iter())?; } // We finished the hashing stage, no future iterations is expected for the same block range, // so no checkpoint is needed. let checkpoint = StageCheckpoint::new(input.target()) .with_account_hashing_stage_checkpoint(AccountHashingCheckpoint { - progress: stage_checkpoint_progress(tx)?, + progress: stage_checkpoint_progress(provider)?, ..Default::default() }); @@ -263,19 +264,19 @@ impl Stage for AccountHashingStage { /// Unwind the stage. async fn unwind( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: UnwindInput, ) -> Result { let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); // Aggregate all transition changesets and make a list of accounts that have been changed. 
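// (`unwind_account_hashing`, moved from `Transaction` onto the provider in
// this diff, roughly: walk the account changesets over `range` and write the
// pre-change values back into the hashed-account table.)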
- tx.unwind_account_hashing(range)?; + provider.unwind_account_hashing(range)?; let mut stage_checkpoint = input.checkpoint.account_hashing_stage_checkpoint().unwrap_or_default(); - stage_checkpoint.progress = stage_checkpoint_progress(tx)?; + stage_checkpoint.progress = stage_checkpoint_progress(provider)?; Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_progress) @@ -285,11 +286,11 @@ impl Stage for AccountHashingStage { } fn stage_checkpoint_progress( - tx: &Transaction<'_, DB>, + provider: &DatabaseProviderRW<'_, &DB>, ) -> Result { Ok(EntitiesCheckpoint { - processed: tx.deref().entries::()? as u64, - total: tx.deref().entries::()? as u64, + processed: provider.tx_ref().entries::()? as u64, + total: provider.tx_ref().entries::()? as u64, }) } @@ -531,11 +532,14 @@ mod tests { type Seed = Vec<(Address, Account)>; fn seed_execution(&mut self, input: ExecInput) -> Result { - Ok(AccountHashingStage::seed( - &mut self.tx.inner(), + let mut provider = self.tx.inner(); + let res = Ok(AccountHashingStage::seed( + &mut provider, SeedOpts { blocks: 1..=input.target(), accounts: 0..10, txs: 0..3 }, ) - .unwrap()) + .unwrap()); + provider.commit().expect("failed to commit"); + res } fn validate_execution( diff --git a/crates/stages/src/stages/hashing_storage.rs b/crates/stages/src/stages/hashing_storage.rs index d8dfc0054..acb109b0e 100644 --- a/crates/stages/src/stages/hashing_storage.rs +++ b/crates/stages/src/stages/hashing_storage.rs @@ -16,8 +16,8 @@ use reth_primitives::{ }, StorageEntry, }; -use reth_provider::Transaction; -use std::{collections::BTreeMap, fmt::Debug, ops::Deref}; +use reth_provider::DatabaseProviderRW; +use std::{collections::BTreeMap, fmt::Debug}; use tracing::*; /// Storage hashing stage hashes plain storage. @@ -54,9 +54,10 @@ impl Stage for StorageHashingStage { /// Execute the stage. async fn execute( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: ExecInput, ) -> Result { + let tx = provider.tx_ref(); if input.target_reached() { return Ok(ExecOutput::done(input.checkpoint())) } @@ -161,7 +162,7 @@ impl Stage for StorageHashingStage { address: current_key, storage: current_subkey, block_range: CheckpointBlockRange { from: from_block, to: to_block }, - progress: stage_checkpoint_progress(tx)?, + progress: stage_checkpoint_progress(provider)?, }, ); @@ -170,19 +171,20 @@ impl Stage for StorageHashingStage { } else { // Aggregate all changesets and and make list of storages that have been // changed. - let lists = tx.get_addresses_and_keys_of_changed_storages(from_block..=to_block)?; + let lists = + provider.get_addresses_and_keys_of_changed_storages(from_block..=to_block)?; // iterate over plain state and get newest storage value. // Assumption we are okay with is that plain state represent // `previous_stage_progress` state. - let storages = tx.get_plainstate_storages(lists)?; - tx.insert_storage_for_hashing(storages.into_iter())?; + let storages = provider.get_plainstate_storages(lists)?; + provider.insert_storage_for_hashing(storages.into_iter())?; } // We finished the hashing stage, no future iterations is expected for the same block range, // so no checkpoint is needed. let checkpoint = StageCheckpoint::new(input.target()) .with_storage_hashing_stage_checkpoint(StorageHashingCheckpoint { - progress: stage_checkpoint_progress(tx)?, + progress: stage_checkpoint_progress(provider)?, ..Default::default() }); @@ -192,18 +194,18 @@ impl Stage for StorageHashingStage { /// Unwind the stage. 
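/// Mirrors the account hashing unwind above: the range is capped by
/// `commit_threshold`, hashed storage is restored via
/// `unwind_storage_hashing`, and the entities checkpoint is refreshed from
/// the hashed table counts.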
async fn unwind( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: UnwindInput, ) -> Result { let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - tx.unwind_storage_hashing(BlockNumberAddress::range(range))?; + provider.unwind_storage_hashing(BlockNumberAddress::range(range))?; let mut stage_checkpoint = input.checkpoint.storage_hashing_stage_checkpoint().unwrap_or_default(); - stage_checkpoint.progress = stage_checkpoint_progress(tx)?; + stage_checkpoint.progress = stage_checkpoint_progress(provider)?; Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_progress) @@ -213,11 +215,11 @@ impl Stage for StorageHashingStage { } fn stage_checkpoint_progress( - tx: &Transaction<'_, DB>, + provider: &DatabaseProviderRW<'_, &DB>, ) -> Result { Ok(EntitiesCheckpoint { - processed: tx.deref().entries::()? as u64, - total: tx.deref().entries::()? as u64, + processed: provider.tx_ref().entries::()? as u64, + total: provider.tx_ref().entries::()? as u64, }) } diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index faf8bdc52..ad857d635 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -19,7 +19,8 @@ use reth_primitives::{ }, BlockHashOrNumber, BlockNumber, SealedHeader, H256, }; -use reth_provider::Transaction; +use reth_provider::DatabaseProviderRW; +use std::ops::Deref; use tokio::sync::watch; use tracing::*; @@ -68,7 +69,7 @@ where fn is_stage_done( &self, - tx: &Transaction<'_, DB>, + tx: &>::TXMut, checkpoint: u64, ) -> Result { let mut header_cursor = tx.cursor_read::()?; @@ -84,12 +85,12 @@ where /// See also [SyncTarget] async fn get_sync_gap( &mut self, - tx: &Transaction<'_, DB>, + provider: &DatabaseProviderRW<'_, &DB>, checkpoint: u64, ) -> Result { // Create a cursor over canonical header hashes - let mut cursor = tx.cursor_read::()?; - let mut header_cursor = tx.cursor_read::()?; + let mut cursor = provider.tx_ref().cursor_read::()?; + let mut header_cursor = provider.tx_ref().cursor_read::()?; // Get head hash and reposition the cursor let (head_num, head_hash) = cursor @@ -149,7 +150,7 @@ where /// Note: this writes the headers with rising block numbers. fn write_headers( &self, - tx: &Transaction<'_, DB>, + tx: &>::TXMut, headers: Vec, ) -> Result, StageError> { trace!(target: "sync::stages::headers", len = headers.len(), "writing headers"); @@ -195,13 +196,14 @@ where /// starting from the tip of the chain async fn execute( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: ExecInput, ) -> Result { + let tx = provider.tx_ref(); let current_checkpoint = input.checkpoint(); // Lookup the head and tip of the sync range - let gap = self.get_sync_gap(tx, current_checkpoint.block_number).await?; + let gap = self.get_sync_gap(provider.deref(), current_checkpoint.block_number).await?; let local_head = gap.local_head.number; let tip = gap.target.tip(); @@ -301,7 +303,7 @@ where // Write the headers to db self.write_headers::(tx, downloaded_headers)?.unwrap_or_default(); - if self.is_stage_done(tx, current_checkpoint.block_number)? { + if self.is_stage_done::(tx, current_checkpoint.block_number)? { let checkpoint = current_checkpoint.block_number.max( tx.cursor_read::()? .last()? @@ -324,15 +326,15 @@ where /// Unwind the stage. 
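/// Unwinds the three header-related tables back to `unwind_to` (one via a
/// key walker, two by block number) and shrinks the checkpoint's processed
/// count by the number of headers that were removed.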
async fn unwind( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: UnwindInput, ) -> Result { // TODO: handle bad block - tx.unwind_table_by_walker::( + provider.unwind_table_by_walker::( input.unwind_to + 1, )?; - tx.unwind_table_by_num::(input.unwind_to)?; - let unwound_headers = tx.unwind_table_by_num::(input.unwind_to)?; + provider.unwind_table_by_num::(input.unwind_to)?; + let unwound_headers = provider.unwind_table_by_num::(input.unwind_to)?; let stage_checkpoint = input.checkpoint.headers_stage_checkpoint().map(|stage_checkpoint| HeadersCheckpoint { @@ -380,13 +382,15 @@ impl SyncGap { #[cfg(test)] mod tests { + use super::*; use crate::test_utils::{ stage_test_suite, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, }; use assert_matches::assert_matches; use reth_interfaces::test_utils::generators::random_header; - use reth_primitives::{stage::StageUnitCheckpoint, H256}; + use reth_primitives::{stage::StageUnitCheckpoint, H256, MAINNET}; + use reth_provider::ShareableDatabase; use test_runner::HeadersTestRunner; mod test_runner { @@ -598,7 +602,9 @@ mod tests { #[tokio::test] async fn head_and_tip_lookup() { let runner = HeadersTestRunner::default(); - let tx = runner.tx().inner(); + let factory = ShareableDatabase::new(runner.tx().tx.as_ref(), MAINNET.clone()); + let provider = factory.provider_rw().unwrap(); + let tx = provider.tx_ref(); let mut stage = runner.stage(); let consensus_tip = H256::random(); @@ -612,7 +618,7 @@ mod tests { // Empty database assert_matches!( - stage.get_sync_gap(&tx, checkpoint).await, + stage.get_sync_gap(&provider, checkpoint).await, Err(StageError::DatabaseIntegrity(ProviderError::HeaderNotFound(block_number))) if block_number.as_number().unwrap() == checkpoint ); @@ -623,7 +629,7 @@ mod tests { tx.put::(head.number, head.clone().unseal()) .expect("failed to write header"); - let gap = stage.get_sync_gap(&tx, checkpoint).await.unwrap(); + let gap = stage.get_sync_gap(&provider, checkpoint).await.unwrap(); assert_eq!(gap.local_head, head); assert_eq!(gap.target.tip(), consensus_tip.into()); @@ -633,7 +639,7 @@ mod tests { tx.put::(gap_tip.number, gap_tip.clone().unseal()) .expect("failed to write header"); - let gap = stage.get_sync_gap(&tx, checkpoint).await.unwrap(); + let gap = stage.get_sync_gap(&provider, checkpoint).await.unwrap(); assert_eq!(gap.local_head, head); assert_eq!(gap.target.tip(), gap_tip.parent_hash.into()); @@ -644,7 +650,7 @@ mod tests { .expect("failed to write header"); assert_matches!( - stage.get_sync_gap(&tx, checkpoint).await, + stage.get_sync_gap(&provider, checkpoint).await, Err(StageError::StageCheckpoint(_checkpoint)) if _checkpoint == checkpoint ); } diff --git a/crates/stages/src/stages/index_account_history.rs b/crates/stages/src/stages/index_account_history.rs index 108481eb8..f96500909 100644 --- a/crates/stages/src/stages/index_account_history.rs +++ b/crates/stages/src/stages/index_account_history.rs @@ -6,11 +6,8 @@ use reth_primitives::{ }, BlockNumber, }; -use reth_provider::Transaction; -use std::{ - fmt::Debug, - ops::{Deref, RangeInclusive}, -}; +use reth_provider::DatabaseProviderRW; +use std::{fmt::Debug, ops::RangeInclusive}; /// Stage is indexing history the account changesets generated in /// [`ExecutionStage`][crate::stages::ExecutionStage]. For more information @@ -38,7 +35,7 @@ impl Stage for IndexAccountHistoryStage { /// Execute the stage. 
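/// Collects, per account, the block numbers at which it changed within the
/// capped range, appends them to the history index, and advances `processed`
/// by the number of changeset entries folded in.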
async fn execute( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: ExecInput, ) -> Result { if input.target_reached() { @@ -48,18 +45,18 @@ impl Stage for IndexAccountHistoryStage { let (range, is_final_range) = input.next_block_range_with_threshold(self.commit_threshold); let mut stage_checkpoint = stage_checkpoint( - tx, + provider, input.checkpoint(), // It is important to provide the full block range into the checkpoint, // not the one accounting for commit threshold, to get the correct range end. &input.next_block_range(), )?; - let indices = tx.get_account_transition_ids_from_changeset(range.clone())?; + let indices = provider.get_account_transition_ids_from_changeset(range.clone())?; let changesets = indices.values().map(|blocks| blocks.len() as u64).sum::(); // Insert changeset to history index - tx.insert_account_history_index(indices)?; + provider.insert_account_history_index(indices)?; stage_checkpoint.progress.processed += changesets; @@ -73,13 +70,13 @@ impl Stage for IndexAccountHistoryStage { /// Unwind the stage. async fn unwind( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: UnwindInput, ) -> Result { let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - let changesets = tx.unwind_account_history_indices(range)?; + let changesets = provider.unwind_account_history_indices(range)?; let checkpoint = if let Some(mut stage_checkpoint) = input.checkpoint.index_history_stage_checkpoint() { @@ -105,7 +102,7 @@ impl Stage for IndexAccountHistoryStage { /// given block range and calculates the progress by counting the number of processed entries in the /// [tables::AccountChangeSet] table within the given block range. fn stage_checkpoint( - tx: &Transaction<'_, DB>, + provider: &DatabaseProviderRW<'_, &DB>, checkpoint: StageCheckpoint, range: &RangeInclusive, ) -> Result { @@ -122,18 +119,19 @@ fn stage_checkpoint( block_range: CheckpointBlockRange::from(range), progress: EntitiesCheckpoint { processed: progress.processed, - total: tx.deref().entries::()? as u64, + total: provider.tx_ref().entries::()? as u64, }, } } _ => IndexHistoryCheckpoint { block_range: CheckpointBlockRange::from(range), progress: EntitiesCheckpoint { - processed: tx + processed: provider + .tx_ref() .cursor_read::()? .walk_range(0..=checkpoint.block_number)? .count() as u64, - total: tx.deref().entries::()? as u64, + total: provider.tx_ref().entries::()? 
as u64, }, }, }) @@ -142,6 +140,7 @@ fn stage_checkpoint( #[cfg(test)] mod tests { use assert_matches::assert_matches; + use reth_provider::ShareableDatabase; use std::collections::BTreeMap; use super::*; @@ -155,7 +154,7 @@ mod tests { transaction::DbTxMut, BlockNumberList, }; - use reth_primitives::{hex_literal::hex, H160}; + use reth_primitives::{hex_literal::hex, H160, MAINNET}; const ADDRESS: H160 = H160(hex!("0000000000000000000000000000000000000001")); @@ -211,8 +210,9 @@ mod tests { async fn run(tx: &TestTransaction, run_to: u64) { let input = ExecInput { target: Some(run_to), ..Default::default() }; let mut stage = IndexAccountHistoryStage::default(); - let mut tx = tx.inner(); - let out = stage.execute(&mut tx, input).await.unwrap(); + let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone()); + let mut provider = factory.provider_rw().unwrap(); + let out = stage.execute(&mut provider, input).await.unwrap(); assert_eq!( out, ExecOutput { @@ -225,7 +225,7 @@ mod tests { done: true } ); - tx.commit().unwrap(); + provider.commit().unwrap(); } async fn unwind(tx: &TestTransaction, unwind_from: u64, unwind_to: u64) { @@ -235,10 +235,11 @@ mod tests { ..Default::default() }; let mut stage = IndexAccountHistoryStage::default(); - let mut tx = tx.inner(); - let out = stage.unwind(&mut tx, input).await.unwrap(); + let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone()); + let mut provider = factory.provider_rw().unwrap(); + let out = stage.unwind(&mut provider, input).await.unwrap(); assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) }); - tx.commit().unwrap(); + provider.commit().unwrap(); } #[tokio::test] @@ -448,10 +449,11 @@ mod tests { // run { let mut stage = IndexAccountHistoryStage { commit_threshold: 4 }; // Two runs required - let mut tx = test_tx.inner(); + let factory = ShareableDatabase::new(&test_tx.tx, MAINNET.clone()); + let mut provider = factory.provider_rw().unwrap(); let mut input = ExecInput { target: Some(5), ..Default::default() }; - let out = stage.execute(&mut tx, input).await.unwrap(); + let out = stage.execute(&mut provider, input).await.unwrap(); assert_eq!( out, ExecOutput { @@ -466,7 +468,7 @@ mod tests { ); input.checkpoint = Some(out.checkpoint); - let out = stage.execute(&mut tx, input).await.unwrap(); + let out = stage.execute(&mut provider, input).await.unwrap(); assert_eq!( out, ExecOutput { @@ -480,7 +482,7 @@ mod tests { } ); - tx.commit().unwrap(); + provider.commit().unwrap(); } // verify @@ -536,8 +538,11 @@ mod tests { }) .unwrap(); + let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone()); + let provider = factory.provider_rw().unwrap(); + assert_matches!( - stage_checkpoint(&tx.inner(), StageCheckpoint::new(1), &(1..=2)).unwrap(), + stage_checkpoint(&provider, StageCheckpoint::new(1), &(1..=2)).unwrap(), IndexHistoryCheckpoint { block_range: CheckpointBlockRange { from: 1, to: 2 }, progress: EntitiesCheckpoint { processed: 2, total: 4 } diff --git a/crates/stages/src/stages/index_storage_history.rs b/crates/stages/src/stages/index_storage_history.rs index bc2a42618..cc354a4da 100644 --- a/crates/stages/src/stages/index_storage_history.rs +++ b/crates/stages/src/stages/index_storage_history.rs @@ -9,11 +9,8 @@ use reth_primitives::{ }, BlockNumber, }; -use reth_provider::Transaction; -use std::{ - fmt::Debug, - ops::{Deref, RangeInclusive}, -}; +use reth_provider::DatabaseProviderRW; +use std::{fmt::Debug, ops::RangeInclusive}; /// Stage is indexing history the account changesets 
generated in /// [`ExecutionStage`][crate::stages::ExecutionStage]. For more information @@ -41,7 +38,7 @@ impl Stage for IndexStorageHistoryStage { /// Execute the stage. async fn execute( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: ExecInput, ) -> Result { if input.target_reached() { @@ -51,17 +48,17 @@ impl Stage for IndexStorageHistoryStage { let (range, is_final_range) = input.next_block_range_with_threshold(self.commit_threshold); let mut stage_checkpoint = stage_checkpoint( - tx, + provider, input.checkpoint(), // It is important to provide the full block range into the checkpoint, // not the one accounting for commit threshold, to get the correct range end. &input.next_block_range(), )?; - let indices = tx.get_storage_transition_ids_from_changeset(range.clone())?; + let indices = provider.get_storage_transition_ids_from_changeset(range.clone())?; let changesets = indices.values().map(|blocks| blocks.len() as u64).sum::(); - tx.insert_storage_history_index(indices)?; + provider.insert_storage_history_index(indices)?; stage_checkpoint.progress.processed += changesets; @@ -75,13 +72,14 @@ impl Stage for IndexStorageHistoryStage { /// Unwind the stage. async fn unwind( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: UnwindInput, ) -> Result { let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - let changesets = tx.unwind_storage_history_indices(BlockNumberAddress::range(range))?; + let changesets = + provider.unwind_storage_history_indices(BlockNumberAddress::range(range))?; let checkpoint = if let Some(mut stage_checkpoint) = input.checkpoint.index_history_stage_checkpoint() { @@ -106,7 +104,7 @@ impl Stage for IndexStorageHistoryStage { /// given block range and calculates the progress by counting the number of processed entries in the /// [tables::StorageChangeSet] table within the given block range. fn stage_checkpoint( - tx: &Transaction<'_, DB>, + provider: &DatabaseProviderRW<'_, &DB>, checkpoint: StageCheckpoint, range: &RangeInclusive, ) -> Result { @@ -123,18 +121,19 @@ fn stage_checkpoint( block_range: CheckpointBlockRange::from(range), progress: EntitiesCheckpoint { processed: progress.processed, - total: tx.deref().entries::()? as u64, + total: provider.tx_ref().entries::()? as u64, }, } } _ => IndexHistoryCheckpoint { block_range: CheckpointBlockRange::from(range), progress: EntitiesCheckpoint { - processed: tx + processed: provider + .tx_ref() .cursor_read::()? .walk_range(BlockNumberAddress::range(0..=checkpoint.block_number))? .count() as u64, - total: tx.deref().entries::()? as u64, + total: provider.tx_ref().entries::()? 
as u64, }, }, }) @@ -144,6 +143,7 @@ fn stage_checkpoint( mod tests { use assert_matches::assert_matches; + use reth_provider::ShareableDatabase; use std::collections::BTreeMap; use super::*; @@ -157,7 +157,7 @@ mod tests { transaction::DbTxMut, BlockNumberList, }; - use reth_primitives::{hex_literal::hex, StorageEntry, H160, H256, U256}; + use reth_primitives::{hex_literal::hex, StorageEntry, H160, H256, MAINNET, U256}; const ADDRESS: H160 = H160(hex!("0000000000000000000000000000000000000001")); const STORAGE_KEY: H256 = @@ -223,8 +223,9 @@ mod tests { async fn run(tx: &TestTransaction, run_to: u64) { let input = ExecInput { target: Some(run_to), ..Default::default() }; let mut stage = IndexStorageHistoryStage::default(); - let mut tx = tx.inner(); - let out = stage.execute(&mut tx, input).await.unwrap(); + let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone()); + let mut provider = factory.provider_rw().unwrap(); + let out = stage.execute(&mut provider, input).await.unwrap(); assert_eq!( out, ExecOutput { @@ -237,7 +238,7 @@ mod tests { done: true } ); - tx.commit().unwrap(); + provider.commit().unwrap(); } async fn unwind(tx: &TestTransaction, unwind_from: u64, unwind_to: u64) { @@ -247,10 +248,11 @@ mod tests { ..Default::default() }; let mut stage = IndexStorageHistoryStage::default(); - let mut tx = tx.inner(); - let out = stage.unwind(&mut tx, input).await.unwrap(); + let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone()); + let mut provider = factory.provider_rw().unwrap(); + let out = stage.unwind(&mut provider, input).await.unwrap(); assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) }); - tx.commit().unwrap(); + provider.commit().unwrap(); } #[tokio::test] @@ -463,10 +465,11 @@ mod tests { // run { let mut stage = IndexStorageHistoryStage { commit_threshold: 4 }; // Two runs required - let mut tx = test_tx.inner(); + let factory = ShareableDatabase::new(&test_tx.tx, MAINNET.clone()); + let mut provider = factory.provider_rw().unwrap(); let mut input = ExecInput { target: Some(5), ..Default::default() }; - let out = stage.execute(&mut tx, input).await.unwrap(); + let out = stage.execute(&mut provider, input).await.unwrap(); assert_eq!( out, ExecOutput { @@ -481,7 +484,7 @@ mod tests { ); input.checkpoint = Some(out.checkpoint); - let out = stage.execute(&mut tx, input).await.unwrap(); + let out = stage.execute(&mut provider, input).await.unwrap(); assert_eq!( out, ExecOutput { @@ -495,7 +498,7 @@ mod tests { } ); - tx.commit().unwrap(); + provider.commit().unwrap(); } // verify @@ -561,8 +564,11 @@ mod tests { }) .unwrap(); + let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone()); + let provider = factory.provider_rw().unwrap(); + assert_matches!( - stage_checkpoint(&tx.inner(), StageCheckpoint::new(1), &(1..=2)).unwrap(), + stage_checkpoint(&provider, StageCheckpoint::new(1), &(1..=2)).unwrap(), IndexHistoryCheckpoint { block_range: CheckpointBlockRange { from: 1, to: 2 }, progress: EntitiesCheckpoint { processed: 3, total: 6 } diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index 4eb9f5033..920181578 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -12,12 +12,9 @@ use reth_primitives::{ trie::StoredSubNode, BlockNumber, SealedHeader, H256, }; -use reth_provider::Transaction; +use reth_provider::{DatabaseProviderRW, HeaderProvider, ProviderError}; use reth_trie::{IntermediateStateRootState, StateRoot, StateRootProgress}; -use 
std::{ - fmt::Debug, - ops::{Deref, DerefMut}, -}; +use std::fmt::Debug; use tracing::*; /// The merkle hashing stage uses input from @@ -93,11 +90,10 @@ impl MerkleStage { /// Gets the hashing progress pub fn get_execution_checkpoint( &self, - tx: &Transaction<'_, DB>, + provider: &DatabaseProviderRW<'_, &DB>, ) -> Result, StageError> { - let buf = tx - .get::(StageId::MerkleExecute.to_string())? - .unwrap_or_default(); + let buf = + provider.get_stage_checkpoint_progress(StageId::MerkleExecute)?.unwrap_or_default(); if buf.is_empty() { return Ok(None) @@ -110,7 +106,7 @@ impl MerkleStage { /// Saves the hashing progress pub fn save_execution_checkpoint( &mut self, - tx: &Transaction<'_, DB>, + provider: &DatabaseProviderRW<'_, &DB>, checkpoint: Option, ) -> Result<(), StageError> { let mut buf = vec![]; @@ -123,8 +119,7 @@ impl MerkleStage { ); checkpoint.to_compact(&mut buf); } - tx.put::(StageId::MerkleExecute.to_string(), buf)?; - Ok(()) + Ok(provider.save_stage_checkpoint_progress(StageId::MerkleExecute, buf)?) } } @@ -143,7 +138,7 @@ impl Stage for MerkleStage { /// Execute the stage. async fn execute( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: ExecInput, ) -> Result { let threshold = match self { @@ -160,10 +155,12 @@ impl Stage for MerkleStage { let (from_block, to_block) = range.clone().into_inner(); let current_block = input.target(); - let block = tx.get_header(current_block)?; + let block = provider + .header_by_number(current_block)? + .ok_or_else(|| ProviderError::HeaderNotFound(current_block.into()))?; let block_root = block.state_root; - let mut checkpoint = self.get_execution_checkpoint(tx)?; + let mut checkpoint = self.get_execution_checkpoint(provider)?; let (trie_root, entities_checkpoint) = if range.is_empty() { (block_root, input.checkpoint().entities_stage_checkpoint().unwrap_or_default()) @@ -192,25 +189,27 @@ impl Stage for MerkleStage { ); // Reset the checkpoint and clear trie tables checkpoint = None; - self.save_execution_checkpoint(tx, None)?; - tx.clear::()?; - tx.clear::()?; + self.save_execution_checkpoint(provider, None)?; + provider.tx_ref().clear::()?; + provider.tx_ref().clear::()?; None } .unwrap_or(EntitiesCheckpoint { processed: 0, - total: (tx.deref().entries::()? + - tx.deref().entries::()?) as u64, + total: (provider.tx_ref().entries::()? + + provider.tx_ref().entries::()?) 
+ as u64, }); - let progress = StateRoot::new(tx.deref_mut()) + let tx = provider.tx_ref(); + let progress = StateRoot::new(tx) .with_intermediate_state(checkpoint.map(IntermediateStateRootState::from)) .root_with_progress() .map_err(|e| StageError::Fatal(Box::new(e)))?; match progress { StateRootProgress::Progress(state, hashed_entries_walked, updates) => { - updates.flush(tx.deref_mut())?; + updates.flush(tx)?; let checkpoint = MerkleCheckpoint::new( to_block, @@ -219,7 +218,7 @@ impl Stage for MerkleStage { state.walker_stack.into_iter().map(StoredSubNode::from).collect(), state.hash_builder.into(), ); - self.save_execution_checkpoint(tx, Some(checkpoint))?; + self.save_execution_checkpoint(provider, Some(checkpoint))?; entities_checkpoint.processed += hashed_entries_walked as u64; @@ -231,7 +230,7 @@ impl Stage for MerkleStage { }) } StateRootProgress::Complete(root, hashed_entries_walked, updates) => { - updates.flush(tx.deref_mut())?; + updates.flush(tx)?; entities_checkpoint.processed += hashed_entries_walked as u64; @@ -240,12 +239,13 @@ impl Stage for MerkleStage { } } else { debug!(target: "sync::stages::merkle::exec", current = ?current_block, target = ?to_block, "Updating trie"); - let (root, updates) = StateRoot::incremental_root_with_updates(tx.deref_mut(), range) - .map_err(|e| StageError::Fatal(Box::new(e)))?; - updates.flush(tx.deref_mut())?; + let (root, updates) = + StateRoot::incremental_root_with_updates(provider.tx_ref(), range) + .map_err(|e| StageError::Fatal(Box::new(e)))?; + updates.flush(provider.tx_ref())?; - let total_hashed_entries = (tx.deref().entries::()? + - tx.deref().entries::()?) + let total_hashed_entries = (provider.tx_ref().entries::()? + + provider.tx_ref().entries::()?) as u64; let entities_checkpoint = EntitiesCheckpoint { @@ -260,7 +260,7 @@ impl Stage for MerkleStage { }; // Reset the checkpoint - self.save_execution_checkpoint(tx, None)?; + self.save_execution_checkpoint(provider, None)?; self.validate_state_root(trie_root, block.seal_slow(), to_block)?; @@ -274,9 +274,10 @@ impl Stage for MerkleStage { /// Unwind the stage. async fn unwind( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: UnwindInput, ) -> Result { + let tx = provider.tx_ref(); let range = input.unwind_block_range(); if matches!(self, MerkleStage::Execution { .. }) { info!(target: "sync::stages::merkle::unwind", "Stage is always skipped"); @@ -286,8 +287,8 @@ impl Stage for MerkleStage { let mut entities_checkpoint = input.checkpoint.entities_stage_checkpoint().unwrap_or(EntitiesCheckpoint { processed: 0, - total: (tx.deref().entries::()? + - tx.deref().entries::()?) as u64, + total: (tx.entries::()? + + tx.entries::()?) as u64, }); if input.unwind_to == 0 { @@ -304,16 +305,17 @@ impl Stage for MerkleStage { // Unwind trie only if there are transitions if !range.is_empty() { - let (block_root, updates) = - StateRoot::incremental_root_with_updates(tx.deref_mut(), range) - .map_err(|e| StageError::Fatal(Box::new(e)))?; + let (block_root, updates) = StateRoot::incremental_root_with_updates(tx, range) + .map_err(|e| StageError::Fatal(Box::new(e)))?; // Validate the calulated state root - let target = tx.get_header(input.unwind_to)?; + let target = provider + .header_by_number(input.unwind_to)? + .ok_or_else(|| ProviderError::HeaderNotFound(input.unwind_to.into()))?; self.validate_state_root(block_root, target.seal_slow(), input.unwind_to)?; // Validation passed, apply unwind changes to the database. 
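+        // Only flush the recomputed trie updates once the root has been validated
+        // against the header we are unwinding to (checked just above); unwinds
+        // never write an unverified state root.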
- updates.flush(tx.deref_mut())?; + updates.flush(provider.tx_ref())?; // TODO(alexey): update entities checkpoint } else { diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index ab863a8d7..a26cbad7a 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -13,8 +13,8 @@ use reth_primitives::{ stage::{EntitiesCheckpoint, StageCheckpoint, StageId}, TransactionSignedNoHash, TxNumber, H160, }; -use reth_provider::{ProviderError, Transaction}; -use std::{fmt::Debug, ops::Deref}; +use reth_provider::{DatabaseProviderRW, HeaderProvider, ProviderError}; +use std::fmt::Debug; use thiserror::Error; use tokio::sync::mpsc; use tracing::*; @@ -56,7 +56,7 @@ impl Stage for SenderRecoveryStage { /// the [`TxSenders`][reth_db::tables::TxSenders] table. async fn execute( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: ExecInput, ) -> Result { if input.target_reached() { @@ -64,7 +64,7 @@ impl Stage for SenderRecoveryStage { } let (tx_range, block_range, is_final_range) = - input.next_block_range_with_transaction_threshold(tx, self.commit_threshold)?; + input.next_block_range_with_transaction_threshold(provider, self.commit_threshold)?; let end_block = *block_range.end(); // No transactions to walk over @@ -72,11 +72,13 @@ impl Stage for SenderRecoveryStage { info!(target: "sync::stages::sender_recovery", ?tx_range, "Target transaction already reached"); return Ok(ExecOutput { checkpoint: StageCheckpoint::new(end_block) - .with_entities_stage_checkpoint(stage_checkpoint(tx)?), + .with_entities_stage_checkpoint(stage_checkpoint(provider)?), done: is_final_range, }) } + let tx = provider.tx_ref(); + // Acquire the cursor for inserting elements let mut senders_cursor = tx.cursor_write::()?; @@ -133,7 +135,9 @@ impl Stage for SenderRecoveryStage { // fetch the sealed header so we can use it in the sender recovery // unwind - let sealed_header = tx.get_sealed_header(block_number)?; + let sealed_header = provider + .sealed_header(block_number)? + .ok_or(ProviderError::HeaderNotFound(block_number.into()))?; return Err(StageError::Validation { block: sealed_header, error: @@ -150,7 +154,7 @@ impl Stage for SenderRecoveryStage { Ok(ExecOutput { checkpoint: StageCheckpoint::new(end_block) - .with_entities_stage_checkpoint(stage_checkpoint(tx)?), + .with_entities_stage_checkpoint(stage_checkpoint(provider)?), done: is_final_range, }) } @@ -158,18 +162,18 @@ impl Stage for SenderRecoveryStage { /// Unwind the stage. async fn unwind( &mut self, - tx: &mut Transaction<'_, DB>, + provider: &mut DatabaseProviderRW<'_, &DB>, input: UnwindInput, ) -> Result { let (_, unwind_to, _) = input.unwind_block_range_with_threshold(self.commit_threshold); // Lookup latest tx id that we should unwind to - let latest_tx_id = tx.block_body_indices(unwind_to)?.last_tx_num(); - tx.unwind_table_by_num::(latest_tx_id)?; + let latest_tx_id = provider.block_body_indices(unwind_to)?.last_tx_num(); + provider.unwind_table_by_num::(latest_tx_id)?; Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) - .with_entities_stage_checkpoint(stage_checkpoint(tx)?), + .with_entities_stage_checkpoint(stage_checkpoint(provider)?), }) } } @@ -194,11 +198,11 @@ fn recover_sender( } fn stage_checkpoint( - tx: &Transaction<'_, DB>, + provider: &DatabaseProviderRW<'_, &DB>, ) -> Result { Ok(EntitiesCheckpoint { - processed: tx.deref().entries::()? as u64, - total: tx.deref().entries::()? 
 as u64,
+        processed: provider.tx_ref().entries::<tables::TxSenders>()? as u64,
+        total: provider.tx_ref().entries::<tables::Transactions>()? as u64,
     })
 }
diff --git a/crates/stages/src/stages/total_difficulty.rs b/crates/stages/src/stages/total_difficulty.rs
index a9b0de762..41afa8213 100644
--- a/crates/stages/src/stages/total_difficulty.rs
+++ b/crates/stages/src/stages/total_difficulty.rs
@@ -11,8 +11,8 @@ use reth_primitives::{
     stage::{EntitiesCheckpoint, StageCheckpoint, StageId},
     U256,
 };
-use reth_provider::Transaction;
-use std::{ops::Deref, sync::Arc};
+use reth_provider::DatabaseProviderRW;
+use std::sync::Arc;
 use tracing::*;

 /// The total difficulty stage.
@@ -51,9 +51,10 @@ impl<DB: Database> Stage<DB> for TotalDifficultyStage {
     /// Write total difficulty entries
     async fn execute(
         &mut self,
-        tx: &mut Transaction<'_, DB>,
+        provider: &mut DatabaseProviderRW<'_, &DB>,
         input: ExecInput,
     ) -> Result<ExecOutput, StageError> {
+        let tx = provider.tx_ref();
         if input.target_reached() {
             return Ok(ExecOutput::done(input.checkpoint()))
         }
@@ -89,7 +90,7 @@

         Ok(ExecOutput {
             checkpoint: StageCheckpoint::new(end_block)
-                .with_entities_stage_checkpoint(stage_checkpoint(tx)?),
+                .with_entities_stage_checkpoint(stage_checkpoint(provider)?),
             done: is_final_range,
         })
     }
@@ -97,26 +98,26 @@
     /// Unwind the stage.
     async fn unwind(
         &mut self,
-        tx: &mut Transaction<'_, DB>,
+        provider: &mut DatabaseProviderRW<'_, &DB>,
         input: UnwindInput,
     ) -> Result<UnwindOutput, StageError> {
         let (_, unwind_to, _) = input.unwind_block_range_with_threshold(self.commit_threshold);

-        tx.unwind_table_by_num::<tables::HeaderTD>(unwind_to)?;
+        provider.unwind_table_by_num::<tables::HeaderTD>(unwind_to)?;

         Ok(UnwindOutput {
             checkpoint: StageCheckpoint::new(unwind_to)
-                .with_entities_stage_checkpoint(stage_checkpoint(tx)?),
+                .with_entities_stage_checkpoint(stage_checkpoint(provider)?),
         })
     }
 }

 fn stage_checkpoint<DB: Database>(
-    tx: &Transaction<'_, DB>,
+    provider: &DatabaseProviderRW<'_, &DB>,
 ) -> Result<EntitiesCheckpoint, DatabaseError> {
     Ok(EntitiesCheckpoint {
-        processed: tx.deref().entries::<tables::HeaderTD>()? as u64,
-        total: tx.deref().entries::<tables::Headers>()? as u64,
+        processed: provider.tx_ref().entries::<tables::HeaderTD>()? as u64,
+        total: provider.tx_ref().entries::<tables::Headers>()? as u64,
     })
 }
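Both `stage_checkpoint` helpers above reduce stage progress to an `EntitiesCheckpoint`. A quick illustration of how those numbers are meant to be read (the `fmt_progress` helper below is illustrative only, not part of this diff; it assumes the two public fields shown above):

use reth_primitives::stage::EntitiesCheckpoint;

// For TotalDifficulty: `processed` counts HeaderTD rows written so far and
// `total` counts Headers rows, so a fully synced stage reports 100.0%.
fn fmt_progress(c: &EntitiesCheckpoint) -> String {
    format!("{:.1}%", 100.0 * c.processed as f64 / c.total.max(1) as f64)
}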
diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs
index 56ea803ab..f379598d9 100644
--- a/crates/stages/src/stages/tx_lookup.rs
+++ b/crates/stages/src/stages/tx_lookup.rs
@@ -13,8 +13,7 @@ use reth_primitives::{
     stage::{EntitiesCheckpoint, StageCheckpoint, StageId},
     TransactionSignedNoHash, TxNumber, H256,
 };
-use reth_provider::Transaction;
-use std::ops::Deref;
+use reth_provider::DatabaseProviderRW;
 use tokio::sync::mpsc;
 use tracing::*;

@@ -52,19 +51,19 @@ impl<DB: Database> Stage<DB> for TransactionLookupStage {
     /// Write transaction hash -> id entries
     async fn execute(
         &mut self,
-        tx: &mut Transaction<'_, DB>,
+        provider: &mut DatabaseProviderRW<'_, &DB>,
         input: ExecInput,
     ) -> Result<ExecOutput, StageError> {
         if input.target_reached() {
             return Ok(ExecOutput::done(input.checkpoint()))
         }
-
         let (tx_range, block_range, is_final_range) =
-            input.next_block_range_with_transaction_threshold(tx, self.commit_threshold)?;
+            input.next_block_range_with_transaction_threshold(provider, self.commit_threshold)?;
         let end_block = *block_range.end();

         debug!(target: "sync::stages::transaction_lookup", ?tx_range, "Updating transaction lookup");

+        let tx = provider.tx_ref();
         let mut tx_cursor = tx.cursor_read::<tables::Transactions>()?;
         let tx_walker = tx_cursor.walk_range(tx_range)?;

@@ -138,7 +137,7 @@

         Ok(ExecOutput {
             checkpoint: StageCheckpoint::new(end_block)
-                .with_entities_stage_checkpoint(stage_checkpoint(tx)?),
+                .with_entities_stage_checkpoint(stage_checkpoint(provider)?),
             done: is_final_range,
         })
     }
@@ -146,9 +145,10 @@
     /// Unwind the stage.
     async fn unwind(
         &mut self,
-        tx: &mut Transaction<'_, DB>,
+        provider: &mut DatabaseProviderRW<'_, &DB>,
         input: UnwindInput,
     ) -> Result<UnwindOutput, StageError> {
+        let tx = provider.tx_ref();
         let (range, unwind_to, _) = input.unwind_block_range_with_threshold(self.commit_threshold);

         // Cursors to unwind tx hash to number
@@ -174,17 +174,17 @@

         Ok(UnwindOutput {
             checkpoint: StageCheckpoint::new(unwind_to)
-                .with_entities_stage_checkpoint(stage_checkpoint(tx)?),
+                .with_entities_stage_checkpoint(stage_checkpoint(provider)?),
         })
     }
 }

 fn stage_checkpoint<DB: Database>(
-    tx: &Transaction<'_, DB>,
+    provider: &DatabaseProviderRW<'_, &DB>,
 ) -> Result<EntitiesCheckpoint, DatabaseError> {
     Ok(EntitiesCheckpoint {
-        processed: tx.deref().entries::<tables::TxHashNumber>()? as u64,
-        total: tx.deref().entries::<tables::Transactions>()? as u64,
+        processed: provider.tx_ref().entries::<tables::TxHashNumber>()? as u64,
+        total: provider.tx_ref().entries::<tables::Transactions>()?
as u64, }) } diff --git a/crates/stages/src/test_utils/runner.rs b/crates/stages/src/test_utils/runner.rs index de1b96fea..7666e0755 100644 --- a/crates/stages/src/test_utils/runner.rs +++ b/crates/stages/src/test_utils/runner.rs @@ -1,8 +1,9 @@ use super::TestTransaction; use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_db::mdbx::{Env, WriteMap}; -use reth_provider::Transaction; -use std::borrow::Borrow; +use reth_primitives::MAINNET; +use reth_provider::ShareableDatabase; +use std::{borrow::Borrow, sync::Arc}; use tokio::sync::oneshot; #[derive(thiserror::Error, Debug)] @@ -44,9 +45,11 @@ pub(crate) trait ExecuteStageTestRunner: StageTestRunner { let (tx, rx) = oneshot::channel(); let (db, mut stage) = (self.tx().inner_raw(), self.stage()); tokio::spawn(async move { - let mut db = Transaction::new(db.borrow()).expect("failed to create db container"); - let result = stage.execute(&mut db, input).await; - db.commit().expect("failed to commit"); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let mut provider = factory.provider_rw().unwrap(); + + let result = stage.execute(&mut provider, input).await; + provider.commit().expect("failed to commit"); tx.send(result).expect("failed to send message") }); rx @@ -68,9 +71,11 @@ pub(crate) trait UnwindStageTestRunner: StageTestRunner { let (tx, rx) = oneshot::channel(); let (db, mut stage) = (self.tx().inner_raw(), self.stage()); tokio::spawn(async move { - let mut db = Transaction::new(db.borrow()).expect("failed to create db container"); - let result = stage.unwind(&mut db, input).await; - db.commit().expect("failed to commit"); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let mut provider = factory.provider_rw().unwrap(); + + let result = stage.unwind(&mut provider, input).await; + provider.commit().expect("failed to commit"); tx.send(result).expect("failed to send result"); }); Box::pin(rx).await.unwrap() diff --git a/crates/stages/src/test_utils/stage.rs b/crates/stages/src/test_utils/stage.rs index 81056a5e1..028b74218 100644 --- a/crates/stages/src/test_utils/stage.rs +++ b/crates/stages/src/test_utils/stage.rs @@ -1,7 +1,7 @@ use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_db::database::Database; use reth_primitives::stage::StageId; -use reth_provider::Transaction; +use reth_provider::DatabaseProviderRW; use std::collections::VecDeque; #[derive(Debug)] @@ -48,7 +48,7 @@ impl Stage for TestStage { async fn execute( &mut self, - _: &mut Transaction<'_, DB>, + _: &mut DatabaseProviderRW<'_, &DB>, _input: ExecInput, ) -> Result { self.exec_outputs @@ -58,7 +58,7 @@ impl Stage for TestStage { async fn unwind( &mut self, - _: &mut Transaction<'_, DB>, + _: &mut DatabaseProviderRW<'_, &DB>, _input: UnwindInput, ) -> Result { self.unwind_outputs diff --git a/crates/stages/src/test_utils/test_db.rs b/crates/stages/src/test_utils/test_db.rs index 41a0f4956..9345441cb 100644 --- a/crates/stages/src/test_utils/test_db.rs +++ b/crates/stages/src/test_utils/test_db.rs @@ -13,9 +13,10 @@ use reth_db::{ DatabaseError as DbError, }; use reth_primitives::{ - keccak256, Account, Address, BlockNumber, SealedBlock, SealedHeader, StorageEntry, H256, U256, + keccak256, Account, Address, BlockNumber, SealedBlock, SealedHeader, StorageEntry, H256, + MAINNET, U256, }; -use reth_provider::Transaction; +use reth_provider::{DatabaseProviderRW, ShareableDatabase}; use std::{ borrow::Borrow, collections::BTreeMap, @@ -36,26 +37,30 @@ 
 pub struct TestTransaction {
     /// WriteMap DB
     pub tx: Arc<Env<WriteMap>>,
     pub path: Option<PathBuf>,
+    factory: ShareableDatabase<Arc<Env<WriteMap>>>,
 }

 impl Default for TestTransaction {
     /// Create a new instance of [TestTransaction]
     fn default() -> Self {
-        Self { tx: create_test_db::<WriteMap>(EnvKind::RW), path: None }
+        let tx = create_test_db::<WriteMap>(EnvKind::RW);
+        Self { tx: tx.clone(), path: None, factory: ShareableDatabase::new(tx, MAINNET.clone()) }
     }
 }

 impl TestTransaction {
     pub fn new(path: &Path) -> Self {
+        let tx = Arc::new(create_test_db_with_path::<WriteMap>(EnvKind::RW, path));
         Self {
-            tx: Arc::new(create_test_db_with_path::<WriteMap>(EnvKind::RW, path)),
+            tx: tx.clone(),
             path: Some(path.to_path_buf()),
+            factory: ShareableDatabase::new(tx, MAINNET.clone()),
         }
     }

-    /// Return a database wrapped in [Transaction].
-    pub fn inner(&self) -> Transaction<'_, Env<WriteMap>> {
-        Transaction::new(self.tx.borrow()).expect("failed to create db container")
+    /// Return a database wrapped in [DatabaseProviderRW].
+    pub fn inner(&self) -> DatabaseProviderRW<'_, Arc<Env<WriteMap>>> {
+        self.factory.provider_rw().expect("failed to create db container")
     }

     /// Get a pointer to an internal database.
@@ -69,8 +74,8 @@ impl TestTransaction {
         F: FnOnce(&mut Tx<'_, RW, WriteMap>) -> Result<(), DbError>,
     {
         let mut tx = self.inner();
-        f(&mut tx)?;
-        tx.commit()?;
+        f(tx.tx_mut())?;
+        tx.commit().expect("failed to commit");
         Ok(())
     }

@@ -79,7 +84,7 @@ impl TestTransaction {
     where
         F: FnOnce(&Tx<'_, RW, WriteMap>) -> Result<R, DbError>,
     {
-        f(&self.inner())
+        f(self.inner().tx_ref())
     }

     /// Check if the table is empty
diff --git a/crates/storage/db/src/abstraction/database.rs b/crates/storage/db/src/abstraction/database.rs
index a2071b80b..421220d95 100644
--- a/crates/storage/db/src/abstraction/database.rs
+++ b/crates/storage/db/src/abstraction/database.rs
@@ -4,7 +4,7 @@ use crate::{
     transaction::{DbTx, DbTxMut},
     DatabaseError,
 };
-use std::sync::Arc;
+use std::{fmt::Debug, sync::Arc};

 /// Implements the GAT method from:
 /// .
@@ -12,9 +12,9 @@ use std::sync::Arc;
 /// Sealed trait which cannot be implemented by 3rd parties, exposed only for implementers
 pub trait DatabaseGAT<'a, __ImplicitBounds: Sealed = Bounds<&'a Self>>: Send + Sync {
     /// RO database transaction
-    type TX: DbTx<'a> + Send + Sync;
+    type TX: DbTx<'a> + Send + Sync + Debug;
     /// RW database transaction
-    type TXMut: DbTxMut<'a> + DbTx<'a> + TableImporter<'a> + Send + Sync;
+    type TXMut: DbTxMut<'a> + DbTx<'a> + TableImporter<'a> + Send + Sync + Debug;
 }

 /// Main Database trait that spawns transactions to be executed.
diff --git a/crates/storage/db/src/abstraction/mock.rs b/crates/storage/db/src/abstraction/mock.rs
index 656bae654..c7b340925 100644
--- a/crates/storage/db/src/abstraction/mock.rs
+++ b/crates/storage/db/src/abstraction/mock.rs
@@ -38,7 +38,7 @@ impl<'a> DatabaseGAT<'a> for DatabaseMock {
 }

 /// Mock read only tx
-#[derive(Clone, Default)]
+#[derive(Debug, Clone, Default)]
 pub struct TxMock {
     /// Table representation
     _table: BTreeMap<Vec<u8>, Vec<u8>>,
diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs
index d4297db96..b00e6646f 100644
--- a/crates/storage/provider/src/lib.rs
+++ b/crates/storage/provider/src/lib.rs
@@ -11,11 +11,11 @@
 /// Various provider traits.
mod traits; pub use traits::{ - AccountProvider, BlockExecutor, BlockHashProvider, BlockIdProvider, BlockNumProvider, - BlockProvider, BlockProviderIdExt, BlockSource, BlockchainTreePendingStateProvider, - CanonChainTracker, CanonStateNotification, CanonStateNotificationSender, - CanonStateNotifications, CanonStateSubscriptions, EvmEnvProvider, ExecutorFactory, - HeaderProvider, PostStateDataProvider, ReceiptProvider, ReceiptProviderIdExt, + AccountExtProvider, AccountProvider, BlockExecutor, BlockHashProvider, BlockIdProvider, + BlockNumProvider, BlockProvider, BlockProviderIdExt, BlockSource, + BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotification, + CanonStateNotificationSender, CanonStateNotifications, CanonStateSubscriptions, EvmEnvProvider, + ExecutorFactory, HeaderProvider, PostStateDataProvider, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointProvider, StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, TransactionsProvider, WithdrawalsProvider, }; @@ -23,8 +23,8 @@ pub use traits::{ /// Provider trait implementations. pub mod providers; pub use providers::{ - HistoricalStateProvider, HistoricalStateProviderRef, LatestStateProvider, - LatestStateProviderRef, ShareableDatabase, + DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW, HistoricalStateProvider, + HistoricalStateProviderRef, LatestStateProvider, LatestStateProviderRef, ShareableDatabase, }; /// Execution result @@ -33,7 +33,7 @@ pub use post_state::PostState; /// Helper types for interacting with the database mod transaction; -pub use transaction::{Transaction, TransactionError}; +pub use transaction::TransactionError; /// Common database utilities. mod utils; diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 61762de4d..54c418336 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -18,7 +18,7 @@ use std::{ops::RangeBounds, sync::Arc}; use tracing::trace; mod provider; -use provider::{DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW}; +pub use provider::{DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW}; /// A common provider that fetches data from a database. /// @@ -34,16 +34,17 @@ pub struct ShareableDatabase { impl ShareableDatabase { /// Returns a provider with a created `DbTx` inside, which allows fetching data from the /// database using different types of providers. Example: [`HeaderProvider`] - /// [`BlockHashProvider`] + /// [`BlockHashProvider`]. This may fail if the inner read database transaction fails to open. pub fn provider(&self) -> Result> { Ok(DatabaseProvider::new(self.db.tx()?, self.chain_spec.clone())) } /// Returns a provider with a created `DbTxMut` inside, which allows fetching and updating /// data from the database using different types of providers. Example: [`HeaderProvider`] - /// [`BlockHashProvider`] + /// [`BlockHashProvider`]. This may fail if the inner read/write database transaction fails to + /// open. 
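+    /// Writes made through the returned provider are only persisted once it is
+    /// committed; dropping it without committing aborts the underlying transaction.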
pub fn provider_rw(&self) -> Result> { - Ok(DatabaseProvider::new_rw(self.db.tx_mut()?, self.chain_spec.clone())) + Ok(DatabaseProviderRW(DatabaseProvider::new_rw(self.db.tx_mut()?, self.chain_spec.clone()))) } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 64dd88c0a..a355a09e6 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1,36 +1,87 @@ use crate::{ - traits::{BlockSource, ReceiptProvider}, - BlockHashProvider, BlockNumProvider, BlockProvider, EvmEnvProvider, HeaderProvider, - ProviderError, StageCheckpointProvider, TransactionsProvider, WithdrawalsProvider, + insert_canonical_block, + post_state::StorageChangeset, + traits::{AccountExtProvider, BlockSource, ReceiptProvider}, + AccountProvider, BlockHashProvider, BlockNumProvider, BlockProvider, EvmEnvProvider, + HeaderProvider, PostState, ProviderError, StageCheckpointProvider, TransactionError, + TransactionsProvider, WithdrawalsProvider, }; +use itertools::{izip, Itertools}; use reth_db::{ - cursor::DbCursorRO, - database::DatabaseGAT, - models::StoredBlockBodyIndices, + common::KeyValue, + cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, + database::{Database, DatabaseGAT}, + models::{ + sharded_key, + storage_sharded_key::{self, StorageShardedKey}, + AccountBeforeTx, BlockNumberAddress, ShardedKey, StoredBlockBodyIndices, + }, + table::Table, tables, - transaction::{DbTx, DbTxMut}, + transaction::{DbTx, DbTxMut, DbTxMutGAT}, + BlockNumberList, DatabaseError, }; use reth_interfaces::Result; use reth_primitives::{ + keccak256, stage::{StageCheckpoint, StageId}, - Block, BlockHash, BlockHashOrNumber, BlockNumber, ChainInfo, ChainSpec, Head, Header, Receipt, - SealedBlock, SealedHeader, TransactionMeta, TransactionSigned, TxHash, TxNumber, Withdrawal, - H256, U256, + Account, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, ChainInfo, ChainSpec, + Hardfork, Head, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TxHash, + TxNumber, Withdrawal, H256, U256, }; use reth_revm_primitives::{ config::revm_spec, env::{fill_block_env, fill_cfg_and_block_env, fill_cfg_env}, primitives::{BlockEnv, CfgEnv, SpecId}, }; -use std::{ops::RangeBounds, sync::Arc}; +use reth_trie::StateRoot; +use std::{ + collections::{btree_map::Entry, BTreeMap, BTreeSet}, + fmt::Debug, + ops::{Deref, DerefMut, Range, RangeBounds, RangeInclusive}, + sync::Arc, +}; + +use super::get_stage_checkpoint; /// A [`DatabaseProvider`] that holds a read-only database transaction. -pub(crate) type DatabaseProviderRO<'this, DB> = - DatabaseProvider<'this, >::TX>; +pub type DatabaseProviderRO<'this, DB> = DatabaseProvider<'this, >::TX>; /// A [`DatabaseProvider`] that holds a read-write database transaction. -pub(crate) type DatabaseProviderRW<'this, DB> = - DatabaseProvider<'this, >::TXMut>; +/// +/// Ideally this would be an alias type. However, there's some weird compiler error (), that forces us to wrap this in a struct instead. +/// Once that issue is solved, we can probably revert back to being an alias type. 
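+///
+/// A usage sketch (illustrative only, not part of this diff; assumes a test
+/// database handle `db: Arc<Env<WriteMap>>`):
+///
+/// ```ignore
+/// let factory = ShareableDatabase::new(db, MAINNET.clone());
+/// let provider = factory.provider_rw()?; // DatabaseProviderRW, derefs to DatabaseProvider
+/// provider.save_stage_checkpoint(StageId::Headers, StageCheckpoint::new(0))?;
+/// provider.commit()?; // writes persist only after commit
+/// ```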
+#[derive(Debug)]
+pub struct DatabaseProviderRW<'this, DB: Database>(
+    pub DatabaseProvider<'this, <DB as DatabaseGAT<'this>>::TXMut>,
+);
+
+impl<'this, DB: Database> Deref for DatabaseProviderRW<'this, DB> {
+    type Target = DatabaseProvider<'this, <DB as DatabaseGAT<'this>>::TXMut>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl<'this, DB: Database> DerefMut for DatabaseProviderRW<'this, DB> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
+impl<'this, DB: Database> DatabaseProviderRW<'this, DB> {
+    /// Commit database transaction
+    pub fn commit(self) -> Result<bool> {
+        self.0.commit()
+    }
+
+    /// Consume `DbTx` or `DbTxMut`.
+    pub fn into_tx(self) -> <DB as DatabaseGAT<'this>>::TXMut {
+        self.0.into_tx()
+    }
+}

 /// A provider struct that fetches data from the database.
 /// Wrapper around [`DbTx`] and [`DbTxMut`]. Example: [`HeaderProvider`] [`BlockHashProvider`]
@@ -43,7 +94,7 @@ where
     tx: TX,
     /// Chain spec
     chain_spec: Arc<ChainSpec>,
-    _phantom_data: std::marker::PhantomData<&'this ()>,
+    _phantom_data: std::marker::PhantomData<&'this TX>,
 }

 impl<'this, TX: DbTxMut<'this>> DatabaseProvider<'this, TX> {
@@ -53,6 +104,78 @@ impl<'this, TX: DbTxMut<'this>> DatabaseProvider<'this, TX> {
     }
 }

+/// Unwind all history shards. For the boundary shard, remove it from the database and
+/// return the part of the shard whose items are still valid. If only full shards were
+/// removed, the returned list is empty.
+fn unwind_account_history_shards<'a, TX: reth_db::transaction::DbTxMutGAT<'a>>(
+    cursor: &mut <TX as DbTxMutGAT<'a>>::CursorMut<tables::AccountHistory>,
+    address: Address,
+    block_number: BlockNumber,
+) -> std::result::Result<Vec<usize>, TransactionError> {
+    let mut item = cursor.seek_exact(ShardedKey::new(address, u64::MAX))?;
+
+    while let Some((sharded_key, list)) = item {
+        // there is no more shard for this address
+        if sharded_key.key != address {
+            break
+        }
+        cursor.delete_current()?;
+        // The shard was deleted above. If its first (lowest) index is at or above
+        // `block_number`, the whole shard is unwound; step to the previous shard.
+        let first = list.iter(0).next().expect("List can't be empty");
+        if first >= block_number as usize {
+            item = cursor.prev()?;
+            continue
+        } else if block_number <= sharded_key.highest_block_number {
+            // The boundary shard: only the indices below `block_number` survive, and
+            // the first element is guaranteed to be one of them.
+            return Ok(list.iter(0).take_while(|i| *i < block_number as usize).collect::<Vec<_>>())
+        } else {
+            let new_list = list.iter(0).collect::<Vec<_>>();
+            return Ok(new_list)
+        }
+    }
+    Ok(Vec::new())
+}
+
+/// Unwind all history shards. For the boundary shard, remove it from the database and
+/// return the part of the shard whose items are still valid. If only full shards were
+/// removed, the returned list is empty; that does not mean no shards are left for the
+/// key, only that no shard had to be split.
+fn unwind_storage_history_shards<'a, TX: reth_db::transaction::DbTxMutGAT<'a>>(
+    cursor: &mut <TX as DbTxMutGAT<'a>>::CursorMut<tables::StorageHistory>,
+    address: Address,
+    storage_key: H256,
+    block_number: BlockNumber,
+) -> std::result::Result<Vec<usize>, TransactionError> {
+    let mut item = cursor.seek_exact(StorageShardedKey::new(address, storage_key, u64::MAX))?;
+
+    while let Some((storage_sharded_key, list)) = item {
+        if storage_sharded_key.address != address ||
+            storage_sharded_key.sharded_key.key != storage_key
+        {
+            // there is no more shard for this address and storage_key.
+            break
+        }
+        cursor.delete_current()?;
+        // The shard was deleted above. If its first (lowest) index is at or above
+        // `block_number`, the whole shard is unwound; step to the previous shard.
+ let first = list.iter(0).next().expect("List can't empty"); + if first >= block_number as usize { + item = cursor.prev()?; + continue + } else if block_number <= storage_sharded_key.sharded_key.highest_block_number { + // if first element is in scope whole list would be removed. + // so at least this first element is present. + return Ok(list.iter(0).take_while(|i| *i < block_number as usize).collect::>()) + } else { + return Ok(list.iter(0).collect::>()) + } + } + Ok(Vec::new()) +} + impl<'this, TX: DbTx<'this>> DatabaseProvider<'this, TX> { /// Creates a provider with an inner read-only transaction. pub fn new(tx: TX, chain_spec: Arc) -> Self { @@ -63,6 +186,162 @@ impl<'this, TX: DbTx<'this>> DatabaseProvider<'this, TX> { pub fn into_tx(self) -> TX { self.tx } + + /// Pass `DbTx` or `DbTxMut` mutable reference. + pub fn tx_mut(&mut self) -> &mut TX { + &mut self.tx + } + + /// Pass `DbTx` or `DbTxMut` immutable reference. + pub fn tx_ref(&self) -> &TX { + &self.tx + } + + /// Return full table as Vec + pub fn table(&self) -> std::result::Result>, DatabaseError> + where + T::Key: Default + Ord, + { + self.tx + .cursor_read::()? + .walk(Some(T::Key::default()))? + .collect::, DatabaseError>>() + } + + // TODO(joshie) TEMPORARY should be moved to trait providers + + /// Iterate over account changesets and return all account address that were changed. + pub fn get_addresses_and_keys_of_changed_storages( + &self, + range: RangeInclusive, + ) -> std::result::Result>, TransactionError> { + Ok(self + .tx + .cursor_read::()? + .walk_range(BlockNumberAddress::range(range))? + .collect::, _>>()? + .into_iter() + // fold all storages and save its old state so we can remove it from HashedStorage + // it is needed as it is dup table. + .fold( + BTreeMap::new(), + |mut accounts: BTreeMap>, + (BlockNumberAddress((_, address)), storage_entry)| { + accounts.entry(address).or_default().insert(storage_entry.key); + accounts + }, + )) + } + + /// Get plainstate storages + #[allow(clippy::type_complexity)] + pub fn get_plainstate_storages( + &self, + iter: impl IntoIterator)>, + ) -> std::result::Result)>, TransactionError> { + let mut plain_storage = self.tx.cursor_dup_read::()?; + + iter.into_iter() + .map(|(address, storage)| { + storage + .into_iter() + .map(|key| -> std::result::Result<_, TransactionError> { + let ret = plain_storage + .seek_by_key_subkey(address, key)? + .filter(|v| v.key == key) + .unwrap_or_default(); + Ok((key, ret.value)) + }) + .collect::, _>>() + .map(|storage| (address, storage)) + }) + .collect::, _>>() + } + + /// Get all transaction ids where account got changed. + /// + /// NOTE: Get inclusive range of blocks. + pub fn get_storage_transition_ids_from_changeset( + &self, + range: RangeInclusive, + ) -> std::result::Result>, TransactionError> { + let storage_changeset = self + .tx + .cursor_read::()? + .walk_range(BlockNumberAddress::range(range))? + .collect::, _>>()?; + + // fold all storages to one set of changes + let storage_changeset_lists = storage_changeset.into_iter().fold( + BTreeMap::new(), + |mut storages: BTreeMap<(Address, H256), Vec>, (index, storage)| { + storages + .entry((index.address(), storage.key)) + .or_default() + .push(index.block_number()); + storages + }, + ); + + Ok(storage_changeset_lists) + } + + /// Get all transaction ids where account got changed. + /// + /// NOTE: Get inclusive range of blocks. 
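+    /// (Despite the historical name, the "transition ids" collected here are the
+    /// block numbers at which each account changed.)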
+    pub fn get_account_transition_ids_from_changeset(
+        &self,
+        range: RangeInclusive<BlockNumber>,
+    ) -> std::result::Result<BTreeMap<Address, Vec<u64>>, TransactionError> {
+        let account_changesets = self
+            .tx
+            .cursor_read::<tables::AccountChangeSet>()?
+            .walk_range(range)?
+            .collect::<std::result::Result<Vec<_>, _>>()?;
+
+        let account_transitions = account_changesets
+            .into_iter()
+            // fold all accounts into one set of changed accounts
+            .fold(
+                BTreeMap::new(),
+                |mut accounts: BTreeMap<Address, Vec<u64>>, (index, account)| {
+                    accounts.entry(account.address).or_default().push(index);
+                    accounts
+                },
+            );
+
+        Ok(account_transitions)
+    }
+
+    /// Iterate over account changesets and return all account addresses that were changed.
+    pub fn get_addresses_of_changed_accounts(
+        &self,
+        range: RangeInclusive<BlockNumber>,
+    ) -> std::result::Result<BTreeSet<Address>, TransactionError> {
+        Ok(self
+            .tx
+            .cursor_read::<tables::AccountChangeSet>()?
+            .walk_range(range)?
+            .collect::<std::result::Result<Vec<_>, _>>()?
+            .into_iter()
+            // fold all accounts into one set of changed accounts
+            .fold(BTreeSet::new(), |mut accounts: BTreeSet<Address>
, (_, account_before)| { + accounts.insert(account_before.address); + accounts + })) + } + + /// Get plainstate account from iterator + pub fn get_plainstate_accounts( + &self, + iter: impl IntoIterator, + ) -> std::result::Result)>, TransactionError> { + let mut plain_accounts = self.tx.cursor_read::()?; + Ok(iter + .into_iter() + .map(|address| plain_accounts.seek_exact(address).map(|a| (address, a.map(|(_, v)| v)))) + .collect::, _>>()?) + } } impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> { @@ -70,6 +349,1136 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> { pub fn commit(self) -> Result { Ok(self.tx.commit()?) } + + // TODO(joshie) TEMPORARY should be moved to trait providers + + /// Get range of blocks and its execution result + pub fn get_block_and_execution_range( + &self, + chain_spec: &ChainSpec, + range: RangeInclusive, + ) -> std::result::Result, TransactionError> { + self.get_take_block_and_execution_range::(chain_spec, range) + } + + /// Take range of blocks and its execution result + pub fn take_block_and_execution_range( + &self, + chain_spec: &ChainSpec, + range: RangeInclusive, + ) -> std::result::Result, TransactionError> { + self.get_take_block_and_execution_range::(chain_spec, range) + } + + /// Unwind and clear account hashing + pub fn unwind_account_hashing( + &self, + range: RangeInclusive, + ) -> std::result::Result<(), TransactionError> { + let mut hashed_accounts = self.tx.cursor_write::()?; + + // Aggregate all transition changesets and make a list of accounts that have been changed. + self.tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()? + .into_iter() + .rev() + // fold all account to get the old balance/nonces and account that needs to be removed + .fold( + BTreeMap::new(), + |mut accounts: BTreeMap>, (_, account_before)| { + accounts.insert(account_before.address, account_before.info); + accounts + }, + ) + .into_iter() + // hash addresses and collect it inside sorted BTreeMap. + // We are doing keccak only once per address. + .map(|(address, account)| (keccak256(address), account)) + .collect::>() + .into_iter() + // Apply values to HashedState (if Account is None remove it); + .try_for_each( + |(hashed_address, account)| -> std::result::Result<(), TransactionError> { + if let Some(account) = account { + hashed_accounts.upsert(hashed_address, account)?; + } else if hashed_accounts.seek_exact(hashed_address)?.is_some() { + hashed_accounts.delete_current()?; + } + Ok(()) + }, + )?; + + Ok(()) + } + + /// Unwind and clear storage hashing + pub fn unwind_storage_hashing( + &self, + range: Range, + ) -> std::result::Result<(), TransactionError> { + let mut hashed_storage = self.tx.cursor_dup_write::()?; + + // Aggregate all transition changesets and make list of accounts that have been changed. + self.tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()? + .into_iter() + .rev() + // fold all account to get the old balance/nonces and account that needs to be removed + .fold( + BTreeMap::new(), + |mut accounts: BTreeMap<(Address, H256), U256>, + (BlockNumberAddress((_, address)), storage_entry)| { + accounts.insert((address, storage_entry.key), storage_entry.value); + accounts + }, + ) + .into_iter() + // hash addresses and collect it inside sorted BTreeMap. + // We are doing keccak only once per address. 
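+            // (Folding into a BTreeMap first keeps only the oldest changeset value per
+            // (address, slot) pair, i.e. the pre-state being restored, and the second
+            // BTreeMap below orders entries by hashed key so cursor writes are sequential.)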
+ .map(|((address, key), value)| ((keccak256(address), keccak256(key)), value)) + .collect::>() + .into_iter() + // Apply values to HashedStorage (if Value is zero just remove it); + .try_for_each( + |((hashed_address, key), value)| -> std::result::Result<(), TransactionError> { + if hashed_storage + .seek_by_key_subkey(hashed_address, key)? + .filter(|entry| entry.key == key) + .is_some() + { + hashed_storage.delete_current()?; + } + + if value != U256::ZERO { + hashed_storage.upsert(hashed_address, StorageEntry { key, value })?; + } + Ok(()) + }, + )?; + + Ok(()) + } + + /// Unwind and clear account history indices. + /// + /// Returns number of changesets walked. + pub fn unwind_account_history_indices( + &self, + range: RangeInclusive, + ) -> std::result::Result { + let account_changeset = self + .tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()?; + let changesets = account_changeset.len(); + + let last_indices = account_changeset + .into_iter() + // reverse so we can get lowest transition id where we need to unwind account. + .rev() + // fold all account and get last transition index + .fold(BTreeMap::new(), |mut accounts: BTreeMap, (index, account)| { + // we just need address and lowest transition id. + accounts.insert(account.address, index); + accounts + }); + // try to unwind the index + let mut cursor = self.tx.cursor_write::()?; + for (address, rem_index) in last_indices { + let shard_part = unwind_account_history_shards::(&mut cursor, address, rem_index)?; + + // check last shard_part, if present, items needs to be reinserted. + if !shard_part.is_empty() { + // there are items in list + self.tx.put::( + ShardedKey::new(address, u64::MAX), + BlockNumberList::new(shard_part) + .expect("There is at least one element in list and it is sorted."), + )?; + } + } + + Ok(changesets) + } + + /// Unwind and clear storage history indices. + /// + /// Returns number of changesets walked. + pub fn unwind_storage_history_indices( + &self, + range: Range, + ) -> std::result::Result { + let storage_changesets = self + .tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()?; + let changesets = storage_changesets.len(); + + let last_indices = storage_changesets + .into_iter() + // reverse so we can get lowest transition id where we need to unwind account. + .rev() + // fold all storages and get last transition index + .fold( + BTreeMap::new(), + |mut accounts: BTreeMap<(Address, H256), u64>, (index, storage)| { + // we just need address and lowest transition id. + accounts.insert((index.address(), storage.key), index.block_number()); + accounts + }, + ); + + let mut cursor = self.tx.cursor_write::()?; + for ((address, storage_key), rem_index) in last_indices { + let shard_part = + unwind_storage_history_shards::(&mut cursor, address, storage_key, rem_index)?; + + // check last shard_part, if present, items needs to be reinserted. + if !shard_part.is_empty() { + // there are items in list + self.tx.put::( + StorageShardedKey::new(address, storage_key, u64::MAX), + BlockNumberList::new(shard_part) + .expect("There is at least one element in list and it is sorted."), + )?; + } + } + + Ok(changesets) + } + + /// Traverse over changesets and plain state and recreate the [`PostState`]s for the given range + /// of blocks. + /// + /// 1. Iterate over the [BlockBodyIndices][tables::BlockBodyIndices] table to get all + /// the transition indices. + /// 2. 
Iterate over the [StorageChangeSet][tables::StorageChangeSet] table + /// and the [AccountChangeSet][tables::AccountChangeSet] tables in reverse order to reconstruct + /// the changesets. + /// - In order to have both the old and new values in the changesets, we also access the + /// plain state tables. + /// 3. While iterating over the changeset tables, if we encounter a new account or storage slot, + /// we: + /// 1. Take the old value from the changeset + /// 2. Take the new value from the plain state + /// 3. Save the old value to the local state + /// 4. While iterating over the changeset tables, if we encounter an account/storage slot we + /// have seen before we: + /// 1. Take the old value from the changeset + /// 2. Take the new value from the local state + /// 3. Set the local state to the value in the changeset + /// + /// If `TAKE` is `true`, the local state will be written to the plain state tables. + /// 5. Get all receipts from table + fn get_take_block_execution_result_range( + &self, + range: RangeInclusive, + ) -> std::result::Result, TransactionError> { + if range.is_empty() { + return Ok(Vec::new()) + } + + // We are not removing block meta as it is used to get block transitions. + let block_bodies = self.get_or_take::(range.clone())?; + + // get transaction receipts + let from_transaction_num = + block_bodies.first().expect("already checked if there are blocks").1.first_tx_num(); + let to_transaction_num = + block_bodies.last().expect("already checked if there are blocks").1.last_tx_num(); + let receipts = + self.get_or_take::(from_transaction_num..=to_transaction_num)?; + + let storage_range = BlockNumberAddress::range(range.clone()); + + let storage_changeset = + self.get_or_take::(storage_range)?; + let account_changeset = self.get_or_take::(range)?; + + // iterate previous value and get plain state value to create changeset + // Double option around Account represent if Account state is know (first option) and + // account is removed (Second Option) + type LocalPlainState = BTreeMap>, BTreeMap)>; + + let mut local_plain_state: LocalPlainState = BTreeMap::new(); + + // iterate in reverse and get plain state. 
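The loops that follow implement, roughly, steps 3 and 4 of the doc comment above. A toy sketch of that bookkeeping (illustrative types only, not the PR's code): walking changesets from newest to oldest, the "old" value comes from the changeset itself, while the "new" value comes from plain state on first sighting and from the previously visited (younger) changeset afterwards.

use std::collections::BTreeMap;

// (block number, account id, value the changeset recorded before the change)
type ChangesetEntry = (u64, u8, i64);

fn old_new_pairs(
    changesets_newest_first: &[ChangesetEntry],
    plain_state: &BTreeMap<u8, i64>,
) -> Vec<(u64, u8, i64, i64)> {
    // Local state: the last "old" value we recorded per account.
    let mut local: BTreeMap<u8, i64> = BTreeMap::new();
    let mut out = Vec::new();
    for &(block, account, old) in changesets_newest_first {
        let new = match local.get(&account) {
            // First sighting: the post-change value still lives in plain state.
            None => plain_state[&account],
            // Seen before: the post-change value is the "old" value recorded by
            // the younger changeset visited previously.
            Some(v) => *v,
        };
        local.insert(account, old);
        out.push((block, account, old, new));
    }
    out
}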
+ + // Bundle execution changeset to its particular transaction and block + let mut block_states = + BTreeMap::from_iter(block_bodies.iter().map(|(num, _)| (*num, PostState::default()))); + + let mut plain_accounts_cursor = self.tx.cursor_write::()?; + let mut plain_storage_cursor = self.tx.cursor_dup_write::()?; + + // add account changeset changes + for (block_number, account_before) in account_changeset.into_iter().rev() { + let AccountBeforeTx { info: old_info, address } = account_before; + let new_info = match local_plain_state.entry(address) { + Entry::Vacant(entry) => { + let new_account = plain_accounts_cursor.seek_exact(address)?.map(|kv| kv.1); + entry.insert((Some(old_info), BTreeMap::new())); + new_account + } + Entry::Occupied(mut entry) => { + let new_account = std::mem::replace(&mut entry.get_mut().0, Some(old_info)); + new_account.expect("As we are stacking account first, account would always be Some(Some) or Some(None)") + } + }; + + let post_state = block_states.entry(block_number).or_default(); + match (old_info, new_info) { + (Some(old), Some(new)) => { + if new != old { + post_state.change_account(block_number, address, old, new); + } else { + unreachable!("Junk data in database: an account changeset did not represent any change"); + } + } + (None, Some(account)) => post_state.create_account(block_number, address, account), + (Some(old), None) => + post_state.destroy_account(block_number, address, old), + (None, None) => unreachable!("Junk data in database: an account changeset transitioned from no account to no account"), + }; + } + + // add storage changeset changes + let mut storage_changes: BTreeMap = BTreeMap::new(); + for (block_and_address, storage_entry) in storage_changeset.into_iter().rev() { + let BlockNumberAddress((_, address)) = block_and_address; + let new_storage = + match local_plain_state.entry(address).or_default().1.entry(storage_entry.key) { + Entry::Vacant(entry) => { + let new_storage = plain_storage_cursor + .seek_by_key_subkey(address, storage_entry.key)? + .filter(|storage| storage.key == storage_entry.key) + .unwrap_or_default(); + entry.insert(storage_entry.value); + new_storage.value + } + Entry::Occupied(mut entry) => { + std::mem::replace(entry.get_mut(), storage_entry.value) + } + }; + storage_changes.entry(block_and_address).or_default().insert( + U256::from_be_bytes(storage_entry.key.0), + (storage_entry.value, new_storage), + ); + } + + for (BlockNumberAddress((block_number, address)), storage_changeset) in + storage_changes.into_iter() + { + block_states.entry(block_number).or_default().change_storage( + block_number, + address, + storage_changeset, + ); + } + + if TAKE { + // iterate over local plain state remove all account and all storages. + for (address, (account, storage)) in local_plain_state.into_iter() { + // revert account + if let Some(account) = account { + let existing_entry = plain_accounts_cursor.seek_exact(address)?; + if let Some(account) = account { + plain_accounts_cursor.upsert(address, account)?; + } else if existing_entry.is_some() { + plain_accounts_cursor.delete_current()?; + } + } + + // revert storages + for (storage_key, storage_value) in storage.into_iter() { + let storage_entry = StorageEntry { key: storage_key, value: storage_value }; + // delete previous value + // TODO: This does not use dupsort features + if plain_storage_cursor + .seek_by_key_subkey(address, storage_key)? + .filter(|s| s.key == storage_key) + .is_some() + { + plain_storage_cursor.delete_current()? 
+ } + + // TODO: This does not use dupsort features + // insert value if needed + if storage_value != U256::ZERO { + plain_storage_cursor.upsert(address, storage_entry)?; + } + } + } + } + + // iterate over block body and create ExecutionResult + let mut receipt_iter = receipts.into_iter(); + + // loop break if we are at the end of the blocks. + for (block_number, block_body) in block_bodies.into_iter() { + for _ in block_body.tx_num_range() { + if let Some((_, receipt)) = receipt_iter.next() { + block_states + .entry(block_number) + .or_default() + .add_receipt(block_number, receipt); + } + } + } + Ok(block_states.into_values().collect()) + } + + /// Return range of blocks and its execution result + pub fn get_take_block_and_execution_range( + &self, + chain_spec: &ChainSpec, + range: RangeInclusive, + ) -> std::result::Result, TransactionError> { + if TAKE { + let storage_range = BlockNumberAddress::range(range.clone()); + + self.unwind_account_hashing(range.clone())?; + self.unwind_account_history_indices(range.clone())?; + self.unwind_storage_hashing(storage_range.clone())?; + self.unwind_storage_history_indices(storage_range)?; + + // merkle tree + let (new_state_root, trie_updates) = + StateRoot::incremental_root_with_updates(&self.tx, range.clone())?; + + let parent_number = range.start().saturating_sub(1); + let parent_state_root = self + .tx + .get::(parent_number)? + .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))? + .state_root; + + // state root should be always correct as we are reverting state. + // but for sake of double verification we will check it again. + if new_state_root != parent_state_root { + let parent_hash = self + .tx + .get::(parent_number)? + .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))?; + return Err(TransactionError::UnwindStateRootMismatch { + got: new_state_root, + expected: parent_state_root, + block_number: parent_number, + block_hash: parent_hash, + }) + } + trie_updates.flush(&self.tx)?; + } + // get blocks + let blocks = self.get_take_block_range::(chain_spec, range.clone())?; + let unwind_to = blocks.first().map(|b| b.number.saturating_sub(1)); + // get execution res + let execution_res = self.get_take_block_execution_result_range::(range.clone())?; + // combine them + let blocks_with_exec_result: Vec<_> = + blocks.into_iter().zip(execution_res.into_iter()).collect(); + + // remove block bodies it is needed for both get block range and get block execution results + // that is why it is deleted afterwards. + if TAKE { + // rm block bodies + self.get_or_take::(range)?; + + // Update pipeline progress + if let Some(fork_number) = unwind_to { + self.update_pipeline_stages(fork_number, true)?; + } + } + + // return them + Ok(blocks_with_exec_result) + } + + /// Return list of entries from table + /// + /// If TAKE is true, opened cursor would be write and it would delete all values from db. + #[inline] + pub fn get_or_take( + &self, + range: impl RangeBounds, + ) -> std::result::Result>, DatabaseError> { + if TAKE { + let mut cursor_write = self.tx.cursor_write::()?; + let mut walker = cursor_write.walk_range(range)?; + let mut items = Vec::new(); + while let Some(i) = walker.next().transpose()? { + walker.delete_current()?; + items.push(i) + } + Ok(items) + } else { + self.tx + .cursor_read::()? + .walk_range(range)? 
+ .collect::, _>>() + } + } + + /// Get requested blocks transaction with signer + fn get_take_block_transaction_range( + &self, + range: impl RangeBounds + Clone, + ) -> std::result::Result)>, TransactionError> + { + // Raad range of block bodies to get all transactions id's of this range. + let block_bodies = self.get_or_take::(range)?; + + if block_bodies.is_empty() { + return Ok(Vec::new()) + } + + // Compute the first and last tx ID in the range + let first_transaction = block_bodies.first().expect("If we have headers").1.first_tx_num(); + let last_transaction = block_bodies.last().expect("Not empty").1.last_tx_num(); + + // If this is the case then all of the blocks in the range are empty + if last_transaction < first_transaction { + return Ok(block_bodies.into_iter().map(|(n, _)| (n, Vec::new())).collect()) + } + + // Get transactions and senders + let transactions = self + .get_or_take::(first_transaction..=last_transaction)? + .into_iter() + .map(|(id, tx)| (id, tx.into())) + .collect::>(); + + let senders = + self.get_or_take::(first_transaction..=last_transaction)?; + + if TAKE { + // Remove TxHashNumber + let mut tx_hash_cursor = self.tx.cursor_write::()?; + for (_, tx) in transactions.iter() { + if tx_hash_cursor.seek_exact(tx.hash())?.is_some() { + tx_hash_cursor.delete_current()?; + } + } + + // Remove TransactionBlock index if there are transaction present + if !transactions.is_empty() { + let tx_id_range = transactions.first().unwrap().0..=transactions.last().unwrap().0; + self.get_or_take::(tx_id_range)?; + } + } + + // Merge transaction into blocks + let mut block_tx = Vec::with_capacity(block_bodies.len()); + let mut senders = senders.into_iter(); + let mut transactions = transactions.into_iter(); + for (block_number, block_body) in block_bodies { + let mut one_block_tx = Vec::with_capacity(block_body.tx_count as usize); + for _ in block_body.tx_num_range() { + let tx = transactions.next(); + let sender = senders.next(); + + let recovered = match (tx, sender) { + (Some((tx_id, tx)), Some((sender_tx_id, sender))) => { + if tx_id != sender_tx_id { + Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) + } else { + Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender)) + } + } + (Some((tx_id, _)), _) | (_, Some((tx_id, _))) => { + Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) + } + (None, None) => Err(ProviderError::BlockBodyTransactionCount), + }?; + one_block_tx.push(recovered) + } + block_tx.push((block_number, one_block_tx)); + } + + Ok(block_tx) + } + + /// Return range of blocks and its execution result + fn get_take_block_range( + &self, + chain_spec: &ChainSpec, + range: impl RangeBounds + Clone, + ) -> std::result::Result, TransactionError> { + // For block we need Headers, Bodies, Uncles, withdrawals, Transactions, Signers + + let block_headers = self.get_or_take::(range.clone())?; + if block_headers.is_empty() { + return Ok(Vec::new()) + } + + let block_header_hashes = + self.get_or_take::(range.clone())?; + let block_ommers = self.get_or_take::(range.clone())?; + let block_withdrawals = + self.get_or_take::(range.clone())?; + + let block_tx = self.get_take_block_transaction_range::(range.clone())?; + + if TAKE { + // rm HeaderTD + self.get_or_take::(range)?; + // rm HeaderNumbers + let mut header_number_cursor = self.tx.cursor_write::()?; + for (_, hash) in block_header_hashes.iter() { + if header_number_cursor.seek_exact(*hash)?.is_some() { + header_number_cursor.delete_current()?; + } + } + } + + // merge all into 
+
+    /// Return a range of blocks with their senders
+    fn get_take_block_range<const TAKE: bool>(
+        &self,
+        chain_spec: &ChainSpec,
+        range: impl RangeBounds<BlockNumber> + Clone,
+    ) -> std::result::Result<Vec<SealedBlockWithSenders>, TransactionError> {
+        // For blocks we need Headers, Bodies, Uncles, Withdrawals, Transactions, Signers
+
+        let block_headers = self.get_or_take::<tables::Headers, TAKE>(range.clone())?;
+        if block_headers.is_empty() {
+            return Ok(Vec::new())
+        }
+
+        let block_header_hashes =
+            self.get_or_take::<tables::CanonicalHeaders, TAKE>(range.clone())?;
+        let block_ommers = self.get_or_take::<tables::BlockOmmers, TAKE>(range.clone())?;
+        let block_withdrawals =
+            self.get_or_take::<tables::BlockWithdrawals, TAKE>(range.clone())?;
+
+        let block_tx = self.get_take_block_transaction_range::<TAKE>(range.clone())?;
+
+        if TAKE {
+            // rm HeaderTD
+            self.get_or_take::<tables::HeaderTD, TAKE>(range)?;
+            // rm HeaderNumbers
+            let mut header_number_cursor = self.tx.cursor_write::<tables::HeaderNumbers>()?;
+            for (_, hash) in block_header_hashes.iter() {
+                if header_number_cursor.seek_exact(*hash)?.is_some() {
+                    header_number_cursor.delete_current()?;
+                }
+            }
+        }
+
+        // merge all into block
+        let block_header_iter = block_headers.into_iter();
+        let block_header_hashes_iter = block_header_hashes.into_iter();
+        let block_tx_iter = block_tx.into_iter();
+
+        // Ommers can be empty for some blocks
+        let mut block_ommers_iter = block_ommers.into_iter();
+        let mut block_withdrawals_iter = block_withdrawals.into_iter();
+        let mut block_ommers = block_ommers_iter.next();
+        let mut block_withdrawals = block_withdrawals_iter.next();
+
+        let mut blocks = Vec::new();
+        for ((main_block_number, header), (_, header_hash), (_, tx)) in izip!(
+            block_header_iter.into_iter(),
+            block_header_hashes_iter.into_iter(),
+            block_tx_iter.into_iter()
+        ) {
+            let header = header.seal(header_hash);
+
+            let (body, senders) = tx.into_iter().map(|tx| tx.to_components()).unzip();
+
+            // Ommers can be missing
+            let mut ommers = Vec::new();
+            if let Some((block_number, _)) = block_ommers.as_ref() {
+                if *block_number == main_block_number {
+                    ommers = block_ommers.take().unwrap().1.ommers;
+                    block_ommers = block_ommers_iter.next();
+                }
+            };
+
+            // Withdrawals can be missing
+            let shanghai_is_active =
+                chain_spec.fork(Hardfork::Shanghai).active_at_timestamp(header.timestamp);
+            let mut withdrawals = Some(Vec::new());
+            if shanghai_is_active {
+                if let Some((block_number, _)) = block_withdrawals.as_ref() {
+                    if *block_number == main_block_number {
+                        withdrawals = Some(block_withdrawals.take().unwrap().1.withdrawals);
+                        block_withdrawals = block_withdrawals_iter.next();
+                    }
+                }
+            } else {
+                withdrawals = None
+            }
+
+            blocks.push(SealedBlockWithSenders {
+                block: SealedBlock { header, body, ommers, withdrawals },
+                senders,
+            })
+        }
+
+        Ok(blocks)
+    }
+
+    /// Update the progress of all pipeline sync stages.
+    pub fn update_pipeline_stages(
+        &self,
+        block_number: BlockNumber,
+        drop_stage_checkpoint: bool,
+    ) -> std::result::Result<(), TransactionError> {
+        // iterate over all existing stages in the table and update their progress.
+        let mut cursor = self.tx.cursor_write::<tables::SyncStage>()?;
+        while let Some((stage_name, checkpoint)) = cursor.next()? {
+            cursor.upsert(
+                stage_name,
+                StageCheckpoint {
+                    block_number,
+                    ..if drop_stage_checkpoint { Default::default() } else { checkpoint }
+                },
+            )?
+        }
+
+        Ok(())
+    }
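`update_pipeline_stages` leans on struct-update syntax to either keep or reset the stage-specific checkpoint data while always overwriting the block number. A toy illustration of that idiom (the `Checkpoint` type here is a simplified stand-in for `StageCheckpoint`):

```rust
/// Simplified stand-in for a stage checkpoint.
#[derive(Clone, Copy, Debug, Default)]
struct Checkpoint {
    block_number: u64,
    entities_processed: u64, // stand-in for the nested stage-specific data
}

fn rewrite(existing: Checkpoint, block_number: u64, drop_progress: bool) -> Checkpoint {
    Checkpoint {
        block_number,
        // struct-update syntax: the remaining fields come from either a fresh
        // default (full reset) or the existing checkpoint (keep progress).
        ..if drop_progress { Default::default() } else { existing }
    }
}

fn main() {
    let cp = Checkpoint { block_number: 10, entities_processed: 500 };
    assert_eq!(rewrite(cp, 7, false).entities_processed, 500);
    assert_eq!(rewrite(cp, 7, true).entities_processed, 0);
}
```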
+
+    /// Insert a storage change index into the database. Used by the StorageHistoryIndex stage.
+    pub fn insert_storage_history_index(
+        &self,
+        storage_transitions: BTreeMap<(Address, H256), Vec<u64>>,
+    ) -> std::result::Result<(), TransactionError> {
+        for ((address, storage_key), mut indices) in storage_transitions {
+            let mut last_shard = self.take_last_storage_shard(address, storage_key)?;
+            last_shard.append(&mut indices);
+
+            // chunk indices and insert them in shards of N size.
+            let mut chunks = last_shard
+                .iter()
+                .chunks(storage_sharded_key::NUM_OF_INDICES_IN_SHARD)
+                .into_iter()
+                .map(|chunks| chunks.map(|i| *i as usize).collect::<Vec<usize>>())
+                .collect::<Vec<_>>();
+            let last_chunk = chunks.pop();
+
+            chunks.into_iter().try_for_each(|list| {
+                self.tx.put::<tables::StorageHistory>(
+                    StorageShardedKey::new(
+                        address,
+                        storage_key,
+                        *list.last().expect("Chunks do not return an empty list") as BlockNumber,
+                    ),
+                    BlockNumberList::new(list).expect("Indices are presorted and not empty"),
+                )
+            })?;
+            // Insert the last list with u64::MAX
+            if let Some(last_list) = last_chunk {
+                self.tx.put::<tables::StorageHistory>(
+                    StorageShardedKey::new(address, storage_key, u64::MAX),
+                    BlockNumberList::new(last_list).expect("Indices are presorted and not empty"),
+                )?;
+            }
+        }
+        Ok(())
+    }
+
+    /// Insert an account change index into the database. Used by the AccountHistoryIndex stage.
+    pub fn insert_account_history_index(
+        &self,
+        account_transitions: BTreeMap<Address, Vec<u64>>,
+    ) -> std::result::Result<(), TransactionError> {
+        // insert indexes into AccountHistory.
+        for (address, mut indices) in account_transitions {
+            let mut last_shard = self.take_last_account_shard(address)?;
+            last_shard.append(&mut indices);
+            // chunk indices and insert them in shards of N size.
+            let mut chunks = last_shard
+                .iter()
+                .chunks(sharded_key::NUM_OF_INDICES_IN_SHARD)
+                .into_iter()
+                .map(|chunks| chunks.map(|i| *i as usize).collect::<Vec<usize>>())
+                .collect::<Vec<_>>();
+            let last_chunk = chunks.pop();
+
+            chunks.into_iter().try_for_each(|list| {
+                self.tx.put::<tables::AccountHistory>(
+                    ShardedKey::new(
+                        address,
+                        *list.last().expect("Chunks do not return an empty list") as BlockNumber,
+                    ),
+                    BlockNumberList::new(list).expect("Indices are presorted and not empty"),
+                )
+            })?;
+            // Insert the last list with u64::MAX
+            if let Some(last_list) = last_chunk {
+                self.tx.put::<tables::AccountHistory>(
+                    ShardedKey::new(address, u64::MAX),
+                    BlockNumberList::new(last_list).expect("Indices are presorted and not empty"),
+                )?
+            }
+        }
+        Ok(())
+    }
+
+    /// Get the stage checkpoint.
+    pub fn get_stage_checkpoint(
+        &self,
+        id: StageId,
+    ) -> std::result::Result<Option<StageCheckpoint>, DatabaseError> {
+        get_stage_checkpoint(&self.tx, id)
+    }
+
+    /// Save the stage checkpoint.
+    pub fn save_stage_checkpoint(
+        &self,
+        id: StageId,
+        checkpoint: StageCheckpoint,
+    ) -> std::result::Result<(), DatabaseError> {
+        self.tx.put::<tables::SyncStage>(id.to_string(), checkpoint)?;
+        Ok(())
+    }
+
+    /// Get the stage checkpoint progress.
+    pub fn get_stage_checkpoint_progress(
+        &self,
+        id: StageId,
+    ) -> std::result::Result<Option<Vec<u8>>, DatabaseError> {
+        self.tx.get::<tables::SyncStageProgress>(id.to_string())
+    }
+
+    /// Save the stage checkpoint progress.
+    pub fn save_stage_checkpoint_progress(
+        &self,
+        id: StageId,
+        checkpoint: Vec<u8>,
+    ) -> std::result::Result<(), DatabaseError> {
+        self.tx.put::<tables::SyncStageProgress>(id.to_string(), checkpoint)?;
+        Ok(())
+    }
+
+    /// Get the latest block number.
+    pub fn tip_number(&self) -> std::result::Result<BlockNumber, DatabaseError> {
+        Ok(self.tx.cursor_read::<tables::CanonicalHeaders>()?.last()?.unwrap_or_default().0)
+    }
+
+    /// Query the [tables::CanonicalHeaders] table for a block hash by block number
+    pub fn get_block_hash(
+        &self,
+        block_number: BlockNumber,
+    ) -> std::result::Result<BlockHash, TransactionError> {
+        let hash = self
+            .tx
+            .get::<tables::CanonicalHeaders>(block_number)?
+            .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?;
+        Ok(hash)
+    }
+
+    /// Query the block body indices by number.
+    pub fn block_body_indices(
+        &self,
+        number: BlockNumber,
+    ) -> std::result::Result<StoredBlockBodyIndices, TransactionError> {
+        let body = self
+            .tx
+            .get::<tables::BlockBodyIndices>(number)?
+            .ok_or(ProviderError::BlockBodyIndicesNotFound(number))?;
+        Ok(body)
+    }
+
+    /// Query the block header by number
+    pub fn get_header(&self, number: BlockNumber) -> std::result::Result<Header, TransactionError> {
+        let header = self
+            .tx
+            .get::<tables::Headers>(number)?
+            .ok_or_else(|| ProviderError::HeaderNotFound(number.into()))?;
+        Ok(header)
+    }
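Both history-index writers above follow the same sharding scheme: indices are split into fixed-size shards keyed by their highest block number, and the final (possibly partial) shard is stored under a `u64::MAX` sentinel key so the next append can find it with a single `seek_exact`. A minimal sketch (the shard size is illustrative, not reth's `NUM_OF_INDICES_IN_SHARD`):

```rust
/// Illustrative shard size; reth uses a much larger constant.
const SHARD_SIZE: usize = 3;

/// Split sorted block indices into (shard key, shard contents) pairs.
fn shard(indices: &[u64]) -> Vec<(u64, Vec<u64>)> {
    let mut shards: Vec<(u64, Vec<u64>)> = indices
        .chunks(SHARD_SIZE)
        .map(|chunk| (*chunk.last().expect("chunks are non-empty"), chunk.to_vec()))
        .collect();
    // Re-key the last shard with the sentinel so future appends can locate it.
    if let Some(last) = shards.last_mut() {
        last.0 = u64::MAX;
    }
    shards
}

fn main() {
    let shards = shard(&[1, 2, 3, 5, 8]);
    assert_eq!(shards, vec![(3, vec![1, 2, 3]), (u64::MAX, vec![5, 8])]);
}
```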
+
+    /// Get the total difficulty for a block.
+    pub fn get_td(&self, block: BlockNumber) -> std::result::Result<U256, TransactionError> {
+        let td = self
+            .tx
+            .get::<tables::HeaderTD>(block)?
+            .ok_or(ProviderError::TotalDifficultyNotFound { number: block })?;
+        Ok(td.into())
+    }
+
+    /// Unwind the table by a number key.
+    /// Returns the number of rows unwound.
+    ///
+    /// Note: the key is not inclusive; the specified key stays in the db.
+    #[inline]
+    pub fn unwind_table_by_num<T>(&self, num: u64) -> std::result::Result<usize, DatabaseError>
+    where
+        T: Table<Key = u64>,
+    {
+        self.unwind_table::<T, _>(num, |key| key)
+    }
+
+    /// Unwind the table to the provided number key.
+    /// Returns the number of rows unwound.
+    ///
+    /// Note: the key is not inclusive; the specified key stays in the db.
+    pub(crate) fn unwind_table<T, F>(
+        &self,
+        key: u64,
+        mut selector: F,
+    ) -> std::result::Result<usize, DatabaseError>
+    where
+        T: Table,
+        F: FnMut(T::Key) -> u64,
+    {
+        let mut cursor = self.tx.cursor_write::<T>()?;
+        let mut reverse_walker = cursor.walk_back(None)?;
+        let mut deleted = 0;
+
+        while let Some(Ok((entry_key, _))) = reverse_walker.next() {
+            if selector(entry_key.clone()) <= key {
+                break
+            }
+            reverse_walker.delete_current()?;
+            deleted += 1;
+        }
+
+        Ok(deleted)
+    }
+
+    /// Unwind a table forward by a [Walker][reth_db::abstraction::cursor::Walker] on another table
+    pub fn unwind_table_by_walker<T1, T2>(
+        &self,
+        start_at: T1::Key,
+    ) -> std::result::Result<(), DatabaseError>
+    where
+        T1: Table,
+        T2: Table<Key = T1::Value>,
+    {
+        let mut cursor = self.tx.cursor_write::<T1>()?;
+        let mut walker = cursor.walk(Some(start_at))?;
+        while let Some((_, value)) = walker.next().transpose()? {
+            self.tx.delete::<T2>(value, None)?;
+        }
+        Ok(())
+    }
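`unwind_table` walks the table backwards and deletes rows until the selector maps a key at or below the target, which is why the target key itself survives. The same exclusive-unwind logic on a plain `BTreeMap`:

```rust
use std::collections::BTreeMap;

/// Delete every row whose key is strictly greater than `key`,
/// returning the number of deleted rows. `key` itself is kept.
fn unwind_by_key(table: &mut BTreeMap<u64, &'static str>, key: u64) -> usize {
    let mut deleted = 0;
    loop {
        // Peek at the last (highest) key, mirroring the reverse walker.
        let last = match table.keys().next_back() {
            Some(&k) if k > key => k,
            _ => break, // at or below the boundary: stop
        };
        table.remove(&last);
        deleted += 1;
    }
    deleted
}

fn main() {
    let mut table = BTreeMap::from([(1, "a"), (2, "b"), (3, "c"), (4, "d")]);
    assert_eq!(unwind_by_key(&mut table, 2), 2); // rows 3 and 4 removed
    assert!(table.contains_key(&2)); // the target key stays in the db
}
```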
+
+    /// Load the last shard and, if it is not full, remove it. If the returned list is
+    /// empty, the last shard was full or there are no shards at all.
+    fn take_last_account_shard(
+        &self,
+        address: Address,
+    ) -> std::result::Result<Vec<u64>, TransactionError> {
+        let mut cursor = self.tx.cursor_read::<tables::AccountHistory>()?;
+        let last = cursor.seek_exact(ShardedKey::new(address, u64::MAX))?;
+        if let Some((shard_key, list)) = last {
+            // delete the old shard so a new one can be inserted.
+            self.tx.delete::<tables::AccountHistory>(shard_key, None)?;
+            let list = list.iter(0).map(|i| i as u64).collect::<Vec<_>>();
+            return Ok(list)
+        }
+        Ok(Vec::new())
+    }
+
+    /// Load the last shard and, if it is not full, remove it. If the returned list is
+    /// empty, the last shard was full or there are no shards at all.
+    pub fn take_last_storage_shard(
+        &self,
+        address: Address,
+        storage_key: H256,
+    ) -> std::result::Result<Vec<u64>, TransactionError> {
+        let mut cursor = self.tx.cursor_read::<tables::StorageHistory>()?;
+        let last = cursor.seek_exact(StorageShardedKey::new(address, storage_key, u64::MAX))?;
+        if let Some((storage_shard_key, list)) = last {
+            // delete the old shard so a new one can be inserted.
+            self.tx.delete::<tables::StorageHistory>(storage_shard_key, None)?;
+            let list = list.iter(0).map(|i| i as u64).collect::<Vec<_>>();
+            return Ok(list)
+        }
+        Ok(Vec::new())
+    }
+
+    /// Iterate over storages and insert them into the hashing table
+    pub fn insert_storage_for_hashing(
+        &self,
+        storages: impl IntoIterator<Item = (Address, impl IntoIterator<Item = (H256, U256)>)>,
+    ) -> std::result::Result<(), TransactionError> {
+        // hash values
+        let hashed = storages.into_iter().fold(BTreeMap::new(), |mut map, (address, storage)| {
+            let storage = storage.into_iter().fold(BTreeMap::new(), |mut map, (key, value)| {
+                map.insert(keccak256(key), value);
+                map
+            });
+            map.insert(keccak256(address), storage);
+            map
+        });
+
+        let mut hashed_storage = self.tx.cursor_dup_write::<tables::HashedStorage>()?;
+        // Hash the address and key and apply them to HashedStorage (if Storage is None
+        // just remove it);
+        hashed.into_iter().try_for_each(|(hashed_address, storage)| {
+            storage.into_iter().try_for_each(
+                |(key, value)| -> std::result::Result<(), TransactionError> {
+                    if hashed_storage
+                        .seek_by_key_subkey(hashed_address, key)?
+                        .filter(|entry| entry.key == key)
+                        .is_some()
+                    {
+                        hashed_storage.delete_current()?;
+                    }
+
+                    if value != U256::ZERO {
+                        hashed_storage.upsert(hashed_address, StorageEntry { key, value })?;
+                    }
+                    Ok(())
+                },
+            )
+        })?;
+        Ok(())
+    }
+
+    /// Iterate over accounts and insert them into the hashing table
+    pub fn insert_account_for_hashing(
+        &self,
+        accounts: impl IntoIterator<Item = (Address, Option<Account>)>,
+    ) -> std::result::Result<(), TransactionError> {
+        let mut hashed_accounts = self.tx.cursor_write::<tables::HashedAccount>()?;
+
+        let hashes_accounts = accounts.into_iter().fold(
+            BTreeMap::new(),
+            |mut map: BTreeMap<H256, Option<Account>>, (address, account)| {
+                map.insert(keccak256(address), account);
+                map
+            },
+        );
+
+        hashes_accounts.into_iter().try_for_each(
+            |(hashed_address, account)| -> std::result::Result<(), TransactionError> {
+                if let Some(account) = account {
+                    hashed_accounts.upsert(hashed_address, account)?
+                } else if hashed_accounts.seek_exact(hashed_address)?.is_some() {
+                    hashed_accounts.delete_current()?;
+                }
+                Ok(())
+            },
+        )?;
+        Ok(())
+    }
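Both hashing writers fold plain-state entries into a `BTreeMap` keyed by the hashed address first, so each address is hashed exactly once and the subsequent table writes happen in sorted key order, which is friendly to the B-tree backed store. A stand-in sketch of that fold (the hash function below is illustrative; reth uses `keccak256`):

```rust
use std::collections::BTreeMap;

/// Illustrative stand-in hash; not keccak256.
fn stand_in_hash(addr: &str) -> u64 {
    addr.bytes().fold(0u64, |acc, b| acc.wrapping_mul(31).wrapping_add(b as u64))
}

/// Fold (address, maybe-account) pairs into a map keyed by hashed address,
/// so each address is hashed once and iteration is in sorted hash order.
fn hashed_accounts(
    accounts: impl IntoIterator<Item = (&'static str, Option<u64>)>, // (address, balance)
) -> BTreeMap<u64, Option<u64>> {
    accounts.into_iter().fold(BTreeMap::new(), |mut map, (address, account)| {
        map.insert(stand_in_hash(address), account);
        map
    })
}

fn main() {
    let hashed = hashed_accounts([("0xaa", Some(1)), ("0xbb", None)]);
    for (hashed_address, account) in hashed {
        match account {
            Some(balance) => println!("upsert {hashed_address:x} -> {balance}"), // live account
            None => println!("delete {hashed_address:x}"), // destroyed account: remove if present
        }
    }
}
```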
+
+    /// Append blocks and insert their post state.
+    /// This inserts block data into all related tables and updates the pipeline progress.
+    pub fn append_blocks_with_post_state(
+        &mut self,
+        blocks: Vec<SealedBlockWithSenders>,
+        state: PostState,
+    ) -> std::result::Result<(), TransactionError> {
+        if blocks.is_empty() {
+            return Ok(())
+        }
+        let new_tip = blocks.last().unwrap();
+        let new_tip_number = new_tip.number;
+
+        let first_number = blocks.first().unwrap().number;
+
+        let last = blocks.last().unwrap();
+        let last_block_number = last.number;
+        let last_block_hash = last.hash();
+        let expected_state_root = last.state_root;
+
+        // Insert the blocks
+        for block in blocks {
+            let (block, senders) = block.into_components();
+            insert_canonical_block(self.tx_mut(), block, Some(senders))?;
+        }
+
+        // Write state and changesets to the database.
+        // Must be written after blocks because of the receipt lookup.
+        state.write_to_db(self.tx_mut())?;
+
+        self.insert_hashes(first_number..=last_block_number, last_block_hash, expected_state_root)?;
+
+        self.calculate_history_indices(first_number..=last_block_number)?;
+
+        // Update pipeline progress
+        self.update_pipeline_stages(new_tip_number, false)?;
+
+        Ok(())
+    }
+
+    /// Insert a full block and make it canonical.
+    pub fn insert_block(
+        &mut self,
+        block: SealedBlock,
+        senders: Option<Vec<Address>>,
+    ) -> std::result::Result<(), TransactionError> {
+        insert_canonical_block(self.tx_mut(), block, senders)?;
+        Ok(())
+    }
+
+    /// Read account/storage changesets and update account/storage history indices.
+    pub fn calculate_history_indices(
+        &mut self,
+        range: RangeInclusive<BlockNumber>,
+    ) -> std::result::Result<(), TransactionError> {
+        // account history stage
+        {
+            let indices = self.get_account_transition_ids_from_changeset(range.clone())?;
+            self.insert_account_history_index(indices)?;
+        }
+
+        // storage history stage
+        {
+            let indices = self.get_storage_transition_ids_from_changeset(range)?;
+            self.insert_storage_history_index(indices)?;
+        }
+
+        Ok(())
+    }
+
+    /// Calculate the hashes of all changed accounts and storages, and finally calculate the
+    /// state root.
+    ///
+    /// Hashes are calculated for the given inclusive block range, and the resulting state
+    /// root is compared with `expected_state_root`.
+    pub fn insert_hashes(
+        &mut self,
+        range: RangeInclusive<BlockNumber>,
+        end_block_hash: H256,
+        expected_state_root: H256,
+    ) -> std::result::Result<(), TransactionError> {
+        // storage hashing stage
+        {
+            let lists = self.get_addresses_and_keys_of_changed_storages(range.clone())?;
+            let storages = self.get_plainstate_storages(lists.into_iter())?;
+            self.insert_storage_for_hashing(storages.into_iter())?;
+        }
+
+        // account hashing stage
+        {
+            let lists = self.get_addresses_of_changed_accounts(range.clone())?;
+            let accounts = self.get_plainstate_accounts(lists.into_iter())?;
+            self.insert_account_for_hashing(accounts.into_iter())?;
+        }
+
+        // merkle tree
+        {
+            let (state_root, trie_updates) =
+                StateRoot::incremental_root_with_updates(&self.tx, range.clone())?;
+            if state_root != expected_state_root {
+                return Err(TransactionError::StateRootMismatch {
+                    got: state_root,
+                    expected: expected_state_root,
+                    block_number: *range.end(),
+                    block_hash: end_block_hash,
+                })
+            }
+            trie_updates.flush(&self.tx)?;
+        }
+        Ok(())
+    }
+}
+
+impl<'this, TX: DbTx<'this>> AccountProvider for DatabaseProvider<'this, TX> {
+    fn basic_account(&self, address: Address) -> Result<Option<Account>> {
+        Ok(self.tx.get::<tables::PlainAccountState>(address)?)
+    }
+}
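Putting the pieces together, a caller of the new API opens a provider from the factory, appends blocks plus their post state, and commits explicitly. A hedged sketch of that flow (method names are taken from this diff; the import paths, the concrete DB type, and the `eyre` error handling are abbreviated assumptions, not verbatim reth code):

```rust
use std::sync::Arc;

use reth_db::mdbx::{Env, WriteMap};
use reth_primitives::{ChainSpec, SealedBlockWithSenders};
use reth_provider::{PostState, ShareableDatabase};

/// Hypothetical wrapper showing the write path this diff establishes.
fn write_chain_segment(
    db: Arc<Env<WriteMap>>,
    chain_spec: Arc<ChainSpec>,
    blocks: Vec<SealedBlockWithSenders>,
    state: PostState,
) -> eyre::Result<()> {
    let factory = ShareableDatabase::new(db.as_ref(), chain_spec);
    let mut provider = factory.provider_rw()?;

    // Writes blocks, state, hashes, history indices, and stage progress...
    provider.append_blocks_with_post_state(blocks, state)?;

    // ...but nothing is visible to readers until the explicit commit.
    provider.commit()?;
    Ok(())
}
```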
+
+impl<'this, TX: DbTx<'this>> AccountExtProvider for DatabaseProvider<'this, TX> {
+    fn changed_accounts_with_range(
+        &self,
+        range: impl RangeBounds<BlockNumber>,
+    ) -> Result<BTreeSet<Address>> {
+        Ok(self
+            .tx
+            .cursor_read::<tables::AccountChangeSet>()?
+            .walk_range(range)?
+            .collect::<std::result::Result<Vec<_>, _>>()?
+            .into_iter()
+            // fold all accounts into one set of changed accounts
+            .fold(BTreeSet::new(), |mut accounts: BTreeSet<Address>, (_, account_before)| {
+                accounts.insert(account_before.address);
+                accounts
+            }))
+    }
+
+    fn basic_accounts(
+        &self,
+        iter: impl IntoIterator<Item = Address>,
+    ) -> Result<Vec<(Address, Option<Account>)>> {
+        let mut plain_accounts = self.tx.cursor_read::<tables::PlainAccountState>()?;
+        Ok(iter
+            .into_iter()
+            .map(|address| plain_accounts.seek_exact(address).map(|a| (address, a.map(|(_, v)| v))))
+            .collect::<std::result::Result<Vec<_>, _>>()?)
+    }
+}
 
 impl<'this, TX: DbTx<'this>> HeaderProvider for DatabaseProvider<'this, TX> {
diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs
index a9349698c..7df40d45e 100644
--- a/crates/storage/provider/src/test_utils/blocks.rs
+++ b/crates/storage/provider/src/test_utils/blocks.rs
@@ -1,6 +1,6 @@
 //! Dummy blocks and data for tests
-use crate::{post_state::PostState, Transaction};
+use crate::{post_state::PostState, DatabaseProviderRW};
 use reth_db::{database::Database, models::StoredBlockBodyIndices, tables};
 use reth_primitives::{
     hex_literal::hex, Account, BlockNumber, Bytes, Header, Log, Receipt, SealedBlock,
@@ -10,9 +10,11 @@ use reth_rlp::Decodable;
 use std::collections::BTreeMap;
 
 /// Assert genesis block
-pub fn assert_genesis_block<DB: Database>(tx: &Transaction<'_, DB>, g: SealedBlock) {
+pub fn assert_genesis_block<DB: Database>(provider: &DatabaseProviderRW<'_, DB>, g: SealedBlock) {
     let n = g.number;
     let h = H256::zero();
+    let tx = provider;
+
     // check if all tables are empty
     assert_eq!(tx.table::<tables::Headers>().unwrap(), vec![(g.number, g.header.clone().unseal())]);
diff --git a/crates/storage/provider/src/traits/account.rs b/crates/storage/provider/src/traits/account.rs
index 5660ebb07..6a48104f5 100644
--- a/crates/storage/provider/src/traits/account.rs
+++ b/crates/storage/provider/src/traits/account.rs
@@ -1,6 +1,7 @@
 use auto_impl::auto_impl;
 use reth_interfaces::Result;
-use reth_primitives::{Account, Address};
+use reth_primitives::{Account, Address, BlockNumber};
+use std::{collections::BTreeSet, ops::RangeBounds};
 
 /// Account provider
 #[auto_impl(&, Arc, Box)]
@@ -10,3 +11,22 @@ pub trait AccountProvider: Send + Sync {
     /// Returns `None` if the account doesn't exist.
     fn basic_account(&self, address: Address) -> Result<Option<Account>>;
 }
+
+/// Extended account provider
+#[auto_impl(&, Arc, Box)]
+pub trait AccountExtProvider: Send + Sync {
+    /// Iterate over account changesets and return all account addresses that were changed.
+    fn changed_accounts_with_range(
+        &self,
+        _range: impl RangeBounds<BlockNumber>,
+    ) -> Result<BTreeSet<Address>>;
+
+    /// Get basic account information for multiple accounts. A more efficient version than
+    /// calling [`AccountProvider::basic_account`] repeatedly.
+    ///
+    /// Returns `None` if the account doesn't exist.
+    fn basic_accounts(
+        &self,
+        _iter: impl IntoIterator<Item = Address>,
+    ) -> Result<Vec<(Address, Option<Account>)>>;
+}
diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs
index 11439a87f..1c7c56807 100644
--- a/crates/storage/provider/src/traits/mod.rs
+++ b/crates/storage/provider/src/traits/mod.rs
@@ -1,7 +1,7 @@
 //! Collection of common provider traits.
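A small sketch of how the new `AccountExtProvider` trait defined above composes: collect the set of addresses touched in a block range, then batch-load their current state. The wrapper function is illustrative, not part of this diff:

```rust
use reth_interfaces::Result;
use reth_primitives::{Account, Address, BlockNumber};
use reth_provider::AccountExtProvider;
use std::ops::RangeInclusive;

/// Hypothetical helper: fetch the current state of every account
/// touched in the given block range.
fn changed_account_states<P: AccountExtProvider>(
    provider: &P,
    range: RangeInclusive<BlockNumber>,
) -> Result<Vec<(Address, Option<Account>)>> {
    let changed = provider.changed_accounts_with_range(range)?;
    provider.basic_accounts(changed)
}
```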
mod account; -pub use account::AccountProvider; +pub use account::{AccountExtProvider, AccountProvider}; mod block; pub use block::{BlockProvider, BlockProviderIdExt, BlockSource}; diff --git a/crates/storage/provider/src/transaction.rs b/crates/storage/provider/src/transaction.rs index 954e615a5..24a470ebd 100644 --- a/crates/storage/provider/src/transaction.rs +++ b/crates/storage/provider/src/transaction.rs @@ -1,1412 +1,7 @@ -use crate::{ - insert_canonical_block, - post_state::{PostState, StorageChangeset}, - providers::get_stage_checkpoint, -}; -use itertools::{izip, Itertools}; -use reth_db::{ - common::KeyValue, - cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, - database::{Database, DatabaseGAT}, - models::{ - sharded_key, - storage_sharded_key::{self, StorageShardedKey}, - AccountBeforeTx, BlockNumberAddress, ShardedKey, StoredBlockBodyIndices, - }, - table::Table, - tables, - transaction::{DbTx, DbTxMut, DbTxMutGAT}, - BlockNumberList, -}; use reth_interfaces::{db::DatabaseError as DbError, provider::ProviderError}; -use reth_primitives::{ - keccak256, - stage::{StageCheckpoint, StageId}, - Account, Address, BlockHash, BlockNumber, ChainSpec, Hardfork, Header, SealedBlock, - SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionSigned, - TransactionSignedEcRecovered, H256, U256, -}; -use reth_trie::{StateRoot, StateRootError}; -use std::{ - collections::{btree_map::Entry, BTreeMap, BTreeSet}, - fmt::Debug, - ops::{Deref, DerefMut, Range, RangeBounds, RangeInclusive}, -}; - -/// A container for any DB transaction that will open a new inner transaction when the current -/// one is committed. -// NOTE: This container is needed since `Transaction::commit` takes `mut self`, so methods in -// the pipeline that just take a reference will not be able to commit their transaction and let -// the pipeline continue. Is there a better way to do this? -// -// TODO: Re-evaluate if this is actually needed, this was introduced as a way to manage the -// lifetime of the `TXMut` and having a nice API for re-opening a new transaction after `commit` -pub struct Transaction<'this, DB: Database> { - /// A handle to the DB. - pub(crate) db: &'this DB, - tx: Option<>::TXMut>, -} - -impl<'a, DB: Database> Debug for Transaction<'a, DB> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Transaction").finish() - } -} - -impl<'a, DB: Database> Deref for Transaction<'a, DB> { - type Target = >::TXMut; - - /// Dereference as the inner transaction. - /// - /// # Panics - /// - /// Panics if an inner transaction does not exist. This should never be the case unless - /// [Transaction::close] was called without following up with a call to [Transaction::open]. - fn deref(&self) -> &Self::Target { - self.tx.as_ref().expect("Tried getting a reference to a non-existent transaction") - } -} - -impl<'a, DB: Database> DerefMut for Transaction<'a, DB> { - /// Dereference as a mutable reference to the inner transaction. - /// - /// # Panics - /// - /// Panics if an inner transaction does not exist. This should never be the case unless - /// [Transaction::close] was called without following up with a call to [Transaction::open]. - fn deref_mut(&mut self) -> &mut Self::Target { - self.tx.as_mut().expect("Tried getting a mutable reference to a non-existent transaction") - } -} - -// === Core impl === - -impl<'this, DB> Transaction<'this, DB> -where - DB: Database, -{ - /// Create a new container with the given database handle. 
- /// - /// A new inner transaction will be opened. - pub fn new(db: &'this DB) -> Result { - Ok(Self { db, tx: Some(db.tx_mut()?) }) - } - - /// Creates a new container with given database and transaction handles. - pub fn new_raw(db: &'this DB, tx: >::TXMut) -> Self { - Self { db, tx: Some(tx) } - } - - /// Accessor to the internal Database - pub fn inner(&self) -> &'this DB { - self.db - } - - /// Drops the current inner transaction and open a new one. - pub fn drop(&mut self) -> Result<(), DbError> { - if let Some(tx) = self.tx.take() { - drop(tx); - } - - self.tx = Some(self.db.tx_mut()?); - - Ok(()) - } - - /// Open a new inner transaction. - pub fn open(&mut self) -> Result<(), DbError> { - self.tx = Some(self.db.tx_mut()?); - Ok(()) - } - - /// Close the current inner transaction. - pub fn close(&mut self) { - self.tx.take(); - } - - /// Commit the current inner transaction and open a new one. - /// - /// # Panics - /// - /// Panics if an inner transaction does not exist. This should never be the case unless - /// [Transaction::close] was called without following up with a call to [Transaction::open]. - pub fn commit(&mut self) -> Result { - let success = if let Some(tx) = self.tx.take() { tx.commit()? } else { false }; - self.tx = Some(self.db.tx_mut()?); - Ok(success) - } -} - -// === Misc helpers === - -impl<'this, DB> Transaction<'this, DB> -where - DB: Database, -{ - /// Get lastest block number. - pub fn tip_number(&self) -> Result { - Ok(self.cursor_read::()?.last()?.unwrap_or_default().0) - } - - /// Query [tables::CanonicalHeaders] table for block hash by block number - pub fn get_block_hash(&self, block_number: BlockNumber) -> Result { - let hash = self - .get::(block_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; - Ok(hash) - } - - /// Query the block body by number. - pub fn block_body_indices( - &self, - number: BlockNumber, - ) -> Result { - let body = self - .get::(number)? - .ok_or(ProviderError::BlockBodyIndicesNotFound(number))?; - Ok(body) - } - - /// Query the block header by number - pub fn get_header(&self, number: BlockNumber) -> Result { - let header = self - .get::(number)? - .ok_or_else(|| ProviderError::HeaderNotFound(number.into()))?; - Ok(header) - } - - /// Get the total difficulty for a block. - pub fn get_td(&self, block: BlockNumber) -> Result { - let td = self - .get::(block)? - .ok_or(ProviderError::TotalDifficultyNotFound { number: block })?; - Ok(td.into()) - } - - /// Query the sealed header by number - pub fn get_sealed_header(&self, number: BlockNumber) -> Result { - let header = self.get_header(number)?; - let block_hash = self.get_block_hash(number)?; - Ok(header.seal(block_hash)) - } - - /// Unwind table by some number key. - /// Returns number of rows unwound. - /// - /// Note: Key is not inclusive and specified key would stay in db. - #[inline] - pub fn unwind_table_by_num(&self, num: u64) -> Result - where - DB: Database, - T: Table, - { - self.unwind_table::(num, |key| key) - } - - /// Unwind the table to a provided number key. - /// Returns number of rows unwound. - /// - /// Note: Key is not inclusive and specified key would stay in db. 
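For context on what is being deleted here: the `Transaction` container's whole purpose was the `Option`-held inner transaction that `commit` takes out, commits, and replaces with a fresh one, so pipeline code holding only `&mut self` can keep working across commits. A generic toy version of that pattern:

```rust
/// Toy version of the removed container: the database and transaction are
/// stand-ins (a log and a pending buffer), but the Option-take-and-reopen
/// shape of `commit` is the same.
struct Reopenable {
    log: Vec<String>,           // stand-in for the database
    inner: Option<Vec<String>>, // stand-in for the open transaction
}

impl Reopenable {
    fn new() -> Self {
        Self { log: Vec::new(), inner: Some(Vec::new()) }
    }

    fn put(&mut self, entry: &str) {
        self.inner
            .as_mut()
            .expect("transaction exists unless close() was called")
            .push(entry.to_string());
    }

    fn commit(&mut self) -> bool {
        // Take the inner transaction out, "commit" it, then reopen a new one.
        let committed = if let Some(tx) = self.inner.take() {
            self.log.extend(tx);
            true
        } else {
            false
        };
        self.inner = Some(Vec::new());
        committed
    }
}

fn main() {
    let mut tx = Reopenable::new();
    tx.put("a");
    assert!(tx.commit());
    tx.put("b"); // still usable: a fresh inner transaction was opened
    assert_eq!(tx.log, vec!["a".to_string()]);
}
```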
- pub(crate) fn unwind_table(&self, key: u64, mut selector: F) -> Result - where - DB: Database, - T: Table, - F: FnMut(T::Key) -> u64, - { - let mut cursor = self.cursor_write::()?; - let mut reverse_walker = cursor.walk_back(None)?; - let mut deleted = 0; - - while let Some(Ok((entry_key, _))) = reverse_walker.next() { - if selector(entry_key.clone()) <= key { - break - } - reverse_walker.delete_current()?; - deleted += 1; - } - - Ok(deleted) - } - - /// Unwind a table forward by a [Walker][reth_db::abstraction::cursor::Walker] on another table - pub fn unwind_table_by_walker(&self, start_at: T1::Key) -> Result<(), DbError> - where - DB: Database, - T1: Table, - T2: Table, - { - let mut cursor = self.cursor_write::()?; - let mut walker = cursor.walk(Some(start_at))?; - while let Some((_, value)) = walker.next().transpose()? { - self.delete::(value, None)?; - } - Ok(()) - } - - /// Load last shard and check if it is full and remove if it is not. If list is empty, last - /// shard was full or there is no shards at all. - fn take_last_account_shard(&self, address: Address) -> Result, TransactionError> { - let mut cursor = self.cursor_read::()?; - let last = cursor.seek_exact(ShardedKey::new(address, u64::MAX))?; - if let Some((shard_key, list)) = last { - // delete old shard so new one can be inserted. - self.delete::(shard_key, None)?; - let list = list.iter(0).map(|i| i as u64).collect::>(); - return Ok(list) - } - Ok(Vec::new()) - } - - /// Load last shard and check if it is full and remove if it is not. If list is empty, last - /// shard was full or there is no shards at all. - pub fn take_last_storage_shard( - &self, - address: Address, - storage_key: H256, - ) -> Result, TransactionError> { - let mut cursor = self.cursor_read::()?; - let last = cursor.seek_exact(StorageShardedKey::new(address, storage_key, u64::MAX))?; - if let Some((storage_shard_key, list)) = last { - // delete old shard so new one can be inserted. - self.delete::(storage_shard_key, None)?; - let list = list.iter(0).map(|i| i as u64).collect::>(); - return Ok(list) - } - Ok(Vec::new()) - } -} - -// === Stages impl === - -impl<'this, DB> Transaction<'this, DB> -where - DB: Database, -{ - /// Get range of blocks and its execution result - pub fn get_block_and_execution_range( - &self, - chain_spec: &ChainSpec, - range: RangeInclusive, - ) -> Result, TransactionError> { - self.get_take_block_and_execution_range::(chain_spec, range) - } - - /// Take range of blocks and its execution result - pub fn take_block_and_execution_range( - &self, - chain_spec: &ChainSpec, - range: RangeInclusive, - ) -> Result, TransactionError> { - self.get_take_block_and_execution_range::(chain_spec, range) - } - - /// Unwind and clear account hashing. - pub fn unwind_account_hashing( - &self, - range: RangeInclusive, - ) -> Result<(), TransactionError> { - let mut hashed_accounts = self.cursor_write::()?; - - // Aggregate all transition changesets and make a list of accounts that have been changed. - self.cursor_read::()? - .walk_range(range)? - .collect::, _>>()? - .into_iter() - .rev() - // fold all account to get the old balance/nonces and account that needs to be removed - .fold( - BTreeMap::new(), - |mut accounts: BTreeMap>, (_, account_before)| { - accounts.insert(account_before.address, account_before.info); - accounts - }, - ) - .into_iter() - // hash addresses and collect it inside sorted BTreeMap. - // We are doing keccak only once per address. 
- .map(|(address, account)| (keccak256(address), account)) - .collect::>() - .into_iter() - // Apply values to HashedState (if Account is None remove it); - .try_for_each(|(hashed_address, account)| -> Result<(), TransactionError> { - if let Some(account) = account { - hashed_accounts.upsert(hashed_address, account)?; - } else if hashed_accounts.seek_exact(hashed_address)?.is_some() { - hashed_accounts.delete_current()?; - } - Ok(()) - })?; - - Ok(()) - } - - /// Unwind and clear storage hashing. - pub fn unwind_storage_hashing( - &self, - range: Range, - ) -> Result<(), TransactionError> { - let mut hashed_storage = self.cursor_dup_write::()?; - - // Aggregate all transition changesets and make list of accounts that have been changed. - self.cursor_read::()? - .walk_range(range)? - .collect::, _>>()? - .into_iter() - .rev() - // fold all account to get the old balance/nonces and account that needs to be removed - .fold( - BTreeMap::new(), - |mut accounts: BTreeMap<(Address, H256), U256>, - (BlockNumberAddress((_, address)), storage_entry)| { - accounts.insert((address, storage_entry.key), storage_entry.value); - accounts - }, - ) - .into_iter() - // hash addresses and collect it inside sorted BTreeMap. - // We are doing keccak only once per address. - .map(|((address, key), value)| ((keccak256(address), keccak256(key)), value)) - .collect::>() - .into_iter() - // Apply values to HashedStorage (if Value is zero just remove it); - .try_for_each(|((hashed_address, key), value)| -> Result<(), TransactionError> { - if hashed_storage - .seek_by_key_subkey(hashed_address, key)? - .filter(|entry| entry.key == key) - .is_some() - { - hashed_storage.delete_current()?; - } - - if value != U256::ZERO { - hashed_storage.upsert(hashed_address, StorageEntry { key, value })?; - } - Ok(()) - })?; - - Ok(()) - } - - /// Unwind and clear account history indices. - /// - /// Returns number of changesets walked. - pub fn unwind_account_history_indices( - &self, - range: RangeInclusive, - ) -> Result { - let account_changeset = self - .cursor_read::()? - .walk_range(range)? - .collect::, _>>()?; - let changesets = account_changeset.len(); - - let last_indices = account_changeset - .into_iter() - // reverse so we can get lowest transition id where we need to unwind account. - .rev() - // fold all account and get last transition index - .fold(BTreeMap::new(), |mut accounts: BTreeMap, (index, account)| { - // we just need address and lowest transition id. - accounts.insert(account.address, index); - accounts - }); - - // try to unwind the index - let mut cursor = self.cursor_write::()?; - for (address, rem_index) in last_indices { - let shard_part = unwind_account_history_shards::(&mut cursor, address, rem_index)?; - - // check last shard_part, if present, items needs to be reinserted. - if !shard_part.is_empty() { - // there are items in list - self.put::( - ShardedKey::new(address, u64::MAX), - BlockNumberList::new(shard_part) - .expect("There is at least one element in list and it is sorted."), - )?; - } - } - - Ok(changesets) - } - - /// Unwind and clear storage history indices. - /// - /// Returns number of changesets walked. - pub fn unwind_storage_history_indices( - &self, - range: Range, - ) -> Result { - let storage_changesets = self - .cursor_read::()? - .walk_range(range)? - .collect::, _>>()?; - let changesets = storage_changesets.len(); - - let last_indices = storage_changesets - .into_iter() - // reverse so we can get lowest transition id where we need to unwind account. 
- .rev() - // fold all storages and get last transition index - .fold( - BTreeMap::new(), - |mut accounts: BTreeMap<(Address, H256), u64>, (index, storage)| { - // we just need address and lowest transition id. - accounts.insert((index.address(), storage.key), index.block_number()); - accounts - }, - ); - - let mut cursor = self.cursor_write::()?; - for ((address, storage_key), rem_index) in last_indices { - let shard_part = - unwind_storage_history_shards::(&mut cursor, address, storage_key, rem_index)?; - - // check last shard_part, if present, items needs to be reinserted. - if !shard_part.is_empty() { - // there are items in list - self.put::( - StorageShardedKey::new(address, storage_key, u64::MAX), - BlockNumberList::new(shard_part) - .expect("There is at least one element in list and it is sorted."), - )?; - } - } - - Ok(changesets) - } - - /// Append blocks and insert its post state. - /// This will insert block data to all related tables and will update pipeline progress. - pub fn append_blocks_with_post_state( - &mut self, - blocks: Vec, - state: PostState, - ) -> Result<(), TransactionError> { - if blocks.is_empty() { - return Ok(()) - } - let new_tip = blocks.last().unwrap(); - let new_tip_number = new_tip.number; - - let first_number = blocks.first().unwrap().number; - - let last = blocks.last().unwrap(); - let last_block_number = last.number; - let last_block_hash = last.hash(); - let expected_state_root = last.state_root; - - // Insert the blocks - for block in blocks { - let (block, senders) = block.into_components(); - insert_canonical_block(self.deref_mut(), block, Some(senders))?; - } - - // Write state and changesets to the database. - // Must be written after blocks because of the receipt lookup. - state.write_to_db(self.deref_mut())?; - - self.insert_hashes(first_number..=last_block_number, last_block_hash, expected_state_root)?; - - self.calculate_history_indices(first_number..=last_block_number)?; - - // Update pipeline progress - self.update_pipeline_stages(new_tip_number, false)?; - - Ok(()) - } - - /// Insert full block and make it canonical. - pub fn insert_block( - &mut self, - block: SealedBlock, - senders: Option>, - ) -> Result<(), TransactionError> { - insert_canonical_block(self.deref_mut(), block, senders)?; - Ok(()) - } - - /// Read account/storage changesets and update account/storage history indices. - pub fn calculate_history_indices( - &mut self, - range: RangeInclusive, - ) -> Result<(), TransactionError> { - // account history stage - { - let indices = self.get_account_transition_ids_from_changeset(range.clone())?; - self.insert_account_history_index(indices)?; - } - - // storage history stage - { - let indices = self.get_storage_transition_ids_from_changeset(range)?; - self.insert_storage_history_index(indices)?; - } - - Ok(()) - } - - /// Calculate the hashes of all changed accounts and storages, and finally calculate the state - /// root. - /// - /// The chain goes from `fork_block_number + 1` to `current_block_number`, and hashes are - /// calculated from `from_transition_id` to `to_transition_id`. - /// - /// The resulting state root is compared with `expected_state_root`. 
- pub fn insert_hashes( - &mut self, - range: RangeInclusive, - end_block_hash: H256, - expected_state_root: H256, - ) -> Result<(), TransactionError> { - // storage hashing stage - { - let lists = self.get_addresses_and_keys_of_changed_storages(range.clone())?; - let storages = self.get_plainstate_storages(lists.into_iter())?; - self.insert_storage_for_hashing(storages.into_iter())?; - } - - // account hashing stage - { - let lists = self.get_addresses_of_changed_accounts(range.clone())?; - let accounts = self.get_plainstate_accounts(lists.into_iter())?; - self.insert_account_for_hashing(accounts.into_iter())?; - } - - // merkle tree - { - let (state_root, trie_updates) = - StateRoot::incremental_root_with_updates(self.deref_mut(), range.clone())?; - if state_root != expected_state_root { - return Err(TransactionError::StateRootMismatch { - got: state_root, - expected: expected_state_root, - block_number: *range.end(), - block_hash: end_block_hash, - }) - } - trie_updates.flush(self.deref_mut())?; - } - Ok(()) - } - - /// Return list of entries from table - /// - /// If TAKE is true, opened cursor would be write and it would delete all values from db. - #[inline] - pub fn get_or_take( - &self, - range: impl RangeBounds, - ) -> Result>, DbError> { - if TAKE { - let mut cursor_write = self.cursor_write::()?; - let mut walker = cursor_write.walk_range(range)?; - let mut items = Vec::new(); - while let Some(i) = walker.next().transpose()? { - walker.delete_current()?; - items.push(i) - } - Ok(items) - } else { - self.cursor_read::()?.walk_range(range)?.collect::, _>>() - } - } - - /// Get requested blocks transaction with signer - fn get_take_block_transaction_range( - &self, - range: impl RangeBounds + Clone, - ) -> Result)>, TransactionError> { - // Raad range of block bodies to get all transactions id's of this range. - let block_bodies = self.get_or_take::(range)?; - - if block_bodies.is_empty() { - return Ok(Vec::new()) - } - - // Compute the first and last tx ID in the range - let first_transaction = block_bodies.first().expect("If we have headers").1.first_tx_num(); - let last_transaction = block_bodies.last().expect("Not empty").1.last_tx_num(); - - // If this is the case then all of the blocks in the range are empty - if last_transaction < first_transaction { - return Ok(block_bodies.into_iter().map(|(n, _)| (n, Vec::new())).collect()) - } - - // Get transactions and senders - let transactions = self - .get_or_take::(first_transaction..=last_transaction)? 
- .into_iter() - .map(|(id, tx)| (id, tx.into())) - .collect::>(); - - let senders = - self.get_or_take::(first_transaction..=last_transaction)?; - - if TAKE { - // Remove TxHashNumber - let mut tx_hash_cursor = self.cursor_write::()?; - for (_, tx) in transactions.iter() { - if tx_hash_cursor.seek_exact(tx.hash())?.is_some() { - tx_hash_cursor.delete_current()?; - } - } - - // Remove TransactionBlock index if there are transaction present - if !transactions.is_empty() { - let tx_id_range = transactions.first().unwrap().0..=transactions.last().unwrap().0; - self.get_or_take::(tx_id_range)?; - } - } - - // Merge transaction into blocks - let mut block_tx = Vec::with_capacity(block_bodies.len()); - let mut senders = senders.into_iter(); - let mut transactions = transactions.into_iter(); - for (block_number, block_body) in block_bodies { - let mut one_block_tx = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - let tx = transactions.next(); - let sender = senders.next(); - - let recovered = match (tx, sender) { - (Some((tx_id, tx)), Some((sender_tx_id, sender))) => { - if tx_id != sender_tx_id { - Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) - } else { - Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender)) - } - } - (Some((tx_id, _)), _) | (_, Some((tx_id, _))) => { - Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) - } - (None, None) => Err(ProviderError::BlockBodyTransactionCount), - }?; - one_block_tx.push(recovered) - } - block_tx.push((block_number, one_block_tx)); - } - - Ok(block_tx) - } - - /// Return range of blocks and its execution result - fn get_take_block_range( - &self, - chain_spec: &ChainSpec, - range: impl RangeBounds + Clone, - ) -> Result, TransactionError> { - // For block we need Headers, Bodies, Uncles, withdrawals, Transactions, Signers - - let block_headers = self.get_or_take::(range.clone())?; - if block_headers.is_empty() { - return Ok(Vec::new()) - } - - let block_header_hashes = - self.get_or_take::(range.clone())?; - let block_ommers = self.get_or_take::(range.clone())?; - let block_withdrawals = - self.get_or_take::(range.clone())?; - - let block_tx = self.get_take_block_transaction_range::(range.clone())?; - - if TAKE { - // rm HeaderTD - self.get_or_take::(range)?; - // rm HeaderNumbers - let mut header_number_cursor = self.cursor_write::()?; - for (_, hash) in block_header_hashes.iter() { - if header_number_cursor.seek_exact(*hash)?.is_some() { - header_number_cursor.delete_current()?; - } - } - } - - // merge all into block - let block_header_iter = block_headers.into_iter(); - let block_header_hashes_iter = block_header_hashes.into_iter(); - let block_tx_iter = block_tx.into_iter(); - - // Ommers can be empty for some blocks - let mut block_ommers_iter = block_ommers.into_iter(); - let mut block_withdrawals_iter = block_withdrawals.into_iter(); - let mut block_ommers = block_ommers_iter.next(); - let mut block_withdrawals = block_withdrawals_iter.next(); - - let mut blocks = Vec::new(); - for ((main_block_number, header), (_, header_hash), (_, tx)) in izip!( - block_header_iter.into_iter(), - block_header_hashes_iter.into_iter(), - block_tx_iter.into_iter() - ) { - let header = header.seal(header_hash); - - let (body, senders) = tx.into_iter().map(|tx| tx.to_components()).unzip(); - - // Ommers can be missing - let mut ommers = Vec::new(); - if let Some((block_number, _)) = block_ommers.as_ref() { - if *block_number == main_block_number { - ommers = 
block_ommers.take().unwrap().1.ommers; - block_ommers = block_ommers_iter.next(); - } - }; - - // withdrawal can be missing - let shanghai_is_active = - chain_spec.fork(Hardfork::Shanghai).active_at_timestamp(header.timestamp); - let mut withdrawals = Some(Vec::new()); - if shanghai_is_active { - if let Some((block_number, _)) = block_withdrawals.as_ref() { - if *block_number == main_block_number { - withdrawals = Some(block_withdrawals.take().unwrap().1.withdrawals); - block_withdrawals = block_withdrawals_iter.next(); - } - } - } else { - withdrawals = None - } - - blocks.push(SealedBlockWithSenders { - block: SealedBlock { header, body, ommers, withdrawals }, - senders, - }) - } - - Ok(blocks) - } - - /// Traverse over changesets and plain state and recreate the [`PostState`]s for the given range - /// of blocks. - /// - /// 1. Iterate over the [BlockBodyIndices][tables::BlockBodyIndices] table to get all - /// the transition indices. - /// 2. Iterate over the [StorageChangeSet][tables::StorageChangeSet] table - /// and the [AccountChangeSet][tables::AccountChangeSet] tables in reverse order to reconstruct - /// the changesets. - /// - In order to have both the old and new values in the changesets, we also access the - /// plain state tables. - /// 3. While iterating over the changeset tables, if we encounter a new account or storage slot, - /// we: - /// 1. Take the old value from the changeset - /// 2. Take the new value from the plain state - /// 3. Save the old value to the local state - /// 4. While iterating over the changeset tables, if we encounter an account/storage slot we - /// have seen before we: - /// 1. Take the old value from the changeset - /// 2. Take the new value from the local state - /// 3. Set the local state to the value in the changeset - /// - /// If `TAKE` is `true`, the local state will be written to the plain state tables. - /// 5. Get all receipts from table - fn get_take_block_execution_result_range( - &self, - range: RangeInclusive, - ) -> Result, TransactionError> { - if range.is_empty() { - return Ok(Vec::new()) - } - - // We are not removing block meta as it is used to get block transitions. - let block_bodies = self.get_or_take::(range.clone())?; - - // get transaction receipts - let from_transaction_num = - block_bodies.first().expect("already checked if there are blocks").1.first_tx_num(); - let to_transaction_num = - block_bodies.last().expect("already checked if there are blocks").1.last_tx_num(); - let receipts = - self.get_or_take::(from_transaction_num..=to_transaction_num)?; - - let storage_range = BlockNumberAddress::range(range.clone()); - - let storage_changeset = - self.get_or_take::(storage_range)?; - let account_changeset = self.get_or_take::(range)?; - - // iterate previous value and get plain state value to create changeset - // Double option around Account represent if Account state is know (first option) and - // account is removed (Second Option) - type LocalPlainState = BTreeMap>, BTreeMap)>; - - let mut local_plain_state: LocalPlainState = BTreeMap::new(); - - // iterate in reverse and get plain state. 
- - // Bundle execution changeset to its particular transaction and block - let mut block_states = - BTreeMap::from_iter(block_bodies.iter().map(|(num, _)| (*num, PostState::default()))); - - let mut plain_accounts_cursor = self.cursor_write::()?; - let mut plain_storage_cursor = self.cursor_dup_write::()?; - - // add account changeset changes - for (block_number, account_before) in account_changeset.into_iter().rev() { - let AccountBeforeTx { info: old_info, address } = account_before; - let new_info = match local_plain_state.entry(address) { - Entry::Vacant(entry) => { - let new_account = plain_accounts_cursor.seek_exact(address)?.map(|kv| kv.1); - entry.insert((Some(old_info), BTreeMap::new())); - new_account - } - Entry::Occupied(mut entry) => { - let new_account = std::mem::replace(&mut entry.get_mut().0, Some(old_info)); - new_account.expect("As we are stacking account first, account would always be Some(Some) or Some(None)") - } - }; - - let post_state = block_states.entry(block_number).or_default(); - match (old_info, new_info) { - (Some(old), Some(new)) => { - if new != old { - post_state.change_account(block_number, address, old, new); - } else { - unreachable!("Junk data in database: an account changeset did not represent any change"); - } - } - (None, Some(account)) => post_state.create_account(block_number, address, account), - (Some(old), None) => - post_state.destroy_account(block_number, address, old), - (None, None) => unreachable!("Junk data in database: an account changeset transitioned from no account to no account"), - }; - } - - // add storage changeset changes - let mut storage_changes: BTreeMap = BTreeMap::new(); - for (block_and_address, storage_entry) in storage_changeset.into_iter().rev() { - let BlockNumberAddress((_, address)) = block_and_address; - let new_storage = - match local_plain_state.entry(address).or_default().1.entry(storage_entry.key) { - Entry::Vacant(entry) => { - let new_storage = plain_storage_cursor - .seek_by_key_subkey(address, storage_entry.key)? - .filter(|storage| storage.key == storage_entry.key) - .unwrap_or_default(); - entry.insert(storage_entry.value); - new_storage.value - } - Entry::Occupied(mut entry) => { - std::mem::replace(entry.get_mut(), storage_entry.value) - } - }; - storage_changes.entry(block_and_address).or_default().insert( - U256::from_be_bytes(storage_entry.key.0), - (storage_entry.value, new_storage), - ); - } - - for (BlockNumberAddress((block_number, address)), storage_changeset) in - storage_changes.into_iter() - { - block_states.entry(block_number).or_default().change_storage( - block_number, - address, - storage_changeset, - ); - } - - if TAKE { - // iterate over local plain state remove all account and all storages. - for (address, (account, storage)) in local_plain_state.into_iter() { - // revert account - if let Some(account) = account { - let existing_entry = plain_accounts_cursor.seek_exact(address)?; - if let Some(account) = account { - plain_accounts_cursor.upsert(address, account)?; - } else if existing_entry.is_some() { - plain_accounts_cursor.delete_current()?; - } - } - - // revert storages - for (storage_key, storage_value) in storage.into_iter() { - let storage_entry = StorageEntry { key: storage_key, value: storage_value }; - // delete previous value - // TODO: This does not use dupsort features - if plain_storage_cursor - .seek_by_key_subkey(address, storage_key)? - .filter(|s| s.key == storage_key) - .is_some() - { - plain_storage_cursor.delete_current()? 
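The changeset walk-back above uses a small but subtle trick: iterating newest-first, the first time a key is seen its post-change value comes from the live plain state, while every later (older) sighting takes its post-change value from the previously recorded pre-change value. A toy reconstruction of that bookkeeping with plain integers:

```rust
use std::collections::btree_map::Entry;
use std::collections::BTreeMap;

/// For each changeset entry (newest first), recover the (old, new) value pair.
fn old_new_pairs(
    plain_state: &BTreeMap<&'static str, u64>,
    changesets_newest_first: &[(&'static str, u64)], // (key, value before change)
) -> Vec<(&'static str, u64, u64)> {
    let mut local: BTreeMap<&str, u64> = BTreeMap::new();
    let mut out = Vec::new();
    for &(key, old) in changesets_newest_first {
        let new = match local.entry(key) {
            // First sighting: the post-change value is the live plain state.
            Entry::Vacant(e) => {
                e.insert(old);
                plain_state[key]
            }
            // Seen before: the post-change value is the younger "old" value.
            Entry::Occupied(mut e) => std::mem::replace(e.get_mut(), old),
        };
        out.push((key, old, new));
    }
    out
}

fn main() {
    let plain = BTreeMap::from([("acct", 30)]);
    // Value history 10 -> 20 -> 30; changesets store the pre-change values.
    let changes = [("acct", 20), ("acct", 10)];
    assert_eq!(old_new_pairs(&plain, &changes), vec![("acct", 20, 30), ("acct", 10, 20)]);
}
```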
- } - - // TODO: This does not use dupsort features - // insert value if needed - if storage_value != U256::ZERO { - plain_storage_cursor.upsert(address, storage_entry)?; - } - } - } - } - - // iterate over block body and create ExecutionResult - let mut receipt_iter = receipts.into_iter(); - - // loop break if we are at the end of the blocks. - for (block_number, block_body) in block_bodies.into_iter() { - for _ in block_body.tx_num_range() { - if let Some((_, receipt)) = receipt_iter.next() { - block_states - .entry(block_number) - .or_default() - .add_receipt(block_number, receipt); - } - } - } - Ok(block_states.into_values().collect()) - } - - /// Return range of blocks and its execution result - pub fn get_take_block_and_execution_range( - &self, - chain_spec: &ChainSpec, - range: RangeInclusive, - ) -> Result, TransactionError> { - if TAKE { - let storage_range = BlockNumberAddress::range(range.clone()); - - self.unwind_account_hashing(range.clone())?; - self.unwind_account_history_indices(range.clone())?; - self.unwind_storage_hashing(storage_range.clone())?; - self.unwind_storage_history_indices(storage_range)?; - - // merkle tree - let (new_state_root, trie_updates) = - StateRoot::incremental_root_with_updates(self.deref(), range.clone())?; - - let parent_number = range.start().saturating_sub(1); - let parent_state_root = self.get_header(parent_number)?.state_root; - - // state root should be always correct as we are reverting state. - // but for sake of double verification we will check it again. - if new_state_root != parent_state_root { - let parent_hash = self.get_block_hash(parent_number)?; - return Err(TransactionError::UnwindStateRootMismatch { - got: new_state_root, - expected: parent_state_root, - block_number: parent_number, - block_hash: parent_hash, - }) - } - trie_updates.flush(self.deref())?; - } - // get blocks - let blocks = self.get_take_block_range::(chain_spec, range.clone())?; - let unwind_to = blocks.first().map(|b| b.number.saturating_sub(1)); - // get execution res - let execution_res = self.get_take_block_execution_result_range::(range.clone())?; - // combine them - let blocks_with_exec_result: Vec<_> = - blocks.into_iter().zip(execution_res.into_iter()).collect(); - - // remove block bodies it is needed for both get block range and get block execution results - // that is why it is deleted afterwards. - if TAKE { - // rm block bodies - self.get_or_take::(range)?; - - // Update pipeline progress - if let Some(fork_number) = unwind_to { - self.update_pipeline_stages(fork_number, true)?; - } - } - - // return them - Ok(blocks_with_exec_result) - } - - /// Update all pipeline sync stage progress. - pub fn update_pipeline_stages( - &self, - block_number: BlockNumber, - drop_stage_checkpoint: bool, - ) -> Result<(), TransactionError> { - // iterate over all existing stages in the table and update its progress. - let mut cursor = self.cursor_write::()?; - while let Some((stage_name, checkpoint)) = cursor.next()? { - cursor.upsert( - stage_name, - StageCheckpoint { - block_number, - ..if drop_stage_checkpoint { Default::default() } else { checkpoint } - }, - )? - } - - Ok(()) - } - - /// Iterate over account changesets and return all account address that were changed. - pub fn get_addresses_and_keys_of_changed_storages( - &self, - range: RangeInclusive, - ) -> Result>, TransactionError> { - Ok(self - .cursor_read::()? - .walk_range(BlockNumberAddress::range(range))? - .collect::, _>>()? 
- .into_iter() - // fold all storages and save its old state so we can remove it from HashedStorage - // it is needed as it is dup table. - .fold( - BTreeMap::new(), - |mut accounts: BTreeMap>, - (BlockNumberAddress((_, address)), storage_entry)| { - accounts.entry(address).or_default().insert(storage_entry.key); - accounts - }, - )) - } - - /// Get plainstate storages - #[allow(clippy::type_complexity)] - pub fn get_plainstate_storages( - &self, - iter: impl IntoIterator)>, - ) -> Result)>, TransactionError> { - let mut plain_storage = self.cursor_dup_read::()?; - - iter.into_iter() - .map(|(address, storage)| { - storage - .into_iter() - .map(|key| -> Result<_, TransactionError> { - let ret = plain_storage - .seek_by_key_subkey(address, key)? - .filter(|v| v.key == key) - .unwrap_or_default(); - Ok((key, ret.value)) - }) - .collect::, _>>() - .map(|storage| (address, storage)) - }) - .collect::, _>>() - } - - /// iterate over storages and insert them to hashing table - pub fn insert_storage_for_hashing( - &self, - storages: impl IntoIterator)>, - ) -> Result<(), TransactionError> { - // hash values - let hashed = storages.into_iter().fold(BTreeMap::new(), |mut map, (address, storage)| { - let storage = storage.into_iter().fold(BTreeMap::new(), |mut map, (key, value)| { - map.insert(keccak256(key), value); - map - }); - map.insert(keccak256(address), storage); - map - }); - - let mut hashed_storage = self.cursor_dup_write::()?; - // Hash the address and key and apply them to HashedStorage (if Storage is None - // just remove it); - hashed.into_iter().try_for_each(|(hashed_address, storage)| { - storage.into_iter().try_for_each(|(key, value)| -> Result<(), TransactionError> { - if hashed_storage - .seek_by_key_subkey(hashed_address, key)? - .filter(|entry| entry.key == key) - .is_some() - { - hashed_storage.delete_current()?; - } - - if value != U256::ZERO { - hashed_storage.upsert(hashed_address, StorageEntry { key, value })?; - } - Ok(()) - }) - })?; - Ok(()) - } - - /// Iterate over account changesets and return all account address that were changed. - pub fn get_addresses_of_changed_accounts( - &self, - range: RangeInclusive, - ) -> Result, TransactionError> { - Ok(self - .cursor_read::()? - .walk_range(range)? - .collect::, _>>()? - .into_iter() - // fold all account to one set of changed accounts - .fold(BTreeSet::new(), |mut accounts: BTreeSet
, (_, account_before)| { - accounts.insert(account_before.address); - accounts - })) - } - - /// Get plainstate account from iterator - pub fn get_plainstate_accounts( - &self, - iter: impl IntoIterator, - ) -> Result)>, TransactionError> { - let mut plain_accounts = self.cursor_read::()?; - Ok(iter - .into_iter() - .map(|address| plain_accounts.seek_exact(address).map(|a| (address, a.map(|(_, v)| v)))) - .collect::, _>>()?) - } - - /// iterate over accounts and insert them to hashing table - pub fn insert_account_for_hashing( - &self, - accounts: impl IntoIterator)>, - ) -> Result<(), TransactionError> { - let mut hashed_accounts = self.cursor_write::()?; - - let hashes_accounts = accounts.into_iter().fold( - BTreeMap::new(), - |mut map: BTreeMap>, (address, account)| { - map.insert(keccak256(address), account); - map - }, - ); - - hashes_accounts.into_iter().try_for_each( - |(hashed_address, account)| -> Result<(), TransactionError> { - if let Some(account) = account { - hashed_accounts.upsert(hashed_address, account)? - } else if hashed_accounts.seek_exact(hashed_address)?.is_some() { - hashed_accounts.delete_current()?; - } - Ok(()) - }, - )?; - Ok(()) - } - - /// Get all transaction ids where account got changed. - /// - /// NOTE: Get inclusive range of blocks. - pub fn get_storage_transition_ids_from_changeset( - &self, - range: RangeInclusive, - ) -> Result>, TransactionError> { - let storage_changeset = self - .cursor_read::()? - .walk_range(BlockNumberAddress::range(range))? - .collect::, _>>()?; - - // fold all storages to one set of changes - let storage_changeset_lists = storage_changeset.into_iter().fold( - BTreeMap::new(), - |mut storages: BTreeMap<(Address, H256), Vec>, (index, storage)| { - storages - .entry((index.address(), storage.key)) - .or_default() - .push(index.block_number()); - storages - }, - ); - Ok(storage_changeset_lists) - } - - /// Get all transaction ids where account got changed. - /// - /// NOTE: Get inclusive range of blocks. - pub fn get_account_transition_ids_from_changeset( - &self, - range: RangeInclusive, - ) -> Result>, TransactionError> { - let account_changesets = self - .cursor_read::()? - .walk_range(range)? - .collect::, _>>()?; - - let account_transitions = account_changesets - .into_iter() - // fold all account to one set of changed accounts - .fold( - BTreeMap::new(), - |mut accounts: BTreeMap>, (index, account)| { - accounts.entry(account.address).or_default().push(index); - accounts - }, - ); - Ok(account_transitions) - } - - /// Insert storage change index to database. Used inside StorageHistoryIndex stage - pub fn insert_storage_history_index( - &self, - storage_transitions: BTreeMap<(Address, H256), Vec>, - ) -> Result<(), TransactionError> { - for ((address, storage_key), mut indices) in storage_transitions { - let mut last_shard = self.take_last_storage_shard(address, storage_key)?; - last_shard.append(&mut indices); - - // chunk indices and insert them in shards of N size. - let mut chunks = last_shard - .iter() - .chunks(storage_sharded_key::NUM_OF_INDICES_IN_SHARD) - .into_iter() - .map(|chunks| chunks.map(|i| *i as usize).collect::>()) - .collect::>(); - let last_chunk = chunks.pop(); - - // chunk indices and insert them in shards of N size. 
- chunks.into_iter().try_for_each(|list| { - self.put::( - StorageShardedKey::new( - address, - storage_key, - *list.last().expect("Chuck does not return empty list") as BlockNumber, - ), - BlockNumberList::new(list).expect("Indices are presorted and not empty"), - ) - })?; - // Insert last list with u64::MAX - if let Some(last_list) = last_chunk { - self.put::( - StorageShardedKey::new(address, storage_key, u64::MAX), - BlockNumberList::new(last_list).expect("Indices are presorted and not empty"), - )?; - } - } - Ok(()) - } - - /// Insert account change index to database. Used inside AccountHistoryIndex stage - pub fn insert_account_history_index( - &self, - account_transitions: BTreeMap>, - ) -> Result<(), TransactionError> { - // insert indexes to AccountHistory. - for (address, mut indices) in account_transitions { - let mut last_shard = self.take_last_account_shard(address)?; - last_shard.append(&mut indices); - // chunk indices and insert them in shards of N size. - let mut chunks = last_shard - .iter() - .chunks(sharded_key::NUM_OF_INDICES_IN_SHARD) - .into_iter() - .map(|chunks| chunks.map(|i| *i as usize).collect::>()) - .collect::>(); - let last_chunk = chunks.pop(); - - chunks.into_iter().try_for_each(|list| { - self.put::( - ShardedKey::new( - address, - *list.last().expect("Chuck does not return empty list") as BlockNumber, - ), - BlockNumberList::new(list).expect("Indices are presorted and not empty"), - ) - })?; - // Insert last list with u64::MAX - if let Some(last_list) = last_chunk { - self.put::( - ShardedKey::new(address, u64::MAX), - BlockNumberList::new(last_list).expect("Indices are presorted and not empty"), - )? - } - } - Ok(()) - } - - /// Get the stage checkpoint. - pub fn get_stage_checkpoint(&self, id: StageId) -> Result, DbError> { - get_stage_checkpoint(self.deref(), id) - } - - /// Save stage checkpoint. - pub fn save_stage_checkpoint( - &self, - id: StageId, - checkpoint: StageCheckpoint, - ) -> Result<(), DbError> { - self.put::(id.to_string(), checkpoint)?; - Ok(()) - } - - /// Return full table as Vec - pub fn table(&self) -> Result>, DbError> - where - T::Key: Default + Ord, - { - self.cursor_read::()?.walk(Some(T::Key::default()))?.collect::, DbError>>() - } -} - -/// Unwind all history shards. For boundary shard, remove it from database and -/// return last part of shard with still valid items. If all full shard were removed, return list -/// would be empty. -fn unwind_account_history_shards( - cursor: &mut <>::TXMut as DbTxMutGAT<'_>>::CursorMut< - tables::AccountHistory, - >, - address: Address, - block_number: BlockNumber, -) -> Result, TransactionError> { - let mut item = cursor.seek_exact(ShardedKey::new(address, u64::MAX))?; - - while let Some((sharded_key, list)) = item { - // there is no more shard for address - if sharded_key.key != address { - break - } - cursor.delete_current()?; - // check first item and if it is more and eq than `transition_id` delete current - // item. - let first = list.iter(0).next().expect("List can't empty"); - if first >= block_number as usize { - item = cursor.prev()?; - continue - } else if block_number <= sharded_key.highest_block_number { - // if first element is in scope whole list would be removed. - // so at least this first element is present. - return Ok(list.iter(0).take_while(|i| *i < block_number as usize).collect::>()) - } else { - let new_list = list.iter(0).collect::>(); - return Ok(new_list) - } - } - Ok(Vec::new()) -} - -/// Unwind all history shards. 
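Per shard, the history-unwinding helpers reduce to a `take_while` over the sorted block list against the unwind boundary: everything at or above the boundary is dropped, and any surviving prefix is reinserted. In isolation:

```rust
/// Keep only the indices strictly below the unwind boundary.
fn unwind_shard(shard: &[u64], unwind_block: u64) -> Vec<u64> {
    shard.iter().copied().take_while(|&b| b < unwind_block).collect()
}

fn main() {
    assert_eq!(unwind_shard(&[5, 9, 12, 20], 12), vec![5, 9]);
    assert_eq!(unwind_shard(&[15, 20], 12), Vec::<u64>::new()); // whole shard removed
}
```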
-/// Unwind all history shards. For the boundary shard, remove it from the database and -/// return the part of the shard that is still valid. If all full shards were removed, the -/// returned list will be empty. -fn unwind_account_history_shards<DB: Database>( - cursor: &mut <<DB as DatabaseGAT<'_>>::TXMut as DbTxMutGAT<'_>>::CursorMut< - tables::AccountHistory, - >, - address: Address, - block_number: BlockNumber, -) -> Result<Vec<usize>, TransactionError> { - let mut item = cursor.seek_exact(ShardedKey::new(address, u64::MAX))?; - - while let Some((sharded_key, list)) = item { - // there is no more shard for this address - if sharded_key.key != address { - break - } - cursor.delete_current()?; - // check the first item: if it is greater than or equal to `block_number`, the whole - // shard is unwound, so keep walking back. - let first = list.iter(0).next().expect("List can't be empty"); - if first >= block_number as usize { - item = cursor.prev()?; - continue - } else if block_number <= sharded_key.highest_block_number { - // the unwind point falls inside this shard, so only the prefix below it survives; - // at least this first element is present. - return Ok(list.iter(0).take_while(|i| *i < block_number as usize).collect::<Vec<_>>()) - } else { - let new_list = list.iter(0).collect::<Vec<_>>(); - return Ok(new_list) - } - } - Ok(Vec::new()) -} - -/// Unwind all history shards. For the boundary shard, remove it from the database and -/// return the part of the shard that is still valid. If all full shards were removed, the -/// returned list will be empty -- which does not mean no shard is left, only that no -/// shard had to be split. -fn unwind_storage_history_shards<DB: Database>( - cursor: &mut <<DB as DatabaseGAT<'_>>::TXMut as DbTxMutGAT<'_>>::CursorMut< - tables::StorageHistory, - >, - address: Address, - storage_key: H256, - block_number: BlockNumber, -) -> Result<Vec<usize>, TransactionError> { - let mut item = cursor.seek_exact(StorageShardedKey::new(address, storage_key, u64::MAX))?; - - while let Some((storage_sharded_key, list)) = item { - if storage_sharded_key.address != address || - storage_sharded_key.sharded_key.key != storage_key - { - // there is no more shard for this address and storage_key. - break - } - cursor.delete_current()?; - // check the first item: if it is greater than or equal to `block_number`, the whole - // shard is unwound, so keep walking back. - let first = list.iter(0).next().expect("List can't be empty"); - if first >= block_number as usize { - item = cursor.prev()?; - continue - } else if block_number <= storage_sharded_key.sharded_key.highest_block_number { - // the unwind point falls inside this shard, so only the prefix below it survives; - // at least this first element is present. - return Ok(list.iter(0).take_while(|i| *i < block_number as usize).collect::<Vec<_>>()) - } else { - return Ok(list.iter(0).collect::<Vec<_>>()) - } - } - Ok(Vec::new()) -}
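Unwinding reverses that layout: walk a key's shards backwards from the `u64::MAX` sentinel, delete each one, and when the unwind target falls inside a shard, return the prefix of indices that survives so the caller can re-insert it. A std-only sketch of the boundary-shard split performed by the two functions above:

```rust
/// Given one shard's sorted block numbers and an unwind target, return the
/// prefix that is still valid after unwinding: everything strictly below
/// `block_number` survives.
fn unwind_shard(indices: &[u64], block_number: u64) -> Vec<u64> {
    indices.iter().copied().take_while(|i| *i < block_number).collect()
}

fn main() {
    // Unwinding to block 5 keeps the changes from blocks 1 and 2 only.
    assert_eq!(unwind_shard(&[1, 2, 5, 7, 9], 5), vec![1, 2]);
    // Unwinding below the first change empties the list, so the caller
    // removes this key from the history table entirely.
    assert!(unwind_shard(&[4, 6], 3).is_empty());
}
```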
+use reth_primitives::{BlockHash, BlockNumber, H256}; +use reth_trie::StateRootError; +use std::fmt::Debug; /// An error that can occur when using the transaction container #[derive(Debug, PartialEq, Eq, Clone, thiserror::Error)] @@ -1449,8 +44,7 @@ pub enum TransactionError { #[cfg(test)] mod test { use crate::{ - insert_canonical_block, test_utils::blocks::*, ShareableDatabase, Transaction, - TransactionsProvider, + insert_canonical_block, test_utils::blocks::*, ShareableDatabase, TransactionsProvider, }; use reth_db::{ mdbx::test_utils::create_test_rw_db, models::{storage_sharded_key::StorageShardedKey, ShardedKey}, tables, }; use reth_primitives::{ChainSpecBuilder, IntegerList, H160, MAINNET, U256}; - use std::ops::DerefMut; + use std::sync::Arc; #[test] fn insert_block_and_hashes_get_take() { let db = create_test_rw_db(); // setup - let mut tx = Transaction::new(db.as_ref()).unwrap(); let chain_spec = ChainSpecBuilder::default() .chain(MAINNET.chain) .genesis(MAINNET.genesis.clone()) .shanghai_activated() .build(); + let factory = ShareableDatabase::new(db.as_ref(), Arc::new(chain_spec.clone())); + let mut provider = factory.provider_rw().unwrap(); + let data = BlockChainTestData::default(); let genesis = data.genesis.clone(); let (block1, exec_res1) = data.blocks[0].clone(); @@ -1482,60 +78,60 @@ mod test { let storage1_shard_key = StorageShardedKey::new(H160([0x60; 20]), U256::from(5).into(), u64::MAX); - insert_canonical_block(tx.deref_mut(), data.genesis.clone(), None).unwrap(); + insert_canonical_block(provider.tx_ref(), data.genesis.clone(), None).unwrap(); - assert_genesis_block(&tx, data.genesis); + assert_genesis_block(&provider, data.genesis); - tx.append_blocks_with_post_state(vec![block1.clone()], exec_res1.clone()).unwrap(); + provider.append_blocks_with_post_state(vec![block1.clone()], exec_res1.clone()).unwrap(); assert_eq!( - tx.table::<tables::AccountHistory>().unwrap(), + provider.table::<tables::AccountHistory>().unwrap(), vec![ (acc1_shard_key.clone(), IntegerList::new(vec![1]).unwrap()), (acc2_shard_key.clone(), IntegerList::new(vec![1]).unwrap()) ] ); assert_eq!( - tx.table::<tables::StorageHistory>().unwrap(), + provider.table::<tables::StorageHistory>().unwrap(), vec![(storage1_shard_key.clone(), IntegerList::new(vec![1]).unwrap())] ); // get one block - let get = tx.get_block_and_execution_range(&chain_spec, 1..=1).unwrap(); + let get = provider.get_block_and_execution_range(&chain_spec, 1..=1).unwrap(); let get_block = get[0].0.clone(); let get_state = get[0].1.clone(); assert_eq!(get_block, block1); assert_eq!(get_state, exec_res1); // take one block - let take = tx.take_block_and_execution_range(&chain_spec, 1..=1).unwrap(); + let take = provider.take_block_and_execution_range(&chain_spec, 1..=1).unwrap(); assert_eq!(take, vec![(block1.clone(), exec_res1.clone())]); - assert_genesis_block(&tx, genesis.clone()); + assert_genesis_block(&provider, genesis.clone()); // check if history is empty. - assert_eq!(tx.table::<tables::AccountHistory>().unwrap(), vec![]); - assert_eq!(tx.table::<tables::StorageHistory>().unwrap(), vec![]); + assert_eq!(provider.table::<tables::AccountHistory>().unwrap(), vec![]); + assert_eq!(provider.table::<tables::StorageHistory>().unwrap(), vec![]); - tx.append_blocks_with_post_state(vec![block1.clone()], exec_res1.clone()).unwrap(); - tx.append_blocks_with_post_state(vec![block2.clone()], exec_res2.clone()).unwrap(); + provider.append_blocks_with_post_state(vec![block1.clone()], exec_res1.clone()).unwrap(); + provider.append_blocks_with_post_state(vec![block2.clone()], exec_res2.clone()).unwrap(); // check history of two blocks assert_eq!( - tx.table::<tables::AccountHistory>().unwrap(), + provider.table::<tables::AccountHistory>().unwrap(), vec![ (acc1_shard_key, IntegerList::new(vec![1, 2]).unwrap()), (acc2_shard_key, IntegerList::new(vec![1]).unwrap()) ] ); assert_eq!( - tx.table::<tables::StorageHistory>().unwrap(), + provider.table::<tables::StorageHistory>().unwrap(), vec![(storage1_shard_key, IntegerList::new(vec![1, 2]).unwrap())] ); - tx.commit().unwrap(); + provider.commit().unwrap();
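Note the lifecycle these tests now encode: `ShareableDatabase` acts as a factory, `provider_rw()` checks out a provider wrapping one read-write transaction, `commit()` consumes it, and any further work needs a fresh provider from the factory (which the next hunk does). A minimal sketch of that ownership shape, using hypothetical toy types rather than reth's:

```rust
use std::sync::{Arc, Mutex};

// Toy stand-ins: the "database" is just a committed list, and a provider
// buffers writes until commit. Names mimic the reth API for illustration only.
struct Factory(Arc<Mutex<Vec<String>>>);
struct ProviderRw<'a>(&'a Factory, Vec<String>);

impl Factory {
    fn provider_rw(&self) -> ProviderRw<'_> {
        ProviderRw(self, Vec::new())
    }
}

impl ProviderRw<'_> {
    fn put(&mut self, v: &str) {
        self.1.push(v.to_string());
    }
    // Committing consumes the provider; further work needs a fresh checkout.
    fn commit(self) {
        self.0 .0.lock().unwrap().extend(self.1);
    }
}

fn main() {
    let factory = Factory(Arc::default());
    let mut provider = factory.provider_rw();
    provider.put("block1");
    provider.commit(); // `provider` is moved here, like `provider.commit()` above
    let provider2 = factory.provider_rw(); // fresh transaction, as in the next hunk
    drop(provider2); // dropping without commit discards, like `drop(provider)` in ef-tests
    assert_eq!(factory.0.lock().unwrap().len(), 1);
}
```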
// Check that transactions map onto blocks correctly. { - let provider = ShareableDatabase::new(tx.db, MAINNET.clone()); + let provider = factory.provider_rw().unwrap(); assert_eq!( provider.transaction_block(0).unwrap(), Some(1), @@ -1553,23 +149,24 @@ mod test { ); } + let provider = factory.provider_rw().unwrap(); // get second block - let get = tx.get_block_and_execution_range(&chain_spec, 2..=2).unwrap(); + let get = provider.get_block_and_execution_range(&chain_spec, 2..=2).unwrap(); assert_eq!(get, vec![(block2.clone(), exec_res2.clone())]); // get two blocks - let get = tx.get_block_and_execution_range(&chain_spec, 1..=2).unwrap(); + let get = provider.get_block_and_execution_range(&chain_spec, 1..=2).unwrap(); assert_eq!(get[0].0, block1); assert_eq!(get[1].0, block2); assert_eq!(get[0].1, exec_res1); assert_eq!(get[1].1, exec_res2); // take two blocks - let get = tx.take_block_and_execution_range(&chain_spec, 1..=2).unwrap(); + let get = provider.take_block_and_execution_range(&chain_spec, 1..=2).unwrap(); assert_eq!(get, vec![(block1, exec_res1), (block2, exec_res2)]); // assert genesis state - assert_genesis_block(&tx, genesis); + assert_genesis_block(&provider, genesis); } #[test] @@ -1577,58 +174,64 @@ mod test { let db = create_test_rw_db(); // setup - let mut tx = Transaction::new(db.as_ref()).unwrap(); - let chain_spec = ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .shanghai_activated() - .build(); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(MAINNET.genesis.clone()) + .shanghai_activated() + .build(), + ); + + let factory = ShareableDatabase::new(db.as_ref(), chain_spec.clone()); + let mut provider = factory.provider_rw().unwrap(); let data = BlockChainTestData::default(); let genesis = data.genesis.clone(); let (block1, exec_res1) = data.blocks[0].clone(); let (block2, exec_res2) = data.blocks[1].clone(); - insert_canonical_block(tx.deref_mut(), data.genesis.clone(), None).unwrap(); + insert_canonical_block(provider.tx_mut(), data.genesis.clone(), None).unwrap(); - assert_genesis_block(&tx, data.genesis); + assert_genesis_block(&provider, data.genesis); - tx.append_blocks_with_post_state(vec![block1.clone()], exec_res1.clone()).unwrap(); + provider.append_blocks_with_post_state(vec![block1.clone()], exec_res1.clone()).unwrap(); // get one block - let get = tx.get_block_and_execution_range(&chain_spec, 1..=1).unwrap(); + let get = provider.get_block_and_execution_range(&chain_spec, 1..=1).unwrap(); assert_eq!(get, vec![(block1.clone(), exec_res1.clone())]); // take one block - let take = tx.take_block_and_execution_range(&chain_spec, 1..=1).unwrap(); + let take = provider.take_block_and_execution_range(&chain_spec, 1..=1).unwrap(); assert_eq!(take, vec![(block1.clone(), exec_res1.clone())]); - assert_genesis_block(&tx, genesis.clone()); + assert_genesis_block(&provider, genesis.clone()); // insert two blocks let mut merged_state = exec_res1.clone(); merged_state.extend(exec_res2.clone()); - tx.append_blocks_with_post_state( - vec![block1.clone(), block2.clone()], - merged_state.clone(), - ) - .unwrap(); + provider + .append_blocks_with_post_state( + vec![block1.clone(), block2.clone()], + merged_state.clone(), + ) + .unwrap(); // get second block - let get = tx.get_block_and_execution_range(&chain_spec, 2..=2).unwrap(); + let get = provider.get_block_and_execution_range(&chain_spec, 2..=2).unwrap(); assert_eq!(get, vec![(block2.clone(), exec_res2.clone())]); // get two blocks - let get =
tx.get_block_and_execution_range(&chain_spec, 1..=2).unwrap(); + let get = provider.get_block_and_execution_range(&chain_spec, 1..=2).unwrap(); assert_eq!( get, vec![(block1.clone(), exec_res1.clone()), (block2.clone(), exec_res2.clone())] ); // take two blocks - let get = tx.take_block_and_execution_range(&chain_spec, 1..=2).unwrap(); + let get = provider.take_block_and_execution_range(&chain_spec, 1..=2).unwrap(); assert_eq!(get, vec![(block1, exec_res1), (block2, exec_res2)]); // assert genesis state - assert_genesis_block(&tx, genesis); + assert_genesis_block(&provider, genesis); } } diff --git a/crates/trie/src/trie.rs b/crates/trie/src/trie.rs index 92c82f881..0c9b1b453 100644 --- a/crates/trie/src/trie.rs +++ b/crates/trie/src/trie.rs @@ -523,14 +523,10 @@ mod tests { keccak256, proofs::KeccakHasher, trie::{BranchNodeCompact, TrieMask}, - Account, Address, H256, U256, - }; - use reth_provider::Transaction; - use std::{ - collections::BTreeMap, - ops::{Deref, DerefMut, Mul}, - str::FromStr, + Account, Address, H256, MAINNET, U256, }; + use reth_provider::{DatabaseProviderRW, ShareableDatabase}; + use std::{collections::BTreeMap, ops::Mul, str::FromStr}; fn insert_account<'a, TX: DbTxMut<'a>>( tx: &mut TX, @@ -559,10 +555,12 @@ mod tests { fn incremental_vs_full_root(inputs: &[&str], modified: &str) { let db = create_test_rw_db(); - let mut tx = Transaction::new(db.as_ref()).unwrap(); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let mut tx = factory.provider_rw().unwrap(); let hashed_address = H256::from_low_u64_be(1); - let mut hashed_storage_cursor = tx.cursor_dup_write::<tables::HashedStorage>().unwrap(); + let mut hashed_storage_cursor = + tx.tx_ref().cursor_dup_write::<tables::HashedStorage>().unwrap(); let data = inputs.iter().map(|x| H256::from_str(x).unwrap()); let value = U256::from(0); for key in data { @@ -571,7 +569,7 @@ mod tests { // Generate the intermediate nodes on the receiving end of the channel let (_, _, trie_updates) = - StorageRoot::new_hashed(tx.deref(), hashed_address).root_with_updates().unwrap(); + StorageRoot::new_hashed(tx.tx_ref(), hashed_address).root_with_updates().unwrap(); // 1. Some state transition happens, update the hashed storage to the new value let modified_key = H256::from_str(modified).unwrap(); @@ -585,16 +583,16 @@ mod tests { .unwrap(); // 2. Calculate full merkle root - let loader = StorageRoot::new_hashed(tx.deref(), hashed_address); + let loader = StorageRoot::new_hashed(tx.tx_ref(), hashed_address); let modified_root = loader.root().unwrap(); // Update the intermediate roots table so that we can run the incremental verification - trie_updates.flush(tx.deref()).unwrap(); + trie_updates.flush(tx.tx_ref()).unwrap(); // 3.
Calculate the incremental root let mut storage_changes = PrefixSet::default(); storage_changes.insert(Nibbles::unpack(modified_key)); - let loader = StorageRoot::new_hashed(tx.deref_mut(), hashed_address) + let loader = StorageRoot::new_hashed(tx.tx_mut(), hashed_address) .with_changed_prefixes(storage_changes); let incremental_root = loader.root().unwrap(); @@ -624,9 +622,10 @@ mod tests { let hashed_address = keccak256(address); let db = create_test_rw_db(); - let mut tx = Transaction::new(db.as_ref()).unwrap(); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let tx = factory.provider_rw().unwrap(); for (key, value) in &storage { - tx.put::<tables::HashedStorage>( + tx.tx_ref().put::<tables::HashedStorage>( hashed_address, StorageEntry { key: keccak256(key), value: *value }, ) @@ -634,7 +633,8 @@ mod tests { } tx.commit().unwrap(); - let got = StorageRoot::new(tx.deref_mut(), address).root().unwrap(); + let mut tx = factory.provider_rw().unwrap(); + let got = StorageRoot::new(tx.tx_mut(), address).root().unwrap(); let expected = storage_root(storage.into_iter()); assert_eq!(expected, got); }); @@ -680,7 +680,8 @@ mod tests { // This ensures we return an empty root when there are no storage entries fn test_empty_storage_root() { let db = create_test_rw_db(); - let mut tx = Transaction::new(db.as_ref()).unwrap(); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let mut tx = factory.provider_rw().unwrap(); let address = Address::random(); let code = "el buen fla"; @@ -689,10 +690,11 @@ mod tests { balance: U256::from(414241124u32), bytecode_hash: Some(keccak256(code)), }; - insert_account(&mut *tx, address, account, &Default::default()); + insert_account(tx.tx_mut(), address, account, &Default::default()); tx.commit().unwrap(); - let got = StorageRoot::new(tx.deref_mut(), address).root().unwrap(); + let mut tx = factory.provider_rw().unwrap(); + let got = StorageRoot::new(tx.tx_mut(), address).root().unwrap(); assert_eq!(got, EMPTY_ROOT); } @@ -700,7 +702,8 @@ mod tests { // This ensures that the walker goes over all the storage slots fn test_storage_root() { let db = create_test_rw_db(); - let mut tx = Transaction::new(db.as_ref()).unwrap(); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let mut tx = factory.provider_rw().unwrap(); let address = Address::random(); let storage = BTreeMap::from([ @@ -715,10 +718,11 @@ mod tests { bytecode_hash: Some(keccak256(code)), }; - insert_account(&mut *tx, address, account, &storage); + insert_account(tx.tx_mut(), address, account, &storage); tx.commit().unwrap(); - let got = StorageRoot::new(tx.deref_mut(), address).root().unwrap(); + let mut tx = factory.provider_rw().unwrap(); + let got = StorageRoot::new(tx.tx_mut(), address).root().unwrap(); assert_eq!(storage_root(storage.into_iter()), got); } @@ -742,12 +746,15 @@ mod tests { state.values().map(|(_, slots)| slots.len()).sum::<usize>(); let db = create_test_rw_db(); - let mut tx = Transaction::new(db.as_ref()).unwrap(); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let mut tx = factory.provider_rw().unwrap(); for (address, (account, storage)) in &state { - insert_account(&mut *tx, *address, *account, storage) + insert_account(tx.tx_mut(), *address, *account, storage) } tx.commit().unwrap(); + let mut tx = factory.provider_rw().unwrap(); + let expected = state_root(state.into_iter()); let threshold = 10; let mut got = None; let mut intermediate_state: Option<Box<IntermediateStateRootState>> = None;
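The hunk below drives `root_with_progress`: each call does a bounded amount of work and either returns an intermediate state to resume from or the final root, and the caller loops, feeding the state back in. A std-only sketch of that resumable-computation shape (the stand-in state here is a counter; reth's real intermediate state carries trie-walker positions, not a sum):

```rust
/// Outcome of one bounded slice of work, mirroring `StateRootProgress`.
enum Progress {
    Partial(State),
    Complete(u64),
}

/// Stand-in intermediate state: where to resume and what was accumulated.
struct State {
    next: u64,
    sum: u64,
}

/// Process at most `threshold` items per call, checkpointing into `State`.
fn run_with_progress(upto: u64, threshold: u64, prev: Option<State>) -> Progress {
    let State { mut next, mut sum } = prev.unwrap_or(State { next: 0, sum: 0 });
    let mut processed = 0;
    while next < upto {
        sum += next;
        next += 1;
        processed += 1;
        if processed == threshold && next < upto {
            return Progress::Partial(State { next, sum });
        }
    }
    Progress::Complete(sum)
}

fn main() {
    // Drive it the same way the test below drives `root_with_progress`.
    let mut intermediate = None;
    let got = loop {
        match run_with_progress(100, 10, intermediate.take()) {
            Progress::Partial(state) => intermediate = Some(state),
            Progress::Complete(sum) => break sum,
        }
    };
    assert_eq!(got, (0..100).sum::<u64>());
}
```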
while got.is_none() { - let calculator = StateRoot::new(tx.deref_mut()) + let calculator = StateRoot::new(tx.tx_mut()) .with_threshold(threshold) .with_intermediate_state(intermediate_state.take().map(|state| *state)); match calculator.root_with_progress().unwrap() { @@ -778,15 +785,17 @@ mod tests { fn test_state_root_with_state(state: State) { let db = create_test_rw_db(); - let mut tx = Transaction::new(db.as_ref()).unwrap(); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let mut tx = factory.provider_rw().unwrap(); for (address, (account, storage)) in &state { - insert_account(&mut *tx, *address, *account, storage) + insert_account(tx.tx_mut(), *address, *account, storage) } tx.commit().unwrap(); let expected = state_root(state.into_iter()); - let got = StateRoot::new(tx.deref_mut()).root().unwrap(); + let mut tx = factory.provider_rw().unwrap(); + let got = StateRoot::new(tx.tx_mut()).root().unwrap(); assert_eq!(expected, got); } @@ -803,7 +812,8 @@ mod tests { #[test] fn storage_root_regression() { let db = create_test_rw_db(); - let mut tx = Transaction::new(db.as_ref()).unwrap(); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let tx = factory.provider_rw().unwrap(); // Some address whose hash starts with 0xB041 let address3 = Address::from_str("16b07afd1c635f77172e842a000ead9a2a222459").unwrap(); let key3 = keccak256(address3); @@ -820,13 +830,15 @@ mod tests { .map(|(slot, val)| (H256::from_str(slot).unwrap(), U256::from(val))), ); - let mut hashed_storage_cursor = tx.cursor_dup_write::<tables::HashedStorage>().unwrap(); + let mut hashed_storage_cursor = + tx.tx_ref().cursor_dup_write::<tables::HashedStorage>().unwrap(); for (hashed_slot, value) in storage.clone() { hashed_storage_cursor.upsert(key3, StorageEntry { key: hashed_slot, value }).unwrap(); } tx.commit().unwrap(); + let mut tx = factory.provider_rw().unwrap(); - let account3_storage_root = StorageRoot::new(tx.deref_mut(), address3).root().unwrap(); + let account3_storage_root = StorageRoot::new(tx.tx_mut(), address3).root().unwrap(); let expected_root = storage_root_prehashed(storage.into_iter()); assert_eq!(expected_root, account3_storage_root); } @@ -845,10 +857,13 @@ mod tests { ); let db = create_test_rw_db(); - let mut tx = Transaction::new(db.as_ref()).unwrap(); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let mut tx = factory.provider_rw().unwrap(); - let mut hashed_account_cursor = tx.cursor_write::<tables::HashedAccount>().unwrap(); - let mut hashed_storage_cursor = tx.cursor_dup_write::<tables::HashedStorage>().unwrap(); + let mut hashed_account_cursor = + tx.tx_ref().cursor_write::<tables::HashedAccount>().unwrap(); + let mut hashed_storage_cursor = + tx.tx_ref().cursor_dup_write::<tables::HashedStorage>().unwrap(); let mut hash_builder = HashBuilder::default(); @@ -891,7 +906,7 @@ mod tests { } hashed_storage_cursor.upsert(key3, StorageEntry { key: hashed_slot, value }).unwrap(); } - let account3_storage_root = StorageRoot::new(tx.deref_mut(), address3).root().unwrap(); + let account3_storage_root = StorageRoot::new(tx.tx_mut(), address3).root().unwrap(); hash_builder.add_leaf( Nibbles::unpack(key3), &encode_account(account3, Some(account3_storage_root)), @@ -940,7 +955,7 @@ mod tests { assert_eq!(hash_builder.root(), computed_expected_root); // Check state root calculation from scratch - let (root, trie_updates) = StateRoot::new(tx.deref()).root_with_updates().unwrap(); + let (root, trie_updates) = StateRoot::new(tx.tx_ref()).root_with_updates().unwrap(); assert_eq!(root, computed_expected_root); // Check account trie @@ -1005,7 +1020,7 @@ mod tests { H256::from_str("8e263cd4eefb0c3cbbb14e5541a66a755cad25bcfab1e10dd9d706263e811b28")
.unwrap(); - let (root, trie_updates) = StateRoot::new(tx.deref()) + let (root, trie_updates) = StateRoot::new(tx.tx_ref()) .with_changed_account_prefixes(prefix_set) .root_with_updates() .unwrap(); @@ -1035,9 +1050,11 @@ mod tests { assert_eq!(nibbles2b.inner[..], [0xB, 0x0]); assert_eq!(node2a, node2b); tx.commit().unwrap(); + let tx = factory.provider_rw().unwrap(); { - let mut hashed_account_cursor = tx.cursor_write::<tables::HashedAccount>().unwrap(); + let mut hashed_account_cursor = + tx.tx_ref().cursor_write::<tables::HashedAccount>().unwrap(); let account = hashed_account_cursor.seek_exact(key2).unwrap().unwrap(); hashed_account_cursor.delete_current().unwrap(); @@ -1055,7 +1072,7 @@ mod tests { (key6, encode_account(account6, None)), ]); - let (root, trie_updates) = StateRoot::new(tx.deref()) + let (root, trie_updates) = StateRoot::new(tx.tx_ref()) .with_changed_account_prefixes(account_prefix_set) .root_with_updates() .unwrap(); @@ -1085,11 +1102,13 @@ mod tests { assert_ne!(node1c.hashes[0], node1b.hashes[0]); assert_eq!(node1c.hashes[1], node1b.hashes[1]); assert_eq!(node1c.hashes[2], node1b.hashes[2]); - tx.drop().unwrap(); + drop(tx); } + let mut tx = factory.provider_rw().unwrap(); { - let mut hashed_account_cursor = tx.cursor_write::<tables::HashedAccount>().unwrap(); + let mut hashed_account_cursor = + tx.tx_ref().cursor_write::<tables::HashedAccount>().unwrap(); let account2 = hashed_account_cursor.seek_exact(key2).unwrap().unwrap(); hashed_account_cursor.delete_current().unwrap(); @@ -1110,7 +1129,7 @@ mod tests { (key6, encode_account(account6, None)), ]); - let (root, trie_updates) = StateRoot::new(tx.deref_mut()) + let (root, trie_updates) = StateRoot::new(tx.tx_mut()) .with_changed_account_prefixes(account_prefix_set) .root_with_updates() .unwrap(); @@ -1145,11 +1164,12 @@ mod tests { #[test] fn account_trie_around_extension_node() { let db = create_test_rw_db(); - let mut tx = Transaction::new(db.as_ref()).unwrap(); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let mut tx = factory.provider_rw().unwrap(); let expected = extension_node_trie(&mut tx); - let (got, updates) = StateRoot::new(tx.deref_mut()).root_with_updates().unwrap(); + let (got, updates) = StateRoot::new(tx.tx_mut()).root_with_updates().unwrap(); assert_eq!(expected, got); // Check account trie @@ -1170,16 +1190,17 @@ mod tests { fn account_trie_around_extension_node_with_dbtrie() { let db = create_test_rw_db(); - let mut tx = Transaction::new(db.as_ref()).unwrap(); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let mut tx = factory.provider_rw().unwrap(); let expected = extension_node_trie(&mut tx); - let (got, updates) = StateRoot::new(tx.deref_mut()).root_with_updates().unwrap(); + let (got, updates) = StateRoot::new(tx.tx_mut()).root_with_updates().unwrap(); assert_eq!(expected, got); - updates.flush(tx.deref_mut()).unwrap(); + updates.flush(tx.tx_mut()).unwrap(); // read the account updates from the db - let mut accounts_trie = tx.cursor_read::<tables::AccountsTrie>().unwrap(); + let mut accounts_trie = tx.tx_ref().cursor_read::<tables::AccountsTrie>().unwrap(); let walker = accounts_trie.walk(None).unwrap(); let mut account_updates = HashMap::new(); for item in walker { @@ -1197,8 +1218,9 @@ mod tests { tokio::runtime::Runtime::new().unwrap().block_on(async { let db = create_test_rw_db(); - let mut tx = Transaction::new(db.as_ref()).unwrap(); - let mut hashed_account_cursor = tx.cursor_write::<tables::HashedAccount>().unwrap(); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let mut tx = factory.provider_rw().unwrap(); + let mut hashed_account_cursor =
tx.tx_ref().cursor_write::<tables::HashedAccount>().unwrap(); let mut state = BTreeMap::default(); for accounts in account_changes { @@ -1211,7 +1233,7 @@ mod tests { } } - let (state_root, trie_updates) = StateRoot::new(tx.deref_mut()) + let (state_root, trie_updates) = StateRoot::new(tx.tx_mut()) .with_changed_account_prefixes(changes) .root_with_updates() .unwrap(); @@ -1221,7 +1243,7 @@ mod tests { state.clone().into_iter().map(|(key, balance)| (key, (Account { balance, ..Default::default() }, std::iter::empty()))) ); assert_eq!(expected_root, state_root); - trie_updates.flush(tx.deref_mut()).unwrap(); + trie_updates.flush(tx.tx_mut()).unwrap(); } }); } @@ -1230,14 +1252,15 @@ mod tests { #[test] fn storage_trie_around_extension_node() { let db = create_test_rw_db(); - let mut tx = Transaction::new(db.as_ref()).unwrap(); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let mut tx = factory.provider_rw().unwrap(); let hashed_address = H256::random(); let (expected_root, expected_updates) = extension_node_storage_trie(&mut tx, hashed_address); let (got, _, updates) = - StorageRoot::new_hashed(tx.deref_mut(), hashed_address).root_with_updates().unwrap(); + StorageRoot::new_hashed(tx.tx_mut(), hashed_address).root_with_updates().unwrap(); assert_eq!(expected_root, got); // Check account trie @@ -1256,12 +1279,12 @@ mod tests { } fn extension_node_storage_trie( - tx: &mut Transaction<'_, Env>, + tx: &mut DatabaseProviderRW<'_, &Env>, hashed_address: H256, ) -> (H256, HashMap<Nibbles, BranchNodeCompact>) { let value = U256::from(1); - let mut hashed_storage = tx.cursor_write::<tables::HashedStorage>().unwrap(); + let mut hashed_storage = tx.tx_ref().cursor_write::<tables::HashedStorage>().unwrap(); let mut hb = HashBuilder::default().with_updates(true); @@ -1282,12 +1305,12 @@ mod tests { (root, updates) } - fn extension_node_trie(tx: &mut Transaction<'_, Env>) -> H256 { + fn extension_node_trie(tx: &mut DatabaseProviderRW<'_, &Env>) -> H256 { let a = Account { nonce: 0, balance: U256::from(1u64), bytecode_hash: Some(H256::random()) }; let val = encode_account(a, None); - let mut hashed_accounts = tx.cursor_write::<tables::HashedAccount>().unwrap(); + let mut hashed_accounts = tx.tx_ref().cursor_write::<tables::HashedAccount>().unwrap(); let mut hb = HashBuilder::default(); for key in [ diff --git a/crates/trie/src/trie_cursor/account_cursor.rs b/crates/trie/src/trie_cursor/account_cursor.rs index cb2db71d8..a74789e7c 100644 --- a/crates/trie/src/trie_cursor/account_cursor.rs +++ b/crates/trie/src/trie_cursor/account_cursor.rs @@ -38,6 +38,7 @@ where #[cfg(test)] mod tests { + use super::*; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, mdbx::test_utils::create_test_rw_db, tables, transaction::DbTxMut, }; - use reth_primitives::hex_literal::hex; - use reth_provider::Transaction; + use reth_primitives::{hex_literal::hex, MAINNET}; + use reth_provider::ShareableDatabase; #[test] fn test_account_trie_order() { let db = create_test_rw_db(); - let tx = Transaction::new(db.as_ref()).unwrap(); - let mut cursor = tx.cursor_write::<tables::AccountsTrie>().unwrap(); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let provider = factory.provider_rw().unwrap(); + let mut cursor = provider.tx_ref().cursor_write::<tables::AccountsTrie>().unwrap(); let data = vec![ hex!("0303040e").to_vec(),
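`test_account_trie_order` above leans on MDBX's byte-wise key ordering: trie nodes are keyed by their nibble path, and lexicographic order over those paths yields each parent prefix before the paths that extend it, with siblings in nibble order. A std-only sketch of that ordering property:

```rust
use std::collections::BTreeSet;

fn main() {
    // Nibble paths as byte vectors, like the hex!("0303040e")-style keys above.
    let mut paths = BTreeSet::new();
    for p in [
        vec![0x03u8, 0x03, 0x04, 0x0e],
        vec![0x03],
        vec![0x03, 0x03],
        vec![0x03, 0x04],
    ] {
        paths.insert(p);
    }
    // Byte-wise lexicographic order visits a prefix (parent node) before any
    // path that extends it, and siblings in nibble order.
    let walked: Vec<_> = paths.into_iter().collect();
    assert_eq!(
        walked,
        vec![
            vec![0x03],
            vec![0x03, 0x03],
            vec![0x03, 0x03, 0x04, 0x0e],
            vec![0x03, 0x04],
        ]
    );
}
```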
diff --git a/crates/trie/src/trie_cursor/storage_cursor.rs b/crates/trie/src/trie_cursor/storage_cursor.rs index 317bf690d..677cae497 100644 --- a/crates/trie/src/trie_cursor/storage_cursor.rs +++ b/crates/trie/src/trie_cursor/storage_cursor.rs @@ -55,19 +55,24 @@ where #[cfg(test)] mod tests { + use super::*; use reth_db::{ cursor::DbCursorRW, mdbx::test_utils::create_test_rw_db, tables, transaction::DbTxMut, }; - use reth_primitives::trie::{BranchNodeCompact, StorageTrieEntry}; - use reth_provider::Transaction; + use reth_primitives::{ + trie::{BranchNodeCompact, StorageTrieEntry}, + MAINNET, + }; + use reth_provider::ShareableDatabase; // tests that upsert and seek match on the storage trie cursor #[test] fn test_storage_cursor_abstraction() { let db = create_test_rw_db(); - let tx = Transaction::new(db.as_ref()).unwrap(); - let mut cursor = tx.cursor_dup_write::<tables::StoragesTrie>().unwrap(); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let provider = factory.provider_rw().unwrap(); + let mut cursor = provider.tx_ref().cursor_dup_write::<tables::StoragesTrie>().unwrap(); let hashed_address = H256::random(); let key = vec![0x2, 0x3]; diff --git a/crates/trie/src/walker.rs b/crates/trie/src/walker.rs index 3f6c86196..f91f25843 100644 --- a/crates/trie/src/walker.rs +++ b/crates/trie/src/walker.rs @@ -256,13 +256,14 @@ impl<'a, K: Key + From<Vec<u8>>, C: TrieCursor> TrieWalker<'a, K, C> { #[cfg(test)] mod tests { + use super::*; use crate::trie_cursor::{AccountTrieCursor, StorageTrieCursor}; use reth_db::{ cursor::DbCursorRW, mdbx::test_utils::create_test_rw_db, tables, transaction::DbTxMut, }; - use reth_primitives::trie::StorageTrieEntry; - use reth_provider::Transaction; + use reth_primitives::{trie::StorageTrieEntry, MAINNET}; + use reth_provider::ShareableDatabase; #[test] fn walk_nodes_with_common_prefix() { @@ -288,8 +289,11 @@ mod tests { ]; let db = create_test_rw_db(); - let tx = Transaction::new(db.as_ref()).unwrap(); - let mut account_cursor = tx.cursor_write::<tables::AccountsTrie>().unwrap(); + + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let tx = factory.provider_rw().unwrap(); + + let mut account_cursor = tx.tx_ref().cursor_write::<tables::AccountsTrie>().unwrap(); for (k, v) in &inputs { account_cursor.upsert(k.clone().into(), v.clone()).unwrap(); } @@ -297,7 +301,7 @@ mod tests { test_cursor(account_trie, &expected); let hashed_address = H256::random(); - let mut storage_cursor = tx.cursor_dup_write::<tables::StoragesTrie>().unwrap(); + let mut storage_cursor = tx.tx_ref().cursor_dup_write::<tables::StoragesTrie>().unwrap(); for (k, v) in &inputs { storage_cursor .upsert( @@ -332,8 +336,9 @@ mod tests { #[test] fn cursor_rootnode_with_changesets() { let db = create_test_rw_db(); - let tx = Transaction::new(db.as_ref()).unwrap(); - let mut cursor = tx.cursor_dup_write::<tables::StoragesTrie>().unwrap(); + let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone()); + let tx = factory.provider_rw().unwrap(); + let mut cursor = tx.tx_ref().cursor_dup_write::<tables::StoragesTrie>().unwrap(); let nodes = vec![ ( diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index ec295bd03..e57ebfc3e 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -6,9 +6,9 @@ use crate::{ }; use reth_db::mdbx::test_utils::create_test_rw_db; use reth_primitives::{BlockBody, SealedBlock}; -use reth_provider::Transaction; +use reth_provider::ShareableDatabase; use reth_stages::{stages::ExecutionStage, ExecInput, Stage}; -use std::{collections::BTreeMap, ffi::OsStr, fs, ops::Deref, path::Path, sync::Arc}; +use std::{collections::BTreeMap, ffi::OsStr, fs, path::Path, sync::Arc}; /// A handler for the blockchain test suite.
#[derive(Debug)] @@ -75,19 +75,21 @@ impl Case for BlockchainTestCase { // Create the database let db = create_test_rw_db(); - let mut transaction = Transaction::new(db.as_ref())?; + let factory = + ShareableDatabase::new(db.as_ref(), Arc::new(case.network.clone().into())); + let mut provider = factory.provider_rw().unwrap(); // Insert test state reth_provider::insert_canonical_block( - transaction.deref(), + provider.tx_ref(), SealedBlock::new(case.genesis_block_header.clone().into(), BlockBody::default()), None, )?; - case.pre.write_to_db(transaction.deref())?; + case.pre.write_to_db(provider.tx_ref())?; let mut last_block = None; for block in case.blocks.iter() { - last_block = Some(block.write_to_db(transaction.deref())?); + last_block = Some(block.write_to_db(provider.tx_ref())?); } // Call execution stage @@ -103,7 +105,7 @@ impl Case for BlockchainTestCase { // ignore error let _ = stage .execute( - &mut transaction, + &mut provider, ExecInput { target: last_block, checkpoint: None }, ) .await; @@ -118,13 +120,13 @@ impl Case for BlockchainTestCase { } Some(RootOrState::State(state)) => { for (&address, account) in state.iter() { - account.assert_db(address, transaction.deref())?; + account.assert_db(address, provider.tx_ref())?; } } None => println!("No post-state"), } - transaction.close(); + drop(provider); } Ok(()) }