chore: rename ShareableDatabase to ProviderFactory (#3121)
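The change is mechanical: the type `ShareableDatabase` becomes `ProviderFactory`, and local bindings named `shareable_db` become `factory`; behavior is unchanged. As a rough sketch of the pattern the renamed type is used in (calls taken from the hunks below, against the reth crates as of this commit; the trait re-export path is an assumption):

```rust
use reth_db::mdbx::{test_utils::create_test_db, EnvKind, WriteMap};
use reth_primitives::MAINNET;
// Assumes `BlockHashProvider` is re-exported at the crate root, as the hunks suggest.
use reth_provider::{BlockHashProvider, ProviderFactory};

fn factory_round_trip() {
    let db = create_test_db::<WriteMap>(EnvKind::RW);

    // Formerly `ShareableDatabase::new(db, chain_spec)`.
    let factory = ProviderFactory::new(db, MAINNET.clone());

    // Read-only provider backed by a fresh read transaction.
    let provider = factory.provider().unwrap();
    provider.block_hash(0).unwrap();

    // Read-write provider for writes; see the pipeline hunks for the commit pattern.
    let provider_rw = factory.provider_rw().unwrap();
    provider_rw.block_hash(0).unwrap();
}
```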
@@ -26,7 +26,7 @@ use reth_interfaces::{
use reth_network::NetworkHandle;
use reth_network_api::NetworkInfo;
use reth_primitives::{stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, H256};
-use reth_provider::{providers::get_stage_checkpoint, ShareableDatabase};
+use reth_provider::{providers::get_stage_checkpoint, ProviderFactory};
use reth_staged_sync::utils::init::{init_db, init_genesis};
use reth_stages::{
sets::DefaultStages,
@@ -170,7 +170,7 @@ impl Command {
Ipv4Addr::UNSPECIFIED,
self.network.discovery.port.unwrap_or(DEFAULT_DISCOVERY_PORT),
)))
-.build(ShareableDatabase::new(db, self.chain.clone()))
+.build(ProviderFactory::new(db, self.chain.clone()))
.start_network()
.await?;
info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), "Connected to P2P network");
@@ -250,7 +250,7 @@ impl Command {
}

let mut current_max_block = latest_block_number;
-let shareable_db = ShareableDatabase::new(&db, self.chain.clone());
+let factory = ProviderFactory::new(&db, self.chain.clone());

while current_max_block < self.to {
let next_block = current_max_block + 1;
@@ -266,7 +266,7 @@ impl Command {

// Unwind the pipeline without committing.
{
-shareable_db
+factory
.provider_rw()
.map_err(PipelineError::Interface)?
.take_block_and_execution_range(&self.chain, next_block..=target_block)?;

@@ -9,7 +9,7 @@ use reth_primitives::{
stage::{StageCheckpoint, StageId},
ChainSpec,
};
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;
use reth_staged_sync::utils::init::init_db;
use reth_stages::{
stages::{
@@ -68,8 +68,8 @@ impl Command {
std::fs::create_dir_all(&db_path)?;

let db = Arc::new(init_db(db_path)?);
-let shareable_db = ShareableDatabase::new(&db, self.chain.clone());
-let mut provider_rw = shareable_db.provider_rw().map_err(PipelineError::Interface)?;
+let factory = ProviderFactory::new(&db, self.chain.clone());
+let mut provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?;

let execution_checkpoint_block =
provider_rw.get_stage_checkpoint(StageId::Execution)?.unwrap_or_default().block_number;

@@ -46,7 +46,7 @@ use reth_primitives::{
};
use reth_provider::{
providers::get_stage_checkpoint, BlockProvider, CanonStateSubscriptions, HeaderProvider,
-ShareableDatabase,
+ProviderFactory,
};
use reth_revm::Factory;
use reth_revm_inspectors::stack::Hook;
@@ -199,8 +199,8 @@ impl Command {
)?);

// setup the blockchain provider
-let shareable_db = ShareableDatabase::new(Arc::clone(&db), Arc::clone(&self.chain));
-let blockchain_db = BlockchainProvider::new(shareable_db, blockchain_tree.clone())?;
+let factory = ProviderFactory::new(Arc::clone(&db), Arc::clone(&self.chain));
+let blockchain_db = BlockchainProvider::new(factory, blockchain_tree.clone())?;

let transaction_pool = reth_transaction_pool::Pool::eth_pool(
EthTransactionValidator::new(blockchain_db.clone(), Arc::clone(&self.chain)),
@@ -600,7 +600,7 @@ impl Command {
executor: TaskExecutor,
secret_key: SecretKey,
default_peers_path: PathBuf,
-) -> NetworkConfig<ShareableDatabase<Arc<Env<WriteMap>>>> {
+) -> NetworkConfig<ProviderFactory<Arc<Env<WriteMap>>>> {
let head = self.lookup_head(Arc::clone(&db)).expect("the head block is missing");

self.network
@@ -615,7 +615,7 @@ impl Command {
Ipv4Addr::UNSPECIFIED,
self.network.discovery.port.unwrap_or(DEFAULT_DISCOVERY_PORT),
)))
-.build(ShareableDatabase::new(db, self.chain.clone()))
+.build(ProviderFactory::new(db, self.chain.clone()))
}

#[allow(clippy::too_many_arguments)]

@@ -15,7 +15,7 @@ use reth_db::mdbx::{Env, EnvKind, WriteMap};
use reth_discv4::NatResolver;
use reth_interfaces::p2p::bodies::client::BodiesClient;
use reth_primitives::{BlockHashOrNumber, ChainSpec, NodeRecord};
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;
use std::{path::PathBuf, sync::Arc};

/// `reth p2p` command
@@ -129,7 +129,7 @@ impl Command {
network_config_builder = self.discovery.apply_to_builder(network_config_builder);

let network = network_config_builder
-.build(Arc::new(ShareableDatabase::new(noop_db, self.chain.clone())))
+.build(Arc::new(ProviderFactory::new(noop_db, self.chain.clone())))
.start_network()
.await?;

@@ -5,7 +5,7 @@ use reth_db::{
cursor::DbCursorRO, database::Database, table::TableImporter, tables, transaction::DbTx,
};
use reth_primitives::{stage::StageCheckpoint, ChainSpec};
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;
use reth_revm::Factory;
use reth_stages::{stages::ExecutionStage, Stage, UnwindInput};
use std::{path::PathBuf, sync::Arc};
@@ -94,8 +94,8 @@ async fn unwind_and_copy<DB: Database>(
tip_block_number: u64,
output_db: &reth_db::mdbx::Env<reth_db::mdbx::WriteMap>,
) -> eyre::Result<()> {
-let shareable_db = ShareableDatabase::new(db_tool.db, db_tool.chain.clone());
-let mut provider = shareable_db.provider_rw()?;
+let factory = ProviderFactory::new(db_tool.db, db_tool.chain.clone());
+let mut provider = factory.provider_rw()?;

let mut exec_stage = ExecutionStage::new_with_factory(Factory::new(db_tool.chain.clone()));

@@ -129,8 +129,8 @@ async fn dry_run<DB: Database>(
) -> eyre::Result<()> {
info!(target: "reth::cli", "Executing stage. [dry-run]");

-let shareable_db = ShareableDatabase::new(&output_db, chain.clone());
-let mut provider = shareable_db.provider_rw()?;
+let factory = ProviderFactory::new(&output_db, chain.clone());
+let mut provider = factory.provider_rw()?;
let mut exec_stage = ExecutionStage::new_with_factory(Factory::new(chain.clone()));

exec_stage

@@ -3,7 +3,7 @@ use crate::utils::DbTool;
use eyre::Result;
use reth_db::{database::Database, table::TableImporter, tables};
use reth_primitives::{stage::StageCheckpoint, BlockNumber, ChainSpec};
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;
use reth_stages::{stages::AccountHashingStage, Stage, UnwindInput};
use std::{path::PathBuf, sync::Arc};
use tracing::info;
@@ -38,8 +38,8 @@ async fn unwind_and_copy<DB: Database>(
tip_block_number: u64,
output_db: &reth_db::mdbx::Env<reth_db::mdbx::WriteMap>,
) -> eyre::Result<()> {
-let shareable_db = ShareableDatabase::new(db_tool.db, db_tool.chain.clone());
-let mut provider = shareable_db.provider_rw()?;
+let factory = ProviderFactory::new(db_tool.db, db_tool.chain.clone());
+let mut provider = factory.provider_rw()?;
let mut exec_stage = AccountHashingStage::default();

exec_stage
@@ -68,8 +68,8 @@ async fn dry_run<DB: Database>(
) -> eyre::Result<()> {
info!(target: "reth::cli", "Executing stage.");

-let shareable_db = ShareableDatabase::new(&output_db, chain);
-let mut provider = shareable_db.provider_rw()?;
+let factory = ProviderFactory::new(&output_db, chain);
+let mut provider = factory.provider_rw()?;
let mut exec_stage = AccountHashingStage {
clean_threshold: 1, // Forces hashing from scratch
..Default::default()

@@ -3,7 +3,7 @@ use crate::utils::DbTool;
use eyre::Result;
use reth_db::{database::Database, table::TableImporter, tables};
use reth_primitives::{stage::StageCheckpoint, ChainSpec};
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;
use reth_stages::{stages::StorageHashingStage, Stage, UnwindInput};
use std::{path::PathBuf, sync::Arc};
use tracing::info;
@@ -33,8 +33,8 @@ async fn unwind_and_copy<DB: Database>(
tip_block_number: u64,
output_db: &reth_db::mdbx::Env<reth_db::mdbx::WriteMap>,
) -> eyre::Result<()> {
-let shareable_db = ShareableDatabase::new(db_tool.db, db_tool.chain.clone());
-let mut provider = shareable_db.provider_rw()?;
+let factory = ProviderFactory::new(db_tool.db, db_tool.chain.clone());
+let mut provider = factory.provider_rw()?;

let mut exec_stage = StorageHashingStage::default();

@@ -67,8 +67,8 @@ async fn dry_run<DB: Database>(
) -> eyre::Result<()> {
info!(target: "reth::cli", "Executing stage.");

-let shareable_db = ShareableDatabase::new(&output_db, chain);
-let mut provider = shareable_db.provider_rw()?;
+let factory = ProviderFactory::new(&output_db, chain);
+let mut provider = factory.provider_rw()?;
let mut exec_stage = StorageHashingStage {
clean_threshold: 1, // Forces hashing from scratch
..Default::default()

@@ -3,7 +3,7 @@ use crate::utils::DbTool;
use eyre::Result;
use reth_db::{database::Database, table::TableImporter, tables};
use reth_primitives::{stage::StageCheckpoint, BlockNumber, ChainSpec};
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;
use reth_stages::{
stages::{
AccountHashingStage, ExecutionStage, ExecutionStageThresholds, MerkleStage,
@@ -48,8 +48,8 @@ async fn unwind_and_copy<DB: Database>(
output_db: &reth_db::mdbx::Env<reth_db::mdbx::WriteMap>,
) -> eyre::Result<()> {
let (from, to) = range;
-let shareable_db = ShareableDatabase::new(db_tool.db, db_tool.chain.clone());
-let mut provider = shareable_db.provider_rw()?;
+let factory = ProviderFactory::new(db_tool.db, db_tool.chain.clone());
+let mut provider = factory.provider_rw()?;

let unwind = UnwindInput {
unwind_to: from,
@@ -115,8 +115,8 @@ async fn dry_run<DB: Database>(
from: u64,
) -> eyre::Result<()> {
info!(target: "reth::cli", "Executing stage.");
-let shareable_db = ShareableDatabase::new(&output_db, chain);
-let mut provider = shareable_db.provider_rw()?;
+let factory = ProviderFactory::new(&output_db, chain);
+let mut provider = factory.provider_rw()?;
let mut exec_output = false;
while !exec_output {
exec_output = MerkleStage::Execution {

@@ -12,7 +12,7 @@ use reth_beacon_consensus::BeaconConsensus;
use reth_config::Config;
use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder;
use reth_primitives::ChainSpec;
-use reth_provider::{providers::get_stage_checkpoint, ShareableDatabase};
+use reth_provider::{providers::get_stage_checkpoint, ProviderFactory};
use reth_staged_sync::utils::init::init_db;
use reth_stages::{
stages::{
@@ -120,8 +120,8 @@ impl Command {

info!(target: "reth::cli", path = ?db_path, "Opening database");
let db = Arc::new(init_db(db_path)?);
-let shareable_db = ShareableDatabase::new(&db, self.chain.clone());
-let mut provider_rw = shareable_db.provider_rw().map_err(PipelineError::Interface)?;
+let factory = ProviderFactory::new(&db, self.chain.clone());
+let mut provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?;

if let Some(listen_addr) = self.metrics {
info!(target: "reth::cli", "Starting metrics endpoint at {}", listen_addr);
@@ -160,7 +160,7 @@ impl Command {
p2p_secret_key,
default_peers_path,
)
-.build(Arc::new(ShareableDatabase::new(db.clone(), self.chain.clone())))
+.build(Arc::new(ProviderFactory::new(db.clone(), self.chain.clone())))
.start_network()
.await?;
let fetch_client = Arc::new(network.fetch_client().await?);
@@ -250,7 +250,7 @@ impl Command {

if self.commit {
provider_rw.commit()?;
-provider_rw = shareable_db.provider_rw().map_err(PipelineError::Interface)?;
+provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?;
}
}

@@ -13,7 +13,7 @@ use reth_db::{
transaction::DbTx,
};
use reth_primitives::{BlockHashOrNumber, ChainSpec};
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;
use std::{ops::RangeInclusive, sync::Arc};

/// `reth stage unwind` command
@@ -69,8 +69,8 @@ impl Command {
eyre::bail!("Cannot unwind genesis block")
}

-let shareable_db = ShareableDatabase::new(&db, self.chain.clone());
-let provider = shareable_db.provider_rw()?;
+let factory = ProviderFactory::new(&db, self.chain.clone());
+let provider = factory.provider_rw()?;

let blocks_and_execution = provider
.take_block_and_execution_range(&self.chain, range)

@@ -2,7 +2,7 @@

use reth_db::database::Database;
use reth_primitives::ChainSpec;
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;
use std::sync::Arc;

/// A container for external components.
@@ -35,7 +35,7 @@ impl<DB, C, EF> TreeExternals<DB, C, EF> {

impl<DB: Database, C, EF> TreeExternals<DB, C, EF> {
/// Return shareable database helper structure.
-pub fn database(&self) -> ShareableDatabase<&DB> {
-ShareableDatabase::new(&self.db, self.chain_spec.clone())
+pub fn database(&self) -> ProviderFactory<&DB> {
+ProviderFactory::new(&self.db, self.chain_spec.clone())
}
}

@@ -1286,7 +1286,7 @@ mod tests {
use reth_payload_builder::test_utils::spawn_test_payload_service;
use reth_primitives::{stage::StageCheckpoint, ChainSpec, ChainSpecBuilder, H256, MAINNET};
use reth_provider::{
-providers::BlockchainProvider, test_utils::TestExecutorFactory, ShareableDatabase,
+providers::BlockchainProvider, test_utils::TestExecutorFactory, ProviderFactory,
};
use reth_stages::{test_utils::TestStages, ExecOutput, PipelineError, StageError};
use reth_tasks::TokioTaskExecutor;
@@ -1394,9 +1394,9 @@ mod tests {
BlockchainTree::new(externals, canon_state_notification_sender, config)
.expect("failed to create tree"),
);
-let shareable_db = ShareableDatabase::new(db.clone(), chain_spec.clone());
+let factory = ProviderFactory::new(db.clone(), chain_spec.clone());
let latest = chain_spec.genesis_header().seal_slow();
-let blockchain_provider = BlockchainProvider::with_latest(shareable_db, tree, latest);
+let blockchain_provider = BlockchainProvider::with_latest(factory, tree, latest);
let (engine, handle) = BeaconConsensusEngine::new(
NoopFullBlockClient::default(),
pipeline,
@@ -1561,7 +1561,7 @@
chain: Arc<ChainSpec>,
mut blocks: impl Iterator<Item = &'a SealedBlock>,
) {
-let factory = ShareableDatabase::new(db, chain);
+let factory = ProviderFactory::new(db, chain);
let mut provider = factory.provider_rw().unwrap();
blocks.try_for_each(|b| provider.insert_block(b.clone(), None)).expect("failed to insert");
provider.commit().unwrap();

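The test helper above keeps its shape after the rename. A hedged, standalone version of it (the `insert_block(block, None)` call and the iterator signature are copied from the hunk; error handling stays as `unwrap`/`expect`, as in the original):

```rust
use std::sync::Arc;

use reth_db::database::Database;
use reth_primitives::{ChainSpec, SealedBlock};
use reth_provider::ProviderFactory;

/// Sketch of the helper above: insert sealed blocks through a
/// `ProviderFactory`-backed read-write provider, then commit once.
fn insert_blocks<'a, DB: Database>(
    db: DB,
    chain: Arc<ChainSpec>,
    mut blocks: impl Iterator<Item = &'a SealedBlock>,
) {
    let factory = ProviderFactory::new(db, chain);
    let mut provider = factory.provider_rw().unwrap();
    blocks
        .try_for_each(|b| provider.insert_block(b.clone(), None))
        .expect("failed to insert");
    provider.commit().unwrap();
}
```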
@@ -6,7 +6,7 @@ use reth_db::{
transaction::{DbTx, DbTxMut},
};
use reth_primitives::{stage::StageId, Account, Bytecode, ChainSpec, H256, U256};
-use reth_provider::{DatabaseProviderRW, PostState, ShareableDatabase, TransactionError};
+use reth_provider::{DatabaseProviderRW, PostState, ProviderFactory, TransactionError};
use std::{path::Path, sync::Arc};
use tracing::debug;

@@ -72,8 +72,8 @@ pub fn init_genesis<DB: Database>(
debug!("Writing genesis block.");

// use transaction to insert genesis header
-let shareable_db = ShareableDatabase::new(&db, chain.clone());
-let provider_rw = shareable_db.provider_rw()?;
+let factory = ProviderFactory::new(&db, chain.clone());
+let provider_rw = factory.provider_rw()?;
insert_genesis_hashes(provider_rw, genesis)?;

// Insert header

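The `init_genesis` hunk above only swaps the factory type; from the caller's side the flow stays the same. A rough sketch, with the parameter and return types of `init_db`/`init_genesis` assumed rather than taken from this diff:

```rust
use std::{path::Path, sync::Arc};

use reth_primitives::MAINNET;
use reth_staged_sync::utils::init::{init_db, init_genesis};

/// Open (or create) the database at `path` and write the genesis block.
/// `init_genesis` builds a `ProviderFactory` internally, as the hunk above shows.
fn open_and_init(path: &Path) -> eyre::Result<()> {
    let db = Arc::new(init_db(path)?);
    // Assumed signature: `(Arc<DB>, Arc<ChainSpec>)`, returning the genesis hash.
    let _genesis_hash = init_genesis(db, MAINNET.clone())?;
    Ok(())
}
```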
@@ -6,7 +6,7 @@ use pprof::criterion::{Output, PProfProfiler};
use reth_db::mdbx::{Env, WriteMap};
use reth_interfaces::test_utils::TestConsensus;
use reth_primitives::{stage::StageCheckpoint, MAINNET};
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;
use reth_stages::{
stages::{MerkleStage, SenderRecoveryStage, TotalDifficultyStage, TransactionLookupStage},
test_utils::TestTransaction,
@@ -136,7 +136,7 @@ fn measure_stage_with_path<F, S>(
},
|_| async {
let mut stage = stage.clone();
-let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone());
let mut provider = factory.provider_rw().unwrap();
stage.execute(&mut provider, input).await.unwrap();
provider.commit().unwrap();

@@ -10,7 +10,7 @@ use reth_interfaces::test_utils::generators::{
random_transition_range,
};
use reth_primitives::{Account, Address, SealedBlock, H256, MAINNET};
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;
use reth_stages::{
stages::{AccountHashingStage, StorageHashingStage},
test_utils::TestTransaction,
@@ -38,7 +38,7 @@ pub(crate) fn stage_unwind<S: Clone + Stage<Env<WriteMap>>>(

tokio::runtime::Runtime::new().unwrap().block_on(async {
let mut stage = stage.clone();
-let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone());
let mut provider = factory.provider_rw().unwrap();

// Clear previous run
@@ -66,7 +66,7 @@ pub(crate) fn unwind_hashes<S: Clone + Stage<Env<WriteMap>>>(

tokio::runtime::Runtime::new().unwrap().block_on(async {
let mut stage = stage.clone();
-let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone());
let mut provider = factory.provider_rw().unwrap();

StorageHashingStage::default().unwind(&mut provider, unwind).await.unwrap();

@@ -6,7 +6,7 @@ use reth_primitives::{
constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH, listener::EventListeners, stage::StageId,
BlockNumber, ChainSpec, H256,
};
-use reth_provider::{providers::get_stage_checkpoint, ShareableDatabase};
+use reth_provider::{providers::get_stage_checkpoint, ProviderFactory};
use std::{pin::Pin, sync::Arc};
use tokio::sync::watch;
use tokio_stream::wrappers::UnboundedReceiverStream;
@@ -247,8 +247,8 @@ where
// Unwind stages in reverse order of execution
let unwind_pipeline = self.stages.iter_mut().rev();

-let shareable_db = ShareableDatabase::new(&self.db, self.chain_spec.clone());
-let mut provider_rw = shareable_db.provider_rw().map_err(PipelineError::Interface)?;
+let factory = ProviderFactory::new(&self.db, self.chain_spec.clone());
+let mut provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?;

for stage in unwind_pipeline {
let stage_id = stage.id();
@@ -291,8 +291,7 @@ where
.notify(PipelineEvent::Unwound { stage_id, result: unwind_output });

provider_rw.commit()?;
-provider_rw =
-shareable_db.provider_rw().map_err(PipelineError::Interface)?;
+provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?;
}
Err(err) => {
self.listeners.notify(PipelineEvent::Error { stage_id });
@@ -317,8 +316,8 @@ where
let mut made_progress = false;
let target = self.max_block.or(previous_stage);

-let shareable_db = ShareableDatabase::new(&self.db, self.chain_spec.clone());
-let mut provider_rw = shareable_db.provider_rw().map_err(PipelineError::Interface)?;
+let factory = ProviderFactory::new(&self.db, self.chain_spec.clone());
+let mut provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?;

loop {
let prev_checkpoint = provider_rw.get_stage_checkpoint(stage_id)?;
@@ -376,7 +375,7 @@ where

// TODO: Make the commit interval configurable
provider_rw.commit()?;
-provider_rw = shareable_db.provider_rw().map_err(PipelineError::Interface)?;
+provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?;

if done {
let stage_progress = checkpoint.block_number;

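One detail worth calling out in the pipeline hunks above: the `ProviderFactory` stays alive across commits so a fresh read-write provider can be reopened after every `commit()`. A minimal sketch of that pattern, assuming the same crates and using `eyre` instead of `PipelineError` for brevity:

```rust
use std::sync::Arc;

use reth_db::database::Database;
use reth_primitives::ChainSpec;
use reth_provider::ProviderFactory;

/// Commit-and-reopen loop, as the pipeline does between stage runs.
fn run_in_batches<DB: Database>(db: &DB, chain_spec: Arc<ChainSpec>) -> eyre::Result<()> {
    let factory = ProviderFactory::new(db, chain_spec);
    let mut provider_rw = factory.provider_rw()?;

    for _batch in 0..3 {
        // ... a stage would write its output through `provider_rw` here ...
        provider_rw.commit()?;
        // Open a fresh provider (and transaction) for the next batch.
        provider_rw = factory.provider_rw()?;
    }
    provider_rw.commit()?;
    Ok(())
}
```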
@@ -470,7 +470,7 @@ mod tests {
hex_literal::hex, keccak256, stage::StageUnitCheckpoint, Account, Bytecode,
ChainSpecBuilder, SealedBlock, StorageEntry, H160, H256, MAINNET, U256,
};
-use reth_provider::{insert_canonical_block, ShareableDatabase};
+use reth_provider::{insert_canonical_block, ProviderFactory};
use reth_revm::Factory;
use reth_rlp::Decodable;
use std::sync::Arc;
@@ -487,8 +487,8 @@ mod tests {
#[test]
fn execution_checkpoint_matches() {
let state_db = create_test_db::<WriteMap>(EnvKind::RW);
-let db = ShareableDatabase::new(state_db.as_ref(), MAINNET.clone());
-let tx = db.provider_rw().unwrap();
+let factory = ProviderFactory::new(state_db.as_ref(), MAINNET.clone());
+let tx = factory.provider_rw().unwrap();

let previous_stage_checkpoint = ExecutionCheckpoint {
block_range: CheckpointBlockRange { from: 0, to: 0 },
@@ -512,8 +512,8 @@ mod tests {
#[test]
fn execution_checkpoint_precedes() {
let state_db = create_test_db::<WriteMap>(EnvKind::RW);
-let db = ShareableDatabase::new(state_db.as_ref(), MAINNET.clone());
-let mut provider = db.provider_rw().unwrap();
+let factory = ProviderFactory::new(state_db.as_ref(), MAINNET.clone());
+let mut provider = factory.provider_rw().unwrap();

let mut genesis_rlp = hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice();
let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap();
@@ -532,7 +532,7 @@ mod tests {
stage_checkpoint: Some(StageUnitCheckpoint::Execution(previous_stage_checkpoint)),
};

-let provider = db.provider_rw().unwrap();
+let provider = factory.provider_rw().unwrap();
let stage_checkpoint = execution_checkpoint(&provider, 1, 1, previous_checkpoint);

assert_matches!(stage_checkpoint, Ok(ExecutionCheckpoint {
@@ -548,8 +548,8 @@ mod tests {
#[test]
fn execution_checkpoint_recalculate_full_previous_some() {
let state_db = create_test_db::<WriteMap>(EnvKind::RW);
-let db = ShareableDatabase::new(state_db.as_ref(), MAINNET.clone());
-let mut provider = db.provider_rw().unwrap();
+let factory = ProviderFactory::new(state_db.as_ref(), MAINNET.clone());
+let mut provider = factory.provider_rw().unwrap();

let mut genesis_rlp = hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice();
let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap();
@@ -568,7 +568,7 @@ mod tests {
stage_checkpoint: Some(StageUnitCheckpoint::Execution(previous_stage_checkpoint)),
};

-let provider = db.provider_rw().unwrap();
+let provider = factory.provider_rw().unwrap();
let stage_checkpoint = execution_checkpoint(&provider, 1, 1, previous_checkpoint);

assert_matches!(stage_checkpoint, Ok(ExecutionCheckpoint {
@@ -584,8 +584,8 @@ mod tests {
#[test]
fn execution_checkpoint_recalculate_full_previous_none() {
let state_db = create_test_db::<WriteMap>(EnvKind::RW);
-let db = ShareableDatabase::new(state_db.as_ref(), MAINNET.clone());
-let mut provider = db.provider_rw().unwrap();
+let factory = ProviderFactory::new(state_db.as_ref(), MAINNET.clone());
+let mut provider = factory.provider_rw().unwrap();

let mut genesis_rlp = hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice();
let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap();
@@ -597,7 +597,7 @@ mod tests {

let previous_checkpoint = StageCheckpoint { block_number: 1, stage_checkpoint: None };

-let provider = db.provider_rw().unwrap();
+let provider = factory.provider_rw().unwrap();
let stage_checkpoint = execution_checkpoint(&provider, 1, 1, previous_checkpoint);

assert_matches!(stage_checkpoint, Ok(ExecutionCheckpoint {
@@ -614,8 +614,8 @@ mod tests {
// TODO cleanup the setup after https://github.com/paradigmxyz/reth/issues/332
// is merged as it has similar framework
let state_db = create_test_db::<WriteMap>(EnvKind::RW);
-let db = ShareableDatabase::new(state_db.as_ref(), MAINNET.clone());
-let mut provider = db.provider_rw().unwrap();
+let factory = ProviderFactory::new(state_db.as_ref(), MAINNET.clone());
+let mut provider = factory.provider_rw().unwrap();
let input = ExecInput {
target: Some(1),
/// The progress of this stage the last time it was executed.
@@ -630,7 +630,7 @@ mod tests {
provider.commit().unwrap();

// insert pre state
-let mut provider = db.provider_rw().unwrap();
+let mut provider = factory.provider_rw().unwrap();
let db_tx = provider.tx_mut();
let acc1 = H160(hex!("1000000000000000000000000000000000000000"));
let acc2 = H160(hex!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b"));
@@ -652,7 +652,7 @@ mod tests {
db_tx.put::<tables::Bytecodes>(code_hash, Bytecode::new_raw(code.to_vec().into())).unwrap();
provider.commit().unwrap();

-let mut provider = db.provider_rw().unwrap();
+let mut provider = factory.provider_rw().unwrap();
let mut execution_stage = stage();
let output = execution_stage.execute(&mut provider, input).await.unwrap();
provider.commit().unwrap();
@@ -672,7 +672,7 @@ mod tests {
},
done: true
} if processed == total && total == block.gas_used);
-let mut provider = db.provider_rw().unwrap();
+let mut provider = factory.provider_rw().unwrap();
let tx = provider.tx_mut();
// check post state
let account1 = H160(hex!("1000000000000000000000000000000000000000"));
@@ -722,8 +722,8 @@ mod tests {
// is merged as it has similar framework

let state_db = create_test_db::<WriteMap>(EnvKind::RW);
-let db = ShareableDatabase::new(state_db.as_ref(), MAINNET.clone());
-let mut provider = db.provider_rw().unwrap();
+let factory = ProviderFactory::new(state_db.as_ref(), MAINNET.clone());
+let mut provider = factory.provider_rw().unwrap();
let input = ExecInput {
target: Some(1),
/// The progress of this stage the last time it was executed.
@@ -742,7 +742,7 @@ mod tests {
let balance = U256::from(0x3635c9adc5dea00000u128);
let code_hash = keccak256(code);
// pre state
-let mut provider = db.provider_rw().unwrap();
+let mut provider = factory.provider_rw().unwrap();
let db_tx = provider.tx_mut();
let acc1 = H160(hex!("1000000000000000000000000000000000000000"));
let acc1_info = Account { nonce: 0, balance: U256::ZERO, bytecode_hash: Some(code_hash) };
@@ -755,12 +755,12 @@ mod tests {
provider.commit().unwrap();

// execute
-let mut provider = db.provider_rw().unwrap();
+let mut provider = factory.provider_rw().unwrap();
let mut execution_stage = stage();
let result = execution_stage.execute(&mut provider, input).await.unwrap();
provider.commit().unwrap();

-let mut provider = db.provider_rw().unwrap();
+let mut provider = factory.provider_rw().unwrap();
let mut stage = stage();
let result = stage
.unwind(
@@ -812,7 +812,7 @@ mod tests {
#[tokio::test]
async fn test_selfdestruct() {
let test_tx = TestTransaction::default();
-let factory = ShareableDatabase::new(test_tx.tx.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(test_tx.tx.as_ref(), MAINNET.clone());
let mut provider = factory.provider_rw().unwrap();
let input = ExecInput {
target: Some(1),

@@ -390,7 +390,7 @@ mod tests {
use assert_matches::assert_matches;
use reth_interfaces::test_utils::generators::random_header;
use reth_primitives::{stage::StageUnitCheckpoint, H256, MAINNET};
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;
use test_runner::HeadersTestRunner;

mod test_runner {
@@ -602,7 +602,7 @@ mod tests {
#[tokio::test]
async fn head_and_tip_lookup() {
let runner = HeadersTestRunner::default();
-let factory = ShareableDatabase::new(runner.tx().tx.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(runner.tx().tx.as_ref(), MAINNET.clone());
let provider = factory.provider_rw().unwrap();
let tx = provider.tx_ref();
let mut stage = runner.stage();

@@ -140,7 +140,7 @@ fn stage_checkpoint<DB: Database>(
#[cfg(test)]
mod tests {
use assert_matches::assert_matches;
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;
use std::collections::BTreeMap;

use super::*;
@@ -210,7 +210,7 @@ mod tests {
async fn run(tx: &TestTransaction, run_to: u64) {
let input = ExecInput { target: Some(run_to), ..Default::default() };
let mut stage = IndexAccountHistoryStage::default();
-let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone());
let mut provider = factory.provider_rw().unwrap();
let out = stage.execute(&mut provider, input).await.unwrap();
assert_eq!(
@@ -235,7 +235,7 @@ mod tests {
..Default::default()
};
let mut stage = IndexAccountHistoryStage::default();
-let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone());
let mut provider = factory.provider_rw().unwrap();
let out = stage.unwind(&mut provider, input).await.unwrap();
assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) });
@@ -449,7 +449,7 @@ mod tests {
// run
{
let mut stage = IndexAccountHistoryStage { commit_threshold: 4 }; // Two runs required
-let factory = ShareableDatabase::new(&test_tx.tx, MAINNET.clone());
+let factory = ProviderFactory::new(&test_tx.tx, MAINNET.clone());
let mut provider = factory.provider_rw().unwrap();

let mut input = ExecInput { target: Some(5), ..Default::default() };
@@ -538,7 +538,7 @@ mod tests {
})
.unwrap();

-let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone());
let provider = factory.provider_rw().unwrap();

assert_matches!(

@@ -143,7 +143,7 @@ fn stage_checkpoint<DB: Database>(
mod tests {

use assert_matches::assert_matches;
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;
use std::collections::BTreeMap;

use super::*;
@@ -223,7 +223,7 @@ mod tests {
async fn run(tx: &TestTransaction, run_to: u64) {
let input = ExecInput { target: Some(run_to), ..Default::default() };
let mut stage = IndexStorageHistoryStage::default();
-let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone());
let mut provider = factory.provider_rw().unwrap();
let out = stage.execute(&mut provider, input).await.unwrap();
assert_eq!(
@@ -248,7 +248,7 @@ mod tests {
..Default::default()
};
let mut stage = IndexStorageHistoryStage::default();
-let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone());
let mut provider = factory.provider_rw().unwrap();
let out = stage.unwind(&mut provider, input).await.unwrap();
assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) });
@@ -465,7 +465,7 @@ mod tests {
// run
{
let mut stage = IndexStorageHistoryStage { commit_threshold: 4 }; // Two runs required
-let factory = ShareableDatabase::new(&test_tx.tx, MAINNET.clone());
+let factory = ProviderFactory::new(&test_tx.tx, MAINNET.clone());
let mut provider = factory.provider_rw().unwrap();

let mut input = ExecInput { target: Some(5), ..Default::default() };
@@ -564,7 +564,7 @@ mod tests {
})
.unwrap();

-let factory = ShareableDatabase::new(tx.tx.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone());
let provider = factory.provider_rw().unwrap();

assert_matches!(

@@ -2,7 +2,7 @@ use super::TestTransaction;
use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput};
use reth_db::mdbx::{Env, WriteMap};
use reth_primitives::MAINNET;
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;
use std::{borrow::Borrow, sync::Arc};
use tokio::sync::oneshot;

@@ -45,7 +45,7 @@ pub(crate) trait ExecuteStageTestRunner: StageTestRunner {
let (tx, rx) = oneshot::channel();
let (db, mut stage) = (self.tx().inner_raw(), self.stage());
tokio::spawn(async move {
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let mut provider = factory.provider_rw().unwrap();

let result = stage.execute(&mut provider, input).await;
@@ -71,7 +71,7 @@ pub(crate) trait UnwindStageTestRunner: StageTestRunner {
let (tx, rx) = oneshot::channel();
let (db, mut stage) = (self.tx().inner_raw(), self.stage());
tokio::spawn(async move {
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let mut provider = factory.provider_rw().unwrap();

let result = stage.unwind(&mut provider, input).await;

@@ -16,7 +16,7 @@ use reth_primitives::{
keccak256, Account, Address, BlockNumber, SealedBlock, SealedHeader, StorageEntry, H256,
MAINNET, U256,
};
-use reth_provider::{DatabaseProviderRW, ShareableDatabase};
+use reth_provider::{DatabaseProviderRW, ProviderFactory};
use std::{
borrow::Borrow,
collections::BTreeMap,
@@ -37,14 +37,14 @@ pub struct TestTransaction {
/// WriteMap DB
pub tx: Arc<Env<WriteMap>>,
pub path: Option<PathBuf>,
-factory: ShareableDatabase<Arc<Env<WriteMap>>>,
+factory: ProviderFactory<Arc<Env<WriteMap>>>,
}

impl Default for TestTransaction {
/// Create a new instance of [TestTransaction]
fn default() -> Self {
let tx = create_test_db::<WriteMap>(EnvKind::RW);
-Self { tx: tx.clone(), path: None, factory: ShareableDatabase::new(tx, MAINNET.clone()) }
+Self { tx: tx.clone(), path: None, factory: ProviderFactory::new(tx, MAINNET.clone()) }
}
}

@@ -54,7 +54,7 @@ impl TestTransaction {
Self {
tx: tx.clone(),
path: Some(path.to_path_buf()),
-factory: ShareableDatabase::new(tx, MAINNET.clone()),
+factory: ProviderFactory::new(tx, MAINNET.clone()),
}
}

@@ -24,7 +24,7 @@ pub use traits::{
pub mod providers;
pub use providers::{
DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW, HistoricalStateProvider,
-HistoricalStateProviderRef, LatestStateProvider, LatestStateProviderRef, ShareableDatabase,
+HistoricalStateProviderRef, LatestStateProvider, LatestStateProviderRef, ProviderFactory,
};

/// Execution result

@@ -24,14 +24,14 @@ pub use provider::{DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW};
///
/// This provider implements most provider or provider factory traits.
#[derive(Debug)]
-pub struct ShareableDatabase<DB> {
+pub struct ProviderFactory<DB> {
/// Database
db: DB,
/// Chain spec
chain_spec: Arc<ChainSpec>,
}

-impl<DB: Database> ShareableDatabase<DB> {
+impl<DB: Database> ProviderFactory<DB> {
/// Returns a provider with a created `DbTx` inside, which allows fetching data from the
/// database using different types of providers. Example: [`HeaderProvider`]
/// [`BlockHashProvider`]. This may fail if the inner read database transaction fails to open.
@@ -48,20 +48,20 @@ impl<DB: Database> ShareableDatabase<DB> {
}
}

-impl<DB> ShareableDatabase<DB> {
+impl<DB> ProviderFactory<DB> {
/// create new database provider
pub fn new(db: DB, chain_spec: Arc<ChainSpec>) -> Self {
Self { db, chain_spec }
}
}

-impl<DB: Clone> Clone for ShareableDatabase<DB> {
+impl<DB: Clone> Clone for ProviderFactory<DB> {
fn clone(&self) -> Self {
Self { db: self.db.clone(), chain_spec: Arc::clone(&self.chain_spec) }
}
}

-impl<DB: Database> ShareableDatabase<DB> {
+impl<DB: Database> ProviderFactory<DB> {
/// Storage provider for latest block
pub fn latest(&self) -> Result<StateProviderBox<'_>> {
trace!(target: "providers::db", "Returning latest state provider");
@@ -111,7 +111,7 @@ impl<DB: Database> ShareableDatabase<DB> {
}
}

-impl<DB: Database> HeaderProvider for ShareableDatabase<DB> {
+impl<DB: Database> HeaderProvider for ProviderFactory<DB> {
fn header(&self, block_hash: &BlockHash) -> Result<Option<Header>> {
self.provider()?.header(block_hash)
}
@@ -144,7 +144,7 @@ impl<DB: Database> HeaderProvider for ShareableDatabase<DB> {
}
}

-impl<DB: Database> BlockHashProvider for ShareableDatabase<DB> {
+impl<DB: Database> BlockHashProvider for ProviderFactory<DB> {
fn block_hash(&self, number: u64) -> Result<Option<H256>> {
self.provider()?.block_hash(number)
}
@@ -154,7 +154,7 @@ impl<DB: Database> BlockHashProvider for ShareableDatabase<DB> {
}
}

-impl<DB: Database> BlockNumProvider for ShareableDatabase<DB> {
+impl<DB: Database> BlockNumProvider for ProviderFactory<DB> {
fn chain_info(&self) -> Result<ChainInfo> {
self.provider()?.chain_info()
}
@@ -172,7 +172,7 @@ impl<DB: Database> BlockNumProvider for ShareableDatabase<DB> {
}
}

-impl<DB: Database> BlockProvider for ShareableDatabase<DB> {
+impl<DB: Database> BlockProvider for ProviderFactory<DB> {
fn find_block_by_hash(&self, hash: H256, source: BlockSource) -> Result<Option<Block>> {
self.provider()?.find_block_by_hash(hash, source)
}
@@ -194,7 +194,7 @@ impl<DB: Database> BlockProvider for ShareableDatabase<DB> {
}
}

-impl<DB: Database> TransactionsProvider for ShareableDatabase<DB> {
+impl<DB: Database> TransactionsProvider for ProviderFactory<DB> {
fn transaction_id(&self, tx_hash: TxHash) -> Result<Option<TxNumber>> {
self.provider()?.transaction_id(tx_hash)
}
@@ -233,7 +233,7 @@ impl<DB: Database> TransactionsProvider for ShareableDatabase<DB> {
}
}

-impl<DB: Database> ReceiptProvider for ShareableDatabase<DB> {
+impl<DB: Database> ReceiptProvider for ProviderFactory<DB> {
fn receipt(&self, id: TxNumber) -> Result<Option<Receipt>> {
self.provider()?.receipt(id)
}
@@ -247,7 +247,7 @@ impl<DB: Database> ReceiptProvider for ShareableDatabase<DB> {
}
}

-impl<DB: Database> WithdrawalsProvider for ShareableDatabase<DB> {
+impl<DB: Database> WithdrawalsProvider for ProviderFactory<DB> {
fn withdrawals_by_block(
&self,
id: BlockHashOrNumber,
@@ -261,13 +261,13 @@ impl<DB: Database> WithdrawalsProvider for ShareableDatabase<DB> {
}
}

-impl<DB: Database> StageCheckpointProvider for ShareableDatabase<DB> {
+impl<DB: Database> StageCheckpointProvider for ProviderFactory<DB> {
fn get_stage_checkpoint(&self, id: StageId) -> Result<Option<StageCheckpoint>> {
self.provider()?.get_stage_checkpoint(id)
}
}

-impl<DB: Database> EvmEnvProvider for ShareableDatabase<DB> {
+impl<DB: Database> EvmEnvProvider for ProviderFactory<DB> {
fn fill_env_at(
&self,
cfg: &mut CfgEnv,
@@ -317,7 +317,7 @@ where

#[cfg(test)]
mod tests {
-use super::ShareableDatabase;
+use super::ProviderFactory;
use crate::{BlockHashProvider, BlockNumProvider};
use reth_db::mdbx::{test_utils::create_test_db, EnvKind, WriteMap};
use reth_primitives::{ChainSpecBuilder, H256};
@@ -327,7 +327,7 @@ mod tests {
fn common_history_provider() {
let chain_spec = ChainSpecBuilder::mainnet().build();
let db = create_test_db::<WriteMap>(EnvKind::RW);
-let provider = ShareableDatabase::new(db, Arc::new(chain_spec));
+let provider = ProviderFactory::new(db, Arc::new(chain_spec));
let _ = provider.latest();
}

@@ -335,8 +335,8 @@ mod tests {
fn default_chain_info() {
let chain_spec = ChainSpecBuilder::mainnet().build();
let db = create_test_db::<WriteMap>(EnvKind::RW);
-let db = ShareableDatabase::new(db, Arc::new(chain_spec));
-let provider = db.provider().unwrap();
+let factory = ProviderFactory::new(db, Arc::new(chain_spec));
+let provider = factory.provider().unwrap();

let chain_info = provider.chain_info().expect("should be ok");
assert_eq!(chain_info.best_number, 0);
@@ -347,10 +347,10 @@ mod tests {
fn provider_flow() {
let chain_spec = ChainSpecBuilder::mainnet().build();
let db = create_test_db::<WriteMap>(EnvKind::RW);
-let db = ShareableDatabase::new(db, Arc::new(chain_spec));
-let provider = db.provider().unwrap();
+let factory = ProviderFactory::new(db, Arc::new(chain_spec));
+let provider = factory.provider().unwrap();
provider.block_hash(0).unwrap();
-let provider_rw = db.provider_rw().unwrap();
+let provider_rw = factory.provider_rw().unwrap();
provider_rw.block_hash(0).unwrap();
provider.block_hash(0).unwrap();
}

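Since `ProviderFactory` keeps all of the trait implementations shown in the hunks above (`HeaderProvider`, `BlockHashProvider`, `BlockNumProvider`, and so on), it can also be queried directly; each call opens a short-lived read provider internally. A hedged sketch, assuming these traits are re-exported from `reth_provider` and that `ChainInfo` exposes `best_hash`/`best_number`:

```rust
use reth_db::mdbx::{test_utils::create_test_db, EnvKind, WriteMap};
use reth_primitives::MAINNET;
use reth_provider::{BlockHashProvider, BlockNumProvider, HeaderProvider, ProviderFactory};

fn query_factory_directly() {
    let db = create_test_db::<WriteMap>(EnvKind::RW);
    let factory = ProviderFactory::new(db, MAINNET.clone());

    // Each call delegates to `self.provider()?.<method>(..)`, as in the impl
    // blocks above.
    let info = factory.chain_info().unwrap();
    let _best_hash = factory.block_hash(info.best_number).unwrap();
    let _header = factory.header(&info.best_hash).unwrap();
}
```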
@@ -46,7 +46,7 @@ use reth_interfaces::blockchain_tree::{error::InsertBlockError, CanonicalOutcome
#[derive(Clone)]
pub struct BlockchainProvider<DB, Tree> {
/// Provider type used to access the database.
-database: ShareableDatabase<DB>,
+database: ProviderFactory<DB>,
/// The blockchain tree instance.
tree: Tree,
/// Tracks the chain info wrt forkchoice updates
@@ -56,7 +56,7 @@ pub struct BlockchainProvider<DB, Tree> {
impl<DB, Tree> BlockchainProvider<DB, Tree> {
/// Create new provider instance that wraps the database and the blockchain tree, using the
/// provided latest header to initialize the chain info tracker.
-pub fn with_latest(database: ShareableDatabase<DB>, tree: Tree, latest: SealedHeader) -> Self {
+pub fn with_latest(database: ProviderFactory<DB>, tree: Tree, latest: SealedHeader) -> Self {
Self { database, tree, chain_info: ChainInfoTracker::new(latest) }
}
}
@@ -67,7 +67,7 @@ where
{
/// Create a new provider using only the database and the tree, fetching the latest header from
/// the database to initialize the provider.
-pub fn new(database: ShareableDatabase<DB>, tree: Tree) -> Result<Self> {
+pub fn new(database: ProviderFactory<DB>, tree: Tree) -> Result<Self> {
let provider = database.provider()?;
let best: ChainInfo = provider.chain_info()?;
match provider.header_by_number(best.best_number)? {

@@ -44,7 +44,7 @@ pub enum TransactionError {
#[cfg(test)]
mod test {
use crate::{
-insert_canonical_block, test_utils::blocks::*, ShareableDatabase, TransactionsProvider,
+insert_canonical_block, test_utils::blocks::*, ProviderFactory, TransactionsProvider,
};
use reth_db::{
mdbx::test_utils::create_test_rw_db,
@@ -65,7 +65,7 @@ mod test {
.shanghai_activated()
.build();

-let factory = ShareableDatabase::new(db.as_ref(), Arc::new(chain_spec.clone()));
+let factory = ProviderFactory::new(db.as_ref(), Arc::new(chain_spec.clone()));
let mut provider = factory.provider_rw().unwrap();

let data = BlockChainTestData::default();
@@ -183,7 +183,7 @@ mod test {
.build(),
);

-let factory = ShareableDatabase::new(db.as_ref(), chain_spec.clone());
+let factory = ProviderFactory::new(db.as_ref(), chain_spec.clone());
let mut provider = factory.provider_rw().unwrap();

let data = BlockChainTestData::default();

@@ -525,7 +525,7 @@ mod tests {
trie::{BranchNodeCompact, TrieMask},
Account, Address, H256, MAINNET, U256,
};
-use reth_provider::{DatabaseProviderRW, ShareableDatabase};
+use reth_provider::{DatabaseProviderRW, ProviderFactory};
use std::{collections::BTreeMap, ops::Mul, str::FromStr};

fn insert_account<'a, TX: DbTxMut<'a>>(
@@ -555,7 +555,7 @@ mod tests {

fn incremental_vs_full_root(inputs: &[&str], modified: &str) {
let db = create_test_rw_db();
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let mut tx = factory.provider_rw().unwrap();
let hashed_address = H256::from_low_u64_be(1);

@@ -622,7 +622,7 @@ mod tests {

let hashed_address = keccak256(address);
let db = create_test_rw_db();
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let tx = factory.provider_rw().unwrap();
for (key, value) in &storage {
tx.tx_ref().put::<tables::HashedStorage>(
@@ -680,7 +680,7 @@ mod tests {
// This ensures we return an empty root when there are no storage entries
fn test_empty_storage_root() {
let db = create_test_rw_db();
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let mut tx = factory.provider_rw().unwrap();

let address = Address::random();
@@ -702,7 +702,7 @@ mod tests {
// This ensures that the walker goes over all the storage slots
fn test_storage_root() {
let db = create_test_rw_db();
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let mut tx = factory.provider_rw().unwrap();

let address = Address::random();
@@ -746,7 +746,7 @@ mod tests {
state.values().map(|(_, slots)| slots.len()).sum::<usize>();

let db = create_test_rw_db();
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let mut tx = factory.provider_rw().unwrap();

for (address, (account, storage)) in &state {
@@ -785,7 +785,7 @@ mod tests {

fn test_state_root_with_state(state: State) {
let db = create_test_rw_db();
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let mut tx = factory.provider_rw().unwrap();

for (address, (account, storage)) in &state {
@@ -812,7 +812,7 @@ mod tests {
#[test]
fn storage_root_regression() {
let db = create_test_rw_db();
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let tx = factory.provider_rw().unwrap();
// Some address whose hash starts with 0xB041
let address3 = Address::from_str("16b07afd1c635f77172e842a000ead9a2a222459").unwrap();
@@ -857,7 +857,7 @@ mod tests {
);

let db = create_test_rw_db();
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let mut tx = factory.provider_rw().unwrap();

let mut hashed_account_cursor =
@@ -1164,7 +1164,7 @@ mod tests {
#[test]
fn account_trie_around_extension_node() {
let db = create_test_rw_db();
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let mut tx = factory.provider_rw().unwrap();

let expected = extension_node_trie(&mut tx);
@@ -1190,7 +1190,7 @@ mod tests {

fn account_trie_around_extension_node_with_dbtrie() {
let db = create_test_rw_db();
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let mut tx = factory.provider_rw().unwrap();

let expected = extension_node_trie(&mut tx);
@@ -1218,7 +1218,7 @@ mod tests {
tokio::runtime::Runtime::new().unwrap().block_on(async {

let db = create_test_rw_db();
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let mut tx = factory.provider_rw().unwrap();
let mut hashed_account_cursor = tx.tx_ref().cursor_write::<tables::HashedAccount>().unwrap();

@@ -1252,7 +1252,7 @@ mod tests {
#[test]
fn storage_trie_around_extension_node() {
let db = create_test_rw_db();
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let mut tx = factory.provider_rw().unwrap();

let hashed_address = H256::random();

@@ -47,12 +47,12 @@ mod tests {
transaction::DbTxMut,
};
use reth_primitives::{hex_literal::hex, MAINNET};
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;

#[test]
fn test_account_trie_order() {
let db = create_test_rw_db();
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let provider = factory.provider_rw().unwrap();
let mut cursor = provider.tx_ref().cursor_write::<tables::AccountsTrie>().unwrap();

@@ -64,13 +64,13 @@ mod tests {
trie::{BranchNodeCompact, StorageTrieEntry},
MAINNET,
};
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;

// tests that upsert and seek match on the storagetrie cursor
#[test]
fn test_storage_cursor_abstraction() {
let db = create_test_rw_db();
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let provider = factory.provider_rw().unwrap();
let mut cursor = provider.tx_ref().cursor_dup_write::<tables::StoragesTrie>().unwrap();

@@ -263,7 +263,7 @@ mod tests {
cursor::DbCursorRW, mdbx::test_utils::create_test_rw_db, tables, transaction::DbTxMut,
};
use reth_primitives::{trie::StorageTrieEntry, MAINNET};
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;

#[test]
fn walk_nodes_with_common_prefix() {
@@ -290,7 +290,7 @@ mod tests {

let db = create_test_rw_db();

-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let tx = factory.provider_rw().unwrap();

let mut account_cursor = tx.tx_ref().cursor_write::<tables::AccountsTrie>().unwrap();
@@ -336,7 +336,7 @@ mod tests {
#[test]
fn cursor_rootnode_with_changesets() {
let db = create_test_rw_db();
-let factory = ShareableDatabase::new(db.as_ref(), MAINNET.clone());
+let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
let tx = factory.provider_rw().unwrap();
let mut cursor = tx.tx_ref().cursor_dup_write::<tables::StoragesTrie>().unwrap();

@@ -6,7 +6,7 @@ use crate::{
};
use reth_db::mdbx::test_utils::create_test_rw_db;
use reth_primitives::{BlockBody, SealedBlock};
-use reth_provider::ShareableDatabase;
+use reth_provider::ProviderFactory;
use reth_stages::{stages::ExecutionStage, ExecInput, Stage};
use std::{collections::BTreeMap, ffi::OsStr, fs, path::Path, sync::Arc};

@@ -75,8 +75,7 @@ impl Case for BlockchainTestCase {

// Create the database
let db = create_test_rw_db();
-let factory =
-ShareableDatabase::new(db.as_ref(), Arc::new(case.network.clone().into()));
+let factory = ProviderFactory::new(db.as_ref(), Arc::new(case.network.clone().into()));
let mut provider = factory.provider_rw().unwrap();

// Insert test state