chore: rename ShareableDatabase to ProviderFactory (#3121)

This commit is contained in:
joshieDo
2023-06-14 07:49:32 +01:00
committed by GitHub
parent 724f480bbb
commit 209d2445b0
31 changed files with 156 additions and 158 deletions

View File

@@ -26,7 +26,7 @@ use reth_interfaces::{
use reth_network::NetworkHandle;
use reth_network_api::NetworkInfo;
use reth_primitives::{stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, H256};
use reth_provider::{providers::get_stage_checkpoint, ShareableDatabase};
use reth_provider::{providers::get_stage_checkpoint, ProviderFactory};
use reth_staged_sync::utils::init::{init_db, init_genesis};
use reth_stages::{
sets::DefaultStages,
@@ -170,7 +170,7 @@ impl Command {
Ipv4Addr::UNSPECIFIED,
self.network.discovery.port.unwrap_or(DEFAULT_DISCOVERY_PORT),
)))
.build(ShareableDatabase::new(db, self.chain.clone()))
.build(ProviderFactory::new(db, self.chain.clone()))
.start_network()
.await?;
info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), "Connected to P2P network");
@@ -250,7 +250,7 @@ impl Command {
}
let mut current_max_block = latest_block_number;
let shareable_db = ShareableDatabase::new(&db, self.chain.clone());
let factory = ProviderFactory::new(&db, self.chain.clone());
while current_max_block < self.to {
let next_block = current_max_block + 1;
@@ -266,7 +266,7 @@
// Unwind the pipeline without committing.
{
shareable_db
factory
.provider_rw()
.map_err(PipelineError::Interface)?
.take_block_and_execution_range(&self.chain, next_block..=target_block)?;

View File

@@ -9,7 +9,7 @@ use reth_primitives::{
stage::{StageCheckpoint, StageId},
ChainSpec,
};
use reth_provider::ShareableDatabase;
use reth_provider::ProviderFactory;
use reth_staged_sync::utils::init::init_db;
use reth_stages::{
stages::{
@@ -68,8 +68,8 @@ impl Command {
std::fs::create_dir_all(&db_path)?;
let db = Arc::new(init_db(db_path)?);
let shareable_db = ShareableDatabase::new(&db, self.chain.clone());
let mut provider_rw = shareable_db.provider_rw().map_err(PipelineError::Interface)?;
let factory = ProviderFactory::new(&db, self.chain.clone());
let mut provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?;
let execution_checkpoint_block =
provider_rw.get_stage_checkpoint(StageId::Execution)?.unwrap_or_default().block_number;

View File

@@ -46,7 +46,7 @@ use reth_primitives::{
};
use reth_provider::{
providers::get_stage_checkpoint, BlockProvider, CanonStateSubscriptions, HeaderProvider,
ShareableDatabase,
ProviderFactory,
};
use reth_revm::Factory;
use reth_revm_inspectors::stack::Hook;
@@ -199,8 +199,8 @@ impl Command {
)?);
// setup the blockchain provider
let shareable_db = ShareableDatabase::new(Arc::clone(&db), Arc::clone(&self.chain));
let blockchain_db = BlockchainProvider::new(shareable_db, blockchain_tree.clone())?;
let factory = ProviderFactory::new(Arc::clone(&db), Arc::clone(&self.chain));
let blockchain_db = BlockchainProvider::new(factory, blockchain_tree.clone())?;
let transaction_pool = reth_transaction_pool::Pool::eth_pool(
EthTransactionValidator::new(blockchain_db.clone(), Arc::clone(&self.chain)),
@@ -600,7 +600,7 @@ impl Command {
executor: TaskExecutor,
secret_key: SecretKey,
default_peers_path: PathBuf,
) -> NetworkConfig<ShareableDatabase<Arc<Env<WriteMap>>>> {
) -> NetworkConfig<ProviderFactory<Arc<Env<WriteMap>>>> {
let head = self.lookup_head(Arc::clone(&db)).expect("the head block is missing");
self.network
@@ -615,7 +615,7 @@
Ipv4Addr::UNSPECIFIED,
self.network.discovery.port.unwrap_or(DEFAULT_DISCOVERY_PORT),
)))
.build(ShareableDatabase::new(db, self.chain.clone()))
.build(ProviderFactory::new(db, self.chain.clone()))
}
#[allow(clippy::too_many_arguments)]

View File

@@ -15,7 +15,7 @@ use reth_db::mdbx::{Env, EnvKind, WriteMap};
use reth_discv4::NatResolver;
use reth_interfaces::p2p::bodies::client::BodiesClient;
use reth_primitives::{BlockHashOrNumber, ChainSpec, NodeRecord};
use reth_provider::ShareableDatabase;
use reth_provider::ProviderFactory;
use std::{path::PathBuf, sync::Arc};
/// `reth p2p` command
@@ -129,7 +129,7 @@ impl Command {
network_config_builder = self.discovery.apply_to_builder(network_config_builder);
let network = network_config_builder
.build(Arc::new(ShareableDatabase::new(noop_db, self.chain.clone())))
.build(Arc::new(ProviderFactory::new(noop_db, self.chain.clone())))
.start_network()
.await?;

View File

@@ -5,7 +5,7 @@ use reth_db::{
cursor::DbCursorRO, database::Database, table::TableImporter, tables, transaction::DbTx,
};
use reth_primitives::{stage::StageCheckpoint, ChainSpec};
use reth_provider::ShareableDatabase;
use reth_provider::ProviderFactory;
use reth_revm::Factory;
use reth_stages::{stages::ExecutionStage, Stage, UnwindInput};
use std::{path::PathBuf, sync::Arc};
@@ -94,8 +94,8 @@ async fn unwind_and_copy<DB: Database>(
tip_block_number: u64,
output_db: &reth_db::mdbx::Env<reth_db::mdbx::WriteMap>,
) -> eyre::Result<()> {
let shareable_db = ShareableDatabase::new(db_tool.db, db_tool.chain.clone());
let mut provider = shareable_db.provider_rw()?;
let factory = ProviderFactory::new(db_tool.db, db_tool.chain.clone());
let mut provider = factory.provider_rw()?;
let mut exec_stage = ExecutionStage::new_with_factory(Factory::new(db_tool.chain.clone()));
@@ -129,8 +129,8 @@ async fn dry_run<DB: Database>(
) -> eyre::Result<()> {
info!(target: "reth::cli", "Executing stage. [dry-run]");
let shareable_db = ShareableDatabase::new(&output_db, chain.clone());
let mut provider = shareable_db.provider_rw()?;
let factory = ProviderFactory::new(&output_db, chain.clone());
let mut provider = factory.provider_rw()?;
let mut exec_stage = ExecutionStage::new_with_factory(Factory::new(chain.clone()));
exec_stage

View File

@@ -3,7 +3,7 @@ use crate::utils::DbTool;
use eyre::Result;
use reth_db::{database::Database, table::TableImporter, tables};
use reth_primitives::{stage::StageCheckpoint, BlockNumber, ChainSpec};
use reth_provider::ShareableDatabase;
use reth_provider::ProviderFactory;
use reth_stages::{stages::AccountHashingStage, Stage, UnwindInput};
use std::{path::PathBuf, sync::Arc};
use tracing::info;
@@ -38,8 +38,8 @@ async fn unwind_and_copy<DB: Database>(
tip_block_number: u64,
output_db: &reth_db::mdbx::Env<reth_db::mdbx::WriteMap>,
) -> eyre::Result<()> {
let shareable_db = ShareableDatabase::new(db_tool.db, db_tool.chain.clone());
let mut provider = shareable_db.provider_rw()?;
let factory = ProviderFactory::new(db_tool.db, db_tool.chain.clone());
let mut provider = factory.provider_rw()?;
let mut exec_stage = AccountHashingStage::default();
exec_stage
@@ -68,8 +68,8 @@ async fn dry_run<DB: Database>(
) -> eyre::Result<()> {
info!(target: "reth::cli", "Executing stage.");
let shareable_db = ShareableDatabase::new(&output_db, chain);
let mut provider = shareable_db.provider_rw()?;
let factory = ProviderFactory::new(&output_db, chain);
let mut provider = factory.provider_rw()?;
let mut exec_stage = AccountHashingStage {
clean_threshold: 1, // Forces hashing from scratch
..Default::default()

View File

@@ -3,7 +3,7 @@ use crate::utils::DbTool;
use eyre::Result;
use reth_db::{database::Database, table::TableImporter, tables};
use reth_primitives::{stage::StageCheckpoint, ChainSpec};
use reth_provider::ShareableDatabase;
use reth_provider::ProviderFactory;
use reth_stages::{stages::StorageHashingStage, Stage, UnwindInput};
use std::{path::PathBuf, sync::Arc};
use tracing::info;
@@ -33,8 +33,8 @@ async fn unwind_and_copy<DB: Database>(
tip_block_number: u64,
output_db: &reth_db::mdbx::Env<reth_db::mdbx::WriteMap>,
) -> eyre::Result<()> {
let shareable_db = ShareableDatabase::new(db_tool.db, db_tool.chain.clone());
let mut provider = shareable_db.provider_rw()?;
let factory = ProviderFactory::new(db_tool.db, db_tool.chain.clone());
let mut provider = factory.provider_rw()?;
let mut exec_stage = StorageHashingStage::default();
@@ -67,8 +67,8 @@ async fn dry_run<DB: Database>(
) -> eyre::Result<()> {
info!(target: "reth::cli", "Executing stage.");
let shareable_db = ShareableDatabase::new(&output_db, chain);
let mut provider = shareable_db.provider_rw()?;
let factory = ProviderFactory::new(&output_db, chain);
let mut provider = factory.provider_rw()?;
let mut exec_stage = StorageHashingStage {
clean_threshold: 1, // Forces hashing from scratch
..Default::default()

View File

@@ -3,7 +3,7 @@ use crate::utils::DbTool;
use eyre::Result;
use reth_db::{database::Database, table::TableImporter, tables};
use reth_primitives::{stage::StageCheckpoint, BlockNumber, ChainSpec};
use reth_provider::ShareableDatabase;
use reth_provider::ProviderFactory;
use reth_stages::{
stages::{
AccountHashingStage, ExecutionStage, ExecutionStageThresholds, MerkleStage,
@@ -48,8 +48,8 @@ async fn unwind_and_copy<DB: Database>(
output_db: &reth_db::mdbx::Env<reth_db::mdbx::WriteMap>,
) -> eyre::Result<()> {
let (from, to) = range;
let shareable_db = ShareableDatabase::new(db_tool.db, db_tool.chain.clone());
let mut provider = shareable_db.provider_rw()?;
let factory = ProviderFactory::new(db_tool.db, db_tool.chain.clone());
let mut provider = factory.provider_rw()?;
let unwind = UnwindInput {
unwind_to: from,
@@ -115,8 +115,8 @@ async fn dry_run<DB: Database>(
from: u64,
) -> eyre::Result<()> {
info!(target: "reth::cli", "Executing stage.");
let shareable_db = ShareableDatabase::new(&output_db, chain);
let mut provider = shareable_db.provider_rw()?;
let factory = ProviderFactory::new(&output_db, chain);
let mut provider = factory.provider_rw()?;
let mut exec_output = false;
while !exec_output {
exec_output = MerkleStage::Execution {

View File

@@ -12,7 +12,7 @@ use reth_beacon_consensus::BeaconConsensus;
use reth_config::Config;
use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder;
use reth_primitives::ChainSpec;
use reth_provider::{providers::get_stage_checkpoint, ShareableDatabase};
use reth_provider::{providers::get_stage_checkpoint, ProviderFactory};
use reth_staged_sync::utils::init::init_db;
use reth_stages::{
stages::{
@@ -120,8 +120,8 @@ impl Command {
info!(target: "reth::cli", path = ?db_path, "Opening database");
let db = Arc::new(init_db(db_path)?);
let shareable_db = ShareableDatabase::new(&db, self.chain.clone());
let mut provider_rw = shareable_db.provider_rw().map_err(PipelineError::Interface)?;
let factory = ProviderFactory::new(&db, self.chain.clone());
let mut provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?;
if let Some(listen_addr) = self.metrics {
info!(target: "reth::cli", "Starting metrics endpoint at {}", listen_addr);
@@ -160,7 +160,7 @@ impl Command {
p2p_secret_key,
default_peers_path,
)
.build(Arc::new(ShareableDatabase::new(db.clone(), self.chain.clone())))
.build(Arc::new(ProviderFactory::new(db.clone(), self.chain.clone())))
.start_network()
.await?;
let fetch_client = Arc::new(network.fetch_client().await?);
@@ -250,7 +250,7 @@ impl Command {
if self.commit {
provider_rw.commit()?;
provider_rw = shareable_db.provider_rw().map_err(PipelineError::Interface)?;
provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?;
}
}

View File

@@ -13,7 +13,7 @@ use reth_db::{
transaction::DbTx,
};
use reth_primitives::{BlockHashOrNumber, ChainSpec};
use reth_provider::ShareableDatabase;
use reth_provider::ProviderFactory;
use std::{ops::RangeInclusive, sync::Arc};
/// `reth stage unwind` command
@@ -69,8 +69,8 @@ impl Command {
eyre::bail!("Cannot unwind genesis block")
}
let shareable_db = ShareableDatabase::new(&db, self.chain.clone());
let provider = shareable_db.provider_rw()?;
let factory = ProviderFactory::new(&db, self.chain.clone());
let provider = factory.provider_rw()?;
let blocks_and_execution = provider
.take_block_and_execution_range(&self.chain, range)