fix: use the same ProviderFactory in reth node (#5778)

Author: Dan Cline
Date: 2023-12-15 16:01:12 +02:00
Committed by: GitHub
Parent: 30efaf4a72
Commit: cc4bd7c306
3 changed files with 24 additions and 25 deletions
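
Before this change, several of the NodeCommand helpers took the raw database handle and each built its own ProviderFactory internally via ProviderFactory::new(db, self.chain.clone()); the commit builds one factory up front and threads clones of it through every consumer instead. A minimal sketch of that pattern, using simplified stand-in types (Db, ChainSpec, and this ProviderFactory are illustrative, not reth's actual definitions):

    use std::sync::Arc;

    // Illustrative stand-ins for the database and chain-spec types.
    #[allow(dead_code)]
    struct Db;
    struct ChainSpec;

    // A cheap-to-clone handle over shared state: cloning the factory does not
    // reopen the database, it only bumps the Arc reference counts.
    #[derive(Clone)]
    #[allow(dead_code)]
    struct ProviderFactory {
        db: Arc<Db>,
        chain_spec: Arc<ChainSpec>,
    }

    impl ProviderFactory {
        fn new(db: Arc<Db>, chain_spec: Arc<ChainSpec>) -> Self {
            Self { db, chain_spec }
        }
    }

    // After the change, helpers accept the factory itself instead of the raw DB,
    // so every consumer reads through the same shared configuration.
    fn lookup_head(_factory: ProviderFactory) { /* open a provider, read the head */ }
    fn build_pruner(_factory: ProviderFactory) { /* construct the pruner */ }

    fn main() {
        let factory = ProviderFactory::new(Arc::new(Db), Arc::new(ChainSpec));
        lookup_head(factory.clone()); // clone for intermediate consumers
        build_pruner(factory); // the last consumer can take the handle by value
    }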


@@ -304,7 +304,8 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
debug!(target: "reth::cli", "configured blockchain tree");
// fetch the head block from the database
- let head = self.lookup_head(Arc::clone(&db)).wrap_err("the head block is missing")?;
+ let head =
+ self.lookup_head(provider_factory.clone()).wrap_err("the head block is missing")?;
// setup the blockchain provider
let blockchain_db =
@@ -346,7 +347,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
let default_peers_path = data_dir.known_peers_path();
let network_config = self.load_network_config(
&config,
- Arc::clone(&db),
+ provider_factory.clone(),
ctx.task_executor.clone(),
head,
secret_key,
@@ -389,7 +390,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
let max_block = if let Some(block) = self.debug.max_block {
Some(block)
} else if let Some(tip) = self.debug.tip {
- Some(self.lookup_or_fetch_tip(&db, &network_client, tip).await?)
+ Some(self.lookup_or_fetch_tip(provider_factory.clone(), &network_client, tip).await?)
} else {
None
};
@@ -425,7 +426,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
&config.stages,
client.clone(),
Arc::clone(&consensus),
- provider_factory,
+ provider_factory.clone(),
&ctx.task_executor,
sync_metrics_tx,
prune_config.clone(),
@@ -445,7 +446,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
&config.stages,
network_client.clone(),
Arc::clone(&consensus),
- provider_factory,
+ provider_factory.clone(),
&ctx.task_executor,
sync_metrics_tx,
prune_config.clone(),
@@ -476,7 +477,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
let pruner_events = if let Some(prune_config) = prune_config {
let mut pruner = self.build_pruner(
&prune_config,
- db.clone(),
+ provider_factory,
tree_config,
snapshotter.highest_snapshot_receiver(),
);
@@ -747,8 +748,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
/// Fetches the head block from the database.
///
/// If the database is empty, returns the genesis block.
- fn lookup_head<DB: Database>(&self, db: DB) -> RethResult<Head> {
- let factory = ProviderFactory::new(db, self.chain.clone());
+ fn lookup_head<DB: Database>(&self, factory: ProviderFactory<DB>) -> RethResult<Head> {
let provider = factory.provider()?;
let head = provider.get_stage_checkpoint(StageId::Finish)?.unwrap_or_default().block_number;
@@ -780,7 +780,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
/// NOTE: The download is attempted with infinite retries.
async fn lookup_or_fetch_tip<DB, Client>(
&self,
- db: DB,
+ provider_factory: ProviderFactory<DB>,
client: Client,
tip: B256,
) -> RethResult<u64>
@@ -788,7 +788,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
DB: Database,
Client: HeadersClient,
{
- Ok(self.fetch_tip(db, client, BlockHashOrNumber::Hash(tip)).await?.number)
+ Ok(self.fetch_tip(provider_factory, client, BlockHashOrNumber::Hash(tip)).await?.number)
}
/// Attempt to look up the block with the given number and return the header.
@@ -796,7 +796,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
/// NOTE: The download is attempted with infinite retries.
async fn fetch_tip<DB, Client>(
&self,
- db: DB,
+ factory: ProviderFactory<DB>,
client: Client,
tip: BlockHashOrNumber,
) -> RethResult<SealedHeader>
@@ -804,7 +804,6 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
DB: Database,
Client: HeadersClient,
{
- let factory = ProviderFactory::new(db, self.chain.clone());
let provider = factory.provider()?;
let header = provider.header_by_hash_or_number(tip)?;
@@ -832,7 +831,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
fn load_network_config<DB: Database>(
&self,
config: &Config,
- db: DB,
+ provider_factory: ProviderFactory<DB>,
executor: TaskExecutor,
head: Head,
secret_key: SecretKey,
@@ -862,7 +861,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
.sequencer_endpoint(self.rollup.sequencer_http.clone())
.disable_tx_gossip(self.rollup.disable_txpool_gossip);
- cfg_builder.build(ProviderFactory::new(db, self.chain.clone()))
+ cfg_builder.build(provider_factory)
}
#[allow(clippy::too_many_arguments)]
@@ -980,7 +979,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
fn build_pruner<DB: Database>(
&self,
config: &PruneConfig,
- db: DB,
+ provider_factory: ProviderFactory<DB>,
tree_config: BlockchainTreeConfig,
highest_snapshots_rx: HighestSnapshotsTracker,
) -> Pruner<DB> {
@@ -1014,8 +1013,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
);
Pruner::new(
- db,
- self.chain.clone(),
+ provider_factory,
segments.into_vec(),
config.block_interval,
self.chain.prune_delete_limit,

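The node-command hunks above all follow one signature pattern: helpers that used to take a DB: Database value (and then call ProviderFactory::new(db, self.chain.clone()) themselves) now take a ProviderFactory<DB> built once by the caller. A small generic sketch of that shape, again with a stand-in trait and types rather than reth's own:

    // Stand-in trait and factory mirroring the shape of the changed signatures.
    trait Database {}

    #[derive(Clone)]
    struct ProviderFactory<DB> {
        db: DB,
    }

    // Old shape: fn lookup_head<DB: Database>(&self, db: DB) -> Head
    // New shape: the caller builds the factory once and passes it in, cloning as needed.
    fn lookup_head<DB: Database>(factory: ProviderFactory<DB>) -> u64 {
        let _provider = &factory.db; // a real implementation would open a provider here
        0
    }

    #[derive(Clone)]
    struct StubDb; // hypothetical database type for the example
    impl Database for StubDb {}

    fn main() {
        let factory = ProviderFactory { db: StubDb };
        let head = lookup_head(factory.clone());
        println!("head block: {head}");
    }

The remaining two files apply the same idea outside the CLI: the second reuses the shared factory when wiring up the BlockchainProvider and Pruner, and the third changes Pruner::new itself to accept the factory instead of a db/chain_spec pair.
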

@@ -514,11 +514,11 @@ where
BlockchainTree::new(externals, config, None).expect("failed to create tree"),
);
let latest = self.base_config.chain_spec.genesis_header().seal_slow();
- let blockchain_provider = BlockchainProvider::with_latest(provider_factory, tree, latest);
+ let blockchain_provider =
+ BlockchainProvider::with_latest(provider_factory.clone(), tree, latest);
let pruner = Pruner::new(
- db.clone(),
- self.base_config.chain_spec.clone(),
+ provider_factory,
vec![],
5,
self.base_config.chain_spec.prune_delete_limit,


@@ -6,7 +6,7 @@ use crate::{
Metrics, PrunerError, PrunerEvent,
};
use reth_db::database::Database;
- use reth_primitives::{BlockNumber, ChainSpec, PruneMode, PruneProgress, PruneSegment};
+ use reth_primitives::{BlockNumber, PruneMode, PruneProgress, PruneSegment};
use reth_provider::{ProviderFactory, PruneCheckpointReader};
use reth_snapshot::HighestSnapshotsTracker;
use reth_tokio_util::EventListeners;
@@ -46,8 +46,7 @@ pub struct Pruner<DB> {
impl<DB: Database> Pruner<DB> {
/// Creates a new [Pruner].
pub fn new(
- db: DB,
- chain_spec: Arc<ChainSpec>,
+ provider_factory: ProviderFactory<DB>,
segments: Vec<Arc<dyn Segment<DB>>>,
min_block_interval: usize,
delete_limit: usize,
@@ -55,7 +54,7 @@ impl<DB: Database> Pruner<DB> {
highest_snapshots_tracker: HighestSnapshotsTracker,
) -> Self {
Self {
- provider_factory: ProviderFactory::new(db, chain_spec),
+ provider_factory,
segments,
min_block_interval,
previous_tip_block_number: None,
@@ -267,12 +266,14 @@ mod tests {
use crate::Pruner;
use reth_db::test_utils::create_test_rw_db;
use reth_primitives::MAINNET;
+ use reth_provider::ProviderFactory;
use tokio::sync::watch;
#[test]
fn is_pruning_needed() {
let db = create_test_rw_db();
- let mut pruner = Pruner::new(db, MAINNET.clone(), vec![], 5, 0, 5, watch::channel(None).1);
+ let provider_factory = ProviderFactory::new(db, MAINNET.clone());
+ let mut pruner = Pruner::new(provider_factory, vec![], 5, 0, 5, watch::channel(None).1);
// No last pruned block number was set before
let first_block_number = 1;