chore: use UnifiedStorageWriter::commit where possible (#10019)

joshieDo
2024-08-02 15:11:11 +01:00
committed by GitHub
parent f9ad47ed6c
commit 06fbdd98d2
13 changed files with 106 additions and 132 deletions
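
The pattern across these files is the same: call sites that previously ordered `provider_rw.commit()` and `static_file_provider.commit()` by hand now go through the `UnifiedStorageWriter` helpers. A minimal sketch of the forward-sync call-site shape, assuming a `ProviderFactory<DB>` named `factory`; the helper and imports match this diff, but the wrapper function and its error plumbing are illustrative only.

use reth_db_api::database::Database;
use reth_provider::{
    errors::provider::ProviderResult, writer::UnifiedStorageWriter, ProviderFactory,
    StaticFileProviderFactory,
};

// Hypothetical helper showing the call-site pattern this commit adopts.
fn write_and_commit<DB: Database>(factory: &ProviderFactory<DB>) -> ProviderResult<()> {
    let provider_rw = factory.provider_rw()?;
    // ... write blocks, state and receipts through `provider_rw` ...

    // Before this change, call sites ordered the two commits themselves:
    //     factory.static_file_provider().commit()?;
    //     provider_rw.commit()?;
    // Now a single call encapsulates that ordering, and `commit_unwind`
    // covers the reverse ordering used after unwinds.
    UnifiedStorageWriter::commit(provider_rw, factory.static_file_provider())?;
    Ok(())
}
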

View File

@ -16,9 +16,9 @@ use reth_network::{BlockDownloaderProvider, NetworkHandle};
use reth_network_api::NetworkInfo;
use reth_primitives::BlockHashOrNumber;
use reth_provider::{
writer::StorageWriter, AccountExtReader, ChainSpecProvider, HashingWriter, HeaderProvider,
LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, StageCheckpointReader,
StateWriter, StaticFileProviderFactory, StorageReader,
writer::UnifiedStorageWriter, AccountExtReader, ChainSpecProvider, HashingWriter,
HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory,
StageCheckpointReader, StateWriter, StaticFileProviderFactory, StorageReader,
};
use reth_revm::database::StateProviderDatabase;
use reth_stages::StageId;
@ -171,7 +171,7 @@ impl Command {
.try_seal_with_senders()
.map_err(|_| BlockValidationError::SenderRecoveryError)?,
)?;
let mut storage_writer = StorageWriter::new(Some(&provider_rw), None);
let mut storage_writer = UnifiedStorageWriter::from_database(&provider_rw);
storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?;
let storage_lists = provider_rw.changed_storages_with_range(block.number..=block.number)?;
let storages = provider_rw.plain_state_storages(storage_lists)?;

View File

@ -18,7 +18,7 @@ use reth_network_api::NetworkInfo;
use reth_network_p2p::full_block::FullBlockClient;
use reth_primitives::BlockHashOrNumber;
use reth_provider::{
writer::StorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, HeaderProvider,
writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, HeaderProvider,
LatestStateProviderRef, OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter,
};
use reth_revm::database::StateProviderDatabase;
@ -155,7 +155,7 @@ impl Command {
executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?;
let execution_outcome = executor.finalize();
let mut storage_writer = StorageWriter::new(Some(&provider_rw), None);
let mut storage_writer = UnifiedStorageWriter::from_database(&provider_rw);
storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?;
let checkpoint = Some(StageCheckpoint::new(

View File

@ -9,7 +9,7 @@ use reth_db_common::{
DbTool,
};
use reth_node_core::args::StageEnum;
use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory};
use reth_provider::{writer::UnifiedStorageWriter, StaticFileProviderFactory};
use reth_stages::StageId;
use reth_static_file_types::{find_fixed_range, StaticFileSegment};
@ -174,8 +174,7 @@ impl Command {
tx.put::<tables::StageCheckpoints>(StageId::Finish.to_string(), Default::default())?;
static_file_provider.commit()?;
provider_rw.commit()?;
UnifiedStorageWriter::commit_unwind(provider_rw, static_file_provider)?;
Ok(())
}

View File

@ -27,8 +27,8 @@ use reth_node_metrics::{
version::VersionInfo,
};
use reth_provider::{
ChainSpecProvider, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory,
StaticFileWriter,
writer::UnifiedStorageWriter, ChainSpecProvider, StageCheckpointReader, StageCheckpointWriter,
StaticFileProviderFactory,
};
use reth_stages::{
stages::{
@ -272,12 +272,10 @@ impl Command {
}
if self.commit {
// For unwinding it makes more sense to commit the database first, since if
// this function is interrupted before the static files commit, we can just
// truncate the static files according to the
// checkpoints on the next start-up.
provider_rw.commit()?;
provider_factory.static_file_provider().commit()?;
UnifiedStorageWriter::commit_unwind(
provider_rw,
provider_factory.static_file_provider(),
)?;
provider_rw = provider_factory.provider_rw()?;
}
}
@ -300,8 +298,7 @@ impl Command {
provider_rw.save_stage_checkpoint(exec_stage.id(), checkpoint)?;
}
if self.commit {
provider_factory.static_file_provider().commit()?;
provider_rw.commit()?;
UnifiedStorageWriter::commit(provider_rw, provider_factory.static_file_provider())?;
provider_rw = provider_factory.provider_rw()?;
}

View File

@ -3,7 +3,7 @@
use reth_chain_state::ExecutedBlock;
use reth_db::Database;
use reth_primitives::{SealedBlock, B256};
use reth_provider::{writer::StorageWriter, ProviderFactory, StaticFileProviderFactory};
use reth_provider::{writer::UnifiedStorageWriter, ProviderFactory, StaticFileProviderFactory};
use reth_prune::{Pruner, PrunerOutput};
use std::sync::{
mpsc::{Receiver, SendError, Sender},
@ -62,10 +62,10 @@ where
let provider_rw = self.provider.provider_rw().expect("todo: handle errors");
let sf_provider = self.provider.static_file_provider();
StorageWriter::from(&provider_rw, &sf_provider)
UnifiedStorageWriter::from(&provider_rw, &sf_provider)
.remove_blocks_above(new_tip_num)
.expect("todo: handle errors");
StorageWriter::commit_unwind(provider_rw, sf_provider)
UnifiedStorageWriter::commit_unwind(provider_rw, sf_provider)
.expect("todo: handle errors");
// we ignore the error because the caller may or may not care about the result
@ -80,10 +80,10 @@ where
let provider_rw = self.provider.provider_rw().expect("todo: handle errors");
let static_file_provider = self.provider.static_file_provider();
StorageWriter::from(&provider_rw, &static_file_provider)
UnifiedStorageWriter::from(&provider_rw, &static_file_provider)
.save_blocks(&blocks)
.expect("todo: handle errors");
StorageWriter::commit(provider_rw, static_file_provider)
UnifiedStorageWriter::commit(provider_rw, static_file_provider)
.expect("todo: handle errors");
// we ignore the error because the caller may or may not care about the result
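
The two branches above pair each write path with the matching commit helper. A condensed sketch of that pairing, assuming it runs inside a function returning `ProviderResult<()>` with `provider` (the factory handle used above), `blocks: Vec<ExecutedBlock>` and `new_tip_num` in scope.

// Saving new blocks: write through the unified writer, then commit in the
// forward order (static files before the database).
let provider_rw = provider.provider_rw()?;
let sf_provider = provider.static_file_provider();
UnifiedStorageWriter::from(&provider_rw, &sf_provider).save_blocks(&blocks)?;
UnifiedStorageWriter::commit(provider_rw, sf_provider)?;

// Removing blocks above a new tip: unwind through the same writer, then
// commit in the reverse order (database before static files).
let provider_rw = provider.provider_rw()?;
let sf_provider = provider.static_file_provider();
UnifiedStorageWriter::from(&provider_rw, &sf_provider).remove_blocks_above(new_tip_num)?;
UnifiedStorageWriter::commit_unwind(provider_rw, sf_provider)?;
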

View File

@ -16,7 +16,7 @@ use reth_node_core::version::SHORT_VERSION;
use reth_optimism_primitives::bedrock_import::is_dup_tx;
use reth_primitives::Receipts;
use reth_provider::{
writer::StorageWriter, DatabaseProviderFactory, OriginalValuesKnown, ProviderFactory,
writer::UnifiedStorageWriter, DatabaseProviderFactory, OriginalValuesKnown, ProviderFactory,
StageCheckpointReader, StateWriter, StaticFileProviderFactory, StaticFileWriter, StatsReader,
};
use reth_stages::StageId;
@ -222,7 +222,7 @@ where
}
// We're reusing receipt writing code internal to
// `StorageWriter::append_receipts_from_blocks`, so we just use a default empty
// `UnifiedStorageWriter::append_receipts_from_blocks`, so we just use a default empty
// `BundleState`.
let execution_outcome =
ExecutionOutcome::new(Default::default(), receipts, first_block, Default::default());
@ -231,14 +231,13 @@ where
static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)?;
// finally, write the receipts
let mut storage_writer = StorageWriter::new(Some(&provider), Some(static_file_producer));
let mut storage_writer = UnifiedStorageWriter::from(&provider, static_file_producer);
storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?;
}
provider.commit()?;
// as static files works in file ranges, internally it will be committing when creating the
// next file range already, so we only need to call explicitly at the end.
static_file_provider.commit()?;
UnifiedStorageWriter::commit(provider, static_file_provider)?;
Ok(ImportReceiptsResult { total_decoded_receipts, total_filtered_out_dup_txns })
}
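
Condensed, the import path above reuses the receipt-writing code inside `write_to_storage` and commits explicitly only once at the end; static files already commit per file range as their writers roll over. A sketch, assuming `provider`, `static_file_provider`, `execution_outcome` and `first_block` are bound as in the surrounding code and that `?` can propagate the provider errors.

{
    // Pair the database provider with a Receipts segment writer and write the
    // outcome. write_to_storage checks the pruning configuration and falls
    // back to the database when receipt pruning is enabled.
    let static_file_producer =
        static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)?;
    let mut storage_writer = UnifiedStorageWriter::from(&provider, static_file_producer);
    storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?;
}
// One explicit commit covers the last open static-file range and the database
// transaction, in the forward order.
UnifiedStorageWriter::commit(provider, static_file_provider)?;
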

View File

@ -8,7 +8,7 @@ use futures_util::Future;
use reth_db_api::database::Database;
use reth_primitives_traits::constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH;
use reth_provider::{
providers::StaticFileWriter, FinalizedBlockReader, FinalizedBlockWriter, ProviderFactory,
writer::UnifiedStorageWriter, FinalizedBlockReader, FinalizedBlockWriter, ProviderFactory,
StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory,
};
use reth_prune::PrunerBuilder;
@ -342,12 +342,10 @@ where
))?;
}
// For unwinding it makes more sense to commit the database first, since if
// this function is interrupted before the static files commit, we can just
// truncate the static files according to the
// checkpoints on the next start-up.
provider_rw.commit()?;
self.provider_factory.static_file_provider().commit()?;
UnifiedStorageWriter::commit_unwind(
provider_rw,
self.provider_factory.static_file_provider(),
)?;
stage.post_unwind_commit()?;
@ -455,14 +453,10 @@ where
result: out.clone(),
});
// For execution it makes more sense to commit the static files first, since if
// this function is interrupted before the database commit, we can just truncate
// the static files according to the checkpoints on the next
// start-up.
self.provider_factory.static_file_provider().commit()?;
provider_rw.commit()?;
stage.post_execute_commit()?;
UnifiedStorageWriter::commit(
provider_rw,
self.provider_factory.static_file_provider(),
)?;
if done {
let block_number = checkpoint.block_number;
@ -520,8 +514,8 @@ fn on_stage_error<DB: Database>(
StageId::MerkleExecute,
prev_checkpoint.unwrap_or_default(),
)?;
factory.static_file_provider().commit()?;
provider_rw.commit()?;
UnifiedStorageWriter::commit(provider_rw, factory.static_file_provider())?;
// We unwind because of a validation error. If the unwind itself
// fails, we bail entirely,


View File

@ -10,7 +10,7 @@ use reth_primitives::{BlockNumber, Header, StaticFileSegment};
use reth_primitives_traits::format_gas_throughput;
use reth_provider::{
providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter},
writer::StorageWriter,
writer::UnifiedStorageWriter,
BlockReader, DatabaseProviderRW, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown,
ProviderError, StateWriter, StatsReader, TransactionVariant,
};
@ -361,7 +361,7 @@ where
let time = Instant::now();
// write output
let mut writer = StorageWriter::new(Some(provider), static_file_producer);
let mut writer = UnifiedStorageWriter::new(provider, static_file_producer);
writer.write_to_storage(state, OriginalValuesKnown::Yes)?;
let db_write_duration = time.elapsed();

View File

@ -13,7 +13,7 @@ use reth_primitives::{
use reth_provider::{
errors::provider::ProviderResult,
providers::{StaticFileProvider, StaticFileWriter},
writer::StorageWriter,
writer::UnifiedStorageWriter,
BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DatabaseProviderRW,
ExecutionOutcome, HashingWriter, HistoryWriter, OriginalValuesKnown, ProviderError,
ProviderFactory, RevertsInit, StageCheckpointWriter, StateWriter, StaticFileProviderFactory,
@ -131,8 +131,9 @@ pub fn init_genesis<DB: Database>(factory: ProviderFactory<DB>) -> Result<B256,
let segment = StaticFileSegment::Transactions;
static_file_provider.latest_writer(segment)?.increment_block(0)?;
provider_rw.commit()?;
static_file_provider.commit()?;
// `commit_unwind` will first commit the DB and then the static file provider, which is
// necessary on `init_genesis`.
UnifiedStorageWriter::commit_unwind(provider_rw, static_file_provider)?;
Ok(hash)
}
@ -210,7 +211,7 @@ pub fn insert_state<'a, 'b, DB: Database>(
Vec::new(),
);
let mut storage_writer = StorageWriter::new(Some(provider), None);
let mut storage_writer = UnifiedStorageWriter::from_database(provider);
storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?;
trace!(target: "reth::cli", "Inserted state");

View File

@ -147,7 +147,7 @@ pub enum ProviderError {
StorageLockError(#[from] crate::lockfile::StorageLockError),
/// Storage writer error.
#[error(transparent)]
StorageWriterError(#[from] crate::writer::StorageWriterError),
UnifiedStorageWriterError(#[from] crate::writer::UnifiedStorageWriterError),
}
impl From<reth_fs_util::FsPathError> for ProviderError {

View File

@ -1,12 +1,9 @@
use crate::db::DatabaseError;
use reth_primitives::StaticFileSegment;
/// `StorageWriter` related errors
/// `UnifiedStorageWriter` related errors
#[derive(Clone, Debug, thiserror_no_std::Error, PartialEq, Eq)]
pub enum StorageWriterError {
/// Database writer is missing
#[error("Database writer is missing")]
MissingDatabaseWriter,
pub enum UnifiedStorageWriterError {
/// Static file writer is missing
#[error("Static file writer is missing")]
MissingStaticFileWriter,

View File

@ -5,7 +5,7 @@ use crate::{
traits::{
AccountExtReader, BlockSource, ChangeSetReader, ReceiptProvider, StageCheckpointWriter,
},
writer::StorageWriter,
writer::UnifiedStorageWriter,
AccountReader, BlockExecutionReader, BlockExecutionWriter, BlockHashReader, BlockNumReader,
BlockReader, BlockWriter, BundleStateInit, EvmEnvProvider, FinalizedBlockReader,
FinalizedBlockWriter, HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider,
@ -3570,7 +3570,7 @@ impl<TX: DbTxMut + DbTx> BlockWriter for DatabaseProvider<TX> {
Ok(block_indices)
}
/// TODO(joshie): this fn should be moved to `StorageWriter` eventually
/// TODO(joshie): this fn should be moved to `UnifiedStorageWriter` eventually
fn append_blocks_with_state(
&self,
blocks: Vec<SealedBlockWithSenders>,
@ -3600,7 +3600,7 @@ impl<TX: DbTxMut + DbTx> BlockWriter for DatabaseProvider<TX> {
// Must be written after blocks because of the receipt lookup.
// TODO: should _these_ be moved to storagewriter? seems like storagewriter should be
// _above_ db provider
let mut storage_writer = StorageWriter::new(Some(self), None);
let mut storage_writer = UnifiedStorageWriter::from_database(self);
storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?;
durations_recorder.record_relative(metrics::Action::InsertState);

View File

@ -21,7 +21,7 @@ use reth_stages_types::{StageCheckpoint, StageId};
use reth_storage_api::{
BlockNumReader, HeaderProvider, ReceiptWriter, StageCheckpointWriter, TransactionsProviderExt,
};
use reth_storage_errors::writer::StorageWriterError;
use reth_storage_errors::writer::UnifiedStorageWriterError;
use revm::db::OriginalValuesKnown;
use std::{borrow::Borrow, sync::Arc};
use tracing::{debug, instrument};
@ -35,46 +35,41 @@ enum StorageType<C = (), S = ()> {
StaticFile(S),
}
/// [`StorageWriter`] is responsible for managing the writing to either database, static file or
/// both.
/// [`UnifiedStorageWriter`] is responsible for managing the writing to storage with both database
/// and static file providers.
#[derive(Debug)]
pub struct StorageWriter<'a, TX, SF> {
database: Option<&'a DatabaseProvider<TX>>,
pub struct UnifiedStorageWriter<'a, TX, SF> {
database: &'a DatabaseProvider<TX>,
static_file: Option<SF>,
}
impl<'a, TX, SF> StorageWriter<'a, TX, SF> {
/// Creates a new instance of [`StorageWriter`].
impl<'a, TX, SF> UnifiedStorageWriter<'a, TX, SF> {
/// Creates a new instance of [`UnifiedStorageWriter`].
///
/// # Parameters
/// - `database`: An optional reference to a database provider.
/// - `static_file`: An optional mutable reference to a static file instance.
pub const fn new(database: Option<&'a DatabaseProvider<TX>>, static_file: Option<SF>) -> Self {
pub const fn new(database: &'a DatabaseProvider<TX>, static_file: Option<SF>) -> Self {
Self { database, static_file }
}
/// Creates a new instance of [`StorageWriter`] from a database provider and a static file
/// instance.
/// Creates a new instance of [`UnifiedStorageWriter`] from a database provider and a static
/// file instance.
pub const fn from(database: &'a DatabaseProvider<TX>, static_file: SF) -> Self {
Self::new(Some(database), Some(static_file))
Self::new(database, Some(static_file))
}
/// Creates a new instance of [`StorageWriter`] from a static file instance.
pub const fn from_static_file(static_file: SF) -> Self {
Self::new(None, Some(static_file))
}
/// Creates a new instance of [`StorageWriter`] from a database provider.
/// Creates a new instance of [`UnifiedStorageWriter`] from a database provider.
pub const fn from_database(database: &'a DatabaseProvider<TX>) -> Self {
Self::new(Some(database), None)
Self::new(database, None)
}
/// Returns a reference to the database writer.
///
/// # Panics
/// If the database provider is not set.
fn database(&self) -> &DatabaseProvider<TX> {
self.database.as_ref().expect("should exist")
const fn database(&self) -> &DatabaseProvider<TX> {
self.database
}
/// Returns a reference to the static file instance.
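
With the database provider now mandatory, the constructors reduce to the variants above: `new(database, Option<static_file>)` underneath, with `from` and `from_database` as thin wrappers. A sketch of how each shows up elsewhere in this diff; `provider_rw`, `sf_provider`, `execution_outcome` and `block` stand in for values the real call sites have in scope.

// Database-only writer: everything in the outcome is written to the database
// (debug commands, insert_state, append_blocks_with_state).
UnifiedStorageWriter::from_database(&provider_rw)
    .write_to_storage(execution_outcome, OriginalValuesKnown::No)?;

// Database plus the whole StaticFileProvider: block-range saves and removals,
// as in the persistence sketch earlier in this diff, e.g.
//     UnifiedStorageWriter::from(&provider_rw, &sf_provider).save_blocks(&blocks)?;

// Database plus a single segment writer: appending to one static-file
// segment, mirroring the header-appending code later in this diff.
let mut header_writer = UnifiedStorageWriter::from(
    &provider_rw,
    sf_provider.get_writer(block.number, StaticFileSegment::Headers)?,
);
let _td = header_writer.append_headers_from_blocks(
    block.header().number,
    std::iter::once(&(block.header(), block.hash())),
)?;
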
@ -93,35 +88,28 @@ impl<'a, TX, SF> StorageWriter<'a, TX, SF> {
self.static_file.as_mut().expect("should exist")
}
/// Ensures that the database provider is set.
///
/// # Returns
/// - `Ok(())` if the database provider is set.
/// - `Err(StorageWriterError::MissingDatabaseWriter)` if the database provider is not set.
const fn ensure_database(&self) -> Result<(), StorageWriterError> {
if self.database.is_none() {
return Err(StorageWriterError::MissingDatabaseWriter)
}
Ok(())
}
/// Ensures that the static file instance is set.
///
/// # Returns
/// - `Ok(())` if the static file instance is set.
/// - `Err(StorageWriterError::MissingStaticFileWriter)` if the static file instance is not set.
#[allow(unused)]
const fn ensure_static_file(&self) -> Result<(), StorageWriterError> {
const fn ensure_static_file(&self) -> Result<(), UnifiedStorageWriterError> {
if self.static_file.is_none() {
return Err(StorageWriterError::MissingStaticFileWriter)
return Err(UnifiedStorageWriterError::MissingStaticFileWriter)
}
Ok(())
}
}
impl StorageWriter<'_, (), ()> {
impl UnifiedStorageWriter<'_, (), ()> {
/// Commits both storage types in the right order.
///
/// For non-unwinding operations it makes more sense to commit the static files first, since if
/// it is interrupted before the database commit, we can just truncate
/// the static files according to the checkpoints on the next
/// start-up.
///
/// NOTE: If unwinding data from storage, use `commit_unwind` instead!
pub fn commit<DB: Database>(
database: DatabaseProviderRW<DB>,
@ -134,6 +122,11 @@ impl StorageWriter<'_, (), ()> {
/// Commits both storage types in the right order for an unwind operation.
///
/// For unwinding it makes more sense to commit the database first, since if
/// it is interrupted before the static files commit, we can just
/// truncate the static files according to the
/// checkpoints on the next start-up.
///
/// NOTE: Should only be used after unwinding data from storage!
pub fn commit_unwind<DB: Database>(
database: DatabaseProviderRW<DB>,
@ -145,7 +138,7 @@ impl StorageWriter<'_, (), ()> {
}
}
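
The two doc comments above carry the crash-consistency argument that used to live at each call site. A sketch of the ordering they describe; the real bodies are elided by this hunk, so parameter names and error plumbing here are guesses, and only the order of the two commits is the point.

impl UnifiedStorageWriter<'_, (), ()> {
    pub fn commit<DB: Database>(
        database: DatabaseProviderRW<DB>,
        static_file: StaticFileProvider,
    ) -> ProviderResult<()> {
        // Forward sync: static files first. If interrupted before the database
        // commit, the extra static-file data is truncated back to the database
        // checkpoints on the next start-up.
        static_file.commit()?;
        database.commit()?;
        Ok(())
    }

    pub fn commit_unwind<DB: Database>(
        database: DatabaseProviderRW<DB>,
        static_file: StaticFileProvider,
    ) -> ProviderResult<()> {
        // Unwind: database first. If interrupted before the static-file commit,
        // the now-stale static-file tail is truncated to the checkpoints on the
        // next start-up.
        database.commit()?;
        static_file.commit()?;
        Ok(())
    }
}
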
impl<'a, 'b, TX> StorageWriter<'a, TX, &'b StaticFileProvider>
impl<'a, 'b, TX> UnifiedStorageWriter<'a, TX, &'b StaticFileProvider>
where
TX: DbTxMut + DbTx,
{
@ -164,14 +157,11 @@ where
// Only write receipts to static files if there is no receipt pruning configured.
let mut state_writer = if self.database().prune_modes_ref().has_receipts_pruning() {
StorageWriter::from_database(self.database())
UnifiedStorageWriter::from_database(self.database())
} else {
StorageWriter::new(
Some(self.database()),
Some(
self.static_file()
.get_writer(first_block.number, StaticFileSegment::Receipts)?,
),
UnifiedStorageWriter::from(
self.database(),
self.static_file().get_writer(first_block.number, StaticFileSegment::Receipts)?,
)
};
@ -226,7 +216,7 @@ where
{
let header_writer =
self.static_file().get_writer(block.number, StaticFileSegment::Headers)?;
let mut storage_writer = StorageWriter::new(Some(self.database()), Some(header_writer));
let mut storage_writer = UnifiedStorageWriter::from(self.database(), header_writer);
let td = storage_writer.append_headers_from_blocks(
block.header().number,
std::iter::once(&(block.header(), block.hash())),
@ -244,7 +234,7 @@ where
let transactions_writer =
self.static_file().get_writer(block.number, StaticFileSegment::Transactions)?;
let mut storage_writer =
StorageWriter::new(Some(self.database()), Some(transactions_writer));
UnifiedStorageWriter::from(self.database(), transactions_writer);
let no_hash_transactions =
block.body.clone().into_iter().map(TransactionSignedNoHash::from).collect();
storage_writer.append_transactions_from_blocks(
@ -299,7 +289,7 @@ where
}
}
impl<'a, 'b, TX> StorageWriter<'a, TX, StaticFileProviderRWRefMut<'b>>
impl<'a, 'b, TX> UnifiedStorageWriter<'a, TX, StaticFileProviderRWRefMut<'b>>
where
TX: DbTx,
{
@ -311,11 +301,11 @@ where
fn ensure_static_file_segment(
&self,
segment: StaticFileSegment,
) -> Result<(), StorageWriterError> {
) -> Result<(), UnifiedStorageWriterError> {
match &self.static_file {
Some(writer) => {
if writer.user_header().segment() != segment {
Err(StorageWriterError::IncorrectStaticFileWriter(
Err(UnifiedStorageWriterError::IncorrectStaticFileWriter(
writer.user_header().segment(),
segment,
))
@ -323,7 +313,7 @@ where
Ok(())
}
}
None => Err(StorageWriterError::MissingStaticFileWriter),
None => Err(UnifiedStorageWriterError::MissingStaticFileWriter),
}
}
@ -331,8 +321,8 @@ where
/// [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) table to determine the
/// total difficulty of the parent block during header insertion.
///
/// NOTE: The static file writer used to construct this [`StorageWriter`] MUST be a writer for
/// the Headers segment.
/// NOTE: The static file writer used to construct this [`UnifiedStorageWriter`] MUST be a
/// writer for the Headers segment.
pub fn append_headers_from_blocks<H, I>(
&mut self,
initial_block_number: BlockNumber,
@ -342,7 +332,6 @@ where
I: Borrow<(H, B256)>,
H: Borrow<Header>,
{
self.ensure_database()?;
self.ensure_static_file_segment(StaticFileSegment::Headers)?;
let mut td = self
@ -364,8 +353,8 @@ where
/// [`BlockBodyIndices`](tables::BlockBodyIndices) table to determine the transaction number
/// when appending to static files.
///
/// NOTE: The static file writer used to construct this [`StorageWriter`] MUST be a writer for
/// the Transactions segment.
/// NOTE: The static file writer used to construct this [`UnifiedStorageWriter`] MUST be a
/// writer for the Transactions segment.
pub fn append_transactions_from_blocks<T>(
&mut self,
initial_block_number: BlockNumber,
@ -374,7 +363,6 @@ where
where
T: Borrow<Vec<TransactionSignedNoHash>>,
{
self.ensure_database()?;
self.ensure_static_file_segment(StaticFileSegment::Transactions)?;
let mut bodies_cursor =
@ -412,17 +400,18 @@ where
}
}
impl<'a, 'b, TX> StorageWriter<'a, TX, StaticFileProviderRWRefMut<'b>>
impl<'a, 'b, TX> UnifiedStorageWriter<'a, TX, StaticFileProviderRWRefMut<'b>>
where
TX: DbTxMut + DbTx,
{
/// Appends receipts block by block.
///
/// ATTENTION: If called from [`StorageWriter`] without a static file producer, it will always
/// write them to database. Otherwise, it will look into the pruning configuration to decide.
/// ATTENTION: If called from [`UnifiedStorageWriter`] without a static file producer, it will
/// always write them to database. Otherwise, it will look into the pruning configuration to
/// decide.
///
/// NOTE: The static file writer used to construct this [`StorageWriter`] MUST be a writer for
/// the Receipts segment.
/// NOTE: The static file writer used to construct this [`UnifiedStorageWriter`] MUST be a
/// writer for the Receipts segment.
///
/// # Parameters
/// - `initial_block_number`: The starting block number.
@ -433,13 +422,12 @@ where
initial_block_number: BlockNumber,
blocks: impl Iterator<Item = Vec<Option<reth_primitives::Receipt>>>,
) -> ProviderResult<()> {
self.ensure_database()?;
let mut bodies_cursor =
self.database().tx_ref().cursor_read::<tables::BlockBodyIndices>()?;
// We write receipts to database in two situations:
// * If we are in live sync. In this case, `StorageWriter` is built without a static file
// writer.
// * If we are in live sync. In this case, `UnifiedStorageWriter` is built without a static
// file writer.
// * If there is any kind of receipt pruning
let mut storage_type = if self.static_file.is_none() ||
self.database().prune_modes_ref().has_receipts_pruning()
@ -493,7 +481,7 @@ where
}
}
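
The comment above pins down the two situations in which receipts bypass static files. Seen from a call site, the routing falls out of how the writer was constructed plus the pruning configuration; a sketch, where `live_sync`, `provider`, `sf_provider`, `outcome`, `first_block` and `is_value_known` are assumed to be in scope.

if live_sync {
    // Built without a static-file producer: receipts (and the rest of the
    // outcome) are written to the database.
    UnifiedStorageWriter::from_database(&provider)
        .write_to_storage(outcome, is_value_known)?;
} else {
    // Built with a Receipts segment writer: receipts go to static files,
    // unless receipt pruning is configured, in which case they take the
    // database path described above.
    let receipts_writer = sf_provider.get_writer(first_block, StaticFileSegment::Receipts)?;
    UnifiedStorageWriter::from(&provider, receipts_writer)
        .write_to_storage(outcome, is_value_known)?;
}
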
impl<'a, 'b, TX> StateWriter for StorageWriter<'a, TX, StaticFileProviderRWRefMut<'b>>
impl<'a, 'b, TX> StateWriter for UnifiedStorageWriter<'a, TX, StaticFileProviderRWRefMut<'b>>
where
TX: DbTxMut + DbTx,
{
@ -504,7 +492,6 @@ where
execution_outcome: ExecutionOutcome,
is_value_known: OriginalValuesKnown,
) -> ProviderResult<()> {
self.ensure_database()?;
let (plain_state, reverts) =
execution_outcome.bundle.into_plain_state_and_reverts(is_value_known);
@ -791,7 +778,7 @@ mod tests {
let outcome =
ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new());
let mut writer = StorageWriter::new(Some(&provider), None);
let mut writer = UnifiedStorageWriter::from_database(&provider);
writer
.write_to_storage(outcome, OriginalValuesKnown::Yes)
.expect("Could not write bundle state to DB");
@ -892,7 +879,7 @@ mod tests {
state.merge_transitions(BundleRetention::Reverts);
let outcome =
ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 2, Vec::new());
let mut writer = StorageWriter::new(Some(&provider), None);
let mut writer = UnifiedStorageWriter::from_database(&provider);
writer
.write_to_storage(outcome, OriginalValuesKnown::Yes)
.expect("Could not write bundle state to DB");
@ -960,7 +947,7 @@ mod tests {
let outcome =
ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new());
let mut writer = StorageWriter::new(Some(&provider), None);
let mut writer = UnifiedStorageWriter::from_database(&provider);
writer
.write_to_storage(outcome, OriginalValuesKnown::Yes)
.expect("Could not write bundle state to DB");
@ -1108,7 +1095,7 @@ mod tests {
let bundle = state.take_bundle();
let outcome = ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new());
let mut writer = StorageWriter::new(Some(&provider), None);
let mut writer = UnifiedStorageWriter::from_database(&provider);
writer
.write_to_storage(outcome, OriginalValuesKnown::Yes)
.expect("Could not write bundle state to DB");
@ -1274,7 +1261,7 @@ mod tests {
init_state.merge_transitions(BundleRetention::Reverts);
let outcome =
ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new());
let mut writer = StorageWriter::new(Some(&provider), None);
let mut writer = UnifiedStorageWriter::from_database(&provider);
writer
.write_to_storage(outcome, OriginalValuesKnown::Yes)
.expect("Could not write bundle state to DB");
@ -1322,7 +1309,7 @@ mod tests {
state.merge_transitions(BundleRetention::Reverts);
let outcome =
ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new());
let mut writer = StorageWriter::new(Some(&provider), None);
let mut writer = UnifiedStorageWriter::from_database(&provider);
writer
.write_to_storage(outcome, OriginalValuesKnown::Yes)
.expect("Could not write bundle state to DB");