clippy: rm some type_complexity (#9276)

Co-authored-by: Matthias Seitz <matthias.seitz@outlook.de>
Authored by Thomas Coratger on 2024-07-03 21:17:33 +02:00; committed by GitHub
parent 4f3f5067ce
commit ba370918c8
5 changed files with 35 additions and 15 deletions


@@ -88,6 +88,18 @@ const MAX_INVALID_HEADERS: u32 = 512u32;
 /// If the distance exceeds this threshold, the pipeline will be used for sync.
 pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS;
+
+/// Represents a pending forkchoice update.
+///
+/// This type encapsulates the necessary components for a pending forkchoice update
+/// in the context of a beacon consensus engine.
+///
+/// It consists of:
+/// - The current forkchoice state.
+/// - Optional payload attributes specific to the engine type.
+/// - The sender half of a oneshot channel, conveying the outcome of the forkchoice update.
+type PendingForkchoiceUpdate<PayloadAttributes> =
+    (ForkchoiceState, Option<PayloadAttributes>, oneshot::Sender<RethResult<OnForkChoiceUpdated>>);
+
 /// The beacon consensus engine is the driver that switches between historical and live sync.
 ///
 /// The beacon consensus engine is itself driven by messages from the Consensus Layer, which are
@@ -189,12 +201,7 @@ where
     /// It is recorded if we cannot process the forkchoice update because
     /// a hook with database read-write access is active.
     /// This is a temporary solution to always process missed FCUs.
-    #[allow(clippy::type_complexity)]
-    pending_forkchoice_update: Option<(
-        ForkchoiceState,
-        Option<EngineT::PayloadAttributes>,
-        oneshot::Sender<RethResult<OnForkChoiceUpdated>>,
-    )>,
+    pending_forkchoice_update: Option<PendingForkchoiceUpdate<EngineT::PayloadAttributes>>,
     /// Tracks the header of invalid payloads that were rejected by the engine because they're
     /// invalid.
     invalid_headers: InvalidHeaderCache,
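
The alias bundles the forkchoice state, the optional payload attributes, and the response channel into one named type. As a rough illustration of that tuple's lifecycle, here is a self-contained sketch; the struct definitions and the `String` error type are simplified stand-ins, not reth's actual `ForkchoiceState`, `RethResult`, or `OnForkChoiceUpdated`, and it assumes a `tokio` dependency with the `macros` feature:

```rust
use tokio::sync::oneshot;

// Simplified stand-ins for reth's real types.
struct ForkchoiceState { head_block_hash: [u8; 32] }
struct PayloadAttributes { timestamp: u64 }
#[derive(Debug)]
struct OnForkChoiceUpdated;
type RethResult<T> = Result<T, String>; // stand-in for reth's error type

type PendingForkchoiceUpdate<A> =
    (ForkchoiceState, Option<A>, oneshot::Sender<RethResult<OnForkChoiceUpdated>>);

#[tokio::main]
async fn main() {
    let (tx, rx) = oneshot::channel();
    // Park the update while a hook holds read-write database access...
    let pending: PendingForkchoiceUpdate<PayloadAttributes> = (
        ForkchoiceState { head_block_hash: [0; 32] },
        Some(PayloadAttributes { timestamp: 1_720_000_000 }),
        tx,
    );
    // ...and resolve it once the hook finishes.
    let (_state, _attrs, sender) = pending;
    let _ = sender.send(Ok(OnForkChoiceUpdated));
    assert!(matches!(rx.await, Ok(Ok(OnForkChoiceUpdated))));
}
```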


@@ -164,6 +164,14 @@ where
         }
     }
+
+/// Type alias for the items stored in the heap of [`EtlIter`].
+///
+/// Each item in the heap is a tuple containing:
+/// - A `Reverse` tuple of a key-value pair (`Vec<u8>, Vec<u8>`), used to maintain the heap in
+///   ascending order of keys.
+/// - An index (`usize`) representing the source file from which the key-value pair was read.
+type HeapItem = (Reverse<(Vec<u8>, Vec<u8>)>, usize);
+
 /// `EtlIter` is an iterator for traversing through sorted key-value pairs in a collection of ETL
 /// files. These files are created using the [`Collector`] and contain data where keys are encoded
 /// and values are compressed.
@@ -174,8 +182,7 @@ where
 #[derive(Debug)]
 pub struct EtlIter<'a> {
     /// Heap managing the next items to be iterated.
-    #[allow(clippy::type_complexity)]
-    heap: BinaryHeap<(Reverse<(Vec<u8>, Vec<u8>)>, usize)>,
+    heap: BinaryHeap<HeapItem>,
     /// Reference to the vector of ETL files being iterated over.
     files: &'a mut Vec<EtlFile>,
 }
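
The `Reverse` wrapper is what turns `std`'s max-heap into a min-heap here. A self-contained sketch of the merge order this buys (the sample keys and file indices are illustrative):

```rust
use std::{cmp::Reverse, collections::BinaryHeap};

// BinaryHeap pops the largest item first; wrapping the key-value pair in
// `Reverse` flips the comparison, so the smallest key comes out first --
// the ordering a k-way merge over sorted files relies on.
fn main() {
    let mut heap: BinaryHeap<(Reverse<(Vec<u8>, Vec<u8>)>, usize)> = BinaryHeap::new();
    heap.push((Reverse((b"banana".to_vec(), b"2".to_vec())), 0)); // from file 0
    heap.push((Reverse((b"apple".to_vec(), b"1".to_vec())), 1)); // from file 1

    let (Reverse((key, _value)), file_index) = heap.pop().unwrap();
    assert_eq!(key, b"apple".to_vec()); // smallest key first
    assert_eq!(file_index, 1); // and we know which file to refill from
}
```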


@@ -130,7 +130,6 @@ where
 /// Generates two batches. The first is to be inserted into the database before running the
 /// benchmark. The second is to be benchmarked with.
-#[allow(clippy::type_complexity)]
 fn generate_batches<T>(size: usize) -> (Vec<TableRow<T>>, Vec<TableRow<T>>)
 where
     T: Table,
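
The `#[allow]` can go because the signature is already expressed through the `TableRow` alias. An illustrative reduction of the same pattern; the trait and alias below are simplified stand-ins for reth's `Table` and `TableRow`, which carry more bounds:

```rust
// Simplified stand-ins for reth's Table trait and TableRow alias.
trait Table {
    type Key;
    type Value;
}
type TableRow<T> = (<T as Table>::Key, <T as Table>::Value);

struct Headers;
impl Table for Headers {
    type Key = u64;      // block number
    type Value = String; // stand-in for a header
}

// One alias in the signature instead of a doubly nested tuple of generics,
// which keeps the return type under clippy's type_complexity threshold.
fn generate_batches<T: Table>(size: usize) -> (Vec<TableRow<T>>, Vec<TableRow<T>>) {
    (Vec::with_capacity(size), Vec::with_capacity(size))
}

fn main() {
    let (prefill, bench) = generate_batches::<Headers>(8);
    assert_eq!((prefill.len(), bench.len()), (0, 0)); // capacity reserved, still empty
}
```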


@@ -10,6 +10,9 @@ use std::sync::Arc;
 #[derive(Debug, Deref, DerefMut)]
 pub struct StaticFileCursor<'a>(NippyJarCursor<'a, SegmentHeader>);
+
+/// Type alias for column results with optional values.
+type ColumnResult<T> = ProviderResult<Option<T>>;
 
 impl<'a> StaticFileCursor<'a> {
     /// Returns a new [`StaticFileCursor`].
     pub fn new(jar: &'a NippyJar<SegmentHeader>, reader: Arc<DataReader>) -> ProviderResult<Self> {
@@ -56,7 +59,7 @@ impl<'a> StaticFileCursor<'a> {
     pub fn get_one<M: ColumnSelectorOne>(
         &mut self,
         key_or_num: KeyOrNumber<'_>,
-    ) -> ProviderResult<Option<M::FIRST>> {
+    ) -> ColumnResult<M::FIRST> {
         let row = self.get(key_or_num, M::MASK)?;

         match row {
@@ -69,7 +72,7 @@ impl<'a> StaticFileCursor<'a> {
     pub fn get_two<M: ColumnSelectorTwo>(
         &mut self,
         key_or_num: KeyOrNumber<'_>,
-    ) -> ProviderResult<Option<(M::FIRST, M::SECOND)>> {
+    ) -> ColumnResult<(M::FIRST, M::SECOND)> {
         let row = self.get(key_or_num, M::MASK)?;

         match row {
@@ -79,11 +82,10 @@ impl<'a> StaticFileCursor<'a> {
     }

     /// Gets three column values from a row.
-    #[allow(clippy::type_complexity)]
     pub fn get_three<M: ColumnSelectorThree>(
         &mut self,
         key_or_num: KeyOrNumber<'_>,
-    ) -> ProviderResult<Option<(M::FIRST, M::SECOND, M::THIRD)>> {
+    ) -> ColumnResult<(M::FIRST, M::SECOND, M::THIRD)> {
         let row = self.get(key_or_num, M::MASK)?;

         match row {
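
The alias only renames `ProviderResult<Option<T>>`, so callers still match on the nested `Result`/`Option`. A minimal self-contained sketch of the shape; the error type is a stand-in for reth's richer `ProviderError`:

```rust
// Stand-in error type; reth's ProviderResult wraps a ProviderError.
type ProviderResult<T> = Result<T, &'static str>;
/// One name for the recurring "fallible lookup of an optional column" shape.
type ColumnResult<T> = ProviderResult<Option<T>>;

fn get_one(found: bool) -> ColumnResult<u64> {
    // Ok(None) distinguishes "row has no such column" from a hard error.
    Ok(found.then_some(42))
}

fn main() {
    assert_eq!(get_one(true), Ok(Some(42)));
    assert_eq!(get_one(false), Ok(None));
}
```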


@@ -30,6 +30,12 @@ use reth_tracing::{RethTracer, Tracer};
 use schnellru::{ByLength, LruMap};
 use std::{collections::HashMap, sync::Arc};
+
+/// Type alias for the LRU cache used within the [`PrecompileCache`].
+type PrecompileLRUCache = LruMap<(Bytes, u64), PrecompileResult>;
+
+/// Type alias for the thread-safe `Arc<RwLock<_>>` wrapper around a [`PrecompileLRUCache`].
+type CachedPrecompileResult = Arc<RwLock<PrecompileLRUCache>>;
 
 /// A cache for precompile inputs / outputs.
 ///
 /// This assumes that the precompile is a standard precompile, as in `StandardPrecompileFn`, meaning
@@ -40,8 +46,7 @@ use std::{collections::HashMap, sync::Arc};
 #[derive(Debug, Default)]
 pub struct PrecompileCache {
     /// Caches for each precompile input / output.
-    #[allow(clippy::type_complexity)]
-    cache: HashMap<(Address, SpecId), Arc<RwLock<LruMap<(Bytes, u64), PrecompileResult>>>>,
+    cache: HashMap<(Address, SpecId), CachedPrecompileResult>,
 }

 /// Custom EVM configuration
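
As a rough usage sketch of the inner `LruMap` shape, with a plain `u64` key instead of `(Bytes, u64)` and a string where the real cache stores a `PrecompileResult`:

```rust
use schnellru::{ByLength, LruMap};

fn main() {
    // A length-bounded LRU, the same limiter family the precompile cache uses.
    let mut cache: LruMap<u64, &str> = LruMap::new(ByLength::new(2));
    cache.insert(1, "output-for-input-1");
    cache.insert(2, "output-for-input-2");
    cache.insert(3, "output-for-input-3"); // evicts key 1, the least recently used
    assert!(cache.get(&1).is_none());
    assert_eq!(cache.get(&3).copied(), Some("output-for-input-3"));
}
```

In the commit, each such map is additionally wrapped in `Arc<RwLock<_>>` (the `CachedPrecompileResult` alias) so one cache per `(Address, SpecId)` pair can be shared across threads.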