Enable clippy's useless_let_if_seq linter (#7220)

Co-authored-by: Alexey Shekhirin <a.shekhirin@gmail.com>
This commit is contained in:
Justin Traglia
2024-03-19 11:33:22 -05:00
committed by GitHub
parent 1bf5d6a53a
commit 709d8a16d3
12 changed files with 56 additions and 44 deletions

View File

@@ -95,6 +95,7 @@ clippy.uninlined_format_args = "deny"
clippy.equatable_if_let = "deny"
clippy.or_fun_call = "deny"
clippy.branches_sharing_code = "deny"
clippy.useless_let_if_seq = "deny"
[workspace.package]
version = "0.2.0-beta.3"

View File

@@ -449,16 +449,17 @@ impl StorageInner {
let Block { header, body, .. } = block.block;
let body = BlockBody { transactions: body, ommers: vec![], withdrawals: None };
let mut blob_gas_used = None;
if chain_spec.is_cancun_active_at_timestamp(header.timestamp) {
let blob_gas_used = if chain_spec.is_cancun_active_at_timestamp(header.timestamp) {
let mut sum_blob_gas_used = 0;
for tx in &body.transactions {
if let Some(blob_tx) = tx.transaction.as_eip4844() {
sum_blob_gas_used += blob_tx.blob_gas();
}
}
blob_gas_used = Some(sum_blob_gas_used);
}
Some(sum_blob_gas_used)
} else {
None
};
trace!(target: "consensus::auto", ?bundle_state, ?header, ?body, "executed block, calculating state root and completing header");

View File

@@ -7,6 +7,7 @@
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![allow(clippy::useless_let_if_seq)]
use reth_basic_payload_builder::{
commit_withdrawals, is_better_payload, pre_block_beacon_root_contract_call, BuildArguments,

View File

@@ -7,6 +7,7 @@
)]
#![cfg_attr(all(not(test), feature = "optimism"), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![allow(clippy::useless_let_if_seq)]
#[cfg(feature = "optimism")]
pub use builder::*;

View File

@@ -652,11 +652,12 @@ impl SealedHeader {
chain_spec: &ChainSpec,
) -> Result<(), HeaderValidationError> {
// Determine the parent gas limit, considering elasticity multiplier on the London fork.
let mut parent_gas_limit = parent.gas_limit;
if chain_spec.fork(Hardfork::London).transitions_at_block(self.number) {
parent_gas_limit =
parent.gas_limit * chain_spec.base_fee_params(self.timestamp).elasticity_multiplier;
}
let parent_gas_limit =
if chain_spec.fork(Hardfork::London).transitions_at_block(self.number) {
parent.gas_limit * chain_spec.base_fee_params(self.timestamp).elasticity_multiplier
} else {
parent.gas_limit
};
// Check for an increase in gas limit beyond the allowed threshold.
if self.gas_limit > parent_gas_limit {

View File

@@ -186,13 +186,14 @@ where
}
// sort results then take the configured percentile result
let mut price = inner.last_price.price;
if !results.is_empty() {
let mut price = if !results.is_empty() {
results.sort_unstable();
price = *results
.get((results.len() - 1) * self.oracle_config.percentile as usize / 100)
.expect("gas price index is a percent of nonzero array length, so a value always exists");
}
*results.get((results.len() - 1) * self.oracle_config.percentile as usize / 100).expect(
"gas price index is a percent of nonzero array length, so a value always exists",
)
} else {
inner.last_price.price
};
// constrain to the max price
if let Some(max_price) = self.oracle_config.max_price {

View File

@@ -126,10 +126,13 @@ impl<EF: ExecutorFactory> ExecutionStage<EF> {
let static_file_provider = provider.static_file_provider();
// We only use static files for Receipts, if there is no receipt pruning of any kind.
let mut static_file_producer = None;
if self.prune_modes.receipts.is_none() && self.prune_modes.receipts_log_filter.is_empty() {
static_file_producer = Some(prepare_static_file_producer(provider, start_block)?);
}
let static_file_producer = if self.prune_modes.receipts.is_none() &&
self.prune_modes.receipts_log_filter.is_empty()
{
Some(prepare_static_file_producer(provider, start_block)?)
} else {
None
};
// Build executor
let mut executor = self.executor_factory.with_state(LatestStateProviderRef::new(

View File

@@ -96,15 +96,16 @@ impl<DB: Database> Segment<DB> for Headers {
// Generate list of hashes for filters & PHF
let mut cursor = provider.tx_ref().cursor_read::<RawTable<tables::CanonicalHeaders>>()?;
let mut hashes = None;
if config.filters.has_filters() {
hashes = Some(
let hashes = if config.filters.has_filters() {
Some(
cursor
.walk(Some(RawKey::from(*block_range.start())))?
.take(range_len)
.map(|row| row.map(|(_key, value)| value.into_value()).map_err(|e| e.into())),
);
}
)
} else {
None
};
create_static_file_T1_T2_T3::<
tables::Headers,

View File

@@ -81,15 +81,16 @@ impl<DB: Database> Segment<DB> for Receipts {
)?;
// Generate list of hashes for filters & PHF
let mut hashes = None;
if config.filters.has_filters() {
hashes = Some(
let hashes = if config.filters.has_filters() {
Some(
provider
.transaction_hashes_by_range(*tx_range.start()..(*tx_range.end() + 1))?
.into_iter()
.map(|(tx, _)| Ok(tx)),
);
}
)
} else {
None
};
create_static_file_T1::<tables::Receipts, TxNumber, SegmentHeader>(
provider.tx_ref(),

View File

@@ -85,15 +85,16 @@ impl<DB: Database> Segment<DB> for Transactions {
)?;
// Generate list of hashes for filters & PHF
let mut hashes = None;
if config.filters.has_filters() {
hashes = Some(
let hashes = if config.filters.has_filters() {
Some(
provider
.transaction_hashes_by_range(*tx_range.start()..(*tx_range.end() + 1))?
.into_iter()
.map(|(tx, _)| Ok(tx)),
);
}
)
} else {
None
};
create_static_file_T1::<tables::Transactions, TxNumber, SegmentHeader>(
provider.tx_ref(),

View File

@@ -45,12 +45,13 @@ impl Compact for AccountBeforeTx {
let address = Address::from_slice(&buf[..20]);
buf.advance(20);
let mut info = None;
if len - 20 > 0 {
let info = if len - 20 > 0 {
let (acc, advanced_buf) = Account::from_compact(buf, len - 20);
buf = advanced_buf;
info = Some(acc);
}
Some(acc)
} else {
None
};
(Self { address, info }, buf)
}

View File

@@ -425,15 +425,14 @@ impl<H: NippyJarHeader> NippyJarWriter<H> {
fn commit_offsets_inner(&mut self) -> Result<(), NippyJarError> {
// The last offset on disk can be the first offset of `self.offsets` given how
// `append_column()` works alongside commit. So we need to skip it.
let mut last_offset_ondisk = None;
if self.offsets_file.get_ref().metadata()?.len() > 1 {
let mut last_offset_ondisk = if self.offsets_file.get_ref().metadata()?.len() > 1 {
self.offsets_file.seek(SeekFrom::End(-(OFFSET_SIZE_BYTES as i64)))?;
let mut buf = [0u8; OFFSET_SIZE_BYTES as usize];
self.offsets_file.get_ref().read_exact(&mut buf)?;
last_offset_ondisk = Some(u64::from_le_bytes(buf));
}
Some(u64::from_le_bytes(buf))
} else {
None
};
self.offsets_file.seek(SeekFrom::End(0))?;