diff --git a/Cargo.toml b/Cargo.toml
index 20038743c..8ada546aa 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -218,10 +218,6 @@ alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "410850b" }
 ethers-core = { version = "2.0.14", default-features = false }
 ethers-providers = { version = "2.0.14", default-features = false }

-# js
-boa_engine = "0.17"
-boa_gc = "0.17"
-
 # misc
 aquamarine = "0.5"
 bytes = "1.5"
diff --git a/bin/reth/src/commands/db/static_files/headers.rs b/bin/reth/src/commands/db/static_files/headers.rs
index 5ce8f8d44..7584f614c 100644
--- a/bin/reth/src/commands/db/static_files/headers.rs
+++ b/bin/reth/src/commands/db/static_files/headers.rs
@@ -91,15 +91,15 @@ impl Command {
             },
             |provider| {
                 Ok(provider
-                    .header_by_number(num as u64)?
-                    .ok_or(ProviderError::HeaderNotFound((num as u64).into()))?)
+                    .header_by_number(num)?
+                    .ok_or(ProviderError::HeaderNotFound((num).into()))?)
             },
         )?;
     }

     // BENCHMARK QUERYING A RANDOM HEADER BY HASH
     {
-        let num = row_indexes[rng.gen_range(0..row_indexes.len())] as u64;
+        let num = row_indexes[rng.gen_range(0..row_indexes.len())];
         let header_hash = provider_factory
             .header_by_number(num)?
             .ok_or(ProviderError::HeaderNotFound(num.into()))?
diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs
index a252c4f4d..f59e9e149 100644
--- a/bin/reth/src/commands/import.rs
+++ b/bin/reth/src/commands/import.rs
@@ -141,7 +141,7 @@ impl ImportCommand {
         tokio::select! {
             res = pipeline.run() => res?,
             _ = tokio::signal::ctrl_c() => {},
-        };
+        }

         info!(target: "reth::cli", "Finishing up");
         Ok(())
diff --git a/bin/reth/src/commands/stage/run.rs b/bin/reth/src/commands/stage/run.rs
index 63c2279a1..f015cd599 100644
--- a/bin/reth/src/commands/stage/run.rs
+++ b/bin/reth/src/commands/stage/run.rs
@@ -261,7 +261,7 @@ impl Command {
             _ => return Ok(()),
         };
         if let Some(unwind_stage) = &unwind_stage {
-            assert!(exec_stage.type_id() == unwind_stage.type_id());
+            assert_eq!(exec_stage.type_id(), unwind_stage.type_id());
         }
         let checkpoint = provider_rw.get_stage_checkpoint(exec_stage.id())?.unwrap_or_default();
diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs
index fdfc393f0..e925f6c64 100644
--- a/crates/consensus/beacon/src/engine/mod.rs
+++ b/crates/consensus/beacon/src/engine/mod.rs
@@ -1128,13 +1128,13 @@ where
                 if let Err((_hash, error)) =
                     self.try_make_sync_target_canonical(block_num_hash)
                 {
-                    if error.is_fatal() {
+                    return if error.is_fatal() {
                         error!(target: "consensus::engine", %error, "Encountered fatal error");
-                        return Err(BeaconOnNewPayloadError::Internal(Box::new(error)))
+                        Err(BeaconOnNewPayloadError::Internal(Box::new(error)))
                     } else {
                         // If we could not make the sync target block canonical, we
                         // should return the error as an invalid payload status.
-                        return Ok(PayloadStatus::new(
+                        Ok(PayloadStatus::new(
                             PayloadStatusEnum::Invalid {
                                 validation_error: error.to_string(),
                             },
@@ -2006,7 +2006,7 @@ mod tests {
         let mut rx = spawn_consensus_engine(consensus_engine);

         // consensus engine is idle
-        std::thread::sleep(Duration::from_millis(100));
+        tokio::time::sleep(Duration::from_millis(100)).await;

         assert_matches!(rx.try_recv(), Err(TryRecvError::Empty));

         // consensus engine is still idle because no FCUs were received
diff --git a/crates/net/common/src/bandwidth_meter.rs b/crates/net/common/src/bandwidth_meter.rs
index cc61d0c07..ba24ffff7 100644
--- a/crates/net/common/src/bandwidth_meter.rs
+++ b/crates/net/common/src/bandwidth_meter.rs
@@ -134,7 +134,7 @@ impl AsyncRead for MeteredStream {
         this.meter
             .inner
             .inbound
-            .fetch_add(u64::try_from(num_bytes).unwrap_or(u64::max_value()), Ordering::Relaxed);
+            .fetch_add(u64::try_from(num_bytes).unwrap_or(u64::MAX), Ordering::Relaxed);
         Poll::Ready(Ok(()))
     }
 }
@@ -150,7 +150,7 @@ impl AsyncWrite for MeteredStream {
         this.meter
             .inner
             .outbound
-            .fetch_add(u64::try_from(num_bytes).unwrap_or(u64::max_value()), Ordering::Relaxed);
+            .fetch_add(u64::try_from(num_bytes).unwrap_or(u64::MAX), Ordering::Relaxed);
         Poll::Ready(Ok(num_bytes))
     }
diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs
index 4920389bc..8c5f0394b 100644
--- a/crates/net/discv4/src/lib.rs
+++ b/crates/net/discv4/src/lib.rs
@@ -590,7 +590,7 @@ impl Discv4Service {
     /// Returns the current enr sequence of the local record.
     fn enr_seq(&self) -> Option {
-        (self.config.enable_eip868).then(|| self.local_eip_868_enr.seq())
+        self.config.enable_eip868.then(|| self.local_eip_868_enr.seq())
     }

     /// Sets the [Interval] used for periodically looking up targets over the network
diff --git a/crates/net/eth-wire/src/types/blocks.rs b/crates/net/eth-wire/src/types/blocks.rs
index 9491e34f0..6e442f0b5 100644
--- a/crates/net/eth-wire/src/types/blocks.rs
+++ b/crates/net/eth-wire/src/types/blocks.rs
@@ -54,13 +54,13 @@ pub struct BlockHeaders(
 #[cfg(any(test, feature = "arbitrary"))]
 impl proptest::arbitrary::Arbitrary for BlockHeaders {
     type Parameters = ();
-    type Strategy = proptest::prelude::BoxedStrategy;
-
     fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
         let headers_strategy = vec(valid_header_strategy(), 0..10); // Adjust the range as needed

         headers_strategy.prop_map(BlockHeaders).boxed()
     }
+
+    type Strategy = proptest::prelude::BoxedStrategy;
 }

 #[cfg(any(test, feature = "arbitrary"))]
diff --git a/crates/net/eth-wire/src/types/receipts.rs b/crates/net/eth-wire/src/types/receipts.rs
index 5d177e0a1..41d40d9c4 100644
--- a/crates/net/eth-wire/src/types/receipts.rs
+++ b/crates/net/eth-wire/src/types/receipts.rs
@@ -63,7 +63,7 @@ mod tests {
         let mut out = out.as_slice();
         let decoded = Receipts::decode(&mut out).unwrap();
-        assert!(receipts == decoded);
+        assert_eq!(receipts, decoded);
     }

     #[test]
diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs
index 89ff0c06e..83226397c 100644
--- a/crates/net/network/src/transactions/fetcher.rs
+++ b/crates/net/network/src/transactions/fetcher.rs
@@ -691,10 +691,10 @@ impl TransactionFetcher {
         // try to send the request to the peer
         if let Err(err) = peer.request_tx.try_send(req) {
             // peer channel is full
-            match err {
+            return match err {
                 TrySendError::Full(_) | TrySendError::Closed(_) => {
                     self.metrics.egress_peer_channel_full.increment(1);
-                    return Some(new_announced_hashes)
+                    Some(new_announced_hashes)
                 }
             }
         } else {
diff --git a/crates/node-core/src/args/secret_key.rs b/crates/node-core/src/args/secret_key.rs
index e08f8caaf..b93d909b4 100644
--- a/crates/node-core/src/args/secret_key.rs
+++ b/crates/node-core/src/args/secret_key.rs
@@ -37,7 +37,9 @@ pub fn get_secret_key(secret_key_path: &Path) -> Result {
             let contents = fs::read_to_string(secret_key_path)?;
-            Ok((contents.as_str().parse::())
+            Ok(contents
+                .as_str()
+                .parse::()
                 .map_err(SecretKeyError::SecretKeyDecodeError)?)
         }
         Ok(false) => {
diff --git a/crates/node-core/src/events/cl.rs b/crates/node-core/src/events/cl.rs
index 38f8c819e..6d9c2cb99 100644
--- a/crates/node-core/src/events/cl.rs
+++ b/crates/node-core/src/events/cl.rs
@@ -66,12 +66,12 @@ impl Stream for ConsensusLayerHealthEvents {
             if let Some(transition_config) =
                 this.canon_chain.last_exchanged_transition_configuration_timestamp()
             {
-                if transition_config.elapsed() <= NO_TRANSITION_CONFIG_EXCHANGED_PERIOD {
+                return if transition_config.elapsed() <= NO_TRANSITION_CONFIG_EXCHANGED_PERIOD {
                     // We never had an FCU, but had a transition config exchange, and it's recent.
-                    return Poll::Ready(Some(ConsensusLayerHealthEvent::NeverReceivedUpdates))
+                    Poll::Ready(Some(ConsensusLayerHealthEvent::NeverReceivedUpdates))
                 } else {
                     // We never had an FCU, but had a transition config exchange, but it's too old.
-                    return Poll::Ready(Some(ConsensusLayerHealthEvent::HasNotBeenSeenForAWhile(
+                    Poll::Ready(Some(ConsensusLayerHealthEvent::HasNotBeenSeenForAWhile(
                         transition_config.elapsed(),
                     )))
                 }
diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs
index 07faf63d4..c583b7c21 100644
--- a/crates/primitives/src/chain/spec.rs
+++ b/crates/primitives/src/chain/spec.rs
@@ -595,9 +595,10 @@ impl ChainSpec {
         // If shanghai is activated, initialize the header with an empty withdrawals hash, and
         // empty withdrawals list.
-        let withdrawals_root =
-            (self.fork(Hardfork::Shanghai).active_at_timestamp(self.genesis.timestamp))
-                .then_some(EMPTY_WITHDRAWALS);
+        let withdrawals_root = self
+            .fork(Hardfork::Shanghai)
+            .active_at_timestamp(self.genesis.timestamp)
+            .then_some(EMPTY_WITHDRAWALS);

         // If Cancun is activated at genesis, we set:
         // * parent beacon block root to 0x0
@@ -647,7 +648,7 @@ impl ChainSpec {
         let genesis_base_fee = self.genesis.base_fee_per_gas.unwrap_or(EIP1559_INITIAL_BASE_FEE);

         // If London is activated at genesis, we set the initial base fee as per EIP-1559.
-        (self.fork(Hardfork::London).active_at_block(0)).then_some(genesis_base_fee)
+        self.fork(Hardfork::London).active_at_block(0).then_some(genesis_base_fee)
     }

     /// Get the [BaseFeeParams] for the chain at the given timestamp.
diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs
index d4caa8186..8448b309e 100644
--- a/crates/primitives/src/receipt.rs
+++ b/crates/primitives/src/receipt.rs
@@ -234,7 +234,7 @@ impl proptest::arbitrary::Arbitrary for Receipt {
                     deposit_receipt_version
                 }
             }
-        };
+        }

         arbitrary_receipt().boxed()
     }
diff --git a/crates/primitives/src/revm/config.rs b/crates/primitives/src/revm/config.rs
index 0dcadaa03..672910e56 100644
--- a/crates/primitives/src/revm/config.rs
+++ b/crates/primitives/src/revm/config.rs
@@ -10,14 +10,14 @@ pub fn revm_spec_by_timestamp_after_merge(
 ) -> revm_primitives::SpecId {
     #[cfg(feature = "optimism")]
     if chain_spec.is_optimism() {
-        if chain_spec.fork(Hardfork::Ecotone).active_at_timestamp(timestamp) {
-            return revm_primitives::ECOTONE
+        return if chain_spec.fork(Hardfork::Ecotone).active_at_timestamp(timestamp) {
+            revm_primitives::ECOTONE
         } else if chain_spec.fork(Hardfork::Canyon).active_at_timestamp(timestamp) {
-            return revm_primitives::CANYON
+            revm_primitives::CANYON
         } else if chain_spec.fork(Hardfork::Regolith).active_at_timestamp(timestamp) {
-            return revm_primitives::REGOLITH
+            revm_primitives::REGOLITH
         } else {
-            return revm_primitives::BEDROCK
+            revm_primitives::BEDROCK
         }
     }
diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs
index 8344d674c..f78fe7fea 100644
--- a/crates/primitives/src/transaction/pooled.rs
+++ b/crates/primitives/src/transaction/pooled.rs
@@ -624,8 +624,6 @@ impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement {
 #[cfg(any(test, feature = "arbitrary"))]
 impl proptest::arbitrary::Arbitrary for PooledTransactionsElement {
     type Parameters = ();
-    type Strategy = proptest::strategy::BoxedStrategy;
-
     fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
         use proptest::prelude::{any, Strategy};

@@ -646,6 +644,8 @@ impl proptest::arbitrary::Arbitrary for PooledTransactionsElement {
             })
             .boxed()
     }
+
+    type Strategy = proptest::strategy::BoxedStrategy;
 }

 /// A signed pooled transaction with recovered signer.
diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs
index 3c51092fd..b5e0cc1de 100644
--- a/crates/prune/src/pruner.rs
+++ b/crates/prune/src/pruner.rs
@@ -91,13 +91,14 @@ impl Pruner {
         // `self.prune_max_blocks_per_run`.
         //
         // Also see docs for `self.previous_tip_block_number`.
-        let blocks_since_last_run =
-            (self.previous_tip_block_number.map_or(1, |previous_tip_block_number| {
+        let blocks_since_last_run = self
+            .previous_tip_block_number
+            .map_or(1, |previous_tip_block_number| {
                 // Saturating subtraction is needed for the case when the chain was reverted,
                 // meaning current block number might be less than the previous tip
                 // block number.
                tip_block_number.saturating_sub(previous_tip_block_number) as usize
-            }))
+            })
             .min(self.prune_max_blocks_per_run);

         let delete_limit = self.delete_limit * blocks_since_last_run;
diff --git a/crates/prune/src/segments/transaction_lookup.rs b/crates/prune/src/segments/transaction_lookup.rs
index 379431448..f0b24ef0a 100644
--- a/crates/prune/src/segments/transaction_lookup.rs
+++ b/crates/prune/src/segments/transaction_lookup.rs
@@ -42,7 +42,7 @@ impl Segment for TransactionLookup {
             }
         }
         .into_inner();
-        let tx_range = start..=(end.min(start + input.delete_limit as u64 - 1));
+        let tx_range = start..=end.min(start + input.delete_limit as u64 - 1);
         let tx_range_end = *tx_range.end();

         // Retrieve transactions in the range and calculate their hashes in parallel
diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs
index 877dcd68c..332899ad9 100644
--- a/crates/rpc/ipc/src/server/mod.rs
+++ b/crates/rpc/ipc/src/server/mod.rs
@@ -331,7 +331,7 @@ async fn spawn_connection(
                     // shutdown
                     break
                 }
-            };
+            }
        }
    });
diff --git a/crates/rpc/rpc-types/src/eth/transaction/typed.rs b/crates/rpc/rpc-types/src/eth/transaction/typed.rs
index d0569f6f2..cf0abc9c1 100644
--- a/crates/rpc/rpc-types/src/eth/transaction/typed.rs
+++ b/crates/rpc/rpc-types/src/eth/transaction/typed.rs
@@ -151,13 +151,13 @@ impl Encodable for TransactionKind {
     fn encode(&self, out: &mut dyn BufMut) {
         match self {
             TransactionKind::Call(to) => to.encode(out),
-            TransactionKind::Create => ([]).encode(out),
+            TransactionKind::Create => [].encode(out),
         }
     }
     fn length(&self) -> usize {
         match self {
             TransactionKind::Call(to) => to.length(),
-            TransactionKind::Create => ([]).length(),
+            TransactionKind::Create => [].length(),
         }
     }
 }
diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs
index b3d609cbe..4e6c854b8 100644
--- a/crates/rpc/rpc/src/eth/api/server.rs
+++ b/crates/rpc/rpc/src/eth/api/server.rs
@@ -324,18 +324,18 @@ where
         return Ok(EthApi::gas_price(self).await?)
     }

-    /// Handler for: `eth_blobBaseFee`
-    async fn blob_base_fee(&self) -> Result {
-        trace!(target: "rpc::eth", "Serving eth_blobBaseFee");
-        return Ok(EthApi::blob_base_fee(self).await?)
-    }
-
     /// Handler for: `eth_maxPriorityFeePerGas`
     async fn max_priority_fee_per_gas(&self) -> Result {
         trace!(target: "rpc::eth", "Serving eth_maxPriorityFeePerGas");
         return Ok(EthApi::suggested_priority_fee(self).await?)
     }

+    /// Handler for: `eth_blobBaseFee`
+    async fn blob_base_fee(&self) -> Result {
+        trace!(target: "rpc::eth", "Serving eth_blobBaseFee");
+        return Ok(EthApi::blob_base_fee(self).await?)
+    }
+
     // FeeHistory is calculated based on lazy evaluation of fees for historical blocks, and further
     // caching of it in the LRU cache.
     // When new RPC call is executed, the cache gets locked, we check it for the historical fees
@@ -624,7 +624,7 @@ mod tests {
         let response = as EthApiServer>::fee_history(
             &eth_api,
-            (1).into(),
+            1.into(),
             (newest_block + 1000).into(),
             Some(vec![10.0]),
         )
@@ -647,8 +647,8 @@ mod tests {
         let response = as EthApiServer>::fee_history(
             &eth_api,
-            (0).into(),
-            (newest_block).into(),
+            0.into(),
+            newest_block.into(),
             None,
         )
         .await
@@ -670,7 +670,7 @@ mod tests {
         let (eth_api, base_fees_per_gas, gas_used_ratios) =
             prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default());

-        let fee_history = eth_api.fee_history(1, (newest_block).into(), None).await.unwrap();
+        let fee_history = eth_api.fee_history(1, newest_block.into(), None).await.unwrap();
         assert_eq!(
             &fee_history.base_fee_per_gas,
             &base_fees_per_gas[base_fees_per_gas.len() - 2..],
@@ -708,7 +708,7 @@ mod tests {
             prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default());

         let fee_history =
-            eth_api.fee_history(block_count, (newest_block).into(), None).await.unwrap();
+            eth_api.fee_history(block_count, newest_block.into(), None).await.unwrap();
         assert_eq!(
             &fee_history.base_fee_per_gas,
             &base_fees_per_gas,
diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs
index 2d3cb1474..b237a3256 100644
--- a/crates/rpc/rpc/src/eth/pubsub.rs
+++ b/crates/rpc/rpc/src/eth/pubsub.rs
@@ -167,7 +167,7 @@ where
             return Ok(())
         }

-        while (canon_state.next().await).is_some() {
+        while canon_state.next().await.is_some() {
             let current_syncing = pubsub.network.is_syncing();
             // Only send a new response if the sync status has changed
             if current_syncing != initial_sync_status {
diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs
index 13b8b4a53..91de1562a 100644
--- a/crates/stages/src/stages/hashing_account.rs
+++ b/crates/stages/src/stages/hashing_account.rs
@@ -115,7 +115,7 @@ impl AccountHashingStage {
             let mut acc_changeset_cursor =
                 provider.tx_ref().cursor_write::()?;
-            for (t, (addr, acc)) in (opts.blocks).zip(&accounts) {
+            for (t, (addr, acc)) in opts.blocks.zip(&accounts) {
                 let Account { nonce, balance, .. } = acc;
                 let prev_acc = Account {
                     nonce: nonce - 1,
diff --git a/crates/stages/src/stages/hashing_storage.rs b/crates/stages/src/stages/hashing_storage.rs
index 3b0bb84fb..298389f6c 100644
--- a/crates/stages/src/stages/hashing_storage.rs
+++ b/crates/stages/src/stages/hashing_storage.rs
@@ -287,7 +287,7 @@ mod tests {
                     input.checkpoint = Some(checkpoint);
                     continue
                 } else {
-                    assert!(checkpoint.block_number == previous_stage);
+                    assert_eq!(checkpoint.block_number, previous_stage);
                     assert_matches!(checkpoint.storage_hashing_stage_checkpoint(), Some(StorageHashingCheckpoint {
                         progress: EntitiesCheckpoint {
                             processed,
diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs
index 0e590737e..4cb91ccee 100644
--- a/crates/stages/src/stages/sender_recovery.rs
+++ b/crates/stages/src/stages/sender_recovery.rs
@@ -136,7 +136,7 @@ impl Stage for SenderRecoveryStage {
             let (tx_id, sender) = match recovered {
                 Ok(result) => result,
                 Err(error) => {
-                    match *error {
+                    return match *error {
                         SenderRecoveryStageError::FailedRecovery(err) => {
                             // get the block number for the bad transaction
                             let block_number = tx
@@ -148,14 +148,14 @@ impl Stage for SenderRecoveryStage {
                             let sealed_header = provider
                                 .sealed_header(block_number)?
                                .ok_or(ProviderError::HeaderNotFound(block_number.into()))?;
-                            return Err(StageError::Block {
+                            Err(StageError::Block {
                                 block: Box::new(sealed_header),
                                 error: BlockErrorKind::Validation(
                                     consensus::ConsensusError::TransactionSignerRecoveryError,
                                 ),
                             })
                         }
-                        SenderRecoveryStageError::StageError(err) => return Err(err),
+                        SenderRecoveryStageError::StageError(err) => Err(err),
                     }
                 }
             };
diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs
index f7e2082c7..abc785edd 100644
--- a/crates/storage/codecs/derive/src/compact/mod.rs
+++ b/crates/storage/codecs/derive/src/compact/mod.rs
@@ -63,8 +63,9 @@ pub fn get_fields(data: &Data) -> FieldList {
                     assert_eq!(fields.len(), data_fields.named.len());
                 }
                 syn::Fields::Unnamed(ref data_fields) => {
-                    assert!(
-                        data_fields.unnamed.len() == 1,
+                    assert_eq!(
+                        data_fields.unnamed.len(),
+                        1,
                         "Compact only allows one unnamed field. Consider making it a struct."
                     );
                     load_field(&data_fields.unnamed[0], &mut fields, false);
@@ -80,8 +81,9 @@ pub fn get_fields(data: &Data) -> FieldList {
                         panic!("Not allowed to have Enum Variants with multiple named fields. Make it a struct instead.")
                     }
                     syn::Fields::Unnamed(data_fields) => {
-                        assert!(
-                            data_fields.unnamed.len() == 1,
+                        assert_eq!(
+                            data_fields.unnamed.len(),
+                            1,
                             "Compact only allows one unnamed field. Consider making it a struct."
                         );
                         load_field(&data_fields.unnamed[0], &mut fields, true);
diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs
index 09ecf20ef..a85c3dff3 100644
--- a/crates/storage/db/src/implementation/mdbx/mod.rs
+++ b/crates/storage/db/src/implementation/mdbx/mod.rs
@@ -503,7 +503,7 @@ mod tests {
         // GET
         let tx = env.tx().expect(ERROR_INIT_TX);
         let result = tx.get::(key).expect(ERROR_GET);
-        assert!(result.expect(ERROR_RETURN_VALUE) == value);
+        assert_eq!(result.expect(ERROR_RETURN_VALUE), value);
         tx.commit().expect(ERROR_COMMIT);
     }

@@ -1131,9 +1131,9 @@ mod tests {
        let mut cursor = tx.cursor_dup_read::().unwrap();

        // Notice that value11 and value22 have been ordered in the DB.
-       assert!(Some(value00) == cursor.next_dup_val().unwrap());
-       assert!(Some(value11) == cursor.next_dup_val().unwrap());
-       assert!(Some(value22) == cursor.next_dup_val().unwrap());
+       assert_eq!(Some(value00), cursor.next_dup_val().unwrap());
+       assert_eq!(Some(value11), cursor.next_dup_val().unwrap());
+       assert_eq!(Some(value22), cursor.next_dup_val().unwrap());
    }

    // Seek value with exact subkey
diff --git a/crates/storage/db/src/tables/models/blocks.rs b/crates/storage/db/src/tables/models/blocks.rs
index 905f336b5..62378612a 100644
--- a/crates/storage/db/src/tables/models/blocks.rs
+++ b/crates/storage/db/src/tables/models/blocks.rs
@@ -96,8 +96,9 @@ mod tests {
         let mut ommer = StoredBlockOmmers::default();
         ommer.ommers.push(Header::default());
         ommer.ommers.push(Header::default());
-        assert!(
-            ommer.clone() == StoredBlockOmmers::decompress::>(ommer.compress()).unwrap()
+        assert_eq!(
+            ommer.clone(),
+            StoredBlockOmmers::decompress::>(ommer.compress()).unwrap()
         );
     }
diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs
index bacb3021c..71a4ac7fd 100644
--- a/crates/storage/libmdbx-rs/src/environment.rs
+++ b/crates/storage/libmdbx-rs/src/environment.rs
@@ -146,7 +146,7 @@ impl Environment {
     where
         F: FnOnce(*mut ffi::MDBX_env) -> T,
     {
-        (f)(self.env_ptr())
+        f(self.env_ptr())
     }

     /// Flush the environment data buffers to disk.
diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs
index 21aba9fb8..2aa12600e 100644
--- a/crates/storage/provider/src/providers/database/provider.rs
+++ b/crates/storage/provider/src/providers/database/provider.rs
@@ -2134,32 +2134,40 @@ impl HashingWriter for DatabaseProvider {
 }

 impl HistoryWriter for DatabaseProvider {
-    fn update_history_indices(&self, range: RangeInclusive) -> ProviderResult<()> {
-        // account history stage
-        {
-            let indices = self.changed_accounts_and_blocks_with_range(range.clone())?;
-            self.insert_account_history_index(indices)?;
-        }
-
-        // storage history stage
-        {
-            let indices = self.changed_storages_and_blocks_with_range(range)?;
-            self.insert_storage_history_index(indices)?;
-        }
-
-        Ok(())
-    }
-
-    fn insert_storage_history_index(
+    fn unwind_account_history_indices(
         &self,
-        storage_transitions: BTreeMap<(Address, B256), Vec>,
-    ) -> ProviderResult<()> {
-        self.append_history_index::<_, tables::StoragesHistory>(
-            storage_transitions,
-            |(address, storage_key), highest_block_number| {
-                StorageShardedKey::new(address, storage_key, highest_block_number)
-            },
-        )
+        range: RangeInclusive,
+    ) -> ProviderResult {
+        let mut last_indices = self
+            .tx
+            .cursor_read::()?
+            .walk_range(range)?
+            .map(|entry| entry.map(|(index, account)| (account.address, index)))
+            .collect::, _>>()?;
+        last_indices.sort_by_key(|(a, _)| *a);
+
+        // Unwind the account history index.
+        let mut cursor = self.tx.cursor_write::()?;
+        for &(address, rem_index) in &last_indices {
+            let partial_shard = unwind_history_shards::<_, tables::AccountsHistory, _>(
+                &mut cursor,
+                ShardedKey::last(address),
+                rem_index,
+                |sharded_key| sharded_key.key == address,
+            )?;
+
+            // Check the last returned partial shard.
+            // If it's not empty, the shard needs to be reinserted.
+            if !partial_shard.is_empty() {
+                cursor.insert(
+                    ShardedKey::last(address),
+                    BlockNumberList::new_pre_sorted(partial_shard),
+                )?;
+            }
+        }
+
+        let changesets = last_indices.len();
+        Ok(changesets)
     }

     fn insert_account_history_index(
@@ -2212,40 +2220,32 @@ impl HistoryWriter for DatabaseProvider {
         Ok(changesets)
     }

-    fn unwind_account_history_indices(
+    fn insert_storage_history_index(
         &self,
-        range: RangeInclusive,
-    ) -> ProviderResult {
-        let mut last_indices = self
-            .tx
-            .cursor_read::()?
-            .walk_range(range)?
-            .map(|entry| entry.map(|(index, account)| (account.address, index)))
-            .collect::, _>>()?;
-        last_indices.sort_by_key(|(a, _)| *a);
+        storage_transitions: BTreeMap<(Address, B256), Vec>,
+    ) -> ProviderResult<()> {
+        self.append_history_index::<_, tables::StoragesHistory>(
+            storage_transitions,
+            |(address, storage_key), highest_block_number| {
+                StorageShardedKey::new(address, storage_key, highest_block_number)
+            },
+        )
+    }

-        // Unwind the account history index.
-        let mut cursor = self.tx.cursor_write::()?;
-        for &(address, rem_index) in &last_indices {
-            let partial_shard = unwind_history_shards::<_, tables::AccountsHistory, _>(
-                &mut cursor,
-                ShardedKey::last(address),
-                rem_index,
-                |sharded_key| sharded_key.key == address,
-            )?;
-
-            // Check the last returned partial shard.
-            // If it's not empty, the shard needs to be reinserted.
-            if !partial_shard.is_empty() {
-                cursor.insert(
-                    ShardedKey::last(address),
-                    BlockNumberList::new_pre_sorted(partial_shard),
-                )?;
-            }
+    fn update_history_indices(&self, range: RangeInclusive) -> ProviderResult<()> {
+        // account history stage
+        {
+            let indices = self.changed_accounts_and_blocks_with_range(range.clone())?;
+            self.insert_account_history_index(indices)?;
         }

-        let changesets = last_indices.len();
-        Ok(changesets)
+        // storage history stage
+        {
+            let indices = self.changed_storages_and_blocks_with_range(range)?;
+            self.insert_storage_history_index(indices)?;
+        }
+
+        Ok(())
     }
 }
diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs
index fbf36f9f9..6dc75e307 100644
--- a/crates/storage/provider/src/providers/static_file/jar.rs
+++ b/crates/storage/provider/src/providers/static_file/jar.rs
@@ -255,15 +255,6 @@ impl<'a> TransactionsProvider for StaticFileJarProvider<'a> {
         Err(ProviderError::UnsupportedProvider)
     }

-    fn senders_by_tx_range(
-        &self,
-        range: impl RangeBounds,
-    ) -> ProviderResult> {
-        let txs = self.transactions_by_tx_range(range)?;
-        TransactionSignedNoHash::recover_signers(&txs, txs.len())
-            .ok_or(ProviderError::SenderRecoveryError)
-    }
-
     fn transactions_by_tx_range(
         &self,
         range: impl RangeBounds,
@@ -282,6 +273,15 @@ impl<'a> TransactionsProvider for StaticFileJarProvider<'a> {
         Ok(txes)
     }

+    fn senders_by_tx_range(
+        &self,
+        range: impl RangeBounds,
+    ) -> ProviderResult> {
+        let txs = self.transactions_by_tx_range(range)?;
+        TransactionSignedNoHash::recover_signers(&txs, txs.len())
+            .ok_or(ProviderError::SenderRecoveryError)
+    }
+
     fn transaction_sender(&self, num: TxNumber) -> ProviderResult> {
         Ok(self
             .cursor()?
diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs
index 104b19ea7..bdd2122ab 100644
--- a/crates/storage/provider/src/providers/static_file/manager.rs
+++ b/crates/storage/provider/src/providers/static_file/manager.rs
@@ -1008,15 +1008,6 @@ impl TransactionsProvider for StaticFileProvider {
         Err(ProviderError::UnsupportedProvider)
     }

-    fn senders_by_tx_range(
-        &self,
-        range: impl RangeBounds,
-    ) -> ProviderResult> {
-        let txes = self.transactions_by_tx_range(range)?;
-        TransactionSignedNoHash::recover_signers(&txes, txes.len())
-            .ok_or(ProviderError::SenderRecoveryError)
-    }
-
     fn transactions_by_tx_range(
         &self,
         range: impl RangeBounds,
@@ -1031,6 +1022,15 @@ impl TransactionsProvider for StaticFileProvider {
         )
     }

+    fn senders_by_tx_range(
+        &self,
+        range: impl RangeBounds,
+    ) -> ProviderResult> {
+        let txes = self.transactions_by_tx_range(range)?;
+        TransactionSignedNoHash::recover_signers(&txes, txes.len())
+            .ok_or(ProviderError::SenderRecoveryError)
+    }
+
     fn transaction_sender(&self, id: TxNumber) -> ProviderResult> {
         Ok(self.transaction_by_id_no_hash(id)?.and_then(|tx| tx.recover_signer()))
     }
diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs
index 5b40ef0b5..26e09e5a1 100644
--- a/crates/storage/provider/src/test_utils/mock.rs
+++ b/crates/storage/provider/src/test_utils/mock.rs
@@ -692,9 +692,6 @@ impl StateProviderFactory for Arc {
 }

 impl WithdrawalsProvider for MockEthProvider {
-    fn latest_withdrawal(&self) -> ProviderResult> {
-        Ok(None)
-    }
     fn withdrawals_by_block(
         &self,
         _id: BlockHashOrNumber,
     ) -> ProviderResult> {
         Ok(None)
     }
+    fn latest_withdrawal(&self) -> ProviderResult> {
+        Ok(None)
+    }
 }

 impl ChangeSetReader for MockEthProvider {
diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs
index 851db2f3c..2272f09a0 100644
--- a/crates/storage/provider/src/test_utils/noop.rs
+++ b/crates/storage/provider/src/test_utils/noop.rs
@@ -195,13 +195,6 @@ impl TransactionsProvider for NoopProvider {
         Ok(Vec::default())
     }

-    fn senders_by_tx_range(
-        &self,
-        _range: impl RangeBounds,
-    ) -> ProviderResult> {
-        Ok(Vec::default())
-    }
-
     fn transactions_by_tx_range(
         &self,
         _range: impl RangeBounds,
@@ -209,6 +202,13 @@ impl TransactionsProvider for NoopProvider {
         Ok(Vec::default())
     }

+    fn senders_by_tx_range(
+        &self,
+        _range: impl RangeBounds,
+    ) -> ProviderResult> {
+        Ok(Vec::default())
+    }
+
     fn transaction_sender(&self, _id: TxNumber) -> ProviderResult> {
         Ok(None)
     }
@@ -429,9 +429,6 @@ impl StageCheckpointReader for NoopProvider {
 }

 impl WithdrawalsProvider for NoopProvider {
-    fn latest_withdrawal(&self) -> ProviderResult> {
-        Ok(None)
-    }
     fn withdrawals_by_block(
         &self,
         _id: BlockHashOrNumber,
     ) -> ProviderResult> {
         Ok(None)
     }
+    fn latest_withdrawal(&self) -> ProviderResult> {
+        Ok(None)
+    }
 }

 impl PruneCheckpointReader for NoopProvider {
diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs
index 0246a9583..5f2a11048 100644
--- a/crates/transaction-pool/src/noop.rs
+++ b/crates/transaction-pool/src/noop.rs
@@ -92,11 +92,11 @@ impl TransactionPool for NoopTransactionPool {
         mpsc::channel(1).1
     }

-    fn blob_transaction_sidecars_listener(&self) -> Receiver {
+    fn new_transactions_listener(&self) -> Receiver> {
         mpsc::channel(1).1
     }

-    fn new_transactions_listener(&self) -> Receiver> {
+    fn blob_transaction_sidecars_listener(&self) -> Receiver {
         mpsc::channel(1).1
     }

@@ -211,6 +211,13 @@ impl TransactionPool for NoopTransactionPool {
         None
     }

+    fn get_transactions_by_origin(
+        &self,
+        _origin: TransactionOrigin,
+    ) -> Vec>> {
+        vec![]
+    }
+
     fn unique_senders(&self) -> HashSet {
         Default::default()
     }
@@ -235,13 +242,6 @@ impl TransactionPool for NoopTransactionPool {
         }
         Err(BlobStoreError::MissingSidecar(tx_hashes[0]))
     }
-
-    fn get_transactions_by_origin(
-        &self,
-        _origin: TransactionOrigin,
-    ) -> Vec>> {
-        vec![]
-    }
 }

 /// A [`TransactionValidator`] that does nothing.
diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs
index 9bf55678e..f59076458 100644
--- a/crates/transaction-pool/src/test_utils/mock.rs
+++ b/crates/transaction-pool/src/test_utils/mock.rs
@@ -973,7 +973,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction {
                     gas_price: *gas_price,
                     gas_limit: *gas_limit,
                     to: *to,
-                    value: (*value),
+                    value: *value,
                     input: (*input).clone(),
                     size: tx.size(),
                 },
@@ -995,7 +995,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction {
                     max_priority_fee_per_gas: *max_priority_fee_per_gas,
                     gas_limit: *gas_limit,
                     to: *to,
-                    value: (*value),
+                    value: *value,
                     input: (*input).clone(),
                     accesslist: (*access_list).clone(),
                     size: tx.size(),
@@ -1020,7 +1020,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction {
                     max_fee_per_blob_gas: *max_fee_per_blob_gas,
                     gas_limit: *gas_limit,
                     to: *to,
-                    value: (*value),
+                    value: *value,
                     input: (*input).clone(),
                     accesslist: (*access_list).clone(),
                     // only generate a sidecar if it is a 4844 tx - also for the sake of
diff --git a/crates/trie-parallel/src/storage_root_targets.rs b/crates/trie-parallel/src/storage_root_targets.rs
index a0ada6349..d34441d3b 100644
--- a/crates/trie-parallel/src/storage_root_targets.rs
+++ b/crates/trie-parallel/src/storage_root_targets.rs
@@ -38,8 +38,8 @@ impl IntoIterator for StorageRootTargets {

 #[cfg(feature = "parallel")]
 impl rayon::iter::IntoParallelIterator for StorageRootTargets {
-    type Item = (B256, PrefixSet);
     type Iter = rayon::collections::hash_map::IntoIter;
+    type Item = (B256, PrefixSet);

     fn into_par_iter(self) -> Self::Iter {
         self.0.into_par_iter()
diff --git a/crates/trie/src/trie.rs b/crates/trie/src/trie.rs
index 27104423d..faf693f33 100644
--- a/crates/trie/src/trie.rs
+++ b/crates/trie/src/trie.rs
@@ -1083,9 +1083,9 @@ mod tests {
         assert_eq!(nibbles2b[..], [0xB, 0x0]);
         assert_eq!(node2a, node2b);
         tx.commit().unwrap();
-        let tx = factory.provider_rw().unwrap();

         {
+            let tx = factory.provider_rw().unwrap();
             let mut hashed_account_cursor =
                 tx.tx_ref().cursor_write::().unwrap();
@@ -1138,11 +1138,10 @@ mod tests {
             assert_ne!(node1c.hashes[0], node1b.hashes[0]);
             assert_eq!(node1c.hashes[1], node1b.hashes[1]);
             assert_eq!(node1c.hashes[2], node1b.hashes[2]);
-            drop(tx);
         }

-        let tx = factory.provider_rw().unwrap();
         {
+            let tx = factory.provider_rw().unwrap();
             let mut hashed_account_cursor =
                 tx.tx_ref().cursor_write::().unwrap();
diff --git a/crates/trie/src/trie_cursor/noop.rs b/crates/trie/src/trie_cursor/noop.rs
index 5f21bbf6a..b168fbfc9 100644
--- a/crates/trie/src/trie_cursor/noop.rs
+++ b/crates/trie/src/trie_cursor/noop.rs
@@ -29,16 +29,16 @@ impl TrieCursorFactory for NoopTrieCursorFactory {
 pub struct NoopAccountTrieCursor;

 impl TrieCursor for NoopAccountTrieCursor {
-    /// Seeks within the account trie.
-    fn seek(
+    /// Seeks an exact match within the account trie.
+    fn seek_exact(
         &mut self,
         _key: Nibbles,
     ) -> Result, DatabaseError> {
         Ok(None)
     }

-    /// Seeks an exact match within the account trie.
-    fn seek_exact(
+    /// Seeks within the account trie.
+    fn seek(
         &mut self,
         _key: Nibbles,
     ) -> Result, DatabaseError> {
         Ok(None)
     }
@@ -57,16 +57,16 @@ impl TrieCursor for NoopAccountTrieCursor {
 pub struct NoopStorageTrieCursor;

 impl TrieCursor for NoopStorageTrieCursor {
-    /// Seeks a key in storage tries.
-    fn seek(
+    /// Seeks an exact match in storage tries.
+    fn seek_exact(
         &mut self,
         _key: Nibbles,
     ) -> Result, DatabaseError> {
         Ok(None)
     }

-    /// Seeks an exact match in storage tries.
-    fn seek_exact(
+    /// Seeks a key in storage tries.
+    fn seek(
         &mut self,
         _key: Nibbles,
     ) -> Result, DatabaseError> {