Enable clippy's redundant_clone linter (#7202)

Co-authored-by: Matthias Seitz <matthias.seitz@outlook.de>
Authored by Justin Traglia on 2024-03-18 20:35:04 -05:00; committed by GitHub.
parent b7ef60b899
commit d91274eaa2
33 changed files with 66 additions and 83 deletions

View File

@ -89,8 +89,9 @@ rust.unused_must_use = "deny"
rust.rust_2018_idioms = "deny"
clippy.empty_line_after_outer_attr = "deny"
clippy.derive_partial_eq_without_eq = "deny"
clippy.redundant_clone = "deny"
clippy.trait_duplication_in_bounds = "deny"
clippy.uninlined_format_args = "warn"
clippy.uninlined_format_args = "deny"
[workspace.package]
version = "0.2.0-beta.3"

View File

@ -107,7 +107,7 @@ impl Command {
bench(
BenchKind::RandomHash,
provider_factory.clone(),
provider_factory,
StaticFileSegment::Headers,
filters,
compression,

View File

@ -100,7 +100,7 @@ impl Command {
db_args.with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)),
)?;
let provider_factory =
Arc::new(ProviderFactory::new(db, chain.clone(), data_dir.static_files_path())?);
Arc::new(ProviderFactory::new(db, chain, data_dir.static_files_path())?);
{
if !self.only_bench {

View File

@ -37,7 +37,7 @@ impl Command {
let tx_range =
provider_factory.provider()?.transaction_range_by_block_range(block_range.into())?;
let mut row_indexes = tx_range.clone().collect::<Vec<_>>();
let mut row_indexes = tx_range.collect::<Vec<_>>();
let path: PathBuf = StaticFileSegment::Receipts
.filename_with_configuration(filters, compression, &block_range)

View File

@ -37,7 +37,7 @@ impl Command {
let tx_range = provider.transaction_range_by_block_range(block_range.into())?;
let mut row_indexes = tx_range.clone().collect::<Vec<_>>();
let mut row_indexes = tx_range.collect::<Vec<_>>();
let path: PathBuf = StaticFileSegment::Transactions
.filename_with_configuration(filters, compression, &block_range)

View File

@ -56,8 +56,7 @@ impl InitCommand {
let db = Arc::new(init_db(&db_path, self.db.database_args())?);
info!(target: "reth::cli", "Database opened");
let provider_factory =
ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?;
let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files_path())?;
info!(target: "reth::cli", "Writing genesis block");

View File

@ -164,7 +164,7 @@ async fn dry_run<DB: Database>(
info!(target: "reth::cli", "Executing stage. [dry-run]");
let mut exec_stage = ExecutionStage::new_with_factory(EvmProcessorFactory::new(
output_provider_factory.chain_spec().clone(),
output_provider_factory.chain_spec(),
EthEvmConfig::default(),
));

View File

@ -1277,7 +1277,7 @@ mod tests {
.shanghai_activated()
.build(),
);
let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone());
let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec);
let consensus = Arc::new(TestConsensus::default());
let executor_factory = TestExecutorFactory::default();
executor_factory.extend(exec_res);
@ -1516,7 +1516,7 @@ mod tests {
mock_block(3, Some(sidechain_block_1.hash()), Vec::from([mock_tx(2)]), 3);
let mut tree = BlockchainTree::new(
TreeExternals::new(provider_factory.clone(), consensus, executor_factory.clone()),
TreeExternals::new(provider_factory, consensus, executor_factory),
BlockchainTreeConfig::default(),
None,
)
@ -1540,7 +1540,7 @@ mod tests {
);
assert_eq!(
tree.insert_block(canonical_block_2.clone(), BlockValidationKind::Exhaustive).unwrap(),
tree.insert_block(canonical_block_2, BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
@ -1591,7 +1591,7 @@ mod tests {
let genesis = data.genesis;
// test pops execution results from vector, so order is from last to first.
let externals = setup_externals(vec![exec5.clone(), exec4.clone(), exec3, exec2, exec1]);
let externals = setup_externals(vec![exec5.clone(), exec4, exec3, exec2, exec1]);
// last finalized block would be number 9.
setup_genesis(&externals.provider_factory, genesis);

View File

@ -460,7 +460,7 @@ mod tests {
let remote_id = pk2id(&remote_key.public_key(SECP256K1));
let mut remote_hello = self.local_hello.clone();
remote_hello.id = remote_id;
let fork_filter = self.fork_filter.clone();
let fork_filter = self.fork_filter;
let remote_handle = tokio::spawn(async move {
let outgoing = TcpStream::connect(local_addr).await.unwrap();

View File

@ -1528,7 +1528,7 @@ mod test {
RequestTxHashes::new(request_hashes.into_iter().collect::<HashSet<_>>());
// but response contains tx 1 + another tx
let response_txns = PooledTransactions(vec![signed_tx_1.clone(), signed_tx_2.clone()]);
let response_txns = PooledTransactions(vec![signed_tx_1.clone(), signed_tx_2]);
let payload = UnverifiedPooledTransactions::new(response_txns);
let (outcome, verified_payload) = payload.verify(&request_hashes, &PeerId::ZERO);

View File

@ -374,14 +374,7 @@ impl NodeConfig {
info!(target: "reth::cli", "Connecting to P2P network");
let secret_key = self.network_secret(data_dir)?;
let default_peers_path = data_dir.known_peers_path();
Ok(self.load_network_config(
config,
client,
executor.clone(),
head,
secret_key,
default_peers_path.clone(),
))
Ok(self.load_network_config(config, client, executor, head, secret_key, default_peers_path))
}
/// Create the [NetworkBuilder].
@ -460,16 +453,16 @@ impl NodeConfig {
{
// configure blockchain tree
let tree_externals = TreeExternals::new(
provider_factory.clone(),
provider_factory,
consensus.clone(),
EvmProcessorFactory::new(self.chain.clone(), evm_config),
);
let tree = BlockchainTree::new(
tree_externals,
tree_config,
prune_config.clone().map(|config| config.segments),
prune_config.map(|config| config.segments),
)?
.with_sync_metrics_tx(sync_metrics_tx.clone());
.with_sync_metrics_tx(sync_metrics_tx);
Ok(tree)
}

View File

@ -263,7 +263,7 @@ impl From<OptimismBuiltPayload> for OptimismExecutionPayloadEnvelopeV3 {
B256::ZERO
};
OptimismExecutionPayloadEnvelopeV3 {
execution_payload: block_to_payload_v3(block.clone()),
execution_payload: block_to_payload_v3(block),
block_value: fees,
// From the engine API spec:
//

View File

@ -542,7 +542,7 @@ mod tests {
.unwrap()];
// Generate a BlobTransactionSidecar from the blobs
let sidecar = generate_blob_sidecar(blobs.clone());
let sidecar = generate_blob_sidecar(blobs);
// Assert commitment equality
assert_eq!(
@ -616,7 +616,7 @@ mod tests {
.unwrap()];
// Generate a BlobTransactionSidecar from the blobs
let sidecar = generate_blob_sidecar(blobs.clone());
let sidecar = generate_blob_sidecar(blobs);
// Create a vector to store the encoded RLP
let mut encoded_rlp = Vec::new();
@ -647,7 +647,7 @@ mod tests {
.unwrap()];
// Generate a BlobTransactionSidecar from the blobs
let sidecar = generate_blob_sidecar(blobs.clone());
let sidecar = generate_blob_sidecar(blobs);
// Create a vector to store the encoded RLP
let mut encoded_rlp = Vec::new();

View File

@ -304,7 +304,7 @@ mod tests {
.execute(
&BlockWithSenders {
block: Block {
header: header.clone(),
header,
body: vec![tx, tx_deposit],
ommers: vec![],
withdrawals: None,
@ -375,7 +375,7 @@ mod tests {
.execute(
&BlockWithSenders {
block: Block {
header: header.clone(),
header,
body: vec![tx, tx_deposit],
ommers: vec![],
withdrawals: None,

View File

@ -869,10 +869,8 @@ mod tests {
// there is no system contract call so there should be NO STORAGE CHANGES
// this means we'll check the transition state
let state = executor.evm.context.evm.inner.db;
let transition_state = state
.transition_state
.clone()
.expect("the evm should be initialized with bundle updates");
let transition_state =
state.transition_state.expect("the evm should be initialized with bundle updates");
// assert that it is the default (empty) transition state
assert_eq!(transition_state, TransitionState::default());

View File

@ -440,7 +440,7 @@ mod tests {
excess_blob_gas: 0x580000,
};
let _block = try_payload_v3_to_block(new_payload.clone())
let _block = try_payload_v3_to_block(new_payload)
.expect_err("execution payload conversion requires typed txs without a rlp header");
}

View File

@ -478,7 +478,7 @@ mod tests {
testing_pool(),
NoopNetwork::default(),
cache.clone(),
GasPriceOracle::new(provider.clone(), Default::default(), cache.clone()),
GasPriceOracle::new(provider, Default::default(), cache),
ETHEREUM_BLOCK_GAS_LIMIT,
BlockingTaskPool::build().expect("failed to build tracing pool"),
fee_history_cache,

View File

@ -143,7 +143,7 @@ mod tests {
GasPriceOracle::new(NoopProvider::default(), Default::default(), cache.clone()),
ETHEREUM_BLOCK_GAS_LIMIT,
BlockingTaskPool::build().expect("failed to build tracing pool"),
FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()),
FeeHistoryCache::new(cache, FeeHistoryCacheConfig::default()),
evm_config,
);
let address = Address::random();
@ -164,10 +164,10 @@ mod tests {
pool,
(),
cache.clone(),
GasPriceOracle::new(mock_provider.clone(), Default::default(), cache.clone()),
GasPriceOracle::new(mock_provider, Default::default(), cache.clone()),
ETHEREUM_BLOCK_GAS_LIMIT,
BlockingTaskPool::build().expect("failed to build tracing pool"),
FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()),
FeeHistoryCache::new(cache, FeeHistoryCacheConfig::default()),
evm_config,
);

View File

@ -63,7 +63,7 @@ pub(crate) fn unwind_hashes<S: Clone + Stage<Arc<TempDatabase<DatabaseEnv>>>>(
) {
let (input, unwind) = range;
let mut stage = stage.clone();
let mut stage = stage;
let provider = db.factory.provider_rw().unwrap();
StorageHashingStage::default().unwind(&provider, unwind).unwrap();

View File

@ -84,7 +84,7 @@ where
consensus,
sync_gap: None,
hash_collector: Collector::new(etl_config.file_size / 2, etl_config.dir.clone()),
header_collector: Collector::new(etl_config.file_size / 2, etl_config.dir.clone()),
header_collector: Collector::new(etl_config.file_size / 2, etl_config.dir),
is_etl_ready: false,
}
}
@ -231,7 +231,7 @@ where
let local_head_number = gap.local_head.number;
// let the downloader know what to sync
self.downloader.update_sync_gap(gap.local_head, gap.target.clone());
self.downloader.update_sync_gap(gap.local_head, gap.target);
// We only want to stop once we have all the headers on ETL filespace (disk).
loop {

View File

@ -405,7 +405,7 @@ mod tests {
table,
BTreeMap::from([
(shard(1), full_list.clone()),
(shard(2), full_list.clone()),
(shard(2), full_list),
(shard(u64::MAX), vec![LAST_BLOCK_IN_FULL_SHARD + 1])
])
);

View File

@ -416,7 +416,7 @@ mod tests {
table,
BTreeMap::from([
(shard(1), full_list.clone()),
(shard(2), full_list.clone()),
(shard(2), full_list),
(shard(u64::MAX), vec![LAST_BLOCK_IN_FULL_SHARD + 1])
])
);

View File

@ -376,11 +376,8 @@ mod tests {
fn only_one() {
let (provider_factory, static_file_provider, _temp_static_files_dir) = setup();
let static_file_producer = StaticFileProducer::new(
provider_factory,
static_file_provider.clone(),
PruneModes::default(),
);
let static_file_producer =
StaticFileProducer::new(provider_factory, static_file_provider, PruneModes::default());
let (tx, rx) = channel();

View File

@ -863,7 +863,7 @@ mod tests {
address1,
RevmAccount {
status: AccountStatus::Touched,
info: account_info.clone(),
info: account_info,
// 0x00 => 0 => 9
storage: HashMap::from([(
U256::ZERO,
@ -1072,7 +1072,7 @@ mod tests {
address1,
RevmAccount {
status: AccountStatus::Touched,
info: account1.clone(),
info: account1,
// 0x01 => 0 => 5
storage: HashMap::from([(
U256::from(1),
@ -1135,7 +1135,7 @@ mod tests {
assert!(this.revert_to(16));
assert_eq!(this.receipts.len(), 7);
let mut this = base.clone();
let mut this = base;
assert!(!this.revert_to(17));
assert_eq!(this.receipts.len(), 7);
}

View File

@ -550,8 +550,8 @@ mod tests {
block2.set_hash(block2_hash);
block2.senders.push(Address::new([4; 20]));
let mut block_state_extended = block_state1.clone();
block_state_extended.extend(block_state2.clone());
let mut block_state_extended = block_state1;
block_state_extended.extend(block_state2);
let chain = Chain::new(vec![block1.clone(), block2.clone()], block_state_extended, None);

View File

@ -785,7 +785,7 @@ mod tests {
static_file_writer.commit().unwrap();
drop(static_file_writer);
let gap = provider.sync_gap(mode.clone(), checkpoint).unwrap();
let gap = provider.sync_gap(mode, checkpoint).unwrap();
assert_eq!(gap.local_head, head);
assert_eq!(gap.target.tip(), consensus_tip.into());
}

View File

@ -497,8 +497,7 @@ mod tests {
// run
assert_eq!(
HistoricalStateProviderRef::new(&tx, 1, static_file_provider.clone())
.basic_account(ADDRESS)
.clone(),
.basic_account(ADDRESS),
Ok(None)
);
assert_eq!(
@ -548,7 +547,7 @@ mod tests {
Ok(None)
);
assert_eq!(
HistoricalStateProviderRef::new(&tx, 1000, static_file_provider.clone())
HistoricalStateProviderRef::new(&tx, 1000, static_file_provider)
.basic_account(HIGHER_ADDRESS),
Ok(Some(higher_acc_plain))
);
@ -712,7 +711,7 @@ mod tests {
account_history_block_number: Some(1),
storage_history_block_number: Some(1),
},
static_file_provider.clone(),
static_file_provider,
);
assert_eq!(provider.account_history_lookup(ADDRESS), Ok(HistoryInfo::MaybeInPlainState));
assert_eq!(

View File

@ -711,7 +711,7 @@ mod tests {
fn test_manager_graceful_shutdown() {
let runtime = tokio::runtime::Runtime::new().unwrap();
let handle = runtime.handle().clone();
let manager = TaskManager::new(handle.clone());
let manager = TaskManager::new(handle);
let executor = manager.executor();
let val = Arc::new(AtomicBool::new(false));
@ -730,9 +730,8 @@ mod tests {
fn test_manager_graceful_shutdown_many() {
let runtime = tokio::runtime::Runtime::new().unwrap();
let handle = runtime.handle().clone();
let manager = TaskManager::new(handle.clone());
let manager = TaskManager::new(handle);
let executor = manager.executor();
let _e = executor.clone();
let counter = Arc::new(AtomicUsize::new(0));
let num = 10;
@ -756,7 +755,7 @@ mod tests {
fn test_manager_graceful_shutdown_timeout() {
let runtime = tokio::runtime::Runtime::new().unwrap();
let handle = runtime.handle().clone();
let manager = TaskManager::new(handle.clone());
let manager = TaskManager::new(handle);
let executor = manager.executor();
let timeout = Duration::from_millis(500);

View File

@ -1170,7 +1170,7 @@ mod tests {
.unwrap()];
// Generate a BlobTransactionSidecar from the blobs.
let sidecar = generate_blob_sidecar(blobs.clone());
let sidecar = generate_blob_sidecar(blobs);
// Create an in-memory blob store.
let blob_store = InMemoryBlobStore::default();

View File

@ -2359,8 +2359,7 @@ mod tests {
let tx = MockTransaction::eip1559().inc_price().inc_limit();
let first = f.validated(tx.clone());
let first_added =
pool.add_transaction(first.clone(), on_chain_balance, on_chain_nonce).unwrap();
let first_added = pool.add_transaction(first, on_chain_balance, on_chain_nonce).unwrap();
let replacement = f.validated(tx.rng_hash().inc_price());
let replacement_added =
pool.add_transaction(replacement.clone(), on_chain_balance, on_chain_nonce).unwrap();
@ -2698,7 +2697,7 @@ mod tests {
pool.add_transaction(f.validated(tx.clone()), U256::from(1_000), 0).unwrap();
// Create another mock transaction with an incremented price.
let tx1 = tx.inc_price().next().clone();
let tx1 = tx.inc_price().next();
// Validate the second mock transaction and add it to the pool.
let tx1_validated = f.validated(tx1.clone());
@ -2897,13 +2896,13 @@ mod tests {
let tx_2 = tx_1.next();
// Create 4 transactions
let v0 = f.validated(tx_0.clone());
let v0 = f.validated(tx_0);
let v1 = f.validated(tx_1);
let v2 = f.validated(tx_2);
// Add first 2 to the pool
let _res = pool.add_transaction(v0.clone(), on_chain_balance, on_chain_nonce).unwrap();
let _res = pool.add_transaction(v1.clone(), on_chain_balance, on_chain_nonce).unwrap();
let _res = pool.add_transaction(v1, on_chain_balance, on_chain_nonce).unwrap();
assert!(pool.queued_transactions().is_empty());
assert_eq!(2, pool.pending_transactions().len());
@ -2912,7 +2911,7 @@ mod tests {
pool.prune_transaction_by_hash(v0.hash());
// Now add transaction with nonce 2
let _res = pool.add_transaction(v2.clone(), on_chain_balance, on_chain_nonce).unwrap();
let _res = pool.add_transaction(v2, on_chain_balance, on_chain_nonce).unwrap();
// v2 is in the queue now. v1 is still in 'pending'.
assert_eq!(1, pool.queued_transactions().len());
@ -2942,7 +2941,7 @@ mod tests {
let tx_1 = tx_0.next();
// Create 2 transactions
let v0 = f.validated(tx_0.clone());
let v0 = f.validated(tx_0);
let v1 = f.validated(tx_1);
// Add them to the pool
@ -2971,14 +2970,14 @@ mod tests {
let tx_3 = tx_2.next();
// Create 4 transactions
let v0 = f.validated(tx_0.clone());
let v0 = f.validated(tx_0);
let v1 = f.validated(tx_1);
let v2 = f.validated(tx_2);
let v3 = f.validated(tx_3);
// Add first 2 to the pool
let _res = pool.add_transaction(v0.clone(), on_chain_balance, on_chain_nonce).unwrap();
let _res = pool.add_transaction(v1.clone(), on_chain_balance, on_chain_nonce).unwrap();
let _res = pool.add_transaction(v1, on_chain_balance, on_chain_nonce).unwrap();
assert_eq!(0, pool.queued_transactions().len());
assert_eq!(2, pool.pending_transactions().len());
@ -2987,7 +2986,7 @@ mod tests {
pool.remove_transaction(v0.id());
// Now add transaction with nonce 2
let _res = pool.add_transaction(v2.clone(), on_chain_balance, on_chain_nonce).unwrap();
let _res = pool.add_transaction(v2, on_chain_balance, on_chain_nonce).unwrap();
// v2 is in the queue now. v1 is still in 'pending'.
assert_eq!(1, pool.queued_transactions().len());
@ -3008,7 +3007,7 @@ mod tests {
assert_eq!(2, pool.pending_transactions().len());
// Add transaction v3 - it 'unclogs' everything.
let _res = pool.add_transaction(v3.clone(), on_chain_balance, on_chain_nonce).unwrap();
let _res = pool.add_transaction(v3, on_chain_balance, on_chain_nonce).unwrap();
assert_eq!(0, pool.queued_transactions().len());
assert_eq!(3, pool.pending_transactions().len());

View File

@ -864,7 +864,7 @@ impl From<MockTransaction> for Transaction {
gas_limit,
to,
value,
input: input.clone(),
input,
}),
MockTransaction::Eip1559 {
hash: _,
@ -886,8 +886,8 @@ impl From<MockTransaction> for Transaction {
max_priority_fee_per_gas,
to,
value,
access_list: accesslist.clone(),
input: input.clone(),
access_list: accesslist,
input,
}),
MockTransaction::Eip4844 {
hash,

View File

@ -291,9 +291,7 @@ mod tests {
}
assert_eq!(
ParallelStateRoot::new(consistent_view.clone(), hashed_state)
.incremental_root()
.unwrap(),
ParallelStateRoot::new(consistent_view, hashed_state).incremental_root().unwrap(),
test_utils::state_root(state)
);
}

View File

@ -61,7 +61,7 @@ where
// Create a hash builder to rebuild the root node since it is not available in the database.
let mut hash_builder =
HashBuilder::default().with_proof_retainer(Vec::from([target_nibbles.clone()]));
HashBuilder::default().with_proof_retainer(Vec::from([target_nibbles]));
let mut account_rlp = Vec::with_capacity(128);
let mut account_node_iter = AccountNodeIter::new(walker, hashed_account_cursor);