Mirror of https://github.com/hl-archive-node/nanoreth.git (synced 2025-12-06 19:09:54 +00:00)

chore: remove some excessive allocs in hot path (#13176)

@@ -278,7 +278,6 @@ impl<N: NetworkPrimitives> TransactionFetcher<N> {
             + IntoIterator<Item = (TxHash, Option<(u8, usize)>)>,
     ) -> RequestTxHashes {
         let mut acc_size_response = 0;
-        let hashes_from_announcement_len = hashes_from_announcement.len();

         let mut hashes_from_announcement_iter = hashes_from_announcement.into_iter();

@@ -292,7 +291,7 @@ impl<N: NetworkPrimitives> TransactionFetcher<N> {
             acc_size_response = size;
         }

-        let mut surplus_hashes = RequestTxHashes::with_capacity(hashes_from_announcement_len - 1);
+        let mut surplus_hashes = RequestTxHashes::default();

         // folds size based on expected response size and adds selected hashes to the request
         // list and the other hashes to the surplus list
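
The swap from `with_capacity(hashes_from_announcement_len - 1)` to `default()` above drops an up-front allocation. A minimal sketch of the effect, using plain std types rather than the actual `RequestTxHashes` internals (assumed here to wrap a growable std collection):

// Sketch (not reth code): a default-constructed collection owns no heap memory until
// the first insert, so if the surplus list stays empty (the common case when the
// whole announcement fits into one request), the allocation is skipped entirely.
fn main() {
    // `with_capacity` pays for a heap allocation up front, even if it is never used.
    let eager: Vec<[u8; 32]> = Vec::with_capacity(128);
    assert!(eager.capacity() >= 128);

    // `default()`/`new()` defers the allocation until an element is actually pushed.
    let mut lazy: Vec<[u8; 32]> = Vec::default();
    assert_eq!(lazy.capacity(), 0); // no heap allocation yet

    lazy.push([0u8; 32]); // first push triggers the (amortized) growth
    assert!(lazy.capacity() >= 1);
}
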
@@ -326,8 +325,6 @@ impl<N: NetworkPrimitives> TransactionFetcher<N> {
         }

         surplus_hashes.extend(hashes_from_announcement_iter.map(|(hash, _metadata)| hash));
-        surplus_hashes.shrink_to_fit();
-        hashes_to_request.shrink_to_fit();

         surplus_hashes
     }
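
Dropping the two `shrink_to_fit` calls removes another allocator round-trip on the hot path: shrinking an over-sized buffer typically reallocates a smaller block and copies the live elements, which is wasted work for a short-lived buffer that is consumed right away. A small illustration with a plain `Vec` (illustrative only, not reth code):

fn main() {
    let mut buf: Vec<u64> = Vec::with_capacity(1024);
    buf.extend(0..10u64);

    let ptr_before = buf.as_ptr();
    buf.shrink_to_fit(); // may reallocate a smaller block and copy the 10 elements over
    let ptr_after = buf.as_ptr();

    println!(
        "capacity after shrink: {}, buffer moved: {}",
        buf.capacity(),
        ptr_before != ptr_after // usually true whenever the capacity actually shrinks
    );
}
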
@@ -432,8 +429,7 @@ impl<N: NetworkPrimitives> TransactionFetcher<N> {
         peers: &HashMap<PeerId, PeerMetadata<N>>,
         has_capacity_wrt_pending_pool_imports: impl Fn(usize) -> bool,
     ) {
-        let init_capacity_req = approx_capacity_get_pooled_transactions_req_eth68(&self.info);
-        let mut hashes_to_request = RequestTxHashes::with_capacity(init_capacity_req);
+        let mut hashes_to_request = RequestTxHashes::default();
         let is_session_active = |peer_id: &PeerId| peers.contains_key(peer_id);

         let mut search_durations = TxFetcherSearchDurations::default();
@@ -482,9 +478,6 @@ impl<N: NetworkPrimitives> TransactionFetcher<N> {
             search_durations.fill_request
         );

-        // free unused memory
-        hashes_to_request.shrink_to_fit();
-
         self.update_pending_fetch_cache_search_metrics(search_durations);

         trace!(target: "net::tx",
@@ -640,16 +640,8 @@ where
             return
         }

-        // load message version before announcement data type is destructed in packing
-        let msg_version = valid_announcement_data.msg_version();
-        //
-        // demand recommended soft limit on response, however the peer may enforce an arbitrary
-        // limit on the response (2MB)
-        //
-        // request buffer is shrunk via call to pack request!
-        let init_capacity_req =
-            self.transaction_fetcher.approx_capacity_get_pooled_transactions_req(msg_version);
-        let mut hashes_to_request = RequestTxHashes::with_capacity(init_capacity_req);
+        let mut hashes_to_request =
+            RequestTxHashes::with_capacity(valid_announcement_data.len() / 4);
         let surplus_hashes =
             self.transaction_fetcher.pack_request(&mut hashes_to_request, valid_announcement_data);

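Here the request buffer's initial capacity is no longer derived from `approx_capacity_get_pooled_transactions_req(msg_version)` but from the announcement itself, a quarter of the announced hashes. A hypothetical stand-in for that heuristic (names below are illustrative, not reth API); exceeding the hint stays cheap as long as the buffer grows like a std collection as `pack_request` fills it:

// Hypothetical helper, not reth API: size the request buffer from the announcement
// length instead of a protocol-level capacity estimate.
fn request_capacity_hint(announced_hashes: usize) -> usize {
    // Assumption: only a fraction of the announced hashes usually fits under the
    // soft response-size limit, so a quarter is a cheap, good-enough starting point.
    announced_hashes / 4
}

fn main() {
    let mut hashes_to_request: Vec<[u8; 32]> = Vec::with_capacity(request_capacity_hint(64));
    assert!(hashes_to_request.capacity() >= 16);

    // Exceeding the hint is handled transparently by geometric growth.
    hashes_to_request.extend(std::iter::repeat([0u8; 32]).take(20));
    assert!(hashes_to_request.capacity() >= 20);
}
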
@@ -657,7 +649,6 @@ where
         trace!(target: "net::tx",
             peer_id=format!("{peer_id:#}"),
             surplus_hashes=?*surplus_hashes,
-            %msg_version,
             %client,
             "some hashes in announcement from peer didn't fit in `GetPooledTransactions` request, buffering surplus hashes"
         );
@@ -668,7 +659,6 @@ where
         trace!(target: "net::tx",
             peer_id=format!("{peer_id:#}"),
             hashes=?*hashes_to_request,
-            %msg_version,
             %client,
             "sending hashes in `GetPooledTransactions` request to peer's session"
         );