chore: clean up lints (#5881)
@@ -54,8 +54,7 @@ pub struct NetworkArgs {
     #[arg(long, verbatim_doc_comment)]
     pub no_persist_peers: bool,

-    #[allow(rustdoc::invalid_html_tags)]
-    /// NAT resolution method (any|none|upnp|publicip|extip:<IP>)
+    /// NAT resolution method (any|none|upnp|publicip|extip:\<IP\>)
     #[arg(long, default_value = "any")]
     pub nat: NatResolver,

@@ -219,7 +219,6 @@ impl RpcServerArgs {
     /// Returns the handles for the launched regular RPC server(s) (if any) and the server handle
     /// for the auth server that handles the `engine_` API that's accessed by the consensus
     /// layer.
-    #[allow(clippy::too_many_arguments)]
     pub async fn start_servers<Reth, Engine, Conf>(
         &self,
         components: &Reth,

@@ -146,7 +146,6 @@ pub trait RethNodeCommandConfig: fmt::Debug {

         // The default payload builder is implemented on the unit type.
         #[cfg(not(feature = "optimism"))]
-        #[allow(clippy::let_unit_value)]
         let payload_builder = reth_basic_payload_builder::EthereumPayloadBuilder::default();

         // Optimism's payload builder is implemented on the OptimismPayloadBuilder type.

@@ -46,7 +46,6 @@ impl From<DatabaseError> for InitDatabaseError {
 }

 /// Write the genesis block if it has not already been written
-#[allow(clippy::field_reassign_with_default)]
 pub fn init_genesis<DB: Database>(
     db: Arc<DB>,
     chain: Arc<ChainSpec>,

@@ -235,7 +234,6 @@ mod tests {
     };
     use std::collections::HashMap;

-    #[allow(clippy::type_complexity)]
     fn collect_table_entries<DB, T>(
         tx: &<DB as Database>::TX,
     ) -> Result<Vec<TableRow<T>>, InitDatabaseError>

@@ -33,7 +33,7 @@ pub struct MiningTask<Client, Pool: TransactionPool> {
     pool: Pool,
     /// backlog of sets of transactions ready to be mined
     queued: VecDeque<Vec<Arc<ValidPoolTransaction<<Pool as TransactionPool>::Transaction>>>>,
-    /// TODO: ideally this would just be a sender of hashes
+    // TODO: ideally this would just be a sender of hashes
     to_engine: UnboundedSender<BeaconEngineMessage>,
     /// Used to notify consumers of new blocks
     canon_state_notification: CanonStateNotificationSender,

@@ -46,31 +46,31 @@ impl ForkchoiceStateTracker {
     }

     /// Returns whether the latest received FCU is valid: [ForkchoiceStatus::Valid]
-    #[allow(unused)]
+    #[allow(dead_code)]
     pub(crate) fn is_latest_valid(&self) -> bool {
         self.latest_status().map(|s| s.is_valid()).unwrap_or(false)
     }

     /// Returns whether the latest received FCU is syncing: [ForkchoiceStatus::Syncing]
-    #[allow(unused)]
+    #[allow(dead_code)]
     pub(crate) fn is_latest_syncing(&self) -> bool {
         self.latest_status().map(|s| s.is_syncing()).unwrap_or(false)
     }

     /// Returns whether the latest received FCU is syncing: [ForkchoiceStatus::Invalid]
-    #[allow(unused)]
+    #[allow(dead_code)]
     pub(crate) fn is_latest_invalid(&self) -> bool {
         self.latest_status().map(|s| s.is_invalid()).unwrap_or(false)
     }

     /// Returns the last valid head hash.
-    #[allow(unused)]
+    #[allow(dead_code)]
     pub(crate) fn last_valid_head(&self) -> Option<B256> {
         self.last_valid.as_ref().map(|s| s.head_block_hash)
     }

     /// Returns the head hash of the latest received FCU to which we need to sync.
-    #[allow(unused)]
+    #[allow(dead_code)]
     pub(crate) fn sync_target(&self) -> Option<B256> {
         self.last_syncing.as_ref().map(|s| s.head_block_hash)
     }

@@ -88,7 +88,7 @@ impl ForkchoiceStateTracker {

 /// Represents a forkchoice update and tracks the status we assigned to it.
 #[derive(Debug, Clone)]
-#[allow(unused)]
+#[allow(dead_code)]
 pub(crate) struct ReceivedForkchoiceState {
     state: ForkchoiceState,
     status: ForkchoiceStatus,
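Note: the recurring swap of #[allow(unused)] for #[allow(dead_code)] narrows the suppression: `unused` is a whole lint group (unused variables, imports, must-use results, and more), while `dead_code` silences only the never-used-item warning, so other real mistakes still surface. A minimal sketch of the difference (illustrative only, not part of this commit):

    #[allow(unused)] // broad: silences the entire `unused` lint group for this item
    fn helper_a() {}

    #[allow(dead_code)] // narrow: silences only the "never used" warning
    fn helper_b() {}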
@@ -10,7 +10,7 @@ use tracing::debug;

 #[derive(Debug)]
 pub(crate) struct PolledHook {
-    #[allow(unused)]
+    #[allow(dead_code)]
     pub(crate) name: &'static str,
     pub(crate) event: EngineHookEvent,
     pub(crate) action: Option<EngineHookAction>,

@@ -120,7 +120,7 @@ where
     }

     /// Returns `true` if a pipeline target is queued and will be triggered on the next `poll`.
-    #[allow(unused)]
+    #[allow(dead_code)]
     pub(crate) fn is_pipeline_sync_pending(&self) -> bool {
         self.pending_pipeline_target.is_some() && self.pipeline_state.is_idle()
     }

@@ -14,7 +14,6 @@
 #![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)]
 #![deny(unused_must_use, rust_2018_idioms, unused_crate_dependencies)]
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
-#![allow(clippy::non_canonical_clone_impl)]

 mod forkid;
 mod hardfork;

@@ -9,7 +9,6 @@ use reth_primitives::{BlockHash, BlockNumber, SealedBlock};

 /// Various error cases that can occur when a block violates tree assumptions.
 #[derive(Debug, Clone, Copy, thiserror::Error, Eq, PartialEq)]
-#[allow(missing_docs)]
 pub enum BlockchainTreeError {
     /// Thrown if the block number is lower than the last finalized block number.
     #[error("block number is lower than the last finalized block number #{last_finalized}")]

@@ -41,7 +40,7 @@ pub enum BlockchainTreeError {
         /// The block hash that could not be found.
         block_hash: BlockHash,
     },
-    // Thrown if the block failed to buffer
+    /// Thrown if the block failed to buffer
     #[error("block with hash {block_hash} failed to buffer")]
     BlockBufferingFailed {
         /// The block hash of the block that failed to buffer.

@@ -53,7 +52,6 @@ pub enum BlockchainTreeError {
 pub type CanonicalResult<T> = Result<T, CanonicalError>;

 /// Canonical Errors
-#[allow(missing_docs)]
 #[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)]
 pub enum CanonicalError {
     /// Error originating from validation operations.

@@ -63,17 +61,17 @@ pub enum CanonicalError {
     #[error(transparent)]
     BlockchainTree(#[from] BlockchainTreeError),
     /// Error indicating a transaction reverted during execution.
-    #[error("transaction error on revert: {inner}")]
-    CanonicalRevert { inner: String },
+    #[error("transaction error on revert: {0}")]
+    CanonicalRevert(String),
     /// Error indicating a transaction failed to commit during execution.
-    #[error("transaction error on commit: {inner}")]
-    CanonicalCommit { inner: String },
+    #[error("transaction error on commit: {0}")]
+    CanonicalCommit(String),
 }

 impl CanonicalError {
     /// Returns `true` if the error is fatal.
     pub fn is_fatal(&self) -> bool {
-        matches!(self, Self::CanonicalCommit { .. } | Self::CanonicalRevert { .. })
+        matches!(self, Self::CanonicalCommit(_) | Self::CanonicalRevert(_))
     }
 }

@@ -270,8 +268,8 @@ impl InsertBlockErrorKind {
             }
             InsertBlockErrorKind::Canonical(err) => match err {
                 CanonicalError::BlockchainTree(_) |
-                CanonicalError::CanonicalCommit { .. } |
-                CanonicalError::CanonicalRevert { .. } => false,
+                CanonicalError::CanonicalCommit(_) |
+                CanonicalError::CanonicalRevert(_) => false,
                 CanonicalError::Validation(_) => true,
             },
             InsertBlockErrorKind::BlockchainTree(_) => false,
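Note: converting `CanonicalRevert { inner: String }` to `CanonicalRevert(String)` lets thiserror format the payload positionally with `{0}` instead of a named field, and shortens pattern matches from `{ .. }` to `(_)`. A minimal sketch of the pattern (assumes the thiserror crate; hypothetical type, not from this diff):

    use thiserror::Error;

    #[derive(Debug, Error)]
    enum TxError {
        // Tuple variant: `{0}` refers to the first (and only) field.
        #[error("transaction error on revert: {0}")]
        Revert(String),
    }

    fn is_revert(e: &TxError) -> bool {
        matches!(e, TxError::Revert(_))
    }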
@@ -4,13 +4,10 @@ use crate::p2p::{
     error::PeerRequestResult,
     priority::Priority,
 };
-use futures::{future, Future, FutureExt};
-use reth_primitives::{BlockBody, WithPeerId, B256};
-use std::{
-    fmt::{Debug, Formatter},
-    pin::Pin,
-};
-use tokio::sync::oneshot::{self, Receiver};
+use futures::FutureExt;
+use reth_primitives::{BlockBody, B256};
+use std::fmt::{Debug, Formatter};
+use tokio::sync::oneshot;

 /// A test client for fetching bodies
 pub struct TestBodiesClient<F> {

@@ -46,7 +43,7 @@ where
         _priority: Priority,
     ) -> Self::Output {
         let (tx, rx) = oneshot::channel();
-        tx.send((self.responder)(hashes));
+        let _ = tx.send((self.responder)(hashes));
         Box::pin(rx.map(|x| match x {
             Ok(value) => value,
             Err(err) => Err(err.into()),
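Note: tokio's oneshot `send` returns a Result (the value comes back if the receiver was dropped), and an ignored Result trips the `unused_must_use` lint these crates deny; `let _ =` makes the discard explicit. A minimal sketch (assumes the tokio crate; illustrative only):

    use tokio::sync::oneshot;

    fn fire_and_forget() {
        let (tx, _rx) = oneshot::channel::<u32>();
        // Explicitly discard the Result: if the receiver is gone, we don't care.
        let _ = tx.send(42);
    }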
@@ -4,15 +4,15 @@ use rand::{
 };
 use reth_primitives::{
     proofs, sign_message, Account, Address, BlockNumber, Bytes, Header, Log, Receipt, SealedBlock,
-    SealedHeader, Signature, StorageEntry, Transaction, TransactionKind, TransactionSigned,
-    TxLegacy, B256, U256,
+    SealedHeader, StorageEntry, Transaction, TransactionKind, TransactionSigned, TxLegacy, B256,
+    U256,
 };
-use secp256k1::{KeyPair, Message as SecpMessage, Secp256k1, SecretKey, SECP256K1};
+use secp256k1::{KeyPair, Secp256k1};
 use std::{
     cmp::{max, min},
     collections::{hash_map::DefaultHasher, BTreeMap},
     hash::Hasher,
-    ops::{Range, RangeInclusive, Sub},
+    ops::{Range, RangeInclusive},
 };

 // TODO(onbjerg): Maybe we should split this off to its own crate, or move the helpers to the

@@ -220,7 +220,7 @@ where

     let mut changesets = Vec::new();

-    blocks.into_iter().for_each(|block| {
+    for _block in blocks {
         let mut changeset = Vec::new();
         let (from, to, mut transfer, new_entries) = random_account_change(
             rng,

@@ -263,7 +263,7 @@ where
         prev_to.balance = prev_to.balance.wrapping_add(transfer);

         changesets.push(changeset);
-    });
+    }

     let final_state = state
         .into_iter()

@@ -388,7 +388,7 @@ pub fn random_log<R: Rng>(rng: &mut R, address: Option<Address>, topics_count: O
 mod test {
     use super::*;
     use reth_primitives::{
-        hex, keccak256, public_key_to_address, AccessList, Address, TransactionKind, TxEip1559,
+        hex, public_key_to_address, AccessList, Signature, TransactionKind, TxEip1559,
     };
     use secp256k1::KeyPair;
     use std::str::FromStr;

@@ -1,4 +1,5 @@
 //! Testing support for headers related interfaces.
+
 use crate::{
     consensus::{self, Consensus, ConsensusError},
     p2p::{

@@ -6,19 +7,16 @@ use crate::{
         error::{DownloadError, DownloadResult, PeerRequestResult, RequestError},
         headers::{
             client::{HeadersClient, HeadersRequest},
-            downloader::{validate_header_download, HeaderDownloader, SyncTarget},
+            downloader::{HeaderDownloader, SyncTarget},
             error::HeadersDownloaderResult,
         },
         priority::Priority,
     },
 };
-use futures::{future, Future, FutureExt, Stream, StreamExt};
-use reth_eth_wire::BlockHeaders;
+use futures::{Future, FutureExt, Stream, StreamExt};
 use reth_primitives::{
-    BlockHash, BlockNumber, Head, Header, HeadersDirection, PeerId, SealedBlock, SealedHeader,
-    WithPeerId, B256, U256,
+    Header, HeadersDirection, PeerId, SealedBlock, SealedHeader, WithPeerId, U256,
 };
-use reth_rpc_types::engine::ForkchoiceState;
 use std::{
     fmt,
     pin::Pin,

@@ -28,12 +26,7 @@ use std::{
     },
     task::{ready, Context, Poll},
 };
-use tokio::sync::{
-    oneshot::{error::RecvError, Receiver},
-    watch,
-    watch::error::SendError,
-    Mutex,
-};
+use tokio::sync::Mutex;

 /// A test downloader which just returns the values that have been pushed to it.
 #[derive(Debug)]

@@ -67,11 +60,6 @@ impl TestHeaderDownloader {
             done: false,
         }
     }
-
-    /// Validate whether the header is valid in relation to it's parent
-    fn validate(&self, header: &SealedHeader, parent: &SealedHeader) -> DownloadResult<()> {
-        validate_header_download(&self.consensus, header, parent)
-    }
 }

 impl HeaderDownloader for TestHeaderDownloader {

@@ -94,7 +82,7 @@ impl Stream for TestHeaderDownloader {
             return Poll::Ready(Some(Ok(std::mem::take(&mut this.queued_headers))))
         }
         if this.download.is_none() {
-            this.download.insert(this.create_download());
+            this.download = Some(this.create_download());
         }

         match ready!(this.download.as_mut().unwrap().poll_next_unpin(cx)) {

@@ -293,8 +281,8 @@ impl Consensus for TestConsensus {

     fn validate_header_against_parent(
         &self,
-        header: &SealedHeader,
-        parent: &SealedHeader,
+        _header: &SealedHeader,
+        _parent: &SealedHeader,
     ) -> Result<(), ConsensusError> {
         if self.fail_validation() {
             Err(consensus::ConsensusError::BaseFeeMissing)

@@ -305,8 +293,8 @@ impl Consensus for TestConsensus {

     fn validate_header_with_total_difficulty(
         &self,
-        header: &Header,
-        total_difficulty: U256,
+        _header: &Header,
+        _total_difficulty: U256,
     ) -> Result<(), ConsensusError> {
         if self.fail_validation() {
             Err(consensus::ConsensusError::BaseFeeMissing)
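Note: prefixing a parameter with an underscore (`_header`) tells rustc the binding is intentionally unused, which satisfies the `unused_variables` lint without any #[allow] attribute. A minimal sketch (hypothetical trait; illustrative only):

    trait Validator {
        fn validate(&self, header: &str) -> bool;
    }

    struct AlwaysOk;

    impl Validator for AlwaysOk {
        // The leading underscore marks the intentionally unused argument.
        fn validate(&self, _header: &str) -> bool {
            true
        }
    }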
@@ -1,5 +1,3 @@
-#![allow(unused)]
-
 mod bodies;
 mod full_block;
 mod headers;

@@ -12,9 +12,6 @@
 use proc_macro::TokenStream;
 use syn::{parse_macro_input, DeriveInput};

-#[allow(unused_extern_crates)]
-extern crate proc_macro;
-
 mod expand;
 mod metric;
 mod with_attrs;

@@ -1,6 +1,6 @@
 //! Mock discovery support

-#![allow(missing_docs, unused)]
+#![allow(missing_docs)]

 use crate::{
     proto::{FindNode, Message, Neighbours, NodeEndpoint, Packet, Ping, Pong},

@@ -26,7 +26,7 @@ use tokio::{
     task::{JoinHandle, JoinSet},
 };
 use tokio_stream::{Stream, StreamExt};
-use tracing::{debug, error};
+use tracing::debug;

 /// Mock discovery node
 #[derive(Debug)]

@@ -34,7 +34,7 @@ pub struct MockDiscovery {
     local_addr: SocketAddr,
     local_enr: NodeRecord,
     secret_key: SecretKey,
-    udp: Arc<UdpSocket>,
+    _udp: Arc<UdpSocket>,
     _tasks: JoinSet<()>,
     /// Receiver for incoming messages
     ingress: IngressReceiver,

@@ -79,7 +79,7 @@ impl MockDiscovery {
             local_addr,
             local_enr,
             secret_key,
-            udp: socket,
+            _udp: socket,
             pending_pongs: Default::default(),
             pending_neighbours: Default::default(),
             command_rx,

@@ -88,7 +88,7 @@ impl MockDiscovery {
     }

     /// Spawn and consume the stream.
-    pub fn spawn(mut self) -> JoinHandle<()> {
+    pub fn spawn(self) -> JoinHandle<()> {
         tokio::task::spawn(async move {
             let _: Vec<_> = self.collect().await;
         })

@@ -282,8 +282,7 @@ pub fn rng_message(rng: &mut impl RngCore) -> Message {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::{Discv4Event, PingReason};
-    use reth_primitives::{hex_literal::hex, ForkHash, ForkId};
+    use crate::Discv4Event;
     use std::net::{IpAddr, Ipv4Addr};

     /// This test creates two local UDP sockets. The mocked discovery service responds to specific

@@ -294,10 +293,9 @@ mod tests {

         let mut rng = thread_rng();
         let (_, mut service) = create_discv4().await;
-        let (mut mockv4, mut cmd) = MockDiscovery::new().await.unwrap();
+        let (mut mockv4, _cmd) = MockDiscovery::new().await.unwrap();

         let mock_enr = mockv4.local_enr();
         let mock_addr = mockv4.local_addr();

         // we only want to test internally
         service.local_enr_mut().address = IpAddr::V4(Ipv4Addr::UNSPECIFIED);

@@ -314,7 +312,7 @@ mod tests {
         // process the mock pong
         let event = mockv4.next().await.unwrap();
         match event {
-            MockEvent::Pong { ping, pong, to } => {
+            MockEvent::Pong { ping: _, pong: _, to } => {
                 assert_eq!(to, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), discv_addr.port()));
             }
             MockEvent::Neighbours { .. } => {

@@ -1,5 +1,7 @@
-#![allow(unused)]
 //! Test helper impls for generating bodies
+
+#![allow(dead_code)]
+
 use reth_db::{database::Database, tables, transaction::DbTxMut, DatabaseEnv};
 use reth_interfaces::{db, p2p::bodies::response::BlockResponse};
 use reth_primitives::{Block, BlockBody, SealedBlock, SealedHeader, B256};

@@ -209,28 +209,28 @@ where
         header: &SealedHeader,
         request: HeadersRequest,
         peer_id: PeerId,
-    ) -> Result<(), HeadersResponseError> {
+    ) -> Result<(), Box<HeadersResponseError>> {
         match self.existing_sync_target() {
             SyncTargetBlock::Hash(hash) | SyncTargetBlock::HashAndNumber { hash, .. }
                 if header.hash() != hash =>
             {
-                Err(HeadersResponseError {
+                Err(Box::new(HeadersResponseError {
                     request,
                     peer_id: Some(peer_id),
                     error: DownloadError::InvalidTip(
                         GotExpected { got: header.hash(), expected: hash }.into(),
                     ),
-                })
+                }))
             }
             SyncTargetBlock::Number(number) if header.number != number => {
-                Err(HeadersResponseError {
+                Err(Box::new(HeadersResponseError {
                     request,
                     peer_id: Some(peer_id),
                     error: DownloadError::InvalidTipNumber(GotExpected {
                         got: header.number,
                         expected: number,
                     }),
-                })
+                }))
             }
             _ => Ok(()),
         }

@@ -243,7 +243,6 @@ where
     /// Returns an error if the given headers are invalid.
     ///
     /// Caution: this expects the `headers` to be sorted with _falling_ block numbers
-    #[allow(clippy::result_large_err)]
     fn process_next_headers(
         &mut self,
         request: HeadersRequest,

@@ -352,7 +351,6 @@ where
     }

     /// Handles the response for the request for the sync target
-    #[allow(clippy::result_large_err)]
     fn on_sync_target_outcome(
         &mut self,
         response: HeadersRequestOutcome,

@@ -442,7 +440,6 @@ where
     }

     /// Invoked when we received a response
-    #[allow(clippy::result_large_err)]
     fn on_headers_outcome(
         &mut self,
         response: HeadersRequestOutcome,
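Note: instead of allowing `clippy::result_large_err`, the error type is boxed: `Result<(), Box<HeadersResponseError>>` keeps the Err path to a single pointer, so callers' Ok paths do not pay for a large inline error value. A minimal sketch of the pattern (hypothetical type; illustrative only):

    struct BigError {
        payload: [u8; 256], // large inline data would bloat Result<T, BigError>
    }

    fn check(ok: bool) -> Result<(), Box<BigError>> {
        if ok {
            Ok(())
        } else {
            // Only the failure path allocates; the Result itself stays small.
            Err(Box::new(BigError { payload: [0; 256] }))
        }
    }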
@@ -1,5 +1,7 @@
-#![allow(unused)]
 //! Test helper impls for generating bodies
+
+#![allow(dead_code)]
+
 use reth_primitives::SealedHeader;

 /// Returns a new [SealedHeader] that's the child header of the given `parent`.

@@ -9,7 +9,6 @@
     html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
     issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
 )]
-#![allow(clippy::result_large_err)] // TODO(danipopes): fix this
 #![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)]
 #![deny(unused_must_use, rust_2018_idioms)]
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
@@ -1,34 +1,18 @@
 use super::file_codec::BlockFileCodec;
 use alloy_rlp::{Decodable, Header as RlpHeader};
-use itertools::Either;
-use reth_interfaces::{
-    p2p::{
-        bodies::client::{BodiesClient, BodiesFut},
-        download::DownloadClient,
-        error::RequestError,
-        headers::client::{HeadersClient, HeadersFut, HeadersRequest},
-        priority::Priority,
-    },
-    sync::{NetworkSyncUpdater, SyncState, SyncStateProvider},
+use reth_interfaces::p2p::{
+    bodies::client::{BodiesClient, BodiesFut},
+    download::DownloadClient,
+    error::RequestError,
+    headers::client::{HeadersClient, HeadersFut, HeadersRequest},
+    priority::Priority,
 };
 use reth_primitives::{
-    Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, Header, HeadersDirection, PeerId,
-    B256,
+    BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, Header, HeadersDirection, PeerId, B256,
 };
-use std::{
-    collections::HashMap,
-    iter::zip,
-    path::Path,
-    sync::{
-        atomic::{AtomicBool, Ordering},
-        Arc,
-    },
-};
+use std::{self, collections::HashMap, path::Path};
 use thiserror::Error;
-use tokio::{
-    fs::File,
-    io::{AsyncReadExt, BufReader},
-};
+use tokio::{fs::File, io::AsyncReadExt};
 use tokio_stream::StreamExt;
 use tokio_util::codec::FramedRead;
 use tracing::{trace, warn};

@@ -250,14 +234,12 @@ mod tests {
     use crate::{
         bodies::{
             bodies::BodiesDownloaderBuilder,
-            test_utils::{create_raw_bodies, insert_headers, zip_blocks},
+            test_utils::{insert_headers, zip_blocks},
         },
         headers::{reverse_headers::ReverseHeadersDownloaderBuilder, test_utils::child_header},
         test_utils::{generate_bodies, generate_bodies_file},
     };
-    use alloy_rlp::Encodable;
     use assert_matches::assert_matches;
-    use futures::SinkExt;
     use futures_util::stream::StreamExt;
     use reth_db::test_utils::create_test_rw_db;
     use reth_interfaces::{

@@ -269,12 +251,7 @@ mod tests {
     };
     use reth_primitives::{SealedHeader, MAINNET};
     use reth_provider::ProviderFactory;
-    use std::{
-        io::{Read, Seek, SeekFrom, Write},
-        sync::Arc,
-    };
-    use tokio::io::{AsyncSeekExt, AsyncWriteExt, BufWriter};
-    use tokio_util::codec::FramedWrite;
+    use std::sync::Arc;

     #[tokio::test]
     async fn streams_bodies_from_buffer() {

@@ -337,8 +314,7 @@ mod tests {
     #[tokio::test]
     async fn test_download_headers_from_file() {
         // Generate some random blocks
-        let db = create_test_rw_db();
-        let (file, headers, mut bodies) = generate_bodies_file(0..=19).await;
+        let (file, headers, _) = generate_bodies_file(0..=19).await;

         // now try to read them back
         let client = Arc::new(FileClient::from_file(file).await.unwrap());

@@ -31,7 +31,7 @@ impl Decoder for BlockFileCodec {
         if src.is_empty() {
             return Ok(None)
         }
-        let mut buf_slice = &mut src.as_ref();
+        let buf_slice = &mut src.as_ref();
         let body = Block::decode(buf_slice)?;
         src.advance(src.len() - buf_slice.len());
         Ok(Some(body))

@@ -1,24 +1,23 @@
-#![allow(unused)]
-//! Test helper impls
+//! Test helper impls.
+
+#![allow(dead_code)]

 use crate::bodies::test_utils::create_raw_bodies;
 use futures::SinkExt;
-use reth_interfaces::test_utils::generators::random_block_range;
+use reth_interfaces::test_utils::{generators, generators::random_block_range};
 use reth_primitives::{BlockBody, SealedHeader, B256};
 use std::{collections::HashMap, io::SeekFrom, ops::RangeInclusive};
-use tokio::{
-    fs::File,
-    io::{AsyncSeekExt, AsyncWriteExt, BufWriter},
-};
+use tokio::{fs::File, io::AsyncSeekExt};
 use tokio_util::codec::FramedWrite;

 mod bodies_client;
-mod file_client;
-mod file_codec;
-
 pub use bodies_client::TestBodiesClient;
+
+mod file_client;
 pub use file_client::{FileClient, FileClientError};
+
+mod file_codec;
 pub(crate) use file_codec::BlockFileCodec;
-use reth_interfaces::test_utils::generators;

 /// Metrics scope used for testing.
 pub(crate) const TEST_SCOPE: &str = "downloaders.test";

@@ -51,12 +50,12 @@ pub(crate) fn generate_bodies(
 /// Generate a set of bodies, write them to a temporary file, and return the file along with the
 /// bodies and corresponding block hashes
 pub(crate) async fn generate_bodies_file(
-    rng: RangeInclusive<u64>,
+    range: RangeInclusive<u64>,
 ) -> (tokio::fs::File, Vec<SealedHeader>, HashMap<B256, BlockBody>) {
-    let (headers, mut bodies) = generate_bodies(0..=19);
+    let (headers, bodies) = generate_bodies(range);
     let raw_block_bodies = create_raw_bodies(headers.clone().iter(), &mut bodies.clone());

-    let mut file: File = tempfile::tempfile().unwrap().into();
+    let file: File = tempfile::tempfile().unwrap().into();
     let mut writer = FramedWrite::new(file, BlockFileCodec);

     // rlp encode one after the other
@@ -1,4 +1,5 @@
+#![allow(missing_docs)]

 use crate::{
     error::ECIESErrorImpl,
     mac::{HeaderBytes, MAC},

@@ -1,4 +1,5 @@
+#![allow(missing_docs)]

 use aes::Aes256Enc;
 use block_padding::NoPadding;
 use cipher::BlockEncrypt;
@@ -1,28 +1,3 @@
-#![allow(dead_code, unreachable_pub, missing_docs, unused_variables)]
-
-use std::{
-    collections::VecDeque,
-    fmt, io,
-    pin::Pin,
-    task::{ready, Context, Poll},
-    time::Duration,
-};
-
-use alloy_rlp::{Decodable, Encodable, Error as RlpError, EMPTY_LIST_CODE};
-use futures::{Sink, SinkExt, StreamExt};
-use pin_project::pin_project;
-#[cfg(feature = "serde")]
-use serde::{Deserialize, Serialize};
-use tokio_stream::Stream;
-use tracing::{debug, trace};
-
-use reth_codecs::derive_arbitrary;
-use reth_metrics::metrics::counter;
-use reth_primitives::{
-    bytes::{Buf, BufMut, Bytes, BytesMut},
-    hex, GotExpected,
-};
-
 use crate::{
     capability::SharedCapabilities,
     disconnect::CanDisconnect,

@@ -30,6 +5,27 @@ use crate::{
     pinger::{Pinger, PingerEvent},
     DisconnectReason, HelloMessage, HelloMessageWithProtocols,
 };
+use alloy_rlp::{Decodable, Encodable, Error as RlpError, EMPTY_LIST_CODE};
+use futures::{Sink, SinkExt, StreamExt};
+use pin_project::pin_project;
+use reth_codecs::derive_arbitrary;
+use reth_metrics::metrics::counter;
+use reth_primitives::{
+    bytes::{Buf, BufMut, Bytes, BytesMut},
+    hex, GotExpected,
+};
+use std::{
+    collections::VecDeque,
+    fmt, io,
+    pin::Pin,
+    task::{ready, Context, Poll},
+    time::Duration,
+};
+use tokio_stream::Stream;
+use tracing::{debug, trace};
+
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Serialize};

 /// [`MAX_PAYLOAD_SIZE`] is the maximum size of an uncompressed message payload.
 /// This is defined in [EIP-706](https://eips.ethereum.org/EIPS/eip-706).

@@ -56,6 +52,7 @@ const PING_INTERVAL: Duration = Duration::from_secs(60);

 /// [`GRACE_PERIOD`] determines the amount of time to wait for a peer to disconnect after sending a
 /// [`P2PMessage::Disconnect`] message.
+#[allow(dead_code)]
 const GRACE_PERIOD: Duration = Duration::from_secs(2);

 /// [`MAX_P2P_CAPACITY`] is the maximum number of messages that can be buffered to be sent in the

@@ -323,9 +320,12 @@ impl<S> P2PStream<S> {
     }
 }

+/// Gracefully disconnects the connection by sending a disconnect message and stop reading new
+/// messages.
 pub trait DisconnectP2P {
+    /// Starts to gracefully disconnect.
     fn start_disconnect(&mut self, reason: DisconnectReason) -> Result<(), P2PStreamError>;

     /// Returns `true` if the connection is about to disconnect.
     fn is_disconnecting(&self) -> bool;
 }

@@ -842,6 +842,8 @@ mod tests {
             P2PStreamError::Disconnected(reason) => assert_eq!(reason, expected_disconnect),
             e => panic!("unexpected err: {e}"),
         }
+
+        handle.await.unwrap();
     }

     #[tokio::test]

@@ -1,4 +1,5 @@
+#![allow(missing_docs)]

 use super::{
     broadcast::NewBlockHashes, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders,
     GetNodeData, GetPooledTransactions, GetReceipts, NewBlock, NewPooledTransactionHashes66,

@@ -38,7 +38,6 @@ macro_rules! fuzz_type_and_name {
     ( $x:ty, $fuzzname:ident ) => {
         /// Fuzzes the round-trip encoding of the type.
         #[test_fuzz]
-        #[allow(non_snake_case)]
         fn $fuzzname(thing: $x) {
             crate::roundtrip_fuzz::<$x>(thing)
         }
@@ -117,7 +117,6 @@ impl Discovery {
     }

     /// Updates the `eth:ForkId` field in discv4.
-    #[allow(unused)]
     pub(crate) fn update_fork_id(&self, fork_id: ForkId) {
         if let Some(discv4) = &self.discv4 {
             // use forward-compatible forkid entry

@@ -59,8 +59,8 @@ pub struct EthRequestHandler<C> {
     /// The client type that can interact with the chain.
     client: C,
     /// Used for reporting peers.
-    #[allow(unused)]
     // TODO use to report spammers
+    #[allow(dead_code)]
     peers: PeersHandle,
     /// Incoming request from the [NetworkManager](crate::NetworkManager).
     incoming_requests: ReceiverStream<IncomingEthRequest>,

@@ -272,7 +272,6 @@ where
 /// This is the key type for spam detection cache. The counter is ignored during `PartialEq` and
 /// `Hash`.
 #[derive(Debug, PartialEq, Hash)]
-#[allow(unused)]
 struct RespondedGetBlockHeaders {
     req: (PeerId, GetBlockHeaders),
 }

@@ -355,8 +355,8 @@ impl PeerState {
 #[derive(Debug)]
 struct Request<Req, Resp> {
     /// The issued request object
-    /// TODO: this can be attached to the response in error case
-    #[allow(unused)]
+    // TODO: this can be attached to the response in error case
+    #[allow(dead_code)]
     request: Req,
     response: oneshot::Sender<Resp>,
 }

@@ -56,14 +56,12 @@ pub enum PeerMessage {
     /// All `eth` request variants.
     EthRequest(PeerRequest),
     /// Other than eth namespace message
-    #[allow(unused)]
     Other(RawCapabilityMessage),
 }

 /// Request Variants that only target block related data.
 #[derive(Debug, Clone, PartialEq, Eq)]
 #[allow(missing_docs)]
-#[allow(clippy::enum_variant_names)]
 pub enum BlockRequest {
     GetBlockHeaders(GetBlockHeaders),
     GetBlockBodies(GetBlockBodies),

@@ -71,7 +69,7 @@ pub enum BlockRequest {

 /// Protocol related request messages that expect a response
 #[derive(Debug)]
-#[allow(clippy::enum_variant_names, missing_docs)]
+#[allow(missing_docs)]
 pub enum PeerRequest {
     /// Request Block headers from the peer.
     ///

@@ -265,15 +263,8 @@ impl PeerResponseResult {
     }

     /// Returns whether this result is an error.
-    #[allow(unused)]
     pub fn is_err(&self) -> bool {
-        match self {
-            PeerResponseResult::BlockHeaders(res) => res.is_err(),
-            PeerResponseResult::BlockBodies(res) => res.is_err(),
-            PeerResponseResult::PooledTransactions(res) => res.is_err(),
-            PeerResponseResult::NodeData(res) => res.is_err(),
-            PeerResponseResult::Receipts(res) => res.is_err(),
-        }
+        self.err().is_some()
     }
 }
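Note: replacing the five-arm match with `self.err().is_some()` reuses the existing `err()` accessor, so a newly added enum variant cannot be forgotten in `is_err`. A minimal sketch of the idea (hypothetical type; not from this diff):

    enum Outcome {
        Headers(Result<u32, String>),
        Bodies(Result<u64, String>),
    }

    impl Outcome {
        /// Returns the error message, if this outcome is an error.
        fn err(&self) -> Option<&String> {
            match self {
                Outcome::Headers(res) => res.as_ref().err(),
                Outcome::Bodies(res) => res.as_ref().err(),
            }
        }

        /// Delegates to `err()` so the two methods cannot drift apart.
        fn is_err(&self) -> bool {
            self.err().is_some()
        }
    }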
@@ -60,7 +60,7 @@ const TIMEOUT_SCALING: u32 = 3;
 ///    - incoming _internal_ requests/broadcasts via the request/command channel
 ///    - incoming requests/broadcasts _from remote_ via the connection
 ///    - responses for handled ETH requests received from the remote peer.
-#[allow(unused)]
+#[allow(dead_code)]
 pub(crate) struct ActiveSession {
     /// Keeps track of request ids.
     pub(crate) next_id: u64,

@@ -662,7 +662,7 @@ pub(crate) struct ReceivedRequest {
     /// Receiver half of the channel that's supposed to receive the proper response.
     rx: PeerResponse,
     /// Timestamp when we read this msg from the wire.
-    #[allow(unused)]
+    #[allow(dead_code)]
     received: Instant,
 }

@@ -179,7 +179,6 @@ impl SessionCounter {
         }
     }

-    #[allow(unused)]
     pub(crate) fn ensure_pending_outbound(&self) -> Result<(), ExceedsSessionLimit> {
         Self::ensure(self.pending_outbound, self.limits.max_pending_outbound)
     }

@@ -254,7 +254,6 @@ pub enum ActiveSessionMessage {
         message: PeerMessage,
     },
     /// Received a message that does not match the announced capabilities of the peer.
-    #[allow(unused)]
     InvalidMessage {
         /// Identifier of the remote peer.
         peer_id: PeerId,

@@ -471,7 +471,7 @@ pub(crate) struct ActivePeer {
     /// Best block of the peer.
     pub(crate) best_hash: B256,
     /// The capabilities of the remote peer.
-    #[allow(unused)]
+    #[allow(dead_code)]
     pub(crate) capabilities: Arc<Capabilities>,
     /// A communication channel directly to the session task.
     pub(crate) request_tx: PeerRequestSender,

@@ -1067,7 +1067,6 @@ struct Peer {
     /// negotiated version of the session.
     version: EthVersion,
     /// The peer's client version.
-    #[allow(unused)]
     client_version: Arc<str>,
 }

@@ -166,9 +166,9 @@ struct ProtocolState {
 }

 #[derive(Debug)]
-#[allow(dead_code)]
 enum ProtocolEvent {
     Established {
+        #[allow(dead_code)]
         direction: Direction,
         peer_id: PeerId,
         to_connection: mpsc::UnboundedSender<Command>,

@@ -648,7 +648,7 @@ pub struct PayloadConfig {
     /// The chain spec.
     chain_spec: Arc<ChainSpec>,
     /// The rollup's compute pending block configuration option.
-    /// TODO(clabby): Implement this feature.
+    // TODO(clabby): Implement this feature.
     #[cfg(feature = "optimism")]
     #[allow(dead_code)]
     compute_pending_block: bool,

@@ -34,8 +34,7 @@ pub const EPOCH_DURATION: Duration = Duration::from_secs(12 * EPOCH_SLOTS);
 pub const BEACON_NONCE: u64 = 0u64;

 /// The default Ethereum block gas limit.
-///
-/// TODO: This should be a chain spec parameter.
+// TODO: This should be a chain spec parameter.
 /// See <https://github.com/paradigmxyz/reth/issues/3233>.
 pub const ETHEREUM_BLOCK_GAS_LIMIT: u64 = 30_000_000;
@@ -15,7 +15,6 @@
 #![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)]
 #![deny(unused_must_use, rust_2018_idioms)]
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
-#![allow(clippy::non_canonical_clone_impl)]

 mod account;
 pub mod basefee;

@@ -184,7 +184,7 @@ pub(crate) enum TraceStyle {
     /// Parity style tracer
     Parity,
     /// Geth style tracer
-    #[allow(unused)]
+    #[allow(dead_code)]
     Geth,
 }

@@ -177,7 +177,7 @@ pub(crate) struct ServerHandle(Arc<watch::Sender<()>>);

 impl ServerHandle {
     /// Wait for the server to stop.
-    #[allow(unused)]
+    #[allow(dead_code)]
     pub(crate) async fn stopped(self) {
         self.0.closed().await
     }

@@ -201,7 +201,7 @@ impl ConnectionGuard {
         }
     }

-    #[allow(unused)]
+    #[allow(dead_code)]
     pub(crate) fn available_connections(&self) -> usize {
         self.0.available_permits()
     }

@@ -238,7 +238,7 @@ fn execute_notification(notif: Notif<'_>, max_log_length: u32) -> MethodResponse
     response
 }

-#[allow(unused)]
+#[allow(dead_code)]
 pub(crate) struct HandleRequest<L: Logger> {
     pub(crate) methods: Methods,
     pub(crate) max_request_body_size: u32,

@@ -208,7 +208,7 @@ impl std::fmt::Debug for IpcServer {

 /// Data required by the server to handle requests received via an IPC connection
 #[derive(Debug, Clone)]
-#[allow(unused)]
+#[allow(dead_code)]
 pub(crate) struct ServiceData<L: Logger> {
     /// Registered server methods.
     pub(crate) methods: Methods,
@@ -1,5 +1,6 @@
+//! Types for the Ethereum 2.0 RPC protocol (beacon chain).
+
 #![allow(missing_docs)]
-//! Types for the Ethereum 2.0 RPC protocol (beacon chain)

 use alloy_primitives::FixedBytes;
 use constants::{BLS_PUBLIC_KEY_BYTES_LEN, BLS_SIGNATURE_BYTES_LEN};

@@ -1,4 +1,3 @@
-#![allow(missing_docs)]
 //! Payload support for the beacon API.
 //!
 //! Internal helper module to deserialize/serialize the payload attributes for the beacon API, which

@@ -9,6 +8,8 @@
 //!
 //! See also <https://github.com/ethereum/consensus-specs/blob/master/specs/deneb/beacon-chain.md#executionpayload>

+#![allow(missing_docs)]
+
 use crate::{
     beacon::{withdrawals::BeaconWithdrawal, BlsPublicKey},
     engine::ExecutionPayloadV3,

@@ -1,4 +1,5 @@
+#![allow(missing_docs)]

 use crate::serde_helpers::storage::JsonStorageKey;
 use alloy_primitives::{Address, Bytes, B256, B512, U256, U64};
 use serde::{Deserialize, Serialize};

@@ -1,5 +1,9 @@
+//! Engine API types:
+//! <https://github.com/ethereum/execution-apis/blob/main/src/engine/authentication.md>
+//! and <https://eips.ethereum.org/EIPS/eip-3675>,
+//! following the execution specs <https://github.com/ethereum/execution-apis/tree/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine>.
+
 #![allow(missing_docs)]
-//! Engine API types: <https://github.com/ethereum/execution-apis/blob/main/src/engine/authentication.md> and <https://eips.ethereum.org/EIPS/eip-3675> following the execution specs <https://github.com/ethereum/execution-apis/tree/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine>

 mod cancun;
 mod forkchoice;

@@ -1,5 +1,6 @@
+//! Geth tracing types.
+
 #![allow(missing_docs)]
-//! Geth tracing types

 use crate::{state::StateOverride, BlockOverrides};
 use alloy_primitives::{Bytes, B256, U256};

@@ -1,8 +1,9 @@
-#![allow(missing_docs)]
 //! The [`TransactionRequest`][crate::TransactionRequest] is a universal representation for a
 //! transaction deserialized from the json input of an RPC call. Depending on what fields are set,
 //! it can be converted into the container type [`TypedTransactionRequest`].

+#![allow(missing_docs)]
+
 use crate::{
     eth::transaction::AccessList,
     kzg::{Blob, Bytes48},

@@ -1,6 +1,7 @@
 //! MEV bundle type bindings

+#![allow(missing_docs)]
+
 use crate::{BlockId, BlockNumberOrTag, Log};
 use alloy_primitives::{Address, Bytes, TxHash, B256, U256, U64};
 use serde::{
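Note: several hunks in this region move the module doc comment (`//! ...`) ahead of `#![allow(missing_docs)]` and end it with a period. Both are inner-attribute lines, so either order compiles; the convention these crates settle on is documentation first, then lint attributes. A minimal sketch of the resulting layout (illustrative only):

    //! Short module-level documentation sentence.

    #![allow(missing_docs)]

    pub struct Undocumented;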
@@ -69,7 +69,7 @@ impl NodeRecord {
     }

     /// Creates a new record from a socket addr and peer id.
-    #[allow(unused)]
+    #[allow(dead_code)]
     pub fn new(addr: SocketAddr, id: PeerId) -> Self {
         Self { address: addr.ip(), tcp_port: addr.port(), udp_port: addr.port(), id }
     }

@@ -1,5 +1,6 @@
+//! Relay API bindings: <https://flashbots.github.io/relay-specs/>
+
 #![allow(missing_docs)]
-//! Relay API bindings <https://flashbots.github.io/relay-specs/>

 use crate::{
     beacon::{BlsPublicKey, BlsSignature},

@@ -318,7 +319,8 @@ mod tests {
     fn capella_bid_submission_ssz() {
         use ssz::{Decode, Encode};

-        let bytes = include_bytes!("../test_data/relay/signed_bid_submission_capella.ssz").to_vec();
+        let bytes =
+            include_bytes!("../../test_data/relay/signed_bid_submission_capella.ssz").to_vec();
         let bid = SignedBidSubmissionV2::from_ssz_bytes(&bytes).unwrap();
         assert_eq!(bytes, bid.as_ssz_bytes());
     }

@@ -193,7 +193,7 @@ struct EthBundleInner<Eth> {
     /// Access to commonly used code of the `eth` namespace
     eth_api: Eth,
     // restrict the number of concurrent tracing calls.
-    #[allow(unused)]
+    #[allow(dead_code)]
     blocking_task_guard: BlockingTaskGuard,
 }

@@ -1,4 +1,3 @@
-#![allow(dead_code, unused_variables)]
 use crate::result::internal_rpc_err;
 use async_trait::async_trait;
 use jsonrpsee::core::RpcResult;

@@ -11,7 +10,7 @@ use reth_rpc_types::{

 const API_LEVEL: u64 = 8;

-/// Otterscan Api
+/// Otterscan API.
 #[derive(Debug)]
 pub struct OtterscanApi<Eth> {
     eth: Eth,

@@ -40,17 +39,17 @@ where
     }

     /// Handler for `ots_getInternalOperations`
-    async fn get_internal_operations(&self, tx_hash: TxHash) -> RpcResult<Vec<InternalOperation>> {
+    async fn get_internal_operations(&self, _tx_hash: TxHash) -> RpcResult<Vec<InternalOperation>> {
         Err(internal_rpc_err("unimplemented"))
     }

     /// Handler for `ots_getTransactionError`
-    async fn get_transaction_error(&self, tx_hash: TxHash) -> RpcResult<String> {
+    async fn get_transaction_error(&self, _tx_hash: TxHash) -> RpcResult<String> {
         Err(internal_rpc_err("unimplemented"))
     }

     /// Handler for `ots_traceTransaction`
-    async fn trace_transaction(&self, tx_hash: TxHash) -> RpcResult<TraceEntry> {
+    async fn trace_transaction(&self, _tx_hash: TxHash) -> RpcResult<TraceEntry> {
         Err(internal_rpc_err("unimplemented"))
     }

@@ -72,9 +71,9 @@ where
     /// Handler for `getBlockTransactions`
     async fn get_block_transactions(
         &self,
-        block_number: BlockNumberOrTag,
-        page_number: usize,
-        page_size: usize,
+        _block_number: BlockNumberOrTag,
+        _page_number: usize,
+        _page_size: usize,
     ) -> RpcResult<OtsBlockTransactions> {
         Err(internal_rpc_err("unimplemented"))
     }

@@ -82,9 +81,9 @@ where
     /// Handler for `searchTransactionsBefore`
     async fn search_transactions_before(
         &self,
-        address: Address,
-        block_number: BlockNumberOrTag,
-        page_size: usize,
+        _address: Address,
+        _block_number: BlockNumberOrTag,
+        _page_size: usize,
     ) -> RpcResult<TransactionsWithReceipts> {
         Err(internal_rpc_err("unimplemented"))
     }

@@ -92,9 +91,9 @@ where
     /// Handler for `searchTransactionsAfter`
     async fn search_transactions_after(
         &self,
-        address: Address,
-        block_number: BlockNumberOrTag,
-        page_size: usize,
+        _address: Address,
+        _block_number: BlockNumberOrTag,
+        _page_size: usize,
     ) -> RpcResult<TransactionsWithReceipts> {
         Err(internal_rpc_err("unimplemented"))
     }

@@ -102,14 +101,14 @@ where
     /// Handler for `getTransactionBySenderAndNonce`
     async fn get_transaction_by_sender_and_nonce(
         &self,
-        sender: Address,
-        nonce: u64,
+        _sender: Address,
+        _nonce: u64,
     ) -> RpcResult<Option<Transaction>> {
         Err(internal_rpc_err("unimplemented"))
     }

     /// Handler for `getContractCreator`
-    async fn get_contract_creator(&self, address: Address) -> RpcResult<Option<ContractCreator>> {
+    async fn get_contract_creator(&self, _address: Address) -> RpcResult<Option<ContractCreator>> {
         Err(internal_rpc_err("unimplemented"))
     }
 }
@@ -123,7 +123,7 @@ pub enum PipelineError {
     Provider(#[from] ProviderError),
     /// The pipeline encountered an error while trying to send an event.
     #[error("pipeline encountered an error while trying to send an event")]
-    Channel(#[from] SendError<PipelineEvent>),
+    Channel(#[from] Box<SendError<PipelineEvent>>),
     /// The stage encountered an internal error.
     #[error(transparent)]
     Internal(Box<dyn std::error::Error + Send + Sync>),
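Note: boxing the #[from] payload shrinks the enum: `SendError<PipelineEvent>` carries the whole unsent event inline, while `Box<SendError<...>>` is pointer-sized, so `PipelineError` stays small for every caller; the trade-off is that conversion sites must box the error before it converts. A minimal sketch of the shape (hypothetical names; not this crate's API):

    use thiserror::Error;

    #[derive(Debug)]
    struct Event([u8; 128]); // large payload

    #[derive(Debug, Error)]
    #[error("send failed")]
    struct SendFailed(Event);

    #[derive(Debug, Error)]
    enum PipelineErr {
        // `#[from]` generates `From<Box<SendFailed>>`, keeping the variant small.
        #[error("channel closed")]
        Channel(#[from] Box<SendFailed>),
    }

    fn forward(e: SendFailed) -> Result<(), PipelineErr> {
        Err(PipelineErr::Channel(Box::new(e)))
    }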
@@ -64,7 +64,6 @@
     html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
     issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
 )]
-#![allow(clippy::result_large_err)] // TODO(danipopes): fix this
 #![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)]
 #![deny(unused_must_use, rust_2018_idioms)]
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]

@@ -1,10 +1,13 @@
-#![allow(unused)]
 use reth_primitives::stage::StageId;

+#[cfg(test)]
 mod macros;
+#[cfg(test)]
 pub(crate) use macros::*;

+#[cfg(test)]
 mod runner;
+#[cfg(test)]
 pub(crate) use runner::{
     ExecuteStageTestRunner, StageTestRunner, TestRunnerError, UnwindStageTestRunner,
 };
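Note: replacing a blanket `#![allow(unused)]` with `#[cfg(test)]` on the helper modules compiles them only for test builds, so nothing is unused in a normal build in the first place. A minimal sketch (illustrative only):

    pub fn always_available() -> u32 {
        1
    }

    #[cfg(test)]
    mod test_helpers {
        /// Only compiled for `cargo test`, so it cannot trip dead-code lints otherwise.
        pub(crate) fn fixture() -> u32 {
            41
        }
    }

    #[cfg(test)]
    mod tests {
        #[test]
        fn uses_fixture() {
            assert_eq!(super::test_helpers::fixture() + super::always_available(), 42);
        }
    }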
@@ -2,9 +2,8 @@ use super::TestStageDB;
 use crate::{ExecInput, ExecOutput, Stage, StageError, StageExt, UnwindInput, UnwindOutput};
 use reth_db::{test_utils::TempDatabase, DatabaseEnv};
 use reth_interfaces::db::DatabaseError;
-use reth_primitives::MAINNET;
-use reth_provider::{ProviderError, ProviderFactory};
-use std::{borrow::Borrow, sync::Arc};
+use reth_provider::ProviderError;
+use std::sync::Arc;
 use tokio::sync::oneshot;

 #[derive(thiserror::Error, Debug)]

@@ -3,25 +3,19 @@ use reth_db::{
     cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO},
     database::Database,
     models::{AccountBeforeTx, StoredBlockBodyIndices},
-    table::{Table, TableRow},
+    table::Table,
     tables,
     test_utils::{create_test_rw_db, create_test_rw_db_with_path, TempDatabase},
     transaction::{DbTx, DbTxMut},
     DatabaseEnv, DatabaseError as DbError,
 };
-use reth_interfaces::{provider::ProviderResult, test_utils::generators::ChangeSet, RethResult};
+use reth_interfaces::{provider::ProviderResult, test_utils::generators::ChangeSet};
 use reth_primitives::{
     keccak256, Account, Address, BlockNumber, Receipt, SealedBlock, SealedHeader, StorageEntry,
     TxHash, TxNumber, B256, MAINNET, U256,
 };
-use reth_provider::{DatabaseProviderRO, DatabaseProviderRW, HistoryWriter, ProviderFactory};
-use std::{
-    borrow::Borrow,
-    collections::BTreeMap,
-    ops::{Deref, RangeInclusive},
-    path::{Path, PathBuf},
-    sync::Arc,
-};
+use reth_provider::{HistoryWriter, ProviderFactory};
+use std::{collections::BTreeMap, path::Path, sync::Arc};

 /// Test database that is used for testing stage implementations.
 #[derive(Debug)]

@@ -46,7 +40,7 @@ impl TestStageDB {
     where
         F: FnOnce(&<DatabaseEnv as Database>::TXMut) -> ProviderResult<()>,
     {
-        let mut tx = self.factory.provider_rw()?;
+        let tx = self.factory.provider_rw()?;
         f(tx.tx_ref())?;
         tx.commit().expect("failed to commit");
         Ok(())

@@ -245,18 +239,20 @@ impl TestStageDB {
             let hashed_entry = StorageEntry { key: keccak256(entry.key), ..entry };

             let mut cursor = tx.cursor_dup_write::<tables::PlainStorageState>()?;
-            if let Some(e) = cursor
+            if cursor
                 .seek_by_key_subkey(address, entry.key)?
                 .filter(|e| e.key == entry.key)
+                .is_some()
             {
                 cursor.delete_current()?;
             }
             cursor.upsert(address, entry)?;

             let mut cursor = tx.cursor_dup_write::<tables::HashedStorage>()?;
-            if let Some(e) = cursor
+            if cursor
                 .seek_by_key_subkey(hashed_address, hashed_entry.key)?
                 .filter(|e| e.key == hashed_entry.key)
+                .is_some()
             {
                 cursor.delete_current()?;
             }
|
||||
})
|
||||
}
|
||||
|
||||
pub fn insert_history<I>(&self, changesets: I, block_offset: Option<u64>) -> ProviderResult<()>
|
||||
pub fn insert_history<I>(&self, changesets: I, _block_offset: Option<u64>) -> ProviderResult<()>
|
||||
where
|
||||
I: IntoIterator<Item = ChangeSet>,
|
||||
{
|
||||
|
||||
@ -5,8 +5,6 @@
|
||||
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
|
||||
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
|
||||
)]
|
||||
// TODO(danipopes): add these warnings
|
||||
// #![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)]
|
||||
#![deny(unused_must_use, rust_2018_idioms)]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
|
||||
|
||||
|
||||
@ -14,7 +14,6 @@
|
||||
)]
|
||||
#![deny(unused_must_use, rust_2018_idioms)]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
|
||||
#![allow(clippy::non_canonical_clone_impl)]
|
||||
#![cfg_attr(not(feature = "std"), no_std)]
|
||||
|
||||
extern crate alloc;
|
||||
|
||||
@ -1,14 +1,13 @@
|
||||
#![allow(dead_code, unused_imports, non_snake_case)]
|
||||
|
||||
use criterion::{
|
||||
black_box, criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion,
|
||||
};
|
||||
use pprof::criterion::{Output, PProfProfiler};
|
||||
use reth_db::{
|
||||
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW},
|
||||
table::{Decode, Decompress, DupSort},
|
||||
tables::*,
|
||||
transaction::DbTx,
|
||||
};
|
||||
use std::time::Instant;
|
||||
|
||||
criterion_group! {
|
||||
name = benches;
|
||||
|
||||
@ -1,5 +1,3 @@
|
||||
#![allow(dead_code, unused_imports, non_snake_case)]
|
||||
|
||||
use criterion::{
|
||||
black_box, criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion,
|
||||
};
|
||||
@ -10,12 +8,8 @@ use proptest::{
|
||||
strategy::{Strategy, ValueTree},
|
||||
test_runner::TestRunner,
|
||||
};
|
||||
use reth_db::{
|
||||
cursor::{DbCursorRW, DbDupCursorRO, DbDupCursorRW},
|
||||
TxHashNumber,
|
||||
};
|
||||
use std::{collections::HashSet, time::Instant};
|
||||
use test_fuzz::runtime::num_traits::Zero;
|
||||
use reth_db::{cursor::DbCursorRW, TxHashNumber};
|
||||
use std::collections::HashSet;
|
||||
|
||||
criterion_group! {
|
||||
name = benches;
|
||||
@ -216,6 +210,7 @@ where
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
#[allow(dead_code)]
|
||||
struct TableStats {
|
||||
page_size: usize,
|
||||
leaf_pages: usize,
|
||||
|
||||
@@ -1,46 +1,60 @@
-#![allow(dead_code, unused_imports, non_snake_case)]
+#![allow(non_snake_case)]

-use iai::main;
 use paste::paste;
 use reth_db::table::{Compress, Decode, Decompress, Encode, Table};

+mod utils;
+use utils::*;
+
 macro_rules! impl_iai_inner {
     (
-        $(($name:tt, $mod:tt, $compress:tt, $decompress:tt, $encode:tt, $decode:tt, $seqread:tt, $randread:tt, $seqwrite:tt, $randwrite:tt))+
+        $(($name:ident, $mod:ident, $compress:ident, $decompress:ident, $encode:ident, $decode:ident, $seqread:ident, $randread:ident, $seqwrite:ident, $randwrite:ident))+
     ) => {
-        $(
-            mod $mod {
-                use iai::{black_box};
-                include!("./utils.rs");
+        $(
+            mod $mod {
+                use super::*;
+                use std::hint::black_box;

-                pub fn $compress() {
-                    for (_, _, v, _) in black_box(load_vectors::<reth_db::tables::$name>()) {
-                        black_box(v.compress());
-                    }
+                pub fn $compress() {
+                    for (_, _, v, _) in black_box(load_vectors::<reth_db::tables::$name>()) {
+                        black_box(v.compress());
+                    }
                 }
-                pub fn $decompress() {
-                    for (_, _, _, comp) in black_box(load_vectors::<reth_db::tables::$name>()) {
-                        let _ = black_box(<reth_db::tables::$name as Table>::Value::decompress(comp));
-                    }
-                }
-                pub fn $encode() {
-                    for (k, _, _, _) in black_box(load_vectors::<reth_db::tables::$name>()) {
-                        black_box(k.encode());
-                    }
-                }
-                pub fn $decode() {
-                    for (_, enc, _, _) in black_box(load_vectors::<reth_db::tables::$name>()) {
-                        let _ = black_box(<reth_db::tables::$name as Table>::Key::decode(enc));
-                    }
-                }
-                pub fn $seqread() {}
-                pub fn $randread() {}
-                pub fn $seqwrite() {}
-                pub fn $randwrite() {}
-            }
-            use $mod::*;
-        )+

-        main!(
+                pub fn $decompress() {
+                    for (_, _, _, comp) in black_box(load_vectors::<reth_db::tables::$name>()) {
+                        let _ = black_box(<reth_db::tables::$name as Table>::Value::decompress(comp));
+                    }
+                }
+
+                pub fn $encode() {
+                    for (k, _, _, _) in black_box(load_vectors::<reth_db::tables::$name>()) {
+                        black_box(k.encode());
+                    }
+                }
+
+                pub fn $decode() {
+                    for (_, enc, _, _) in black_box(load_vectors::<reth_db::tables::$name>()) {
+                        let _ = black_box(<reth_db::tables::$name as Table>::Key::decode(enc));
+                    }
+                }
+
+                #[allow(dead_code)]
+                pub fn $seqread() {}
+
+                #[allow(dead_code)]
+                pub fn $randread() {}
+
+                #[allow(dead_code)]
+                pub fn $seqwrite() {}
+
+                #[allow(dead_code)]
+                pub fn $randwrite() {}
+            }
+            use $mod::*;
+        )+
+
+        iai::main!(

@@ -52,7 +66,7 @@ macro_rules! impl_iai_inner {
 }

 macro_rules! impl_iai {
-    ($($name:tt),+) => {
+    ($($name:ident),+) => {
         paste! {
             impl_iai_inner!(
                 $(
@@ -1,9 +1,8 @@
-#[allow(unused_imports)]
 use reth_db::{
     database::Database,
-    table::*,
+    table::{Compress, Encode, Table, TableRow},
     test_utils::create_test_rw_db_with_path,
-    transaction::{DbTx, DbTxMut},
+    transaction::DbTxMut,
     DatabaseEnv,
 };
 use reth_primitives::{fs, Bytes};

@@ -19,7 +18,7 @@ const RANDOM_INDEXES: [usize; 10] = [23, 2, 42, 5, 3, 99, 54, 0, 33, 64];

 /// Returns bench vectors in the format: `Vec<(Key, EncodedKey, Value, CompressedValue)>`.
-fn load_vectors<T: reth_db::table::Table>() -> Vec<(T::Key, Bytes, T::Value, Bytes)>
+#[allow(dead_code)]
+pub(crate) fn load_vectors<T: Table>() -> Vec<(T::Key, Bytes, T::Value, Bytes)>
 where
     T: Default,
     T::Key: Default + Clone + for<'de> serde::Deserialize<'de>,

@@ -50,7 +49,7 @@ where
 /// Sets up a clear database at `bench_db_path`.
 #[allow(clippy::ptr_arg)]
-fn set_up_db<T>(
+#[allow(dead_code)]
+pub(crate) fn set_up_db<T>(
     bench_db_path: &Path,
     pair: &Vec<(<T as Table>::Key, Bytes, <T as Table>::Value, Bytes)>,
 ) -> DatabaseEnv

@@ -15,7 +15,7 @@ use core::ops::Bound;
 use std::{collections::BTreeMap, ops::RangeBounds};

 /// Mock database used for testing with inner BTreeMap structure
-/// TODO
+// TODO
 #[derive(Clone, Debug, Default)]
 pub struct DatabaseMock {
     /// Main data. TODO (Make it table aware)

@@ -66,11 +66,11 @@ macro_rules! impl_fuzzer_key {

 /// Fuzzer generates a random instance of the object and proceeds to compress and decompress it. It
 /// then makes sure that it matches the original object.
-#[allow(unused)]
+#[allow(unused_macros)]
 macro_rules! impl_fuzzer_value {
     ($($name:tt),+) => {
         $(
-            impl_fuzzer_with_input!(($name, $name, Compress, compress, Decompress, decompress));
+            impl_fuzzer_value_with_input!($name, $name);
         )+
     };
 }

@@ -1,9 +1,8 @@
-//! Integrates different codecs into table::Encode and table::Decode
+//! Integrates different codecs into `table::Encode` and `table::Decode`.

 mod compact;
 pub use compact::CompactU256;

 pub mod fuzz;

 mod postcard;
 mod scale;
@ -1,43 +0,0 @@
#![allow(unused)]

use crate::{
table::{Decode, Encode},
DatabaseError,
};
use postcard::{from_bytes, to_allocvec, to_vec};
use reth_primitives::*;

// Just add `Serialize` and `Deserialize`, and set impl_heapless_postcard!(T, MaxSize(T))
//
//
// use serde::{Deserialize, Serialize};
//
// #[derive(Serialize, Deserialize )]
// pub struct T {
// }
//
// impl_heapless_postcard!(T, MaxSize(T))
macro_rules! impl_postcard {
($($name:tt),+) => {
$(
impl Encode for $name {
type Encoded = Vec<u8>;

fn encode(self) -> Self::Encoded {
to_allocvec(&self).expect("Failed to encode")
}
}

impl Decode for $name {
fn decode<B: Into<Bytes>>(value: B) -> Result<Self, Error> {
from_bytes(&value.into()).map_err(|e| Error::Decode(e.into()))
}
}
)+
};
}

type VecU8 = Vec<u8>;

//#[cfg(feature = "bench-postcard")]
//impl_postcard!(VecU8, Receipt, B256, U256, Address, u8, u16, u64, Header, Account, Log, TxType);
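Although the codec file is deleted here, the pattern it implemented was a plain serde roundtrip through `postcard`. A small self-contained sketch of that roundtrip, assuming the `postcard` crate with its alloc support and serde derive (`Pair` is an illustrative type, not one of reth's):

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Pair {
    a: u32,
    b: u64,
}

fn main() {
    let original = Pair { a: 1, b: 2 };
    // Encode to a heap-allocated vector, as the removed Encode impl did.
    let bytes = postcard::to_allocvec(&original).expect("Failed to encode");
    // Decode back; a failure here is what the macro surfaced as a decode error.
    let decoded: Pair = postcard::from_bytes(&bytes).expect("Failed to decode");
    assert_eq!(decoded, original);
}
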
@ -291,12 +291,12 @@ impl EnvironmentKind {
}

#[derive(Copy, Clone, Debug)]
pub(crate) struct TxnPtr(pub *mut ffi::MDBX_txn);
pub(crate) struct TxnPtr(pub(crate) *mut ffi::MDBX_txn);
unsafe impl Send for TxnPtr {}
unsafe impl Sync for TxnPtr {}

#[derive(Copy, Clone, Debug)]
pub(crate) struct EnvPtr(pub *mut ffi::MDBX_env);
pub(crate) struct EnvPtr(pub(crate) *mut ffi::MDBX_env);
unsafe impl Send for EnvPtr {}
unsafe impl Sync for EnvPtr {}

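The wrappers above follow a common ffi pattern: raw pointers are not `Send`/`Sync` by default, so a newtype asserts (unsafely) that the handle is only used in ways the C library documents as thread-safe. A standalone sketch with a hypothetical `Handle` standing in for the ffi type:

/// Hypothetical stand-in for an opaque ffi handle type.
struct Handle {
    _private: (),
}

/// Newtype around the raw pointer; `Copy`/`Clone` mirror the wrappers above.
#[derive(Copy, Clone, Debug)]
struct HandlePtr(*mut Handle);

// SAFETY: sound only if every use of the pointer follows the C library's
// documented threading rules, which is the invariant the wrapper encodes.
unsafe impl Send for HandlePtr {}
unsafe impl Sync for HandlePtr {}

fn main() {
    let ptr = HandlePtr(std::ptr::null_mut());
    // The wrapper can now cross thread boundaries, e.g. into a worker thread.
    std::thread::spawn(move || {
        let _ = ptr;
    })
    .join()
    .unwrap();
}
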
@ -309,6 +309,7 @@ pub(crate) enum TxnManagerMessage {
/// Environment statistics.
///
/// Contains information about the size and layout of an MDBX environment or database.
#[derive(Debug)]
#[repr(transparent)]
pub struct Stat(ffi::MDBX_stat);

@ -362,6 +363,7 @@ impl Stat {
}
}

#[derive(Debug)]
#[repr(transparent)]
pub struct GeometryInfo(ffi::MDBX_envinfo__bindgen_ty_1);

@ -374,6 +376,7 @@ impl GeometryInfo {
/// Environment information.
///
/// Contains environment information about the map size, readers, last txn id etc.
#[derive(Debug)]
#[repr(transparent)]
pub struct Info(ffi::MDBX_envinfo);

@ -174,7 +174,7 @@ impl fmt::Display for Error {
}

#[inline]
pub fn mdbx_result(err_code: c_int) -> Result<bool> {
pub(crate) fn mdbx_result(err_code: c_int) -> Result<bool> {
match err_code {
ffi::MDBX_SUCCESS => Ok(false),
ffi::MDBX_RESULT_TRUE => Ok(true),

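The mapping itself is a small tri-state: zero means success, a designated code means "success, with flag set", and everything else is an error. A sketch with illustrative constants (the real values come from the generated `ffi` bindings, not these numbers):

// Illustrative constants; the real ones are ffi::MDBX_SUCCESS and
// ffi::MDBX_RESULT_TRUE from the bindgen output.
const SUCCESS: i32 = 0;
const RESULT_TRUE: i32 = -1;

fn mdbx_result_sketch(err_code: i32) -> Result<bool, i32> {
    match err_code {
        SUCCESS => Ok(false),
        RESULT_TRUE => Ok(true),
        other => Err(other),
    }
}
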
@ -4,9 +4,7 @@
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![allow(clippy::type_complexity)]
// TODO(danipopes): add these warnings
// #![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)]
#![warn(missing_debug_implementations, unreachable_pub, rustdoc::all)] // TODO(danipopes): missing_docs
#![deny(unused_must_use, rust_2018_idioms)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]

@ -545,6 +545,7 @@ impl TransactionPtr {
///
/// Contains information about latency of commit stages.
/// Inner struct stores this info in 1/65536 of seconds units.
#[derive(Debug)]
#[repr(transparent)]
pub struct CommitLatency(ffi::MDBX_commit_latency);

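Since the inner struct stores latencies in 1/65536ths of a second, converting a raw counter to seconds is a single fixed-point division. A sketch (the `u32` counter is illustrative of the fields `MDBX_commit_latency` exposes):

fn units_to_seconds(raw: u32) -> f64 {
    // 1 unit = 1/65536 s, so 32768 raw units are exactly half a second.
    raw as f64 / 65536.0
}

fn main() {
    assert_eq!(units_to_seconds(32_768), 0.5);
}
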
@ -237,7 +237,7 @@ impl Compression for Zstd {
mod dictionaries_serde {
use super::*;

pub fn serialize<S>(
pub(crate) fn serialize<S>(
dictionaries: &Option<Arc<ZstdDictionaries<'static>>>,
serializer: S,
) -> Result<S::Ok, S::Error>
@ -250,7 +250,7 @@ mod dictionaries_serde {
}
}

pub fn deserialize<'de, D>(
pub(crate) fn deserialize<'de, D>(
deserializer: D,
) -> Result<Option<Arc<ZstdDictionaries<'static>>>, D::Error>
where
@ -264,7 +264,7 @@ mod dictionaries_serde {
/// List of [`ZstdDictionary`]
#[cfg_attr(test, derive(PartialEq))]
#[derive(Serialize, Deserialize, Deref)]
pub struct ZstdDictionaries<'a>(Vec<ZstdDictionary<'a>>);
pub(crate) struct ZstdDictionaries<'a>(Vec<ZstdDictionary<'a>>);

impl<'a> std::fmt::Debug for ZstdDictionaries<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
@ -274,12 +274,12 @@ impl<'a> std::fmt::Debug for ZstdDictionaries<'a> {

impl<'a> ZstdDictionaries<'a> {
/// Creates [`ZstdDictionaries`].
pub fn new(raw: Vec<RawDictionary>) -> Self {
pub(crate) fn new(raw: Vec<RawDictionary>) -> Self {
Self(raw.into_iter().map(ZstdDictionary::Raw).collect())
}

/// Loads a list [`RawDictionary`] into a list of [`ZstdDictionary::Loaded`].
pub fn load(raw: Vec<RawDictionary>) -> Self {
pub(crate) fn load(raw: Vec<RawDictionary>) -> Self {
Self(
raw.into_iter()
.map(|dict| ZstdDictionary::Loaded(DecoderDictionary::copy(&dict)))
@ -288,7 +288,7 @@ impl<'a> ZstdDictionaries<'a> {
}

/// Creates a list of decompressors from a list of [`ZstdDictionary::Loaded`].
pub fn decompressors(&self) -> Result<Vec<Decompressor<'_>>, NippyJarError> {
pub(crate) fn decompressors(&self) -> Result<Vec<Decompressor<'_>>, NippyJarError> {
Ok(self
.iter()
.flat_map(|dict| {
@ -300,7 +300,7 @@ impl<'a> ZstdDictionaries<'a> {
}

/// Creates a list of compressors from a list of [`ZstdDictionary::Raw`].
pub fn compressors(&self) -> Result<Vec<Compressor<'_>>, NippyJarError> {
pub(crate) fn compressors(&self) -> Result<Vec<Compressor<'_>>, NippyJarError> {
Ok(self
.iter()
.flat_map(|dict| {
@ -314,14 +314,14 @@ impl<'a> ZstdDictionaries<'a> {

/// A Zstd dictionary. It's created and serialized with [`ZstdDictionary::Raw`], and deserialized as
/// [`ZstdDictionary::Loaded`].
pub enum ZstdDictionary<'a> {
pub(crate) enum ZstdDictionary<'a> {
Raw(RawDictionary),
Loaded(DecoderDictionary<'a>),
}

impl<'a> ZstdDictionary<'a> {
/// Returns a reference to the expected `RawDictionary`
pub fn raw(&self) -> Option<&RawDictionary> {
pub(crate) fn raw(&self) -> Option<&RawDictionary> {
match self {
ZstdDictionary::Raw(dict) => Some(dict),
ZstdDictionary::Loaded(_) => None,
@ -329,7 +329,7 @@ impl<'a> ZstdDictionary<'a> {
}

/// Returns a reference to the expected `DecoderDictionary`
pub fn loaded(&self) -> Option<&DecoderDictionary<'_>> {
pub(crate) fn loaded(&self) -> Option<&DecoderDictionary<'_>> {
match self {
ZstdDictionary::Raw(_) => None,
ZstdDictionary::Loaded(dict) => Some(dict),

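The lifecycle encoded by that enum: dictionaries are trained and serialized as raw bytes, then loaded once into a `DecoderDictionary` that many decompressors can share. A minimal sketch using the `zstd` crate directly (`Dictionary` is a stand-in for `ZstdDictionary`, not the real type):

use zstd::dict::DecoderDictionary;

// In nippy-jar, `RawDictionary` is a buffer of trained dictionary bytes.
type RawDictionary = Vec<u8>;

enum Dictionary<'a> {
    Raw(RawDictionary),
    Loaded(DecoderDictionary<'a>),
}

fn load(raw: Vec<RawDictionary>) -> Vec<Dictionary<'static>> {
    // Mirrors `ZstdDictionaries::load`: copy each raw buffer into a decoder
    // dictionary that can back many `Decompressor`s without re-parsing.
    raw.into_iter().map(|d| Dictionary::Loaded(DecoderDictionary::copy(&d))).collect()
}
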
@ -5,8 +5,7 @@
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
// TODO(danipopes): add these warnings
// #![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)]
#![warn(missing_debug_implementations, unreachable_pub, rustdoc::all)] // TODO(danipopes): missing_docs
#![deny(unused_must_use, rust_2018_idioms)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]

@ -438,12 +437,12 @@ impl<H: NippyJarHeader> PerfectHashingFunction for NippyJar<H> {
#[derive(Debug)]
pub struct DataReader {
/// Data file descriptor. Needs to be kept alive as long as `data_mmap` handle.
#[allow(unused)]
#[allow(dead_code)]
data_file: File,
/// Mmap handle for data.
data_mmap: Mmap,
/// Offset file descriptor. Needs to be kept alive as long as `offset_mmap` handle.
#[allow(unused)]
#[allow(dead_code)]
offset_file: File,
/// Mmap handle for offsets.
offset_mmap: Mmap,
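The keep-alive comments above describe a common mmap pattern: store the `File` next to the `Mmap` so the descriptor cannot be dropped while the mapping is in use, even though the field itself is never read. A sketch with the `memmap2` crate (not necessarily the exact types used here):

use memmap2::Mmap;
use std::{fs::File, io, path::Path};

struct Reader {
    /// Never read directly; kept to tie the descriptor's lifetime to the
    /// mapping, mirroring the `#[allow(dead_code)]` fields above.
    #[allow(dead_code)]
    file: File,
    mmap: Mmap,
}

fn open(path: &Path) -> io::Result<Reader> {
    let file = File::open(path)?;
    // SAFETY: assumes the underlying file is not truncated while mapped.
    let mmap = unsafe { Mmap::map(&file)? };
    Ok(Reader { file, mmap })
}

fn main() -> io::Result<()> {
    let reader = open(Path::new("Cargo.toml"))?;
    println!("mapped {} bytes", reader.mmap.len());
    Ok(())
}
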
@ -805,7 +804,7 @@ mod tests {
let block_start = 500;

#[derive(Serialize, Deserialize, Debug)]
pub struct BlockJarHeader {
struct BlockJarHeader {
block_start: usize,
}

@ -1,6 +1,7 @@
use crate::{compression::Compression, ColumnResult, NippyJar, NippyJarError, NippyJarHeader};
use std::{
cmp::Ordering,
fmt,
fs::{File, OpenOptions},
io::{Read, Seek, SeekFrom, Write},
path::Path,
@ -40,6 +41,12 @@ pub struct NippyJarWriter<'a, H> {
column: usize,
}

impl<H> fmt::Debug for NippyJarWriter<'_, H> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("NippyJarWriter").finish_non_exhaustive()
}
}

impl<'a, H: NippyJarHeader> NippyJarWriter<'a, H> {
pub fn new(jar: &'a mut NippyJar<H>) -> Result<Self, NippyJarError> {
let (data_file, offsets_file, is_created) =

@ -71,7 +71,7 @@ impl ChainInfoTracker {
}

/// Returns the canonical head of the chain.
#[allow(unused)]
#[allow(dead_code)]
pub(crate) fn get_canonical_num_hash(&self) -> BlockNumHash {
self.inner.canonical_head.read().num_hash()
}
@ -82,14 +82,14 @@ impl ChainInfoTracker {
}

/// Returns the safe header of the chain.
#[allow(unused)]
#[allow(dead_code)]
pub(crate) fn get_safe_num_hash(&self) -> Option<BlockNumHash> {
let h = self.inner.safe_block.read();
h.as_ref().map(|h| h.num_hash())
}

/// Returns the finalized header of the chain.
#[allow(unused)]
#[allow(dead_code)]
pub(crate) fn get_finalized_num_hash(&self) -> Option<BlockNumHash> {
let h = self.inner.finalized_block.read();
h.as_ref().map(|h| h.num_hash())

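These hunks show the commit's main theme: `#[allow(unused)]` silences the entire `unused` lint group (`unused_variables`, `unused_imports`, `dead_code`, and more), while `#[allow(dead_code)]` only covers items that are never used, keeping the suppression as narrow as the situation requires. A toy illustration:

// Narrow allow: only the "this item is never used" warning is silenced;
// other lints in the `unused` group still fire on this item's body.
#[allow(dead_code)]
fn helper_not_called_yet() {}

fn main() {}
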
@ -103,7 +103,7 @@ pub struct DatabaseProvider<TX> {
/// Chain spec
chain_spec: Arc<ChainSpec>,
/// Snapshot provider
#[allow(unused)]
#[allow(dead_code)]
snapshot_provider: Option<Arc<SnapshotProvider>>,
}

@ -353,7 +353,7 @@ mod tests {
const STORAGE: B256 = b256!("0000000000000000000000000000000000000000000000000000000000000001");

fn assert_state_provider<T: StateProvider>() {}
#[allow(unused)]
#[allow(dead_code)]
fn assert_historical_state_provider<T: DbTx>() {
assert_state_provider::<HistoricalStateProvider<T>>();
}

@ -129,7 +129,7 @@ mod tests {
use super::*;

fn assert_state_provider<T: StateProvider>() {}
#[allow(unused)]
#[allow(dead_code)]
fn assert_latest_state_provider<T: DbTx>() {
assert_state_provider::<LatestStateProvider<T>>();
}

@ -118,7 +118,7 @@ impl BlobStoreSize {
mod tests {
use super::*;

#[allow(unused)]
#[allow(dead_code)]
struct DynStore {
store: Box<dyn BlobStore>,
}

@ -18,7 +18,7 @@ pub(crate) struct SenderIdentifiers {

impl SenderIdentifiers {
/// Returns the address for the given identifier.
#[allow(unused)]
#[allow(dead_code)]
pub(crate) fn address(&self, id: &SenderId) -> Option<&Address> {
self.sender_to_address.get(id)
}

@ -1,4 +1,3 @@
#![allow(dead_code, unused)]
use crate::{
identifier::TransactionId, pool::size::SizeTracker, traits::BestTransactionsAttributes,
PoolTransaction, SubPoolLimit, ValidPoolTransaction,
@ -85,7 +84,7 @@ impl<T: PoolTransaction> BlobTransactions<T> {
/// Returns all transactions that satisfy the given basefee and blob_fee.
pub(crate) fn satisfy_attributes(
&self,
best_transactions_attributes: BestTransactionsAttributes,
_best_transactions_attributes: BestTransactionsAttributes,
) -> Vec<Arc<ValidPoolTransaction<T>>> {
Vec::new()
}
@ -102,7 +101,7 @@ impl<T: PoolTransaction> BlobTransactions<T> {

/// Returns whether the pool is empty
#[cfg(test)]
#[allow(unused)]
#[allow(dead_code)]
pub(crate) fn is_empty(&self) -> bool {
self.by_id.is_empty()
}

@ -207,7 +207,7 @@ impl<T: ParkedOrd> ParkedPool<T> {

/// Returns whether the pool is empty
#[cfg(test)]
#[allow(unused)]
#[allow(dead_code)]
pub(crate) fn is_empty(&self) -> bool {
self.by_id.is_empty()
}

@ -833,7 +833,7 @@ impl<T: TransactionOrdering> Drop for TxPool<T> {

// Additional test impls
#[cfg(any(test, feature = "test-utils"))]
#[allow(missing_docs)]
#[allow(dead_code)]
impl<T: TransactionOrdering> TxPool<T> {
pub(crate) fn pending(&self) -> &PendingPool<T> {
&self.pending_pool
@ -897,7 +897,7 @@ impl<T: PoolTransaction> AllTransactions<T> {
}

/// Returns an iterator over all _unique_ hashes in the pool
#[allow(unused)]
#[allow(dead_code)]
pub(crate) fn hashes_iter(&self) -> impl Iterator<Item = TxHash> + '_ {
self.by_hash.keys().copied()
}
@ -1142,7 +1142,7 @@ impl<T: PoolTransaction> AllTransactions<T> {
/// Returns a mutable iterator over all transactions for the given sender, starting with the
/// lowest nonce
#[cfg(test)]
#[allow(unused)]
#[allow(dead_code)]
pub(crate) fn txs_iter_mut(
&mut self,
sender: SenderId,
@ -1656,7 +1656,7 @@ pub(crate) type InsertResult<T> = Result<InsertOk<T>, InsertErr<T>>;
pub(crate) enum InsertErr<T: PoolTransaction> {
/// Attempted to replace existing transaction, but was underpriced
Underpriced {
#[allow(unused)]
#[allow(dead_code)]
transaction: Arc<ValidPoolTransaction<T>>,
existing: TxHash,
},
@ -1691,7 +1691,7 @@ pub(crate) struct InsertOk<T: PoolTransaction> {
/// Where to move the transaction to.
move_to: SubPool,
/// Current state of the inserted tx.
#[allow(unused)]
#[allow(dead_code)]
state: TxState,
/// The transaction that was replaced by this.
replaced_tx: Option<(Arc<ValidPoolTransaction<T>>, SubPool)>,

@ -1,9 +1,6 @@
#![allow(missing_docs)]

use crate::{
test_utils::{MockTransactionFactory, MockValidTx},
EthPooledTransaction,
};
use crate::EthPooledTransaction;
use rand::Rng;
use reth_primitives::{
constants::MIN_PROTOCOL_BASE_FEE, sign_message, AccessList, Address, Bytes,
@ -26,7 +23,7 @@ impl<R: Rng> TransactionGenerator<R> {
}

/// Generates random signers
pub fn with_num_signers(mut rng: R, num_signers: usize) -> Self {
pub fn with_num_signers(rng: R, num_signers: usize) -> Self {
let mut signer_keys = Vec::with_capacity(num_signers);
for _ in 0..num_signers {
signer_keys.push(B256::random());
@ -118,10 +115,10 @@ impl TransactionBuilder {
nonce,
gas_limit,
max_fee_per_gas,
max_priority_fee_per_gas,
max_priority_fee_per_gas: _,
to,
value,
access_list,
access_list: _,
input,
} = self;
let tx: Transaction = TxLegacy {

@ -1,4 +1,6 @@
//! Mock Types
//! Mock types.

#![allow(dead_code, unused_macros)]

use crate::{
identifier::{SenderIdentifiers, TransactionId},
@ -13,11 +15,11 @@ use rand::{
};
use reth_primitives::{
constants::{eip4844::DATA_GAS_PER_BLOB, MIN_PROTOCOL_BASE_FEE},
hex, AccessList, Address, BlobTransactionSidecar, Bytes, FromRecoveredPooledTransaction,
AccessList, Address, BlobTransactionSidecar, Bytes, FromRecoveredPooledTransaction,
FromRecoveredTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered,
Signature, Transaction, TransactionKind, TransactionSigned, TransactionSignedEcRecovered,
TxEip1559, TxEip2930, TxEip4844, TxHash, TxLegacy, TxType, TxValue, B256, EIP1559_TX_TYPE_ID,
EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, U128, U256,
TxEip1559, TxEip2930, TxEip4844, TxHash, TxLegacy, TxType, B256, EIP1559_TX_TYPE_ID,
EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, U256,
};
use std::{ops::Range, sync::Arc, time::Instant};

@ -88,6 +90,7 @@ macro_rules! set_value {
*$field = new_value;
}
#[cfg(feature = "optimism")]
#[allow(unused_variables)]
MockTransaction::Deposit(ref mut tx) => {
op_set_value!(tx, $this, new_value);
}
@ -104,6 +107,7 @@ macro_rules! get_value {
MockTransaction::Eip4844 { $field, .. } => $field.clone(),
MockTransaction::Eip2930 { $field, .. } => $field.clone(),
#[cfg(feature = "optimism")]
#[allow(unused_variables)]
MockTransaction::Deposit(tx) => {
op_get_value!(tx, $field)
}
@ -311,8 +315,8 @@ impl MockTransaction {

/// Sets the priority fee for dynamic fee transactions (EIP-1559 and EIP-4844)
pub fn set_priority_fee(&mut self, val: u128) -> &mut Self {
if let (MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } |
MockTransaction::Eip4844 { max_priority_fee_per_gas, .. }) = self
if let MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } |
MockTransaction::Eip4844 { max_priority_fee_per_gas, .. } = self
{
*max_priority_fee_per_gas = val;
}
@ -325,8 +329,8 @@ impl MockTransaction {
}

pub fn get_priority_fee(&self) -> Option<u128> {
if let (MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } |
MockTransaction::Eip4844 { max_priority_fee_per_gas, .. }) = self
if let MockTransaction::Eip1559 { max_priority_fee_per_gas, .. } |
MockTransaction::Eip4844 { max_priority_fee_per_gas, .. } = self
{
Some(*max_priority_fee_per_gas)
} else {
@ -335,8 +339,8 @@ impl MockTransaction {
}

pub fn set_max_fee(&mut self, val: u128) -> &mut Self {
if let (MockTransaction::Eip1559 { max_fee_per_gas, .. } |
MockTransaction::Eip4844 { max_fee_per_gas, .. }) = self
if let MockTransaction::Eip1559 { max_fee_per_gas, .. } |
MockTransaction::Eip4844 { max_fee_per_gas, .. } = self
{
*max_fee_per_gas = val;
}
@ -349,8 +353,8 @@ impl MockTransaction {
}

pub fn get_max_fee(&self) -> Option<u128> {
if let (MockTransaction::Eip1559 { max_fee_per_gas, .. } |
MockTransaction::Eip4844 { max_fee_per_gas, .. }) = self
if let MockTransaction::Eip1559 { max_fee_per_gas, .. } |
MockTransaction::Eip4844 { max_fee_per_gas, .. } = self
{
Some(*max_fee_per_gas)
} else {
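These hunks drop the parentheses around the or-patterns: `if let` has accepted a top-level or-pattern directly since or-patterns were stabilized (Rust 1.53), and the extra parentheses trip the `unused_parens` lint. A standalone sketch of the same shape (`Fee` is an illustrative enum, not a reth type):

enum Fee {
    Dynamic { max_fee: u128 },
    Blob { max_fee: u128 },
    Legacy,
}

fn max_fee(f: &Fee) -> Option<u128> {
    // Top-level or-pattern in `if let`: no surrounding parentheses required.
    if let Fee::Dynamic { max_fee } | Fee::Blob { max_fee } = f {
        Some(*max_fee)
    } else {
        None
    }
}

fn main() {
    assert_eq!(max_fee(&Fee::Dynamic { max_fee: 7 }), Some(7));
    assert_eq!(max_fee(&Fee::Legacy), None);
}
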
@ -439,30 +443,30 @@ impl MockTransaction {

/// Returns a clone with a decreased nonce
pub fn prev(&self) -> Self {
let mut next = self.clone().with_hash(B256::random());
let next = self.clone().with_hash(B256::random());
next.with_nonce(self.get_nonce() - 1)
}

/// Returns a clone with an increased nonce
pub fn next(&self) -> Self {
let mut next = self.clone().with_hash(B256::random());
let next = self.clone().with_hash(B256::random());
next.with_nonce(self.get_nonce() + 1)
}

/// Returns a clone with an increased nonce
pub fn skip(&self, skip: u64) -> Self {
let mut next = self.clone().with_hash(B256::random());
let next = self.clone().with_hash(B256::random());
next.with_nonce(self.get_nonce() + skip + 1)
}

/// Returns a clone with incremented nonce
pub fn inc_nonce(mut self) -> Self {
pub fn inc_nonce(self) -> Self {
let nonce = self.get_nonce() + 1;
self.with_nonce(nonce)
}

/// Sets a new random hash
pub fn rng_hash(mut self) -> Self {
pub fn rng_hash(self) -> Self {
self.with_hash(B256::random())
}

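The dropped `mut` bindings work because these builder methods take `self` by value and return the modified value, so the intermediate binding is never mutated in place. A hypothetical mirror of the pattern:

#[derive(Clone)]
struct Tx {
    nonce: u64,
}

impl Tx {
    /// Consumes and returns `self`, so callers never need a `mut` binding.
    fn with_nonce(mut self, nonce: u64) -> Self {
        self.nonce = nonce;
        self
    }

    /// Mirrors `MockTransaction::next`: the clone is moved into
    /// `with_nonce`, so `let next`, not `let mut next`, is enough.
    fn next(&self) -> Self {
        let next = self.clone();
        next.with_nonce(self.nonce + 1)
    }
}

fn main() {
    let tx = Tx { nonce: 1 };
    assert_eq!(tx.next().nonce, 2);
}
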
@ -473,7 +477,7 @@ impl MockTransaction {

/// Returns a new transaction with a higher gas price
pub fn inc_price_by(&self, value: u128) -> Self {
let mut next = self.clone();
let next = self.clone();
let gas = self.get_gas_price().checked_add(value).unwrap();
next.with_gas_price(gas)
}
@ -485,21 +489,21 @@ impl MockTransaction {

/// Returns a new transaction with a lower gas price
pub fn decr_price_by(&self, value: u128) -> Self {
let mut next = self.clone();
let next = self.clone();
let gas = self.get_gas_price().checked_sub(value).unwrap();
next.with_gas_price(gas)
}

/// Returns a new transaction with a higher value
pub fn inc_value(&self) -> Self {
let mut next = self.clone();
let next = self.clone();
let val = self.get_value().checked_add(U256::from(1)).unwrap();
next.with_value(val)
}

/// Returns a new transaction with a higher gas limit
pub fn inc_limit(&self) -> Self {
let mut next = self.clone();
let next = self.clone();
let gas = self.get_gas_limit() + 1;
next.with_gas_limit(gas)
}
@ -850,8 +854,8 @@ impl From<MockTransaction> for Transaction {
fn from(mock: MockTransaction) -> Self {
match mock {
MockTransaction::Legacy {
hash,
sender,
hash: _,
sender: _,
nonce,
gas_price,
gas_limit,
@ -868,8 +872,8 @@ impl From<MockTransaction> for Transaction {
input: input.clone(),
}),
MockTransaction::Eip1559 {
hash,
sender,
hash: _,
sender: _,
nonce,
max_fee_per_gas,
max_priority_fee_per_gas,
@ -891,7 +895,7 @@ impl From<MockTransaction> for Transaction {
}),
MockTransaction::Eip4844 {
hash,
sender,
sender: _,
nonce,
max_fee_per_gas,
max_priority_fee_per_gas,
@ -901,7 +905,7 @@ impl From<MockTransaction> for Transaction {
value,
accesslist,
input,
sidecar,
sidecar: _,
} => Self::Eip4844(TxEip4844 {
chain_id: 1,
nonce,
@ -916,8 +920,8 @@ impl From<MockTransaction> for Transaction {
input,
}),
MockTransaction::Eip2930 {
hash,
sender,
hash: _,
sender: _,
nonce,
to,
gas_limit,
@ -1025,6 +1029,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction {
// performance just use a default sidecar
sidecar: BlobTransactionSidecar::default(),
},
#[allow(unreachable_patterns)]
_ => unimplemented!(),
})
.boxed()
@ -1155,7 +1160,7 @@ impl MockTransactionSet {
let mut txs = Vec::with_capacity(tx_count);
let mut curr_tx = MockTransaction::new_from_type(tx_type).with_nonce(from_nonce);
for i in 0..tx_count {
let nonce = from_nonce + i as u64;
let _nonce = from_nonce + i as u64;
curr_tx = curr_tx.next().with_sender(sender);
txs.push(curr_tx.clone());
}

@ -1,17 +1,16 @@
//! Internal helpers for testing.
#![allow(missing_docs, unused, missing_debug_implementations, unreachable_pub)]

#![allow(missing_docs, missing_debug_implementations)]

use crate::{blobstore::InMemoryBlobStore, noop::MockTransactionValidator, Pool};

mod gen;
mod mock;
mod pool;

use crate::{
blobstore::InMemoryBlobStore, noop::MockTransactionValidator, Pool, PoolTransaction,
TransactionOrigin, TransactionValidationOutcome, TransactionValidator,
};
pub use gen::*;

mod mock;
pub use mock::*;
use std::{marker::PhantomData, sync::Arc};

mod pool;

/// A [Pool] used for testing
pub type TestPool =
@ -21,6 +20,7 @@ pub type TestPool =
pub fn testing_pool() -> TestPool {
testing_pool_with_validator(MockTransactionValidator::default())
}

/// Returns a new [Pool] used for testing purposes
pub fn testing_pool_with_validator(
validator: MockTransactionValidator<MockTransaction>,

@ -1,24 +1,22 @@
//! Test helpers for mocking an entire pool.

#![allow(dead_code)]

use crate::{
error::PoolResult,
pool::{txpool::TxPool, AddedTransaction},
test_utils::{
MockOrdering, MockTransaction, MockTransactionDistribution, MockTransactionFactory,
},
test_utils::{MockOrdering, MockTransactionDistribution, MockTransactionFactory},
TransactionOrdering,
};
use rand::Rng;
use reth_primitives::{Address, U128, U256};
use reth_primitives::{Address, U256};
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
ops::{Deref, DerefMut},
sync::Arc,
};

/// A wrapped `TxPool` with additional helpers for testing
pub struct MockPool<T: TransactionOrdering = MockOrdering> {
pub(crate) struct MockPool<T: TransactionOrdering = MockOrdering> {
// The wrapped pool.
pool: TxPool<T>,
}
@ -60,7 +58,7 @@ impl<T: TransactionOrdering> DerefMut for MockPool<T> {
}

/// Simulates transaction execution.
pub struct MockTransactionSimulator<R: Rng> {
pub(crate) struct MockTransactionSimulator<R: Rng> {
/// The pending base fee
base_fee: u128,
/// Generator for transactions
@ -83,7 +81,7 @@ pub struct MockTransactionSimulator<R: Rng> {

impl<R: Rng> MockTransactionSimulator<R> {
/// Returns a new mock instance
pub fn new(mut rng: R, config: MockSimulatorConfig) -> Self {
pub(crate) fn new(mut rng: R, config: MockSimulatorConfig) -> Self {
let senders = config.addresses(&mut rng);
let nonces = senders.iter().copied().map(|a| (a, 0)).collect();
let balances = senders.iter().copied().map(|a| (a, config.balance)).collect();
@ -113,7 +111,7 @@ impl<R: Rng> MockTransactionSimulator<R> {
}

/// Executes the next scenario and applies it to the pool
pub fn next(&mut self, pool: &mut MockPool) {
pub(crate) fn next(&mut self, pool: &mut MockPool) {
let sender = self.rng_address();
let scenario = self.rng_scenario();
let on_chain_nonce = self.nonces[&sender];
@ -152,30 +150,29 @@ impl<R: Rng> MockTransactionSimulator<R> {
}

/// How to configure a new mock transaction stream
pub struct MockSimulatorConfig {
pub(crate) struct MockSimulatorConfig {
/// How many senders to generate.
pub num_senders: usize,
pub(crate) num_senders: usize,
// TODO(mattsse): add a way to generate different balances
pub balance: U256,
pub(crate) balance: U256,
/// Scenarios to test
pub scenarios: Vec<ScenarioType>,
pub(crate) scenarios: Vec<ScenarioType>,
/// The start base fee
pub base_fee: u128,
pub(crate) base_fee: u128,
/// generator for transactions
pub tx_generator: MockTransactionDistribution,
pub(crate) tx_generator: MockTransactionDistribution,
}

impl MockSimulatorConfig {
/// Generates a set of random addresses
pub fn addresses(&self, rng: &mut impl rand::Rng) -> Vec<Address> {
let _ = rng.gen::<bool>(); // TODO(dani): ::random_with
std::iter::repeat_with(Address::random).take(self.num_senders).collect()
pub(crate) fn addresses(&self, rng: &mut impl rand::Rng) -> Vec<Address> {
std::iter::repeat_with(|| Address::random_with(rng)).take(self.num_senders).collect()
}
}

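Threading the caller's RNG through `Address::random_with` (instead of the thread-local `Address::random`) is what makes the simulator reproducible from a seed. A sketch of the difference with a hand-rolled `random_with` (hypothetical, mirroring the reth_primitives helper):

use rand::{rngs::StdRng, Rng, SeedableRng};

// Hypothetical mirror of the `random_with` helper: draw bytes from the
// caller-supplied RNG rather than a thread-local one.
fn random_with(rng: &mut impl Rng) -> [u8; 20] {
    let mut addr = [0u8; 20];
    rng.fill(addr.as_mut_slice());
    addr
}

fn main() {
    let mut a = StdRng::seed_from_u64(42);
    let mut b = StdRng::seed_from_u64(42);
    // Same seed, same "addresses": the simulation becomes reproducible.
    assert_eq!(random_with(&mut a), random_with(&mut b));
}
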
/// Represents the type of scenario to simulate.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ScenarioType {
pub(crate) enum ScenarioType {
OnchainNonce,
HigherNonce { skip: u64 },
}
@ -186,7 +183,7 @@ pub enum ScenarioType {
///
/// An executed scenario can affect previous executed transactions
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Scenario {
pub(crate) enum Scenario {
/// Send a tx with the same nonce as on chain.
OnchainNonce { nonce: u64 },
/// Send a tx with a higher nonce than what the sender has on chain
@ -199,7 +196,7 @@ pub enum Scenario {

/// Represents an executed scenario
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecutedScenario {
pub(crate) struct ExecutedScenario {
/// balance at the time of execution
balance: U256,
/// nonce at the time of execution
@ -210,7 +207,7 @@ pub struct ExecutedScenario {

/// All executed scenarios by a sender
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecutedScenarios {
pub(crate) struct ExecutedScenarios {
sender: Address,
scenarios: Vec<ExecutedScenario>,
}

@ -461,7 +461,7 @@ pub struct EthRequestHandler<C> {
/// The client type that can interact with the chain.
client: Arc<C>,
/// Used for reporting peers.
#[allow(unused)]
#[allow(dead_code)]
peers: PeersHandle,
/// Incoming request from the [NetworkManager](crate::NetworkManager).
incoming_requests: UnboundedReceiverStream<IncomingEthRequest>,