Mirror of https://github.com/hl-archive-node/nanoreth.git, synced 2025-12-06 02:49:55 +00:00
chore(github): use codespell to inspect and correct spelling issues (#7775)
Signed-off-by: jsvisa <delweng@gmail.com>
Co-authored-by: Matthias Seitz <matthias.seitz@outlook.de>
.codespellrc (new file, +3)
@@ -0,0 +1,3 @@
+[codespell]
+skip = .git,target,./crates/storage/libmdbx-rs/mdbx-sys/libmdbx,Cargo.toml,Cargo.lock
+ignore-words-list = crate,ser,ratatui
.github/workflows/lint.yml (vendored, 8 changed lines)
@@ -110,6 +110,12 @@ jobs:
           components: rustfmt
       - run: cargo fmt --all --check

+  codespell:
+    runs-on: ubuntu-latest
+    timeout-minutes: 30
+    steps:
+      - uses: codespell-project/actions-codespell@v2
+
   grafana:
     runs-on: ubuntu-latest
     timeout-minutes: 30
@@ -124,7 +130,7 @@ jobs:
     name: lint success
     runs-on: ubuntu-latest
     if: always()
-    needs: [clippy-binaries, clippy, crate-checks, docs, fmt, grafana]
+    needs: [clippy-binaries, clippy, crate-checks, docs, fmt, codespell, grafana]
     timeout-minutes: 30
     steps:
       - name: Decide whether the needed jobs succeeded or failed
Makefile (12 changed lines)
@@ -303,11 +303,21 @@ lint-other-targets:
     --all-features \
     -- -D warnings

+lint-codespell: ensure-codespell
+    codespell
+
+ensure-codespell:
+    @if ! command -v codespell &> /dev/null; then \
+        echo "codespell not found. Please install it by running the command `pip install codespell` or refer to the following link for more information: https://github.com/codespell-project/codespell" \
+        exit 1; \
+    fi
+
 lint:
     make fmt && \
     make lint-reth && \
     make lint-op-reth && \
-    make lint-other-targets
+    make lint-other-targets \
+    make lint-codespell

 fix-lint-reth:
     cargo +nightly clippy \
@@ -81,7 +81,7 @@ where
 {
     let mut rows = vec![];
     let mut seen_keys = HashSet::new();
-    let strat = proptest::collection::vec(
+    let strategy = proptest::collection::vec(
         any_with::<TableRow<T>>((
             <T::Key as Arbitrary>::Parameters::default(),
             <T::Value as Arbitrary>::Parameters::default(),
@@ -94,7 +94,7 @@ where
     while rows.len() < per_table {
         // Generate all `per_table` rows: (Key, Value)
         rows.extend(
-            &mut strat
+            &mut strategy
                 .new_tree(runner)
                 .map_err(|e| eyre::eyre!("{e}"))?
                 .current()
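The two hunks above only rename `strat` to `strategy`; the surrounding code is the usual proptest pattern of sampling concrete rows from a strategy outside the `proptest!` macro. A minimal sketch of that pattern, using illustrative `(u64, u64)` rows instead of the actual reth `TableRow` types:

```rust
use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner};

fn main() {
    let mut runner = TestRunner::default();

    // Build a strategy for a vector of exactly ten (key, value) pairs.
    let strategy = proptest::collection::vec(any::<(u64, u64)>(), 10);

    // `new_tree` samples a value tree from the strategy; `current` yields the
    // concrete value at the root of that tree.
    let rows: Vec<(u64, u64)> = strategy
        .new_tree(&mut runner)
        .expect("strategy should produce a value")
        .current();

    assert_eq!(rows.len(), 10);
}
```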
@@ -8,11 +8,13 @@ use thiserror::Error;
 /// both execution payloads and forkchoice update attributes with respect to a method version.
 #[derive(Error, Debug)]
 pub enum EngineObjectValidationError {
-    /// Thrown when the underlying validation error occured while validating an `ExecutionPayload`.
+    /// Thrown when the underlying validation error occurred while validating an
+    /// `ExecutionPayload`.
     #[error("Payload validation error: {0}")]
     Payload(VersionSpecificValidationError),

-    /// Thrown when the underlying validation error occured while validating a `PayloadAttributes`.
+    /// Thrown when the underlying validation error occurred while validating a
+    /// `PayloadAttributes`.
     #[error("Payload attributes validation error: {0}")]
     PayloadAttributes(VersionSpecificValidationError),

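For context on the enum this hunk touches: `thiserror`'s `#[error("... {0}")]` attribute derives `Display` for each variant and forwards the wrapped error's message. A minimal sketch, assuming `thiserror` as a dependency and an illustrative inner error standing in for `VersionSpecificValidationError`:

```rust
use thiserror::Error;

// Illustrative stand-in for the wrapped validation error.
#[derive(Error, Debug)]
#[error("missing withdrawals")]
struct InnerValidationError;

// Same shape as the enum in the diff: each variant wraps an inner error and
// forwards it through the `{0}` placeholder of its Display message.
#[derive(Error, Debug)]
enum ObjectValidationError {
    #[error("Payload validation error: {0}")]
    Payload(InnerValidationError),
    #[error("Payload attributes validation error: {0}")]
    PayloadAttributes(InnerValidationError),
}

fn main() {
    let err = ObjectValidationError::Payload(InnerValidationError);
    // Prints: Payload validation error: missing withdrawals
    println!("{err}");
}
```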
@@ -1,4 +1,4 @@
-//! Ethereum specifc
+//! Ethereum specific

 #![doc(
     html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
@@ -81,8 +81,8 @@ pub enum ECIESErrorImpl {
     /// a message from the (partially filled) buffer.
     #[error("stream closed due to not being readable")]
     UnreadableStream,
-    // Error when data is not recieved from peer for a prolonged period.
-    #[error("never recieved data from remote peer")]
+    // Error when data is not received from peer for a prolonged period.
+    #[error("never received data from remote peer")]
     StreamTimeout,
 }

@@ -21,7 +21,7 @@ pub const SIGNATURE_DECODED_SIZE_BYTES: usize = mem::size_of::<Signature>();
 pub trait ValidateTx68 {
     /// Validates a [`NewPooledTransactionHashes68`](reth_eth_wire::NewPooledTransactionHashes68)
     /// entry. Returns [`ValidationOutcome`] which signals to the caller whether to fetch the
-    /// transaction or wether to drop it, and whether the sender of the announcement should be
+    /// transaction or to drop it, and whether the sender of the announcement should be
     /// penalized.
     fn should_fetch(
         &self,
@@ -164,7 +164,7 @@ impl Default for ComponentsBuilder<(), (), (), ()> {

 /// A type that configures all the customizable components of the node and knows how to build them.
 ///
-/// Implementors of this trait are responsible for building all the components of the node: See
+/// Implementers of this trait are responsible for building all the components of the node: See
 /// [NodeComponents].
 ///
 /// The [ComponentsBuilder] is a generic implementation of this trait that can be used to customize
@@ -363,7 +363,7 @@ impl RpcServerArgs {

 impl RethRpcConfig for RpcServerArgs {
     fn is_ipc_enabled(&self) -> bool {
-        // By default IPC is enabled therefor it is enabled if the `ipcdisable` is false.
+        // By default IPC is enabled therefore it is enabled if the `ipcdisable` is false.
         !self.ipcdisable
     }

@@ -131,7 +131,7 @@ pub trait EngineApi<Engine: EngineTypes> {
     /// layer p2p specification, meaning the input should be treated as untrusted or potentially
     /// adversarial.
     ///
-    /// Implementors should take care when acting on the input to this method, specifically
+    /// Implementers should take care when acting on the input to this method, specifically
     /// ensuring that the range is limited properly, and that the range boundaries are computed
     /// correctly and without panics.
     #[method(name = "getPayloadBodiesByRangeV1")]
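The doc comment fixed here asks implementers to limit the requested range and compute its boundaries without panics. A minimal sketch of what that clamping can look like; the limit constant and helper are illustrative, not the actual engine API implementation:

```rust
// Illustrative cap on how many payload bodies one request may ask for.
const MAX_PAYLOAD_BODIES_LIMIT: u64 = 1024;

fn clamp_range(start: u64, count: u64, best_block: u64) -> std::ops::RangeInclusive<u64> {
    // Saturating arithmetic keeps untrusted inputs from overflowing u64.
    let count = count.min(MAX_PAYLOAD_BODIES_LIMIT);
    let end = start.saturating_add(count).saturating_sub(1).min(best_block);
    start..=end
}

fn main() {
    assert_eq!(clamp_range(10, 5, 100), 10..=14);
    // An oversized request is clamped to the limit instead of trusted blindly.
    assert_eq!(clamp_range(0, 10_000, u64::MAX), 0..=1023);
}
```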
@@ -34,7 +34,7 @@ pub trait GanacheApi {
     /// is the snapshot id to revert to. This deletes the given snapshot, as well as any snapshots
     /// taken after (e.g.: reverting to id 0x1 will delete snapshots with ids 0x1, 0x2, etc.).
     ///
-    /// Reutnrs `true` if a snapshot was reverted, otherwise `false`.
+    /// Returns `true` if a snapshot was reverted, otherwise `false`.
     #[method(name = "revert")]
     async fn evm_revert(&self, snapshot_id: U256) -> RpcResult<bool>;

@@ -717,7 +717,7 @@ impl RpcModuleSelection {

     /// Creates a new [RpcModule] based on the configured reth modules.
     ///
-    /// Note: This will always create new instance of the module handlers and is therefor only
+    /// Note: This will always create new instance of the module handlers and is therefore only
     /// recommended for launching standalone transports. If multiple transports need to be
     /// configured it's recommended to use the [RpcModuleBuilder].
     #[allow(clippy::too_many_arguments)]
@@ -292,7 +292,7 @@ where
     /// layer p2p specification, meaning the input should be treated as untrusted or potentially
     /// adversarial.
     ///
-    /// Implementors should take care when acting on the input to this method, specifically
+    /// Implementers should take care when acting on the input to this method, specifically
     /// ensuring that the range is limited properly, and that the range boundaries are computed
     /// correctly and without panics.
     pub async fn get_payload_bodies_by_range(
@@ -661,7 +661,7 @@ where
     /// layer p2p specification, meaning the input should be treated as untrusted or potentially
     /// adversarial.
     ///
-    /// Implementors should take care when acting on the input to this method, specifically
+    /// Implementers should take care when acting on the input to this method, specifically
     /// ensuring that the range is limited properly, and that the range boundaries are computed
     /// correctly and without panics.
     ///
@@ -53,7 +53,7 @@ impl DevSigner {
     /// Generates a random dev signer which satisfies [EthSigner] trait
     pub(crate) fn random() -> Box<dyn EthSigner> {
         let mut signers = Self::random_signers(1);
-        signers.pop().expect("expect to generate at leas one signer")
+        signers.pop().expect("expect to generate at least one signer")
     }

     /// Generates provided number of random dev signers
@@ -12,7 +12,7 @@
 //!
 //! To avoid this, all blocking or CPU intensive handlers must be spawned to a separate task. See
 //! the [EthApi] handler implementations for examples. The rpc-api traits make no use of the
-//! available jsonrpsee `blocking` attribute to give implementors more freedom because the
+//! available jsonrpsee `blocking` attribute to give implementers more freedom because the
 //! `blocking` attribute and async handlers are mutually exclusive. However, as mentioned above, a
 //! lot of handlers make use of async functions, caching for example, but are also using blocking
 //! disk-io, hence these calls are spawned as futures to a blocking task manually.
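The module docs fixed here describe offloading blocking or CPU-heavy handler work to a dedicated task so the async RPC server is not stalled. A minimal sketch of that pattern with tokio's `spawn_blocking`, assuming `tokio` (with the `macros` and `rt-multi-thread` features) as a dependency; the handler and its disk I/O are simulated:

```rust
use std::time::Duration;

async fn read_block_from_disk(number: u64) -> String {
    // Move the blocking work onto tokio's blocking thread pool instead of
    // running it directly inside the async handler.
    tokio::task::spawn_blocking(move || {
        std::thread::sleep(Duration::from_millis(10)); // simulated disk read
        format!("block #{number}")
    })
    .await
    .expect("blocking task panicked")
}

#[tokio::main]
async fn main() {
    println!("{}", read_block_from_disk(42).await);
}
```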
@@ -136,7 +136,7 @@ where
     T::Key: std::hash::Hash + Arbitrary,
     T::Value: Arbitrary,
 {
-    let strat = proptest::collection::vec(
+    let strategy = proptest::collection::vec(
         any_with::<TableRow<T>>((
             <T::Key as Arbitrary>::Parameters::default(),
             <T::Value as Arbitrary>::Parameters::default(),
@@ -147,8 +147,8 @@ where
     .boxed();

     let mut runner = TestRunner::new(ProptestConfig::default());
-    let mut preload = strat.new_tree(&mut runner).unwrap().current();
-    let mut input = strat.new_tree(&mut runner).unwrap().current();
+    let mut preload = strategy.new_tree(&mut runner).unwrap().current();
+    let mut input = strategy.new_tree(&mut runner).unwrap().current();

     let mut unique_keys = HashSet::new();
     preload.retain(|(k, _)| unique_keys.insert(k.clone()));
@@ -23,7 +23,7 @@ mod sealed {
     use crate::{database::Database, mock::DatabaseMock, DatabaseEnv};
     use std::sync::Arc;

-    /// Sealed trait to limit the implementors of the Database trait.
+    /// Sealed trait to limit the implementers of the Database trait.
     pub trait Sealed: Sized {}

     impl<DB: Database> Sealed for &DB {}
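The doc line fixed here refers to the sealed-trait pattern: a public trait with a supertrait that lives in a private module, so external crates can use the trait but cannot implement it. A minimal sketch with illustrative names rather than the actual reth `Database` plumbing:

```rust
mod sealed {
    /// Downstream crates cannot name this trait, so they cannot implement
    /// anything that requires it as a supertrait.
    pub trait Sealed: Sized {}
    impl Sealed for super::DatabaseEnv {}
}

/// Illustrative stand-in for a concrete database handle.
pub struct DatabaseEnv;

/// Public trait limited to sealed implementers: external code can call it but
/// cannot add new implementations.
pub trait Database: sealed::Sealed {
    fn name(&self) -> &'static str;
}

impl Database for DatabaseEnv {
    fn name(&self) -> &'static str {
        "mdbx"
    }
}

fn main() {
    println!("{}", DatabaseEnv.name());
}
```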
@@ -501,7 +501,7 @@ impl<R> Default for Geometry<R> {
 ///
 /// # Arguments
 ///
-/// * `process_id` – A proceess id of the reader process.
+/// * `process_id` – A process id of the reader process.
 /// * `thread_id` – A thread id of the reader thread.
 /// * `read_txn_id` – An oldest read transaction number on which stalled.
 /// * `gap` – A lag from the last committed txn.
@@ -1071,7 +1071,7 @@ mod tests {
         let num_rows = 2;

         // (missing_offsets, expected number of rows)
-        // If a row wasnt fully pruned, then it should clear it up as well
+        // If a row wasn't fully pruned, then it should clear it up as well
         let missing_offsets_scenarios = [(1, 1), (2, 1), (3, 0)];

         for (missing_offsets, expected_rows) in missing_offsets_scenarios {
@@ -127,7 +127,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> {
             tracing::warn!(
                 target: "provider::historical_sp",
                 target = self.block_number,
-                "Attempt to calculate state root for an old block might result in OOM, tread carefully"
+                "Attempt to calculate state root for an old block might result in OOM, treat carefully"
             );
         }

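For context on the macro in this hunk: `tracing::warn!` accepts an optional `target:` override used for filtering, structured `key = value` fields (here a field that happens to be named `target`), and a trailing message. A minimal sketch, assuming `tracing` and `tracing-subscriber` as dependencies:

```rust
use tracing::warn;

fn main() {
    // Install a simple subscriber so the warning is actually printed.
    tracing_subscriber::fmt::init();

    let block_number = 1_000u64;
    warn!(
        target: "provider::historical_sp", // overrides the event's target
        target = block_number,             // a structured field named "target"
        "Attempt to calculate state root for an old block might result in OOM, treat carefully"
    );
}
```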
@@ -399,7 +399,7 @@ impl<T: TransactionOrdering> PendingPool<T> {
             unique_senders = self.highest_nonces.len();
             non_local_senders -= unique_removed;

-            // we can re-use the temp array
+            // we can reuse the temp array
             removed.clear();

             // loop through the highest nonces set, removing transactions until we reach the limit
@@ -1766,8 +1766,8 @@ pub(crate) struct PoolInternalTransaction<T: PoolTransaction> {
     pub(crate) transaction: Arc<ValidPoolTransaction<T>>,
     /// The `SubPool` that currently contains this transaction.
     pub(crate) subpool: SubPool,
-    /// Keeps track of the current state of the transaction and therefor in which subpool it should
-    /// reside
+    /// Keeps track of the current state of the transaction and therefore in which subpool it
+    /// should reside
     pub(crate) state: TxState,
     /// The total cost all transactions before this transaction.
     ///