From 7c17c6e46908d14b8b881c033cf68066646e4aec Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Mon, 3 Jun 2024 15:21:45 +0200 Subject: [PATCH] add `doc_markdown` clippy lint (#8552) Co-authored-by: Alexey Shekhirin Co-authored-by: Matthias Seitz --- Cargo.toml | 1 + bin/reth/src/cli/mod.rs | 10 +- bin/reth/src/commands/db/mod.rs | 2 +- bin/reth/src/commands/db/tui.rs | 24 +- bin/reth/src/commands/node/mod.rs | 10 +- bin/reth/src/commands/stage/dump/execution.rs | 4 +- bin/reth/src/commands/stage/dump/mod.rs | 4 +- bin/reth/src/utils.rs | 4 +- book/cli/reth.md | 2 +- book/cli/reth/config.md | 2 +- book/cli/reth/db.md | 2 +- book/cli/reth/db/checksum.md | 2 +- book/cli/reth/db/clear.md | 2 +- book/cli/reth/db/clear/mdbx.md | 2 +- book/cli/reth/db/clear/static-file.md | 2 +- book/cli/reth/db/clear/static_file.md | 40 ++-- book/cli/reth/db/diff.md | 2 +- book/cli/reth/db/drop.md | 2 +- book/cli/reth/db/get.md | 2 +- book/cli/reth/db/get/mdbx.md | 2 +- book/cli/reth/db/get/static-file.md | 2 +- book/cli/reth/db/get/static_file.md | 40 ++-- book/cli/reth/db/list.md | 2 +- book/cli/reth/db/path.md | 2 +- book/cli/reth/db/static_file.md | 8 +- book/cli/reth/db/stats.md | 2 +- book/cli/reth/db/version.md | 2 +- book/cli/reth/debug.md | 2 +- book/cli/reth/dump-genesis.md | 2 +- book/cli/reth/import.md | 2 +- book/cli/reth/init-state.md | 2 +- book/cli/reth/init.md | 2 +- book/cli/reth/node.md | 14 +- book/cli/reth/p2p.md | 10 +- book/cli/reth/p2p/body.md | 2 +- book/cli/reth/p2p/header.md | 2 +- book/cli/reth/recover.md | 2 +- book/cli/reth/recover/storage-tries.md | 2 +- book/cli/reth/stage.md | 2 +- book/cli/reth/stage/drop.md | 2 +- book/cli/reth/stage/dump.md | 6 +- book/cli/reth/stage/dump/account-hashing.md | 4 +- book/cli/reth/stage/dump/execution.md | 2 +- book/cli/reth/stage/dump/merkle.md | 2 +- book/cli/reth/stage/dump/storage-hashing.md | 4 +- book/cli/reth/stage/run.md | 10 +- book/cli/reth/stage/unwind.md | 10 +- book/cli/reth/stage/unwind/num-blocks.md | 2 +- book/cli/reth/stage/unwind/to-block.md | 2 +- book/cli/reth/test-vectors.md | 2 +- book/cli/reth/test-vectors/tables.md | 2 +- crates/blockchain-tree-api/src/error.rs | 16 +- crates/blockchain-tree-api/src/lib.rs | 24 +- crates/blockchain-tree/src/block_buffer.rs | 8 +- crates/blockchain-tree/src/block_indices.rs | 8 +- crates/blockchain-tree/src/blockchain_tree.rs | 30 +-- crates/blockchain-tree/src/bundle.rs | 2 +- crates/blockchain-tree/src/chain.rs | 12 +- crates/blockchain-tree/src/lib.rs | 6 +- crates/blockchain-tree/src/noop.rs | 4 +- crates/blockchain-tree/src/shareable.rs | 6 +- crates/blockchain-tree/src/state.rs | 2 +- crates/cli/runner/src/lib.rs | 10 +- crates/config/src/config.rs | 4 +- crates/consensus/auto-seal/src/lib.rs | 6 +- crates/consensus/beacon/src/engine/error.rs | 4 +- crates/consensus/beacon/src/engine/event.rs | 2 +- .../consensus/beacon/src/engine/forkchoice.rs | 20 +- .../beacon/src/engine/hooks/controller.rs | 4 +- .../consensus/beacon/src/engine/hooks/mod.rs | 2 +- .../beacon/src/engine/hooks/prune.rs | 12 +- .../beacon/src/engine/hooks/static_file.rs | 38 ++-- crates/consensus/beacon/src/engine/message.rs | 4 +- crates/consensus/beacon/src/engine/mod.rs | 47 ++-- crates/consensus/beacon/src/engine/sync.rs | 14 +- crates/consensus/consensus/src/lib.rs | 4 +- crates/engine-primitives/src/error.rs | 8 +- crates/engine-primitives/src/lib.rs | 16 +- crates/engine-primitives/src/payload.rs | 8 +- crates/engine-primitives/src/traits.rs | 18 +- 
crates/ethereum-forks/src/forkid.rs | 10 +- crates/ethereum/consensus/src/lib.rs | 2 +- .../ethereum/engine-primitives/src/payload.rs | 6 +- crates/ethereum/evm/src/eip6110.rs | 2 +- crates/ethereum/evm/src/execute.rs | 2 +- crates/ethereum/node/src/node.rs | 2 +- crates/evm/execution-errors/src/lib.rs | 4 +- crates/evm/execution-types/src/bundle.rs | 12 +- crates/evm/execution-types/src/chain.rs | 12 +- crates/evm/src/execute.rs | 10 +- crates/evm/src/lib.rs | 14 +- crates/evm/src/noop.rs | 2 +- crates/evm/src/test_utils.rs | 2 +- crates/exex/src/context.rs | 2 +- crates/exex/src/event.rs | 8 +- crates/exex/src/lib.rs | 18 +- crates/exex/src/manager.rs | 60 ++--- crates/exex/src/notification.rs | 8 +- crates/fs-util/src/lib.rs | 6 +- crates/metrics/src/common/mpsc.rs | 28 +-- crates/net/common/src/ratelimit.rs | 12 +- crates/net/common/src/stream.rs | 2 +- crates/net/discv4/README.md | 4 +- crates/net/discv4/src/config.rs | 6 +- crates/net/discv4/src/lib.rs | 59 ++--- crates/net/discv4/src/proto.rs | 12 +- crates/net/discv5/src/config.rs | 20 +- crates/net/discv5/src/lib.rs | 6 +- crates/net/dns/src/config.rs | 2 +- crates/net/dns/src/error.rs | 2 +- crates/net/dns/src/lib.rs | 12 +- crates/net/dns/src/query.rs | 4 +- crates/net/dns/src/resolver.rs | 6 +- crates/net/dns/src/sync.rs | 2 +- crates/net/downloaders/src/bodies/bodies.rs | 10 +- crates/net/downloaders/src/bodies/noop.rs | 2 +- crates/net/downloaders/src/bodies/queue.rs | 2 +- crates/net/downloaders/src/bodies/request.rs | 8 +- crates/net/downloaders/src/bodies/task.rs | 10 +- crates/net/downloaders/src/headers/noop.rs | 2 +- .../src/headers/reverse_headers.rs | 36 +-- crates/net/downloaders/src/headers/task.rs | 12 +- .../net/downloaders/src/headers/test_utils.rs | 2 +- crates/net/downloaders/src/lib.rs | 4 +- .../src/test_utils/bodies_client.rs | 2 +- crates/net/ecies/src/algorithm.rs | 12 +- crates/net/ecies/src/error.rs | 4 +- crates/net/ecies/src/lib.rs | 2 +- crates/net/ecies/src/mac.rs | 4 +- crates/net/ecies/src/stream.rs | 2 +- crates/net/ecies/src/util.rs | 2 +- crates/net/eth-wire-types/src/message.rs | 34 +-- crates/net/eth-wire-types/src/status.rs | 2 +- crates/net/eth-wire/src/capability.rs | 10 +- crates/net/eth-wire/src/errors/eth.rs | 4 +- crates/net/eth-wire/src/hello.rs | 14 +- crates/net/eth-wire/src/multiplex.rs | 40 ++-- crates/net/eth-wire/src/p2pstream.rs | 6 +- crates/net/eth-wire/src/protocol.rs | 8 +- crates/net/eth-wire/tests/fuzz_roundtrip.rs | 4 +- crates/net/nat/src/lib.rs | 24 +- crates/net/network-api/src/lib.rs | 16 +- crates/net/network-api/src/reputation.rs | 4 +- crates/net/network/src/builder.rs | 2 +- crates/net/network/src/config.rs | 41 ++-- crates/net/network/src/discovery.rs | 4 +- crates/net/network/src/error.rs | 2 +- crates/net/network/src/eth_requests.rs | 4 +- crates/net/network/src/fetch/mod.rs | 6 +- crates/net/network/src/manager.rs | 8 +- crates/net/network/src/message.rs | 14 +- crates/net/network/src/metrics.rs | 44 ++-- crates/net/network/src/network.rs | 4 +- crates/net/network/src/peers/manager.rs | 22 +- crates/net/network/src/protocol.rs | 14 +- crates/net/network/src/session/active.rs | 2 +- crates/net/network/src/session/config.rs | 4 +- crates/net/network/src/session/conn.rs | 4 +- crates/net/network/src/session/handle.rs | 4 +- crates/net/network/src/session/mod.rs | 22 +- crates/net/network/src/state.rs | 11 +- crates/net/network/src/swarm.rs | 8 +- crates/net/network/src/test_utils/init.rs | 10 +- crates/net/network/src/test_utils/testnet.rs | 4 +- 
crates/net/network/src/transactions/mod.rs | 22 +- crates/net/p2p/src/bodies/downloader.rs | 2 +- crates/net/p2p/src/bodies/response.rs | 2 +- crates/net/p2p/src/error.rs | 12 +- crates/net/p2p/src/full_block.rs | 16 +- crates/net/p2p/src/headers/downloader.rs | 13 +- crates/net/p2p/src/headers/mod.rs | 2 +- crates/net/p2p/src/lib.rs | 2 +- crates/net/p2p/src/priority.rs | 6 +- crates/net/p2p/src/sync.rs | 2 +- crates/net/types/src/lib.rs | 12 +- crates/net/types/src/node_record.rs | 8 +- crates/node-core/src/args/dev.rs | 2 +- crates/node-core/src/args/gas_price_oracle.rs | 2 +- crates/node-core/src/args/log.rs | 2 +- crates/node-core/src/args/mod.rs | 4 +- crates/node-core/src/args/network.rs | 11 +- crates/node-core/src/args/rpc_server.rs | 4 +- crates/node-core/src/args/utils.rs | 10 +- crates/node-core/src/cli/config.rs | 20 +- crates/node-core/src/dirs.rs | 20 +- crates/node-core/src/engine/engine_store.rs | 8 +- crates/node-core/src/engine/mod.rs | 8 +- crates/node-core/src/engine/skip_fcu.rs | 2 +- .../node-core/src/engine/skip_new_payload.rs | 2 +- crates/node-core/src/node_config.rs | 26 +-- crates/node-core/src/utils.rs | 4 +- crates/node/api/src/node.rs | 2 +- crates/node/builder/src/builder/mod.rs | 83 +++---- crates/node/builder/src/builder/states.rs | 8 +- crates/node/builder/src/components/builder.rs | 16 +- crates/node/builder/src/components/payload.rs | 2 +- crates/node/builder/src/exex.rs | 16 +- crates/node/builder/src/hooks.rs | 2 +- crates/node/builder/src/launch/common.rs | 34 +-- crates/node/builder/src/launch/mod.rs | 2 +- crates/node/builder/src/node.rs | 16 +- crates/node/builder/src/rpc.rs | 15 +- crates/node/builder/src/setup.rs | 2 +- crates/node/events/src/cl.rs | 4 +- crates/node/events/src/node.rs | 8 +- crates/optimism/consensus/src/lib.rs | 4 +- crates/optimism/evm/src/l1.rs | 16 +- crates/optimism/node/src/lib.rs | 2 +- crates/optimism/node/src/node.rs | 2 +- crates/optimism/node/src/rpc.rs | 8 +- crates/optimism/node/src/txpool.rs | 10 +- crates/optimism/payload/src/builder.rs | 4 +- crates/optimism/payload/src/error.rs | 2 +- crates/optimism/payload/src/payload.rs | 6 +- crates/payload/basic/src/lib.rs | 31 +-- crates/payload/builder/src/database.rs | 10 +- crates/payload/builder/src/noop.rs | 2 +- crates/payload/builder/src/service.rs | 29 +-- crates/payload/builder/src/test_utils.rs | 8 +- crates/payload/ethereum/src/lib.rs | 2 +- crates/payload/validator/src/lib.rs | 8 +- crates/primitives/benches/integer_list.rs | 4 +- crates/primitives/src/account.rs | 6 +- crates/primitives/src/block.rs | 16 +- crates/primitives/src/chain/spec.rs | 50 ++-- crates/primitives/src/constants/eip4844.rs | 10 +- crates/primitives/src/constants/mod.rs | 2 +- crates/primitives/src/exex/mod.rs | 12 +- crates/primitives/src/header.rs | 6 +- crates/primitives/src/integer_list.rs | 2 +- crates/primitives/src/proofs.rs | 10 +- crates/primitives/src/prune/mod.rs | 10 +- crates/primitives/src/prune/segment.rs | 4 +- crates/primitives/src/prune/target.rs | 10 +- crates/primitives/src/receipt.rs | 14 +- crates/primitives/src/revm/config.rs | 2 +- crates/primitives/src/revm/env.rs | 10 +- crates/primitives/src/revm/mod.rs | 2 +- crates/primitives/src/stage/checkpoints.rs | 2 +- crates/primitives/src/stage/id.rs | 6 +- crates/primitives/src/storage.rs | 2 +- crates/primitives/src/transaction/eip1559.rs | 4 +- crates/primitives/src/transaction/eip2930.rs | 4 +- crates/primitives/src/transaction/eip4844.rs | 6 +- crates/primitives/src/transaction/legacy.rs | 4 +- 
crates/primitives/src/transaction/mod.rs | 74 +++--- crates/primitives/src/transaction/optimism.rs | 4 +- crates/primitives/src/transaction/pooled.rs | 40 ++-- crates/primitives/src/transaction/sidecar.rs | 16 +- .../primitives/src/transaction/signature.rs | 8 +- crates/primitives/src/transaction/tx_type.rs | 10 +- crates/primitives/src/transaction/variant.rs | 26 +-- crates/primitives/src/trie/subnode.rs | 2 +- crates/prune/src/builder.rs | 6 +- crates/prune/src/metrics.rs | 4 +- crates/prune/src/pruner.rs | 28 +-- crates/prune/src/segments/account_history.rs | 4 +- crates/prune/src/segments/mod.rs | 22 +- crates/prune/src/segments/set.rs | 6 +- crates/prune/src/segments/storage_history.rs | 4 +- crates/revm/src/batch.rs | 2 +- crates/revm/src/database.rs | 6 +- crates/revm/src/state_change.rs | 2 +- crates/rpc/ipc/src/server/mod.rs | 6 +- crates/rpc/ipc/src/server/rpc_service.rs | 2 +- crates/rpc/rpc-api/src/optimism.rs | 6 +- crates/rpc/rpc-builder/src/auth.rs | 10 +- crates/rpc/rpc-builder/src/constants.rs | 2 +- crates/rpc/rpc-builder/src/cors.rs | 2 +- crates/rpc/rpc-builder/src/error.rs | 2 +- crates/rpc/rpc-builder/src/eth.rs | 2 +- crates/rpc/rpc-builder/src/lib.rs | 214 +++++++++--------- crates/rpc/rpc-builder/src/metrics.rs | 2 +- crates/rpc/rpc-builder/tests/it/utils.rs | 2 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 10 +- crates/rpc/rpc-engine-api/src/error.rs | 4 +- crates/rpc/rpc-engine-api/src/metrics.rs | 4 +- crates/rpc/rpc-layer/src/auth_client_layer.rs | 4 +- crates/rpc/rpc-layer/src/auth_layer.rs | 2 +- crates/rpc/rpc-testing-util/src/debug.rs | 6 +- crates/rpc/rpc-testing-util/src/trace.rs | 4 +- crates/rpc/rpc-types-compat/src/block.rs | 8 +- .../rpc-types-compat/src/engine/payload.rs | 40 ++-- .../rpc-types-compat/src/transaction/mod.rs | 2 +- crates/rpc/rpc-types/src/mev.rs | 24 +- crates/rpc/rpc-types/src/rpc.rs | 2 +- crates/rpc/rpc/src/debug.rs | 8 +- crates/rpc/rpc/src/eth/api/call.rs | 6 +- crates/rpc/rpc/src/eth/api/fee_history.rs | 23 +- crates/rpc/rpc/src/eth/api/fees.rs | 2 +- crates/rpc/rpc/src/eth/api/mod.rs | 14 +- crates/rpc/rpc/src/eth/api/optimism.rs | 4 +- crates/rpc/rpc/src/eth/api/pending_block.rs | 27 +-- crates/rpc/rpc/src/eth/api/state.rs | 2 +- crates/rpc/rpc/src/eth/api/transactions.rs | 12 +- crates/rpc/rpc/src/eth/bundle.rs | 4 +- crates/rpc/rpc/src/eth/cache/config.rs | 2 +- crates/rpc/rpc/src/eth/cache/mod.rs | 20 +- .../rpc/rpc/src/eth/cache/multi_consumer.rs | 2 +- crates/rpc/rpc/src/eth/error.rs | 8 +- crates/rpc/rpc/src/eth/filter.rs | 4 +- crates/rpc/rpc/src/eth/gas_oracle.rs | 8 +- crates/rpc/rpc/src/eth/id_provider.rs | 2 +- crates/rpc/rpc/src/eth/pubsub.rs | 4 +- crates/rpc/rpc/src/eth/revm_utils.rs | 26 +-- crates/rpc/rpc/src/eth/signer.rs | 4 +- crates/rpc/rpc/src/eth/utils.rs | 4 +- crates/rpc/rpc/src/lib.rs | 2 +- crates/rpc/rpc/src/result.rs | 2 +- crates/rpc/rpc/src/reth.rs | 2 +- crates/rpc/rpc/src/rpc.rs | 2 +- crates/rpc/rpc/src/trace.rs | 6 +- crates/stages/api/src/metrics/listener.rs | 2 +- crates/stages/api/src/metrics/sync_metrics.rs | 3 +- crates/stages/api/src/pipeline/ctrl.rs | 2 +- crates/stages/api/src/pipeline/mod.rs | 30 +-- crates/stages/api/src/pipeline/set.rs | 4 +- crates/stages/api/src/stage.rs | 6 +- crates/stages/api/src/test_utils.rs | 2 +- crates/stages/stages/src/stages/bodies.rs | 4 +- crates/stages/stages/src/stages/execution.rs | 44 ++-- .../stages/src/stages/hashing_account.rs | 8 +- .../stages/src/stages/hashing_storage.rs | 2 +- crates/stages/stages/src/stages/headers.rs | 4 +- 
.../src/stages/index_account_history.rs | 2 +- .../src/stages/index_storage_history.rs | 2 +- crates/stages/stages/src/stages/merkle.rs | 6 +- .../stages/src/stages/sender_recovery.rs | 8 +- crates/stages/stages/src/stages/tx_lookup.rs | 8 +- crates/stages/stages/src/test_utils/runner.rs | 8 +- .../stages/stages/src/test_utils/test_db.rs | 24 +- crates/static-file-types/src/filters.rs | 2 +- crates/static-file-types/src/segment.rs | 8 +- crates/static-file/src/event.rs | 2 +- crates/static-file/src/segments/headers.rs | 2 +- crates/static-file/src/segments/mod.rs | 6 +- crates/static-file/src/segments/receipts.rs | 2 +- .../static-file/src/segments/transactions.rs | 6 +- .../static-file/src/static_file_producer.rs | 29 +-- .../codecs/derive/src/compact/enums.rs | 8 +- .../codecs/derive/src/compact/flags.rs | 4 +- .../codecs/derive/src/compact/generator.rs | 2 +- .../storage/codecs/derive/src/compact/mod.rs | 4 +- .../codecs/derive/src/compact/structs.rs | 2 +- crates/storage/codecs/derive/src/lib.rs | 4 +- crates/storage/codecs/src/alloy/txkind.rs | 2 +- crates/storage/db/benches/hash_keys.rs | 2 +- crates/storage/db/benches/utils.rs | 2 +- crates/storage/db/src/abstraction/cursor.rs | 6 +- .../db/src/abstraction/database_metrics.rs | 6 +- crates/storage/db/src/abstraction/mock.rs | 2 +- crates/storage/db/src/abstraction/table.rs | 2 +- .../storage/db/src/abstraction/transaction.rs | 6 +- .../storage/db/src/implementation/mdbx/mod.rs | 2 +- .../storage/db/src/implementation/mdbx/tx.rs | 14 +- crates/storage/db/src/lib.rs | 2 +- crates/storage/db/src/lockfile.rs | 2 +- crates/storage/db/src/mdbx.rs | 2 +- crates/storage/db/src/metrics.rs | 12 +- crates/storage/db/src/static_file/mask.rs | 6 +- crates/storage/db/src/static_file/mod.rs | 2 +- .../storage/db/src/tables/codecs/compact.rs | 2 +- crates/storage/db/src/tables/raw.rs | 2 +- crates/storage/db/src/version.rs | 17 +- crates/storage/errors/src/lockfile.rs | 2 +- crates/storage/libmdbx-rs/src/codec.rs | 2 +- crates/storage/libmdbx-rs/src/cursor.rs | 66 +++--- crates/storage/libmdbx-rs/src/environment.rs | 33 +-- crates/storage/libmdbx-rs/src/error.rs | 6 +- crates/storage/libmdbx-rs/src/flags.rs | 68 +++--- crates/storage/libmdbx-rs/src/lib.rs | 2 +- crates/storage/libmdbx-rs/src/transaction.rs | 16 +- crates/storage/libmdbx-rs/src/txn_manager.rs | 18 +- .../storage/nippy-jar/src/compression/lz4.rs | 2 +- .../storage/nippy-jar/src/compression/zstd.rs | 14 +- crates/storage/nippy-jar/src/cursor.rs | 12 +- crates/storage/nippy-jar/src/filter/cuckoo.rs | 4 +- crates/storage/nippy-jar/src/lib.rs | 12 +- crates/storage/nippy-jar/src/phf/fmph.rs | 4 +- crates/storage/nippy-jar/src/phf/go_fmph.rs | 4 +- crates/storage/nippy-jar/src/writer.rs | 20 +- .../src/bundle_state/state_reverts.rs | 4 +- .../provider/src/providers/chain_info.rs | 2 +- .../provider/src/providers/consistent_view.rs | 4 +- .../src/providers/database/provider.rs | 6 +- crates/storage/provider/src/providers/mod.rs | 10 +- .../src/providers/state/historical.rs | 30 +-- .../provider/src/providers/state/macros.rs | 8 +- .../provider/src/providers/state/mod.rs | 2 +- .../src/providers/static_file/manager.rs | 20 +- .../src/providers/static_file/writer.rs | 12 +- .../storage/provider/src/test_utils/events.rs | 2 +- crates/storage/provider/src/traits/chain.rs | 10 +- .../storage/provider/src/traits/chain_info.rs | 4 +- crates/storage/provider/src/traits/state.rs | 2 +- .../provider/src/traits/tree_viewer.rs | 2 +- crates/storage/storage-api/src/block.rs | 8 +- 
crates/storage/storage-api/src/state.rs | 2 +- crates/tasks/src/lib.rs | 40 ++-- crates/tasks/src/pool.rs | 6 +- crates/tasks/src/shutdown.rs | 6 +- crates/tokio-util/src/event_stream.rs | 2 +- crates/tracing/src/layers.rs | 2 +- crates/transaction-pool/benches/reorder.rs | 6 +- crates/transaction-pool/benches/truncate.rs | 2 +- crates/transaction-pool/src/blobstore/disk.rs | 6 +- crates/transaction-pool/src/blobstore/mod.rs | 4 +- crates/transaction-pool/src/config.rs | 10 +- crates/transaction-pool/src/error.rs | 12 +- crates/transaction-pool/src/lib.rs | 6 +- crates/transaction-pool/src/maintain.rs | 4 +- crates/transaction-pool/src/pool/best.rs | 4 +- crates/transaction-pool/src/pool/blob.rs | 10 +- crates/transaction-pool/src/pool/listener.rs | 8 +- crates/transaction-pool/src/pool/mod.rs | 22 +- crates/transaction-pool/src/pool/parked.rs | 10 +- crates/transaction-pool/src/pool/pending.rs | 10 +- crates/transaction-pool/src/pool/txpool.rs | 8 +- .../transaction-pool/src/test_utils/mock.rs | 64 +++--- crates/transaction-pool/src/test_utils/mod.rs | 10 +- crates/transaction-pool/src/traits.rs | 38 ++-- .../src/validate/constants.rs | 2 +- crates/transaction-pool/src/validate/eth.rs | 24 +- crates/transaction-pool/src/validate/mod.rs | 14 +- crates/transaction-pool/src/validate/task.rs | 14 +- crates/trie/parallel/src/async_root.rs | 4 +- crates/trie/parallel/src/parallel_root.rs | 4 +- crates/trie/trie/src/hashed_cursor/default.rs | 4 +- .../trie/trie/src/hashed_cursor/post_state.rs | 16 +- crates/trie/trie/src/node_iter.rs | 4 +- crates/trie/trie/src/prefix_set/mod.rs | 4 +- crates/trie/trie/src/state.rs | 22 +- crates/trie/trie/src/test_utils.rs | 8 +- crates/trie/trie/src/trie.rs | 6 +- crates/trie/trie/src/walker.rs | 4 +- docs/crates/eth-wire.md | 26 +-- docs/crates/network.md | 8 +- docs/repo/layout.md | 4 +- testing/testing-utils/src/generators.rs | 12 +- .../testing-utils/src/genesis_allocator.rs | 2 +- 440 files changed, 2166 insertions(+), 2145 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 756fe6298..d73f2d29b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -156,6 +156,7 @@ use_self = "warn" missing_const_for_fn = "warn" empty_line_after_doc_comments = "warn" iter_on_single_items = "warn" +doc_markdown = "warn" unnecessary_struct_initialization = "warn" string_lit_as_bytes = "warn" diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index c18640a9a..033abdd37 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -59,10 +59,10 @@ pub struct Cli { /// port numbers that conflict with each other. /// /// Changes to the following port numbers: - /// - DISCOVERY_PORT: default + `instance` - 1 - /// - AUTH_PORT: default + `instance` * 100 - 100 - /// - HTTP_RPC_PORT: default - `instance` + 1 - /// - WS_RPC_PORT: default + `instance` * 2 - 2 + /// - `DISCOVERY_PORT`: default + `instance` - 1 + /// - `AUTH_PORT`: default + `instance` * 100 - 100 + /// - `HTTP_RPC_PORT`: default - `instance` + 1 + /// - `WS_RPC_PORT`: default + `instance` * 2 - 2 #[arg(long, value_name = "INSTANCE", global = true, default_value_t = 1, value_parser = value_parser!(u16).range(..=200))] instance: u16, @@ -90,7 +90,7 @@ impl Cli { /// Execute the configured cli command. /// /// This accepts a closure that is used to launch the node via the - /// [NodeCommand](node::NodeCommand). + /// [`NodeCommand`](node::NodeCommand). 
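// A minimal sketch (editor's illustration, not part of the upstream commit) of
// what the newly enabled `clippy::doc_markdown` lint checks. Assuming stock
// clippy behavior, code-like names in doc comments must be wrapped in
// backticks; the item below is hypothetical and only shows the before/after.

/// Opens a [`DbTool`] in read-only mode.
pub struct ReadOnlyDbToolExample;

// Written without backticks, as `/// Opens a [DbTool] in read-only mode.`,
// clippy warns (roughly) "item in documentation is missing backticks" under
// `doc_markdown = "warn"` -- the class of fix this patch applies across 440 files.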
/// /// /// # Example diff --git a/bin/reth/src/commands/db/mod.rs b/bin/reth/src/commands/db/mod.rs index 4872ea963..e7edc19a9 100644 --- a/bin/reth/src/commands/db/mod.rs +++ b/bin/reth/src/commands/db/mod.rs @@ -89,7 +89,7 @@ pub enum Subcommands { Path, } -/// db_ro_exec opens a database in read-only mode, and then execute with the provided command +/// `db_ro_exec` opens a database in read-only mode, and then executes with the provided command macro_rules! db_ro_exec { ($chain:expr, $db_path:expr, $db_args:ident, $sfp:ident, $tool:ident, $command:block) => { let db = open_db_read_only($db_path, $db_args)?; diff --git a/bin/reth/src/commands/db/tui.rs b/bin/reth/src/commands/db/tui.rs index 5a0a1c24f..005da0591 100644 --- a/bin/reth/src/commands/db/tui.rs +++ b/bin/reth/src/commands/db/tui.rs @@ -20,7 +20,7 @@ use std::{ }; use tracing::error; -/// Available keybindings for the [DbListTUI] +/// Available keybindings for the [`DbListTUI`] static CMDS: [(&str, &str); 6] = [ ("q", "Quit"), ("↑", "Entry above"), @@ -30,8 +30,8 @@ static CMDS: [(&str, &str); 6] = [ ("G", "Go to a specific page"), ]; -/// Modified version of the [ListState] struct that exposes the `offset` field. -/// Used to make the [DbListTUI] keys clickable. +/// Modified version of the [`ListState`] struct that exposes the `offset` field. +/// Used to make the [`DbListTUI`] keys clickable. struct ExpListState { pub(crate) offset: usize, } @@ -46,15 +46,15 @@ pub(crate) enum ViewMode { } enum Entries { - /// Pairs of [Table::Key] and [RawValue] + /// Pairs of [`Table::Key`] and [`RawValue`] RawValues(Vec<(T::Key, RawValue)>), - /// Pairs of [Table::Key] and [Table::Value] + /// Pairs of [`Table::Key`] and [`Table::Value`] Values(Vec>), } impl Entries { - /// Creates new empty [Entries] as [Entries::RawValues] if `raw_values == true` and as - /// [Entries::Values] if `raw == false`. + /// Creates a new empty [Entries] as [`Entries::RawValues`] if `raw_values == true` and as + /// [`Entries::Values`] if `raw == false`. const fn new_with_raw_values(raw_values: bool) -> Self { if raw_values { Self::RawValues(Vec::new()) @@ -63,8 +63,8 @@ impl Entries { } } - /// Sets the internal entries [Vec], converting the [Table::Value] into [RawValue] - /// if needed. + /// Sets the internal entries [Vec], converting the [`Table::Value`] into + /// [`RawValue`] if needed. fn set(&mut self, new_entries: Vec>) { match self { Self::RawValues(old_entries) => { @@ -83,8 +83,8 @@ } } - /// Returns an iterator over keys of the internal [Vec]. For both [Entries::RawValues] and - /// [Entries::Values], this iterator will yield [Table::Key]. + /// Returns an iterator over keys of the internal [Vec]. For both [`Entries::RawValues`] and + /// [`Entries::Values`], this iterator will yield [`Table::Key`]. const fn iter_keys(&self) -> EntriesKeyIter<'_, T> { EntriesKeyIter { entries: self, index: 0 } } @@ -210,7 +210,7 @@ where self.reset(); } - /// Show the [DbListTUI] in the terminal. + /// Show the [`DbListTUI`] in the terminal. pub(crate) fn run(mut self) -> eyre::Result<()> { // Setup backend enable_raw_mode()?; diff --git a/bin/reth/src/commands/node/mod.rs b/bin/reth/src/commands/node/mod.rs index 9f2a4d67a..7f8e278f7 100644 --- a/bin/reth/src/commands/node/mod.rs +++ b/bin/reth/src/commands/node/mod.rs @@ -62,10 +62,10 @@ pub struct NodeCommand { /// port numbers that conflict with each other.
/// /// Changes to the following port numbers: - /// - DISCOVERY_PORT: default + `instance` - 1 - /// - AUTH_PORT: default + `instance` * 100 - 100 - /// - HTTP_RPC_PORT: default - `instance` + 1 - /// - WS_RPC_PORT: default + `instance` * 2 - 2 + /// - `DISCOVERY_PORT`: default + `instance` - 1 + /// - `AUTH_PORT`: default + `instance` * 100 - 100 + /// - `HTTP_RPC_PORT`: default - `instance` + 1 + /// - `WS_RPC_PORT`: default + `instance` * 2 - 2 #[arg(long, value_name = "INSTANCE", global = true, default_value_t = 1, value_parser = value_parser!(u16).range(..=200))] pub instance: u16, @@ -119,7 +119,7 @@ impl NodeCommand { Self::parse() } - /// Parsers only the default [NodeCommand] arguments from the given iterator + /// Parses only the default [`NodeCommand`] arguments from the given iterator pub fn try_parse_args_from(itr: I) -> Result where I: IntoIterator, diff --git a/bin/reth/src/commands/stage/dump/execution.rs b/bin/reth/src/commands/stage/dump/execution.rs index 919503b24..2ace21aa7 100644 --- a/bin/reth/src/commands/stage/dump/execution.rs +++ b/bin/reth/src/commands/stage/dump/execution.rs @@ -117,8 +117,8 @@ fn import_tables_with_range( Ok(()) } -/// Dry-run an unwind to FROM block, so we can get the PlainStorageState and -/// PlainAccountState safely. There might be some state dependency from an address +/// Dry-run an unwind to FROM block, so we can get the `PlainStorageState` and +/// `PlainAccountState` safely. There might be some state dependency from an address /// which hasn't been changed in the given range. async fn unwind_and_copy( db_tool: &DbTool, diff --git a/bin/reth/src/commands/stage/dump/mod.rs b/bin/reth/src/commands/stage/dump/mod.rs index c17962ab6..3ba4fd789 100644 --- a/bin/reth/src/commands/stage/dump/mod.rs +++ b/bin/reth/src/commands/stage/dump/mod.rs @@ -70,9 +70,9 @@ pub struct Command { pub enum Stages { /// Execution stage. Execution(StageCommand), - /// StorageHashing stage. + /// `StorageHashing` stage. StorageHashing(StageCommand), - /// AccountHashing stage. + /// `AccountHashing` stage. AccountHashing(StageCommand), /// Merkle stage. Merkle(StageCommand), diff --git a/bin/reth/src/utils.rs b/bin/reth/src/utils.rs index f312b2d1b..5f8c57976 100644 --- a/bin/reth/src/utils.rs +++ b/bin/reth/src/utils.rs @@ -29,7 +29,7 @@ pub use reth_node_core::utils::*; pub struct DbTool { /// The provider factory that the db tool will use. pub provider_factory: ProviderFactory, - /// The [ChainSpec] that the db tool will use. + /// The [`ChainSpec`] that the db tool will use. pub chain: Arc, } @@ -126,7 +126,7 @@ impl DbTool { self.provider_factory.db_ref().view(|tx| tx.get::(key))?.map_err(|e| eyre::eyre!(e)) } - /// Grabs the content of the DupSort table for the given key and subkey + /// Grabs the content of the `DupSort` table for the given key and subkey pub fn get_dup(&self, key: T::Key, subkey: T::SubKey) -> Result> { self.provider_factory .db_ref() diff --git a/book/cli/reth.md b/book/cli/reth.md index ae1728171..a4ba8f3d3 100644 --- a/book/cli/reth.md +++ b/book/cli/reth.md @@ -38,7 +38,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other.
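// A minimal sketch (editor's illustration, not part of the upstream commit) of
// the instance port arithmetic documented above. The default port values are
// assumptions based on reth's usual defaults, not taken from this diff.
fn instance_ports(instance: u16) -> (u16, u16, u16, u16) {
    const DISCOVERY_PORT: u16 = 30303; // assumed default discovery port
    const AUTH_PORT: u16 = 8551; // assumed default auth (engine) port
    const HTTP_RPC_PORT: u16 = 8545; // assumed default HTTP RPC port
    const WS_RPC_PORT: u16 = 8546; // assumed default WS RPC port
    (
        DISCOVERY_PORT + instance - 1, // default + `instance` - 1
        AUTH_PORT + instance * 100 - 100, // default + `instance` * 100 - 100
        HTTP_RPC_PORT - instance + 1, // default - `instance` + 1
        WS_RPC_PORT + instance * 2 - 2, // default + `instance` * 2 - 2
    )
}
// instance = 1 keeps every default: (30303, 8551, 8545, 8546);
// instance = 2 gives (30304, 8651, 8544, 8548), so no two instances collide.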
- Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/config.md b/book/cli/reth/config.md index 72c195e4a..1b2a89c66 100644 --- a/book/cli/reth/config.md +++ b/book/cli/reth/config.md @@ -29,7 +29,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/db.md b/book/cli/reth/db.md index 8f4a02be5..85edd12af 100644 --- a/book/cli/reth/db.md +++ b/book/cli/reth/db.md @@ -46,7 +46,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/db/checksum.md b/book/cli/reth/db/checksum.md index 66f0a86a9..912a2761c 100644 --- a/book/cli/reth/db/checksum.md +++ b/book/cli/reth/db/checksum.md @@ -47,7 +47,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/db/clear.md b/book/cli/reth/db/clear.md index aefceb94d..6e89214ce 100644 --- a/book/cli/reth/db/clear.md +++ b/book/cli/reth/db/clear.md @@ -39,7 +39,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/db/clear/mdbx.md b/book/cli/reth/db/clear/mdbx.md index 5befebf64..72917730f 100644 --- a/book/cli/reth/db/clear/mdbx.md +++ b/book/cli/reth/db/clear/mdbx.md @@ -38,7 +38,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/db/clear/static-file.md b/book/cli/reth/db/clear/static-file.md index 18ec5b5ca..e15f7795d 100644 --- a/book/cli/reth/db/clear/static-file.md +++ b/book/cli/reth/db/clear/static-file.md @@ -41,7 +41,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/db/clear/static_file.md b/book/cli/reth/db/clear/static_file.md index 2c503dd71..363da7967 100644 --- a/book/cli/reth/db/clear/static_file.md +++ b/book/cli/reth/db/clear/static_file.md @@ -16,33 +16,33 @@ Arguments: Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 + [default: 1] -h, --help @@ -51,7 +51,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -61,12 +61,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -76,22 +76,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -99,12 +99,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -115,7 +115,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db/diff.md b/book/cli/reth/db/diff.md index 898e05db3..024ebaeac 100644 --- a/book/cli/reth/db/diff.md +++ b/book/cli/reth/db/diff.md @@ -37,7 +37,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/db/drop.md b/book/cli/reth/db/drop.md index 25facee03..f03222f51 100644 --- a/book/cli/reth/db/drop.md +++ b/book/cli/reth/db/drop.md @@ -37,7 +37,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/db/get.md b/book/cli/reth/db/get.md index 366ab792d..c23eab0d8 100644 --- a/book/cli/reth/db/get.md +++ b/book/cli/reth/db/get.md @@ -39,7 +39,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/db/get/mdbx.md b/book/cli/reth/db/get/mdbx.md index 5b2ce0b0f..abac45932 100644 --- a/book/cli/reth/db/get/mdbx.md +++ b/book/cli/reth/db/get/mdbx.md @@ -47,7 +47,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/db/get/static-file.md b/book/cli/reth/db/get/static-file.md index 8381d4697..47ad2603a 100644 --- a/book/cli/reth/db/get/static-file.md +++ b/book/cli/reth/db/get/static-file.md @@ -47,7 +47,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/db/get/static_file.md b/book/cli/reth/db/get/static_file.md index 12df536f2..51071116c 100644 --- a/book/cli/reth/db/get/static_file.md +++ b/book/cli/reth/db/get/static_file.md @@ -19,13 +19,13 @@ Arguments: Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] --raw @@ -34,21 +34,21 @@ Options: --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 + [default: 1] -h, --help @@ -57,7 +57,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -67,12 +67,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -82,22 +82,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -105,12 +105,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -121,7 +121,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info diff --git a/book/cli/reth/db/list.md b/book/cli/reth/db/list.md index 130552420..0941536a9 100644 --- a/book/cli/reth/db/list.md +++ b/book/cli/reth/db/list.md @@ -80,7 +80,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/db/path.md b/book/cli/reth/db/path.md index 0c65fe03a..5318be6f7 100644 --- a/book/cli/reth/db/path.md +++ b/book/cli/reth/db/path.md @@ -34,7 +34,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/db/static_file.md b/book/cli/reth/db/static_file.md index a6f965075..16246c8d4 100644 --- a/book/cli/reth/db/static_file.md +++ b/book/cli/reth/db/static_file.md @@ -29,12 +29,12 @@ Options: -f, --from Starting block for the static_file - + [default: 0] -b, --block-interval Number of blocks in the static_file - + [default: 500000] --chain @@ -48,7 +48,7 @@ Options: -p, --parallel Sets the number of static files built in parallel. Note: Each parallel build is memory-intensive - + [default: 1] --only-stats @@ -88,7 +88,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/db/stats.md b/book/cli/reth/db/stats.md index ab130217a..835a1ad60 100644 --- a/book/cli/reth/db/stats.md +++ b/book/cli/reth/db/stats.md @@ -47,7 +47,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/db/version.md b/book/cli/reth/db/version.md index 57d9df550..932928b26 100644 --- a/book/cli/reth/db/version.md +++ b/book/cli/reth/db/version.md @@ -34,7 +34,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/debug.md b/book/cli/reth/debug.md index 5ce34dba3..2779b8d77 100644 --- a/book/cli/reth/debug.md +++ b/book/cli/reth/debug.md @@ -31,7 +31,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/dump-genesis.md b/book/cli/reth/dump-genesis.md index 74966a5e5..5add92402 100644 --- a/book/cli/reth/dump-genesis.md +++ b/book/cli/reth/dump-genesis.md @@ -23,7 +23,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 9c320d0b6..5386fd029 100644 --- a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -43,7 +43,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md index e5f3f1a8d..48421076c 100644 --- a/book/cli/reth/init-state.md +++ b/book/cli/reth/init-state.md @@ -54,7 +54,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/init.md b/book/cli/reth/init.md index f9e825d4e..f57388d68 100644 --- a/book/cli/reth/init.md +++ b/book/cli/reth/init.md @@ -34,7 +34,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index 999601f04..78b424966 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -37,7 +37,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] @@ -79,10 +79,10 @@ Networking: [default: 30303] --discovery.v5.addr - The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by RLPx address, if it's also IPv4 + The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 --discovery.v5.addr.ipv6 - The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by RLPx address, if it's also IPv6 + The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 --discovery.v5.port The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discv5.addr` is set @@ -171,8 +171,8 @@ Networking: Experimental, for usage in research. Sets the max accumulated byte size of transactions to request in one request. - Since RLPx protocol version 68, the byte size of a transaction is shared as metadata in a - transaction announcement (see RLPx specs). This allows a node to request a specific size + Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a + transaction announcement (see `RLPx` specs). This allows a node to request a specific size response. By default, nodes request only 128 KiB worth of transactions, but should a peer request @@ -218,7 +218,7 @@ RPC: [default: 8546] --ws.origins - Origins from which to accept WebSocket requests + Origins from which to accept `WebSocket` requests --ws.api Rpc Modules to be configured for the WS server @@ -498,7 +498,7 @@ Dev testnet: --dev.block-time Interval between blocks. - Parses strings using [humantime::parse_duration] + Parses strings using [`humantime::parse_duration`] --dev.block-time 12s Pruning: diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md index e60471d1a..578aee341 100644 --- a/book/cli/reth/p2p.md +++ b/book/cli/reth/p2p.md @@ -42,7 +42,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
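// A minimal sketch (editor's illustration, not part of the upstream commit) of
// the `--dev.block-time` parsing mentioned in the node help text above,
// assuming the public API of the `humantime` crate it references.
use std::time::Duration;

fn parse_block_time(s: &str) -> Result<Duration, humantime::DurationError> {
    // Accepts strings such as "12s" or "500ms" and turns them into a Duration.
    humantime::parse_duration(s)
}
// parse_block_time("12s") == Ok(Duration::from_secs(12))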
- Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] @@ -73,10 +73,10 @@ Networking: [default: 30303] --discovery.v5.addr - The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by RLPx address, if it's also IPv4 + The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 --discovery.v5.addr.ipv6 - The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by RLPx address, if it's also IPv6 + The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 --discovery.v5.port The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discv5.addr` is set @@ -165,8 +165,8 @@ Networking: Experimental, for usage in research. Sets the max accumulated byte size of transactions to request in one request. - Since RLPx protocol version 68, the byte size of a transaction is shared as metadata in a - transaction announcement (see RLPx specs). This allows a node to request a specific size + Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a + transaction announcement (see `RLPx` specs). This allows a node to request a specific size response. By default, nodes request only 128 KiB worth of transactions, but should a peer request diff --git a/book/cli/reth/p2p/body.md b/book/cli/reth/p2p/body.md index 6e3aa2cd6..3b6c6b162 100644 --- a/book/cli/reth/p2p/body.md +++ b/book/cli/reth/p2p/body.md @@ -18,7 +18,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/p2p/header.md b/book/cli/reth/p2p/header.md index dce6e545a..c00b81ddb 100644 --- a/book/cli/reth/p2p/header.md +++ b/book/cli/reth/p2p/header.md @@ -18,7 +18,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/recover.md b/book/cli/reth/recover.md index 6d6531e2d..9ffd8eb70 100644 --- a/book/cli/reth/recover.md +++ b/book/cli/reth/recover.md @@ -27,7 +27,7 @@ Options: Max number of instances is 200. 
It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/recover/storage-tries.md b/book/cli/reth/recover/storage-tries.md index baffe7ec6..245b95ec6 100644 --- a/book/cli/reth/recover/storage-tries.md +++ b/book/cli/reth/recover/storage-tries.md @@ -34,7 +34,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/stage.md b/book/cli/reth/stage.md index 3a7ba05a8..17a888b6e 100644 --- a/book/cli/reth/stage.md +++ b/book/cli/reth/stage.md @@ -30,7 +30,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md index 7b4ae73b8..c611b0cd0 100644 --- a/book/cli/reth/stage/drop.md +++ b/book/cli/reth/stage/drop.md @@ -34,7 +34,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/stage/dump.md b/book/cli/reth/stage/dump.md index d1ff9dd82..ecff50054 100644 --- a/book/cli/reth/stage/dump.md +++ b/book/cli/reth/stage/dump.md @@ -8,8 +8,8 @@ Usage: reth stage dump [OPTIONS] Commands: execution Execution stage - storage-hashing StorageHashing stage - account-hashing AccountHashing stage + storage-hashing `StorageHashing` stage + account-hashing `AccountHashing` stage merkle Merkle stage help Print this message or the help of the given subcommand(s) @@ -41,7 +41,7 @@ Options: Max number of instances is 200. 
It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/stage/dump/account-hashing.md b/book/cli/reth/stage/dump/account-hashing.md index 5de3b55f5..4c5030a55 100644 --- a/book/cli/reth/stage/dump/account-hashing.md +++ b/book/cli/reth/stage/dump/account-hashing.md @@ -1,6 +1,6 @@ # reth stage dump account-hashing -AccountHashing stage +`AccountHashing` stage ```bash $ reth stage dump account-hashing --help @@ -26,7 +26,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/stage/dump/execution.md b/book/cli/reth/stage/dump/execution.md index 0abd21582..420602f8a 100644 --- a/book/cli/reth/stage/dump/execution.md +++ b/book/cli/reth/stage/dump/execution.md @@ -26,7 +26,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/stage/dump/merkle.md b/book/cli/reth/stage/dump/merkle.md index c3c1a08d3..aa32e5e32 100644 --- a/book/cli/reth/stage/dump/merkle.md +++ b/book/cli/reth/stage/dump/merkle.md @@ -26,7 +26,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/stage/dump/storage-hashing.md b/book/cli/reth/stage/dump/storage-hashing.md index e110b43d0..5ef1483de 100644 --- a/book/cli/reth/stage/dump/storage-hashing.md +++ b/book/cli/reth/stage/dump/storage-hashing.md @@ -1,6 +1,6 @@ # reth stage dump storage-hashing -StorageHashing stage +`StorageHashing` stage ```bash $ reth stage dump storage-hashing --help @@ -26,7 +26,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md index 07c5be00b..a84bf06b8 100644 --- a/book/cli/reth/stage/run.md +++ b/book/cli/reth/stage/run.md @@ -79,7 +79,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] @@ -110,10 +110,10 @@ Networking: [default: 30303] --discovery.v5.addr - The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by RLPx address, if it's also IPv4 + The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 --discovery.v5.addr.ipv6 - The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by RLPx address, if it's also IPv6 + The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 --discovery.v5.port The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discv5.addr` is set @@ -202,8 +202,8 @@ Networking: Experimental, for usage in research. Sets the max accumulated byte size of transactions to request in one request. - Since RLPx protocol version 68, the byte size of a transaction is shared as metadata in a - transaction announcement (see RLPx specs). This allows a node to request a specific size + Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a + transaction announcement (see `RLPx` specs). This allows a node to request a specific size response. 
By default, nodes request only 128 KiB worth of transactions, but should a peer request diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index d998a577c..23e90033f 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -39,7 +39,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] @@ -89,10 +89,10 @@ Networking: [default: 30303] --discovery.v5.addr - The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by RLPx address, if it's also IPv4 + The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 --discovery.v5.addr.ipv6 - The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by RLPx address, if it's also IPv6 + The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 --discovery.v5.port The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discv5.addr` is set @@ -181,8 +181,8 @@ Networking: Experimental, for usage in research. Sets the max accumulated byte size of transactions to request in one request. - Since RLPx protocol version 68, the byte size of a transaction is shared as metadata in a - transaction announcement (see RLPx specs). This allows a node to request a specific size + Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a + transaction announcement (see `RLPx` specs). This allows a node to request a specific size response. By default, nodes request only 128 KiB worth of transactions, but should a peer request diff --git a/book/cli/reth/stage/unwind/num-blocks.md b/book/cli/reth/stage/unwind/num-blocks.md index e3b393abe..74a3f469b 100644 --- a/book/cli/reth/stage/unwind/num-blocks.md +++ b/book/cli/reth/stage/unwind/num-blocks.md @@ -38,7 +38,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/stage/unwind/to-block.md b/book/cli/reth/stage/unwind/to-block.md index e836463b4..4994206a8 100644 --- a/book/cli/reth/stage/unwind/to-block.md +++ b/book/cli/reth/stage/unwind/to-block.md @@ -38,7 +38,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
- Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/test-vectors.md b/book/cli/reth/test-vectors.md index 501464534..da1b3c933 100644 --- a/book/cli/reth/test-vectors.md +++ b/book/cli/reth/test-vectors.md @@ -27,7 +27,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/book/cli/reth/test-vectors/tables.md b/book/cli/reth/test-vectors/tables.md index a0fd602c3..3b8f52f2c 100644 --- a/book/cli/reth/test-vectors/tables.md +++ b/book/cli/reth/test-vectors/tables.md @@ -27,7 +27,7 @@ Options: Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 [default: 1] diff --git a/crates/blockchain-tree-api/src/error.rs b/crates/blockchain-tree-api/src/error.rs index ae9365aa6..b791d747c 100644 --- a/crates/blockchain-tree-api/src/error.rs +++ b/crates/blockchain-tree-api/src/error.rs @@ -79,13 +79,13 @@ impl CanonicalError { } /// Returns `true` if the underlying error matches - /// [BlockchainTreeError::BlockHashNotFoundInChain]. + /// [`BlockchainTreeError::BlockHashNotFoundInChain`]. pub const fn is_block_hash_not_found(&self) -> bool { matches!(self, Self::BlockchainTree(BlockchainTreeError::BlockHashNotFoundInChain { .. })) } /// Returns `Some(BlockNumber)` if the underlying error matches - /// [CanonicalError::OptimisticTargetRevert]. + /// [`CanonicalError::OptimisticTargetRevert`]. 
pub const fn optimistic_revert_block_number(&self) -> Option<BlockNumber> { match self { Self::OptimisticTargetRevert(block_number) => Some(*block_number), @@ -104,27 +104,27 @@ pub struct InsertBlockError { // === impl InsertBlockError === impl InsertBlockError { - /// Create a new InsertInvalidBlockError + /// Create a new `InsertInvalidBlockError` pub fn new(block: SealedBlock, kind: InsertBlockErrorKind) -> Self { Self { inner: InsertBlockErrorData::boxed(block, kind) } } - /// Create a new InsertInvalidBlockError from a tree error + /// Create a new `InsertInvalidBlockError` from a tree error pub fn tree_error(error: BlockchainTreeError, block: SealedBlock) -> Self { Self::new(block, InsertBlockErrorKind::Tree(error)) } - /// Create a new InsertInvalidBlockError from a consensus error + /// Create a new `InsertInvalidBlockError` from a consensus error pub fn consensus_error(error: ConsensusError, block: SealedBlock) -> Self { Self::new(block, InsertBlockErrorKind::Consensus(error)) } - /// Create a new InsertInvalidBlockError from a consensus error + /// Create a new `InsertInvalidBlockError` from a sender recovery error pub fn sender_recovery_error(block: SealedBlock) -> Self { Self::new(block, InsertBlockErrorKind::SenderRecovery) } - /// Create a new InsertInvalidBlockError from an execution error + /// Create a new `InsertInvalidBlockError` from an execution error pub fn execution_error(error: BlockExecutionError, block: SealedBlock) -> Self { Self::new(block, InsertBlockErrorKind::Execution(error)) } @@ -231,7 +231,7 @@ pub enum InsertBlockErrorKind { /// Canonical error. #[error(transparent)] Canonical(#[from] CanonicalError), - /// BlockchainTree error. + /// `BlockchainTree` error. #[error(transparent)] BlockchainTree(BlockchainTreeError), } diff --git a/crates/blockchain-tree-api/src/lib.rs b/crates/blockchain-tree-api/src/lib.rs index 6e1f7b10d..56995d08e 100644 --- a/crates/blockchain-tree-api/src/lib.rs +++ b/crates/blockchain-tree-api/src/lib.rs @@ -18,11 +18,11 @@ use std::collections::BTreeMap; pub mod error; -/// * [BlockchainTreeEngine::insert_block]: Connect block to chain, execute it and if valid insert +/// * [`BlockchainTreeEngine::insert_block`]: Connect block to chain, execute it and if valid insert /// block inside tree. -/// * [BlockchainTreeEngine::finalize_block]: Remove chains that join to now finalized block, as +/// * [`BlockchainTreeEngine::finalize_block`]: Remove chains that join to now finalized block, as /// chain becomes invalid. -/// * [BlockchainTreeEngine::make_canonical]: Check if we have the hash of block that we want to +/// * [`BlockchainTreeEngine::make_canonical`]: Check if we have the hash of block that we want to /// finalize and commit it to db. If we don't have the block, syncing should start to fetch the /// blocks from p2p. Do reorg in tables if canonical chain if needed. pub trait BlockchainTreeEngine: BlockchainTreeViewer + Send + Sync { @@ -60,8 +60,8 @@ pub trait BlockchainTreeEngine: BlockchainTreeViewer + Send + Sync { /// The `validation_kind` parameter controls which validation checks are performed. /// /// Caution: If the block was received from the consensus layer, this should always be called - /// with [BlockValidationKind::Exhaustive] to validate the state root, if possible to adhere to - /// the engine API spec. + /// with [`BlockValidationKind::Exhaustive`] to validate the state root, if possible to adhere + /// to the engine API spec.
fn insert_block( &self, block: SealedBlockWithSenders, @@ -157,14 +157,14 @@ impl std::fmt::Display for BlockValidationKind { } } -/// All possible outcomes of a canonicalization attempt of [BlockchainTreeEngine::make_canonical]. +/// All possible outcomes of a canonicalization attempt of [`BlockchainTreeEngine::make_canonical`]. #[derive(Debug, Clone, PartialEq, Eq)] pub enum CanonicalOutcome { /// The block is already canonical. AlreadyCanonical { /// Block number and hash of current head. head: BlockNumHash, - /// The corresponding [SealedHeader] that was attempted to be made a current head and + /// The corresponding [`SealedHeader`] that was attempted to be made a current head and /// is already canonical. header: SealedHeader, }, @@ -201,13 +201,13 @@ impl CanonicalOutcome { /// From Engine API spec, block inclusion can be valid, accepted or invalid. /// Invalid case is already covered by error, but we need to make distinction /// between valid blocks that extend canonical chain and the ones that fork off -/// into side chains (see [BlockAttachment]). If we don't know the block +/// into side chains (see [`BlockAttachment`]). If we don't know the block /// parent we are returning Disconnected status as we can't make a claim if /// block is valid or not. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum BlockStatus { /// If block is valid and block extends canonical chain. - /// In BlockchainTree terms, it forks off canonical tip. + /// In `BlockchainTree` terms, it forks off canonical tip. Valid(BlockAttachment), /// If block is valid and block forks off canonical chain. /// If blocks is not connected to canonical chain. @@ -286,7 +286,7 @@ pub trait BlockchainTreeViewer: Send + Sync { /// Returns the _buffered_ (disconnected) header with matching hash from the internal buffer if /// it exists. /// - /// Caution: Unlike [Self::block_by_hash] this will only return headers that are currently + /// Caution: Unlike [`Self::block_by_hash`] this will only return headers that are currently /// disconnected from the canonical chain. fn buffered_header_by_hash(&self, block_hash: BlockHash) -> Option; @@ -304,7 +304,7 @@ pub trait BlockchainTreeViewer: Send + Sync { /// If there is a buffered block with the given hash, this returns the block itself. fn lowest_buffered_ancestor(&self, hash: BlockHash) -> Option; - /// Return BlockchainTree best known canonical chain tip (BlockHash, BlockNumber) + /// Return `BlockchainTree` best known canonical chain tip (`BlockHash`, `BlockNumber`) fn canonical_tip(&self) -> BlockNumHash; /// Return block number and hash that extends the canonical chain tip by one. @@ -325,7 +325,7 @@ pub trait BlockchainTreeViewer: Send + Sync { /// Returns the pending block and its receipts in one call. /// /// This exists to prevent a potential data race if the pending block changes in between - /// [Self::pending_block] and [Self::pending_receipts] calls. + /// [`Self::pending_block`] and [`Self::pending_receipts`] calls. fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)>; /// Returns the pending receipts if there is one. diff --git a/crates/blockchain-tree/src/block_buffer.rs b/crates/blockchain-tree/src/block_buffer.rs index 153ff0288..4af77d4d9 100644 --- a/crates/blockchain-tree/src/block_buffer.rs +++ b/crates/blockchain-tree/src/block_buffer.rs @@ -7,10 +7,10 @@ use std::collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}; /// It allows to store unconnected blocks for potential future inclusion. 
/// /// The buffer has three main functionalities: -/// * [BlockBuffer::insert_block] for inserting blocks inside the buffer. -/// * [BlockBuffer::remove_block_with_children] for connecting blocks if the parent gets received +/// * [`BlockBuffer::insert_block`] for inserting blocks inside the buffer. +/// * [`BlockBuffer::remove_block_with_children`] for connecting blocks if the parent gets received /// and inserted. -/// * [BlockBuffer::remove_old_blocks] to remove old blocks that precede the finalized number. +/// * [`BlockBuffer::remove_old_blocks`] to remove old blocks that precede the finalized number. /// /// Note: Buffer is limited by number of blocks that it can contain and eviction of the block /// is done by last recently used block. @@ -22,7 +22,7 @@ pub struct BlockBuffer { /// to the buffered children. /// Allows connecting buffered blocks by parent. pub(crate) parent_to_child: HashMap>, - /// BTreeMap tracking the earliest blocks by block number. + /// `BTreeMap` tracking the earliest blocks by block number. /// Used for removal of old blocks that precede finalization. pub(crate) earliest_blocks: BTreeMap>, /// LRU used for tracing oldest inserted blocks that are going to be diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index 292f5a12a..fd4372ab9 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -18,14 +18,14 @@ pub struct BlockIndices { /// Last finalized block. last_finalized_block: BlockNumber, /// Non-finalized canonical chain. Contains N number (depends on `finalization_depth`) of - /// blocks. These blocks are found in fork_to_child but not inside `blocks_to_chain` or + /// blocks. These blocks are found in `fork_to_child` but not inside `blocks_to_chain` or /// `number_to_block` as those are sidechain specific indices. canonical_chain: CanonicalChain, /// Index needed when discarding the chain, so we can remove connected chains from tree. /// /// This maintains insertion order for all child blocks, so - /// [BlockIndices::pending_block_num_hash] returns always the same block: the first child block - /// we inserted. + /// [`BlockIndices::pending_block_num_hash`] returns always the same block: the first child + /// block we inserted. /// /// NOTE: It contains just blocks that are forks as a key and not all blocks. fork_to_child: HashMap>, @@ -122,7 +122,7 @@ impl BlockIndices { self.fork_to_child.entry(first.parent_hash).or_default().insert_if_absent(first.hash()); } - /// Get the [BlockchainId] the given block belongs to if it exists. + /// Get the [`BlockchainId`] the given block belongs to if it exists. pub(crate) fn get_block_chain_id(&self, block: &BlockHash) -> Option { self.blocks_to_chain.get(block).cloned() } diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index f006888fd..a267d5ddb 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -46,13 +46,13 @@ use tracing::{debug, error, info, instrument, trace, warn}; /// canonical blocks to the tree (by removing them from the database), and commit the sidechain /// blocks to the database to become the canonical chain (reorg). 
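As an aside to the `BlockBuffer` hunks above: the doc comments describe a few indices working together, and the sketch below restates their shape in self-contained Rust. All types are simplified stand-ins (the real buffer stores full sealed blocks, enforces a size limit, and evicts by LRU), so read it as an assumption-laden illustration only.

```rust
use std::collections::{BTreeMap, HashMap, HashSet};

// Stand-ins so the sketch is self-contained; not reth's actual types.
type BlockHash = [u8; 32];
type BlockNumber = u64;

/// Simplified shape of the buffer indices described in the diff: blocks by
/// hash, a parent -> children index used to reconnect chains once a missing
/// parent arrives, and a number-ordered index used to evict stale blocks.
struct BlockBufferSketch {
    blocks: HashMap<BlockHash, BlockNumber>,
    parent_to_child: HashMap<BlockHash, HashSet<BlockHash>>,
    earliest_blocks: BTreeMap<BlockNumber, HashSet<BlockHash>>,
}

impl BlockBufferSketch {
    /// Drop every buffered block whose number precedes the finalized number,
    /// mirroring what `remove_old_blocks` is documented to do.
    fn remove_old_blocks(&mut self, finalized: BlockNumber) {
        // `split_off` leaves the stale (< finalized) entries behind and
        // returns the rest; swap them so `stale` holds what must go.
        let keep = self.earliest_blocks.split_off(&finalized);
        let stale = std::mem::replace(&mut self.earliest_blocks, keep);
        for hashes in stale.into_values() {
            for hash in hashes {
                // Drop the stale block itself and any children index it owned.
                self.blocks.remove(&hash);
                self.parent_to_child.remove(&hash);
            }
        }
    }
}
```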
/// -/// include_mmd!("docs/mermaid/tree.mmd") +/// `include_mmd!("docs/mermaid/tree.mmd")` /// /// # Main functions -/// * [BlockchainTree::insert_block]: Connect a block to a chain, execute it, and if valid, insert +/// * [`BlockchainTree::insert_block`]: Connect a block to a chain, execute it, and if valid, insert /// the block into the tree. -/// * [BlockchainTree::finalize_block]: Remove chains that branch off of the now finalized block. -/// * [BlockchainTree::make_canonical]: Check if we have the hash of a block that is the current +/// * [`BlockchainTree::finalize_block`]: Remove chains that branch off of the now finalized block. +/// * [`BlockchainTree::make_canonical`]: Check if we have the hash of a block that is the current /// canonical head and commit it to db. #[derive(Debug)] pub struct BlockchainTree { @@ -179,9 +179,9 @@ where /// Check if the block is known to blockchain tree or database and return its status. /// /// Function will check: - /// * if block is inside database returns [BlockStatus::Valid]. - /// * if block is inside buffer returns [BlockStatus::Disconnected]. - /// * if block is part of the canonical returns [BlockStatus::Valid]. + /// * if block is inside database returns [`BlockStatus::Valid`]. + /// * if block is inside buffer returns [`BlockStatus::Disconnected`]. + /// * if block is part of the canonical returns [`BlockStatus::Valid`]. /// /// Returns an error if /// - an error occurred while reading from the database. @@ -225,7 +225,7 @@ where Ok(None) } - /// Expose internal indices of the BlockchainTree. + /// Expose internal indices of the `BlockchainTree`. #[inline] pub const fn block_indices(&self) -> &BlockIndices { self.state.block_indices() @@ -272,7 +272,7 @@ where /// needed for evm `BLOCKHASH` opcode. /// Return none if: /// * block unknown. - /// * chain_id not present in state. + /// * `chain_id` not present in state. pub fn post_state_data(&self, block_hash: BlockHash) -> Option { trace!(target: "blockchain_tree", ?block_hash, "Searching for post state data"); @@ -730,8 +730,8 @@ where /// Check if block is found inside a sidechain and its attachment. /// - /// if it is canonical or extends the canonical chain, return [BlockAttachment::Canonical] - /// if it does not extend the canonical chain, return [BlockAttachment::HistoricalFork] + /// if it is canonical or extends the canonical chain, return [`BlockAttachment::Canonical`] + /// if it does not extend the canonical chain, return [`BlockAttachment::HistoricalFork`] /// if the block is not in the tree or its chain id is not valid, return None #[track_caller] fn is_block_inside_sidechain(&self, block: &BlockNumHash) -> Option<BlockAttachment> { @@ -754,7 +754,7 @@ where /// Insert a block (with recovered senders) into the tree. /// - /// Returns the [BlockStatus] on success: + /// Returns the [`BlockStatus`] on success: /// /// - The block is already part of a sidechain in the tree, or /// - The block is already part of the canonical chain, or @@ -767,8 +767,8 @@ where /// This means that if the block becomes canonical, we need to fetch the missing blocks over /// P2P. /// - /// If the [BlockValidationKind::SkipStateRootValidation] variant is provided the state root is - /// not validated. + /// If the [`BlockValidationKind::SkipStateRootValidation`] variant is provided the state root + /// is not validated.
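The insert-block docs above allow state-root validation to be skipped, and the chain-fork docs further down in this patch spell out the full two-parameter rule: the attachment decides whether the root *can* be checked cheaply, the validation kind whether it *should* be. As standalone Rust (an interpretation of those doc comments with re-declared enums, not the tree's code):

```rust
/// Re-declared, simplified copies of the two inputs named in the docs.
#[derive(Clone, Copy)]
enum BlockAttachment {
    Canonical,
    HistoricalFork,
}

#[derive(Clone, Copy)]
enum BlockValidationKind {
    Exhaustive,
    SkipStateRootValidation,
}

/// State-root validation happens only when the block extends the canonical
/// chain (so it *can* be checked) and exhaustive validation was requested
/// (so it *should* be checked).
fn should_validate_state_root(
    attachment: BlockAttachment,
    kind: BlockValidationKind,
) -> bool {
    matches!(
        (attachment, kind),
        (BlockAttachment::Canonical, BlockValidationKind::Exhaustive)
    )
}
```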
/// /// # Note /// @@ -1353,7 +1353,7 @@ where /// /// NOTE: this method should not be called during the pipeline sync, because otherwise the sync /// checkpoint metric will get overwritten. Buffered blocks metrics are updated in - /// [BlockBuffer](crate::block_buffer::BlockBuffer) during the pipeline sync. + /// [`BlockBuffer`](crate::block_buffer::BlockBuffer) during the pipeline sync. pub(crate) fn update_chains_metrics(&mut self) { let height = self.state.block_indices.canonical_tip().number; diff --git a/crates/blockchain-tree/src/bundle.rs b/crates/blockchain-tree/src/bundle.rs index ccfbc2adc..4dc3b9604 100644 --- a/crates/blockchain-tree/src/bundle.rs +++ b/crates/blockchain-tree/src/bundle.rs @@ -1,4 +1,4 @@ -//! [BundleStateDataProvider] implementations used by the tree. +//! [`BundleStateDataProvider`] implementations used by the tree. use reth_primitives::{BlockHash, BlockNumber, ForkBlock}; use reth_provider::{BundleStateDataProvider, BundleStateForkProvider, BundleStateWithReceipts}; diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 9b7e394cd..b69e03174 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -64,8 +64,8 @@ impl AppendableChain { /// Create a new chain that forks off of the canonical chain. /// - /// if [BlockValidationKind::Exhaustive] is specified, the method will verify the state root of - /// the block. + /// if [`BlockValidationKind::Exhaustive`] is specified, the method will verify the state root + /// of the block. pub fn new_canonical_fork( block: SealedBlockWithSenders, parent_header: &SealedHeader, @@ -103,7 +103,7 @@ impl AppendableChain { /// Create a new chain that forks off of an existing sidechain. /// - /// This differs from [AppendableChain::new_canonical_fork] in that this starts a new fork. + /// This differs from [`AppendableChain::new_canonical_fork`] in that this starts a new fork. pub(crate) fn new_chain_fork( &self, block: SealedBlockWithSenders, @@ -162,11 +162,11 @@ impl AppendableChain { /// state root after execution if possible and requested. /// /// Note: State root validation is limited to blocks that extend the canonical chain and is - /// optional, see [BlockValidationKind]. So this function takes two parameters to determine + /// optional, see [`BlockValidationKind`]. So this function takes two parameters to determine /// if the state can and should be validated. - /// - [BlockAttachment] represents if the block extends the canonical chain, and thus we can + /// - [`BlockAttachment`] represents if the block extends the canonical chain, and thus we can /// cache the trie state updates. - /// - [BlockValidationKind] determines if the state root __should__ be validated. + /// - [`BlockValidationKind`] determines if the state root __should__ be validated. fn validate_and_execute( block: SealedBlockWithSenders, parent_block: &SealedHeader, diff --git a/crates/blockchain-tree/src/lib.rs b/crates/blockchain-tree/src/lib.rs index 6f5717abd..6d5a017eb 100644 --- a/crates/blockchain-tree/src/lib.rs +++ b/crates/blockchain-tree/src/lib.rs @@ -1,8 +1,8 @@ //! Implementation of a tree-like structure for blockchains. //! -//! The [BlockchainTree] can validate, execute, and revert blocks in multiple competing sidechains. -//! This structure is used for Reth's sync mode at the tip instead of the pipeline, and is the -//! primary executor and validator of payloads sent from the consensus layer. +//! 
The [`BlockchainTree`] can validate, execute, and revert blocks in multiple competing +//! sidechains. This structure is used for Reth's sync mode at the tip instead of the pipeline, and +//! is the primary executor and validator of payloads sent from the consensus layer. //! //! Blocks and their resulting state transitions are kept in-memory until they are persisted. //! diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index c6dedfa64..4ca8710aa 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -14,7 +14,7 @@ use reth_provider::{ }; use std::collections::BTreeMap; -/// A BlockchainTree that does nothing. +/// A `BlockchainTree` that does nothing. /// /// Caution: this is only intended for testing purposes, or for wiring components together. #[derive(Debug, Clone, Default)] @@ -25,7 +25,7 @@ pub struct NoopBlockchainTree { } impl NoopBlockchainTree { - /// Create a new NoopBlockchainTree with a canon state notification sender. + /// Create a new `NoopBlockchainTree` with a canon state notification sender. pub fn with_canon_state_notifications( canon_state_notification_sender: CanonStateNotificationSender, ) -> Self { diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index fc323fd26..afd3797eb 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -1,4 +1,4 @@ -//! Wrapper around BlockchainTree that allows for it to be shared. +//! Wrapper around `BlockchainTree` that allows for it to be shared. use super::BlockchainTree; use parking_lot::RwLock; @@ -20,10 +20,10 @@ use reth_provider::{ use std::{collections::BTreeMap, sync::Arc}; use tracing::trace; -/// Shareable blockchain tree that is behind a RwLock +/// Shareable blockchain tree that is behind a `RwLock` #[derive(Clone, Debug)] pub struct ShareableBlockchainTree { - /// BlockchainTree + /// `BlockchainTree` pub tree: Arc>>, } diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index 2c97c1923..e44e1aae5 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -47,7 +47,7 @@ impl TreeState { BlockchainId(id) } - /// Expose internal indices of the BlockchainTree. + /// Expose internal indices of the `BlockchainTree`. #[inline] pub(crate) const fn block_indices(&self) -> &BlockIndices { &self.block_indices diff --git a/crates/cli/runner/src/lib.rs b/crates/cli/runner/src/lib.rs index da1c5841d..9d5fea6a1 100644 --- a/crates/cli/runner/src/lib.rs +++ b/crates/cli/runner/src/lib.rs @@ -27,8 +27,8 @@ impl CliRunner { /// Executes the given _async_ command on the tokio runtime until the command future resolves or /// until the process receives a `SIGINT` or `SIGTERM` signal. /// - /// Tasks spawned by the command via the [TaskExecutor] are shut down and an attempt is made to - /// drive their shutdown to completion after the command has finished. + /// Tasks spawned by the command via the [`TaskExecutor`] are shut down and an attempt is made + /// to drive their shutdown to completion after the command has finished. pub fn run_command_until_exit( self, command: impl FnOnce(CliContext) -> F, @@ -80,7 +80,7 @@ impl CliRunner { /// Executes a regular future as a spawned blocking task until completion or until external /// signal received. /// - /// See [Runtime::spawn_blocking](tokio::runtime::Runtime::spawn_blocking) . + /// See [`Runtime::spawn_blocking`](tokio::runtime::Runtime::spawn_blocking) . 
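For readers unfamiliar with the runner pattern referenced above, one minimal way to race a future against Ctrl-C looks like the sketch below. It assumes tokio with the `rt-multi-thread` and `signal` features and simplifies deliberately: the real `CliRunner` also honors `SIGTERM` and drives task shutdown to completion afterwards.

```rust
use std::future::Future;
use tokio::runtime::Runtime;

/// Run `fut` to completion, or return early once Ctrl-C is received.
/// A minimal sketch only; not the CliRunner implementation.
fn run_until_ctrl_c<F>(fut: F) -> std::io::Result<()>
where
    F: Future<Output = std::io::Result<()>>,
{
    let rt = Runtime::new()?;
    rt.block_on(async {
        tokio::select! {
            res = fut => res,
            // ctrl_c() resolves on SIGINT; treat it as a clean shutdown.
            _ = tokio::signal::ctrl_c() => Ok(()),
        }
    })
}
```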
pub fn run_blocking_until_ctrl_c(self, fut: F) -> Result<(), E> where F: Future> + Send + 'static, @@ -104,7 +104,7 @@ impl CliRunner { } } -/// [CliRunner] configuration when executing commands asynchronously +/// [`CliRunner`] configuration when executing commands asynchronously struct AsyncCliRunner { context: CliContext, task_manager: TaskManager, @@ -124,7 +124,7 @@ impl AsyncCliRunner { } } -/// Additional context provided by the [CliRunner] when executing commands +/// Additional context provided by the [`CliRunner`] when executing commands #[derive(Debug)] pub struct CliContext { /// Used to execute/spawn tasks diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 65fc1c647..d18f6ae8d 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -28,7 +28,7 @@ pub struct Config { } impl Config { - /// Returns the [PeersConfig] for the node. + /// Returns the [`PeersConfig`] for the node. /// /// If a peers file is provided, the basic nodes from the file are added to the configuration. pub fn peers_config_with_basic_nodes_from_file( @@ -142,7 +142,7 @@ pub struct BodiesConfig { pub downloader_request_limit: u64, /// The maximum number of block bodies returned at once from the stream /// - /// Default: 1_000 + /// Default: `1_000` pub downloader_stream_batch_size: usize, /// The size of the internal block buffer in bytes. /// diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index f51e77841..a86d21d43 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -1,7 +1,7 @@ //! A [Consensus] implementation for local testing purposes //! that automatically seals blocks. //! -//! The Mining task polls a [MiningMode], and will return a list of transactions that are ready to +//! The Mining task polls a [`MiningMode`], and will return a list of transactions that are ready to //! be mined. //! //! These downloaders poll the miner, assemble the block, and return transactions that are ready to @@ -57,7 +57,7 @@ pub struct AutoSealConsensus { } impl AutoSealConsensus { - /// Create a new instance of [AutoSealConsensus] + /// Create a new instance of [`AutoSealConsensus`] pub fn new(chain_spec: Arc) -> Self { Self { chain_spec } } @@ -143,7 +143,7 @@ where } } - /// Sets the [MiningMode] it operates in, default is [MiningMode::Auto] + /// Sets the [`MiningMode`] it operates in, default is [`MiningMode::Auto`] pub fn mode(mut self, mode: MiningMode) -> Self { self.mode = mode; self diff --git a/crates/consensus/beacon/src/engine/error.rs b/crates/consensus/beacon/src/engine/error.rs index aa37e8482..041a0c944 100644 --- a/crates/consensus/beacon/src/engine/error.rs +++ b/crates/consensus/beacon/src/engine/error.rs @@ -7,7 +7,7 @@ use reth_stages_api::PipelineError; pub type BeaconEngineResult = Result; /// The error type for the beacon consensus engine service -/// [BeaconConsensusEngine](crate::BeaconConsensusEngine) +/// [`BeaconConsensusEngine`](crate::BeaconConsensusEngine) /// /// Represents all possible error cases for the beacon consensus engine. #[derive(Debug, thiserror::Error)] @@ -24,7 +24,7 @@ pub enum BeaconConsensusEngineError { /// Hook error. #[error(transparent)] Hook(#[from] EngineHookError), - /// Common error. Wrapper around [RethError]. + /// Common error. Wrapper around [`RethError`]. 
#[error(transparent)] Common(#[from] RethError), } diff --git a/crates/consensus/beacon/src/engine/event.rs b/crates/consensus/beacon/src/engine/event.rs index d5cbdee46..4b092bd2f 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/consensus/beacon/src/engine/event.rs @@ -3,7 +3,7 @@ use reth_primitives::{SealedBlock, SealedHeader, B256}; use reth_rpc_types::engine::ForkchoiceState; use std::{sync::Arc, time::Duration}; -/// Events emitted by [crate::BeaconConsensusEngine]. +/// Events emitted by [`crate::BeaconConsensusEngine`]. #[derive(Clone, Debug)] pub enum BeaconConsensusEngineEvent { /// The fork choice state was updated, and the current fork choice status diff --git a/crates/consensus/beacon/src/engine/forkchoice.rs b/crates/consensus/beacon/src/engine/forkchoice.rs index f614ef976..afd19f607 100644 --- a/crates/consensus/beacon/src/engine/forkchoice.rs +++ b/crates/consensus/beacon/src/engine/forkchoice.rs @@ -38,26 +38,26 @@ impl ForkchoiceStateTracker { self.last_valid = Some(state); } - /// Returns the [ForkchoiceStatus] of the latest received FCU. + /// Returns the [`ForkchoiceStatus`] of the latest received FCU. /// /// Caution: this can be invalid. pub(crate) fn latest_status(&self) -> Option<ForkchoiceStatus> { self.latest.as_ref().map(|s| s.status) } - /// Returns whether the latest received FCU is valid: [ForkchoiceStatus::Valid] + /// Returns whether the latest received FCU is valid: [`ForkchoiceStatus::Valid`] #[allow(dead_code)] pub(crate) fn is_latest_valid(&self) -> bool { self.latest_status().map(|s| s.is_valid()).unwrap_or(false) } - /// Returns whether the latest received FCU is syncing: [ForkchoiceStatus::Syncing] + /// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Syncing`] #[allow(dead_code)] pub(crate) fn is_latest_syncing(&self) -> bool { self.latest_status().map(|s| s.is_syncing()).unwrap_or(false) } - /// Returns whether the latest received FCU is syncing: [ForkchoiceStatus::Invalid] + /// Returns whether the latest received FCU is invalid: [`ForkchoiceStatus::Invalid`] #[allow(dead_code)] pub(crate) fn is_latest_invalid(&self) -> bool { self.latest_status().map(|s| s.is_invalid()).unwrap_or(false) @@ -75,7 +75,7 @@ impl ForkchoiceStateTracker { self.last_syncing.as_ref().map(|s| s.head_block_hash) } - /// Returns the last received ForkchoiceState to which we need to sync. + /// Returns the last received `ForkchoiceState` to which we need to sync. pub(crate) const fn sync_target_state(&self) -> Option<ForkchoiceState> { self.last_syncing } @@ -94,7 +94,7 @@ pub(crate) struct ReceivedForkchoiceState { status: ForkchoiceStatus, } -/// A simplified representation of [PayloadStatusEnum] specifically for FCU. +/// A simplified representation of [`PayloadStatusEnum`] specifically for FCU. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum ForkchoiceStatus { /// The forkchoice state is valid. @@ -118,7 +118,7 @@ impl ForkchoiceStatus { matches!(self, Self::Syncing) } - /// Converts the general purpose [PayloadStatusEnum] into a [ForkchoiceStatus]. + /// Converts the general purpose [`PayloadStatusEnum`] into a [`ForkchoiceStatus`].
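The diff resumes below with only the first arm of that conversion visible, so the standalone sketch here fills in the remainder as a plausible completion (assumed, not quoted from the source): `Valid` and `Accepted` both collapse to a valid forkchoice status, while `Invalid` and `Syncing` pass through unchanged.

```rust
// Simplified stand-ins; the real PayloadStatusEnum carries extra data.
enum PayloadStatusEnum {
    Valid,
    Accepted,
    Invalid,
    Syncing,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ForkchoiceStatus {
    Valid,
    Invalid,
    Syncing,
}

fn from_payload_status(status: &PayloadStatusEnum) -> ForkchoiceStatus {
    match status {
        // ACCEPTED is treated as valid here, matching the arm visible in
        // the diff; the other arms are assumed.
        PayloadStatusEnum::Valid | PayloadStatusEnum::Accepted => ForkchoiceStatus::Valid,
        PayloadStatusEnum::Invalid => ForkchoiceStatus::Invalid,
        PayloadStatusEnum::Syncing => ForkchoiceStatus::Syncing,
    }
}
```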
pub(crate) const fn from_payload_status(status: &PayloadStatusEnum) -> Self { match status { PayloadStatusEnum::Valid | PayloadStatusEnum::Accepted => { @@ -137,7 +137,7 @@ impl From<PayloadStatusEnum> for ForkchoiceStatus { } } -/// A helper type to check represent hashes of a [ForkchoiceState] +/// A helper type to represent hashes of a [`ForkchoiceState`] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub(crate) enum ForkchoiceStateHash { Head(B256), @@ -146,7 +146,7 @@ pub(crate) enum ForkchoiceStateHash { } impl ForkchoiceStateHash { - /// Tries to find a matching hash in the given [ForkchoiceState]. + /// Tries to find a matching hash in the given [`ForkchoiceState`]. pub(crate) fn find(state: &ForkchoiceState, hash: B256) -> Option<Self> { if state.head_block_hash == hash { Some(Self::Head(hash)) @@ -159,7 +159,7 @@ impl ForkchoiceStateHash { } } - /// Returns true if this is the head hash of the [ForkchoiceState] + /// Returns true if this is the head hash of the [`ForkchoiceState`] pub(crate) const fn is_head(&self) -> bool { matches!(self, Self::Head(_)) } diff --git a/crates/consensus/beacon/src/engine/hooks/controller.rs b/crates/consensus/beacon/src/engine/hooks/controller.rs index 41519b456..bd5c8a9e2 100644 --- a/crates/consensus/beacon/src/engine/hooks/controller.rs +++ b/crates/consensus/beacon/src/engine/hooks/controller.rs @@ -18,8 +18,8 @@ pub(crate) struct PolledHook { /// Manages hooks under the control of the engine. /// /// This type polls the initialized hooks one by one, respecting the DB access level -/// (i.e. [crate::hooks::EngineHookDBAccessLevel::ReadWrite] that enforces running at most one such -/// hook). +/// (i.e. [`crate::hooks::EngineHookDBAccessLevel::ReadWrite`] that enforces running at most one +/// such hook). pub(crate) struct EngineHooksController { /// Collection of hooks. /// diff --git a/crates/consensus/beacon/src/engine/hooks/mod.rs b/crates/consensus/beacon/src/engine/hooks/mod.rs index 9d0ec626b..5d699921a 100644 --- a/crates/consensus/beacon/src/engine/hooks/mod.rs +++ b/crates/consensus/beacon/src/engine/hooks/mod.rs @@ -99,7 +99,7 @@ pub enum EngineHookError { /// Hook channel closed. #[error("hook channel closed")] ChannelClosed, - /// Common error. Wrapper around [RethError]. + /// Common error. Wrapper around [`RethError`]. #[error(transparent)] Common(#[from] RethError), /// An internal error occurred. diff --git a/crates/consensus/beacon/src/engine/hooks/prune.rs b/crates/consensus/beacon/src/engine/hooks/prune.rs index 29675806c..52fc38b0d 100644 --- a/crates/consensus/beacon/src/engine/hooks/prune.rs +++ b/crates/consensus/beacon/src/engine/hooks/prune.rs @@ -77,11 +77,11 @@ impl PruneHook { } /// This will try to spawn the pruner if it is idle: - /// 1. Check if pruning is needed through [Pruner::is_pruning_needed]. + /// 1. Check if pruning is needed through [`Pruner::is_pruning_needed`]. /// - /// 2.1. If pruning is needed, pass tip block number to the [Pruner::run] and spawn it in a - /// separate task. Set pruner state to [PrunerState::Running]. - /// 2.2. If pruning is not needed, set pruner state back to [PrunerState::Idle]. + /// 2.1. If pruning is needed, pass tip block number to the [`Pruner::run`] and spawn it in a + /// separate task. Set pruner state to [`PrunerState::Running`]. + /// 2.2. If pruning is not needed, set pruner state back to [`PrunerState::Idle`]. /// /// If pruner is already running, do nothing.
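The idle/running cycle described above is the same pattern the static-file hook uses a little further down, so a generic sketch may help. Everything here is illustrative: the shape is inferred from the doc comments and the `StaticFileProducerState` variants in this diff, and a real hook would use the engine's task spawner rather than a bare thread.

```rust
use tokio::sync::oneshot;

/// Generic idle/running state for an engine hook worker: `Idle` owns the
/// worker itself, `Running` owns the channel on which the spawned task
/// eventually hands the worker (and its result) back.
enum HookState<W, R> {
    Idle(Option<W>),
    Running(oneshot::Receiver<(W, R)>),
}

impl<W: Send + 'static, R: Send + 'static> HookState<W, R> {
    /// Spawn `work` if the hook is idle; do nothing if it is already running.
    fn try_spawn(&mut self, work: impl FnOnce(W) -> (W, R) + Send + 'static) {
        if let HookState::Idle(slot) = self {
            if let Some(worker) = slot.take() {
                let (tx, rx) = oneshot::channel();
                // A real hook would go through the engine's task spawner.
                std::thread::spawn(move || {
                    let _ = tx.send(work(worker));
                });
                *self = HookState::Running(rx);
            }
        }
    }
}
```

The `PrunerState` documented above and the `StaticFileProducerState` below are both concrete instances of this shape, which is why the two hooks read almost identically.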
fn try_spawn_pruner(&mut self, tip_block_number: BlockNumber) -> Option { @@ -141,8 +141,8 @@ impl EngineHook for PruneHook { /// The possible pruner states within the sync controller. /// -/// [PrunerState::Idle] means that the pruner is currently idle. -/// [PrunerState::Running] means that the pruner is currently running. +/// [`PrunerState::Idle`] means that the pruner is currently idle. +/// [`PrunerState::Running`] means that the pruner is currently running. /// /// NOTE: The differentiation between these two states is important, because when the pruner is /// running, it acquires the write lock over the database. This means that we cannot forward to the diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 5cf643216..4dc6946b6 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -1,4 +1,4 @@ -//! StaticFile hook for the engine implementation. +//! `StaticFile` hook for the engine implementation. use crate::{ engine::hooks::{EngineHook, EngineHookContext, EngineHookError, EngineHookEvent}, @@ -16,12 +16,12 @@ use tracing::trace; /// Manages producing static files under the control of the engine. /// -/// This type controls the [StaticFileProducer]. +/// This type controls the [`StaticFileProducer`]. #[derive(Debug)] pub struct StaticFileHook { - /// The current state of the static_file_producer. + /// The current state of the `static_file_producer`. state: StaticFileProducerState, - /// The type that can spawn the static_file_producer task. + /// The type that can spawn the `static_file_producer` task. task_spawner: Box, } @@ -34,10 +34,10 @@ impl StaticFileHook { Self { state: StaticFileProducerState::Idle(Some(static_file_producer)), task_spawner } } - /// Advances the static_file_producer state. + /// Advances the `static_file_producer` state. /// - /// This checks for the result in the channel, or returns pending if the static_file_producer is - /// idle. + /// This checks for the result in the channel, or returns pending if the `static_file_producer` + /// is idle. fn poll_static_file_producer( &mut self, cx: &mut Context<'_>, @@ -67,19 +67,19 @@ impl StaticFileHook { Poll::Ready(Ok(event)) } - /// This will try to spawn the static_file_producer if it is idle: + /// This will try to spawn the `static_file_producer` if it is idle: /// 1. Check if producing static files is needed through - /// [StaticFileProducer::get_static_file_targets](reth_static_file::StaticFileProducerInner::get_static_file_targets) - /// and then [StaticFileTargets::any](reth_static_file::StaticFileTargets::any). + /// [`StaticFileProducer::get_static_file_targets`](reth_static_file::StaticFileProducerInner::get_static_file_targets) + /// and then [`StaticFileTargets::any`](reth_static_file::StaticFileTargets::any). /// /// 2.1. If producing static files is needed, pass static file request to the - /// [StaticFileProducer::run](reth_static_file::StaticFileProducerInner::run) and + /// [`StaticFileProducer::run`](reth_static_file::StaticFileProducerInner::run) and /// spawn it in a separate task. Set static file producer state to - /// [StaticFileProducerState::Running]. + /// [`StaticFileProducerState::Running`]. /// 2.2. If producing static files is not needed, set static file producer state back to - /// [StaticFileProducerState::Idle]. + /// [`StaticFileProducerState::Idle`]. /// - /// If static_file_producer is already running, do nothing. 
+ /// If `static_file_producer` is already running, do nothing. fn try_spawn_static_file_producer( &mut self, finalized_block_number: BlockNumber, @@ -157,14 +157,14 @@ impl EngineHook for StaticFileHook { } } -/// The possible static_file_producer states within the sync controller. +/// The possible `static_file_producer` states within the sync controller. /// -/// [StaticFileProducerState::Idle] means that the static file producer is currently idle. -/// [StaticFileProducerState::Running] means that the static file producer is currently running. +/// [`StaticFileProducerState::Idle`] means that the static file producer is currently idle. +/// [`StaticFileProducerState::Running`] means that the static file producer is currently running. #[derive(Debug)] enum StaticFileProducerState { - /// [StaticFileProducer] is idle. + /// [`StaticFileProducer`] is idle. Idle(Option>), - /// [StaticFileProducer] is running and waiting for a response + /// [`StaticFileProducer`] is running and waiting for a response Running(oneshot::Receiver>), } diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index ee057aa98..4dac97583 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -16,7 +16,7 @@ use tokio::sync::oneshot; /// Represents the outcome of forkchoice update. /// -/// This is a future that resolves to [ForkChoiceUpdateResult] +/// This is a future that resolves to [`ForkChoiceUpdateResult`] #[must_use = "futures do nothing unless you `.await` or poll them"] #[derive(Debug)] pub struct OnForkChoiceUpdated { @@ -32,7 +32,7 @@ pub struct OnForkChoiceUpdated { // === impl OnForkChoiceUpdated === impl OnForkChoiceUpdated { - /// Returns the determined status of the received ForkchoiceState. + /// Returns the determined status of the received `ForkchoiceState`. pub const fn forkchoice_status(&self) -> ForkchoiceStatus { self.forkchoice_status } diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 35e75320b..508566e18 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -95,7 +95,7 @@ pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; /// received by Engine API (JSON-RPC). /// /// The consensus engine is idle until it receives the first -/// [BeaconEngineMessage::ForkchoiceUpdated] message from the CL which would initiate the sync. At +/// [`BeaconEngineMessage::ForkchoiceUpdated`] message from the CL which would initiate the sync. At /// first, the consensus engine would run the [Pipeline] until the latest known block hash. /// Afterward, it would attempt to create/restore the [`BlockchainTreeEngine`] from the blocks /// that are currently available. In case the restoration is successful, the consensus engine would @@ -107,10 +107,10 @@ pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; /// ## New Payload (`engine_newPayloadV{}`) /// /// The engine receives new payloads from the CL. If the payload is connected to the canonical -/// chain, it will be fully validated added to a chain in the [BlockchainTreeEngine]: `VALID` +/// chain, it will be fully validated and added to a chain in the [`BlockchainTreeEngine`]: `VALID` /// /// If the payload's chain is disconnected (at least 1 block is missing) then it will be buffered: -/// `SYNCING` ([BlockStatus::Disconnected]). +/// `SYNCING` ([`BlockStatus::Disconnected`]).
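Condensed into code form, the two `engine_newPayload` outcomes described above map as follows (a toy over stand-in types, derived from the prose rather than from the engine itself):

```rust
/// Stand-in for the tree's insert outcome described in the docs.
enum BlockStatus {
    /// Block connected to the canonical chain and passed full validation.
    Valid,
    /// An ancestor is missing, so the block was only buffered.
    Disconnected,
}

/// The Engine API status string the docs associate with each outcome.
fn new_payload_status(status: &BlockStatus) -> &'static str {
    match status {
        BlockStatus::Valid => "VALID",
        BlockStatus::Disconnected => "SYNCING",
    }
}
```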
/// /// ## Forkchoice Update (FCU) (`engine_forkchoiceUpdatedV{}`) /// @@ -125,14 +125,14 @@ pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; /// /// ### The chain is connected /// -/// All blocks of the `head_hash`'s chain are present in the [BlockchainTreeEngine] and are +/// All blocks of the `head_hash`'s chain are present in the [`BlockchainTreeEngine`] and are /// committed to the canonical chain. This also includes reorgs. /// /// ### The chain is disconnected /// -/// In this case the [BlockchainTreeEngine] doesn't know how the new chain connects to the existing -/// canonical chain. It could be a simple commit (new blocks extend the current head) or a re-org -/// that requires unwinding the canonical chain. +/// In this case the [`BlockchainTreeEngine`] doesn't know how the new chain connects to the +/// existing canonical chain. It could be a simple commit (new blocks extend the current head) or a +/// re-org that requires unwinding the canonical chain. /// /// This further distinguishes between two variants: /// @@ -231,7 +231,7 @@ where Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, EngineT: EngineTypes + Unpin + 'static, { - /// Create a new instance of the [BeaconConsensusEngine]. + /// Create a new instance of the [`BeaconConsensusEngine`]. #[allow(clippy::too_many_arguments)] pub fn new( client: Client, @@ -264,16 +264,17 @@ where ) } - /// Create a new instance of the [BeaconConsensusEngine] using the given channel to configure - /// the [BeaconEngineMessage] communication channel. + /// Create a new instance of the [`BeaconConsensusEngine`] using the given channel to configure + /// the [`BeaconEngineMessage`] communication channel. /// /// By default the engine is started with idle pipeline. /// The pipeline can be launched immediately in one of the following ways descending in /// priority: - /// - Explicit [Option::Some] target block hash provided via a constructor argument. + /// - Explicit [`Option::Some`] target block hash provided via a constructor argument. /// - The process was previously interrupted amidst the pipeline run. This is checked by - /// comparing the checkpoints of the first ([StageId::Headers]) and last ([StageId::Finish]) - /// stages. In this case, the latest available header in the database is used as the target. + /// comparing the checkpoints of the first ([`StageId::Headers`]) and last + /// ([`StageId::Finish`]) stages. In this case, the latest available header in the database is + /// used as the target. /// /// Propagates any database related error. #[allow(clippy::too_many_arguments)] @@ -334,7 +335,7 @@ where Ok((this, handle)) } - /// Returns current [EngineHookContext] that's used for polling engine hooks. + /// Returns current [`EngineHookContext`] that's used for polling engine hooks. fn current_engine_hook_context(&self) -> RethResult { Ok(EngineHookContext { tip_block_number: self.blockchain.canonical_tip().number, @@ -732,7 +733,7 @@ where /// - It is fully validated and deemed VALID /// - Any other ancestor of the invalid payload with a higher blockNumber is INVALID /// - 0x0000000000000000000000000000000000000000000000000000000000000000 if the above - /// conditions are satisfied by a PoW block. + /// conditions are satisfied by a `PoW` block. /// - null if client software cannot determine the ancestor of the invalid payload satisfying /// the above conditions. fn latest_valid_hash_for_invalid_payload( @@ -841,9 +842,9 @@ where /// made canonical. 
/// /// If the forkchoice state is consistent, this will return Ok(None). Otherwise, this will - /// return an instance of [OnForkChoiceUpdated] that is INVALID. + /// return an instance of [`OnForkChoiceUpdated`] that is INVALID. /// - /// This also updates the safe and finalized blocks in the [CanonChainTracker], if they are + /// This also updates the safe and finalized blocks in the [`CanonChainTracker`], if they are /// consistent with the head block. fn ensure_consistent_forkchoice_state( &self, @@ -971,7 +972,7 @@ where /// /// If the newest head is not invalid, then this will trigger a new pipeline run to sync the gap /// - /// See [Self::on_forkchoice_updated] and [BlockchainTreeEngine::make_canonical]. + /// See [`Self::on_forkchoice_updated`] and [`BlockchainTreeEngine::make_canonical`]. fn on_failed_canonical_forkchoice_update( &mut self, state: &ForkchoiceState, @@ -1320,7 +1321,7 @@ where /// Attempt to form a new canonical chain based on the current sync target. /// /// This is invoked when we successfully __downloaded__ a new block from the network which - /// resulted in [BlockStatus::Valid]. + /// resulted in [`BlockStatus::Valid`]. /// /// Note: This will not succeed if the sync target has changed since the block download request /// was issued and the new target is still disconnected and additional missing blocks are @@ -1385,7 +1386,7 @@ where } } - /// Event handler for events emitted by the [EngineSyncController]. + /// Event handler for events emitted by the [`EngineSyncController`]. /// /// This returns a result to indicate whether the engine future should resolve (fatal error). fn on_sync_event( @@ -1785,7 +1786,7 @@ where } /// On initialization, the consensus engine will poll the message receiver and return -/// [Poll::Pending] until the first forkchoice update message is received. +/// [`Poll::Pending`] until the first forkchoice update message is received. /// /// As soon as the consensus engine receives the first forkchoice updated message and updates the /// local forkchoice state, it will launch the pipeline to sync to the head hash. @@ -1946,13 +1947,13 @@ enum BlockchainTreeAction { /// Action to insert a new block that we successfully downloaded from the network. /// There are several outcomes for inserting a downloaded block into the tree: /// - /// ## [BlockStatus::Valid] + /// ## [`BlockStatus::Valid`] /// /// The block is connected to the current canonical chain and is valid. /// If the block is an ancestor of the current forkchoice head, then we can try again to /// make the chain canonical. /// - /// ## [BlockStatus::Disconnected] + /// ## [`BlockStatus::Disconnected`] /// /// The block is not connected to the canonical chain, and we need to download the /// missing parent first. diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index cce625032..8add75eea 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -29,7 +29,7 @@ use tracing::trace; /// This type controls the [Pipeline] and supports (single) full block downloads. /// /// Caution: If the pipeline is running, this type will not emit blocks downloaded from the network -/// [EngineSyncEvent::FetchedFullBlock] until the pipeline is idle to prevent commits to the +/// [`EngineSyncEvent::FetchedFullBlock`] until the pipeline is idle to prevent commits to the /// database while the pipeline is still active. 
pub(crate) struct EngineSyncController where @@ -359,7 +359,7 @@ where } } -/// A wrapper type around [SealedBlock] that implements the [Ord] trait by block number. +/// A wrapper type around [`SealedBlock`] that implements the [Ord] trait by block number. #[derive(Debug, Clone, PartialEq, Eq)] struct OrderedSealedBlock(SealedBlock); @@ -375,7 +375,7 @@ impl Ord for OrderedSealedBlock { } } -/// The event type emitted by the [EngineSyncController]. +/// The event type emitted by the [`EngineSyncController`]. #[derive(Debug)] pub(crate) enum EngineSyncEvent { /// A full block has been downloaded from the network. @@ -402,8 +402,8 @@ pub(crate) enum EngineSyncEvent { /// The possible pipeline states within the sync controller. /// -/// [PipelineState::Idle] means that the pipeline is currently idle. -/// [PipelineState::Running] means that the pipeline is currently running. +/// [`PipelineState::Idle`] means that the pipeline is currently idle. +/// [`PipelineState::Running`] means that the pipeline is currently running. /// /// NOTE: The differentiation between these two states is important, because when the pipeline is /// running, it acquires the write lock over the database. This means that we cannot forward to the @@ -451,7 +451,7 @@ mod tests { } impl TestPipelineBuilder { - /// Create a new [TestPipelineBuilder]. + /// Create a new [`TestPipelineBuilder`]. fn new() -> Self { Self { pipeline_exec_outputs: VecDeque::new(), @@ -515,7 +515,7 @@ mod tests { } impl TestSyncControllerBuilder { - /// Create a new [TestSyncControllerBuilder]. + /// Create a new [`TestSyncControllerBuilder`]. const fn new() -> Self { Self { max_block: None, client: None } } diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 22e7053a9..fd3c694c2 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -22,7 +22,7 @@ pub mod noop; /// test helpers for mocking consensus pub mod test_utils; -/// Post execution input passed to [Consensus::validate_block_post_execution]. +/// Post execution input passed to [`Consensus::validate_block_post_execution`]. #[derive(Debug)] pub struct PostExecutionInput<'a> { /// Receipts of the block. @@ -320,7 +320,7 @@ pub enum ConsensusError { #[error(transparent)] InvalidTransaction(#[from] InvalidTransactionError), - /// Error type transparently wrapping HeaderValidationError. + /// Error type transparently wrapping `HeaderValidationError`. #[error(transparent)] HeaderValidationError(#[from] HeaderValidationError), } diff --git a/crates/engine-primitives/src/error.rs b/crates/engine-primitives/src/error.rs index f6dd3a8b7..3c1d17838 100644 --- a/crates/engine-primitives/src/error.rs +++ b/crates/engine-primitives/src/error.rs @@ -4,7 +4,7 @@ use thiserror::Error; /// Thrown when the payload or attributes are known to be invalid before processing. /// /// This is used mainly for -/// [validate_version_specific_fields](crate::validate_version_specific_fields), which validates +/// [`validate_version_specific_fields`](crate::validate_version_specific_fields), which validates /// both execution payloads and forkchoice update attributes with respect to a method version. 
#[derive(Error, Debug)] pub enum EngineObjectValidationError { @@ -37,14 +37,14 @@ pub enum VersionSpecificValidationError { /// block root #[error("parent beacon block root not supported before V3")] ParentBeaconBlockRootNotSupportedBeforeV3, - /// Thrown if engine_forkchoiceUpdatedV1 or engine_newPayloadV1 contains withdrawals + /// Thrown if `engine_forkchoiceUpdatedV1` or `engine_newPayloadV1` contains withdrawals #[error("withdrawals not supported in V1")] WithdrawalsNotSupportedInV1, - /// Thrown if engine_forkchoiceUpdated or engine_newPayload contains no withdrawals after + /// Thrown if `engine_forkchoiceUpdated` or `engine_newPayload` contains no withdrawals after /// Shanghai #[error("no withdrawals post-Shanghai")] NoWithdrawalsPostShanghai, - /// Thrown if engine_forkchoiceUpdated or engine_newPayload contains withdrawals before + /// Thrown if `engine_forkchoiceUpdated` or `engine_newPayload` contains withdrawals before /// Shanghai #[error("withdrawals pre-Shanghai")] HasWithdrawalsPreShanghai, diff --git a/crates/engine-primitives/src/lib.rs b/crates/engine-primitives/src/lib.rs index 015993df9..646ce1193 100644 --- a/crates/engine-primitives/src/lib.rs +++ b/crates/engine-primitives/src/lib.rs @@ -12,7 +12,7 @@ use core::fmt; use reth_primitives::ChainSpec; /// Contains traits to abstract over payload attributes types and default implementations of the -/// [PayloadAttributes] trait for ethereum mainnet and optimism types. +/// [`PayloadAttributes`] trait for ethereum mainnet and optimism types. pub mod traits; use serde::{de::DeserializeOwned, ser::Serialize}; pub use traits::{BuiltPayload, PayloadAttributes, PayloadBuilderAttributes}; @@ -21,7 +21,7 @@ pub use traits::{BuiltPayload, PayloadAttributes, PayloadBuilderAttributes}; pub mod error; pub use error::{EngineObjectValidationError, VersionSpecificValidationError}; -/// Contains types used in implementations of the [PayloadAttributes] trait. +/// Contains types used in implementations of the [`PayloadAttributes`] trait. pub mod payload; pub use payload::PayloadOrAttributes; @@ -70,7 +70,7 @@ pub trait EngineTypes: /// * If V3, this ensures that the payload timestamp is within the Cancun timestamp. /// * If V4, this ensures that the payload timestamp is within the Prague timestamp. /// -/// Otherwise, this will return [EngineObjectValidationError::UnsupportedFork]. +/// Otherwise, this will return [`EngineObjectValidationError::UnsupportedFork`]. pub fn validate_payload_timestamp( chain_spec: &ChainSpec, version: EngineApiMessageVersion, @@ -187,13 +187,13 @@ pub fn validate_withdrawals_presence( /// Before Cancun, the `parentBeaconBlockRoot` field must be [None]. /// /// If the engine API message version is V1 or V2, and the timestamp is post-Cancun, then this will -/// return [EngineObjectValidationError::UnsupportedFork]. +/// return [`EngineObjectValidationError::UnsupportedFork`]. /// /// If the timestamp is before the Cancun fork and the engine API message version is V3, then this -/// will return [EngineObjectValidationError::UnsupportedFork]. +/// will return [`EngineObjectValidationError::UnsupportedFork`]. /// /// If the engine API message version is V3, but the `parentBeaconBlockRoot` is [None], then -/// this will return [VersionSpecificValidationError::NoParentBeaconBlockRootPostCancun]. +/// this will return [`VersionSpecificValidationError::NoParentBeaconBlockRootPostCancun`]. 
/// /// This implements the following Engine API spec rules: /// @@ -317,10 +317,10 @@ impl MessageValidationKind { /// Validates the presence or exclusion of fork-specific fields based on the ethereum execution /// payload, or payload attributes, and the message version. /// -/// The object being validated is provided by the [PayloadOrAttributes] argument, which can be +/// The object being validated is provided by the [`PayloadOrAttributes`] argument, which can be /// either an execution payload, or payload attributes. /// -/// The version is provided by the [EngineApiMessageVersion] argument. +/// The version is provided by the [`EngineApiMessageVersion`] argument. pub fn validate_version_specific_fields( chain_spec: &ChainSpec, version: EngineApiMessageVersion, diff --git a/crates/engine-primitives/src/payload.rs b/crates/engine-primitives/src/payload.rs index b982eaafa..adb13eb1f 100644 --- a/crates/engine-primitives/src/payload.rs +++ b/crates/engine-primitives/src/payload.rs @@ -4,10 +4,10 @@ use reth_rpc_types::engine::ExecutionPayload; use super::MessageValidationKind; -/// Either an [ExecutionPayload] or a types that implements the [PayloadAttributes] trait. +/// Either an [`ExecutionPayload`] or a types that implements the [`PayloadAttributes`] trait. #[derive(Debug)] pub enum PayloadOrAttributes<'a, AttributesType> { - /// An [ExecutionPayload] and optional parent beacon block root. + /// An [`ExecutionPayload`] and optional parent beacon block root. ExecutionPayload { /// The inner execution payload payload: &'a ExecutionPayload, @@ -22,7 +22,7 @@ impl<'a, AttributesType> PayloadOrAttributes<'a, AttributesType> where AttributesType: PayloadAttributes, { - /// Construct a [PayloadOrAttributes] from an [ExecutionPayload] and optional parent beacon + /// Construct a [`PayloadOrAttributes`] from an [`ExecutionPayload`] and optional parent beacon /// block root. pub const fn from_execution_payload( payload: &'a ExecutionPayload, @@ -55,7 +55,7 @@ where } } - /// Return a [MessageValidationKind] for the payload or attributes. + /// Return a [`MessageValidationKind`] for the payload or attributes. pub const fn message_validation_kind(&self) -> MessageValidationKind { match self { Self::ExecutionPayload { .. } => MessageValidationKind::Payload, diff --git a/crates/engine-primitives/src/traits.rs b/crates/engine-primitives/src/traits.rs index fab2a25aa..3966e619e 100644 --- a/crates/engine-primitives/src/traits.rs +++ b/crates/engine-primitives/src/traits.rs @@ -10,7 +10,7 @@ use reth_rpc_types::{ Withdrawal, }; -/// Represents a built payload type that contains a built [SealedBlock] and can be converted into +/// Represents a built payload type that contains a built [`SealedBlock`] and can be converted into /// engine API execution payloads. pub trait BuiltPayload: Send + Sync + std::fmt::Debug { /// Returns the built block (sealed) @@ -26,14 +26,14 @@ pub trait BuiltPayload: Send + Sync + std::fmt::Debug { /// receives, into a type that the payload builder can use. pub trait PayloadBuilderAttributes: Send + Sync + std::fmt::Debug { /// The payload attributes that can be used to construct this type. Used as the argument in - /// [PayloadBuilderAttributes::try_new]. + /// [`PayloadBuilderAttributes::try_new`]. type RpcPayloadAttributes; - /// The error type used in [PayloadBuilderAttributes::try_new]. + /// The error type used in [`PayloadBuilderAttributes::try_new`]. type Error: std::error::Error; /// Creates a new payload builder for the given parent block and the attributes. 
/// - /// Derives the unique [PayloadId] for the given parent and attributes + /// Derives the unique [`PayloadId`] for the given parent and attributes fn try_new( parent: B256, rpc_payload_attributes: Self::RpcPayloadAttributes, @@ -41,7 +41,7 @@ pub trait PayloadBuilderAttributes: Send + Sync + std::fmt::Debug { where Self: Sized; - /// Returns the [PayloadId] for the running payload job. + /// Returns the [`PayloadId`] for the running payload job. fn payload_id(&self) -> PayloadId; /// Returns the parent block hash for the running payload job. @@ -62,8 +62,8 @@ pub trait PayloadBuilderAttributes: Send + Sync + std::fmt::Debug { /// Returns the withdrawals for the running payload job. fn withdrawals(&self) -> &Withdrawals; - /// Returns the configured [CfgEnvWithHandlerCfg] and [BlockEnv] for the targeted payload (that - /// has the `parent` as its parent). + /// Returns the configured [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the targeted payload + /// (that has the `parent` as its parent). /// /// The `chain_spec` is used to determine the correct chain id and hardfork for the payload /// based on its timestamp. @@ -94,8 +94,8 @@ pub trait PayloadAttributes: /// Return the parent beacon block root for the payload attributes. fn parent_beacon_block_root(&self) -> Option; - /// Ensures that the payload attributes are valid for the given [ChainSpec] and - /// [EngineApiMessageVersion]. + /// Ensures that the payload attributes are valid for the given [`ChainSpec`] and + /// [`EngineApiMessageVersion`]. fn ensure_well_formed_attributes( &self, chain_spec: &ChainSpec, diff --git a/crates/ethereum-forks/src/forkid.rs b/crates/ethereum-forks/src/forkid.rs index cf28cec88..d2cab7a80 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -121,7 +121,7 @@ pub struct ForkId { /// See: /// /// -/// for how geth implements ForkId values and forward compatibility. +/// for how geth implements `ForkId` values and forward compatibility. #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable)] pub struct EnrForkIdEntry { /// The inner forkid @@ -183,7 +183,7 @@ pub enum ValidationError { RemoteStale { /// locally configured forkId local: ForkId, - /// ForkId received from remote + /// `ForkId` received from remote remote: ForkId, }, /// Local node is on an incompatible chain or needs a software update. 
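The `RemoteStale` variant above comes from EIP-2124 fork-id validation. As a rough illustration (a minimal sketch, not reth's implementation; the field layout and the `check_remote` helper are hypothetical), a peer is stale when it advertises the local fork hash but still treats an already-passed fork as pending:

```rust
// Simplified EIP-2124-style staleness check. `hash` stands for the CRC32
// over the genesis hash and past fork blocks; `next` is the next scheduled
// fork block, 0 if none. Illustrative only.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct ForkId {
    hash: u32,
    next: u64,
}

/// A remote peer that matches our fork hash but still expects a fork we
/// have already passed locally needs a software update, i.e. it is stale.
fn check_remote(local: ForkId, remote: ForkId, local_head: u64) -> Result<(), &'static str> {
    if remote.hash == local.hash && remote.next != 0 && local_head >= remote.next {
        return Err("remote stale");
    }
    Ok(())
}

fn main() {
    let local = ForkId { hash: 0xdead_beef, next: 0 };
    // The remote still thinks block 1_000 is a future fork while the local
    // head is already at 2_000, so validation reports it as stale.
    let remote = ForkId { hash: 0xdead_beef, next: 1_000 };
    assert!(check_remote(local, remote, 2_000).is_err());
}
```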
@@ -191,7 +191,7 @@ pub enum ValidationError { LocalIncompatibleOrStale { /// locally configured forkId local: ForkId, - /// ForkId received from remote + /// `ForkId` received from remote remote: ForkId, }, } @@ -389,9 +389,9 @@ impl ForkFilter { /// See also [`ForkFilter::set_head`] #[derive(Debug, Clone, Eq, PartialEq)] pub struct ForkTransition { - /// The new, active ForkId + /// The new, active `ForkId` pub current: ForkId, - /// The previously active ForkId before the transition + /// The previously active `ForkId` before the transition pub past: ForkId, } diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 3783063b4..cbef399cc 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -31,7 +31,7 @@ pub struct EthBeaconConsensus { } impl EthBeaconConsensus { - /// Create a new instance of [EthBeaconConsensus] + /// Create a new instance of [`EthBeaconConsensus`] pub fn new(chain_spec: Arc) -> Self { Self { chain_spec } } diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 58d8b8a95..e059c8ae9 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -178,7 +178,7 @@ impl EthPayloadBuilderAttributes { /// Creates a new payload builder for the given parent block and the attributes. /// - /// Derives the unique [PayloadId] for the given parent and attributes + /// Derives the unique [`PayloadId`] for the given parent and attributes pub fn new(parent: B256, attributes: PayloadAttributes) -> Self { let id = payload_id(&parent, &attributes); @@ -200,7 +200,7 @@ impl PayloadBuilderAttributes for EthPayloadBuilderAttributes { /// Creates a new payload builder for the given parent block and the attributes. /// - /// Derives the unique [PayloadId] for the given parent and attributes + /// Derives the unique [`PayloadId`] for the given parent and attributes fn try_new(parent: B256, attributes: PayloadAttributes) -> Result { Ok(Self::new(parent, attributes)) } @@ -293,7 +293,7 @@ impl PayloadBuilderAttributes for EthPayloadBuilderAttributes { } } -/// Generates the payload id for the configured payload from the [PayloadAttributes]. +/// Generates the payload id for the configured payload from the [`PayloadAttributes`]. /// /// Returns an 8-byte identifier by hashing the payload components with sha256 hash. pub(crate) fn payload_id(parent: &B256, attributes: &PayloadAttributes) -> PayloadId { diff --git a/crates/ethereum/evm/src/eip6110.rs b/crates/ethereum/evm/src/eip6110.rs index 0df99a997..074cd1f0b 100644 --- a/crates/ethereum/evm/src/eip6110.rs +++ b/crates/ethereum/evm/src/eip6110.rs @@ -17,7 +17,7 @@ sol! { } /// Parse [deposit contract](https://etherscan.io/address/0x00000000219ab540356cbb839cbe05303d7705fa) -/// (address is from the passed [ChainSpec]) deposits from receipts, and return them as a +/// (address is from the passed [`ChainSpec`]) deposits from receipts, and return them as a /// [vector](Vec) of (requests)[Request]. pub fn parse_deposits_from_receipts<'a, I>( chain_spec: &ChainSpec, diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 397653e31..7b1e7d455 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -131,7 +131,7 @@ where /// # Note /// /// It does __not__ apply post-execution changes that do not require an [EVM](Evm), for that see - /// [EthBlockExecutor::post_execution]. 
+ /// [`EthBlockExecutor::post_execution`]. fn execute_state_transitions( &self, block: &BlockWithSenders, diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 026e39cf0..72305981d 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -25,7 +25,7 @@ use reth_transaction_pool::{ pub struct EthereumNode; impl EthereumNode { - /// Returns a [ComponentsBuilder] configured for a regular Ethereum node. + /// Returns a [`ComponentsBuilder`] configured for a regular Ethereum node. pub fn components() -> ComponentsBuilder< Node, EthereumPoolBuilder, diff --git a/crates/evm/execution-errors/src/lib.rs b/crates/evm/execution-errors/src/lib.rs index 08db509d8..31693f6ce 100644 --- a/crates/evm/execution-errors/src/lib.rs +++ b/crates/evm/execution-errors/src/lib.rs @@ -97,7 +97,7 @@ pub enum BlockValidationError { DepositRequestDecode(String), } -/// BlockExecutor Errors +/// `BlockExecutor` Errors #[derive(Error, Debug)] pub enum BlockExecutionError { /// Validation error, transparently wrapping `BlockValidationError` @@ -148,7 +148,7 @@ impl BlockExecutionError { Self::Other(Box::new(error)) } - /// Create a new [BlockExecutionError::Other] from a given message. + /// Create a new [`BlockExecutionError::Other`] from a given message. pub fn msg(msg: impl Display) -> Self { Self::Other(msg.to_string().into()) } diff --git a/crates/evm/execution-types/src/bundle.rs b/crates/evm/execution-types/src/bundle.rs index 9952d5de4..10be00a51 100644 --- a/crates/evm/execution-types/src/bundle.rs +++ b/crates/evm/execution-types/src/bundle.rs @@ -50,7 +50,7 @@ impl From for BatchBlockExecutionOutput { pub type BundleStateInit = HashMap, Option, HashMap)>; -/// Types used inside RevertsInit to initialize revms reverts. +/// Types used inside `RevertsInit` to initialize revms reverts. pub type AccountRevertInit = (Option>, Vec); /// Type used to initialize revms reverts. @@ -120,7 +120,7 @@ impl BundleStateWithReceipts { self.bundle.state().iter().map(|(a, acc)| (*a, acc.info.as_ref())) } - /// Return iterator over all [BundleAccount]s in the bundle + /// Return iterator over all [`BundleAccount`]s in the bundle pub fn bundle_accounts_iter(&self) -> impl Iterator { self.bundle.state().iter().map(|(a, acc)| (*a, acc)) } @@ -132,7 +132,7 @@ impl BundleStateWithReceipts { /// Get storage if value is known. /// - /// This means that depending on status we can potentially return U256::ZERO. + /// This means that depending on status we can potentially return `U256::ZERO`. pub fn storage(&self, address: &Address, storage_key: U256) -> Option { self.bundle.account(address).and_then(|a| a.storage_slot(storage_key)) } @@ -142,8 +142,8 @@ impl BundleStateWithReceipts { self.bundle.bytecode(code_hash).map(Bytecode) } - /// Returns [HashedPostState] for this bundle state. - /// See [HashedPostState::from_bundle_state] for more info. + /// Returns [`HashedPostState`] for this bundle state. + /// See [`HashedPostState::from_bundle_state`] for more info. pub fn hash_state_slow(&self) -> HashedPostState { HashedPostState::from_bundle_state(&self.bundle.state) } @@ -288,7 +288,7 @@ impl BundleStateWithReceipts { self.receipts.extend(other.receipts.receipt_vec); } - /// Prepends present the state with the given BundleState. + /// Prepends present the state with the given `BundleState`. /// It adds changes from the given state but does not override any existing changes. /// /// Reverts and receipts are not updated. 
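The `prepend_state` doc above ("adds changes from the given state but does not override any existing changes") amounts to a keep-newest map merge. A minimal sketch, using a plain `HashMap` of hypothetical account values rather than a real `BundleState`:

```rust
use std::collections::HashMap;

// Hypothetical stand-in for prepending an older bundle state: entries
// from `older` are added only where the current state has no entry,
// so existing (newer) changes are never overridden.
fn prepend_state(
    current: &mut HashMap<&'static str, u64>,
    older: HashMap<&'static str, u64>,
) {
    for (account, value) in older {
        current.entry(account).or_insert(value);
    }
}

fn main() {
    let mut state = HashMap::from([("alice", 2)]);
    prepend_state(&mut state, HashMap::from([("alice", 1), ("bob", 7)]));
    assert_eq!(state["alice"], 2); // newer change kept
    assert_eq!(state["bob"], 7); // older-only entry added
}
```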
diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index a71150c86..d1090391b 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -15,7 +15,7 @@ use std::{borrow::Cow, collections::BTreeMap, fmt, ops::RangeInclusive}; /// The chain contains the state of accounts after execution of its blocks, /// changesets for those blocks (and their transactions), as well as the blocks themselves. /// -/// Used inside the BlockchainTree. +/// Used inside the `BlockchainTree`. /// /// # Warning /// @@ -25,7 +25,7 @@ pub struct Chain { /// All blocks in this chain. blocks: BTreeMap, /// The state of all accounts after execution of the _all_ blocks in this chain's range from - /// [Chain::first] to [Chain::tip], inclusive. + /// [`Chain::first`] to [`Chain::tip`], inclusive. /// /// This state also contains the individual changes that lead to the current state. state: BundleStateWithReceipts, @@ -403,7 +403,7 @@ impl<'a> ChainBlocks<'a> { self.blocks.values().flat_map(|block| block.transactions_with_sender()) } - /// Returns an iterator over all [TransactionSignedEcRecovered] in the blocks + /// Returns an iterator over all [`TransactionSignedEcRecovered`] in the blocks /// /// Note: This clones the transactions since it is assumed this is part of a shared [Chain]. #[inline] @@ -459,13 +459,13 @@ pub enum ChainSplit { /// Given block split is lower than first block. NoSplitCanonical(Chain), /// Chain is split into two: `[canonical]` and `[pending]` - /// The target of this chain split [ChainSplitTarget] belongs to the `canonical` chain. + /// The target of this chain split [`ChainSplitTarget`] belongs to the `canonical` chain. Split { /// Contains lower block numbers that are considered canonicalized. It ends with - /// the [ChainSplitTarget] block. The state of this chain is now empty and no longer + /// the [`ChainSplitTarget`] block. The state of this chain is now empty and no longer /// usable. canonical: Chain, - /// Right contains all subsequent blocks __after__ the [ChainSplitTarget] that are still + /// Right contains all subsequent blocks __after__ the [`ChainSplitTarget`] that are still /// pending. /// /// The state of the original chain is moved here. diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index beefdac7f..b13e286a9 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -12,7 +12,7 @@ pub use reth_storage_errors::provider::ProviderError; /// A general purpose executor trait that executes an input (e.g. block) and produces an output /// (e.g. state changes and receipts). /// -/// This executor does not validate the output, see [BatchExecutor] for that. +/// This executor does not validate the output, see [`BatchExecutor`] for that. pub trait Executor { /// The input type for the executor. type Input<'a>; @@ -25,7 +25,7 @@ pub trait Executor { /// /// # Note /// Execution happens without any validation of the output. To validate the output, use the - /// [BatchExecutor]. + /// [`BatchExecutor`]. /// /// # Returns /// The output of the block execution. @@ -91,7 +91,7 @@ pub trait BatchExecutor { /// /// Contains the state changes, transaction receipts, and total gas used in the block. /// -/// TODO(mattsse): combine with BundleStateWithReceipts +/// TODO(mattsse): combine with `BundleStateWithReceipts` #[derive(Debug)] pub struct BlockExecutionOutput { /// The changed state of the block after execution. 
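For orientation, the executor abstraction documented in the hunks above reduces to a trait with a lifetimed input and owned output/error types. A hedged sketch (the trait shape follows the docs; `LenExecutor` is a toy stand-in, not a real provider):

```rust
// Hedged reduction of the executor abstraction documented above: the
// associated types mirror the docs, the toy impl is purely illustrative.
trait Executor<DB> {
    type Input<'a>;
    type Output;
    type Error;

    /// Executes the input without validating the output; per the note
    /// above, validation is the batch executor's concern.
    fn execute(self, input: Self::Input<'_>) -> Result<Self::Output, Self::Error>;
}

struct LenExecutor;

impl<DB> Executor<DB> for LenExecutor {
    type Input<'a> = &'a [u8];
    type Output = usize;
    type Error = std::convert::Infallible;

    fn execute(self, input: Self::Input<'_>) -> Result<Self::Output, Self::Error> {
        Ok(input.len())
    }
}

fn main() {
    let out = <LenExecutor as Executor<()>>::execute(LenExecutor, b"block");
    assert_eq!(out, Ok(5));
}
```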
@@ -166,8 +166,8 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// /// # Verification /// - /// The on [Executor::execute] the executor is expected to validate the execution output of the - /// input, this includes: + /// The on [`Executor::execute`] the executor is expected to validate the execution output of + /// the input, this includes: /// - Cumulative gas used must match the input's gas used. /// - Receipts must match the input's receipts root. /// diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index c7f90057f..f74dc97c5 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -30,7 +30,7 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// Returns new EVM with the given database /// - /// This does not automatically configure the EVM with [ConfigureEvmEnv] methods. It is up to + /// This does not automatically configure the EVM with [`ConfigureEvmEnv`] methods. It is up to /// the caller to call an appropriate method to fill the transaction and block environment /// before executing any transactions using the provided EVM. fn evm<'a, DB: Database + 'a>( @@ -77,8 +77,8 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// Returns a new EVM with the given inspector. /// - /// Caution: This does not automatically configure the EVM with [ConfigureEvmEnv] methods. It is - /// up to the caller to call an appropriate method to fill the transaction and block + /// Caution: This does not automatically configure the EVM with [`ConfigureEvmEnv`] methods. It + /// is up to the caller to call an appropriate method to fill the transaction and block /// environment before executing any transactions using the provided EVM. fn evm_with_inspector<'a, DB, I>(&'a self, db: DB, inspector: I) -> Evm<'a, I, DB> where @@ -96,10 +96,10 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// This represents the set of methods used to configure the EVM's environment before block /// execution. pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { - /// Fill transaction environment from a [TransactionSigned] and the given sender address. + /// Fill transaction environment from a [`TransactionSigned`] and the given sender address. fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address); - /// Fill [CfgEnvWithHandlerCfg] fields according to the chain spec and given header + /// Fill [`CfgEnvWithHandlerCfg`] fields according to the chain spec and given header fn fill_cfg_env( cfg_env: &mut CfgEnvWithHandlerCfg, chain_spec: &ChainSpec, @@ -107,8 +107,8 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { total_difficulty: U256, ); - /// Convenience function to call both [fill_cfg_env](ConfigureEvmEnv::fill_cfg_env) and - /// [fill_block_env]. + /// Convenience function to call both [`fill_cfg_env`](ConfigureEvmEnv::fill_cfg_env) and + /// [`fill_block_env`]. fn fill_cfg_and_block_env( cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, diff --git a/crates/evm/src/noop.rs b/crates/evm/src/noop.rs index 220bbb39a..67ef964e7 100644 --- a/crates/evm/src/noop.rs +++ b/crates/evm/src/noop.rs @@ -12,7 +12,7 @@ use crate::execute::{ const UNAVAILABLE_FOR_NOOP: &str = "execution unavailable for noop"; -/// A [BlockExecutorProvider] implementation that does nothing. +/// A [`BlockExecutorProvider`] implementation that does nothing. 
#[derive(Debug, Default, Clone)] #[non_exhaustive] pub struct NoopBlockExecutorProvider; diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index d898165a5..9199747ca 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -11,7 +11,7 @@ use reth_storage_errors::provider::ProviderError; use revm_primitives::db::Database; use std::sync::Arc; -/// A [BlockExecutorProvider] that returns mocked execution results. +/// A [`BlockExecutorProvider`] that returns mocked execution results. #[derive(Clone, Debug, Default)] pub struct MockExecutorProvider { exec_results: Arc>>, diff --git a/crates/exex/src/context.rs b/crates/exex/src/context.rs index 7cedb4977..b76bdc106 100644 --- a/crates/exex/src/context.rs +++ b/crates/exex/src/context.rs @@ -9,7 +9,7 @@ use tokio::sync::mpsc::{Receiver, UnboundedSender}; use crate::{ExExEvent, ExExNotification}; -/// Captures the context that an ExEx has access to. +/// Captures the context that an `ExEx` has access to. #[derive(Debug)] pub struct ExExContext { /// The current head of the blockchain at launch. diff --git a/crates/exex/src/event.rs b/crates/exex/src/event.rs index 7929cf031..47e4225b4 100644 --- a/crates/exex/src/event.rs +++ b/crates/exex/src/event.rs @@ -1,12 +1,12 @@ use reth_primitives::BlockNumber; -/// Events emitted by an ExEx. +/// Events emitted by an `ExEx`. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ExExEvent { - /// Highest block processed by the ExEx. + /// Highest block processed by the `ExEx`. /// - /// The ExEx must guarantee that it will not require all earlier blocks in the future, meaning - /// that Reth is allowed to prune them. + /// The `ExEx` must guarantee that it will not require all earlier blocks in the future, + /// meaning that Reth is allowed to prune them. /// /// On reorgs, it's possible for the height to go down. FinishedHeight(BlockNumber), diff --git a/crates/exex/src/lib.rs b/crates/exex/src/lib.rs index 4e2d0dd85..cb246dc6a 100644 --- a/crates/exex/src/lib.rs +++ b/crates/exex/src/lib.rs @@ -1,27 +1,27 @@ // todo: expand this (examples, assumptions, invariants) -//! Execution extensions (ExEx). +//! Execution extensions (`ExEx`). //! //! An execution extension is a task that derives its state from Reth's state. //! //! Some examples of such state derives are rollups, bridges, and indexers. //! -//! An ExEx is a [`Future`] resolving to a `Result<()>` that is run indefinitely alongside Reth. +//! An `ExEx` is a [`Future`] resolving to a `Result<()>` that is run indefinitely alongside Reth. //! -//! ExEx's are initialized using an async closure that resolves to the ExEx; this closure gets +//! `ExEx`'s are initialized using an async closure that resolves to the `ExEx`; this closure gets //! passed an [`ExExContext`] where it is possible to spawn additional tasks and modify Reth. //! -//! Most ExEx's will want to derive their state from the [`CanonStateNotification`] channel given in -//! [`ExExContext`]. A new notification is emitted whenever blocks are executed in live and +//! Most `ExEx`'s will want to derive their state from the [`CanonStateNotification`] channel given +//! in [`ExExContext`]. A new notification is emitted whenever blocks are executed in live and //! historical sync. //! //! # Pruning //! -//! ExEx's **SHOULD** emit an `ExExEvent::FinishedHeight` event to signify what blocks have been +//! `ExEx`'s **SHOULD** emit an `ExExEvent::FinishedHeight` event to signify what blocks have been //! processed. 
This event is used by Reth to determine what state can be pruned. //! -//! An ExEx will only receive notifications for blocks greater than the block emitted in the event. -//! To clarify: if the ExEx emits `ExExEvent::FinishedHeight(0)` it will receive notifications for -//! any `block_number > 0`. +//! An `ExEx` will only receive notifications for blocks greater than the block emitted in the +//! event. To clarify: if the `ExEx` emits `ExExEvent::FinishedHeight(0)` it will receive +//! notifications for any `block_number > 0`. //! //! [`Future`]: std::future::Future //! [`ExExContext`]: crate::ExExContext diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 48bd8619b..5bd2c5ed8 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -19,46 +19,46 @@ use tokio::sync::{ }; use tokio_util::sync::{PollSendError, PollSender, ReusableBoxFuture}; -/// Metrics for an ExEx. +/// Metrics for an `ExEx`. #[derive(Metrics)] #[metrics(scope = "exex")] struct ExExMetrics { - /// The total number of notifications sent to an ExEx. + /// The total number of notifications sent to an `ExEx`. notifications_sent_total: Counter, - /// The total number of events an ExEx has sent to the manager. + /// The total number of events an `ExEx` has sent to the manager. events_sent_total: Counter, } -/// A handle to an ExEx used by the [`ExExManager`] to communicate with ExEx's. +/// A handle to an `ExEx` used by the [`ExExManager`] to communicate with `ExEx`'s. /// -/// A handle should be created for each ExEx with a unique ID. The channels returned by -/// [`ExExHandle::new`] should be given to the ExEx, while the handle itself should be given to the -/// manager in [`ExExManager::new`]. +/// A handle should be created for each `ExEx` with a unique ID. The channels returned by +/// [`ExExHandle::new`] should be given to the `ExEx`, while the handle itself should be given to +/// the manager in [`ExExManager::new`]. #[derive(Debug)] pub struct ExExHandle { /// The execution extension's ID. id: String, - /// Metrics for an ExEx. + /// Metrics for an `ExEx`. metrics: ExExMetrics, - /// Channel to send [`ExExNotification`]s to the ExEx. + /// Channel to send [`ExExNotification`]s to the `ExEx`. sender: PollSender, - /// Channel to receive [`ExExEvent`]s from the ExEx. + /// Channel to receive [`ExExEvent`]s from the `ExEx`. receiver: UnboundedReceiver, - /// The ID of the next notification to send to this ExEx. + /// The ID of the next notification to send to this `ExEx`. next_notification_id: usize, - /// The finished block number of the ExEx. + /// The finished block number of the `ExEx`. /// - /// If this is `None`, the ExEx has not emitted a `FinishedHeight` event. + /// If this is `None`, the `ExEx` has not emitted a `FinishedHeight` event. finished_height: Option, } impl ExExHandle { - /// Create a new handle for the given ExEx. + /// Create a new handle for the given `ExEx`. /// /// Returns the handle, as well as a [`UnboundedSender`] for [`ExExEvent`]s and a - /// [`Receiver`] for [`ExExNotification`]s that should be given to the ExEx. + /// [`Receiver`] for [`ExExNotification`]s that should be given to the `ExEx`. pub fn new(id: String) -> (Self, UnboundedSender, Receiver) { let (notification_tx, notification_rx) = mpsc::channel(1); let (event_tx, event_rx) = mpsc::unbounded_channel(); @@ -139,7 +139,7 @@ impl ExExHandle { } } -/// Metrics for the ExEx manager. +/// Metrics for the `ExEx` manager. 
+/// Metrics for the `ExEx` manager.
#[derive(Metrics)] #[metrics(scope = "exex_manager")] pub struct ExExManagerMetrics { @@ -151,7 +151,7 @@ pub struct ExExManagerMetrics { /// /// Note that this might be slightly bigger than the maximum capacity in some cases. buffer_size: Gauge, - /// Current number of ExEx's on the node. + /// Current number of `ExEx`'s on the node. num_exexs: Gauge, } @@ -166,7 +166,7 @@ pub struct ExExManagerMetrics { /// - Monitoring #[derive(Debug)] pub struct ExExManager { - /// Handles to communicate with the ExEx's. + /// Handles to communicate with the `ExEx`'s. exex_handles: Vec, /// [`ExExNotification`] channel from the [`ExExManagerHandle`]s. @@ -191,22 +191,22 @@ pub struct ExExManager { /// Whether the manager is ready to receive new notifications. is_ready: watch::Sender, - /// The finished height of all ExEx's. + /// The finished height of all `ExEx`'s. finished_height: watch::Sender, - /// A handle to the ExEx manager. + /// A handle to the `ExEx` manager. handle: ExExManagerHandle, - /// Metrics for the ExEx manager. + /// Metrics for the `ExEx` manager. metrics: ExExManagerMetrics, } impl ExExManager { /// Create a new [`ExExManager`]. /// - /// You must provide an [`ExExHandle`] for each ExEx and the maximum capacity of the + /// You must provide an [`ExExHandle`] for each `ExEx` and the maximum capacity of the /// notification buffer in the manager. /// - /// When the capacity is exceeded (which can happen if an ExEx is slow) no one can send + /// When the capacity is exceeded (which can happen if an `ExEx` is slow) no one can send /// notifications over [`ExExManagerHandle`]s until there is capacity again. pub fn new(handles: Vec, max_capacity: usize) -> Self { let num_exexs = handles.len(); @@ -363,9 +363,9 @@ impl Future for ExExManager { /// A handle to communicate with the [`ExExManager`]. #[derive(Debug)] pub struct ExExManagerHandle { - /// Channel to send notifications to the ExEx manager. + /// Channel to send notifications to the `ExEx` manager. exex_tx: UnboundedSender, - /// The number of ExEx's running on the node. + /// The number of `ExEx`'s running on the node. num_exexs: usize, /// A watch channel denoting whether the manager is ready for new notifications or not. /// @@ -378,7 +378,7 @@ pub struct ExExManagerHandle { is_ready: ReusableBoxFuture<'static, watch::Receiver>, /// The current capacity of the manager's internal notification buffer. current_capacity: Arc, - /// The finished height of all ExEx's. + /// The finished height of all `ExEx`'s. finished_height: watch::Receiver, } @@ -422,12 +422,12 @@ impl ExExManagerHandle { self.exex_tx.send(notification) } - /// Get the current capacity of the ExEx manager's internal notification buffer. + /// Get the current capacity of the `ExEx` manager's internal notification buffer. pub fn capacity(&self) -> usize { self.current_capacity.load(Ordering::Relaxed) } - /// Whether there is capacity in the ExEx manager's internal notification buffer. + /// Whether there is capacity in the `ExEx` manager's internal notification buffer. /// /// If this returns `false`, the owner of the handle should **NOT** send new notifications over /// the channel until the manager is ready again, as this can lead to unbounded memory growth. @@ -435,12 +435,12 @@ impl ExExManagerHandle { self.current_capacity.load(Ordering::Relaxed) > 0 } - /// Returns `true` if there are ExEx's installed in the node. + /// Returns `true` if there are `ExEx`'s installed in the node. 
+    /// Returns `true` if there are `ExEx`'s installed in the node.
pub const fn has_exexs(&self) -> bool { self.num_exexs > 0 } - /// The finished height of all ExEx's. + /// The finished height of all `ExEx`'s. pub fn finished_height(&self) -> watch::Receiver { self.finished_height.clone() } diff --git a/crates/exex/src/notification.rs b/crates/exex/src/notification.rs index ae8091e0c..9f1beec41 100644 --- a/crates/exex/src/notification.rs +++ b/crates/exex/src/notification.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use reth_provider::{CanonStateNotification, Chain}; -/// Notifications sent to an ExEx. +/// Notifications sent to an `ExEx`. #[derive(Debug, Clone, PartialEq, Eq)] pub enum ExExNotification { /// Chain got committed without a reorg, and only the new chain is returned. @@ -25,7 +25,7 @@ pub enum ExExNotification { } impl ExExNotification { - /// Returns the committed chain from the [Self::ChainCommitted] and [Self::ChainReorged] + /// Returns the committed chain from the [`Self::ChainCommitted`] and [`Self::ChainReorged`] /// variants, if any. pub fn committed_chain(&self) -> Option> { match self { @@ -34,8 +34,8 @@ impl ExExNotification { } } - /// Returns the reverted chain from the [Self::ChainReorged] and [Self::ChainReverted] variants, - /// if any. + /// Returns the reverted chain from the [`Self::ChainReorged`] and [`Self::ChainReverted`] + /// variants, if any. pub fn reverted_chain(&self) -> Option> { match self { Self::ChainReorged { old, new: _ } | Self::ChainReverted { old } => Some(old.clone()), diff --git a/crates/fs-util/src/lib.rs b/crates/fs-util/src/lib.rs index 6c5c2a722..8de46f561 100644 --- a/crates/fs-util/src/lib.rs +++ b/crates/fs-util/src/lib.rs @@ -13,11 +13,11 @@ use std::{ path::{Path, PathBuf}, }; -/// Result alias for [FsPathError]. +/// Result alias for [`FsPathError`]. pub type Result = std::result::Result; -/// Various error variants for `std::fs` operations that serve as an addition to the io::Error which -/// does not provide any information about the path. +/// Various error variants for `std::fs` operations that serve as an addition to the `io::Error` +/// which does not provide any information about the path. #[derive(Debug, thiserror::Error)] pub enum FsPathError { /// Error variant for failed write operation with additional path context. diff --git a/crates/metrics/src/common/mpsc.rs b/crates/metrics/src/common/mpsc.rs index d45fe87de..6d45f0f0f 100644 --- a/crates/metrics/src/common/mpsc.rs +++ b/crates/metrics/src/common/mpsc.rs @@ -15,7 +15,7 @@ use tokio::sync::mpsc::{ }; use tokio_util::sync::{PollSendError, PollSender}; -/// Wrapper around [mpsc::unbounded_channel] that returns a new unbounded metered channel. +/// Wrapper around [`mpsc::unbounded_channel`] that returns a new unbounded metered channel. pub fn metered_unbounded_channel( scope: &'static str, ) -> (UnboundedMeteredSender, UnboundedMeteredReceiver) { @@ -23,7 +23,7 @@ pub fn metered_unbounded_channel( (UnboundedMeteredSender::new(tx, scope), UnboundedMeteredReceiver::new(rx, scope)) } -/// Wrapper around [mpsc::channel] that returns a new bounded metered channel with the given +/// Wrapper around [`mpsc::channel`] that returns a new bounded metered channel with the given /// buffer size. pub fn metered_channel( buffer: usize, @@ -33,10 +33,10 @@ pub fn metered_channel( (MeteredSender::new(tx, scope), MeteredReceiver::new(rx, scope)) } -/// A wrapper type around [UnboundedSender](mpsc::UnboundedSender) that updates metrics on send. +/// A wrapper type around [`UnboundedSender`](mpsc::UnboundedSender) that updates metrics on send. 
#[derive(Debug)] pub struct UnboundedMeteredSender { - /// The [UnboundedSender](mpsc::UnboundedSender) that this wraps around + /// The [`UnboundedSender`](mpsc::UnboundedSender) that this wraps around sender: mpsc::UnboundedSender, /// Holds metrics for this type metrics: MeteredSenderMetrics, @@ -84,7 +84,7 @@ pub struct UnboundedMeteredReceiver { // === impl MeteredReceiver === impl UnboundedMeteredReceiver { - /// Creates a new [UnboundedMeteredReceiver] wrapping around the provided + /// Creates a new [`UnboundedMeteredReceiver`] wrapping around the provided /// [Receiver](mpsc::UnboundedReceiver) pub fn new(receiver: mpsc::UnboundedReceiver, scope: &'static str) -> Self { Self { receiver, metrics: MeteredReceiverMetrics::new(scope) } @@ -249,7 +249,7 @@ impl Stream for MeteredReceiver { } } -/// Throughput metrics for [MeteredSender] +/// Throughput metrics for [`MeteredSender`] #[derive(Clone, Metrics)] #[metrics(dynamic = true)] struct MeteredSenderMetrics { @@ -259,7 +259,7 @@ struct MeteredSenderMetrics { send_errors_total: Counter, } -/// Throughput metrics for [MeteredReceiver] +/// Throughput metrics for [`MeteredReceiver`] #[derive(Clone, Metrics)] #[metrics(dynamic = true)] struct MeteredReceiverMetrics { @@ -267,27 +267,27 @@ struct MeteredReceiverMetrics { messages_received_total: Counter, } -/// A wrapper type around [PollSender] that updates metrics on send. +/// A wrapper type around [`PollSender`] that updates metrics on send. #[derive(Debug)] pub struct MeteredPollSender { - /// The [PollSender] that this wraps around. + /// The [`PollSender`] that this wraps around. sender: PollSender, /// Holds metrics for this type. metrics: MeteredPollSenderMetrics, } impl MeteredPollSender { - /// Creates a new [`MeteredPollSender`] wrapping around the provided [PollSender]. + /// Creates a new [`MeteredPollSender`] wrapping around the provided [`PollSender`]. pub fn new(sender: PollSender, scope: &'static str) -> Self { Self { sender, metrics: MeteredPollSenderMetrics::new(scope) } } - /// Returns the underlying [PollSender]. + /// Returns the underlying [`PollSender`]. pub const fn inner(&self) -> &PollSender { &self.sender } - /// Calls the underlying [PollSender]'s `poll_reserve`, incrementing the appropriate + /// Calls the underlying [`PollSender`]'s `poll_reserve`, incrementing the appropriate /// metrics depending on the result. pub fn poll_reserve(&mut self, cx: &mut Context<'_>) -> Poll>> { match self.sender.poll_reserve(cx) { @@ -300,7 +300,7 @@ impl MeteredPollSender { } } - /// Calls the underlying [PollSender]'s `send_item`, incrementing the appropriate + /// Calls the underlying [`PollSender`]'s `send_item`, incrementing the appropriate /// metrics depending on the result. 
pub fn send_item(&mut self, item: T) -> Result<(), PollSendError> { match self.sender.send_item(item) { @@ -319,7 +319,7 @@ impl Clone for MeteredPollSender { } } -/// Throughput metrics for [MeteredPollSender] +/// Throughput metrics for [`MeteredPollSender`] #[derive(Clone, Metrics)] #[metrics(dynamic = true)] struct MeteredPollSenderMetrics { diff --git a/crates/net/common/src/ratelimit.rs b/crates/net/common/src/ratelimit.rs index 16365b551..16e403f10 100644 --- a/crates/net/common/src/ratelimit.rs +++ b/crates/net/common/src/ratelimit.rs @@ -27,12 +27,12 @@ impl RateLimit { Self { rate, state, sleep: Box::pin(tokio::time::sleep_until(until)) } } - /// Returns the configured limit of the [RateLimit] + /// Returns the configured limit of the [`RateLimit`] pub const fn limit(&self) -> u64 { self.rate.limit() } - /// Checks if the [RateLimit] is ready to handle a new call + /// Checks if the [`RateLimit`] is ready to handle a new call pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<()> { match self.state { State::Ready { .. } => return Poll::Ready(()), @@ -51,16 +51,16 @@ impl RateLimit { Poll::Ready(()) } - /// Wait until the [RateLimit] is ready. + /// Wait until the [`RateLimit`] is ready. pub async fn wait(&mut self) { poll_fn(|cx| self.poll_ready(cx)).await } - /// Updates the [RateLimit] when a new call was triggered + /// Updates the [`RateLimit`] when a new call was triggered /// /// # Panics /// - /// Panics if [RateLimit::poll_ready] returned [Poll::Pending] + /// Panics if [`RateLimit::poll_ready`] returned [`Poll::Pending`] pub fn tick(&mut self) { match self.state { State::Ready { mut until, remaining: mut rem } => { @@ -86,7 +86,7 @@ impl RateLimit { } } -/// Tracks the state of the [RateLimit] +/// Tracks the state of the [`RateLimit`] #[derive(Debug)] enum State { /// Currently limited diff --git a/crates/net/common/src/stream.rs b/crates/net/common/src/stream.rs index 605ea12d0..4cf6f12bb 100644 --- a/crates/net/common/src/stream.rs +++ b/crates/net/common/src/stream.rs @@ -1,6 +1,6 @@ use std::net::SocketAddr; use tokio::net::TcpStream; -/// This trait is for instrumenting a TCPStream with a socket addr +/// This trait is for instrumenting a `TCPStream` with a socket addr pub trait HasRemoteAddr { /// Maybe returns a [`SocketAddr`] fn remote_addr(&self) -> Option; diff --git a/crates/net/discv4/README.md b/crates/net/discv4/README.md index 8e941e41c..d5caa0ab4 100644 --- a/crates/net/discv4/README.md +++ b/crates/net/discv4/README.md @@ -14,9 +14,9 @@ This is inspired by the [discv5](https://github.com/sigp/discv5) crate and reuse The discovery service continuously attempts to connect to other nodes on the network until it has found enough peers. If UPnP (Universal Plug and Play) is supported by the router the service is running on, it will also accept connections from external nodes. In the discovery protocol, nodes exchange information about where the node can be reached to -eventually establish RLPx sessions. +eventually establish ``RLPx`` sessions. ## Trouble Shooting The discv4 protocol depends on the local system clock. If the clock is not accurate it can cause connectivity issues -because the expiration timestamps might be wrong. \ No newline at end of file +because the expiration timestamps might be wrong. 
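The clock caveat at the end of the README maps to a concrete rule: every discv4 packet carries a UNIX `expire` timestamp, and the receiver drops packets whose expiry is below its own clock (the crate's `ensure_not_expired` helper, shown later in this patch, also rejects timestamps above `i64::MAX`). A small illustrative sketch of why a fast local clock breaks connectivity:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

// Illustrative expiry rule only; the real check lives in Discv4Service.
fn is_expired(expire: u64, local_now: u64) -> bool {
    expire < local_now
}

fn main() {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock before UNIX epoch")
        .as_secs();
    let expire = now + 20; // packet advertised as valid for another 20 s

    assert!(!is_expired(expire, now)); // accurate clock: packet accepted
    assert!(is_expired(expire, now + 60)); // clock 60 s fast: packet dropped
}
```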
diff --git a/crates/net/discv4/src/config.rs b/crates/net/discv4/src/config.rs index c9007a910..6f2254d99 100644 --- a/crates/net/discv4/src/config.rs +++ b/crates/net/discv4/src/config.rs @@ -33,7 +33,7 @@ pub struct Discv4Config { pub ping_expiration: Duration, /// The rate at which new random lookups should be triggered. pub lookup_interval: Duration, - /// The duration of we consider a FindNode request timed out. + /// The duration of we consider a `FindNode` request timed out. pub request_timeout: Duration, /// The duration after which we consider an enr request timed out. pub enr_expiration: Duration, @@ -97,7 +97,7 @@ impl Discv4Config { self } - /// Returns the corresponding [`ResolveNatInterval`], if a [NatResolver] and an interval was + /// Returns the corresponding [`ResolveNatInterval`], if a [`NatResolver`] and an interval was /// configured pub fn resolve_external_ip_interval(&self) -> Option { let resolver = self.external_ip_resolver?; @@ -258,7 +258,7 @@ impl Discv4ConfigBuilder { self } - /// A set of lists that can ban IP's or PeerIds from the server. See + /// A set of lists that can ban IP's or `PeerIds` from the server. See /// [`BanList`]. pub fn ban_list(&mut self, ban_list: BanList) -> &mut Self { self.config.ban_list = ban_list; diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 91123e41e..98b82223f 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -157,9 +157,9 @@ type NodeRecordSender = OneshotSender>; /// The Discv4 frontend /// -/// This communicates with the [Discv4Service] by sending commands over a channel. +/// This communicates with the [`Discv4Service`] by sending commands over a channel. /// -/// See also [Discv4::spawn] +/// See also [`Discv4::spawn`] #[derive(Debug, Clone)] pub struct Discv4 { /// The address of the udp socket @@ -208,7 +208,7 @@ impl Discv4 { } } - /// Binds a new UdpSocket and creates the service + /// Binds a new `UdpSocket` and creates the service /// /// ``` /// # use std::io; @@ -263,7 +263,7 @@ impl Discv4 { self.local_addr } - /// Returns the [NodeRecord] of the local node. + /// Returns the [`NodeRecord`] of the local node. /// /// This includes the currently tracked external IP address of the node. pub fn node_record(&self) -> NodeRecord { @@ -283,12 +283,12 @@ impl Discv4 { /// Starts a `FindNode` recursive lookup that locates the closest nodes to the given node id. See also: /// /// The lookup initiator starts by picking α closest nodes to the target it knows of. The - /// initiator then sends concurrent FindNode packets to those nodes. α is a system-wide - /// concurrency parameter, such as 3. In the recursive step, the initiator resends FindNode to + /// initiator then sends concurrent `FindNode` packets to those nodes. α is a system-wide + /// concurrency parameter, such as 3. In the recursive step, the initiator resends `FindNode` to /// nodes it has learned about from previous queries. Of the k nodes the initiator has heard of - /// closest to the target, it picks α that it has not yet queried and resends FindNode to them. - /// Nodes that fail to respond quickly are removed from consideration until and unless they do - /// respond. + /// closest to the target, it picks α that it has not yet queried and resends `FindNode` to + /// them. Nodes that fail to respond quickly are removed from consideration until and unless + /// they do respond. 
// // If a round of FindNode queries fails to return a node any closer than the closest already // seen, the initiator resends the find node to all of the k closest nodes it has not already @@ -410,7 +410,7 @@ impl Discv4 { Ok(rx.await?) } - /// Terminates the spawned [Discv4Service]. + /// Terminates the spawned [`Discv4Service`]. pub fn terminate(&self) { self.send_to_service(Discv4Command::Terminated); } @@ -419,7 +419,7 @@ impl Discv4 { /// Manages discv4 peer discovery over UDP. /// /// This is a [Stream] to handles incoming and outgoing discv4 messages and emits updates via: -/// [Discv4Service::update_stream]. +/// [`Discv4Service::update_stream`]. #[must_use = "Stream does nothing unless polled"] pub struct Discv4Service { /// Local address of the UDP socket. @@ -462,7 +462,7 @@ pub struct Discv4Service { /// Entries here means we've proven the peer's endpoint but haven't completed our end of the /// endpoint proof pending_lookup: HashMap, - /// Currently active FindNode requests + /// Currently active `FindNode` requests pending_find_nodes: HashMap, /// Currently active ENR requests pending_enr_requests: HashMap, @@ -625,7 +625,7 @@ impl Discv4Service { } } - /// Returns the [PeerId] that identifies this node + /// Returns the [`PeerId`] that identifies this node pub const fn local_peer_id(&self) -> &PeerId { &self.local_node_record.id } @@ -648,7 +648,7 @@ impl Discv4Service { &mut self.local_node_record } - /// Returns true if the given PeerId is currently in the bucket + /// Returns true if the given `PeerId` is currently in the bucket pub fn contains_node(&self, id: PeerId) -> bool { let key = kad_key(id); self.kbuckets.get_index(&key).is_some() @@ -719,9 +719,10 @@ impl Discv4Service { /// Looks up the given node in the DHT /// - /// A FindNode packet requests information about nodes close to target. The target is a 64-byte - /// secp256k1 public key. When FindNode is received, the recipient should reply with Neighbors - /// packets containing the closest 16 nodes to target found in its local table. + /// A `FindNode` packet requests information about nodes close to target. The target is a + /// 64-byte secp256k1 public key. When `FindNode` is received, the recipient should reply + /// with Neighbors packets containing the closest 16 nodes to target found in its local + /// table. // // To guard against traffic amplification attacks, Neighbors replies should only be sent if the // sender of FindNode has been verified by the endpoint proof procedure. @@ -733,7 +734,7 @@ impl Discv4Service { /// /// At first the `ALPHA` (==3, defined concurrency factor) nodes that are closest to the target /// in the underlying DHT are selected to seed the lookup via `FindNode` requests. In the - /// recursive step, the initiator resends FindNode to nodes it has learned about from previous + /// recursive step, the initiator resends `FindNode` to nodes it has learned about from previous /// queries. 
/// /// This takes an optional Sender through which all successfully discovered nodes are sent once @@ -853,8 +854,8 @@ impl Discv4Service { /// Update the entry on RE-ping /// - /// On re-ping we check for a changed enr_seq if eip868 is enabled and when it changed we sent a - /// followup request to retrieve the updated ENR + /// On re-ping we check for a changed `enr_seq` if eip868 is enabled and when it changed we sent + /// a followup request to retrieve the updated ENR fn update_on_reping(&mut self, record: NodeRecord, mut last_enr_seq: Option) { if record.id == self.local_node_record.id { return @@ -945,7 +946,7 @@ impl Discv4Service { /// Adds all nodes /// - /// See [Self::add_node] + /// See [`Self::add_node`] pub fn add_all_nodes(&mut self, records: impl IntoIterator) { for record in records.into_iter() { self.add_node(record); @@ -1455,7 +1456,7 @@ impl Discv4Service { self.evict_failed_neighbours(now); } - /// Handles failed responses to FindNode + /// Handles failed responses to `FindNode` fn evict_failed_neighbours(&mut self, now: Instant) { let mut failed_neighbours = Vec::new(); self.pending_find_nodes.retain(|node_id, find_node_request| { @@ -1501,7 +1502,7 @@ impl Discv4Service { } } - /// Re-pings all nodes which endpoint proofs are considered expired: [``NodeEntry::is_expired] + /// Re-pings all nodes which endpoint proofs are considered expired: [`NodeEntry::is_expired`] /// /// This will send a `Ping` to the nodes, if a node fails to respond with a `Pong` to renew the /// endpoint proof it will be removed from the table. @@ -1531,7 +1532,7 @@ impl Discv4Service { /// be an i64. /// /// Returns an error if: - /// - invalid UNIX timestamp (larger than i64::MAX) + /// - invalid UNIX timestamp (larger than `i64::MAX`) /// - timestamp is expired (lower than current local UNIX timestamp) fn ensure_not_expired(&self, timestamp: u64) -> Result<(), ()> { // ensure the timestamp is a valid UNIX timestamp @@ -1925,7 +1926,7 @@ impl Default for ReceiveCache { } } -/// The commands sent from the frontend [Discv4] to the service [Discv4Service]. +/// The commands sent from the frontend [Discv4] to the service [`Discv4Service`]. enum Discv4Command { Add(NodeRecord), SetTcpPort(u16), @@ -1964,7 +1965,7 @@ struct PingRequest { reason: PingReason, } -/// Rotates the PeerId that is periodically looked up. +/// Rotates the `PeerId` that is periodically looked up. /// /// By selecting different targets, the lookups will be seeded with different ALPHA seed nodes. #[derive(Debug)] @@ -2186,7 +2187,7 @@ struct NodeEntry { last_seen: Instant, /// Last enr seq we retrieved via a ENR request. last_enr_seq: Option, - /// ForkId if retrieved via ENR requests. + /// `ForkId` if retrieved via ENR requests. fork_id: Option, /// Counter for failed findNode requests. find_node_failures: usize, @@ -2231,7 +2232,7 @@ impl NodeEntry { self.update_now(|s| std::mem::replace(&mut s.fork_id, fork_id)) } - /// Updates the last_seen timestamp and calls the closure + /// Updates the `last_seen` timestamp and calls the closure fn update_now(&mut self, f: F) -> R where F: FnOnce(&mut Self) -> R, @@ -2260,7 +2261,7 @@ enum PingReason { EstablishBond, /// Re-ping a peer. RePing, - /// Part of a lookup to ensure endpoint is proven before we can send a FindNode request. + /// Part of a lookup to ensure endpoint is proven before we can send a `FindNode` request. 
Lookup(NodeRecord, LookupContext), } diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index 46988f60d..ce72e264b 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -197,7 +197,7 @@ pub struct NodeEndpoint { pub address: IpAddr, /// The UDP port used for communication in the discovery protocol. pub udp_port: u16, - /// The TCP port used for communication in the RLPx protocol. + /// The TCP port used for communication in the `RLPx` protocol. pub tcp_port: u16, } @@ -343,11 +343,11 @@ impl Decodable for EnrRequest { /// A [ENRResponse packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#enrresponse-packet-0x06). /// -/// This packet is used to respond to an ENRRequest packet and includes the requested ENR along with -/// the hash of the original request. +/// This packet is used to respond to an `ENRRequest` packet and includes the requested ENR along +/// with the hash of the original request. #[derive(Clone, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)] pub struct EnrResponse { - /// The hash of the ENRRequest packet being replied to. + /// The hash of the `ENRRequest` packet being replied to. pub request_hash: B256, /// The ENR (Ethereum Node Record) for the responding node. pub enr: Enr, @@ -376,7 +376,7 @@ pub struct Ping { pub to: NodeEndpoint, /// The expiration timestamp. pub expire: u64, - /// Optional enr_seq for + /// Optional `enr_seq` for pub enr_sq: Option, } @@ -469,7 +469,7 @@ pub struct Pong { pub echo: B256, /// The expiration timestamp. pub expire: u64, - /// Optional enr_seq for + /// Optional `enr_seq` for pub enr_sq: Option, } diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 01c0f0c45..962882a30 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -57,9 +57,9 @@ pub struct ConfigBuilder { /// /// Defaults to L1 mainnet if not set. fork: Option<(&'static [u8], ForkId)>, - /// RLPx TCP socket to advertise. + /// `RLPx` TCP socket to advertise. /// - /// NOTE: IP address of RLPx socket overwrites IP address of same IP version in + /// NOTE: IP address of `RLPx` socket overwrites IP address of same IP version in /// [`discv5::ListenConfig`]. tcp_socket: SocketAddr, /// List of `(key, rlp-encoded-value)` tuples that should be advertised in local node record @@ -260,9 +260,9 @@ pub struct Config { /// Fork kv-pair to set in local node record. Identifies which network/chain/fork the node /// belongs, e.g. `(b"opstack", ChainId)` or `(b"eth", [ForkId])`. pub(super) fork: Option<(&'static [u8], EnrForkIdEntry)>, - /// RLPx TCP socket to advertise. + /// `RLPx` TCP socket to advertise. /// - /// NOTE: IP address of RLPx socket overwrites IP address of same IP version in + /// NOTE: IP address of `RLPx` socket overwrites IP address of same IP version in /// [`discv5::ListenConfig`]. pub(super) tcp_socket: SocketAddr, /// Additional kv-pairs (besides tcp port, udp port and fork) that should be advertised to @@ -281,7 +281,7 @@ pub struct Config { } impl Config { - /// Returns a new [`ConfigBuilder`], with the RLPx TCP port and IP version configured w.r.t. + /// Returns a new [`ConfigBuilder`], with the `RLPx` TCP port and IP version configured w.r.t. /// the given socket. pub fn builder(rlpx_tcp_socket: SocketAddr) -> ConfigBuilder { ConfigBuilder { @@ -308,7 +308,7 @@ impl Config { } } - /// Returns the RLPx (TCP) socket contained in the [`discv5::Config`]. 
This socket will be + /// Returns the `RLPx` (TCP) socket contained in the [`discv5::Config`]. This socket will be /// advertised to peers in the local [`Enr`](discv5::enr::Enr). pub const fn rlpx_socket(&self) -> &SocketAddr { &self.tcp_socket @@ -337,9 +337,9 @@ pub const fn ipv6(listen_config: &ListenConfig) -> Option { } } -/// Returns the amended [`discv5::ListenConfig`] based on the RLPx IP address. The ENR is limited +/// Returns the amended [`discv5::ListenConfig`] based on the `RLPx` IP address. The ENR is limited /// to one IP address per IP version (atm, may become spec'd how to advertise different addresses). -/// The RLPx address overwrites the discv5 address w.r.t. IP version. +/// The `RLPx` address overwrites the discv5 address w.r.t. IP version. pub fn amend_listen_config_wrt_rlpx( listen_config: &ListenConfig, rlpx_addr: IpAddr, @@ -365,8 +365,8 @@ pub fn amend_listen_config_wrt_rlpx( ListenConfig::from_two_sockets(discv5_socket_ipv4, discv5_socket_ipv6) } -/// Returns the sockets that can be used for discv5 with respect to the RLPx address. ENR specs only -/// acknowledge one address per IP version. +/// Returns the sockets that can be used for discv5 with respect to the `RLPx` address. ENR specs +/// only acknowledge one address per IP version. pub fn discv5_sockets_wrt_rlpx_addr( rlpx_addr: IpAddr, discv5_addr_ipv4: Option, diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index c39102173..de5c07427 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -66,7 +66,7 @@ pub const DEFAULT_MIN_TARGET_KBUCKET_INDEX: usize = 0; pub struct Discv5 { /// sigp/discv5 node. discv5: Arc, - /// [`IpMode`] of the the RLPx network. + /// [`IpMode`] of the `RLPx` network. rlpx_ip_mode: IpMode, /// Key used in kv-pair to ID chain, e.g. 'opstack' or 'eth'. fork_key: Option<&'static [u8]>, @@ -330,7 +330,7 @@ impl Discv5 { } /// Tries to convert an [`Enr`](discv5::Enr) into the backwards compatible type [`NodeRecord`], - /// w.r.t. local RLPx [`IpMode`]. Uses source socket as udp socket. + /// w.r.t. local `RLPx` [`IpMode`]. Uses source socket as udp socket. pub fn try_into_reachable( &self, enr: &discv5::Enr, @@ -389,7 +389,7 @@ impl Discv5 { // Complementary //////////////////////////////////////////////////////////////////////////////////////////////// - /// Returns the RLPx [`IpMode`] of the local node. + /// Returns the `RLPx` [`IpMode`] of the local node. pub const fn ip_mode(&self) -> IpMode { self.rlpx_ip_mode } diff --git a/crates/net/dns/src/config.rs b/crates/net/dns/src/config.rs index 6a31c36ad..1b85280f1 100644 --- a/crates/net/dns/src/config.rs +++ b/crates/net/dns/src/config.rs @@ -8,7 +8,7 @@ use std::{ #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -/// Settings for the [DnsDiscoveryService](crate::DnsDiscoveryService). +/// Settings for the [`DnsDiscoveryService`](crate::DnsDiscoveryService). 
#[derive(Debug, Clone)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct DnsDiscoveryConfig { diff --git a/crates/net/dns/src/error.rs b/crates/net/dns/src/error.rs index cd65f5699..6246e1c41 100644 --- a/crates/net/dns/src/error.rs +++ b/crates/net/dns/src/error.rs @@ -6,7 +6,7 @@ pub(crate) type ParseEntryResult = Result; /// Alias for lookup results pub(crate) type LookupResult = Result; -/// Error while parsing a [DnsEntry](crate::tree::DnsEntry) +/// Error while parsing a [`DnsEntry`](crate::tree::DnsEntry) #[derive(thiserror::Error, Debug)] pub enum ParseDnsEntryError { /// Unknown entry error. diff --git a/crates/net/dns/src/lib.rs b/crates/net/dns/src/lib.rs index 5000e524e..4738d935f 100644 --- a/crates/net/dns/src/lib.rs +++ b/crates/net/dns/src/lib.rs @@ -56,7 +56,7 @@ pub mod resolver; mod sync; pub mod tree; -/// [DnsDiscoveryService] front-end. +/// [`DnsDiscoveryService`] front-end. #[derive(Clone, Debug)] pub struct DnsDiscoveryHandle { /// Channel for sending commands to the service. @@ -96,7 +96,7 @@ pub struct DnsDiscoveryService { command_tx: UnboundedSender, /// Receiver half of the command channel. command_rx: UnboundedReceiverStream, - /// All subscribers for resolved [NodeRecord]s. + /// All subscribers for resolved [`NodeRecord`]s. node_record_listeners: Vec>, /// All the trees that can be synced. trees: HashMap, @@ -115,7 +115,7 @@ pub struct DnsDiscoveryService { // === impl DnsDiscoveryService === impl DnsDiscoveryService { - /// Creates a new instance of the [DnsDiscoveryService] using the given settings. + /// Creates a new instance of the [`DnsDiscoveryService`] using the given settings. /// /// ``` /// use reth_dns_discovery::{DnsDiscoveryService, DnsResolver}; @@ -170,7 +170,7 @@ impl DnsDiscoveryService { } } - /// Same as [DnsDiscoveryService::new] but also returns a new handle that's connected to the + /// Same as [`DnsDiscoveryService::new`] but also returns a new handle that's connected to the /// service pub fn new_pair(resolver: Arc, config: DnsDiscoveryConfig) -> (Self, DnsDiscoveryHandle) { let service = Self::new(resolver, config); @@ -377,7 +377,7 @@ pub struct DnsNodeRecordUpdate { pub enr: Enr, } -/// Commands sent from [DnsDiscoveryHandle] to [DnsDiscoveryService] +/// Commands sent from [`DnsDiscoveryHandle`] to [`DnsDiscoveryService`] enum DnsDiscoveryCommand { /// Sync a tree SyncTree(LinkEntry), @@ -391,7 +391,7 @@ pub enum DnsDiscoveryEvent { Enr(Enr), } -/// Converts an [Enr] into a [NodeRecord] +/// Converts an [Enr] into a [`NodeRecord`] fn convert_enr_node_record(enr: &Enr) -> Option { let node_record = NodeRecord { address: enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from))?, diff --git a/crates/net/dns/src/query.rs b/crates/net/dns/src/query.rs index dece35925..a1c67740e 100644 --- a/crates/net/dns/src/query.rs +++ b/crates/net/dns/src/query.rs @@ -62,7 +62,7 @@ impl QueryPool { self.queued_queries.push_back(Query::Root(Box::pin(resolve_root(resolver, link, timeout)))) } - /// Resolves the [DnsEntry] for `` + /// Resolves the [`DnsEntry`] for `` pub(crate) fn resolve_entry(&mut self, link: LinkEntry, hash: String, kind: ResolveKind) { let resolver = Arc::clone(&self.resolver); let timeout = self.lookup_timeout; @@ -153,7 +153,7 @@ pub(crate) enum QueryOutcome { Entry(ResolveEntryResult), } -/// Retrieves the [DnsEntry] +/// Retrieves the [`DnsEntry`] async fn resolve_entry( resolver: Arc, link: LinkEntry, diff --git a/crates/net/dns/src/resolver.rs b/crates/net/dns/src/resolver.rs index 
870ea30d8..42c444f89 100644 --- a/crates/net/dns/src/resolver.rs +++ b/crates/net/dns/src/resolver.rs @@ -33,7 +33,7 @@ impl Resolver for AsyncResolver
{ /// An asynchronous DNS resolver /// -/// See also [TokioAsyncResolver] +/// See also [`TokioAsyncResolver`] /// /// ``` /// # fn t() { @@ -43,7 +43,7 @@ impl Resolver for AsyncResolver
{ /// ``` /// /// Note: This [Resolver] can send multiple lookup attempts, See also -/// [ResolverOpts](trust_dns_resolver::config::ResolverOpts) which configures 2 attempts (1 retry) +/// [`ResolverOpts`](trust_dns_resolver::config::ResolverOpts) which configures 2 attempts (1 retry) /// by default. #[derive(Clone, Debug)] pub struct DnsResolver(TokioAsyncResolver); @@ -51,7 +51,7 @@ pub struct DnsResolver(TokioAsyncResolver); // === impl DnsResolver === impl DnsResolver { - /// Create a new resolver by wrapping the given [AsyncResolver] + /// Create a new resolver by wrapping the given [`AsyncResolver`] pub const fn new(resolver: TokioAsyncResolver) -> Self { Self(resolver) } diff --git a/crates/net/dns/src/sync.rs b/crates/net/dns/src/sync.rs index 4c6633667..cfe6862c9 100644 --- a/crates/net/dns/src/sync.rs +++ b/crates/net/dns/src/sync.rs @@ -138,7 +138,7 @@ pub(crate) enum SyncAction { Link(String), } -/// How the [SyncTree::update_root] changed the root +/// How the [`SyncTree::update_root`] changed the root enum SyncState { RootUpdate, Pending, diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 1f7a78d31..64ae86820 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -282,7 +282,7 @@ where Provider: HeaderProvider + Unpin + 'static, Self: BodyDownloader + 'static, { - /// Spawns the downloader task via [tokio::task::spawn] + /// Spawns the downloader task via [`tokio::task::spawn`] pub fn into_task(self) -> TaskDownloader { self.into_task_with(&TokioTaskExecutor::default()) } @@ -461,7 +461,7 @@ impl OrderedBodiesResponse { /// Returns the size of the response in bytes /// - /// See [BlockResponse::size] + /// See [`BlockResponse::size`] #[inline] const fn size(&self) -> usize { self.size @@ -488,7 +488,7 @@ impl Ord for OrderedBodiesResponse { } } -/// Builder for [BodiesDownloader]. +/// Builder for [`BodiesDownloader`]. #[derive(Debug, Clone)] pub struct BodiesDownloaderBuilder { /// The batch size of non-empty blocks per one request @@ -502,8 +502,8 @@ pub struct BodiesDownloaderBuilder { } impl BodiesDownloaderBuilder { - /// Creates a new [BodiesDownloaderBuilder] with configurations based on the provided - /// [BodiesConfig]. + /// Creates a new [`BodiesDownloaderBuilder`] with configurations based on the provided + /// [`BodiesConfig`]. pub fn new(config: BodiesConfig) -> Self { Self::default() .with_stream_batch_size(config.downloader_stream_batch_size) diff --git a/crates/net/downloaders/src/bodies/noop.rs b/crates/net/downloaders/src/bodies/noop.rs index 2ad60d481..b1aa0e2ea 100644 --- a/crates/net/downloaders/src/bodies/noop.rs +++ b/crates/net/downloaders/src/bodies/noop.rs @@ -6,7 +6,7 @@ use reth_network_p2p::{ use reth_primitives::BlockNumber; use std::ops::RangeInclusive; -/// A [BodyDownloader] implementation that does nothing. +/// A [`BodyDownloader`] implementation that does nothing. #[derive(Debug, Default)] #[non_exhaustive] pub struct NoopBodiesDownloader; diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs index 47f2a1960..35e5b4414 100644 --- a/crates/net/downloaders/src/bodies/queue.rs +++ b/crates/net/downloaders/src/bodies/queue.rs @@ -14,7 +14,7 @@ use std::{ task::{Context, Poll}, }; -/// The wrapper around [FuturesUnordered] that keeps information +/// The wrapper around [`FuturesUnordered`] that keeps information /// about the blocks currently being requested. 
#[derive(Debug)] pub(crate) struct BodiesRequestQueue { diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 905c199fe..08f223ef0 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -25,9 +25,9 @@ use std::{ /// It then proceeds to verify the downloaded bodies. In case of a validation error, /// the future will start over. /// -/// The future will filter out any empty headers (see [reth_primitives::Header::is_empty]) from the -/// request. If [BodiesRequestFuture] was initialized with all empty headers, no request will be -/// dispatched and they will be immediately returned upon polling. +/// The future will filter out any empty headers (see [`reth_primitives::Header::is_empty`]) from +/// the request. If [`BodiesRequestFuture`] was initialized with all empty headers, no request will +/// be dispatched and they will be immediately returned upon polling. /// /// NB: This assumes that peers respond with bodies in the order that they were requested. /// This is a reasonable assumption to make as that's [what Geth @@ -55,7 +55,7 @@ impl BodiesRequestFuture where B: BodiesClient + 'static, { - /// Returns an empty future. Use [BodiesRequestFuture::with_headers] to set the request. + /// Returns an empty future. Use [`BodiesRequestFuture::with_headers`] to set the request. pub(crate) fn new( client: Arc, consensus: Arc, diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index 9dfb747a6..42b21d5f1 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -17,7 +17,7 @@ use tokio::sync::{mpsc, mpsc::UnboundedSender}; use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream}; use tokio_util::sync::PollSender; -/// The maximum number of [BodyDownloaderResult]s to hold in the buffer. +/// The maximum number of [`BodyDownloaderResult`]s to hold in the buffer. pub const BODIES_TASK_BUFFER_SIZE: usize = 4; /// A [BodyDownloader] that drives a spawned [BodyDownloader] on a spawned task. @@ -32,7 +32,7 @@ pub struct TaskDownloader { // === impl TaskDownloader === impl TaskDownloader { - /// Spawns the given `downloader` via [tokio::task::spawn] returns a [TaskDownloader] that's + /// Spawns the given `downloader` via [`tokio::task::spawn`] and returns a [`TaskDownloader`] that's /// connected to that task. /// /// # Panics /// @@ -64,8 +64,8 @@ impl TaskDownloader { Self::spawn_with(downloader, &TokioTaskExecutor::default()) } - /// Spawns the given `downloader` via the given [TaskSpawner] returns a [TaskDownloader] that's - /// connected to that task. + /// Spawns the given `downloader` via the given [`TaskSpawner`] and returns a [`TaskDownloader`] + /// that's connected to that task. pub fn spawn_with(downloader: T, spawner: &S) -> Self where T: BodyDownloader + 'static, @@ -101,7 +101,7 @@ impl Stream for TaskDownloader { } } -/// A [BodyDownloader] that runs on its own task +/// A [`BodyDownloader`] that runs on its own task struct SpawnedDownloader { updates: UnboundedReceiverStream>, bodies_tx: PollSender, diff --git a/crates/net/downloaders/src/headers/noop.rs b/crates/net/downloaders/src/headers/noop.rs index 093d87f8f..210655f7e 100644 --- a/crates/net/downloaders/src/headers/noop.rs +++ b/crates/net/downloaders/src/headers/noop.rs @@ -5,7 +5,7 @@ use reth_network_p2p::headers::{ }; use reth_primitives::SealedHeader; -/// A [HeaderDownloader] implementation that does nothing. 
+/// A [`HeaderDownloader`] implementation that does nothing. #[derive(Debug, Default)] #[non_exhaustive] pub struct NoopHeaderDownloader; diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 68f1a9113..591d1ae6a 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -54,14 +54,14 @@ impl From for ReverseHeadersDownloaderError { /// Downloads headers concurrently. /// -/// This [HeaderDownloader] downloads headers using the configured [HeadersClient]. +/// This [`HeaderDownloader`] downloads headers using the configured [`HeadersClient`]. /// Headers can be requested by hash or block number and take a `limit` parameter. This downloader /// tries to fill the gap between the local head of the node and the chain tip by issuing multiple -/// requests at a time but yielding them in batches on [Stream::poll_next]. +/// requests at a time but yielding them in batches on [`Stream::poll_next`]. /// -/// **Note:** This downloader downloads in reverse, see also [HeadersDirection::Falling], this means -/// the batches of headers that this downloader yields will start at the chain tip and move towards -/// the local head: falling block numbers. +/// **Note:** This downloader downloads in reverse, see also [`HeadersDirection::Falling`], this +/// means the batches of headers that this downloader yields will start at the chain tip and move +/// towards the local head: falling block numbers. #[must_use = "Stream does nothing unless polled"] #[derive(Debug)] pub struct ReverseHeadersDownloader { @@ -112,7 +112,7 @@ impl ReverseHeadersDownloader where H: HeadersClient + 'static, { - /// Convenience method to create a [ReverseHeadersDownloaderBuilder] without importing it + /// Convenience method to create a [`ReverseHeadersDownloaderBuilder`] without importing it pub fn builder() -> ReverseHeadersDownloaderBuilder { ReverseHeadersDownloaderBuilder::default() } @@ -645,7 +645,7 @@ where H: HeadersClient, Self: HeaderDownloader + 'static, { - /// Spawns the downloader task via [tokio::task::spawn] + /// Spawns the downloader task via [`tokio::task::spawn`] pub fn into_task(self) -> TaskDownloader { self.into_task_with(&TokioTaskExecutor::default()) } @@ -912,7 +912,7 @@ where } } -/// The outcome of the [HeadersRequestFuture] +/// The outcome of the [`HeadersRequestFuture`] struct HeadersRequestOutcome { request: HeadersRequest, outcome: PeerRequestResult>, @@ -1070,7 +1070,7 @@ impl SyncTargetBlock { } } -/// The builder for [ReverseHeadersDownloader] with +/// The builder for [`ReverseHeadersDownloader`] with /// some default settings #[derive(Debug)] pub struct ReverseHeadersDownloaderBuilder { @@ -1087,8 +1087,8 @@ pub struct ReverseHeadersDownloaderBuilder { } impl ReverseHeadersDownloaderBuilder { - /// Creates a new [ReverseHeadersDownloaderBuilder] with configurations based on the provided - /// [HeadersConfig]. + /// Creates a new [`ReverseHeadersDownloaderBuilder`] with configurations based on the provided + /// [`HeadersConfig`]. pub fn new(config: HeadersConfig) -> Self { Self::default() .request_limit(config.downloader_request_limit) @@ -1125,7 +1125,7 @@ impl ReverseHeadersDownloaderBuilder { /// Set the stream batch size /// - /// This determines the number of headers the [ReverseHeadersDownloader] will yield on + /// This determines the number of headers the [`ReverseHeadersDownloader`] will yield on /// `Stream::next`. 
This will be the number of headers the headers stage will commit at a /// time. pub const fn stream_batch_size(mut self, size: usize) -> Self { @@ -1135,7 +1135,7 @@ impl ReverseHeadersDownloaderBuilder { /// Set the min number of concurrent requests. /// - /// If there's capacity the [ReverseHeadersDownloader] will keep at least this many requests + /// If there's capacity the [`ReverseHeadersDownloader`] will keep at least this many requests /// active at a time. pub const fn min_concurrent_requests(mut self, min_concurrent_requests: usize) -> Self { self.min_concurrent_requests = min_concurrent_requests; self } @@ -1154,14 +1154,14 @@ impl ReverseHeadersDownloaderBuilder { /// /// This essentially determines how much memory the downloader can use for buffering responses /// that arrive out of order. The total number of buffered headers is `request_limit * - /// max_buffered_responses`. If the [ReverseHeadersDownloader]'s buffered responses exceeds this - /// threshold it waits until there's capacity again before sending new requests. + /// max_buffered_responses`. If the [`ReverseHeadersDownloader`]'s buffered responses exceed + /// this threshold it waits until there's capacity again before sending new requests. pub const fn max_buffered_responses(mut self, max_buffered_responses: usize) -> Self { self.max_buffered_responses = max_buffered_responses; self } - /// Build [ReverseHeadersDownloader] with provided consensus + /// Build [`ReverseHeadersDownloader`] with provided consensus /// and header client implementations pub fn build(self, client: H, consensus: Arc) -> ReverseHeadersDownloader where @@ -1198,7 +1198,7 @@ impl ReverseHeadersDownloaderBuilder { } } -/// Configures and returns the next [HeadersRequest] based on the given parameters +/// Configures and returns the next [`HeadersRequest`] based on the given parameters /// /// The request will start at the given `next_request_block_number` block. /// The `limit` of the request will either be the targeted `request_limit` or the difference of @@ -1225,7 +1225,7 @@ mod tests { use reth_consensus::test_utils::TestConsensus; use reth_network_p2p::test_utils::TestHeadersClient; - /// Tests that `replace_number` works the same way as Option::replace + /// Tests that `replace_number` works the same way as `Option::replace` #[test] fn test_replace_number_semantics() { struct Fixture { diff --git a/crates/net/downloaders/src/headers/task.rs b/crates/net/downloaders/src/headers/task.rs index 1d99c3b75..b3fa27fde 100644 --- a/crates/net/downloaders/src/headers/task.rs +++ b/crates/net/downloaders/src/headers/task.rs @@ -31,8 +31,8 @@ pub struct TaskDownloader { // === impl TaskDownloader === impl TaskDownloader { - /// Spawns the given `downloader` via [tokio::task::spawn] and returns a [TaskDownloader] that's - /// connected to that task. + /// Spawns the given `downloader` via [`tokio::task::spawn`] and returns a [`TaskDownloader`] + /// that's connected to that task. /// /// # Panics /// @@ -60,8 +60,8 @@ impl TaskDownloader { Self::spawn_with(downloader, &TokioTaskExecutor::default()) } - /// Spawns the given `downloader` via the given [TaskSpawner] returns a [TaskDownloader] that's - /// connected to that task. + /// Spawns the given `downloader` via the given [`TaskSpawner`] and returns a [`TaskDownloader`] + /// that's connected to that task. 
pub fn spawn_with(downloader: T, spawner: &S) -> Self where T: HeaderDownloader + 'static, @@ -107,7 +107,7 @@ impl Stream for TaskDownloader { } } -/// A [HeaderDownloader] that runs on its own task +/// A [`HeaderDownloader`] that runs on its own task struct SpawnedDownloader { updates: UnboundedReceiverStream, headers_tx: PollSender>>, @@ -169,7 +169,7 @@ impl Future for SpawnedDownloader { } } -/// Commands delegated tot the spawned [HeaderDownloader] +/// Commands delegated to the spawned [`HeaderDownloader`] enum DownloaderUpdates { UpdateSyncGap(SealedHeader, SyncTarget), UpdateLocalHead(SealedHeader), diff --git a/crates/net/downloaders/src/headers/test_utils.rs b/crates/net/downloaders/src/headers/test_utils.rs index c04f08b4c..536b267f2 100644 --- a/crates/net/downloaders/src/headers/test_utils.rs +++ b/crates/net/downloaders/src/headers/test_utils.rs @@ -4,7 +4,7 @@ use reth_primitives::SealedHeader; -/// Returns a new [SealedHeader] that's the child header of the given `parent`. +/// Returns a new [`SealedHeader`] that's the child header of the given `parent`. pub(crate) fn child_header(parent: &SealedHeader) -> SealedHeader { let mut child = parent.as_ref().clone(); child.number += 1; diff --git a/crates/net/downloaders/src/lib.rs b/crates/net/downloaders/src/lib.rs index 81e669d88..0199cc02b 100644 --- a/crates/net/downloaders/src/lib.rs +++ b/crates/net/downloaders/src/lib.rs @@ -23,13 +23,13 @@ pub mod metrics; /// Module managing file-based data retrieval and buffering. /// -/// Contains [FileClient](file_client::FileClient) to read block data from files, +/// Contains [`FileClient`](file_client::FileClient) to read block data from files, /// efficiently buffering headers and bodies for retrieval. pub mod file_client; /// Module managing file-based data retrieval and buffering of receipts. /// -/// Contains [ReceiptFileClient](receipt_file_client::ReceiptFileClient) to read receipt data from +/// Contains [`ReceiptFileClient`](receipt_file_client::ReceiptFileClient) to read receipt data from /// files, efficiently buffering receipts for retrieval. /// /// Currently configured to use codec [`HackReceipt`](file_codec_ovm_receipt::HackReceipt) based on diff --git a/crates/net/downloaders/src/test_utils/bodies_client.rs b/crates/net/downloaders/src/test_utils/bodies_client.rs index d131145f0..ae970931b 100644 --- a/crates/net/downloaders/src/test_utils/bodies_client.rs +++ b/crates/net/downloaders/src/test_utils/bodies_client.rs @@ -16,7 +16,7 @@ use std::{ }; use tokio::sync::Mutex; -/// A [BodiesClient] for testing. +/// A [`BodiesClient`] for testing. #[derive(Debug, Default)] pub struct TestBodiesClient { bodies: Arc>>, diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index c1555c468..f7e4e9af3 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -90,7 +90,7 @@ fn split_at_mut(arr: &mut [T], idx: usize) -> Result<(&mut [T], &mut [T]), EC Ok(arr.split_at_mut(idx)) } -/// A parsed RLPx encrypted message +/// A parsed `RLPx` encrypted message /// /// From the devp2p spec, this should help perform the following operations: /// @@ -103,9 +103,9 @@ fn split_at_mut(arr: &mut [T], idx: usize) -> Result<(&mut [T], &mut [T]), EC pub struct EncryptedMessage<'a> { /// The auth data, used when checking the `tag` with HMAC-SHA256. /// - /// This is not mentioned in the RLPx spec, but included in implementations. + /// This is not mentioned in the `RLPx` spec, but included in implementations. 
/// - /// See source comments of [Self::check_integrity] for more information. + /// See source comments of [`Self::check_integrity`] for more information. auth_data: [u8; 2], /// The remote secp256k1 public key public_key: PublicKey, @@ -118,7 +118,7 @@ pub struct EncryptedMessage<'a> { } impl<'a> EncryptedMessage<'a> { - /// Parse the given `data` into an [EncryptedMessage]. + /// Parse the given `data` into an [`EncryptedMessage`]. /// /// If the data is not long enough to contain the expected fields, this returns an error. pub fn parse(data: &mut [u8]) -> Result, ECIESError> { @@ -491,7 +491,7 @@ impl ECIES { self.parse_auth_unencrypted(unencrypted) } - /// Create an `ack` message using the internal nonce, local ephemeral public key, and RLPx + /// Create an `ack` message using the internal nonce, local ephemeral public key, and `RLPx` /// ECIES protocol version. fn create_ack_unencrypted(&self) -> impl AsRef<[u8]> { #[derive(RlpEncodable, RlpMaxEncodedLen)] @@ -850,7 +850,7 @@ mod tests { } #[test] - /// Test vectors from https://eips.ethereum.org/EIPS/eip-8 + /// Test vectors from <https://eips.ethereum.org/EIPS/eip-8> fn eip8_test() { // EIP-8 format with version 4 and no additional list elements let auth2 = hex!( diff --git a/crates/net/ecies/src/error.rs b/crates/net/ecies/src/error.rs index c4b18a89f..424431119 100644 --- a/crates/net/ecies/src/error.rs +++ b/crates/net/ecies/src/error.rs @@ -77,8 +77,8 @@ pub enum ECIESErrorImpl { /// /// This exact error case happens when the wrapped stream in /// [`Framed`](tokio_util::codec::Framed) is closed by the peer, See - /// [ConnectionReset](std::io::ErrorKind::ConnectionReset) and the ecies codec fails to decode - /// a message from the (partially filled) buffer. + /// [`ConnectionReset`](std::io::ErrorKind::ConnectionReset) and the ecies codec fails to + /// decode a message from the (partially filled) buffer. #[error("stream closed due to not being readable")] UnreadableStream, // Error when data is not received from peer for a prolonged period. diff --git a/crates/net/ecies/src/lib.rs b/crates/net/ecies/src/lib.rs index 5fd165eb9..07fb044c5 100644 --- a/crates/net/ecies/src/lib.rs +++ b/crates/net/ecies/src/lib.rs @@ -1,4 +1,4 @@ -//! RLPx ECIES framed transport protocol. +//! `RLPx` ECIES framed transport protocol. #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", diff --git a/crates/net/ecies/src/mac.rs b/crates/net/ecies/src/mac.rs index a3e655f33..30baa298c 100644 --- a/crates/net/ecies/src/mac.rs +++ b/crates/net/ecies/src/mac.rs @@ -1,7 +1,7 @@ //! # Ethereum MAC Module //! //! This module provides the implementation of the Ethereum MAC (Message Authentication Code) -//! construction, as specified in the Ethereum RLPx protocol. +//! construction, as specified in the Ethereum `RLPx` protocol. //! //! The Ethereum MAC is a nonstandard MAC construction that utilizes AES-256 (as a block cipher) //! and Keccak-256. It is specifically designed for messages of 128 bits in length and is not @@ -20,7 +20,7 @@ use typenum::U16; /// Type alias for a fixed-size array of 16 bytes used as headers. /// -/// This type is defined as [`GenericArray`] and is commonly employed in Ethereum RLPx +/// This type is defined as [`GenericArray`] and is commonly employed in Ethereum `RLPx` /// protocol-related structures for headers. It represents 16 bytes of data used in various /// cryptographic operations, such as MAC (Message Authentication Code) computation. 
pub type HeaderBytes = GenericArray; diff --git a/crates/net/ecies/src/stream.rs b/crates/net/ecies/src/stream.rs index d2930166d..0329d4fef 100644 --- a/crates/net/ecies/src/stream.rs +++ b/crates/net/ecies/src/stream.rs @@ -50,7 +50,7 @@ where Self::connect_with_timeout(transport, secret_key, remote_id, HANDSHAKE_TIMEOUT).await } - /// Wrapper around connect_no_timeout which enforces a timeout. + /// Wrapper around `connect_no_timeout` which enforces a timeout. pub async fn connect_with_timeout( transport: Io, secret_key: SecretKey, diff --git a/crates/net/ecies/src/util.rs b/crates/net/ecies/src/util.rs index 1984a3766..f6b30288a 100644 --- a/crates/net/ecies/src/util.rs +++ b/crates/net/ecies/src/util.rs @@ -9,7 +9,7 @@ pub(crate) fn sha256(data: &[u8]) -> B256 { B256::from(Sha256::digest(data).as_ref()) } -/// Produces a HMAC_SHA256 digest of the `input_data` and `auth_data` with the given `key`. +/// Produces a `HMAC_SHA256` digest of the `input_data` and `auth_data` with the given `key`. /// This is done by accumulating each slice in `input_data` into the HMAC state, then accumulating /// the `auth_data` and returning the resulting digest. pub(crate) fn hmac_sha256(key: &[u8], input: &[&[u8]], auth_data: &[u8]) -> B256 { diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index d99e8560e..cbd5ca536 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -45,7 +45,7 @@ pub struct ProtocolMessage { } impl ProtocolMessage { - /// Create a new ProtocolMessage from a message type and message rlp bytes. + /// Create a new `ProtocolMessage` from a message type and message rlp bytes. pub fn decode_message(version: EthVersion, buf: &mut &[u8]) -> Result { let message_type = EthMessageID::decode(buf)?; @@ -176,44 +176,44 @@ impl From for ProtocolBroadcastMessage { /// correlate request-response message pairs. This allows for request multiplexing. /// /// The `eth/67` is based on `eth/66` but only removes two messages, [`GetNodeData`] and -/// [``NodeData]. +/// [`NodeData`]. /// -/// The `eth/68` changes only NewPooledTransactionHashes to include `types` and `sized`. For -/// it, NewPooledTransactionHashes is renamed as [`NewPooledTransactionHashes66`] and +/// The `eth/68` changes only `NewPooledTransactionHashes` to include `types` and `sizes`. For +/// it, `NewPooledTransactionHashes` is renamed as [`NewPooledTransactionHashes66`] and /// [`NewPooledTransactionHashes68`] is defined. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum EthMessage { /// Represents a Status message required for the protocol handshake. Status(Status), - /// Represents a NewBlockHashes message broadcast to the network. + /// Represents a `NewBlockHashes` message broadcast to the network. NewBlockHashes(NewBlockHashes), - /// Represents a NewBlock message broadcast to the network. + /// Represents a `NewBlock` message broadcast to the network. NewBlock(Box), /// Represents a Transactions message broadcast to the network. Transactions(Transactions), - /// Represents a NewPooledTransactionHashes message for eth/66 version. + /// Represents a `NewPooledTransactionHashes` message for eth/66 version. NewPooledTransactionHashes66(NewPooledTransactionHashes66), - /// Represents a NewPooledTransactionHashes message for eth/68 version. + /// Represents a `NewPooledTransactionHashes` message for eth/68 version. 
NewPooledTransactionHashes68(NewPooledTransactionHashes68), // The following messages are request-response message pairs - /// Represents a GetBlockHeaders request-response pair. + /// Represents a `GetBlockHeaders` request-response pair. GetBlockHeaders(RequestPair), - /// Represents a BlockHeaders request-response pair. + /// Represents a `BlockHeaders` request-response pair. BlockHeaders(RequestPair), - /// Represents a GetBlockBodies request-response pair. + /// Represents a `GetBlockBodies` request-response pair. GetBlockBodies(RequestPair), - /// Represents a BlockBodies request-response pair. + /// Represents a `BlockBodies` request-response pair. BlockBodies(RequestPair), - /// Represents a GetPooledTransactions request-response pair. + /// Represents a `GetPooledTransactions` request-response pair. GetPooledTransactions(RequestPair), - /// Represents a PooledTransactions request-response pair. + /// Represents a `PooledTransactions` request-response pair. PooledTransactions(RequestPair), - /// Represents a GetNodeData request-response pair. + /// Represents a `GetNodeData` request-response pair. GetNodeData(RequestPair), - /// Represents a NodeData request-response pair. + /// Represents a `NodeData` request-response pair. NodeData(RequestPair), - /// Represents a GetReceipts request-response pair. + /// Represents a `GetReceipts` request-response pair. GetReceipts(RequestPair), /// Represents a Receipts request-response pair. Receipts(RequestPair), diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index e2dfb1712..93a2371c8 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -65,7 +65,7 @@ impl Status { Default::default() } - /// Sets the [EthVersion] for the status. + /// Sets the [`EthVersion`] for the status. pub fn set_eth_version(&mut self, version: EthVersion) { self.version = version as u8; } diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index c781e21dd..752e80709 100644 --- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -67,17 +67,17 @@ impl Capability { Self::new_static("eth", version as usize) } - /// Returns the [EthVersion::Eth66] capability. + /// Returns the [`EthVersion::Eth66`] capability. pub const fn eth_66() -> Self { Self::eth(EthVersion::Eth66) } - /// Returns the [EthVersion::Eth67] capability. + /// Returns the [`EthVersion::Eth67`] capability. pub const fn eth_67() -> Self { Self::eth(EthVersion::Eth67) } - /// Returns the [EthVersion::Eth68] capability. + /// Returns the [`EthVersion::Eth68`] capability. pub const fn eth_68() -> Self { Self::eth(EthVersion::Eth68) } @@ -550,8 +550,8 @@ pub enum SharedCapabilityError { /// Unsupported `eth` version. #[error(transparent)] UnsupportedVersion(#[from] ParseVersionError), - /// Thrown when the message id for a [SharedCapability] overlaps with the reserved p2p message - /// id space [`MAX_RESERVED_MESSAGE_ID`]. + /// Thrown when the message id for a [`SharedCapability`] overlaps with the reserved p2p + /// message id space [`MAX_RESERVED_MESSAGE_ID`]. #[error("message id offset `{0}` is reserved")] ReservedMessageIdOffset(u8), } diff --git a/crates/net/eth-wire/src/errors/eth.rs b/crates/net/eth-wire/src/errors/eth.rs index 89bad5174..56a1ae62b 100644 --- a/crates/net/eth-wire/src/errors/eth.rs +++ b/crates/net/eth-wire/src/errors/eth.rs @@ -1,4 +1,4 @@ -//! Error handling for (`EthStream`)[crate::EthStream] +//! 
Error handling for [`EthStream`](crate::EthStream) use crate::{ errors::P2PStreamError, message::MessageError, version::ParseVersionError, DisconnectReason, @@ -51,7 +51,7 @@ impl EthStreamError { } } - /// Returns the [io::Error] if it was caused by IO + /// Returns the [`io::Error`] if it was caused by IO pub const fn as_io(&self) -> Option<&io::Error> { if let Self::P2PStreamError(P2PStreamError::Io(io)) = self { return Some(io) diff --git a/crates/net/eth-wire/src/hello.rs b/crates/net/eth-wire/src/hello.rs index 85dda8bfa..b8af52504 100644 --- a/crates/net/eth-wire/src/hello.rs +++ b/crates/net/eth-wire/src/hello.rs @@ -9,15 +9,15 @@ use crate::protocol::Protocol; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -/// This is a superset of [HelloMessage] that provides additional protocol [Protocol] information +/// This is a superset of [`HelloMessage`] that provides additional protocol [Protocol] information /// about the number of messages used by each capability in order to do proper message ID /// multiplexing. /// -/// This type is required for the `p2p` handshake because the [HelloMessage] does not share the +/// This type is required for the `p2p` handshake because the [`HelloMessage`] does not share the /// number of messages used by each capability. /// -/// To get the encodable [HelloMessage] without the additional protocol information, use the -/// [HelloMessageWithProtocols::message]. +/// To get the encodable [`HelloMessage`] without the additional protocol information, use the +/// [`HelloMessageWithProtocols::message`]. #[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct HelloMessageWithProtocols { @@ -49,7 +49,7 @@ impl HelloMessageWithProtocols { HelloMessageBuilder::new(id) } - /// Returns the raw [HelloMessage] without the additional protocol information. + /// Returns the raw [`HelloMessage`] without the additional protocol information. #[inline] pub fn message(&self) -> HelloMessage { HelloMessage { @@ -61,7 +61,7 @@ impl HelloMessageWithProtocols { } } - /// Converts the type into a [HelloMessage] without the additional protocol information. + /// Converts the type into a [`HelloMessage`] without the additional protocol information. pub fn into_message(self) -> HelloMessage { HelloMessage { protocol_version: self.protocol_version, @@ -191,7 +191,7 @@ impl HelloMessageBuilder { /// Unset fields will be set to their default values: /// - `protocol_version`: [`ProtocolVersion::V5`] /// - `client_version`: [`RETH_CLIENT_VERSION`] - /// - `capabilities`: All [EthVersion] + /// - `capabilities`: All [`EthVersion`] pub fn build(self) -> HelloMessageWithProtocols { let Self { protocol_version, client_version, protocols, port, id } = self; HelloMessageWithProtocols { diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index 06c091548..262c2c193 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -1,6 +1,6 @@ //! Rlpx protocol multiplexer and satellite stream //! -//! A Satellite is a Stream that primarily drives a single RLPx subprotocol but can also handle +//! A Satellite is a Stream that primarily drives a single `RLPx` subprotocol but can also handle //! additional subprotocols. //! //! 
Most of the other subprotocols are "dependent satellite" protocols of "eth" and not fully standalone protocols, for example "snap"; see also [snap protocol](https://github.com/ethereum/devp2p/blob/298d7a77c3bf833641579ecbbb5b13f0311eeeea/caps/snap.md?plain=1#L71) @@ -28,7 +28,7 @@ use reth_primitives::ForkFilter; use tokio::sync::{mpsc, mpsc::UnboundedSender}; use tokio_stream::wrappers::UnboundedReceiverStream; -/// A Stream and Sink type that wraps a raw rlpx stream [P2PStream] and handles message ID +/// A Stream and Sink type that wraps a raw rlpx stream [`P2PStream`] and handles message ID /// multiplexing. #[derive(Debug)] pub struct RlpxProtocolMultiplexer { @@ -49,8 +49,8 @@ impl RlpxProtocolMultiplexer { /// Installs a new protocol on top of the raw p2p stream. /// - /// This accepts a closure that receives a [ProtocolConnection] that will yield messages for the - /// given capability. + /// This accepts a closure that receives a [`ProtocolConnection`] that will yield messages for + /// the given capability. pub fn install_protocol( &mut self, cap: &Capability, @@ -63,12 +63,12 @@ impl RlpxProtocolMultiplexer { self.inner.install_protocol(cap, f) } - /// Returns the [SharedCapabilities] of the underlying raw p2p stream + /// Returns the [`SharedCapabilities`] of the underlying raw p2p stream pub const fn shared_capabilities(&self) -> &SharedCapabilities { self.inner.shared_capabilities() } - /// Converts this multiplexer into a [RlpxSatelliteStream] with the given primary protocol. + /// Converts this multiplexer into a [`RlpxSatelliteStream`] with the given primary protocol. pub fn into_satellite_stream( self, cap: &Capability, @@ -102,7 +102,7 @@ impl RlpxProtocolMultiplexer { }) } - /// Converts this multiplexer into a [RlpxSatelliteStream] with the given primary protocol. + /// Converts this multiplexer into a [`RlpxSatelliteStream`] with the given primary protocol. /// /// Returns an error if the primary protocol is not supported by the remote or the handshake /// failed. @@ -125,7 +125,7 @@ impl RlpxProtocolMultiplexer { .map(|(st, _)| st) } - /// Converts this multiplexer into a [RlpxSatelliteStream] with the given primary protocol. + /// Converts this multiplexer into a [`RlpxSatelliteStream`] with the given primary protocol. /// /// Returns an error if the primary protocol is not supported by the remote or the handshake /// failed. /// /// This accepts a closure that does a handshake with the remote peer and returns a tuple of the /// primary stream and extra data. /// - /// See also [UnauthedEthStream::handshake] + /// See also [`UnauthedEthStream::handshake`] pub async fn into_satellite_stream_with_tuple_handshake( mut self, cap: &Capability, @@ -202,7 +202,7 @@ impl RlpxProtocolMultiplexer { } } - /// Converts this multiplexer into a [RlpxSatelliteStream] with eth protocol as the given + /// Converts this multiplexer into a [`RlpxSatelliteStream`] with eth protocol as the given /// primary protocol. pub async fn into_eth_satellite_stream( self, @@ -282,7 +282,7 @@ struct PrimaryProtocol { st: Primary, } -/// A Stream and Sink type that acts as a wrapper around a primary RLPx subprotocol (e.g. "eth") +/// A Stream and Sink type that acts as a wrapper around a primary `RLPx` subprotocol (e.g. "eth") /// /// Only emits and sends _non-empty_ messages #[derive(Debug)] @@ -374,7 +374,7 @@ impl CanDisconnect for ProtocolProxy { } } -/// A connection channel to receive _non_empty_ messages for the negotiated protocol. 
+/// A connection channel to receive _`non_empty`_ messages for the negotiated protocol. /// /// This is a [Stream] that returns raw bytes of the received messages for this protocol. #[derive(Debug)] @@ -390,8 +390,8 @@ impl Stream for ProtocolConnection { } } -/// A Stream and Sink type that acts as a wrapper around a primary RLPx subprotocol (e.g. "eth") -/// [EthStream] and can also handle additional subprotocols. +/// A Stream and Sink type that acts as a wrapper around a primary `RLPx` subprotocol (e.g. "eth") +/// [`EthStream`] and can also handle additional subprotocols. #[derive(Debug)] pub struct RlpxSatelliteStream { inner: MultiplexInner, @@ -401,8 +401,8 @@ pub struct RlpxSatelliteStream { impl RlpxSatelliteStream { /// Installs a new protocol on top of the raw p2p stream. /// - /// This accepts a closure that receives a [ProtocolConnection] that will yield messages for the - /// given capability. + /// This accepts a closure that receives a [`ProtocolConnection`] that will yield messages for + /// the given capability. pub fn install_protocol( &mut self, cap: &Capability, @@ -427,19 +427,19 @@ impl RlpxSatelliteStream { &mut self.primary.st } - /// Returns the underlying [P2PStream]. + /// Returns the underlying [`P2PStream`]. #[inline] pub const fn inner(&self) -> &P2PStream { &self.inner.conn } - /// Returns mutable access to the underlying [P2PStream]. + /// Returns mutable access to the underlying [`P2PStream`]. #[inline] pub fn inner_mut(&mut self) -> &mut P2PStream { &mut self.inner.conn } - /// Consumes this type and returns the wrapped [P2PStream]. + /// Consumes this type and returns the wrapped [`P2PStream`]. #[inline] pub fn into_inner(self) -> P2PStream { self.inner.conn @@ -606,7 +606,7 @@ where } } -/// Wraps a RLPx subprotocol and handles message ID multiplexing. +/// Wraps a `RLPx` subprotocol and handles message ID multiplexing. struct ProtocolStream { shared_cap: SharedCapability, /// the channel shared with the satellite stream diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index a2e1e1dae..3fdb68c5e 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -58,8 +58,8 @@ const GRACE_PERIOD: Duration = Duration::from_secs(2); /// [`MAX_P2P_CAPACITY`] is the maximum number of messages that can be buffered to be sent in the /// `p2p` stream. /// -/// Note: this default is rather low because it is expected that the [P2PStream] wraps an -/// [ECIESStream](reth_ecies::stream::ECIESStream) which internally already buffers a few MB of +/// Note: this default is rather low because it is expected that the [`P2PStream`] wraps an +/// [`ECIESStream`](reth_ecies::stream::ECIESStream) which internally already buffers a few MB of /// encoded data. const MAX_P2P_CAPACITY: usize = 2; @@ -249,7 +249,7 @@ pub struct P2PStream { outgoing_messages: VecDeque, /// Maximum number of messages that we can buffer here before the [Sink] impl returns - /// [Poll::Pending]. + /// [`Poll::Pending`]. outgoing_message_buffer_capacity: usize, /// Whether this stream is currently in the process of disconnecting by sending a disconnect diff --git a/crates/net/eth-wire/src/protocol.rs b/crates/net/eth-wire/src/protocol.rs index 5cbfdc224..26b7d6d1b 100644 --- a/crates/net/eth-wire/src/protocol.rs +++ b/crates/net/eth-wire/src/protocol.rs @@ -1,4 +1,4 @@ -//! A Protocol defines a P2P subprotocol in a RLPx connection +//! 
A Protocol defines a P2P subprotocol in a `RLPx` connection use crate::{capability::Capability, EthMessageID, EthVersion}; @@ -30,17 +30,17 @@ impl Protocol { Self::new(cap, messages) } - /// Returns the [EthVersion::Eth66] capability. + /// Returns the [`EthVersion::Eth66`] capability. pub const fn eth_66() -> Self { Self::eth(EthVersion::Eth66) } - /// Returns the [EthVersion::Eth67] capability. + /// Returns the [`EthVersion::Eth67`] capability. pub const fn eth_67() -> Self { Self::eth(EthVersion::Eth67) } - /// Returns the [EthVersion::Eth68] capability. + /// Returns the [`EthVersion::Eth68`] capability. pub const fn eth_68() -> Self { Self::eth(EthVersion::Eth68) } diff --git a/crates/net/eth-wire/tests/fuzz_roundtrip.rs b/crates/net/eth-wire/tests/fuzz_roundtrip.rs index 7bb3b7ad7..f20d0397c 100644 --- a/crates/net/eth-wire/tests/fuzz_roundtrip.rs +++ b/crates/net/eth-wire/tests/fuzz_roundtrip.rs @@ -21,8 +21,8 @@ where assert_eq!(thing, decoded, "expected: {thing:?}, got: {decoded:?}"); } -/// This method delegates to roundtrip_encoding, but is used to enforce that each type input to the -/// macro has a proper Default, Clone, and Serialize impl. These trait implementations are +/// This method delegates to `roundtrip_encoding`, but is used to enforce that each type input to +/// the macro has a proper Default, Clone, and Serialize impl. These trait implementations are /// necessary for test-fuzz to autogenerate a corpus. /// /// If it makes sense to remove a Default impl from a type that we fuzz, this should prevent the diff --git a/crates/net/nat/src/lib.rs b/crates/net/nat/src/lib.rs index f9cfc139f..8f7579089 100644 --- a/crates/net/nat/src/lib.rs +++ b/crates/net/nat/src/lib.rs @@ -38,11 +38,11 @@ pub enum NatResolver { /// Resolve with any available resolver. #[default] Any, - /// Resolve external IP via UPnP. + /// Resolve external IP via `UPnP`. Upnp, /// Resolve external IP via a network request. PublicIp, - /// Use the given [IpAddr] + /// Use the given [`IpAddr`] ExternalIp(IpAddr), /// Resolve nothing None, @@ -67,7 +67,7 @@ impl fmt::Display for NatResolver { } } -/// Error when parsing a [NatResolver] + /// Error when parsing a [`NatResolver`] #[derive(Debug, thiserror::Error)] pub enum ParseNatResolverError { /// Failed to parse provided IP @@ -123,16 +123,16 @@ impl ResolveNatInterval { Self { resolver, future: None, interval } } - /// Creates a new [ResolveNatInterval] that attempts to resolve the public IP with interval of - /// period. See also [tokio::time::interval] + /// Creates a new [`ResolveNatInterval`] that attempts to resolve the public IP with an interval + /// of `period`. See also [`tokio::time::interval`] #[track_caller] pub fn interval(resolver: NatResolver, period: Duration) -> Self { let interval = tokio::time::interval(period); Self::with_interval(resolver, interval) } - /// Creates a new [ResolveNatInterval] that attempts to resolve the public IP with interval of - /// period with the first attempt starting at `start`. See also [tokio::time::interval_at] + /// Creates a new [`ResolveNatInterval`] that attempts to resolve the public IP with an interval + /// of `period`, with the first attempt starting at `start`. See also [`tokio::time::interval_at`] #[track_caller] pub fn interval_at( resolver: NatResolver, @@ -143,18 +143,18 @@ impl ResolveNatInterval { Self::with_interval(resolver, interval) } - /// Completes when the next [IpAddr] in the interval has been reached. + /// Completes when the next [`IpAddr`] in the interval has been reached. 
pub async fn tick(&mut self) -> Option { poll_fn(|cx| self.poll_tick(cx)).await } - /// Polls for the next resolved [IpAddr] in the interval to be reached. + /// Polls for the next resolved [`IpAddr`] in the interval to be reached. /// /// This method can return the following values: /// - /// * `Poll::Pending` if the next [IpAddr] has not yet been resolved. - /// * `Poll::Ready(Option)` if the next [IpAddr] has been resolved. This returns `None` - /// if the attempt was unsuccessful. + /// * `Poll::Pending` if the next [`IpAddr`] has not yet been resolved. + /// * `Poll::Ready(Option)` if the next [`IpAddr`] has been resolved. This returns + /// `None` if the attempt was unsuccessful. pub fn poll_tick(&mut self, cx: &mut Context<'_>) -> Poll> { if self.interval.poll_tick(cx).is_ready() { self.future = Some(Box::pin(self.resolver.external_addr())); diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index 87fc9c5f2..957bd860b 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -21,7 +21,7 @@ pub use error::NetworkError; pub use reputation::{Reputation, ReputationChangeKind}; use reth_network_types::NodeRecord; -/// The PeerId type. +/// The `PeerId` type. pub type PeerId = alloy_primitives::B512; /// Network Error @@ -71,7 +71,7 @@ pub trait Peers: PeersInfo { self.add_peer_kind(peer, PeerKind::Basic, addr); } - /// Adds a trusted [PeerId] to the peer set. + /// Adds a trusted [`PeerId`] to the peer set. /// /// This allows marking a peer as trusted without having to know the peer's address. fn add_trusted_peer_id(&self, peer: PeerId); @@ -84,28 +84,28 @@ pub trait Peers: PeersInfo { /// Adds a peer to the known peer set, with the given kind. fn add_peer_kind(&self, peer: PeerId, kind: PeerKind, addr: SocketAddr); - /// Returns the rpc [PeerInfo] for all connected [PeerKind::Trusted] peers. + /// Returns the rpc [`PeerInfo`] for all connected [`PeerKind::Trusted`] peers. fn get_trusted_peers( &self, ) -> impl Future, NetworkError>> + Send { self.get_peers_by_kind(PeerKind::Trusted) } - /// Returns the rpc [PeerInfo] for all connected [PeerKind::Basic] peers. + /// Returns the rpc [`PeerInfo`] for all connected [`PeerKind::Basic`] peers. fn get_basic_peers(&self) -> impl Future, NetworkError>> + Send { self.get_peers_by_kind(PeerKind::Basic) } - /// Returns the rpc [PeerInfo] for all connected peers with the given kind. + /// Returns the rpc [`PeerInfo`] for all connected peers with the given kind. fn get_peers_by_kind( &self, kind: PeerKind, ) -> impl Future, NetworkError>> + Send; - /// Returns the rpc [PeerInfo] for all connected peers. + /// Returns the rpc [`PeerInfo`] for all connected peers. fn get_all_peers(&self) -> impl Future, NetworkError>> + Send; - /// Returns the rpc [PeerInfo] for the given peer id. + /// Returns the rpc [`PeerInfo`] for the given peer id. /// /// Returns `None` if the peer is not connected. fn get_peer_by_id( &self, peer_id: PeerId, ) -> impl Future, NetworkError>> + Send; - /// Returns the rpc [PeerInfo] for the given peers if they are connected. + /// Returns the rpc [`PeerInfo`] for the given peers if they are connected. /// /// Note: This only returns peers that are connected; unconnected peers are ignored, but the /// order in which they were requested is kept. 
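Note: the interval-based NAT resolution documented in the hunk above can be exercised roughly as follows. This is a minimal sketch, assuming the crate is imported as `reth_net_nat`; the `NatResolver::Any` choice and the one-minute period are illustrative and not taken from this patch.

use std::time::Duration;
use reth_net_nat::{NatResolver, ResolveNatInterval};

async fn watch_external_ip() {
    // Attempt to resolve the external IP once per minute, via the `interval`
    // constructor documented above.
    let mut nat = ResolveNatInterval::interval(NatResolver::Any, Duration::from_secs(60));
    loop {
        // `tick` completes when the next interval fires; `None` mirrors the
        // `poll_tick` contract above and means the attempt was unsuccessful.
        match nat.tick().await {
            Some(ip) => println!("external ip: {ip}"),
            None => eprintln!("external ip resolution attempt failed"),
        }
    }
}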
diff --git a/crates/net/network-api/src/reputation.rs b/crates/net/network-api/src/reputation.rs index a24f5b93d..72df807c8 100644 --- a/crates/net/network-api/src/reputation.rs +++ b/crates/net/network-api/src/reputation.rs @@ -40,12 +40,12 @@ pub enum ReputationChangeKind { } impl ReputationChangeKind { - /// Returns true if the reputation change is a [ReputationChangeKind::Reset]. + /// Returns true if the reputation change is a [`ReputationChangeKind::Reset`]. pub const fn is_reset(&self) -> bool { matches!(self, Self::Reset) } - /// Returns true if the reputation change is [ReputationChangeKind::Dropped]. + /// Returns true if the reputation change is [`ReputationChangeKind::Dropped`]. pub const fn is_dropped(&self) -> bool { matches!(self, Self::Dropped) } diff --git a/crates/net/network/src/builder.rs b/crates/net/network/src/builder.rs index 03e93bb6b..b6e1dcf91 100644 --- a/crates/net/network/src/builder.rs +++ b/crates/net/network/src/builder.rs @@ -8,7 +8,7 @@ use crate::{ use reth_transaction_pool::TransactionPool; use tokio::sync::mpsc; -/// We set the max channel capacity of the EthRequestHandler to 256 +/// We set the max channel capacity of the `EthRequestHandler` to 256 /// 256 requests with malicious 10MB bodies amount to 2.6GB, which can be absorbed by the node. pub(crate) const ETH_REQUEST_CHANNEL_CAPACITY: usize = 256; diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 9a517d1e1..a4682d6fa 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -54,7 +54,7 @@ pub struct NetworkConfig { pub listener_addr: SocketAddr, /// How to instantiate peer manager. pub peers_config: PeersConfig, - /// How to configure the [SessionManager](crate::session::SessionManager). + /// How to configure the [`SessionManager`](crate::session::SessionManager). pub sessions_config: SessionsConfig, /// The chain spec pub chain_spec: Arc, @@ -73,9 +73,9 @@ pub struct NetworkConfig { pub executor: Box, /// The `Status` message to send to peers at the beginning. pub status: Status, - /// Sets the hello message for the p2p handshake in RLPx + /// Sets the hello message for the p2p handshake in `RLPx` pub hello_message: HelloMessageWithProtocols, - /// Additional protocols to announce and handle in RLPx + /// Additional protocols to announce and handle in `RLPx` pub extra_protocols: RlpxSubProtocols, /// Whether to disable transaction gossip pub tx_gossip_disabled: bool, @@ -104,13 +104,13 @@ impl NetworkConfig { self } - /// Sets the address for the incoming RLPx connection listener. + /// Sets the address for the incoming `RLPx` connection listener. pub const fn set_listener_addr(mut self, listener_addr: SocketAddr) -> Self { self.listener_addr = listener_addr; self } - /// Returns the address for the incoming RLPx connection listener. + /// Returns the address for the incoming `RLPx` connection listener. pub const fn listener_addr(&self) -> &SocketAddr { &self.listener_addr } @@ -120,7 +120,7 @@ impl NetworkConfig where C: BlockReader + HeaderProvider + Clone + Unpin + 'static, { - /// Starts the networking stack given a [NetworkConfig] and returns a handle to the network. + /// Starts the networking stack given a [`NetworkConfig`] and returns a handle to the network. pub async fn start_network(self) -> Result { let client = self.client.clone(); let (handle, network, _txpool, eth) = @@ -163,7 +163,7 @@ pub struct NetworkConfigBuilder { /// The executor to use for spawning tasks. 
#[serde(skip)] executor: Option>, - /// Sets the hello message for the p2p handshake in RLPx + /// Sets the hello message for the p2p handshake in `RLPx` hello_message: Option, /// The executor to use for spawning tasks. #[serde(skip)] @@ -264,7 +264,7 @@ impl NetworkConfigBuilder { /// Sets the executor to use for spawning tasks. /// - /// If `None`, then [tokio::spawn] is used for spawning tasks. + /// If `None`, then [`tokio::spawn`] is used for spawning tasks. pub fn with_task_executor(mut self, executor: Box) -> Self { self.executor = Some(executor); self @@ -284,18 +284,18 @@ impl NetworkConfigBuilder { /// Sets the discovery and listener address /// - /// This is a convenience function for both [NetworkConfigBuilder::listener_addr] and - /// [NetworkConfigBuilder::discovery_addr]. + /// This is a convenience function for both [`NetworkConfigBuilder::listener_addr`] and + /// [`NetworkConfigBuilder::discovery_addr`]. /// /// By default, both are on the same port: - /// [DEFAULT_DISCOVERY_PORT](reth_discv4::DEFAULT_DISCOVERY_PORT) + /// [`DEFAULT_DISCOVERY_PORT`](reth_discv4::DEFAULT_DISCOVERY_PORT) pub const fn set_addrs(self, addr: SocketAddr) -> Self { self.listener_addr(addr).discovery_addr(addr) } /// Sets the socket address the network will listen on. /// - /// By default, this is [DEFAULT_DISCOVERY_ADDRESS] + /// By default, this is [`DEFAULT_DISCOVERY_ADDRESS`] pub const fn listener_addr(mut self, listener_addr: SocketAddr) -> Self { self.listener_addr = Some(listener_addr); self @@ -303,7 +303,7 @@ impl NetworkConfigBuilder { /// Sets the port of the address the network will listen on. /// - /// By default, this is [DEFAULT_DISCOVERY_PORT](reth_discv4::DEFAULT_DISCOVERY_PORT) + /// By default, this is [`DEFAULT_DISCOVERY_PORT`](reth_discv4::DEFAULT_DISCOVERY_PORT) pub fn listener_port(mut self, port: u16) -> Self { self.listener_addr.get_or_insert(DEFAULT_DISCOVERY_ADDRESS).set_port(port); self @@ -317,7 +317,7 @@ impl NetworkConfigBuilder { /// Sets the port of the address the discovery network will listen on. /// - /// By default, this is [DEFAULT_DISCOVERY_PORT](reth_discv4::DEFAULT_DISCOVERY_PORT) + /// By default, this is [`DEFAULT_DISCOVERY_PORT`](reth_discv4::DEFAULT_DISCOVERY_PORT) pub fn discovery_port(mut self, port: u16) -> Self { self.discovery_addr.get_or_insert(DEFAULT_DISCOVERY_ADDRESS).set_port(port); self @@ -325,10 +325,10 @@ impl NetworkConfigBuilder { /// Sets the external ip resolver to use for discovery v4. /// - /// If no [Discv4ConfigBuilder] is set via [Self::discovery], this will create a new one. + /// If no [`Discv4ConfigBuilder`] is set via [`Self::discovery`], this will create a new one. /// /// This is a convenience function for setting the external ip resolver on the default - /// [Discv4Config] config. + /// [`Discv4Config`] config. pub fn external_ip_resolver(mut self, resolver: NatResolver) -> Self { self.discovery_v4_builder .get_or_insert_with(Discv4Config::builder) @@ -354,12 +354,12 @@ impl NetworkConfigBuilder { self } - /// Convenience function for setting [Self::boot_nodes] to the mainnet boot nodes. + /// Convenience function for setting [`Self::boot_nodes`] to the mainnet boot nodes. pub fn mainnet_boot_nodes(self) -> Self { self.boot_nodes(mainnet_nodes()) } - /// Convenience function for setting [Self::boot_nodes] to the sepolia boot nodes. + /// Convenience function for setting [`Self::boot_nodes`] to the sepolia boot nodes. 
pub fn sepolia_boot_nodes(self) -> Self { self.boot_nodes(sepolia_nodes()) } @@ -444,7 +444,7 @@ impl NetworkConfigBuilder { self } - /// Adds a new additional protocol to the RLPx sub-protocol list. + /// Adds a new additional protocol to the `RLPx` sub-protocol list. pub fn add_rlpx_sub_protocol(mut self, protocol: impl IntoRlpxSubProtocol) -> Self { self.extra_protocols.push(protocol); self @@ -462,7 +462,8 @@ impl NetworkConfigBuilder { self } - /// Convenience function for creating a [NetworkConfig] with a noop provider that does nothing. + /// Convenience function for creating a [`NetworkConfig`] with a noop provider that does + /// nothing. #[cfg(any(test, feature = "test-utils"))] pub fn build_with_noop_provider( self, diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index 679a1cedb..a0e5de8e9 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -133,7 +133,7 @@ impl Discovery { }) } - /// Registers a listener for receiving [DiscoveryEvent] updates. + /// Registers a listener for receiving [`DiscoveryEvent`] updates. pub(crate) fn add_listener(&mut self, tx: mpsc::UnboundedSender) { self.discovery_listeners.push(tx); } @@ -199,7 +199,7 @@ impl Discovery { Ok(()) } - /// Processes an incoming [NodeRecord] update from a discovery service + /// Processes an incoming [`NodeRecord`] update from a discovery service fn on_node_record_update(&mut self, record: NodeRecord, fork_id: Option) { let id = record.id; let addr = record.tcp_addr(); diff --git a/crates/net/network/src/error.rs b/crates/net/network/src/error.rs index 702a57940..9019a79f2 100644 --- a/crates/net/network/src/error.rs +++ b/crates/net/network/src/error.rs @@ -58,7 +58,7 @@ pub enum NetworkError { Discv5Error(#[from] reth_discv5::Error), /// Error when setting up the DNS resolver failed /// - /// See also [DnsResolver](reth_dns_discovery::DnsResolver::from_system_conf) + /// See also [`DnsResolver`](reth_dns_discovery::DnsResolver::from_system_conf) #[error("failed to configure DNS resolver: {0}")] DnsResolver(#[from] ResolveError), } diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 9e7b373ca..800844e51 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -38,7 +38,7 @@ const MAX_HEADERS_SERVE: usize = 1024; /// Maximum number of block headers to serve. /// /// Used to limit lookups. With 24KB block sizes nowadays, the practical limit will always be -/// SOFT_RESPONSE_LIMIT. +/// `SOFT_RESPONSE_LIMIT`. const MAX_BODIES_SERVE: usize = 1024; /// Maximum size of replies to data retrievals. @@ -56,7 +56,7 @@ pub struct EthRequestHandler { // TODO use to report spammers #[allow(dead_code)] peers: PeersHandle, - /// Incoming request from the [NetworkManager](crate::NetworkManager). + /// Incoming request from the [`NetworkManager`](crate::NetworkManager). incoming_requests: ReceiverStream, /// Metrics for the eth request handler. metrics: EthRequestHandlerMetrics, diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index d6d6d3efb..d0dfcab59 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -249,9 +249,9 @@ impl StateFetcher { /// Called on a `GetBlockHeaders` response from a peer. 
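The `MAX_BODIES_SERVE` and `SOFT_RESPONSE_LIMIT` doc comments in the eth_requests.rs hunk above describe a count cap paired with a byte budget: responses grow until either limit trips, and with today's block sizes the byte budget trips first. A sketch of that budgeting loop, with invented limits rather than reth's real constants:

```rust
/// Invented limits for illustration; reth's actual constants differ.
const MAX_ITEMS: usize = 1024;
const SOFT_BYTE_LIMIT: usize = 2 * 1024 * 1024;

/// Collect item sizes until either the count cap or the byte budget trips.
fn serve(sizes: &[usize]) -> Vec<usize> {
    let mut out = Vec::new();
    let mut bytes = 0;
    for &size in sizes {
        if out.len() >= MAX_ITEMS || bytes + size > SOFT_BYTE_LIMIT {
            break;
        }
        bytes += size;
        out.push(size);
    }
    out
}

fn main() {
    // Three 1 MiB bodies against a 2 MiB soft limit: only two fit.
    let served = serve(&[1 << 20, 1 << 20, 1 << 20]);
    assert_eq!(served.len(), 2);
}
```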
/// - /// This delegates the response and returns a [BlockResponseOutcome] to either queue in a direct - /// followup request or get the peer reported if the response was a - /// [EthResponseValidator::reputation_change_err] + /// This delegates the response and returns a [`BlockResponseOutcome`] to either queue in a + /// direct followup request or get the peer reported if the response was a + /// [`EthResponseValidator::reputation_change_err`] pub(crate) fn on_block_headers_response( &mut self, peer_id: PeerId, diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 602dca098..c16082e1b 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -5,7 +5,7 @@ //! //! ## Capabilities //! -//! The network manages peers depending on their announced capabilities via their RLPx sessions. Most importantly the [Ethereum Wire Protocol](https://github.com/ethereum/devp2p/blob/master/caps/eth.md)(`eth`). +//! The network manages peers depending on their announced capabilities via their `RLPx` sessions. Most importantly the [Ethereum Wire Protocol](https://github.com/ethereum/devp2p/blob/master/caps/eth.md)(`eth`). //! //! ## Overview //! @@ -13,7 +13,7 @@ //! made up of peer-to-peer connections between nodes that are available on the same network. //! Responsible for peer discovery is ethereum's discovery protocol (discv4, discv5). If the address //! (IP+port) of our node is published via discovery, remote peers can initiate inbound connections -//! to the local node. Once a (tcp) connection is established, both peers start to authenticate a [RLPx session](https://github.com/ethereum/devp2p/blob/master/rlpx.md) via a handshake. If the handshake was successful, both peers announce their capabilities and are now ready to exchange sub-protocol messages via the RLPx session. +//! to the local node. Once a (tcp) connection is established, both peers start to authenticate a [RLPx session](https://github.com/ethereum/devp2p/blob/master/rlpx.md) via a handshake. If the handshake was successful, both peers announce their capabilities and are now ready to exchange sub-protocol messages via the `RLPx` session. use crate::{ budget::{DEFAULT_BUDGET_TRY_DRAIN_NETWORK_HANDLE_CHANNEL, DEFAULT_BUDGET_TRY_DRAIN_SWARM}, @@ -129,7 +129,7 @@ impl NetworkManager { self.to_eth_request_handler = Some(tx); } - /// Adds an additional protocol handler to the RLPx sub-protocol list. + /// Adds an additional protocol handler to the `RLPx` sub-protocol list. pub fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) { self.swarm.add_rlpx_sub_protocol(protocol) } @@ -893,7 +893,7 @@ impl NetworkManager where C: BlockReader + Unpin, { - /// Drives the [NetworkManager] future until a [GracefulShutdown] signal is received. + /// Drives the [`NetworkManager`] future until a [`GracefulShutdown`] signal is received. /// /// This invokes the given function `shutdown_hook` while holding the graceful shutdown guard. pub async fn run_until_graceful_shutdown( diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index 03068ee96..4bf3c0d89 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -1,7 +1,7 @@ //! Capability messaging //! -//! An RLPx stream is multiplexed via the prepended message-id of a framed message. -//! Capabilities are exchanged via the RLPx `Hello` message as pairs of `(id, version)`, +//! An `RLPx` stream is multiplexed via the prepended message-id of a framed message. +//! 
Capabilities are exchanged via the `RLPx` `Hello` message as pairs of `(id, version)`, use futures::FutureExt; use reth_eth_wire::{ @@ -104,13 +104,13 @@ pub enum PeerRequest { /// The channel to send the response for pooled transactions. response: oneshot::Sender>, }, - /// Requests NodeData from the peer. + /// Requests `NodeData` from the peer. /// /// The response should be sent through the channel. GetNodeData { - /// The request for NodeData. + /// The request for `NodeData`. request: GetNodeData, - /// The channel to send the response for NodeData. + /// The channel to send the response for `NodeData`. response: oneshot::Sender>, }, /// Requests receipts from the peer. @@ -194,9 +194,9 @@ pub enum PeerResponse { /// The receiver channel for the response to a pooled transactions request. response: oneshot::Receiver>, }, - /// Represents a response to a request for NodeData. + /// Represents a response to a request for `NodeData`. NodeData { - /// The receiver channel for the response to a NodeData request. + /// The receiver channel for the response to a `NodeData` request. response: oneshot::Receiver>, }, /// Represents a response to a request for receipts. diff --git a/crates/net/network/src/metrics.rs b/crates/net/network/src/metrics.rs index 1ed8452ba..6e2d11a3a 100644 --- a/crates/net/network/src/metrics.rs +++ b/crates/net/network/src/metrics.rs @@ -9,7 +9,7 @@ use reth_primitives::TxType; /// Scope for monitoring transactions sent from the manager to the tx manager pub(crate) const NETWORK_POOL_TRANSACTIONS_SCOPE: &str = "network.pool.transactions"; -/// Metrics for the entire network, handled by NetworkManager +/// Metrics for the entire network, handled by `NetworkManager` #[derive(Metrics)] #[metrics(scope = "network")] pub struct NetworkMetrics { @@ -77,7 +77,7 @@ pub struct NetworkMetrics { pub(crate) acc_duration_poll_swarm: Gauge, } -/// Metrics for SessionManager +/// Metrics for `SessionManager` #[derive(Metrics)] #[metrics(scope = "network")] pub struct SessionManagerMetrics { @@ -245,43 +245,43 @@ macro_rules! 
duration_metered_exec { #[derive(Metrics)] #[metrics(scope = "network")] pub struct DisconnectMetrics { - /// Number of peer disconnects due to DisconnectRequested (0x00) + /// Number of peer disconnects due to `DisconnectRequested` (0x00) pub(crate) disconnect_requested: Counter, - /// Number of peer disconnects due to TcpSubsystemError (0x01) + /// Number of peer disconnects due to `TcpSubsystemError` (0x01) pub(crate) tcp_subsystem_error: Counter, - /// Number of peer disconnects due to ProtocolBreach (0x02) + /// Number of peer disconnects due to `ProtocolBreach` (0x02) pub(crate) protocol_breach: Counter, - /// Number of peer disconnects due to UselessPeer (0x03) + /// Number of peer disconnects due to `UselessPeer` (0x03) pub(crate) useless_peer: Counter, - /// Number of peer disconnects due to TooManyPeers (0x04) + /// Number of peer disconnects due to `TooManyPeers` (0x04) pub(crate) too_many_peers: Counter, - /// Number of peer disconnects due to AlreadyConnected (0x05) + /// Number of peer disconnects due to `AlreadyConnected` (0x05) pub(crate) already_connected: Counter, - /// Number of peer disconnects due to IncompatibleP2PProtocolVersion (0x06) + /// Number of peer disconnects due to `IncompatibleP2PProtocolVersion` (0x06) pub(crate) incompatible: Counter, - /// Number of peer disconnects due to NullNodeIdentity (0x07) + /// Number of peer disconnects due to `NullNodeIdentity` (0x07) pub(crate) null_node_identity: Counter, - /// Number of peer disconnects due to ClientQuitting (0x08) + /// Number of peer disconnects due to `ClientQuitting` (0x08) pub(crate) client_quitting: Counter, - /// Number of peer disconnects due to UnexpectedHandshakeIdentity (0x09) + /// Number of peer disconnects due to `UnexpectedHandshakeIdentity` (0x09) pub(crate) unexpected_identity: Counter, - /// Number of peer disconnects due to ConnectedToSelf (0x0a) + /// Number of peer disconnects due to `ConnectedToSelf` (0x0a) pub(crate) connected_to_self: Counter, - /// Number of peer disconnects due to PingTimeout (0x0b) + /// Number of peer disconnects due to `PingTimeout` (0x0b) pub(crate) ping_timeout: Counter, - /// Number of peer disconnects due to SubprotocolSpecific (0x10) + /// Number of peer disconnects due to `SubprotocolSpecific` (0x10) pub(crate) subprotocol_specific: Counter, } @@ -306,20 +306,20 @@ impl DisconnectMetrics { } } -/// Metrics for the EthRequestHandler +/// Metrics for the `EthRequestHandler` #[derive(Metrics)] #[metrics(scope = "network")] pub struct EthRequestHandlerMetrics { - /// Number of GetBlockHeaders requests received + /// Number of `GetBlockHeaders` requests received pub(crate) eth_headers_requests_received_total: Counter, - /// Number of GetReceipts requests received + /// Number of `GetReceipts` requests received pub(crate) eth_receipts_requests_received_total: Counter, - /// Number of GetBlockBodies requests received + /// Number of `GetBlockBodies` requests received pub(crate) eth_bodies_requests_received_total: Counter, - /// Number of GetNodeData requests received + /// Number of `GetNodeData` requests received pub(crate) eth_node_data_requests_received_total: Counter, /// Duration in seconds of call to poll @@ -327,7 +327,7 @@ pub struct EthRequestHandlerMetrics { pub(crate) acc_duration_poll_eth_req_handler: Gauge, } -/// Eth67 announcement metrics, track entries by TxType +/// Eth67 announcement metrics, track entries by `TxType` #[derive(Metrics)] #[metrics(scope = "network.transaction_fetcher")] pub struct AnnouncedTxTypesMetrics { @@ -375,7 +375,7 @@ 
impl TxTypesCounter { impl AnnouncedTxTypesMetrics { /// Update metrics during announcement validation, by examining each announcement entry based on - /// TxType + /// `TxType` pub(crate) fn update_eth68_announcement_metrics(&self, tx_types_counter: TxTypesCounter) { self.legacy.record(tx_types_counter.legacy as f64); self.eip2930.record(tx_types_counter.eip2930 as f64); diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index b3ec4c761..603ffd410 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -120,7 +120,7 @@ impl NetworkHandle { /// Announce a block over devp2p /// - /// Caution: in PoS this is a noop because new blocks are no longer announced over devp2p. + /// Caution: in `PoS` this is a noop because new blocks are no longer announced over devp2p. /// Instead they are sent to the node by CL and can be requested over devp2p. /// Broadcasting new blocks is considered a protocol violation. pub fn announce_block(&self, block: NewBlock, hash: B256) { @@ -421,7 +421,7 @@ pub trait NetworkEvents: Send + Sync { /// Provides access to modify the network's additional protocol handlers. pub trait NetworkProtocols: Send + Sync { - /// Adds an additional protocol handler to the RLPx sub-protocol list. + /// Adds an additional protocol handler to the `RLPx` sub-protocol list. fn add_rlpx_sub_protocol(&self, protocol: RlpxSubProtocol); } diff --git a/crates/net/network/src/peers/manager.rs b/crates/net/network/src/peers/manager.rs index 5ff84d386..559137af9 100644 --- a/crates/net/network/src/peers/manager.rs +++ b/crates/net/network/src/peers/manager.rs @@ -92,7 +92,7 @@ pub struct PeersManager { /// The set of trusted peer ids. /// /// This tracks peer ids that are considered trusted, but for which we don't necessarily have - /// an address: [Self::add_trusted_peer_id] + /// an address: [`Self::add_trusted_peer_id`] trusted_peer_ids: HashSet, /// Copy of the sender half, so new [`PeersHandle`] can be created on demand. manager_tx: mpsc::UnboundedSender, @@ -120,7 +120,7 @@ pub struct PeersManager { /// If non-trusted peers should be connected to, or the connection from non-trusted /// incoming peers should be accepted. trusted_nodes_only: bool, - /// Timestamp of the last time [Self::tick] was called. + /// Timestamp of the last time [`Self::tick`] was called. last_tick: Instant, /// Maximum number of backoff attempts before we give up on a peer and dropping. max_backoff_count: u8, @@ -250,7 +250,7 @@ impl PeersManager { Ok(()) } - /// Invoked when a previous call to [Self::on_incoming_pending_session] succeeded but it was + /// Invoked when a previous call to [`Self::on_incoming_pending_session`] succeeded but it was /// rejected. pub(crate) fn on_incoming_pending_session_rejected_internally(&mut self) { self.connection_info.decr_pending_in(); @@ -661,7 +661,7 @@ impl PeersManager { /// Called for a newly discovered peer. /// - /// If the peer already exists, then the address, kind and fork_id will be updated. + /// If the peer already exists, then the address, kind and `fork_id` will be updated. pub(crate) fn add_peer(&mut self, peer_id: PeerId, addr: SocketAddr, fork_id: Option) { self.add_peer_kind(peer_id, PeerKind::Basic, addr, fork_id) } @@ -681,7 +681,7 @@ impl PeersManager { /// Called for a newly discovered peer. /// - /// If the peer already exists, then the address, kind and fork_id will be updated. + /// If the peer already exists, then the address, kind and `fork_id` will be updated. 
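`add_peer` and `add_peer_kind` in the peers/manager.rs hunks above are documented as upserts: a peer that is rediscovered keeps its entry but has its address, kind and `fork_id` refreshed. A dependency-free sketch of that upsert shape, with deliberately simplified types (`PeerId` reduced to `u64`, `fork_id` to `Option<u64>`):

```rust
use std::collections::HashMap;
use std::net::SocketAddr;

type PeerId = u64; // simplified stand-in for reth's 512-bit peer id

#[derive(Debug)]
struct Peer {
    addr: SocketAddr,
    fork_id: Option<u64>, // stand-in for a real ForkId
}

#[derive(Debug, Default)]
struct Peers {
    peers: HashMap<PeerId, Peer>,
}

impl Peers {
    /// Insert-or-update: an existing entry keeps its identity but refreshes
    /// the fields a fresh discovery may have changed.
    fn add_peer(&mut self, id: PeerId, addr: SocketAddr, fork_id: Option<u64>) {
        self.peers
            .entry(id)
            .and_modify(|p| {
                p.addr = addr;
                p.fork_id = fork_id;
            })
            .or_insert(Peer { addr, fork_id });
    }
}

fn main() {
    let mut peers = Peers::default();
    let addr: SocketAddr = "127.0.0.1:30303".parse().unwrap();
    peers.add_peer(1, addr, None);
    peers.add_peer(1, addr, Some(7)); // updates the existing entry in place
    assert_eq!(peers.peers[&1].fork_id, Some(7));
}
```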
pub(crate) fn add_peer_kind( &mut self, peer_id: PeerId, @@ -763,10 +763,10 @@ impl PeersManager { /// Returns the idle peer with the highest reputation. /// - /// Peers that are `trusted`, see [PeerKind], are prioritized as long as they're not currently + /// Peers that are `trusted`, see [`PeerKind`], are prioritized as long as they're not currently /// marked as banned or backed off. /// - /// If `trusted_nodes_only` is enabled, see [PeersConfig], then this will only consider + /// If `trusted_nodes_only` is enabled, see [`PeersConfig`], then this will only consider /// `trusted` peers. /// /// Returns `None` if no peer is available. @@ -842,7 +842,7 @@ impl PeersManager { &self.net_connection_state } - /// Sets net_connection_state to ShuttingDown. + /// Sets `net_connection_state` to `ShuttingDown`. pub fn on_shutdown(&mut self) { self.net_connection_state = NetworkConnectionState::ShuttingDown; } @@ -1031,7 +1031,7 @@ pub struct Peer { kind: PeerKind, /// Whether the peer is currently backed off. backed_off: bool, - /// Counts number of times the peer was backed off due to a severe [BackoffKind]. + /// Counts number of times the peer was backed off due to a severe [`BackoffKind`]. severe_backoff_counter: u8, } @@ -1294,7 +1294,7 @@ pub struct PeersConfig { /// How long to ban bad peers. #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] pub ban_duration: Duration, - /// Restrictions on PeerIds and Ips. + /// Restrictions on `PeerIds` and Ips. #[cfg_attr(feature = "serde", serde(skip))] pub ban_list: BanList, /// Restrictions on connections. @@ -1327,7 +1327,7 @@ impl Default for PeersConfig { } impl PeersConfig { - /// A set of peer_ids and ip addr that we want to never connect to + /// A set of `peer_ids` and ip addr that we want to never connect to pub fn with_ban_list(mut self, ban_list: BanList) -> Self { self.ban_list = ban_list; self diff --git a/crates/net/network/src/protocol.rs b/crates/net/network/src/protocol.rs index 83ffb0250..7be1c48a6 100644 --- a/crates/net/network/src/protocol.rs +++ b/crates/net/network/src/protocol.rs @@ -39,19 +39,19 @@ pub trait ProtocolHandler: fmt::Debug + Send + Sync + 'static { ) -> Option; } -/// A trait that allows to authenticate a protocol after the RLPx connection was established. +/// A trait that allows to authenticate a protocol after the `RLPx` connection was established. pub trait ConnectionHandler: Send + Sync + 'static { /// The connection that yields messages to send to the remote. /// /// The connection will be closed when this stream resolves. type Connection: Stream + Send + 'static; - /// Returns the protocol to announce when the RLPx connection will be established. + /// Returns the protocol to announce when the `RLPx` connection will be established. /// /// This will be negotiated with the remote peer. fn protocol(&self) -> Protocol; - /// Invoked when the RLPx connection has been established by the peer does not share the + /// Invoked when the `RLPx` connection has been established by the peer does not share the /// protocol. fn on_unsupported_by_peer( self, @@ -60,7 +60,7 @@ pub trait ConnectionHandler: Send + Sync + 'static { peer_id: PeerId, ) -> OnNotSupported; - /// Invoked when the RLPx connection was established. + /// Invoked when the `RLPx` connection was established. /// /// The returned future should resolve when the connection should disconnect. fn into_connection( @@ -81,13 +81,13 @@ pub enum OnNotSupported { Disconnect, } -/// A wrapper type for a RLPx sub-protocol. 
+/// A wrapper type for a `RLPx` sub-protocol. #[derive(Debug)] pub struct RlpxSubProtocol(Box); -/// A helper trait to convert a [ProtocolHandler] into a dynamic type +/// A helper trait to convert a [`ProtocolHandler`] into a dynamic type pub trait IntoRlpxSubProtocol { - /// Converts the type into a [RlpxSubProtocol]. + /// Converts the type into a [`RlpxSubProtocol`]. fn into_rlpx_sub_protocol(self) -> RlpxSubProtocol; } diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 91c44ca10..64f7f1b34 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -90,7 +90,7 @@ pub(crate) struct ActiveSession { pub(crate) internal_request_timeout: Arc, /// Interval when to check for timed out requests. pub(crate) internal_request_timeout_interval: Interval, - /// If an [ActiveSession] does not receive a response at all within this duration then it is + /// If an [`ActiveSession`] does not receive a response at all within this duration then it is /// considered a protocol violation and the session will initiate a drop. pub(crate) protocol_breach_request_timeout: Duration, /// Used to reserve a slot to guarantee that the termination message is delivered diff --git a/crates/net/network/src/session/config.rs b/crates/net/network/src/session/config.rs index 8385a3697..6c7fc282d 100644 --- a/crates/net/network/src/session/config.rs +++ b/crates/net/network/src/session/config.rs @@ -1,4 +1,4 @@ -//! Configuration types for [SessionManager](crate::session::SessionManager). +//! Configuration types for [`SessionManager`](crate::session::SessionManager). use crate::{ peers::{DEFAULT_MAX_COUNT_PEERS_INBOUND, DEFAULT_MAX_COUNT_PEERS_OUTBOUND}, @@ -29,7 +29,7 @@ const DEFAULT_MAX_PEERS: usize = /// With maxed out peers, this will allow for 3 messages per session (average) const DEFAULT_SESSION_EVENT_BUFFER_SIZE: usize = DEFAULT_MAX_PEERS * 2; -/// Configuration options when creating a [SessionManager](crate::session::SessionManager). +/// Configuration options when creating a [`SessionManager`](crate::session::SessionManager). #[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "serde", serde(default))] diff --git a/crates/net/network/src/session/conn.rs b/crates/net/network/src/session/conn.rs index 6b8d522aa..4c9638ae9 100644 --- a/crates/net/network/src/session/conn.rs +++ b/crates/net/network/src/session/conn.rs @@ -31,7 +31,7 @@ pub type EthSatelliteConnection = pub enum EthRlpxConnection { /// A That only supports the ETH protocol. EthOnly(Box), - /// A connection that supports the ETH protocol and __at least one other__ RLPx protocol. + /// A connection that supports the ETH protocol and __at least one other__ `RLPx` protocol. Satellite(Box), } @@ -45,7 +45,7 @@ impl EthRlpxConnection { } } - /// Consumes this type and returns the wrapped [P2PStream]. + /// Consumes this type and returns the wrapped [`P2PStream`]. #[inline] pub(crate) fn into_inner(self) -> P2PStream>> { match self { diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index 0d8964550..609934991 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -135,7 +135,7 @@ impl ActiveSessionHandle { self.remote_addr } - /// Extracts the [PeerInfo] from the session handle. + /// Extracts the [`PeerInfo`] from the session handle. 
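The protocol.rs hunk above shows the type-erasure pattern behind `RlpxSubProtocol` and `IntoRlpxSubProtocol`: a boxed trait object plus a conversion trait with a blanket impl, so heterogeneous handlers can be pushed into one list. A reduced sketch, assuming a one-method handler trait in place of reth's richer `ProtocolHandler`:

```rust
use std::fmt;

/// Stand-in for a protocol handler trait (reth's has more methods).
trait Handler: fmt::Debug + Send + Sync + 'static {
    fn name(&self) -> &'static str;
}

/// Wrapper that erases the concrete handler type, mirroring
/// the boxed `RlpxSubProtocol` in the hunk above.
#[derive(Debug)]
struct SubProtocol(Box<dyn Handler>);

/// Conversion trait so different handler types share one list.
trait IntoSubProtocol {
    fn into_sub_protocol(self) -> SubProtocol;
}

/// Blanket impl: any handler converts by boxing itself.
impl<T: Handler> IntoSubProtocol for T {
    fn into_sub_protocol(self) -> SubProtocol {
        SubProtocol(Box::new(self))
    }
}

#[derive(Debug)]
struct Ping;

impl Handler for Ping {
    fn name(&self) -> &'static str {
        "ping"
    }
}

fn main() {
    let protocols: Vec<SubProtocol> = vec![Ping.into_sub_protocol()];
    println!("{}", protocols[0].0.name());
}
```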
pub(crate) fn peer_info(&self) -> PeerInfo { PeerInfo { remote_id: self.remote_id, @@ -247,7 +247,7 @@ pub enum ActiveSessionMessage { /// The error that caused the session to close error: EthStreamError, }, - /// A session received a valid message via RLPx. + /// A session received a valid message via `RLPx`. ValidMessage { /// Identifier of the remote peer. peer_id: PeerId, diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index caf857807..5cd53887c 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -66,10 +66,10 @@ pub struct SessionManager { next_id: usize, /// Keeps track of all sessions counter: SessionCounter, - /// The maximum initial time an [ActiveSession] waits for a response from the peer before it + /// The maximum initial time an [`ActiveSession`] waits for a response from the peer before it /// responds to an _internal_ request with a `TimeoutError` initial_internal_request_timeout: Duration, - /// If an [ActiveSession] does not receive a response at all within this duration then it is + /// If an [`ActiveSession`] does not receive a response at all within this duration then it is /// considered a protocol violation and the session will initiate a drop. protocol_breach_request_timeout: Duration, /// The timeout after which a pending session attempt is considered failed. @@ -107,7 +107,7 @@ pub struct SessionManager { active_session_tx: MeteredPollSender, /// Receiver half that listens for [`ActiveSessionMessage`] produced by pending sessions. active_session_rx: ReceiverStream, - /// Additional RLPx sub-protocols to be used by the session manager. + /// Additional `RLPx` sub-protocols to be used by the session manager. extra_protocols: RlpxSubProtocols, /// Used to measure inbound & outbound bandwidth across all managed streams bandwidth_meter: BandwidthMeter, @@ -186,7 +186,7 @@ impl SessionManager { self.hello_message.clone() } - /// Adds an additional protocol handler to the RLPx sub-protocol list. + /// Adds an additional protocol handler to the `RLPx` sub-protocol list. pub(crate) fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) { self.extra_protocols.push(protocol) } @@ -208,8 +208,8 @@ impl SessionManager { /// Invoked on a received status update. /// - /// If the updated activated another fork, this will return a [ForkTransition] and updates the - /// active [ForkId]. See also [ForkFilter::set_head]. + /// If the updated activated another fork, this will return a [`ForkTransition`] and updates the + /// active [`ForkId`]. See also [`ForkFilter::set_head`]. pub(crate) fn on_status_update(&mut self, head: Head) -> Option { self.status.blockhash = head.hash; self.status.total_difficulty = head.total_difficulty; @@ -668,7 +668,7 @@ pub enum SessionEvent { /// The direction of the session, either `Inbound` or `Outgoing` direction: Direction, }, - /// A session received a valid message via RLPx. + /// A session received a valid message via `RLPx`. ValidMessage { /// The remote node's public key peer_id: PeerId, @@ -936,8 +936,8 @@ async fn authenticate( } } -/// Returns an [ECIESStream] if it can be built. If not, send a -/// [PendingSessionEvent::EciesAuthError] and returns `None` +/// Returns an [`ECIESStream`] if it can be built. 
If not, send a +/// [`PendingSessionEvent::EciesAuthError`] and returns `None` async fn get_eciess_stream( stream: Io, secret_key: SecretKey, @@ -955,8 +955,8 @@ async fn get_eciess_stream( /// /// On Success return the authenticated stream as [`PendingSessionEvent`]. /// -/// If additional [RlpxSubProtocolHandlers] are provided, the hello message will be updated to also -/// negotiate the additional protocols. +/// If additional [`RlpxSubProtocolHandlers`] are provided, the hello message will be updated to +/// also negotiate the additional protocols. #[allow(clippy::too_many_arguments)] async fn authenticate_stream( stream: UnauthedP2PStream>>, diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 23b75f19a..67172e53e 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -63,8 +63,9 @@ pub struct NetworkState { discovery: Discovery, /// The type that handles requests. /// - /// The fetcher streams RLPx related requests on a per-peer basis to this type. This type will - /// then queue in the request and notify the fetcher once the result has been received. + /// The fetcher streams `RLPx` related requests on a per-peer basis to this type. This type + /// will then queue in the request and notify the fetcher once the result has been + /// received. state_fetcher: StateFetcher, } @@ -379,7 +380,7 @@ where /// Invoked when received a response from a connected peer. /// /// Delegates the response result to the fetcher which may return an outcome specific - /// instruction that needs to be handled in [Self::on_block_response_outcome]. This could be + /// instruction that needs to be handled in [`Self::on_block_response_outcome`]. This could be /// a follow-up request or an instruction to slash the peer's reputation. fn on_eth_response(&mut self, peer: PeerId, resp: PeerResponseResult) -> Option { match resp { @@ -518,7 +519,7 @@ pub(crate) enum StateAction { /// The reported [`ForkId`] by this peer. fork_id: ForkId, }, - /// A new node was found through the discovery, possibly with a ForkId + /// A new node was found through the discovery, possibly with a `ForkId` DiscoveredNode { peer_id: PeerId, socket_addr: SocketAddr, fork_id: Option }, /// A peer was added PeerAdded(PeerId), @@ -547,7 +548,7 @@ mod tests { use tokio::sync::mpsc; use tokio_stream::{wrappers::ReceiverStream, StreamExt}; - /// Returns a testing instance of the [NetworkState]. + /// Returns a testing instance of the [`NetworkState`]. fn state() -> NetworkState { let peers = PeersManager::default(); let handle = peers.handle(); diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index 9dff7705e..df56aa702 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -36,7 +36,7 @@ use tracing::trace; /// Following diagram gives displays the dataflow contained in the [`Swarm`] /// /// The [`ConnectionListener`] yields incoming [`TcpStream`]s from peers that are spawned as session -/// tasks. After a successful RLPx authentication, the task is ready to accept ETH requests or +/// tasks. After a successful `RLPx` authentication, the task is ready to accept ETH requests or /// broadcast messages. A task listens for messages from the [`SessionManager`] which include /// broadcast messages like `Transactions` or internal commands, for example to disconnect the /// session. 
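The two `SessionManager` timeout fields above encode a two-tier policy: a request unanswered past the initial timeout is answered internally with a `TimeoutError`, while silence past `protocol_breach_request_timeout` is treated as a protocol violation and drops the session. A sketch of that classification, with made-up thresholds:

```rust
use std::time::{Duration, Instant};

#[derive(Debug, PartialEq)]
enum Verdict {
    Ok,
    InternalTimeout, // answer the internal request with a timeout error
    DropSession,     // treated as a protocol violation
}

/// Classify a pending request by how long it has gone unanswered.
fn classify(sent_at: Instant, now: Instant, initial: Duration, breach: Duration) -> Verdict {
    let waited = now.duration_since(sent_at);
    if waited >= breach {
        Verdict::DropSession
    } else if waited >= initial {
        Verdict::InternalTimeout
    } else {
        Verdict::Ok
    }
}

fn main() {
    let sent = Instant::now();
    let now = sent + Duration::from_secs(30);
    // Example thresholds only; the real values are tuned per session config.
    let v = classify(sent, now, Duration::from_secs(20), Duration::from_secs(120));
    assert_eq!(v, Verdict::InternalTimeout);
}
```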
@@ -47,7 +47,7 @@ use tracing::trace; /// [`StateFetcher`], which receives request objects from the client interfaces responsible for /// downloading headers and bodies. /// -/// include_mmd!("docs/mermaid/swarm.mmd") +/// `include_mmd!("docs/mermaid/swarm.mmd")` #[derive(Debug)] #[must_use = "Swarm does nothing unless polled"] pub(crate) struct Swarm { @@ -71,7 +71,7 @@ impl Swarm { Self { incoming, sessions, state } } - /// Adds an additional protocol handler to the RLPx sub-protocol list. + /// Adds an additional protocol handler to the `RLPx` sub-protocol list. pub(crate) fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) { self.sessions_mut().add_rlpx_sub_protocol(protocol); } @@ -273,7 +273,7 @@ where self.state_mut().peers_mut().on_shutdown(); } - /// Checks if the node's network connection state is 'ShuttingDown' + /// Checks if the node's network connection state is '`ShuttingDown`' #[inline] pub(crate) const fn is_shutting_down(&self) -> bool { self.state().peers().connection_state().is_shutting_down() diff --git a/crates/net/network/src/test_utils/init.rs b/crates/net/network/src/test_utils/init.rs index b72046a7f..32a4be5b2 100644 --- a/crates/net/network/src/test_utils/init.rs +++ b/crates/net/network/src/test_utils/init.rs @@ -2,11 +2,11 @@ use enr::{k256::ecdsa::SigningKey, Enr, EnrPublicKey}; use reth_network_types::PeerId; use std::{net::SocketAddr, time::Duration}; -/// The timeout for tests that create a GethInstance +/// The timeout for tests that create a `GethInstance` pub const GETH_TIMEOUT: Duration = Duration::from_secs(60); -/// Obtains a PeerId from an ENR. In this case, the PeerId represents the public key contained in -/// the ENR. +/// Obtains a `PeerId` from an ENR. In this case, the `PeerId` represents the public key contained +/// in the ENR. pub fn enr_to_peer_id(enr: Enr) -> PeerId { // In the following tests, methods which accept a public key expect it to contain the public // key in its 64-byte encoded (uncompressed) form. @@ -50,8 +50,8 @@ pub fn unused_tcp_and_udp_port() -> u16 { } } -/// Creates two unused SocketAddrs, intended for use as the p2p (TCP) and discovery ports (UDP) for -/// new reth instances. +/// Creates two unused `SocketAddrs`, intended for use as the p2p (TCP) and discovery ports (UDP) +/// for new reth instances. pub fn unused_tcp_udp() -> (SocketAddr, SocketAddr) { (unused_tcp_addr(), unused_udp_addr()) } diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index 4616acc6b..834f559b3 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -368,7 +368,7 @@ where self.network.local_addr() } - /// The [PeerId] of this peer. + /// The [`PeerId`] of this peer. pub fn peer_id(&self) -> PeerId { *self.network.peer_id() } @@ -440,7 +440,7 @@ impl Peer where C: BlockReader + HeaderProvider + Clone, { - /// Installs a new [TestPool] + /// Installs a new [`TestPool`] pub fn install_test_pool(&mut self) { self.install_transactions_manager(TestPoolBuilder::default().into()) } diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 1ce5dc943..dc5548bd5 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -74,12 +74,12 @@ use self::constants::{tx_manager::*, DEFAULT_SOFT_LIMIT_BYTE_SIZE_TRANSACTIONS_B /// Resolves with the result of each transaction import.
pub type PoolImportFuture = Pin>> + Send + 'static>>; -/// Api to interact with [TransactionsManager] task. +/// Api to interact with [`TransactionsManager`] task. /// -/// This can be obtained via [TransactionsManager::handle] and can be used to manually interact with -/// the [TransactionsManager] task once it is spawned. +/// This can be obtained via [`TransactionsManager::handle`] and can be used to manually interact +/// with the [`TransactionsManager`] task once it is spawned. /// -/// For example [TransactionsHandle::get_peer_transaction_hashes] returns the transaction hashes +/// For example [`TransactionsHandle::get_peer_transaction_hashes`] returns the transaction hashes /// known by a specific peer. #[derive(Debug, Clone)] pub struct TransactionsHandle { @@ -211,11 +211,11 @@ pub struct TransactionsManager { /// The import process includes: /// - validation of the transactions, e.g. transaction is well formed: valid tx type, fees are /// valid, or for 4844 transaction the blobs are valid. See also - /// [EthTransactionValidator](reth_transaction_pool::validate::EthTransactionValidator) + /// [`EthTransactionValidator`](reth_transaction_pool::validate::EthTransactionValidator) /// - if the transaction is valid, it is added into the pool. /// /// Once the new transaction reaches the __pending__ state it will be emitted by the pool via - /// [TransactionPool::pending_transactions_listener] and arrive at the `pending_transactions` + /// [`TransactionPool::pending_transactions_listener`] and arrive at the `pending_transactions` /// receiver. pool_imports: FuturesUnordered, /// Stats on pending pool imports that help the node self-monitor. @@ -226,12 +226,12 @@ pub struct TransactionsManager { peers: HashMap, /// Send half for the command channel. /// - /// This is kept so that a new [TransactionsHandle] can be created at any time. + /// This is kept so that a new [`TransactionsHandle`] can be created at any time. command_tx: mpsc::UnboundedSender, /// Incoming commands from [`TransactionsHandle`]. /// /// This will only receive commands if a user manually sends a command to the manager through - /// the [TransactionsHandle] to interact with this type directly. + /// the [`TransactionsHandle`] to interact with this type directly. command_rx: UnboundedReceiverStream, /// A stream that yields new __pending__ transactions. /// @@ -244,7 +244,7 @@ pub struct TransactionsManager { pending_transactions: ReceiverStream, /// Incoming events from the [`NetworkManager`](crate::NetworkManager). transaction_events: UnboundedMeteredReceiver, - /// TransactionsManager metrics + /// `TransactionsManager` metrics metrics: TransactionsManagerMetrics, } @@ -404,7 +404,7 @@ where /// Propagate the transactions to all connected peers either as full objects or hashes. /// /// The message for new pooled hashes depends on the negotiated version of the stream. - /// See [NewPooledTransactionHashes] + /// See [`NewPooledTransactionHashes`] /// /// Note: EIP-4844 are disallowed from being broadcast in full and are only ever sent as hashes, see also . fn propagate_transactions( @@ -1406,7 +1406,7 @@ impl FullTransactionsBuilder { self.transactions.push(Arc::clone(&transaction.transaction)); } - /// Returns whether or not any transactions are in the [FullTransactionsBuilder]. + /// Returns whether or not any transactions are in the [`FullTransactionsBuilder`]. 
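As the `command_tx` doc above says, the sender half is retained purely so fresh `TransactionsHandle`s can be minted on demand. The shape, reduced to std channels (the real manager uses tokio's unbounded mpsc):

```rust
use std::sync::mpsc;

enum Command {
    Ping,
}

/// Cloneable handle over the command channel.
#[derive(Clone)]
struct Handle {
    tx: mpsc::Sender<Command>,
}

struct Manager {
    /// Kept so a new Handle can be created at any time.
    command_tx: mpsc::Sender<Command>,
    command_rx: mpsc::Receiver<Command>,
}

impl Manager {
    fn new() -> Self {
        let (tx, rx) = mpsc::channel();
        Self { command_tx: tx, command_rx: rx }
    }

    /// Mint a fresh handle from the retained sender half.
    fn handle(&self) -> Handle {
        Handle { tx: self.command_tx.clone() }
    }
}

fn main() {
    let manager = Manager::new();
    let handle = manager.handle();
    handle.tx.send(Command::Ping).unwrap();
    assert!(matches!(manager.command_rx.try_recv(), Ok(Command::Ping)));
}
```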
fn is_empty(&self) -> bool { self.transactions.is_empty() } diff --git a/crates/net/p2p/src/bodies/downloader.rs b/crates/net/p2p/src/bodies/downloader.rs index f7f5e9c92..414a33fd4 100644 --- a/crates/net/p2p/src/bodies/downloader.rs +++ b/crates/net/p2p/src/bodies/downloader.rs @@ -10,7 +10,7 @@ pub type BodyDownloaderResult = DownloadResult>; /// A downloader capable of fetching and yielding block bodies from block headers. /// /// A downloader represents a distinct strategy for submitting requests to download block bodies, -/// while a [BodiesClient][crate::bodies::client::BodiesClient] represents a client capable of +/// while a [`BodiesClient`][crate::bodies::client::BodiesClient] represents a client capable of /// fulfilling these requests. pub trait BodyDownloader: Send + Sync + Stream + Unpin { /// Method for setting the download range. diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index ec30b4c09..ae7bd1cf8 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -18,7 +18,7 @@ impl BlockResponse { } } - /// Calculates a heuristic for the in-memory size of the [BlockResponse]. + /// Calculates a heuristic for the in-memory size of the [`BlockResponse`]. #[inline] pub fn size(&self) -> usize { match self { diff --git a/crates/net/p2p/src/error.rs b/crates/net/p2p/src/error.rs index 3a7c79a8d..33e0a9d6d 100644 --- a/crates/net/p2p/src/error.rs +++ b/crates/net/p2p/src/error.rs @@ -13,12 +13,12 @@ use tokio::sync::{mpsc, oneshot}; /// Result alias for result of a request. pub type RequestResult = Result; -/// Result with [PeerId][reth_network_types::PeerId] +/// Result with [`PeerId`][reth_network_types::PeerId] pub type PeerRequestResult = RequestResult>; /// Helper trait used to validate responses. pub trait EthResponseValidator { - /// Determine whether the response matches what we requested in [HeadersRequest] + /// Determine whether the response matches what we requested in [`HeadersRequest`] fn is_likely_bad_headers_response(&self, request: &HeadersRequest) -> bool; /// Return the response reputation impact if any @@ -50,14 +50,14 @@ impl EthResponseValidator for RequestResult> { } } - /// [RequestError::ChannelClosed] is not possible here since these errors are mapped to + /// [`RequestError::ChannelClosed`] is not possible here since these errors are mapped to /// `ConnectionDropped`, which will be handled when the dropped connection is cleaned up. /// - /// [RequestError::ConnectionDropped] should be ignored here because this is already handled + /// [`RequestError::ConnectionDropped`] should be ignored here because this is already handled /// when the dropped connection is handled. /// - /// [RequestError::UnsupportedCapability] is not used yet because we only support active session - /// for eth protocol. + /// [`RequestError::UnsupportedCapability`] is not used yet because we only support active + /// session for eth protocol. fn reputation_change_err(&self) -> Option { if let Err(err) = self { match err { diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index 0c0140686..4795f7cd1 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -45,12 +45,12 @@ impl FullBlockClient where Client: BodiesClient + HeadersClient + Clone, { - /// Returns a future that fetches the [SealedBlock] for the given hash. + /// Returns a future that fetches the [`SealedBlock`] for the given hash. 
/// /// Note: this future is cancel safe /// /// Caution: This does no validation of body (transactions) response but guarantees that the - /// [SealedHeader] matches the requested hash. + /// [`SealedHeader`] matches the requested hash. pub fn get_full_block(&self, hash: B256) -> FetchFullBlockFuture { let client = self.client.clone(); FetchFullBlockFuture { @@ -65,12 +65,12 @@ where } } - /// Returns a future that fetches [SealedBlock]s for the given hash and count. + /// Returns a future that fetches [`SealedBlock`]s for the given hash and count. /// /// Note: this future is cancel safe /// /// Caution: This does no validation of body (transactions) responses but guarantees that - /// the starting [SealedHeader] matches the requested hash, and that the number of headers and + /// the starting [`SealedHeader`] matches the requested hash, and that the number of headers and /// bodies received matches the requested limit. /// /// The returned future yields bodies in falling order, i.e. with descending block numbers. @@ -130,7 +130,7 @@ where self.header.as_ref().map(|h| h.number) } - /// Returns the [SealedBlock] if the request is complete and valid. + /// Returns the [`SealedBlock`] if the request is complete and valid. fn take_block(&mut self) -> Option { if self.header.is_none() || self.body.is_none() { return None @@ -353,7 +353,7 @@ fn ensure_valid_body_response( /// This first fetches the headers for the given range using the inner `Client`. Once the request /// is complete, it will fetch the bodies for the headers it received. /// -/// Once the bodies request completes, the [SealedBlock]s will be assembled and the future will +/// Once the bodies request completes, the [`SealedBlock`]s will be assembled and the future will /// yield the full block range. /// /// The full block range will be returned with falling block numbers, i.e. in descending order. @@ -420,7 +420,7 @@ where self.pending_headers.iter().map(|h| h.hash()).collect() } - /// Returns the [SealedBlock]s if the request is complete and valid. + /// Returns the [`SealedBlock`]s if the request is complete and valid. /// /// The request is complete if the number of blocks requested is equal to the number of blocks /// received. The request is valid if the returned bodies match the roots in the headers. @@ -640,7 +640,7 @@ struct FullBlockRangeStream where Client: BodiesClient + HeadersClient, { - /// The inner [FetchFullBlockRangeFuture] that is polled. + /// The inner [`FetchFullBlockRangeFuture`] that is polled. inner: FetchFullBlockRangeFuture, /// The blocks that have been received so far. /// diff --git a/crates/net/p2p/src/headers/downloader.rs b/crates/net/p2p/src/headers/downloader.rs index 16d5c1af8..a568c7313 100644 --- a/crates/net/p2p/src/headers/downloader.rs +++ b/crates/net/p2p/src/headers/downloader.rs @@ -6,16 +6,17 @@ use reth_primitives::{BlockHashOrNumber, SealedHeader, B256}; /// A downloader capable of fetching and yielding block headers. /// /// A downloader represents a distinct strategy for submitting requests to download block headers, -/// while a [HeadersClient][crate::headers::client::HeadersClient] represents a client capable +/// while a [`HeadersClient`][crate::headers::client::HeadersClient] represents a client capable /// of fulfilling these requests. /// -/// A [HeaderDownloader] is a [Stream] that returns batches of headers. +/// A [`HeaderDownloader`] is a [Stream] that returns batches of headers. 
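The `HeaderDownloader` trait defined just below provides `update_sync_gap` as a default method that simply fans out to the two required setters. Here is that shape in miniature, with header and target types reduced to numbers:

```rust
/// Reduced stand-ins for SealedHeader / SyncTarget.
type Head = u64;
type Target = u64;

trait Downloader {
    /// Provided method: delegates to the two required setters, mirroring
    /// the `update_sync_gap` default method in the hunk below.
    fn update_sync_gap(&mut self, head: Head, target: Target) {
        self.update_local_head(head);
        self.update_sync_target(target);
    }

    fn update_local_head(&mut self, head: Head);
    fn update_sync_target(&mut self, target: Target);
}

struct D {
    head: Head,
    target: Target,
}

impl Downloader for D {
    fn update_local_head(&mut self, head: Head) {
        self.head = head;
    }
    fn update_sync_target(&mut self, target: Target) {
        self.target = target;
    }
}

fn main() {
    let mut d = D { head: 0, target: 0 };
    d.update_sync_gap(10, 20);
    assert_eq!((d.head, d.target), (10, 20));
}
```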
pub trait HeaderDownloader: Send + Sync + Stream>> + Unpin { /// Updates the gap to sync which ranges from local head to the sync target /// - /// See also [HeaderDownloader::update_sync_target] and [HeaderDownloader::update_local_head] + /// See also [`HeaderDownloader::update_sync_target`] and + /// [`HeaderDownloader::update_local_head`] fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { self.update_local_head(head); self.update_sync_target(target); @@ -31,7 +32,7 @@ pub trait HeaderDownloader: fn set_batch_size(&mut self, limit: usize); } -/// Specifies the target to sync for [HeaderDownloader::update_sync_target] +/// Specifies the target to sync for [`HeaderDownloader::update_sync_target`] #[derive(Debug, Clone, Eq, PartialEq)] pub enum SyncTarget { /// This represents a range missing headers in the form of `(head,..` @@ -57,8 +58,8 @@ pub enum SyncTarget { impl SyncTarget { /// Returns the tip to sync to _inclusively_ /// - /// This returns the hash if the target is [SyncTarget::Tip] or the `parent_hash` of the given - /// header in [SyncTarget::Gap] + /// This returns the hash if the target is [`SyncTarget::Tip`] or the `parent_hash` of the given + /// header in [`SyncTarget::Gap`] pub fn tip(&self) -> BlockHashOrNumber { match self { Self::Tip(tip) => (*tip).into(), diff --git a/crates/net/p2p/src/headers/mod.rs b/crates/net/p2p/src/headers/mod.rs index 56aabf9d6..d9e2859e5 100644 --- a/crates/net/p2p/src/headers/mod.rs +++ b/crates/net/p2p/src/headers/mod.rs @@ -4,7 +4,7 @@ pub mod client; /// A downloader that receives and verifies block headers, is generic -/// over the Consensus and the HeadersClient being used. +/// over the Consensus and the `HeadersClient` being used. /// /// [`Consensus`]: reth_consensus::Consensus /// [`HeadersClient`]: client::HeadersClient diff --git a/crates/net/p2p/src/lib.rs b/crates/net/p2p/src/lib.rs index 310afc799..074ec6e08 100644 --- a/crates/net/p2p/src/lib.rs +++ b/crates/net/p2p/src/lib.rs @@ -35,7 +35,7 @@ pub mod headers; /// interacting with the network implementation pub mod error; -/// Priority enum for BlockHeader and BlockBody requests +/// Priority enum for `BlockHeader` and `BlockBody` requests pub mod priority; /// Syncing related traits. diff --git a/crates/net/p2p/src/priority.rs b/crates/net/p2p/src/priority.rs index bfa12f507..da01203e8 100644 --- a/crates/net/p2p/src/priority.rs +++ b/crates/net/p2p/src/priority.rs @@ -1,4 +1,4 @@ -/// BlockHeader and BodyHeader DownloadRequest priority +/// `BlockHeader` and `BodyHeader` `DownloadRequest` priority #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum Priority { /// Queued from the back for download requests. @@ -10,12 +10,12 @@ pub enum Priority { } impl Priority { - /// Returns `true` if this is [Priority::High] + /// Returns `true` if this is [`Priority::High`] pub const fn is_high(&self) -> bool { matches!(self, Self::High) } - /// Returns `true` if this is [Priority::Normal] + /// Returns `true` if this is [`Priority::Normal`] pub const fn is_normal(&self) -> bool { matches!(self, Self::Normal) } diff --git a/crates/net/p2p/src/sync.rs b/crates/net/p2p/src/sync.rs index 0242b797e..94d40dac0 100644 --- a/crates/net/p2p/src/sync.rs +++ b/crates/net/p2p/src/sync.rs @@ -49,7 +49,7 @@ impl SyncState { } } -/// A [NetworkSyncUpdater] implementation that does nothing. +/// A [`NetworkSyncUpdater`] implementation that does nothing. 
+/// A [`NetworkSyncUpdater`] implementation that does nothing.
#[derive(Clone, Copy, Debug, Default)] #[non_exhaustive] pub struct NoopSyncStateUpdater; diff --git a/crates/net/types/src/lib.rs b/crates/net/types/src/lib.rs index b0d841235..327fd0c9c 100644 --- a/crates/net/types/src/lib.rs +++ b/crates/net/types/src/lib.rs @@ -32,15 +32,15 @@ pub use node_record::{NodeRecord, NodeRecordParseError}; /// See: const SECP256K1_TAG_PUBKEY_UNCOMPRESSED: u8 = 4; -/// Converts a [secp256k1::PublicKey] to a [PeerId] by stripping the -/// `SECP256K1_TAG_PUBKEY_UNCOMPRESSED` tag and storing the rest of the slice in the [PeerId]. +/// Converts a [`secp256k1::PublicKey`] to a [`PeerId`] by stripping the +/// `SECP256K1_TAG_PUBKEY_UNCOMPRESSED` tag and storing the rest of the slice in the [`PeerId`]. #[inline] pub fn pk2id(pk: &PublicKey) -> PeerId { PeerId::from_slice(&pk.serialize_uncompressed()[1..]) } -/// Converts a [PeerId] to a [secp256k1::PublicKey] by prepending the [PeerId] bytes with the -/// SECP256K1_TAG_PUBKEY_UNCOMPRESSED tag. +/// Converts a [`PeerId`] to a [`secp256k1::PublicKey`] by prepending the [`PeerId`] bytes with the +/// `SECP256K1_TAG_PUBKEY_UNCOMPRESSED` tag. #[inline] pub fn id2pk(id: PeerId) -> Result { // NOTE: B512 is used as a PeerId because 512 bits is enough to represent an uncompressed @@ -51,7 +51,7 @@ pub fn id2pk(id: PeerId) -> Result { PublicKey::from_slice(&s) } -/// A peer that can come in ENR or [NodeRecord] form. +/// A peer that can come in ENR or [`NodeRecord`] form. #[derive( Debug, Clone, Eq, PartialEq, Hash, serde_with::SerializeDisplay, serde_with::DeserializeFromStr, )] @@ -174,7 +174,7 @@ impl WithPeerId { WithPeerId(self.0, self.1.into()) } - /// Split the wrapper into [PeerId] and data tuple + /// Split the wrapper into [`PeerId`] and data tuple pub fn split(self) -> (PeerId, T) { (self.0, self.1) } diff --git a/crates/net/types/src/node_record.rs b/crates/net/types/src/node_record.rs index 898d0312c..ec1435670 100644 --- a/crates/net/types/src/node_record.rs +++ b/crates/net/types/src/node_record.rs @@ -1,4 +1,4 @@ -//! Commonly used NodeRecord type for peers. +//! Commonly used `NodeRecord` type for peers. use std::{ fmt, @@ -49,11 +49,11 @@ impl NodeRecord { } /// Converts the `address` into an [`Ipv4Addr`] if the `address` is a mapped - /// [Ipv6Addr](std::net::Ipv6Addr). + /// [`Ipv6Addr`](std::net::Ipv6Addr). /// /// Returns `true` if the address was converted. /// - /// See also [std::net::Ipv6Addr::to_ipv4_mapped] + /// See also [`std::net::Ipv6Addr::to_ipv4_mapped`] pub fn convert_ipv4_mapped(&mut self) -> bool { // convert IPv4 mapped IPv6 address if let IpAddr::V6(v6) = self.address { @@ -65,7 +65,7 @@ impl NodeRecord { false } - /// Same as [Self::convert_ipv4_mapped] but consumes the type + /// Same as [`Self::convert_ipv4_mapped`] but consumes the type pub fn into_ipv4_mapped(mut self) -> Self { self.convert_ipv4_mapped(); self diff --git a/crates/node-core/src/args/dev.rs b/crates/node-core/src/args/dev.rs index 7670ccfd6..b6a017452 100644 --- a/crates/node-core/src/args/dev.rs +++ b/crates/node-core/src/args/dev.rs @@ -29,7 +29,7 @@ pub struct DevArgs { /// Interval between blocks. 
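`pk2id`/`id2pk` in the net/types hunk above round-trip between a 65-byte uncompressed secp256k1 key and a 64-byte peer id by stripping or prepending the `0x04` tag byte. The byte-level shape, using plain arrays instead of secp256k1 types:

```rust
const TAG_UNCOMPRESSED: u8 = 4;

/// 65-byte uncompressed key -> 64-byte id: strip the leading tag byte.
fn pk_to_id(pk: &[u8; 65]) -> [u8; 64] {
    let mut id = [0u8; 64];
    id.copy_from_slice(&pk[1..]);
    id
}

/// 64-byte id -> 65-byte uncompressed key: prepend the tag byte.
fn id_to_pk(id: &[u8; 64]) -> [u8; 65] {
    let mut pk = [0u8; 65];
    pk[0] = TAG_UNCOMPRESSED;
    pk[1..].copy_from_slice(id);
    pk
}

fn main() {
    let mut pk = [0u8; 65];
    pk[0] = TAG_UNCOMPRESSED;
    pk[1] = 0xab;
    let id = pk_to_id(&pk);
    assert_eq!(id_to_pk(&id), pk);
}
```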
/// - /// Parses strings using [humantime::parse_duration] + /// Parses strings using [`humantime::parse_duration`] /// --dev.block-time 12s #[arg( long = "dev.block-time", diff --git a/crates/node-core/src/args/gas_price_oracle.rs b/crates/node-core/src/args/gas_price_oracle.rs index 63b5e4b76..edb3217c7 100644 --- a/crates/node-core/src/args/gas_price_oracle.rs +++ b/crates/node-core/src/args/gas_price_oracle.rs @@ -28,7 +28,7 @@ pub struct GasPriceOracleArgs { } impl GasPriceOracleArgs { - /// Returns a [GasPriceOracleConfig] from the arguments. + /// Returns a [`GasPriceOracleConfig`] from the arguments. pub fn gas_price_oracle_config(&self) -> GasPriceOracleConfig { let Self { blocks, ignore_price, max_price, percentile } = self; GasPriceOracleConfig { diff --git a/crates/node-core/src/args/log.rs b/crates/node-core/src/args/log.rs index b7e69e663..9f77e5555 100644 --- a/crates/node-core/src/args/log.rs +++ b/crates/node-core/src/args/log.rs @@ -72,7 +72,7 @@ pub struct LogArgs { } impl LogArgs { - /// Creates a [LayerInfo] instance. + /// Creates a [`LayerInfo`] instance. fn layer(&self, format: LogFormat, filter: String, use_color: bool) -> LayerInfo { LayerInfo::new( format, diff --git a/crates/node-core/src/args/mod.rs b/crates/node-core/src/args/mod.rs index bce63917b..270c757bc 100644 --- a/crates/node-core/src/args/mod.rs +++ b/crates/node-core/src/args/mod.rs @@ -8,7 +8,7 @@ pub use network::{DiscoveryArgs, NetworkArgs}; mod rpc_server; pub use rpc_server::RpcServerArgs; -/// RpcStateCacheArgs struct for configuring RPC state cache +/// `RpcStateCacheArgs` struct for configuring RPC state cache mod rpc_state_cache; pub use rpc_state_cache::RpcStateCacheArgs; @@ -27,7 +27,7 @@ pub use log::{ColorMode, LogArgs}; mod secret_key; pub use secret_key::{get_secret_key, SecretKeyError}; -/// PayloadBuilderArgs struct for configuring the payload builder +/// `PayloadBuilderArgs` struct for configuring the payload builder mod payload_builder; pub use payload_builder::PayloadBuilderArgs; diff --git a/crates/node-core/src/args/network.rs b/crates/node-core/src/args/network.rs index c0e2522a1..0ba5d6cd1 100644 --- a/crates/node-core/src/args/network.rs +++ b/crates/node-core/src/args/network.rs @@ -34,6 +34,7 @@ pub struct NetworkArgs { #[command(flatten)] pub discovery: DiscoveryArgs, + #[allow(clippy::doc_markdown)] /// Comma separated enode URLs of trusted peers for P2P connections. /// /// --trusted-peers enode://abcd@192.168.0.1:30303 @@ -99,8 +100,8 @@ pub struct NetworkArgs { /// Experimental, for usage in research. Sets the max accumulated byte size of transactions to /// request in one request. /// - /// Since RLPx protocol version 68, the byte size of a transaction is shared as metadata in a - /// transaction announcement (see RLPx specs). This allows a node to request a specific size + /// Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a + /// transaction announcement (see `RLPx` specs). This allows a node to request a specific size /// response. /// /// By default, nodes request only 128 KiB worth of transactions, but should a peer request @@ -266,12 +267,12 @@ pub struct DiscoveryArgs { #[arg(id = "discovery.port", long = "discovery.port", value_name = "DISCOVERY_PORT", default_value_t = DEFAULT_DISCOVERY_PORT)] pub port: u16, - /// The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by RLPx + /// The UDP IPv4 address to use for devp2p peer discovery version 5. 
Overwritten by `RLPx` /// address, if it's also IPv4. #[arg(id = "discovery.v5.addr", long = "discovery.v5.addr", value_name = "DISCOVERY_V5_ADDR", default_value = None)] pub discv5_addr: Option, - /// The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by RLPx + /// The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` /// address, if it's also IPv6. #[arg(id = "discovery.v5.addr.ipv6", long = "discovery.v5.addr.ipv6", value_name = "DISCOVERY_V5_ADDR_IPV6", default_value = None)] pub discv5_addr_ipv6: Option, @@ -306,7 +307,7 @@ pub struct DiscoveryArgs { } impl DiscoveryArgs { - /// Apply the discovery settings to the given [NetworkConfigBuilder] + /// Apply the discovery settings to the given [`NetworkConfigBuilder`] pub fn apply_to_builder( &self, mut network_config_builder: NetworkConfigBuilder, diff --git a/crates/node-core/src/args/rpc_server.rs b/crates/node-core/src/args/rpc_server.rs index d8c3f2466..033c127e7 100644 --- a/crates/node-core/src/args/rpc_server.rs +++ b/crates/node-core/src/args/rpc_server.rs @@ -79,7 +79,7 @@ pub struct RpcServerArgs { #[arg(long = "ws.port", default_value_t = constants::DEFAULT_WS_RPC_PORT)] pub ws_port: u16, - /// Origins from which to accept WebSocket requests + /// Origins from which to accept `WebSocket` requests #[arg(id = "ws.origins", long = "ws.origins")] pub ws_allowed_origins: Option, @@ -438,7 +438,7 @@ impl Default for RpcServerArgs { } } -/// clap value parser for [RpcModuleSelection]. +/// clap value parser for [`RpcModuleSelection`]. #[derive(Clone, Debug, Default)] #[non_exhaustive] struct RpcModuleSelectionValueParser; diff --git a/crates/node-core/src/args/utils.rs b/crates/node-core/src/args/utils.rs index 4f49bf134..4648f6500 100644 --- a/crates/node-core/src/args/utils.rs +++ b/crates/node-core/src/args/utils.rs @@ -31,7 +31,7 @@ pub fn parse_duration_from_secs(arg: &str) -> eyre::Result eyre::Result, eyre::Error> { Ok(match s { @@ -65,11 +65,11 @@ pub fn chain_help() -> String { format!("The chain this node is running.\nPossible values are either a built-in chain or the path to a chain specification file.\n\nBuilt-in chains:\n {}", SUPPORTED_CHAINS.join(", ")) } -/// Clap value parser for [ChainSpec]s. +/// Clap value parser for [`ChainSpec`]s. /// /// The value parser matches either a known chain, the path /// to a json file, or a json formatted string in-memory. The json can be either -/// a serialized [ChainSpec] or Genesis struct. +/// a serialized [`ChainSpec`] or Genesis struct. pub fn genesis_value_parser(s: &str) -> eyre::Result, eyre::Error> { Ok(match s { #[cfg(not(feature = "optimism"))] @@ -111,7 +111,7 @@ pub fn genesis_value_parser(s: &str) -> eyre::Result, eyre::Error }) } -/// Parse [BlockHashOrNumber] +/// Parse [`BlockHashOrNumber`] pub fn hash_or_num_value_parser(value: &str) -> eyre::Result { match B256::from_str(value) { Ok(hash) => Ok(BlockHashOrNumber::Hash(hash)), @@ -136,7 +136,7 @@ pub enum SocketAddressParsingError { Port(#[from] std::num::ParseIntError), } -/// Parse a [SocketAddr] from a `str`. +/// Parse a [`SocketAddr`] from a `str`. /// /// The following formats are checked: /// diff --git a/crates/node-core/src/cli/config.rs b/crates/node-core/src/cli/config.rs index 0fb8fbf41..ce205eb07 100644 --- a/crates/node-core/src/cli/config.rs +++ b/crates/node-core/src/cli/config.rs @@ -14,7 +14,7 @@ use std::{borrow::Cow, path::PathBuf, time::Duration}; /// A trait that provides a configured RPC server. 
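node-core's socket address parsing (touched above) accepts more than the standard `ip:port` form; the exact formats are listed in the elided doc. A reduced sketch that tries a full socket address first and falls back to treating the input as a bare port on localhost; the fallback rule here is an assumption for illustration, suggested by the `Port` variant of `SocketAddressParsingError`:

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

/// Try `ip:port` first, then fall back to a bare port on 127.0.0.1
/// (fallback behavior assumed for this sketch).
fn parse_socket_address(s: &str) -> Result<SocketAddr, String> {
    if let Ok(addr) = s.parse::<SocketAddr>() {
        return Ok(addr);
    }
    s.parse::<u16>()
        .map(|port| SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port))
        .map_err(|e| format!("invalid socket address `{s}`: {e}"))
}

fn main() {
    assert!(parse_socket_address("127.0.0.1:8545").is_ok());
    assert_eq!(parse_socket_address("8545").unwrap().port(), 8545);
    assert!(parse_socket_address("not-an-addr").is_err());
}
```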
/// /// This provides all basic config values for the RPC server and is implemented by the -/// [RpcServerArgs](crate::args::RpcServerArgs) type. +/// [`RpcServerArgs`](crate::args::RpcServerArgs) type. pub trait RethRpcConfig { /// Returns whether ipc is enabled. fn is_ipc_enabled(&self) -> bool; @@ -37,10 +37,10 @@ pub trait RethRpcConfig { /// Extracts the gas price oracle config from the args. fn gas_price_oracle_config(&self) -> GasPriceOracleConfig; - /// Creates the [TransportRpcModuleConfig] from cli args. + /// Creates the [`TransportRpcModuleConfig`] from cli args. /// /// This sets all the api modules, and configures additional settings like gas price oracle - /// settings in the [TransportRpcModuleConfig]. + /// settings in the [`TransportRpcModuleConfig`]. fn transport_rpc_module_config(&self) -> TransportRpcModuleConfig; /// Returns the default server builder for http/ws @@ -49,10 +49,10 @@ pub trait RethRpcConfig { /// Returns the default ipc server builder fn ipc_server_builder(&self) -> IpcServerBuilder; - /// Creates the [RpcServerConfig] from cli args. + /// Creates the [`RpcServerConfig`] from cli args. fn rpc_server_config(&self) -> RpcServerConfig; - /// Creates the [AuthServerConfig] from cli args. + /// Creates the [`AuthServerConfig`] from cli args. fn auth_server_config(&self, jwt_secret: JwtSecret) -> Result; /// The execution layer and consensus layer clients SHOULD accept a configuration parameter: @@ -79,7 +79,7 @@ pub trait RethRpcConfig { /// A trait that provides payload builder settings. /// /// This provides all basic payload builder settings and is implemented by the -/// [PayloadBuilderArgs](crate::args::PayloadBuilderArgs) type. +/// [`PayloadBuilderArgs`](crate::args::PayloadBuilderArgs) type. pub trait PayloadBuilderConfig { /// Block extra data set by the payload builder. fn extradata(&self) -> Cow<'_, str>; @@ -105,13 +105,13 @@ pub trait PayloadBuilderConfig { /// A trait that represents the configured network and can be used to apply additional configuration /// to the network. pub trait RethNetworkConfig { - /// Adds a new additional protocol to the RLPx sub-protocol list. + /// Adds a new additional protocol to the `RLPx` sub-protocol list. /// - /// These additional protocols are negotiated during the RLPx handshake. + /// These additional protocols are negotiated during the `RLPx` handshake. /// If both peers share the same protocol, the corresponding handler will be included alongside /// the `eth` protocol. /// - /// See also [ProtocolHandler](reth_network::protocol::ProtocolHandler) + /// See also [`ProtocolHandler`](reth_network::protocol::ProtocolHandler) fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol); /// Returns the secret key used for authenticating sessions. @@ -131,7 +131,7 @@ impl RethNetworkConfig for reth_network::NetworkManager { } /// A trait that provides all basic config values for the transaction pool and is implemented by the -/// [TxPoolArgs](crate::args::TxPoolArgs) type. +/// [`TxPoolArgs`](crate::args::TxPoolArgs) type. pub trait RethTransactionPoolConfig { /// Returns transaction pool configuration. fn pool_config(&self) -> PoolConfig; diff --git a/crates/node-core/src/dirs.rs b/crates/node-core/src/dirs.rs index 22bfab0dd..6c84e6ef9 100644 --- a/crates/node-core/src/dirs.rs +++ b/crates/node-core/src/dirs.rs @@ -16,35 +16,35 @@ pub fn config_path_prefix(chain: Chain) -> String { /// Returns the path to the reth data directory. 
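[Editor's note] The `dirs.rs` helpers documented in the following hunks are thin compositions over `dirs_next`, as the diff itself shows. Extracted as a runnable sketch (assumes `dirs-next = "2"` in Cargo.toml):

```rust
use std::path::PathBuf;

/// Returns the path to the reth data directory, per the docs above.
fn data_dir() -> Option<PathBuf> {
    dirs_next::data_dir().map(|root| root.join("reth"))
}

/// Returns the path to the reth database, nested under the data dir.
fn database_path() -> Option<PathBuf> {
    data_dir().map(|root| root.join("db"))
}

fn main() {
    println!("{:?}", database_path());
}
```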
/// -/// Refer to [dirs_next::data_dir] for cross-platform behavior. +/// Refer to [`dirs_next::data_dir`] for cross-platform behavior. pub fn data_dir() -> Option { dirs_next::data_dir().map(|root| root.join("reth")) } /// Returns the path to the reth database. /// -/// Refer to [dirs_next::data_dir] for cross-platform behavior. +/// Refer to [`dirs_next::data_dir`] for cross-platform behavior. pub fn database_path() -> Option { data_dir().map(|root| root.join("db")) } /// Returns the path to the reth configuration directory. /// -/// Refer to [dirs_next::config_dir] for cross-platform behavior. +/// Refer to [`dirs_next::config_dir`] for cross-platform behavior. pub fn config_dir() -> Option { dirs_next::config_dir().map(|root| root.join("reth")) } /// Returns the path to the reth cache directory. /// -/// Refer to [dirs_next::cache_dir] for cross-platform behavior. +/// Refer to [`dirs_next::cache_dir`] for cross-platform behavior. pub fn cache_dir() -> Option { dirs_next::cache_dir().map(|root| root.join("reth")) } /// Returns the path to the reth logs directory. /// -/// Refer to [dirs_next::cache_dir] for cross-platform behavior. +/// Refer to [`dirs_next::cache_dir`] for cross-platform behavior. pub fn logs_dir() -> Option { cache_dir().map(|root| root.join("logs")) } @@ -65,7 +65,7 @@ impl XdgPath for DataDirPath { /// Returns the path to the reth logs directory. /// -/// Refer to [dirs_next::cache_dir] for cross-platform behavior. +/// Refer to [`dirs_next::cache_dir`] for cross-platform behavior. #[derive(Clone, Copy, Debug, Default)] #[non_exhaustive] pub struct LogsDir; @@ -86,7 +86,7 @@ pub trait XdgPath { /// A wrapper type that either parses a user-given path or defaults to an /// OS-specific path. /// -/// The [FromStr] implementation supports shell expansions and common patterns such as `~` for the +/// The [`FromStr`] implementation supports shell expansions and common patterns such as `~` for the /// home directory. /// /// # Example @@ -171,7 +171,7 @@ impl PlatformPath { } } -/// An Optional wrapper type around [PlatformPath]. +/// An Optional wrapper type around [`PlatformPath`]. /// /// This is useful for when a path is optional, such as the `--data-dir` flag. #[derive(Clone, Debug, PartialEq, Eq)] @@ -250,7 +250,7 @@ impl From for MaybePlatformPath { } } -/// Wrapper type around PlatformPath that includes a `Chain`, used for separating reth data for +/// Wrapper type around `PlatformPath` that includes a `Chain`, used for separating reth data for /// different networks. /// /// If the chain is either mainnet, goerli, or sepolia, then the path will be: @@ -283,7 +283,7 @@ impl ChainPath { self.data_dir().join("db") } - /// Returns the path to the static_files directory for this chain. + /// Returns the path to the `static_files` directory for this chain. /// /// `
<DATA_DIR>/<CHAIN_ID>
//static_files` pub fn static_files(&self) -> PathBuf { diff --git a/crates/node-core/src/engine/engine_store.rs b/crates/node-core/src/engine/engine_store.rs index c8d6146e9..21e3c370a 100644 --- a/crates/node-core/src/engine/engine_store.rs +++ b/crates/node-core/src/engine/engine_store.rs @@ -24,14 +24,14 @@ use tracing::*; pub enum StoredEngineApiMessage { /// The on-disk representation of an `engine_forkchoiceUpdated` method call. ForkchoiceUpdated { - /// The [ForkchoiceState] sent in the persisted call. + /// The [`ForkchoiceState`] sent in the persisted call. state: ForkchoiceState, /// The payload attributes sent in the persisted call, if any. payload_attrs: Option, }, /// The on-disk representation of an `engine_newPayload` method call. NewPayload { - /// The [ExecutionPayload] sent in the persisted call. + /// The [`ExecutionPayload`] sent in the persisted call. payload: ExecutionPayload, /// The Cancun-specific fields sent in the persisted call, if any. cancun_fields: Option, @@ -46,14 +46,14 @@ pub struct EngineMessageStore { } impl EngineMessageStore { - /// Creates a new [EngineMessageStore] at the given path. + /// Creates a new [`EngineMessageStore`] at the given path. /// /// The path is expected to be a directory, where individual message JSON files will be stored. pub const fn new(path: PathBuf) -> Self { Self { path } } - /// Stores the received [BeaconEngineMessage] to disk, appending the `received_at` time to the + /// Stores the received [`BeaconEngineMessage`] to disk, appending the `received_at` time to the /// path. pub fn on_message( &self, diff --git a/crates/node-core/src/engine/mod.rs b/crates/node-core/src/engine/mod.rs index 2c4e12e68..38825b890 100644 --- a/crates/node-core/src/engine/mod.rs +++ b/crates/node-core/src/engine/mod.rs @@ -19,7 +19,7 @@ use skip_new_payload::EngineSkipNewPayload; pub trait EngineMessageStreamExt: Stream> { - /// Skips the specified number of [BeaconEngineMessage::ForkchoiceUpdated] messages from the + /// Skips the specified number of [`BeaconEngineMessage::ForkchoiceUpdated`] messages from the /// engine message stream. fn skip_fcu(self, count: usize) -> EngineSkipFcu where @@ -29,7 +29,7 @@ pub trait EngineMessageStreamExt: } /// If the count is [Some], returns the stream that skips the specified number of - /// [BeaconEngineMessage::ForkchoiceUpdated] messages. Otherwise, returns `Self`. + /// [`BeaconEngineMessage::ForkchoiceUpdated`] messages. Otherwise, returns `Self`. fn maybe_skip_fcu(self, maybe_count: Option) -> Either, Self> where Self: Sized, @@ -41,7 +41,7 @@ pub trait EngineMessageStreamExt: } } - /// Skips the specified number of [BeaconEngineMessage::NewPayload] messages from the + /// Skips the specified number of [`BeaconEngineMessage::NewPayload`] messages from the /// engine message stream. fn skip_new_payload(self, count: usize) -> EngineSkipNewPayload where @@ -51,7 +51,7 @@ pub trait EngineMessageStreamExt: } /// If the count is [Some], returns the stream that skips the specified number of - /// [BeaconEngineMessage::NewPayload] messages. Otherwise, returns `Self`. + /// [`BeaconEngineMessage::NewPayload`] messages. Otherwise, returns `Self`. 
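[Editor's note] The `EngineMessageStore` hunks above describe persisting each received engine message to a JSON file with the receive time in the path. A loose std-only sketch of that idea; the file naming and serialization here are illustrative, not reth's exact scheme:

```rust
use std::{
    fs,
    io::Write,
    path::PathBuf,
    time::{SystemTime, UNIX_EPOCH},
};

struct EngineMessageStore {
    path: PathBuf,
}

impl EngineMessageStore {
    const fn new(path: PathBuf) -> Self {
        Self { path }
    }

    /// Writes one already-serialized message under `path`, appending the
    /// receive time to the file name so messages can be replayed in order.
    fn on_message(&self, serialized: &str) -> std::io::Result<()> {
        fs::create_dir_all(&self.path)?;
        let received_at = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis();
        let file = self.path.join(format!("{received_at}-message.json"));
        fs::File::create(file)?.write_all(serialized.as_bytes())
    }
}

fn main() -> std::io::Result<()> {
    EngineMessageStore::new(std::env::temp_dir().join("engine-store")).on_message("{}")
}
```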
fn maybe_skip_new_payload( self, maybe_count: Option, diff --git a/crates/node-core/src/engine/skip_fcu.rs b/crates/node-core/src/engine/skip_fcu.rs index 7a76e8009..d63894a5c 100644 --- a/crates/node-core/src/engine/skip_fcu.rs +++ b/crates/node-core/src/engine/skip_fcu.rs @@ -21,7 +21,7 @@ pub struct EngineSkipFcu { } impl EngineSkipFcu { - /// Creates new [EngineSkipFcu] stream wrapper. + /// Creates new [`EngineSkipFcu`] stream wrapper. pub const fn new(stream: S, threshold: usize) -> Self { Self { stream, diff --git a/crates/node-core/src/engine/skip_new_payload.rs b/crates/node-core/src/engine/skip_new_payload.rs index 084b0b781..04db70e0a 100644 --- a/crates/node-core/src/engine/skip_new_payload.rs +++ b/crates/node-core/src/engine/skip_new_payload.rs @@ -22,7 +22,7 @@ pub struct EngineSkipNewPayload { } impl EngineSkipNewPayload { - /// Creates new [EngineSkipNewPayload] stream wrapper. + /// Creates new [`EngineSkipNewPayload`] stream wrapper. pub const fn new(stream: S, threshold: usize) -> Self { Self { stream, threshold, skipped: 0 } } diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 41dbb18ad..f4519846b 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -68,7 +68,7 @@ pub static PROMETHEUS_RECORDER_HANDLE: Lazy = /// ``` /// /// This can also be used to launch a node with a temporary test database. This can be done with -/// the [NodeConfig::test] method. +/// the [`NodeConfig::test`] method. /// /// # Example /// ```rust @@ -118,11 +118,11 @@ pub struct NodeConfig { /// port numbers that conflict with each other. /// /// Changes to the following port numbers: - /// - DISCOVERY_PORT: default + `instance` - 1 - /// - DISCOVERY_V5_PORT: default + `instance` - 1 - /// - AUTH_PORT: default + `instance` * 100 - 100 - /// - HTTP_RPC_PORT: default - `instance` + 1 - /// - WS_RPC_PORT: default + `instance` * 2 - 2 + /// - `DISCOVERY_PORT`: default + `instance` - 1 + /// - `DISCOVERY_V5_PORT`: default + `instance` - 1 + /// - `AUTH_PORT`: default + `instance` * 100 - 100 + /// - `HTTP_RPC_PORT`: default - `instance` + 1 + /// - `WS_RPC_PORT`: default + `instance` * 2 - 2 pub instance: u16, /// All networking related arguments @@ -151,7 +151,7 @@ pub struct NodeConfig { } impl NodeConfig { - /// Creates a testing [NodeConfig], causing the database to be launched ephemerally. + /// Creates a testing [`NodeConfig`], causing the database to be launched ephemerally. pub fn test() -> Self { Self::default() // set all ports to zero by default for test instances @@ -170,7 +170,7 @@ impl NodeConfig { self } - /// Set the [ChainSpec] for the node + /// Set the [`ChainSpec`] for the node pub fn with_chain(mut self, chain: impl Into>) -> Self { self.chain = chain.into(); self @@ -293,7 +293,7 @@ impl NodeConfig { Ok(max_block) } - /// Create the [NetworkConfig] for the node + /// Create the [`NetworkConfig`] for the node pub fn network_config( &self, config: &Config, @@ -308,7 +308,7 @@ impl NodeConfig { Ok(self.load_network_config(config, client, executor, head, secret_key, default_peers_path)) } - /// Create the [NetworkBuilder]. + /// Create the [`NetworkBuilder`]. /// /// This only configures it and does not spawn it. 
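[Editor's note] The `EngineSkipFcu` / `EngineSkipNewPayload` wrappers above are `Stream` adapters; the same threshold/skipped bookkeeping is easier to see in an analogous std-only `Iterator` adapter (a stand-in, not reth's code):

```rust
struct SkipFirst<I> {
    inner: I,
    threshold: usize,
    skipped: usize,
}

impl<I: Iterator> Iterator for SkipFirst<I> {
    type Item = I::Item;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let item = self.inner.next()?;
            if self.skipped < self.threshold {
                // Swallow the item, matching how the engine wrappers drop the
                // first `threshold` ForkchoiceUpdated/NewPayload messages.
                self.skipped += 1;
            } else {
                return Some(item);
            }
        }
    }
}

fn main() {
    let v: Vec<_> = SkipFirst { inner: 1..=5, threshold: 2, skipped: 0 }.collect();
    assert_eq!(v, vec![3, 4, 5]);
}
```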
pub async fn build_network( @@ -327,7 +327,7 @@ impl NodeConfig { Ok(builder) } - /// Loads 'MAINNET_KZG_TRUSTED_SETUP' + /// Loads '`MAINNET_KZG_TRUSTED_SETUP`' pub fn kzg_settings(&self) -> eyre::Result> { Ok(Arc::clone(&MAINNET_KZG_TRUSTED_SETUP)) } @@ -447,7 +447,7 @@ impl NodeConfig { } } - /// Builds the [NetworkConfig] with the given [ProviderFactory]. + /// Builds the [`NetworkConfig`] with the given [`ProviderFactory`]. pub fn load_network_config( &self, config: &Config, @@ -507,7 +507,7 @@ impl NodeConfig { } /// Change rpc port numbers based on the instance number, using the inner - /// [RpcServerArgs::adjust_instance_ports] method. + /// [`RpcServerArgs::adjust_instance_ports`] method. pub fn adjust_instance_ports(&mut self) { self.rpc.adjust_instance_ports(self.instance); } diff --git a/crates/node-core/src/utils.rs b/crates/node-core/src/utils.rs index eba840b6e..dd1e577a0 100644 --- a/crates/node-core/src/utils.rs +++ b/crates/node-core/src/utils.rs @@ -37,8 +37,8 @@ pub fn get_or_create_jwt_secret_from_path(path: &Path) -> Result(network: &NetworkManager, persistent_peers_file: Option) where C: BlockReader + Unpin, diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 355a7ecab..8ea4d864e 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -26,7 +26,7 @@ pub trait NodeTypes: Send + Sync + 'static { type Engine: EngineTypes; } -/// A helper trait that is downstream of the [NodeTypes] trait and adds stateful components to the +/// A helper trait that is downstream of the [`NodeTypes`] trait and adds stateful components to the /// node. /// /// Its types are configured by node internally and are not intended to be user configurable. diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 0e948dfd2..571d36b98 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -47,38 +47,38 @@ pub type RethFullAdapter = FullNodeTypesAdapter = FullNodeTypesAdapter { builder: Builder, task_executor: TaskExecutor, @@ -269,7 +270,7 @@ where /// /// This bootstraps the node internals, creates all the components with the given [Node] /// - /// Returns a [NodeHandle] that can be used to interact with the node. + /// Returns a [`NodeHandle`] that can be used to interact with the node. pub async fn launch_node( self, node: N, @@ -378,11 +379,11 @@ where } } - /// Installs an ExEx (Execution Extension) in the node. + /// Installs an `ExEx` (Execution Extension) in the node. /// /// # Note /// - /// The ExEx ID must be unique. + /// The `ExEx` ID must be unique. pub fn install_exex(self, exex_id: impl Into, exex: F) -> Self where F: FnOnce(ExExContext, CB::Components>>) -> R @@ -433,7 +434,7 @@ pub struct BuilderContext { } impl BuilderContext { - /// Create a new instance of [BuilderContext] + /// Create a new instance of [`BuilderContext`] pub const fn new( head: Head, provider: Node::Provider, @@ -505,7 +506,7 @@ impl BuilderContext { ) } - /// Creates the [NetworkBuilder] for the node. + /// Creates the [`NetworkBuilder`] for the node. pub async fn network_builder(&self) -> eyre::Result> { self.config .build_network( @@ -520,8 +521,8 @@ impl BuilderContext { /// Convenience function to start the network. /// - /// Spawns the configured network and associated tasks and returns the [NetworkHandle] connected - /// to that network. + /// Spawns the configured network and associated tasks and returns the [`NetworkHandle`] + /// connected to that network. 
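[Editor's note] The instance-based port offsets listed in the `NodeConfig` docs above (and applied by `adjust_instance_ports`) are easy to sanity-check by hand. A worked example, assuming reth's usual defaults of 30303 (discovery), 8551 (auth), 8545 (http), and 8546 (ws); the formulas are quoted from the doc comment:

```rust
fn main() {
    let instance: u16 = 2;
    let discovery = 30303 + instance - 1; // DISCOVERY_PORT: default + instance - 1
    let auth = 8551 + instance * 100 - 100; // AUTH_PORT: default + instance * 100 - 100
    let http = 8545 - instance + 1; // HTTP_RPC_PORT: default - instance + 1
    let ws = 8546 + instance * 2 - 2; // WS_RPC_PORT: default + instance * 2 - 2
    println!("discovery={discovery} auth={auth} http={http} ws={ws}");
    // instance = 2 => discovery=30304 auth=8651 http=8544 ws=8548
}
```

Note that the http port moves down while the others move up, which is exactly what keeps two instances from colliding.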
pub fn start_network( &self, builder: NetworkBuilder, diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index fee2ab493..00a587611 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -197,11 +197,11 @@ impl> NodeBuilderWithComponents(mut self, exex_id: impl Into, exex: F) -> Self where F: FnOnce(ExExContext>) -> R + Send + 'static, @@ -230,10 +230,10 @@ impl> NodeBuilderWithComponents { - /// Additional NodeHooks that are called at specific points in the node's launch lifecycle. + /// Additional `NodeHooks` that are called at specific points in the node's launch lifecycle. pub(crate) hooks: NodeHooks, /// Additional RPC hooks. pub(crate) rpc: RpcHooks, - /// The ExExs (execution extensions) of the node. + /// The `ExExs` (execution extensions) of the node. pub(crate) exexs: Vec<(String, Box>)>, } diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index abeb2ca05..85c3f47e4 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -1,4 +1,4 @@ -//! A generic [NodeComponentsBuilder] +//! A generic [`NodeComponentsBuilder`] use crate::{ components::{ @@ -115,7 +115,7 @@ where { /// Configures the pool builder. /// - /// This accepts a [PoolBuilder] instance that will be used to create the node's transaction + /// This accepts a [`PoolBuilder`] instance that will be used to create the node's transaction /// pool. pub fn pool( self, @@ -149,7 +149,7 @@ where { /// Configures the network builder. /// - /// This accepts a [NetworkBuilder] instance that will be used to create the node's network + /// This accepts a [`NetworkBuilder`] instance that will be used to create the node's network /// stack. pub fn network( self, @@ -176,7 +176,7 @@ where /// Configures the payload builder. /// - /// This accepts a [PayloadServiceBuilder] instance that will be used to create the node's + /// This accepts a [`PayloadServiceBuilder`] instance that will be used to create the node's /// payload builder service. pub fn payload( self, @@ -203,8 +203,8 @@ where /// Configures the executor builder. /// - /// This accepts a [ExecutorBuilder] instance that will be used to create the node's components - /// for execution. + /// This accepts a [`ExecutorBuilder`] instance that will be used to create the node's + /// components for execution. pub fn executor( self, executor_builder: EB, @@ -271,9 +271,9 @@ impl Default for ComponentsBuilder<(), (), (), (), ()> { /// A type that configures all the customizable components of the node and knows how to build them. /// /// Implementers of this trait are responsible for building all the components of the node: See -/// [NodeComponents]. +/// [`NodeComponents`]. /// -/// The [ComponentsBuilder] is a generic, general purpose implementation of this trait that can be +/// The [`ComponentsBuilder`] is a generic, general purpose implementation of this trait that can be /// used to customize certain components of the node using the builder pattern and defaults, e.g. /// Ethereum and Optimism. /// A type that's responsible for building the components of the node. 
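[Editor's note] The `ComponentsBuilder` hunks above rely on a typestate-style builder: each `pool`/`network`/`payload`/`executor` call swaps one generic parameter and returns the recomposed type. A toy two-slot sketch of that shape (all names are stand-ins, not reth's generics):

```rust
#[derive(Debug, Default)]
struct ComponentsBuilder<P, N> {
    pool: P,
    network: N,
}

impl<P, N> ComponentsBuilder<P, N> {
    /// Swap in a pool builder, keeping the network slot as-is.
    fn pool<P2>(self, pool: P2) -> ComponentsBuilder<P2, N> {
        ComponentsBuilder { pool, network: self.network }
    }

    /// Swap in a network builder, keeping the pool slot as-is.
    fn network<N2>(self, network: N2) -> ComponentsBuilder<P, N2> {
        ComponentsBuilder { pool: self.pool, network }
    }
}

fn main() {
    let b = ComponentsBuilder::<(), ()>::default().pool("pool").network("net");
    println!("{b:?}");
}
```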
diff --git a/crates/node/builder/src/components/payload.rs b/crates/node/builder/src/components/payload.rs index bf5eb13ed..b24026a2c 100644 --- a/crates/node/builder/src/components/payload.rs +++ b/crates/node/builder/src/components/payload.rs @@ -9,7 +9,7 @@ use std::future::Future; pub trait PayloadServiceBuilder: Send { /// Spawns the payload service and returns the handle to it. /// - /// The [BuilderContext] is provided to allow access to the node's configuration. + /// The [`BuilderContext`] is provided to allow access to the node's configuration. fn spawn_payload_service( self, ctx: &BuilderContext, diff --git a/crates/node/builder/src/exex.rs b/crates/node/builder/src/exex.rs index ae9ace8cd..0c8f1f548 100644 --- a/crates/node/builder/src/exex.rs +++ b/crates/node/builder/src/exex.rs @@ -4,11 +4,11 @@ use reth_exex::ExExContext; use reth_node_api::FullNodeComponents; use std::future::Future; -/// A trait for launching an ExEx. +/// A trait for launching an `ExEx`. trait LaunchExEx: Send { - /// Launches the ExEx. + /// Launches the `ExEx`. /// - /// The ExEx should be able to run independently and emit events on the channels provided in + /// The `ExEx` should be able to run independently and emit events on the channels provided in /// the [`ExExContext`]. fn launch( self, @@ -18,15 +18,15 @@ trait LaunchExEx: Send { type BoxExEx = BoxFuture<'static, eyre::Result<()>>; -/// A version of [LaunchExEx] that returns a boxed future. Makes the trait object-safe. +/// A version of [`LaunchExEx`] that returns a boxed future. Makes the trait object-safe. pub(crate) trait BoxedLaunchExEx: Send { fn launch(self: Box, ctx: ExExContext) -> BoxFuture<'static, eyre::Result>; } -/// Implements [BoxedLaunchExEx] for any [LaunchExEx] that is [Send] and `'static`. +/// Implements [`BoxedLaunchExEx`] for any [`LaunchExEx`] that is [Send] and `'static`. /// -/// Returns a [BoxFuture] that resolves to a [BoxExEx]. +/// Returns a [`BoxFuture`] that resolves to a [`BoxExEx`]. impl BoxedLaunchExEx for E where E: LaunchExEx + Send + 'static, @@ -44,8 +44,8 @@ where } } -/// Implements `LaunchExEx` for any closure that takes an [ExExContext] and returns a future -/// resolving to an ExEx. +/// Implements `LaunchExEx` for any closure that takes an [`ExExContext`] and returns a future +/// resolving to an `ExEx`. impl LaunchExEx for F where Node: FullNodeComponents, diff --git a/crates/node/builder/src/hooks.rs b/crates/node/builder/src/hooks.rs index 468c84e85..86c4b56c6 100644 --- a/crates/node/builder/src/hooks.rs +++ b/crates/node/builder/src/hooks.rs @@ -10,7 +10,7 @@ pub(crate) struct NodeHooks { } impl NodeHooks { - /// Creates a new, empty [NodeHooks] instance for the given node type. + /// Creates a new, empty [`NodeHooks`] instance for the given node type. pub(crate) fn new() -> Self { Self { on_component_initialized: Box::<()>::default(), diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 1ec101925..646ce28a3 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -107,7 +107,7 @@ impl LaunchContext { Ok(()) } - /// Convenience function to [Self::configure_globals] + /// Convenience function to [`Self::configure_globals`] pub fn with_configured_globals(self) -> Self { self.configure_globals(); self @@ -141,7 +141,7 @@ impl LaunchContext { } } -/// A [LaunchContext] along with an additional value. +/// A [`LaunchContext`] along with an additional value. 
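[Editor's note] The `BoxedLaunchExEx` hunks above use a standard object-safety trick: a trait whose method returns `impl Future` cannot be a trait object, so a boxed variant returning a pinned boxed future is used for `dyn` storage, with a blanket impl for closures. A minimal stand-alone version with illustrative types:

```rust
use std::future::Future;
use std::pin::Pin;

type BoxFuture<T> = Pin<Box<dyn Future<Output = T> + Send>>;

// Object-safe variant: returns a boxed future instead of `impl Future`.
trait BoxedLaunch: Send {
    fn launch(self: Box<Self>, ctx: u64) -> BoxFuture<u64>;
}

// Blanket impl for closures, mirroring the blanket impl in the diff.
impl<F, Fut> BoxedLaunch for F
where
    F: FnOnce(u64) -> Fut + Send + 'static,
    Fut: Future<Output = u64> + Send + 'static,
{
    fn launch(self: Box<Self>, ctx: u64) -> BoxFuture<u64> {
        Box::pin(async move { (self)(ctx).await })
    }
}

fn main() {
    // The closure can now be stored behind `dyn`, which is the point.
    let boxed: Box<dyn BoxedLaunch> = Box::new(|ctx: u64| async move { ctx + 1 });
    let _future = boxed.launch(41); // would be awaited inside the node's runtime
}
```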
/// /// This can be used to sequentially attach additional values to the type during the launch process. /// @@ -239,22 +239,22 @@ impl LaunchContextWith> { self } - /// Returns the attached [NodeConfig]. + /// Returns the attached [`NodeConfig`]. pub const fn node_config(&self) -> &NodeConfig { &self.left().config } - /// Returns the attached [NodeConfig]. + /// Returns the attached [`NodeConfig`]. pub fn node_config_mut(&mut self) -> &mut NodeConfig { &mut self.left_mut().config } - /// Returns the attached toml config [reth_config::Config]. + /// Returns the attached toml config [`reth_config::Config`]. pub const fn toml_config(&self) -> &reth_config::Config { &self.left().toml_config } - /// Returns the attached toml config [reth_config::Config]. + /// Returns the attached toml config [`reth_config::Config`]. pub fn toml_config_mut(&mut self) -> &mut reth_config::Config { &mut self.left_mut().toml_config } @@ -279,17 +279,17 @@ impl LaunchContextWith> { self.node_config().dev.dev } - /// Returns the configured [PruneConfig] + /// Returns the configured [`PruneConfig`] pub fn prune_config(&self) -> Option { self.toml_config().prune.clone().or_else(|| self.node_config().prune_config()) } - /// Returns the configured [PruneModes] + /// Returns the configured [`PruneModes`] pub fn prune_modes(&self) -> Option { self.prune_config().map(|config| config.segments) } - /// Returns an initialized [PrunerBuilder] based on the configured [PruneConfig] + /// Returns an initialized [`PrunerBuilder`] based on the configured [`PruneConfig`] pub fn pruner_builder(&self) -> PrunerBuilder { PrunerBuilder::new(self.prune_config().unwrap_or_default()) .prune_delete_limit(self.chain_spec().prune_delete_limit) @@ -313,7 +313,7 @@ impl LaunchContextWith> { Ok(secret) } - /// Returns the [MiningMode] intended for --dev mode. + /// Returns the [`MiningMode`] intended for --dev mode. pub fn dev_mining_mode(&self, pending_transactions_listener: Receiver) -> MiningMode { if let Some(interval) = self.node_config().dev.block_time { MiningMode::interval(interval) @@ -329,7 +329,7 @@ impl LaunchContextWith> where DB: Database + Clone + 'static, { - /// Returns the [ProviderFactory] for the attached storage after executing a consistent check + /// Returns the [`ProviderFactory`] for the attached storage after executing a consistent check /// between the database and static files. **It may execute a pipeline unwind if it fails this /// check.** pub async fn create_provider_factory(&self) -> eyre::Result> { @@ -397,7 +397,7 @@ where Ok(factory) } - /// Creates a new [ProviderFactory] and attaches it to the launch context. + /// Creates a new [`ProviderFactory`] and attaches it to the launch context. pub async fn with_provider_factory( self, ) -> eyre::Result>>> { @@ -420,7 +420,7 @@ where self.right().db_ref() } - /// Returns the configured ProviderFactory. + /// Returns the configured `ProviderFactory`. pub const fn provider_factory(&self) -> &ProviderFactory { self.right() } @@ -430,7 +430,7 @@ where self.right().static_file_provider() } - /// Creates a new [StaticFileProducer] with the attached database. + /// Creates a new [`StaticFileProducer`] with the attached database. 
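[Editor's note] The `LaunchContextWith`/`Attached` pattern above threads one context value through the launch sequence by pairing it with each newly built component. A tiny sketch of the container (stand-in names, not reth's types):

```rust
#[derive(Debug)]
struct Attached<L, R> {
    left: L,
    right: R,
}

impl<L, R> Attached<L, R> {
    const fn new(left: L, right: R) -> Self {
        Self { left, right }
    }

    /// Attach one more value, nesting the previous pair on the left.
    fn attach<T>(self, value: T) -> Attached<Attached<L, R>, T> {
        Attached::new(self, value)
    }
}

fn main() {
    let ctx = Attached::new("config", 1u8).attach("provider-factory");
    println!("{ctx:?}");
}
```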
pub fn static_file_producer(&self) -> StaticFileProducer { StaticFileProducer::new( self.provider_factory().clone(), @@ -439,7 +439,7 @@ where ) } - /// Convenience function to [Self::init_genesis] + /// Convenience function to [`Self::init_genesis`] pub fn with_genesis(self) -> Result { init_genesis(self.provider_factory().clone())?; Ok(self) @@ -459,7 +459,7 @@ where self.node_config().max_block(client, self.provider_factory().clone()).await } - /// Convenience function to [Self::start_prometheus_endpoint] + /// Convenience function to [`Self::start_prometheus_endpoint`] pub async fn with_prometheus(self) -> eyre::Result { self.start_prometheus_endpoint().await?; Ok(self) @@ -538,7 +538,7 @@ impl Attached { } } -/// Helper container type to bundle the initial [NodeConfig] and the loaded settings from the +/// Helper container type to bundle the initial [`NodeConfig`] and the loaded settings from the /// reth.toml config #[derive(Debug, Clone)] pub struct WithConfigs { diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 35dc70059..2b2be40d2 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -48,7 +48,7 @@ pub use common::LaunchContext; /// /// This is essentially the launch logic for a node. /// -/// See also [DefaultNodeLauncher] and [NodeBuilderWithComponents::launch_with] +/// See also [`DefaultNodeLauncher`] and [`NodeBuilderWithComponents::launch_with`] pub trait LaunchNode { /// The node type that is created. type Node; diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 9946f890c..6976b0941 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -19,14 +19,14 @@ use std::sync::Arc; use crate::components::NodeComponentsBuilder; pub use reth_node_api::{FullNodeTypes, NodeTypes}; -/// A [crate::Node] is a [NodeTypes] that comes with preconfigured components. +/// A [`crate::Node`] is a [`NodeTypes`] that comes with preconfigured components. /// /// This can be used to configure the builder with a preset of components. pub trait Node: NodeTypes + Clone { /// The type that builds the node's components. type ComponentsBuilder: NodeComponentsBuilder; - /// Returns a [NodeComponentsBuilder] for the node. + /// Returns a [`NodeComponentsBuilder`] for the node. fn components_builder(self) -> Self::ComponentsBuilder; } @@ -58,36 +58,36 @@ pub struct FullNode { } impl FullNode { - /// Returns the [ChainSpec] of the node. + /// Returns the [`ChainSpec`] of the node. pub fn chain_spec(&self) -> Arc { self.provider.chain_spec() } - /// Returns the [RpcServerHandle] to the started rpc server. + /// Returns the [`RpcServerHandle`] to the started rpc server. pub const fn rpc_server_handle(&self) -> &RpcServerHandle { &self.rpc_server_handles.rpc } - /// Returns the [AuthServerHandle] to the started authenticated engine API server. + /// Returns the [`AuthServerHandle`] to the started authenticated engine API server. pub const fn auth_server_handle(&self) -> &AuthServerHandle { &self.rpc_server_handles.auth } - /// Returns the [EngineApiClient] interface for the authenticated engine API. + /// Returns the [`EngineApiClient`] interface for the authenticated engine API. /// /// This will send authenticated http requests to the node's auth server. pub fn engine_http_client(&self) -> impl EngineApiClient { self.auth_server_handle().http_client() } - /// Returns the [EngineApiClient] interface for the authenticated engine API. 
+ /// Returns the [`EngineApiClient`] interface for the authenticated engine API. /// /// This will send authenticated ws requests to the node's auth server. pub async fn engine_ws_client(&self) -> impl EngineApiClient { self.auth_server_handle().ws_client().await } - /// Returns the [EngineApiClient] interface for the authenticated engine API. + /// Returns the [`EngineApiClient`] interface for the authenticated engine API. /// /// This will send not authenticated IPC requests to the node's auth server. #[cfg(unix)] diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 81afe9bb1..0dabf0dce 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -41,7 +41,7 @@ pub(crate) struct RpcHooks { } impl RpcHooks { - /// Creates a new, empty [RpcHooks] instance for the given node type. + /// Creates a new, empty [`RpcHooks`] instance for the given node type. pub(crate) fn new() -> Self { Self { on_rpc_started: Box::<()>::default(), extend_rpc_modules: Box::<()>::default() } } @@ -150,7 +150,7 @@ impl ExtendRpcModules for () { } } -/// Helper wrapper type to encapsulate the [RethModuleRegistry] over components trait. +/// Helper wrapper type to encapsulate the [`RethModuleRegistry`] over components trait. #[derive(Debug)] pub struct RpcRegistry { pub(crate) registry: RethModuleRegistry< @@ -190,11 +190,12 @@ impl Clone for RpcRegistry { } } -/// Helper container to encapsulate [RethModuleRegistry], [TransportRpcModules] and [AuthRpcModule]. +/// Helper container to encapsulate [`RethModuleRegistry`], [`TransportRpcModules`] and +/// [`AuthRpcModule`]. /// /// This can be used to access installed modules, or create commonly used handlers like -/// [reth_rpc::EthApi], and ultimately merge additional rpc handler into the configured transport -/// modules [TransportRpcModules] as well as configured authenticated methods [AuthRpcModule]. +/// [`reth_rpc::EthApi`], and ultimately merge additional rpc handler into the configured transport +/// modules [`TransportRpcModules`] as well as configured authenticated methods [`AuthRpcModule`]. #[allow(missing_debug_implementations)] pub struct RpcContext<'a, Node: FullNodeComponents> { /// The node components. @@ -205,12 +206,12 @@ pub struct RpcContext<'a, Node: FullNodeComponents> { /// A Helper type the holds instances of the configured modules. /// - /// This provides easy access to rpc handlers, such as [RethModuleRegistry::eth_api]. + /// This provides easy access to rpc handlers, such as [`RethModuleRegistry::eth_api`]. pub registry: &'a mut RpcRegistry, /// Holds installed modules per transport type. /// /// This can be used to merge additional modules into the configured transports (http, ipc, - /// ws). See [TransportRpcModules::merge_configured] + /// ws). See [`TransportRpcModules::merge_configured`] pub modules: &'a mut TransportRpcModules, /// Holds jwt authenticated rpc module. /// diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index cce709e84..427164d7d 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -74,7 +74,7 @@ where Ok(pipeline) } -/// Builds the [Pipeline] with the given [ProviderFactory] and downloaders. +/// Builds the [Pipeline] with the given [`ProviderFactory`] and downloaders. 
#[allow(clippy::too_many_arguments)] pub async fn build_pipeline( node_config: &NodeConfig, diff --git a/crates/node/events/src/cl.rs b/crates/node/events/src/cl.rs index 6d9c2cb99..47f1de668 100644 --- a/crates/node/events/src/cl.rs +++ b/crates/node/events/src/cl.rs @@ -19,7 +19,7 @@ const NO_TRANSITION_CONFIG_EXCHANGED_PERIOD: Duration = Duration::from_secs(120) /// after which the warning is issued. const NO_FORKCHOICE_UPDATE_RECEIVED_PERIOD: Duration = Duration::from_secs(120); -/// A Stream of [ConsensusLayerHealthEvent]. +/// A Stream of [`ConsensusLayerHealthEvent`]. pub struct ConsensusLayerHealthEvents { interval: Interval, canon_chain: Box, @@ -32,7 +32,7 @@ impl fmt::Debug for ConsensusLayerHealthEvents { } impl ConsensusLayerHealthEvents { - /// Creates a new [ConsensusLayerHealthEvents] with the given canonical chain tracker. + /// Creates a new [`ConsensusLayerHealthEvents`] with the given canonical chain tracker. pub fn new(canon_chain: Box) -> Self { // Skip the first tick to prevent the false `ConsensusLayerHealthEvent::NeverSeen` event. let interval = tokio::time::interval_at(Instant::now() + CHECK_INTERVAL, CHECK_INTERVAL); diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 00d3517fc..bd1e2506e 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -37,7 +37,7 @@ const INFO_MESSAGE_INTERVAL: Duration = Duration::from_secs(25); struct NodeState { /// Database environment. /// Used for freelist calculation reported in the "Status" log message. - /// See [EventHandler::poll]. + /// See [`EventHandler::poll`]. db: DB, /// Connection to the network. network: Option, @@ -394,7 +394,7 @@ pub enum NodeEvent { ConsensusLayerHealth(ConsensusLayerHealthEvent), /// A pruner event Pruner(PrunerEvent), - /// A static_file_producer event + /// A `static_file_producer` event StaticFileProducer(StaticFileProducerEvent), /// Used to encapsulate various conditions or situations that do not /// naturally fit into the other more specific variants. @@ -643,8 +643,8 @@ impl Eta { /// Format ETA for a given stage. /// /// NOTE: Currently ETA is enabled only for the stages that have predictable progress. - /// It's not the case for network-dependent ([StageId::Headers] and [StageId::Bodies]) and - /// [StageId::Execution] stages. + /// It's not the case for network-dependent ([`StageId::Headers`] and [`StageId::Bodies`]) and + /// [`StageId::Execution`] stages. 
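[Editor's note] The `ConsensusLayerHealthEvents` constructor above avoids a false `NeverSeen` event by starting its timer with `tokio::time::interval_at`, whose first tick fires one full period in the future instead of immediately. A minimal demonstration (assumes tokio with the `rt`, `time`, and `macros` features):

```rust
use tokio::time::{interval_at, Duration, Instant};

#[tokio::main]
async fn main() {
    let check = Duration::from_millis(10);
    // First tick fires one full period from now instead of immediately.
    let mut interval = interval_at(Instant::now() + check, check);
    let started = Instant::now();
    interval.tick().await;
    assert!(started.elapsed() >= check);
}
```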
fn fmt_for_stage(&self, stage: StageId) -> Option { if !self.is_available() || matches!(stage, StageId::Headers | StageId::Bodies | StageId::Execution) diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index a9f07f23c..5ac9cf924 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -31,11 +31,11 @@ pub struct OptimismBeaconConsensus { } impl OptimismBeaconConsensus { - /// Create a new instance of [OptimismBeaconConsensus] + /// Create a new instance of [`OptimismBeaconConsensus`] /// /// # Panics /// - /// If given chain spec is not optimism [ChainSpec::is_optimism] + /// If given chain spec is not optimism [`ChainSpec::is_optimism`] pub fn new(chain_spec: Arc) -> Self { assert!(chain_spec.is_optimism(), "optimism consensus only valid for optimism chains"); Self { chain_spec } diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 62b87678b..6ad5b9681 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -20,10 +20,10 @@ const CREATE_2_DEPLOYER_CODEHASH: B256 = /// The raw bytecode of the create2 deployer contract. const CREATE_2_DEPLOYER_BYTECODE: [u8; 1584] = hex!("6080604052600436106100435760003560e01c8063076c37b21461004f578063481286e61461007157806356299481146100ba57806366cfa057146100da57600080fd5b3661004a57005b600080fd5b34801561005b57600080fd5b5061006f61006a366004610327565b6100fa565b005b34801561007d57600080fd5b5061009161008c366004610327565b61014a565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b3480156100c657600080fd5b506100916100d5366004610349565b61015d565b3480156100e657600080fd5b5061006f6100f53660046103ca565b610172565b61014582826040518060200161010f9061031a565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe082820381018352601f90910116604052610183565b505050565b600061015683836102e7565b9392505050565b600061016a8484846102f0565b949350505050565b61017d838383610183565b50505050565b6000834710156101f4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f437265617465323a20696e73756666696369656e742062616c616e636500000060448201526064015b60405180910390fd5b815160000361025f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f437265617465323a2062797465636f6465206c656e677468206973207a65726f60448201526064016101eb565b8282516020840186f5905073ffffffffffffffffffffffffffffffffffffffff8116610156576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f437265617465323a204661696c6564206f6e206465706c6f790000000000000060448201526064016101eb565b60006101568383305b6000604051836040820152846020820152828152600b8101905060ff815360559020949350505050565b61014e806104ad83390190565b6000806040838503121561033a57600080fd5b50508035926020909101359150565b60008060006060848603121561035e57600080fd5b8335925060208401359150604084013573ffffffffffffffffffffffffffffffffffffffff8116811461039057600080fd5b809150509250925092565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000806000606084860312156103df57600080fd5b8335925060208401359150604084013567ffffffffffffffff8082111561040557600080fd5b818601915086601f83011261041957600080fd5b81358181111561042b5761042b61039b565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156104715761047161039b565b8160405282815289602084870101111561048a57600080fd5b826020860
160208301376000602084830101528095505050505050925092509256fe608060405234801561001057600080fd5b5061012e806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063249cb3fa14602d575b600080fd5b603c603836600460b1565b604e565b60405190815260200160405180910390f35b60008281526020818152604080832073ffffffffffffffffffffffffffffffffffffffff8516845290915281205460ff16608857600060aa565b7fa2ef4600d742022d532d4747cb3547474667d6f13804902513b2ec01c848f4b45b9392505050565b6000806040838503121560c357600080fd5b82359150602083013573ffffffffffffffffffffffffffffffffffffffff8116811460ed57600080fd5b80915050925092905056fea26469706673582212205ffd4e6cede7d06a5daf93d48d0541fc68189eeb16608c1999a82063b666eb1164736f6c63430008130033a2646970667358221220fdc4a0fe96e3b21c108ca155438d37c9143fb01278a3c1d274948bad89c564ba64736f6c63430008130033"); -/// The function selector of the "setL1BlockValuesEcotone" function in the L1Block contract. +/// The function selector of the "setL1BlockValuesEcotone" function in the `L1Block` contract. const L1_BLOCK_ECOTONE_SELECTOR: [u8; 4] = hex!("440a5e20"); -/// Extracts the [L1BlockInfo] from the L2 block. The L1 info transaction is always the first +/// Extracts the [`L1BlockInfo`] from the L2 block. The L1 info transaction is always the first /// transaction in the L2 block. /// /// Returns an error if the L1 info transaction is not found, if the block is empty. @@ -52,7 +52,7 @@ pub fn extract_l1_info(block: &Block) -> Result Result { // The setL1BlockValues tx calldata must be exactly 260 bytes long, considering that // we already removed the first 4 bytes (the function selector). Detailed breakdown: @@ -94,7 +94,7 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result Result( args: RollupArgs, ) -> ComponentsBuilder< diff --git a/crates/optimism/node/src/rpc.rs b/crates/optimism/node/src/rpc.rs index d3d004a19..be5f81af7 100644 --- a/crates/optimism/node/src/rpc.rs +++ b/crates/optimism/node/src/rpc.rs @@ -12,10 +12,10 @@ use std::sync::{atomic::AtomicUsize, Arc}; /// Error type when interacting with the Sequencer #[derive(Debug, thiserror::Error)] pub enum SequencerRpcError { - /// Wrapper around a [hyper::Error]. + /// Wrapper around a [`hyper::Error`]. #[error(transparent)] HyperError(#[from] hyper::Error), - /// Wrapper around an [reqwest::Error]. + /// Wrapper around an [`reqwest::Error`]. #[error(transparent)] HttpError(#[from] reqwest::Error), /// Thrown when serializing transaction to forward to sequencer @@ -46,13 +46,13 @@ pub struct SequencerClient { } impl SequencerClient { - /// Creates a new [SequencerClient]. + /// Creates a new [`SequencerClient`]. pub fn new(sequencer_endpoint: impl Into) -> Self { let client = Client::builder().use_rustls_tls().build().unwrap(); Self::with_client(sequencer_endpoint, client) } - /// Creates a new [SequencerClient]. + /// Creates a new [`SequencerClient`]. pub fn with_client(sequencer_endpoint: impl Into, http_client: Client) -> Self { let inner = SequencerClientInner { sequencer_endpoint: sequencer_endpoint.into(), diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index db6a6266e..ea26b9aff 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -47,7 +47,7 @@ where Client: StateProviderFactory + BlockReaderIdExt, Tx: EthPoolTransaction, { - /// Create a new [OpTransactionValidator]. + /// Create a new [`OpTransactionValidator`]. 
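[Editor's note] The `L1_BLOCK_ECOTONE_SELECTOR` constant above is the usual 4-byte Solidity selector: the first four bytes of keccak256 of the function signature. A sketch using the `sha3` crate (an assumption; any keccak256 implementation works):

```rust
use sha3::{Digest, Keccak256};

/// First four bytes of keccak256(signature), i.e. the Solidity selector.
fn selector(signature: &str) -> [u8; 4] {
    let hash = Keccak256::digest(signature.as_bytes());
    [hash[0], hash[1], hash[2], hash[3]]
}

fn main() {
    // For "setL1BlockValuesEcotone()" this prints [44, 0a, 5e, 20],
    // matching the hex constant in the diff.
    println!("{:02x?}", selector("setL1BlockValuesEcotone()"));
}
```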
pub fn new(inner: EthTransactionValidator) -> Self { let this = Self::with_block_info(inner, OpL1BlockInfo::default()); if let Ok(Some(block)) = @@ -65,7 +65,7 @@ where this } - /// Create a new [OpTransactionValidator] with the given [OpL1BlockInfo]. + /// Create a new [`OpTransactionValidator`] with the given [`OpL1BlockInfo`]. pub fn with_block_info( inner: EthTransactionValidator, block_info: OpL1BlockInfo, @@ -83,9 +83,9 @@ where /// Validates a single transaction. /// - /// See also [TransactionValidator::validate_transaction] + /// See also [`TransactionValidator::validate_transaction`] /// - /// This behaves the same as [EthTransactionValidator::validate_one], but in addition, ensures + /// This behaves the same as [`EthTransactionValidator::validate_one`], but in addition, ensures /// that the account has enough balance to cover the L1 gas cost. pub fn validate_one( &self, @@ -153,7 +153,7 @@ where /// /// Returns all outcomes for the given transactions in the same order. /// - /// See also [Self::validate_one] + /// See also [`Self::validate_one`] pub fn validate_all( &self, transactions: Vec<(TransactionOrigin, Tx)>, diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 6ad1203f8..ff72160d4 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -39,7 +39,7 @@ pub struct OptimismPayloadBuilder { } impl OptimismPayloadBuilder { - /// OptimismPayloadBuilder constructor. + /// `OptimismPayloadBuilder` constructor. pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { Self { compute_pending_block: true, chain_spec, evm_config } } @@ -67,7 +67,7 @@ impl OptimismPayloadBuilder { } } -/// Implementation of the [PayloadBuilder] trait for [OptimismPayloadBuilder]. +/// Implementation of the [`PayloadBuilder`] trait for [`OptimismPayloadBuilder`]. impl PayloadBuilder for OptimismPayloadBuilder where Client: StateProviderFactory, diff --git a/crates/optimism/payload/src/error.rs b/crates/optimism/payload/src/error.rs index ca258a93d..38f5bb10b 100644 --- a/crates/optimism/payload/src/error.rs +++ b/crates/optimism/payload/src/error.rs @@ -4,7 +4,7 @@ #[derive(Debug, thiserror::Error)] pub enum OptimismPayloadBuilderError { /// Thrown when a transaction fails to convert to a - /// [reth_primitives::TransactionSignedEcRecovered]. + /// [`reth_primitives::TransactionSignedEcRecovered`]. #[error("failed to convert deposit transaction to TransactionSignedEcRecovered")] TransactionEcRecoverFailed, /// Thrown when the L1 block info could not be parsed from the calldata of the diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 706b1c2bf..7d426f552 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -27,7 +27,7 @@ use std::sync::Arc; pub struct OptimismPayloadBuilderAttributes { /// Inner ethereum payload builder attributes pub payload_attributes: EthPayloadBuilderAttributes, - /// NoTxPool option for the generated payload + /// `NoTxPool` option for the generated payload pub no_tx_pool: bool, /// Transactions for the generated payload pub transactions: Vec, @@ -41,7 +41,7 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { /// Creates a new payload builder for the given parent block and the attributes. 
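[Editor's note] The `validate_all`-over-`validate_one` relationship described above is an order-preserving map. A trivial stand-in with illustrative transaction and outcome types:

```rust
#[derive(Debug)]
enum Outcome {
    Valid(u64),
    Invalid(&'static str),
}

fn validate_one(tx: u64) -> Outcome {
    if tx > 0 { Outcome::Valid(tx) } else { Outcome::Invalid("zero value") }
}

/// Validates all transactions, returning outcomes in the input order.
fn validate_all(txs: Vec<u64>) -> Vec<Outcome> {
    txs.into_iter().map(validate_one).collect()
}

fn main() {
    println!("{:?}", validate_all(vec![1, 0, 3]));
}
```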
/// - /// Derives the unique [PayloadId] for the given parent and attributes + /// Derives the unique [`PayloadId`] for the given parent and attributes fn try_new(parent: B256, attributes: OptimismPayloadAttributes) -> Result { let (id, transactions) = { let transactions: Vec<_> = attributes @@ -298,7 +298,7 @@ impl From for OptimismExecutionPayloadEnvelopeV4 { } } -/// Generates the payload id for the configured payload from the [OptimismPayloadAttributes]. +/// Generates the payload id for the configured payload from the [`OptimismPayloadAttributes`]. /// /// Returns an 8-byte identifier by hashing the payload components with sha256 hash. pub(crate) fn payload_id_optimism( diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index a96d7dedc..7f3646d2a 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -66,16 +66,17 @@ pub struct BasicPayloadJobGenerator { chain_spec: Arc, /// The type responsible for building payloads. /// - /// See [PayloadBuilder] + /// See [`PayloadBuilder`] builder: Builder, - /// Stored cached_reads for new payload jobs. + /// Stored `cached_reads` for new payload jobs. pre_cached: Option, } // === impl BasicPayloadJobGenerator === impl BasicPayloadJobGenerator { - /// Creates a new [BasicPayloadJobGenerator] with the given config and custom [PayloadBuilder] + /// Creates a new [`BasicPayloadJobGenerator`] with the given config and custom + /// [`PayloadBuilder`] pub fn with_builder( client: Client, pool: Pool, @@ -214,9 +215,9 @@ where } } -/// Pre-filled [CachedReads] for a specific block. +/// Pre-filled [`CachedReads`] for a specific block. /// -/// This is extracted from the [CanonStateNotification] for the tip block. +/// This is extracted from the [`CanonStateNotification`] for the tip block. #[derive(Debug, Clone)] pub struct PrecachedState { /// The block for which the state is pre-cached. @@ -246,7 +247,7 @@ impl PayloadTaskGuard { } } -/// Settings for the [BasicPayloadJobGenerator]. +/// Settings for the [`BasicPayloadJobGenerator`]. #[derive(Debug, Clone)] pub struct BasicPayloadJobGeneratorConfig { /// Data to include in the block's extra data field. @@ -255,7 +256,7 @@ pub struct BasicPayloadJobGeneratorConfig { interval: Duration, /// The deadline for when the payload builder job should resolve. /// - /// By default this is [SLOT_DURATION]: 12s + /// By default this is [`SLOT_DURATION`]: 12s deadline: Duration, /// Maximum number of tasks to spawn for building a payload. max_payload_tasks: usize, @@ -341,7 +342,7 @@ where metrics: PayloadBuilderMetrics, /// The type responsible for building payloads. /// - /// See [PayloadBuilder] + /// See [`PayloadBuilder`] builder: Builder, } @@ -644,7 +645,7 @@ pub struct PayloadConfig { } impl PayloadConfig { - /// Returns an owned instance of the [PayloadConfig]'s extra_data bytes. + /// Returns an owned instance of the [`PayloadConfig`]'s `extra_data` bytes. pub fn extra_data(&self) -> Bytes { self.extra_data.clone() } @@ -813,7 +814,7 @@ impl WithdrawalsOutcome { } } -/// Executes the withdrawals and commits them to the _runtime_ Database and BundleState. +/// Executes the withdrawals and commits them to the _runtime_ Database and `BundleState`. /// /// Returns the withdrawals root. /// @@ -849,12 +850,12 @@ pub fn commit_withdrawals>( /// Apply the [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) pre block contract call. 
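[Editor's note] `payload_id_optimism` above is documented as "an 8-byte identifier by hashing the payload components with sha256". A sketch of that derivation using the `sha2` crate (an assumption); the exact set and encoding of hashed components here is illustrative, not reth's:

```rust
use sha2::{Digest, Sha256};

/// sha256 over the parent hash plus serialized attributes, truncated to 8 bytes.
fn payload_id(parent: &[u8; 32], attributes_bytes: &[u8]) -> [u8; 8] {
    let mut hasher = Sha256::new();
    hasher.update(parent);
    hasher.update(attributes_bytes);
    let out = hasher.finalize();
    out[..8].try_into().expect("sha256 output is 32 bytes")
}

fn main() {
    println!("{:02x?}", payload_id(&[0u8; 32], b"timestamp:1700000000"));
}
```

Deriving the id from the inputs makes repeated `forkchoiceUpdated` calls with identical attributes map to the same in-flight job.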
/// /// This constructs a new [Evm] with the given DB, and environment -/// ([CfgEnvWithHandlerCfg] and [BlockEnv]) to execute the pre block contract call. +/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the pre block contract call. /// /// The parent beacon block root used for the call is gathered from the given -/// [PayloadBuilderAttributes]. +/// [`PayloadBuilderAttributes`]. /// -/// This uses [apply_beacon_root_contract_call] to ultimately apply the beacon root contract state +/// This uses [`apply_beacon_root_contract_call`] to ultimately apply the beacon root contract state /// change. pub fn pre_block_beacon_root_contract_call( db: &mut DB, @@ -892,9 +893,9 @@ where /// Apply the [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) post block contract call. /// /// This constructs a new [Evm] with the given DB, and environment -/// ([CfgEnvWithHandlerCfg] and [BlockEnv]) to execute the post block contract call. +/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the post block contract call. /// -/// This uses [apply_withdrawal_requests_contract_call] to ultimately calculate the +/// This uses [`apply_withdrawal_requests_contract_call`] to ultimately calculate the /// [requests](Request). pub fn post_block_withdrawal_requests_contract_call( db: &mut DB, diff --git a/crates/payload/builder/src/database.rs b/crates/payload/builder/src/database.rs index ac36de98c..340a8510a 100644 --- a/crates/payload/builder/src/database.rs +++ b/crates/payload/builder/src/database.rs @@ -12,7 +12,7 @@ use std::{ collections::{hash_map::Entry, HashMap}, }; -/// A container type that caches reads from an underlying [DatabaseRef]. +/// A container type that caches reads from an underlying [`DatabaseRef`]. /// /// This is intended to be used in conjunction with `revm::db::State` /// during payload building which repeatedly accesses the same data. @@ -41,7 +41,7 @@ pub struct CachedReads { // === impl CachedReads === impl CachedReads { - /// Gets a [DatabaseRef] that will cache reads from the given database. + /// Gets a [`DatabaseRef`] that will cache reads from the given database. pub fn as_db(&mut self, db: DB) -> CachedReadsDBRef<'_, DB> { CachedReadsDBRef { inner: RefCell::new(self.as_db_mut(db)) } } @@ -61,7 +61,7 @@ impl CachedReads { } } -/// A [Database] that caches reads inside [CachedReads]. +/// A [Database] that caches reads inside [`CachedReads`]. #[derive(Debug)] pub struct CachedReadsDbMut<'a, DB> { /// The cache of reads. @@ -123,9 +123,9 @@ impl<'a, DB: DatabaseRef> Database for CachedReadsDbMut<'a, DB> { } } -/// A [DatabaseRef] that caches reads inside [CachedReads]. +/// A [`DatabaseRef`] that caches reads inside [`CachedReads`]. /// -/// This is intended to be used as the [DatabaseRef] for +/// This is intended to be used as the [`DatabaseRef`] for /// `revm::db::State` for repeated payload build jobs. #[derive(Debug)] pub struct CachedReadsDBRef<'a, DB> { diff --git a/crates/payload/builder/src/noop.rs b/crates/payload/builder/src/noop.rs index bdfadf766..ccefa6777 100644 --- a/crates/payload/builder/src/noop.rs +++ b/crates/payload/builder/src/noop.rs @@ -22,7 +22,7 @@ impl NoopPayloadBuilderService where Engine: EngineTypes + 'static, { - /// Creates a new [NoopPayloadBuilderService]. + /// Creates a new [`NoopPayloadBuilderService`]. 
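[Editor's note] The `CachedReads` hunks above describe memoizing reads from a slower backing store for repeated payload build jobs. A std-only sketch of the idea, with a closure standing in for revm's `DatabaseRef`:

```rust
use std::collections::HashMap;

struct CachedReads<'a> {
    cache: HashMap<u64, u64>,
    backend: &'a dyn Fn(u64) -> u64,
}

impl<'a> CachedReads<'a> {
    /// Return the cached value, falling back to the backend exactly once.
    fn basic(&mut self, address: u64) -> u64 {
        let backend = self.backend;
        *self.cache.entry(address).or_insert_with(|| backend(address))
    }
}

fn main() {
    let backend = |addr: u64| addr * 2; // pretend this is an expensive DB read
    let mut db = CachedReads { cache: HashMap::new(), backend: &backend };
    assert_eq!(db.basic(21), 42);
    assert_eq!(db.basic(21), 42); // second call served from the cache
}
```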
pub fn new() -> (Self, PayloadBuilderHandle) { let (service_tx, command_rx) = mpsc::unbounded_channel(); ( diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index ec34849dd..f39037275 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -29,7 +29,7 @@ use tracing::{debug, info, trace, warn}; type PayloadFuture
<P>
= Pin> + Send + Sync>>; -/// A communication channel to the [PayloadBuilderService] that can retrieve payloads. +/// A communication channel to the [`PayloadBuilderService`] that can retrieve payloads. #[derive(Debug)] pub struct PayloadStore { inner: PayloadBuilderHandle, @@ -43,8 +43,8 @@ where { /// Resolves the payload job and returns the best payload that has been built so far. /// - /// Note: depending on the installed [PayloadJobGenerator], this may or may not terminate the - /// job, See [PayloadJob::resolve]. + /// Note: depending on the installed [`PayloadJobGenerator`], this may or may not terminate the + /// job, See [`PayloadJob::resolve`]. pub async fn resolve( &self, id: PayloadId, @@ -91,12 +91,12 @@ where } } -/// A communication channel to the [PayloadBuilderService]. +/// A communication channel to the [`PayloadBuilderService`]. /// /// This is the API used to create new payloads and to get the current state of existing ones. #[derive(Debug)] pub struct PayloadBuilderHandle { - /// Sender half of the message channel to the [PayloadBuilderService]. + /// Sender half of the message channel to the [`PayloadBuilderService`]. to_service: mpsc::UnboundedSender>, } @@ -108,16 +108,16 @@ where { /// Creates a new payload builder handle for the given channel. /// - /// Note: this is only used internally by the [PayloadBuilderService] to manage the payload - /// building flow See [PayloadBuilderService::poll] for implementation details. + /// Note: this is only used internally by the [`PayloadBuilderService`] to manage the payload + /// building flow See [`PayloadBuilderService::poll`] for implementation details. pub const fn new(to_service: mpsc::UnboundedSender>) -> Self { Self { to_service } } /// Resolves the payload job and returns the best payload that has been built so far. /// - /// Note: depending on the installed [PayloadJobGenerator], this may or may not terminate the - /// job, See [PayloadJob::resolve]. + /// Note: depending on the installed [`PayloadJobGenerator`], this may or may not terminate the + /// job, See [`PayloadJob::resolve`]. async fn resolve( &self, id: PayloadId, @@ -156,8 +156,8 @@ where /// Sends a message to the service to start building a new payload for the given payload. /// - /// This is the same as [PayloadBuilderHandle::new_payload] but does not wait for the result and - /// returns the receiver instead + /// This is the same as [`PayloadBuilderHandle::new_payload`] but does not wait for the result + /// and returns the receiver instead pub fn send_new_payload( &self, attr: Engine::PayloadBuilderAttributes, @@ -240,11 +240,12 @@ where Gen::Job: PayloadJob, ::BuiltPayload: Into, { - /// Creates a new payload builder service and returns the [PayloadBuilderHandle] to interact + /// Creates a new payload builder service and returns the [`PayloadBuilderHandle`] to interact /// with it. /// /// This also takes a stream of chain events that will be forwarded to the generator to apply - /// additional logic when new state is committed. See also [PayloadJobGenerator::on_new_state]. + /// additional logic when new state is committed. See also + /// [`PayloadJobGenerator::on_new_state`]. pub fn new(generator: Gen, chain_events: St) -> (Self, PayloadBuilderHandle) { let (service_tx, command_rx) = mpsc::unbounded_channel(); let (payload_events, _) = broadcast::channel(PAYLOAD_EVENTS_BUFFER_SIZE); @@ -450,7 +451,7 @@ where } } -/// Message type for the [PayloadBuilderService]. +/// Message type for the [`PayloadBuilderService`]. 
pub enum PayloadServiceCommand { /// Start building a new payload. BuildNewPayload( diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index 40237aa92..26a3fab3f 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -14,7 +14,7 @@ use std::{ task::{Context, Poll}, }; -/// Creates a new [PayloadBuilderService] for testing purposes. +/// Creates a new [`PayloadBuilderService`] for testing purposes. pub fn test_payload_service() -> ( PayloadBuilderService< TestPayloadJobGenerator, @@ -32,7 +32,7 @@ where PayloadBuilderService::new(Default::default(), futures_util::stream::empty()) } -/// Creates a new [PayloadBuilderService] for testing purposes and spawns it in the background. +/// Creates a new [`PayloadBuilderService`] for testing purposes and spawns it in the background. pub fn spawn_test_payload_service() -> PayloadBuilderHandle where Engine: EngineTypes< @@ -45,7 +45,7 @@ where handle } -/// A [PayloadJobGenerator] for testing purposes +/// A [`PayloadJobGenerator`] for testing purposes #[derive(Debug, Default)] #[non_exhaustive] pub struct TestPayloadJobGenerator; @@ -61,7 +61,7 @@ impl PayloadJobGenerator for TestPayloadJobGenerator { } } -/// A [PayloadJobGenerator] for testing purposes +/// A [`PayloadJobGenerator`] for testing purposes #[derive(Debug)] pub struct TestPayloadJob { attr: EthPayloadBuilderAttributes, diff --git a/crates/payload/ethereum/src/lib.rs b/crates/payload/ethereum/src/lib.rs index 8a0ac50ce..599138fdb 100644 --- a/crates/payload/ethereum/src/lib.rs +++ b/crates/payload/ethereum/src/lib.rs @@ -47,7 +47,7 @@ pub struct EthereumPayloadBuilder { } impl EthereumPayloadBuilder { - /// EthereumPayloadBuilder constructor. + /// `EthereumPayloadBuilder` constructor. pub const fn new(evm_config: EvmConfig) -> Self { Self { evm_config } } diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index 13c20541f..eced6a481 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -47,7 +47,7 @@ impl ExecutionPayloadValidator { /// Cancun specific checks for EIP-4844 blob transactions. /// /// Ensures that the number of blob versioned hashes matches the number hashes included in the - /// _separate_ block_versioned_hashes of the cancun payload fields. + /// _separate_ `block_versioned_hashes` of the cancun payload fields. fn ensure_matching_blob_versioned_hashes( &self, sealed_block: &SealedBlock, @@ -91,13 +91,13 @@ impl ExecutionPayloadValidator { /// The checks are done in the order that conforms with the engine-API specification. /// /// This is intended to be invoked after receiving the payload from the CLI. - /// The additional [MaybeCancunPayloadFields] are not part of the payload, but are additional fields in the `engine_newPayloadV3` RPC call, See also + /// The additional [`MaybeCancunPayloadFields`] are not part of the payload, but are additional fields in the `engine_newPayloadV3` RPC call, See also /// /// If the cancun fields are provided this also validates that the versioned hashes in the block /// match the versioned hashes passed in the - /// [CancunPayloadFields](reth_rpc_types::engine::CancunPayloadFields), if the cancun payload + /// [`CancunPayloadFields`](reth_rpc_types::engine::CancunPayloadFields), if the cancun payload /// fields are provided. 
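[Editor's note] The `PayloadBuilderService` / `PayloadBuilderHandle` split documented above is the common service/handle pattern: the handle owns an mpsc sender of commands, the service owns the receiver, and request/response calls like `resolve` pair a command with a oneshot channel for the reply. A minimal sketch with stand-in types (assumes tokio with `rt`, `sync`, and `macros` features):

```rust
use tokio::sync::{mpsc, oneshot};

enum Command {
    Resolve(u64, oneshot::Sender<&'static str>),
}

struct Handle {
    to_service: mpsc::UnboundedSender<Command>,
}

impl Handle {
    /// Send a command carrying a oneshot sender, then await the reply.
    async fn resolve(&self, id: u64) -> Option<&'static str> {
        let (tx, rx) = oneshot::channel();
        self.to_service.send(Command::Resolve(id, tx)).ok()?;
        rx.await.ok()
    }
}

#[tokio::main]
async fn main() {
    let (to_service, mut command_rx) = mpsc::unbounded_channel();
    // The "service": drain commands and answer each oneshot.
    tokio::spawn(async move {
        while let Some(Command::Resolve(_id, tx)) = command_rx.recv().await {
            let _ = tx.send("payload");
        }
    });
    assert_eq!(Handle { to_service }.resolve(1).await, Some("payload"));
}
```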
If the payload fields are not provided, but versioned hashes exist - /// in the block, this is considered an error: [PayloadError::InvalidVersionedHashes]. + /// in the block, this is considered an error: [`PayloadError::InvalidVersionedHashes`]. /// /// This validates versioned hashes according to the Engine API Cancun spec: /// diff --git a/crates/primitives/benches/integer_list.rs b/crates/primitives/benches/integer_list.rs index 8ce985d13..6dcec9139 100644 --- a/crates/primitives/benches/integer_list.rs +++ b/crates/primitives/benches/integer_list.rs @@ -85,7 +85,7 @@ criterion_group! { } criterion_main!(benches); -/// Implementation from https://github.com/paradigmxyz/reth/blob/cda5d4e7c53ccc898b7725eb5d3b46c35e4da7f8/crates/primitives/src/integer_list.rs +/// Implementation from /// adapted to work with `sucds = "0.8.1"` #[allow(unused, unreachable_pub)] mod elias_fano { @@ -111,7 +111,7 @@ mod elias_fano { } impl IntegerList { - /// Creates an IntegerList from a list of integers. `usize` is safe to use since + /// Creates an `IntegerList` from a list of integers. `usize` is safe to use since /// [`sucds::EliasFano`] restricts its compilation to 64bits. /// /// # Returns diff --git a/crates/primitives/src/account.rs b/crates/primitives/src/account.rs index 786871948..4ce9fea8c 100644 --- a/crates/primitives/src/account.rs +++ b/crates/primitives/src/account.rs @@ -28,15 +28,15 @@ impl Account { self.bytecode_hash.is_some() } - /// After SpuriousDragon empty account is defined as account with nonce == 0 && balance == 0 && - /// bytecode = None (or hash is [`KECCAK_EMPTY`]). + /// After `SpuriousDragon` empty account is defined as account with nonce == 0 && balance == 0 + /// && bytecode = None (or hash is [`KECCAK_EMPTY`]). pub fn is_empty(&self) -> bool { self.nonce == 0 && self.balance.is_zero() && self.bytecode_hash.map_or(true, |hash| hash == KECCAK_EMPTY) } - /// Makes an [Account] from [GenesisAccount] type + /// Makes an [Account] from [`GenesisAccount`] type pub fn from_genesis_account(value: &GenesisAccount) -> Self { Self { // nonce must exist, so we default to zero when converting a genesis account diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index a797b01bb..af0e2680c 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -85,7 +85,7 @@ impl Block { } } - /// Expensive operation that recovers transaction signer. See [SealedBlockWithSenders]. + /// Expensive operation that recovers transaction signer. See [`SealedBlockWithSenders`]. pub fn senders(&self) -> Option> { TransactionSigned::recover_signers(&self.body, self.body.len()) } @@ -107,7 +107,7 @@ impl Block { /// /// If the number of senders does not match the number of transactions in the block, this falls /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. - /// See also [TransactionSigned::recover_signer_unchecked] + /// See also [`TransactionSigned::recover_signer_unchecked`] /// /// Returns an error if a signature is invalid. #[track_caller] @@ -323,7 +323,7 @@ impl SealedBlock { (self.header, self.body, self.ommers) } - /// Splits the [BlockBody] and [SealedHeader] into separate components + /// Splits the [`BlockBody`] and [`SealedHeader`] into separate components #[inline] pub fn split_header_body(self) -> (SealedHeader, BlockBody) { ( @@ -363,7 +363,7 @@ impl SealedBlock { self.blob_versioned_hashes_iter().collect() } - /// Expensive operation that recovers transaction signer. See [SealedBlockWithSenders]. 
+ /// Expensive operation that recovers transaction signer. See [`SealedBlockWithSenders`]. pub fn senders(&self) -> Option> { TransactionSigned::recover_signers(&self.body, self.body.len()) } @@ -392,7 +392,7 @@ impl SealedBlock { } } - /// Calculates a heuristic for the in-memory size of the [SealedBlock]. + /// Calculates a heuristic for the in-memory size of the [`SealedBlock`]. #[inline] pub fn size(&self) -> usize { self.header.size() + @@ -438,7 +438,7 @@ impl SealedBlock { Ok(()) } - /// Returns a vector of transactions RLP encoded with [TransactionSigned::encode_enveloped]. + /// Returns a vector of transactions RLP encoded with [`TransactionSigned::encode_enveloped`]. pub fn raw_transactions(&self) -> Vec { self.body.iter().map(|tx| tx.envelope_encoded()).collect() } @@ -485,7 +485,7 @@ impl SealedBlockWithSenders { (self.block, self.senders) } - /// Returns the unsealed [BlockWithSenders] + /// Returns the unsealed [`BlockWithSenders`] #[inline] pub fn unseal(self) -> BlockWithSenders { let Self { block, senders } = self; @@ -600,7 +600,7 @@ impl BlockBody { self.requests.as_ref().map(|r| crate::proofs::calculate_requests_root(&r.0)) } - /// Calculates a heuristic for the in-memory size of the [BlockBody]. + /// Calculates a heuristic for the in-memory size of the [`BlockBody`]. #[inline] pub fn size(&self) -> usize { self.transactions.iter().map(TransactionSigned::size).sum::() + diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 85da33a98..f0389e42d 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -212,7 +212,7 @@ pub static HOLESKY: Lazy> = Lazy::new(|| { /// Dev testnet specification /// -/// Includes 20 prefunded accounts with 10_000 ETH each derived from mnemonic "test test test test +/// Includes 20 prefunded accounts with `10_000` ETH each derived from mnemonic "test test test test /// test test test test test test test junk". pub static DEV: Lazy> = Lazy::new(|| { ChainSpec { @@ -454,14 +454,14 @@ pub static BASE_MAINNET: Lazy> = Lazy::new(|| { .into() }); -/// A wrapper around [BaseFeeParams] that allows for specifying constant or dynamic EIP-1559 +/// A wrapper around [`BaseFeeParams`] that allows for specifying constant or dynamic EIP-1559 /// parameters based on the active [Hardfork]. #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] #[serde(untagged)] pub enum BaseFeeParamsKind { - /// Constant [BaseFeeParams]; used for chains that don't have dynamic EIP-1559 parameters + /// Constant [`BaseFeeParams`]; used for chains that don't have dynamic EIP-1559 parameters Constant(BaseFeeParams), - /// Variable [BaseFeeParams]; used for chains that have dynamic EIP-1559 parameters like + /// Variable [`BaseFeeParams`]; used for chains that have dynamic EIP-1559 parameters like /// Optimism Variable(ForkBaseFeeParams), } @@ -478,7 +478,7 @@ impl From for BaseFeeParamsKind { } } -/// A type alias to a vector of tuples of [Hardfork] and [BaseFeeParams], sorted by [Hardfork] +/// A type alias to a vector of tuples of [Hardfork] and [`BaseFeeParams`], sorted by [Hardfork] /// activation order. This is used to specify dynamic EIP-1559 parameters for chains like Optimism. #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct ForkBaseFeeParams(Vec<(Hardfork, BaseFeeParams)>); @@ -511,14 +511,15 @@ pub struct ChainSpec { /// The genesis block pub genesis: Genesis, - /// The block at which [Hardfork::Paris] was activated and the final difficulty at this block. 
+ /// The block at which [`Hardfork::Paris`] was activated and the final difficulty at this + /// block. #[serde(skip, default)] pub paris_block_and_final_difficulty: Option<(u64, U256)>, /// The active hard forks and their activation conditions pub hardforks: BTreeMap, - /// The deposit contract deployed for PoS + /// The deposit contract deployed for `PoS` #[serde(skip, default)] pub deposit_contract: Option, @@ -662,7 +663,7 @@ impl ChainSpec { self.fork(Hardfork::London).active_at_block(0).then_some(genesis_base_fee) } - /// Get the [BaseFeeParams] for the chain at the given timestamp. + /// Get the [`BaseFeeParams`] for the chain at the given timestamp. pub fn base_fee_params_at_timestamp(&self, timestamp: u64) -> BaseFeeParams { match self.base_fee_params { BaseFeeParamsKind::Constant(bf_params) => bf_params, @@ -681,7 +682,7 @@ impl ChainSpec { } } - /// Get the [BaseFeeParams] for the chain at the given block number + /// Get the [`BaseFeeParams`] for the chain at the given block number pub fn base_fee_params_at_block(&self, block_number: u64) -> BaseFeeParams { match self.base_fee_params { BaseFeeParamsKind::Constant(bf_params) => bf_params, @@ -756,13 +757,13 @@ impl ChainSpec { } } - /// Convenience method to get the fork id for [Hardfork::Shanghai] from a given chainspec. + /// Convenience method to get the fork id for [`Hardfork::Shanghai`] from a given chainspec. #[inline] pub fn shanghai_fork_id(&self) -> Option { self.hardfork_fork_id(Hardfork::Shanghai) } - /// Convenience method to get the fork id for [Hardfork::Cancun] from a given chainspec. + /// Convenience method to get the fork id for [`Hardfork::Cancun`] from a given chainspec. #[inline] pub fn cancun_fork_id(&self) -> Option { self.hardfork_fork_id(Hardfork::Cancun) @@ -797,43 +798,44 @@ impl ChainSpec { self.fork(fork).active_at_block(block_number) } - /// Convenience method to check if [Hardfork::Shanghai] is active at a given timestamp. + /// Convenience method to check if [`Hardfork::Shanghai`] is active at a given timestamp. #[inline] pub fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool { self.is_fork_active_at_timestamp(Hardfork::Shanghai, timestamp) } - /// Convenience method to check if [Hardfork::Cancun] is active at a given timestamp. + /// Convenience method to check if [`Hardfork::Cancun`] is active at a given timestamp. #[inline] pub fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool { self.is_fork_active_at_timestamp(Hardfork::Cancun, timestamp) } - /// Convenience method to check if [Hardfork::Prague] is active at a given timestamp. + /// Convenience method to check if [`Hardfork::Prague`] is active at a given timestamp. #[inline] pub fn is_prague_active_at_timestamp(&self, timestamp: u64) -> bool { self.is_fork_active_at_timestamp(Hardfork::Prague, timestamp) } - /// Convenience method to check if [Hardfork::Byzantium] is active at a given block number. + /// Convenience method to check if [`Hardfork::Byzantium`] is active at a given block number. #[inline] pub fn is_byzantium_active_at_block(&self, block_number: u64) -> bool { self.fork(Hardfork::Byzantium).active_at_block(block_number) } - /// Convenience method to check if [Hardfork::SpuriousDragon] is active at a given block number. + /// Convenience method to check if [`Hardfork::SpuriousDragon`] is active at a given block + /// number. 
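The `base_fee_params_at_timestamp` docs above describe a timestamp-based lookup over the constant/variable split. A sketch of that selection, assuming a non-empty list sorted by activation timestamp; the types and activation data are stand-ins, not reth's definitions:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct BaseFeeParams {
    max_change_denominator: u64,
    elasticity_multiplier: u64,
}

enum BaseFeeParamsKind {
    Constant(BaseFeeParams),
    // (activation timestamp, params), ascending by activation time.
    Variable(Vec<(u64, BaseFeeParams)>),
}

fn base_fee_params_at_timestamp(kind: &BaseFeeParamsKind, timestamp: u64) -> BaseFeeParams {
    match kind {
        BaseFeeParamsKind::Constant(p) => *p,
        BaseFeeParamsKind::Variable(forks) => forks
            .iter()
            // Keep only forks already active at `timestamp`...
            .take_while(|(activated_at, _)| *activated_at <= timestamp)
            // ...and use the most recent of them.
            .last()
            .map(|(_, p)| *p)
            // Sketch-only fallback when nothing has activated yet.
            .unwrap_or(forks[0].1),
    }
}

fn main() {
    let pre = BaseFeeParams { max_change_denominator: 8, elasticity_multiplier: 2 };
    let post = BaseFeeParams { max_change_denominator: 250, elasticity_multiplier: 6 };
    let kind = BaseFeeParamsKind::Variable(vec![(0, pre), (1_700_000_000, post)]);
    assert_eq!(base_fee_params_at_timestamp(&kind, 42), pre);
    assert_eq!(base_fee_params_at_timestamp(&kind, 1_800_000_000), post);
    let constant = BaseFeeParamsKind::Constant(pre);
    assert_eq!(base_fee_params_at_timestamp(&constant, 0), pre);
}
```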
#[inline] pub fn is_spurious_dragon_active_at_block(&self, block_number: u64) -> bool { self.fork(Hardfork::SpuriousDragon).active_at_block(block_number) } - /// Convenience method to check if [Hardfork::Homestead] is active at a given block number. + /// Convenience method to check if [`Hardfork::Homestead`] is active at a given block number. #[inline] pub fn is_homestead_active_at_block(&self, block_number: u64) -> bool { self.fork(Hardfork::Homestead).active_at_block(block_number) } - /// Convenience method to check if [Hardfork::Bedrock] is active at a given block number. + /// Convenience method to check if [`Hardfork::Bedrock`] is active at a given block number. #[cfg(feature = "optimism")] #[inline] pub fn is_bedrock_active_at_block(&self, block_number: u64) -> bool { @@ -926,7 +928,7 @@ impl ChainSpec { /// An internal helper function that returns the block number of the last block-based /// fork that occurs before any existing TTD (merge)/timestamp based forks. /// - /// Note: this returns None if the ChainSpec is not configured with a TTD/Timestamp fork. + /// Note: this returns None if the `ChainSpec` is not configured with a TTD/Timestamp fork. pub(crate) fn last_block_fork_before_merge_or_timestamp(&self) -> Option { let mut hardforks_iter = self.forks_iter().peekable(); while let Some((_, curr_cond)) = hardforks_iter.next() { @@ -1381,7 +1383,7 @@ impl ForkCondition { /// The fork is considered active if the _previous_ total difficulty is above the threshold. /// To achieve that, we subtract the passed `difficulty` from the current block's total /// difficulty, and check if it's above the Fork Condition's total difficulty (here: - /// 58_750_000_000_000_000_000_000) + /// `58_750_000_000_000_000_000_000`) /// /// This will return false for any condition that is not TTD-based. pub fn active_at_ttd(&self, ttd: U256, difficulty: U256) -> bool { @@ -1597,7 +1599,7 @@ impl DisplayHardforks { } } -/// PoS deposit contract details. +/// `PoS` deposit contract details. #[derive(Debug, Clone, PartialEq, Eq)] pub struct DepositContract { /// Deposit Contract Address @@ -1609,7 +1611,7 @@ pub struct DepositContract { } impl DepositContract { - /// Creates a new [DepositContract]. + /// Creates a new [`DepositContract`]. pub const fn new(address: Address, block: BlockNumber, topic: B256) -> Self { Self { address, block, topic } } @@ -2395,7 +2397,7 @@ Post-merge hard forks (timestamp based): /// Checks that time-based forks work /// - /// This is based off of the test vectors here: https://github.com/ethereum/go-ethereum/blob/5c8cc10d1e05c23ff1108022f4150749e73c0ca1/core/forkid/forkid_test.go#L155-L188 + /// This is based off of the test vectors here: #[test] fn timestamped_forks() { let mainnet_with_timestamps = ChainSpecBuilder::mainnet().build(); @@ -2526,7 +2528,7 @@ Post-merge hard forks (timestamp based): ); } - /// Constructs a [ChainSpec] with the given [ChainSpecBuilder], shanghai, and cancun fork + /// Constructs a [`ChainSpec`] with the given [`ChainSpecBuilder`], shanghai, and cancun fork /// timestamps. fn construct_chainspec( builder: ChainSpecBuilder, diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs index 9ac04b862..3379a9e48 100644 --- a/crates/primitives/src/constants/eip4844.rs +++ b/crates/primitives/src/constants/eip4844.rs @@ -25,10 +25,10 @@ mod trusted_setup { ) }); - /// Loads the trusted setup parameters from the given bytes and returns the [KzgSettings]. 
+ /// Loads the trusted setup parameters from the given bytes and returns the [`KzgSettings`]. /// - /// This creates a temp file to store the bytes and then loads the [KzgSettings] from the file - /// via [KzgSettings::load_trusted_setup_file]. + /// This creates a temp file to store the bytes and then loads the [`KzgSettings`] from the file + /// via [`KzgSettings::load_trusted_setup_file`]. pub fn load_trusted_setup_from_bytes( bytes: &[u8], ) -> Result { @@ -40,8 +40,8 @@ mod trusted_setup { /// Error type for loading the trusted setup. #[derive(Debug, thiserror::Error)] pub enum LoadKzgSettingsError { - /// Failed to create temp file to store bytes for loading [KzgSettings] via - /// [KzgSettings::load_trusted_setup_file]. + /// Failed to create temp file to store bytes for loading [`KzgSettings`] via + /// [`KzgSettings::load_trusted_setup_file`]. #[error("failed to setup temp file: {0}")] TempFileErr(#[from] std::io::Error), /// Kzg error diff --git a/crates/primitives/src/constants/mod.rs b/crates/primitives/src/constants/mod.rs index f04868c6c..f4a1e24dc 100644 --- a/crates/primitives/src/constants/mod.rs +++ b/crates/primitives/src/constants/mod.rs @@ -53,7 +53,7 @@ pub const ETHEREUM_BLOCK_GAS_LIMIT: u64 = 30_000_000; /// significant harm in leaving this setting as is. pub const MIN_PROTOCOL_BASE_FEE: u64 = 7; -/// Same as [MIN_PROTOCOL_BASE_FEE] but as a U256. +/// Same as [`MIN_PROTOCOL_BASE_FEE`] but as a U256. pub const MIN_PROTOCOL_BASE_FEE_U256: U256 = U256::from_limbs([7u64, 0, 0, 0]); /// Initial base fee as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) diff --git a/crates/primitives/src/exex/mod.rs b/crates/primitives/src/exex/mod.rs index 82730f297..a0836c1fc 100644 --- a/crates/primitives/src/exex/mod.rs +++ b/crates/primitives/src/exex/mod.rs @@ -1,15 +1,15 @@ use crate::BlockNumber; -/// The finished height of all ExEx's. +/// The finished height of all `ExEx`'s. #[derive(Debug, Clone, Copy)] pub enum FinishedExExHeight { - /// No ExEx's are installed, so there is no finished height. + /// No `ExEx`'s are installed, so there is no finished height. NoExExs, - /// Not all ExExs have emitted a `FinishedHeight` event yet. + /// Not all `ExExs` have emitted a `FinishedHeight` event yet. NotReady, - /// The finished height of all ExEx's. + /// The finished height of all `ExEx`'s. /// - /// This is the lowest common denominator between all ExEx's. + /// This is the lowest common denominator between all `ExEx`'s. /// /// This block is used to (amongst other things) determine what blocks are safe to prune. /// @@ -18,7 +18,7 @@ pub enum FinishedExExHeight { } impl FinishedExExHeight { - /// Returns `true` if not all ExExs have emitted a `FinishedHeight` event yet. + /// Returns `true` if not all `ExExs` have emitted a `FinishedHeight` event yet. pub const fn is_not_ready(&self) -> bool { matches!(self, Self::NotReady) } diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index 7ec106c3b..25c846972 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -244,7 +244,7 @@ impl Header { /// /// Returns `None` if `excess_blob_gas` is None. /// - /// See also [Self::next_block_excess_blob_gas] + /// See also [`Self::next_block_excess_blob_gas`] pub fn next_block_blob_fee(&self) -> Option { self.next_block_excess_blob_gas().map(calc_blob_gasprice) } @@ -861,7 +861,7 @@ impl SealedHeader { self.header } - /// This is the inverse of [Header::seal_slow] which returns the raw header and hash. 
+ /// This is the inverse of [`Header::seal_slow`] which returns the raw header and hash. pub fn split(self) -> (Header, BlockHash) { (self.header, self.hash) } @@ -871,7 +871,7 @@ impl SealedHeader { BlockNumHash::new(self.number, self.hash) } - /// Calculates a heuristic for the in-memory size of the [SealedHeader]. + /// Calculates a heuristic for the in-memory size of the [`SealedHeader`]. #[inline] pub fn size(&self) -> usize { self.header.size() + mem::size_of::() diff --git a/crates/primitives/src/integer_list.rs b/crates/primitives/src/integer_list.rs index 33ddae452..4dcb9a9bc 100644 --- a/crates/primitives/src/integer_list.rs +++ b/crates/primitives/src/integer_list.rs @@ -28,7 +28,7 @@ impl fmt::Debug for IntegerList { } impl IntegerList { - /// Creates an IntegerList from a list of integers. + /// Creates an `IntegerList` from a list of integers. /// /// # Returns /// diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index e8015a276..ca419acb8 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -173,7 +173,7 @@ pub fn calculate_ommers_root(ommers: &[Header]) -> B256 { /// Hashes and sorts account keys, then proceeds to calculating the root hash of the state /// represented as MPT. -/// See [state_root_unsorted] for more info. +/// See [`state_root_unsorted`] for more info. pub fn state_root_ref_unhashed<'a, A: Into + Clone + 'a>( state: impl IntoIterator, ) -> B256 { @@ -184,7 +184,7 @@ pub fn state_root_ref_unhashed<'a, A: Into + Clone + 'a>( /// Hashes and sorts account keys, then proceeds to calculating the root hash of the state /// represented as MPT. -/// See [state_root_unsorted] for more info. +/// See [`state_root_unsorted`] for more info. pub fn state_root_unhashed>( state: impl IntoIterator, ) -> B256 { @@ -192,7 +192,7 @@ pub fn state_root_unhashed>( } /// Sorts the hashed account keys and calculates the root hash of the state represented as MPT. -/// See [state_root] for more info. +/// See [`state_root`] for more info. pub fn state_root_unsorted>( state: impl IntoIterator, ) -> B256 { @@ -217,13 +217,13 @@ pub fn state_root>(state: impl IntoIterator) -> B256 { storage_root_unsorted(storage.into_iter().map(|(slot, value)| (keccak256(slot), value))) } /// Sorts and calculates the root hash of account storage trie. -/// See [storage_root] for more info. +/// See [`storage_root`] for more info. pub fn storage_root_unsorted(storage: impl IntoIterator) -> B256 { storage_root(storage.into_iter().sorted_by_key(|(key, _)| *key)) } diff --git a/crates/primitives/src/prune/mod.rs b/crates/primitives/src/prune/mod.rs index 07da6132f..aaefc0c6a 100644 --- a/crates/primitives/src/prune/mod.rs +++ b/crates/primitives/src/prune/mod.rs @@ -110,7 +110,7 @@ pub enum PruneInterruptReason { } impl PruneInterruptReason { - /// Creates new [PruneInterruptReason] based on the [PruneLimiter]. + /// Creates new [`PruneInterruptReason`] based on the [`PruneLimiter`]. pub fn new(limiter: &PruneLimiter) -> Self { if limiter.is_time_limit_reached() { Self::Timeout @@ -133,11 +133,11 @@ impl PruneInterruptReason { } impl PruneProgress { - /// Creates new [PruneProgress]. + /// Creates new [`PruneProgress`]. /// - /// If `done == true`, returns [PruneProgress::Finished], otherwise - /// [PruneProgress::HasMoreData] is returned with [PruneInterruptReason] according to the passed - /// limiter. 
+ /// If `done == true`, returns [`PruneProgress::Finished`], otherwise + /// [`PruneProgress::HasMoreData`] is returned with [`PruneInterruptReason`] according to the + /// passed limiter. pub fn new(done: bool, limiter: &PruneLimiter) -> Self { if done { Self::Finished diff --git a/crates/primitives/src/prune/segment.rs b/crates/primitives/src/prune/segment.rs index 23b867ee8..867fc4ca1 100644 --- a/crates/primitives/src/prune/segment.rs +++ b/crates/primitives/src/prune/segment.rs @@ -47,7 +47,7 @@ impl PruneSegment { pub enum PrunePurpose { /// Prune data according to user configuration. User, - /// Prune data according to highest static_files to delete the data from database. + /// Prune data according to highest `static_files` to delete the data from database. StaticFile, } @@ -63,7 +63,7 @@ impl PrunePurpose { } } -/// PruneSegment error type. +/// `PruneSegment` error type. #[derive(Debug, Error, PartialEq, Eq, Clone)] pub enum PruneSegmentError { /// Invalid configuration of a prune segment. diff --git a/crates/primitives/src/prune/target.rs b/crates/primitives/src/prune/target.rs index 255976531..9ff9034b9 100644 --- a/crates/primitives/src/prune/target.rs +++ b/crates/primitives/src/prune/target.rs @@ -40,7 +40,7 @@ pub struct PruneModes { /// Receipts pruning configuration by retaining only those receipts that contain logs emitted /// by the specified addresses, discarding others. This setting is overridden by `receipts`. /// - /// The [BlockNumber](`crate::BlockNumber`) represents the starting block from which point + /// The [`BlockNumber`](`crate::BlockNumber`) represents the starting block from which point /// onwards the receipts are preserved. pub receipts_log_filter: ReceiptsLogPruneConfig, } @@ -68,10 +68,10 @@ impl PruneModes { /// generic parameter `MIN_BLOCKS`. This parameter represents the number of blocks that needs to be /// left in database after the pruning. /// -/// 1. For [PruneMode::Full], it fails if `MIN_BLOCKS > 0`. -/// 2. For [PruneMode::Distance(distance)], it fails if `distance < MIN_BLOCKS + 1`. `+ 1` is needed -/// because `PruneMode::Distance(0)` means that we leave zero blocks from the latest, meaning we -/// have one block in the database. +/// 1. For [`PruneMode::Full`], it fails if `MIN_BLOCKS > 0`. +/// 2. For [`PruneMode::Distance(distance`)], it fails if `distance < MIN_BLOCKS + 1`. `+ 1` is +/// needed because `PruneMode::Distance(0)` means that we leave zero blocks from the latest, +/// meaning we have one block in the database. fn deserialize_opt_prune_mode_with_min_blocks<'de, const MIN_BLOCKS: u64, D: Deserializer<'de>>( deserializer: D, ) -> Result, D::Error> { diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index a01e8542c..76a88e88f 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -45,20 +45,20 @@ pub struct Receipt { } impl Receipt { - /// Calculates [`Log`]'s bloom filter. this is slow operation and [ReceiptWithBloom] can + /// Calculates [`Log`]'s bloom filter. this is slow operation and [`ReceiptWithBloom`] can /// be used to cache this value. pub fn bloom_slow(&self) -> Bloom { logs_bloom(self.logs.iter()) } - /// Calculates the bloom filter for the receipt and returns the [ReceiptWithBloom] container + /// Calculates the bloom filter for the receipt and returns the [`ReceiptWithBloom`] container /// type. 
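The `MIN_BLOCKS` rule documented in the prune/target.rs hunk above is worth seeing as code: `Full` is rejected whenever any blocks must be retained, and `Distance(d)` fails when `d < MIN_BLOCKS + 1` because `Distance(0)` already keeps exactly one block (the latest). A sketch that mirrors the documented rule; names and the `String` error are illustrative, not reth's deserializer:

```rust
#[derive(Debug, Clone, Copy)]
enum PruneMode {
    Full,
    Distance(u64),
}

fn validate_prune_mode<const MIN_BLOCKS: u64>(mode: PruneMode) -> Result<PruneMode, String> {
    match mode {
        PruneMode::Full if MIN_BLOCKS > 0 => {
            Err(format!("`full` would prune the {} block(s) that must be kept", MIN_BLOCKS))
        }
        // Distance(0) still keeps one block, hence the documented `+ 1`.
        PruneMode::Distance(d) if d < MIN_BLOCKS + 1 => {
            Err(format!("distance {} keeps too few blocks, need at least {}", d, MIN_BLOCKS + 1))
        }
        ok => Ok(ok),
    }
}

fn main() {
    assert!(validate_prune_mode::<64>(PruneMode::Distance(65)).is_ok());
    assert!(validate_prune_mode::<64>(PruneMode::Distance(64)).is_err());
    assert!(validate_prune_mode::<64>(PruneMode::Full).is_err());
    assert!(validate_prune_mode::<0>(PruneMode::Full).is_ok());
}
```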
pub fn with_bloom(self) -> ReceiptWithBloom { self.into() } - /// Calculates the bloom filter for the receipt and returns the [ReceiptWithBloomRef] container - /// type. + /// Calculates the bloom filter for the receipt and returns the [`ReceiptWithBloomRef`] + /// container type. pub fn with_bloom_ref(&self) -> ReceiptWithBloomRef<'_> { self.into() } @@ -172,7 +172,7 @@ pub struct ReceiptWithBloom { } impl ReceiptWithBloom { - /// Create new [ReceiptWithBloom] + /// Create new [`ReceiptWithBloom`] pub const fn new(receipt: Receipt, bloom: Bloom) -> Self { Self { receipt, bloom } } @@ -283,7 +283,7 @@ impl<'a> arbitrary::Arbitrary<'a> for Receipt { impl ReceiptWithBloom { /// Returns the enveloped encoded receipt. /// - /// See also [ReceiptWithBloom::encode_enveloped] + /// See also [`ReceiptWithBloom::encode_enveloped`] pub fn envelope_encoded(&self) -> Bytes { let mut buf = Vec::new(); self.encode_enveloped(&mut buf); @@ -429,7 +429,7 @@ pub struct ReceiptWithBloomRef<'a> { } impl<'a> ReceiptWithBloomRef<'a> { - /// Create new [ReceiptWithBloomRef] + /// Create new [`ReceiptWithBloomRef`] pub const fn new(receipt: &'a Receipt, bloom: Bloom) -> Self { Self { receipt, bloom } } diff --git a/crates/primitives/src/revm/config.rs b/crates/primitives/src/revm/config.rs index b443b78e2..d3fed4061 100644 --- a/crates/primitives/src/revm/config.rs +++ b/crates/primitives/src/revm/config.rs @@ -32,7 +32,7 @@ pub fn revm_spec_by_timestamp_after_merge( } } -/// return revm_spec from spec configuration. +/// return `revm_spec` from spec configuration. pub fn revm_spec(chain_spec: &ChainSpec, block: Head) -> revm_primitives::SpecId { #[cfg(feature = "optimism")] if chain_spec.is_optimism() { diff --git a/crates/primitives/src/revm/env.rs b/crates/primitives/src/revm/env.rs index 2fad8fb3f..db38bb533 100644 --- a/crates/primitives/src/revm/env.rs +++ b/crates/primitives/src/revm/env.rs @@ -104,7 +104,7 @@ pub fn recover_header_signer(header: &Header) -> Result TxEnv { let mut tx_env = TxEnv::default(); @@ -132,8 +132,8 @@ pub fn tx_env_with_recovered(transaction: &TransactionSignedEcRecovered) -> TxEn /// [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) are: /// /// At the start of processing any execution block where `block.timestamp >= FORK_TIMESTAMP` (i.e. -/// before processing any transactions), call [BEACON_ROOTS_ADDRESS] as -/// [SYSTEM_ADDRESS](alloy_eips::eip4788::SYSTEM_ADDRESS) with the 32-byte input of +/// before processing any transactions), call [`BEACON_ROOTS_ADDRESS`] as +/// [`SYSTEM_ADDRESS`](alloy_eips::eip4788::SYSTEM_ADDRESS) with the 32-byte input of /// `header.parent_beacon_block_root`. This will trigger the `set()` routine of the beacon roots /// contract. pub fn fill_tx_env_with_beacon_root_contract_call(env: &mut Env, parent_beacon_block_root: B256) { @@ -215,13 +215,13 @@ fn fill_tx_env_with_system_contract_call( env.block.basefee = U256::ZERO; } -/// Fill transaction environment from [TransactionSignedEcRecovered]. +/// Fill transaction environment from [`TransactionSignedEcRecovered`]. #[cfg(not(feature = "optimism"))] pub fn fill_tx_env_with_recovered(tx_env: &mut TxEnv, transaction: &TransactionSignedEcRecovered) { fill_tx_env(tx_env, transaction.as_ref(), transaction.signer()); } -/// Fill transaction environment from [TransactionSignedEcRecovered] and the given envelope. +/// Fill transaction environment from [`TransactionSignedEcRecovered`] and the given envelope. 
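The `bloom_slow` / `with_bloom` docs in the receipt.rs hunk above describe a compute-once caching container: deriving the 2048-bit bloom from the logs is expensive, so it is stored next to the receipt. A sketch of that pattern with a toy bloom; the real `logs_bloom` sets three bits per keccak256 hash of each log address and topic, which is elided here:

```rust
#[derive(Debug, Clone)]
struct Receipt {
    logs: Vec<Vec<u8>>,
}

#[derive(Debug, Clone, Copy, PartialEq)]
struct Bloom([u8; 256]); // 2048 bits

impl Receipt {
    /// Slow path: derives the bloom from scratch on every call.
    fn bloom_slow(&self) -> Bloom {
        let mut bits = [0u8; 256];
        for log in &self.logs {
            for &byte in log {
                // Toy dispersion only, standing in for the keccak256-based rule.
                bits[byte as usize] |= 1u8 << (byte % 8);
            }
        }
        Bloom(bits)
    }

    /// Computes the bloom once and returns the caching container.
    fn with_bloom(self) -> ReceiptWithBloom {
        let bloom = self.bloom_slow();
        ReceiptWithBloom { receipt: self, bloom }
    }
}

#[derive(Debug)]
struct ReceiptWithBloom {
    receipt: Receipt,
    bloom: Bloom,
}

fn main() {
    let cached = Receipt { logs: vec![b"transfer".to_vec()] }.with_bloom();
    // Later consumers reuse `cached.bloom` instead of re-deriving it.
    assert_eq!(cached.bloom, cached.receipt.bloom_slow());
}
```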
#[cfg(feature = "optimism")] pub fn fill_tx_env_with_recovered( tx_env: &mut TxEnv, diff --git a/crates/primitives/src/revm/mod.rs b/crates/primitives/src/revm/mod.rs index 1d527d28c..f3c4ac62d 100644 --- a/crates/primitives/src/revm/mod.rs +++ b/crates/primitives/src/revm/mod.rs @@ -5,7 +5,7 @@ /// /// The included conversion methods can be used to convert between: /// * reth's [Log](crate::Log) type and revm's [Log](revm_primitives::Log) type. -/// * reth's [Account](crate::Account) type and revm's [AccountInfo](revm_primitives::AccountInfo) +/// * reth's [Account](crate::Account) type and revm's [`AccountInfo`](revm_primitives::AccountInfo) /// type. pub mod compat; diff --git a/crates/primitives/src/stage/checkpoints.rs b/crates/primitives/src/stage/checkpoints.rs index d8e54a353..bdc8b0d9c 100644 --- a/crates/primitives/src/stage/checkpoints.rs +++ b/crates/primitives/src/stage/checkpoints.rs @@ -291,7 +291,7 @@ impl Default for StageUnitCheckpoint { } } -/// Generates [StageCheckpoint] getter and builder methods. +/// Generates [`StageCheckpoint`] getter and builder methods. macro_rules! stage_unit_checkpoints { ($(($index:expr,$enum_variant:tt,$checkpoint_ty:ty,#[doc = $fn_get_doc:expr]$fn_get_name:ident,#[doc = $fn_build_doc:expr]$fn_build_name:ident)),+) => { impl StageCheckpoint { diff --git a/crates/primitives/src/stage/id.rs b/crates/primitives/src/stage/id.rs index 11ceb41e3..a4d0553d7 100644 --- a/crates/primitives/src/stage/id.rs +++ b/crates/primitives/src/stage/id.rs @@ -85,17 +85,17 @@ impl StageId { } } - /// Returns true if it's a downloading stage [StageId::Headers] or [StageId::Bodies] + /// Returns true if it's a downloading stage [`StageId::Headers`] or [`StageId::Bodies`] pub const fn is_downloading_stage(&self) -> bool { matches!(self, Self::Headers | Self::Bodies) } - /// Returns `true` if it's [TransactionLookup](StageId::TransactionLookup) stage. + /// Returns `true` if it's [`TransactionLookup`](StageId::TransactionLookup) stage. pub const fn is_tx_lookup(&self) -> bool { matches!(self, Self::TransactionLookup) } - /// Returns true indicating if it's the finish stage [StageId::Finish] + /// Returns true indicating if it's the finish stage [`StageId::Finish`] pub const fn is_finish(&self) -> bool { matches!(self, Self::Finish) } diff --git a/crates/primitives/src/storage.rs b/crates/primitives/src/storage.rs index d9f6a15d2..ef3b2f082 100644 --- a/crates/primitives/src/storage.rs +++ b/crates/primitives/src/storage.rs @@ -15,7 +15,7 @@ pub struct StorageEntry { } impl StorageEntry { - /// Create a new StorageEntry with given key and value. + /// Create a new `StorageEntry` with given key and value. pub const fn new(key: B256, value: U256) -> Self { Self { key, value } } diff --git a/crates/primitives/src/transaction/eip1559.rs b/crates/primitives/src/transaction/eip1559.rs index ef6f9eb72..92f75db6a 100644 --- a/crates/primitives/src/transaction/eip1559.rs +++ b/crates/primitives/src/transaction/eip1559.rs @@ -80,7 +80,7 @@ impl TxEip1559 { } } - /// Decodes the inner [TxEip1559] fields from RLP bytes. + /// Decodes the inner [`TxEip1559`] fields from RLP bytes. /// /// NOTE: This assumes a RLP header has already been decoded, and _just_ decodes the following /// RLP fields in the following order: @@ -179,7 +179,7 @@ impl TxEip1559 { TxType::Eip1559 } - /// Calculates a heuristic for the in-memory size of the [TxEip1559] transaction. + /// Calculates a heuristic for the in-memory size of the [`TxEip1559`] transaction. 
#[inline] pub fn size(&self) -> usize { mem::size_of::() + // chain_id diff --git a/crates/primitives/src/transaction/eip2930.rs b/crates/primitives/src/transaction/eip2930.rs index 672bb86b5..9dc461886 100644 --- a/crates/primitives/src/transaction/eip2930.rs +++ b/crates/primitives/src/transaction/eip2930.rs @@ -50,7 +50,7 @@ pub struct TxEip2930 { } impl TxEip2930 { - /// Calculates a heuristic for the in-memory size of the [TxEip2930] transaction. + /// Calculates a heuristic for the in-memory size of the [`TxEip2930`] transaction. #[inline] pub fn size(&self) -> usize { mem::size_of::() + // chain_id @@ -63,7 +63,7 @@ impl TxEip2930 { self.input.len() // input } - /// Decodes the inner [TxEip2930] fields from RLP bytes. + /// Decodes the inner [`TxEip2930`] fields from RLP bytes. /// /// NOTE: This assumes a RLP header has already been decoded, and _just_ decodes the following /// RLP fields in the following order: diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index d91490eb4..214e2a5e1 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -102,7 +102,7 @@ impl TxEip4844 { /// Verifies that the given blob data, commitments, and proofs are all valid for this /// transaction. /// - /// Takes as input the [KzgSettings], which should contain the parameters derived from the + /// Takes as input the [`KzgSettings`], which should contain the parameters derived from the /// KZG trusted setup. /// /// This ensures that the blob transaction payload has the same number of blob data elements, @@ -128,7 +128,7 @@ impl TxEip4844 { self.blob_versioned_hashes.len() as u64 * DATA_GAS_PER_BLOB } - /// Decodes the inner [TxEip4844] fields from RLP bytes. + /// Decodes the inner [`TxEip4844`] fields from RLP bytes. /// /// NOTE: This assumes a RLP header has already been decoded, and _just_ decodes the following /// RLP fields in the following order: @@ -191,7 +191,7 @@ impl TxEip4844 { self.blob_versioned_hashes.encode(out); } - /// Calculates a heuristic for the in-memory size of the [TxEip4844] transaction. + /// Calculates a heuristic for the in-memory size of the [`TxEip4844`] transaction. #[inline] pub fn size(&self) -> usize { mem::size_of::() + // chain_id diff --git a/crates/primitives/src/transaction/legacy.rs b/crates/primitives/src/transaction/legacy.rs index 42e1e4b45..d6cb4ae2a 100644 --- a/crates/primitives/src/transaction/legacy.rs +++ b/crates/primitives/src/transaction/legacy.rs @@ -43,7 +43,7 @@ pub struct TxLegacy { } impl TxLegacy { - /// Calculates a heuristic for the in-memory size of the [TxLegacy] transaction. + /// Calculates a heuristic for the in-memory size of the [`TxLegacy`] transaction. #[inline] pub fn size(&self) -> usize { mem::size_of::>() + // chain_id @@ -161,7 +161,7 @@ impl TxLegacy { /// Outputs the signature hash of the transaction by first encoding without a signature, then /// hashing. /// - /// See [Self::encode_for_signing] for more information on the encoding format. + /// See [`Self::encode_for_signing`] for more information on the encoding format. 
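The blob-gas doc above boils down to simple arithmetic: an EIP-4844 transaction consumes a fixed amount of data gas per blob, so the total is `number_of_blobs * DATA_GAS_PER_BLOB`. A sketch with the EIP-4844 Cancun constants (2^17 gas per blob, at most six blobs per block); the struct is a stand-in:

```rust
const DATA_GAS_PER_BLOB: u64 = 131_072; // 2^17, per EIP-4844
const MAX_DATA_GAS_PER_BLOCK: u64 = 786_432; // six blobs at Cancun parameters

struct TxEip4844 {
    blob_versioned_hashes: Vec<[u8; 32]>,
}

impl TxEip4844 {
    // One versioned hash per blob, so the hash count is the blob count.
    fn blob_gas(&self) -> u64 {
        self.blob_versioned_hashes.len() as u64 * DATA_GAS_PER_BLOB
    }
}

fn main() {
    let tx = TxEip4844 { blob_versioned_hashes: vec![[0u8; 32]; 3] };
    assert_eq!(tx.blob_gas(), 3 * DATA_GAS_PER_BLOB);
    assert!(tx.blob_gas() <= MAX_DATA_GAS_PER_BLOCK);
}
```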
pub(crate) fn signature_hash(&self) -> B256 { let mut buf = BytesMut::with_capacity(self.payload_len_for_signature()); self.encode_for_signing(&mut buf); diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index bb6fdad17..ca8b4d24b 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -150,7 +150,7 @@ impl Transaction { } } - /// Get chain_id. + /// Get `chain_id`. pub const fn chain_id(&self) -> Option { match self { Self::Legacy(TxLegacy { chain_id, .. }) => *chain_id, @@ -232,7 +232,7 @@ impl Transaction { } } - /// Returns the [AccessList] of the transaction. + /// Returns the [`AccessList`] of the transaction. /// /// Returns `None` for legacy transactions. pub const fn access_list(&self) -> Option<&AccessList> { @@ -268,7 +268,7 @@ impl Transaction { } } - /// Max fee per gas for eip1559 transaction, for legacy transactions this is gas_price. + /// Max fee per gas for eip1559 transaction, for legacy transactions this is `gas_price`. /// /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). pub const fn max_fee_per_gas(&self) -> u128 { @@ -315,7 +315,7 @@ impl Transaction { } } - /// Max fee per blob gas for eip4844 transaction [TxEip4844]. + /// Max fee per blob gas for eip4844 transaction [`TxEip4844`]. /// /// Returns `None` for non-eip4844 transactions. /// @@ -331,7 +331,7 @@ impl Transaction { /// transaction. /// /// This is the number of blobs times the - /// [DATA_GAS_PER_BLOB](crate::constants::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. + /// [`DATA_GAS_PER_BLOB`](crate::constants::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. pub fn blob_gas_used(&self) -> Option { self.as_eip4844().map(TxEip4844::blob_gas) } @@ -374,7 +374,7 @@ impl Transaction { /// If the base fee is `None`, the `max_priority_fee_per_gas`, or gas price for non-EIP1559 /// transactions is returned. /// - /// Returns `None` if the basefee is higher than the [Transaction::max_fee_per_gas]. + /// Returns `None` if the basefee is higher than the [`Transaction::max_fee_per_gas`]. pub fn effective_tip_per_gas(&self, base_fee: Option) -> Option { let base_fee = match base_fee { Some(base_fee) => base_fee as u128, @@ -551,7 +551,7 @@ impl Transaction { matches!(self, Self::Eip4844(_)) } - /// Returns the [TxLegacy] variant if the transaction is a legacy transaction. + /// Returns the [`TxLegacy`] variant if the transaction is a legacy transaction. pub const fn as_legacy(&self) -> Option<&TxLegacy> { match self { Self::Legacy(tx) => Some(tx), @@ -559,7 +559,7 @@ impl Transaction { } } - /// Returns the [TxEip2930] variant if the transaction is an EIP-2930 transaction. + /// Returns the [`TxEip2930`] variant if the transaction is an EIP-2930 transaction. pub const fn as_eip2930(&self) -> Option<&TxEip2930> { match self { Self::Eip2930(tx) => Some(tx), @@ -567,7 +567,7 @@ impl Transaction { } } - /// Returns the [TxEip1559] variant if the transaction is an EIP-1559 transaction. + /// Returns the [`TxEip1559`] variant if the transaction is an EIP-1559 transaction. pub const fn as_eip1559(&self) -> Option<&TxEip1559> { match self { Self::Eip1559(tx) => Some(tx), @@ -575,7 +575,7 @@ impl Transaction { } } - /// Returns the [TxEip4844] variant if the transaction is an EIP-4844 transaction. + /// Returns the [`TxEip4844`] variant if the transaction is an EIP-4844 transaction. 
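The `effective_tip_per_gas` doc above encodes the EIP-1559 tip rule: the miner tip is `min(max_priority_fee, max_fee - base_fee)`, it falls back to the raw priority fee (or gas price) when no base fee is known, and it is `None` when the base fee alone exceeds `max_fee_per_gas`. A sketch under those rules; the field names are illustrative stand-ins:

```rust
struct Eip1559Fees {
    max_fee_per_gas: u128,
    max_priority_fee_per_gas: u128,
}

fn effective_tip_per_gas(fees: &Eip1559Fees, base_fee: Option<u64>) -> Option<u128> {
    let base_fee = match base_fee {
        Some(b) => b as u128,
        // No base fee known: the whole priority fee is the tip.
        None => return Some(fees.max_priority_fee_per_gas),
    };
    if fees.max_fee_per_gas < base_fee {
        // The transaction cannot even cover the base fee.
        return None;
    }
    Some((fees.max_fee_per_gas - base_fee).min(fees.max_priority_fee_per_gas))
}

fn main() {
    let fees = Eip1559Fees { max_fee_per_gas: 100, max_priority_fee_per_gas: 10 };
    assert_eq!(effective_tip_per_gas(&fees, Some(95)), Some(5)); // capped by fee headroom
    assert_eq!(effective_tip_per_gas(&fees, Some(50)), Some(10)); // capped by priority fee
    assert_eq!(effective_tip_per_gas(&fees, Some(101)), None); // under the base fee
    assert_eq!(effective_tip_per_gas(&fees, None), Some(10));
}
```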
pub const fn as_eip4844(&self) -> Option<&TxEip4844> { match self { Self::Eip4844(tx) => Some(tx), @@ -752,7 +752,7 @@ impl TransactionSignedNoHash { /// Recover signer from signature and hash. /// - /// Returns `None` if the transaction's signature is invalid, see also [Self::recover_signer]. + /// Returns `None` if the transaction's signature is invalid, see also [`Self::recover_signer`]. pub fn recover_signer(&self) -> Option
<Address>
{ // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. @@ -772,11 +772,11 @@ impl TransactionSignedNoHash { /// buffer before use.** /// /// Returns `None` if the transaction's signature is invalid, see also - /// [Signature::recover_signer_unchecked]. + /// [`Signature::recover_signer_unchecked`]. /// /// # Optimism /// - /// For optimism this will return [Address::ZERO] if the Signature is empty, this is because pre bedrock (on OP mainnet), relay messages to the L2 Cross Domain Messenger were sent as legacy transactions from the zero address with an empty signature, e.g.: + /// For optimism this will return [`Address::ZERO`] if the Signature is empty, this is because pre bedrock (on OP mainnet), relay messages to the L2 Cross Domain Messenger were sent as legacy transactions from the zero address with an empty signature, e.g.: /// This makes it possible to import pre bedrock transactions via the sender recovery stage. pub fn encode_and_recover_unchecked(&self, buffer: &mut Vec) -> Option
<Address> { buffer.clear(); @@ -814,7 +814,7 @@ impl TransactionSignedNoHash { /// Recovers a list of signers from a transaction list iterator /// /// Returns `None`, if some transaction's signature is invalid, see also - /// [Self::recover_signer]. + /// [`Self::recover_signer`]. pub fn recover_signers<'a, T>(txes: T, num_txes: usize) -> Option<Vec<Address>> where T: IntoParallelIterator + IntoIterator + Send, @@ -1000,13 +1000,13 @@ impl TransactionSigned { /// Recover signer from signature and hash. /// - /// Returns `None` if the transaction's signature is invalid following [EIP-2](https://eips.ethereum.org/EIPS/eip-2), see also [Signature::recover_signer]. + /// Returns `None` if the transaction's signature is invalid following [EIP-2](https://eips.ethereum.org/EIPS/eip-2), see also [`Signature::recover_signer`]. /// /// Note: /// /// This can fail for some early ethereum mainnet transactions pre EIP-2, use - /// [Self::recover_signer_unchecked] if you want to recover the signer without ensuring that the - /// signature has a low `s` value. + /// [`Self::recover_signer_unchecked`] if you want to recover the signer without ensuring that + /// the signature has a low `s` value. pub fn recover_signer(&self) -> Option<Address>
{ // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. @@ -1022,7 +1022,7 @@ impl TransactionSigned { /// value_. /// /// Returns `None` if the transaction's signature is invalid, see also - /// [Signature::recover_signer_unchecked]. + /// [`Signature::recover_signer_unchecked`]. pub fn recover_signer_unchecked(&self) -> Option
{ // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. @@ -1037,7 +1037,7 @@ impl TransactionSigned { /// Recovers a list of signers from a transaction list iterator. /// /// Returns `None`, if some transaction's signature is invalid, see also - /// [Self::recover_signer]. + /// [`Self::recover_signer`]. pub fn recover_signers<'a, T>(txes: T, num_txes: usize) -> Option> where T: IntoParallelIterator + IntoIterator + Send, @@ -1053,7 +1053,7 @@ impl TransactionSigned { /// signature has a low `s` value_. /// /// Returns `None`, if some transaction's signature is invalid, see also - /// [Self::recover_signer_unchecked]. + /// [`Self::recover_signer_unchecked`]. pub fn recover_signers_unchecked<'a, T>(txes: T, num_txes: usize) -> Option> where T: IntoParallelIterator + IntoIterator, @@ -1065,7 +1065,7 @@ impl TransactionSigned { } } - /// Returns the [TransactionSignedEcRecovered] transaction with the given sender. + /// Returns the [`TransactionSignedEcRecovered`] transaction with the given sender. #[inline] pub const fn with_signer(self, signer: Address) -> TransactionSignedEcRecovered { TransactionSignedEcRecovered::from_signed_transaction(self, signer) @@ -1073,7 +1073,7 @@ impl TransactionSigned { /// Consumes the type, recover signer and return [`TransactionSignedEcRecovered`] /// - /// Returns `None` if the transaction's signature is invalid, see also [Self::recover_signer]. + /// Returns `None` if the transaction's signature is invalid, see also [`Self::recover_signer`]. pub fn into_ecrecovered(self) -> Option { let signer = self.recover_signer()?; Some(TransactionSignedEcRecovered { signed_transaction: self, signer }) @@ -1083,7 +1083,7 @@ impl TransactionSigned { /// ensuring that the signature has a low `s` value_ (EIP-2). /// /// Returns `None` if the transaction's signature is invalid, see also - /// [Self::recover_signer_unchecked]. + /// [`Self::recover_signer_unchecked`]. pub fn into_ecrecovered_unchecked(self) -> Option { let signer = self.recover_signer_unchecked()?; Some(TransactionSignedEcRecovered { signed_transaction: self, signer }) @@ -1098,7 +1098,7 @@ impl TransactionSigned { /// Tries to recover signer and return [`TransactionSignedEcRecovered`]. /// /// Returns `Err(Self)` if the transaction's signature is invalid, see also - /// [Self::recover_signer]. + /// [`Self::recover_signer`]. pub fn try_into_ecrecovered(self) -> Result { match self.recover_signer() { None => Err(self), @@ -1110,7 +1110,7 @@ impl TransactionSigned { /// the signature has a low `s` value_ (EIP-2). /// /// Returns `Err(Self)` if the transaction's signature is invalid, see also - /// [Self::recover_signer_unchecked]. + /// [`Self::recover_signer_unchecked`]. pub fn try_into_ecrecovered_unchecked(self) -> Result { match self.recover_signer_unchecked() { None => Err(self), @@ -1120,7 +1120,7 @@ impl TransactionSigned { /// Returns the enveloped encoded transactions. /// - /// See also [TransactionSigned::encode_enveloped] + /// See also [`TransactionSigned::encode_enveloped`] pub fn envelope_encoded(&self) -> Bytes { let mut buf = Vec::new(); self.encode_enveloped(&mut buf); @@ -1144,8 +1144,8 @@ impl TransactionSigned { self.transaction.encode_with_signature(&self.signature, out, with_header); } - /// Output the length of the encode_inner(out, true). Note to assume that `with_header` is only - /// `true`. + /// Output the length of the `encode_inner(out`, true). Note to assume that `with_header` is + /// only `true`. 
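The `recover_signers` docs above describe an all-or-nothing batch: recovering signers for a list returns `None` as soon as any single signature fails. Collecting an iterator of `Option`s captures exactly that short-circuit. A sketch; `recover_one` stands in for per-transaction ECDSA recovery (reth parallelizes the real thing with rayon):

```rust
type Address = [u8; 20];

struct SignedTx {
    valid: bool, // stand-in for "signature recovers successfully"
}

fn recover_one(tx: &SignedTx) -> Option<Address> {
    tx.valid.then_some([0xAA; 20])
}

fn recover_signers<'a>(txs: impl IntoIterator<Item = &'a SignedTx>) -> Option<Vec<Address>> {
    // `collect` over `Option` stops at the first `None`.
    txs.into_iter().map(recover_one).collect()
}

fn main() {
    let good = [SignedTx { valid: true }, SignedTx { valid: true }];
    let bad = [SignedTx { valid: true }, SignedTx { valid: false }];
    assert!(recover_signers(&good).is_some());
    assert!(recover_signers(&bad).is_none());
}
```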
pub(crate) fn payload_len_inner(&self) -> usize { match &self.transaction { Transaction::Legacy(legacy_tx) => legacy_tx.payload_len_with_signature(&self.signature), @@ -1178,7 +1178,7 @@ impl TransactionSigned { initial_tx } - /// Calculate a heuristic for the in-memory size of the [TransactionSigned]. + /// Calculate a heuristic for the in-memory size of the [`TransactionSigned`]. #[inline] pub fn size(&self) -> usize { mem::size_of::() + self.transaction.size() + self.signature.size() @@ -1188,7 +1188,7 @@ impl TransactionSigned { /// /// This expects `rlp(legacy_tx)` /// - /// Refer to the docs for [Self::decode_rlp_legacy_transaction] for details on the exact + /// Refer to the docs for [`Self::decode_rlp_legacy_transaction`] for details on the exact /// format expected. pub(crate) fn decode_rlp_legacy_transaction_tuple( data: &mut &[u8], @@ -1404,16 +1404,16 @@ impl Decodable for TransactionSigned { /// This can be used for decoding all signed transactions in p2p `BlockBodies` responses. /// /// This cannot be used for decoding EIP-4844 transactions in p2p `PooledTransactions`, since - /// the EIP-4844 variant of [TransactionSigned] does not include the blob sidecar. + /// the EIP-4844 variant of [`TransactionSigned`] does not include the blob sidecar. /// - /// For a method suitable for decoding pooled transactions, see [PooledTransactionsElement]. + /// For a method suitable for decoding pooled transactions, see [`PooledTransactionsElement`]. /// - /// CAUTION: Due to a quirk in [Header::decode], this method will succeed even if a typed + /// CAUTION: Due to a quirk in [`Header::decode`], this method will succeed even if a typed /// transaction is encoded in this format, and does not start with a RLP header: /// `tx-type || rlp(tx-data)`. /// - /// This is because [Header::decode] does not advance the buffer, and returns a length-1 string - /// header if the first byte is less than `0xf7`. + /// This is because [`Header::decode`] does not advance the buffer, and returns a length-1 + /// string header if the first byte is less than `0xf7`. fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { if buf.is_empty() { return Err(RlpError::InputTooShort) @@ -1548,7 +1548,7 @@ impl TransactionSignedEcRecovered { impl Encodable for TransactionSignedEcRecovered { /// This encodes the transaction _with_ the signature, and an rlp header. /// - /// Refer to docs for [TransactionSigned::encode] for details on the exact format. + /// Refer to docs for [`TransactionSigned::encode`] for details on the exact format. fn encode(&self, out: &mut dyn bytes::BufMut) { self.signed_transaction.encode(out) } @@ -1609,7 +1609,7 @@ pub trait FromRecoveredPooledTransaction { fn from_recovered_pooled_transaction(tx: PooledTransactionsElementEcRecovered) -> Self; } -/// The inverse of [TryFromRecoveredTransaction] that ensure the transaction can be sent over the +/// The inverse of [`TryFromRecoveredTransaction`] that ensure the transaction can be sent over the /// network pub trait IntoRecoveredTransaction { /// Converts to this type into a [`TransactionSignedEcRecovered`]. diff --git a/crates/primitives/src/transaction/optimism.rs b/crates/primitives/src/transaction/optimism.rs index 41d47506c..6bb8ec9b8 100644 --- a/crates/primitives/src/transaction/optimism.rs +++ b/crates/primitives/src/transaction/optimism.rs @@ -31,7 +31,7 @@ pub struct TxDeposit { } impl TxDeposit { - /// Calculates a heuristic for the in-memory size of the [TxDeposit] transaction. 
+ /// Calculates a heuristic for the in-memory size of the [`TxDeposit`] transaction. #[inline] pub fn size(&self) -> usize { mem::size_of::() + // source_hash @@ -44,7 +44,7 @@ impl TxDeposit { self.input.len() // input } - /// Decodes the inner [TxDeposit] fields from RLP bytes. + /// Decodes the inner [`TxDeposit`] fields from RLP bytes. /// /// NOTE: This assumes a RLP header has already been decoded, and _just_ decodes the following /// RLP fields in the following order: diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 4ab6e61fc..9c29bbc62 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -50,11 +50,11 @@ pub enum PooledTransactionsElement { } impl PooledTransactionsElement { - /// Tries to convert a [TransactionSigned] into a [PooledTransactionsElement]. + /// Tries to convert a [`TransactionSigned`] into a [`PooledTransactionsElement`]. /// /// This function used as a helper to convert from a decoded p2p broadcast message to - /// [PooledTransactionsElement]. Since [BlobTransaction] is disallowed to be broadcasted on - /// p2p, return an err if `tx` is [Transaction::Eip4844]. + /// [`PooledTransactionsElement`]. Since [`BlobTransaction`] is disallowed to be broadcasted on + /// p2p, return an err if `tx` is [`Transaction::Eip4844`]. pub fn try_from_broadcast(tx: TransactionSigned) -> Result { match tx { TransactionSigned { transaction: Transaction::Legacy(tx), signature, hash } => { @@ -74,8 +74,8 @@ impl PooledTransactionsElement { } } - /// Converts from an EIP-4844 [TransactionSignedEcRecovered] to a - /// [PooledTransactionsElementEcRecovered] with the given sidecar. + /// Converts from an EIP-4844 [`TransactionSignedEcRecovered`] to a + /// [`PooledTransactionsElementEcRecovered`] with the given sidecar. /// /// Returns an `Err` containing the original `TransactionSigned` if the transaction is not /// EIP-4844. @@ -138,7 +138,7 @@ impl PooledTransactionsElement { /// Recover signer from signature and hash. /// - /// Returns `None` if the transaction's signature is invalid, see also [Self::recover_signer]. + /// Returns `None` if the transaction's signature is invalid, see also [`Self::recover_signer`]. pub fn recover_signer(&self) -> Option
{ self.signature().recover_signer(self.signature_hash()) } @@ -146,7 +146,7 @@ impl PooledTransactionsElement { /// Tries to recover signer and return [`PooledTransactionsElementEcRecovered`]. /// /// Returns `Err(Self)` if the transaction's signature is invalid, see also - /// [Self::recover_signer]. + /// [`Self::recover_signer`]. pub fn try_into_ecrecovered(self) -> Result { match self.recover_signer() { None => Err(self), @@ -248,7 +248,7 @@ impl PooledTransactionsElement { TransactionSignedEcRecovered::from_signed_transaction(self.into_transaction(), signer) } - /// Returns the inner [TransactionSigned]. + /// Returns the inner [`TransactionSigned`]. pub fn into_transaction(self) -> TransactionSigned { match self { Self::Legacy { transaction, signature, hash } => { @@ -292,7 +292,7 @@ impl PooledTransactionsElement { /// Returns the enveloped encoded transactions. /// - /// See also [TransactionSigned::encode_enveloped] + /// See also [`TransactionSigned::encode_enveloped`] pub fn envelope_encoded(&self) -> Bytes { let mut buf = Vec::new(); self.encode_enveloped(&mut buf); @@ -338,7 +338,7 @@ impl PooledTransactionsElement { matches!(self, Self::BlobTransaction(_)) } - /// Returns the [TxLegacy] variant if the transaction is a legacy transaction. + /// Returns the [`TxLegacy`] variant if the transaction is a legacy transaction. pub const fn as_legacy(&self) -> Option<&TxLegacy> { match self { Self::Legacy { transaction, .. } => Some(transaction), @@ -346,7 +346,7 @@ impl PooledTransactionsElement { } } - /// Returns the [TxEip2930] variant if the transaction is an EIP-2930 transaction. + /// Returns the [`TxEip2930`] variant if the transaction is an EIP-2930 transaction. pub const fn as_eip2930(&self) -> Option<&TxEip2930> { match self { Self::Eip2930 { transaction, .. } => Some(transaction), @@ -354,7 +354,7 @@ impl PooledTransactionsElement { } } - /// Returns the [TxEip1559] variant if the transaction is an EIP-1559 transaction. + /// Returns the [`TxEip1559`] variant if the transaction is an EIP-1559 transaction. pub const fn as_eip1559(&self) -> Option<&TxEip1559> { match self { Self::Eip1559 { transaction, .. } => Some(transaction), @@ -362,7 +362,7 @@ impl PooledTransactionsElement { } } - /// Returns the [TxEip4844] variant if the transaction is an EIP-4844 transaction. + /// Returns the [`TxEip4844`] variant if the transaction is an EIP-4844 transaction. pub const fn as_eip4844(&self) -> Option<&TxEip4844> { match self { Self::BlobTransaction(tx) => Some(&tx.transaction), @@ -374,12 +374,12 @@ impl PooledTransactionsElement { /// transaction. /// /// This is the number of blobs times the - /// [DATA_GAS_PER_BLOB](crate::constants::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. + /// [`DATA_GAS_PER_BLOB`](crate::constants::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. pub fn blob_gas_used(&self) -> Option { self.as_eip4844().map(TxEip4844::blob_gas) } - /// Max fee per blob gas for eip4844 transaction [TxEip4844]. + /// Max fee per blob gas for eip4844 transaction [`TxEip4844`]. /// /// Returns `None` for non-eip4844 transactions. /// @@ -403,7 +403,7 @@ impl PooledTransactionsElement { } } - /// Max fee per gas for eip1559 transaction, for legacy transactions this is gas_price. + /// Max fee per gas for eip1559 transaction, for legacy transactions this is `gas_price`. /// /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). 
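The `try_from_broadcast` doc in the pooled.rs hunk above states the p2p rule: blob transactions may not be built from broadcast messages, because the sidecar only travels in `PooledTransactions` responses. A sketch of that conversion with simplified stand-in enums:

```rust
#[derive(Debug)]
enum Transaction {
    Legacy,
    Eip1559,
    Eip4844,
}

#[derive(Debug)]
enum Pooled {
    Legacy,
    Eip1559,
}

fn try_from_broadcast(tx: Transaction) -> Result<Pooled, Transaction> {
    match tx {
        Transaction::Legacy => Ok(Pooled::Legacy),
        Transaction::Eip1559 => Ok(Pooled::Eip1559),
        // Blob transactions are never broadcast; return the input as the error.
        tx @ Transaction::Eip4844 => Err(tx),
    }
}

fn main() {
    assert!(try_from_broadcast(Transaction::Legacy).is_ok());
    assert!(try_from_broadcast(Transaction::Eip1559).is_ok());
    assert!(try_from_broadcast(Transaction::Eip4844).is_err());
}
```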
pub const fn max_fee_per_gas(&self) -> u128 { @@ -417,7 +417,7 @@ impl PooledTransactionsElement { } impl Encodable for PooledTransactionsElement { - /// Encodes an enveloped post EIP-4844 [PooledTransactionsElement]. + /// Encodes an enveloped post EIP-4844 [`PooledTransactionsElement`]. /// /// For legacy transactions, this encodes the transaction as `rlp(tx-data)`. /// @@ -474,7 +474,7 @@ impl Encodable for PooledTransactionsElement { } impl Decodable for PooledTransactionsElement { - /// Decodes an enveloped post EIP-4844 [PooledTransactionsElement]. + /// Decodes an enveloped post EIP-4844 [`PooledTransactionsElement`]. /// /// CAUTION: this expects that `buf` is `rlp(tx_type || rlp(tx-data))` fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { @@ -683,8 +683,8 @@ impl PooledTransactionsElementEcRecovered { Self { transaction, signer } } - /// Converts from an EIP-4844 [TransactionSignedEcRecovered] to a - /// [PooledTransactionsElementEcRecovered] with the given sidecar. + /// Converts from an EIP-4844 [`TransactionSignedEcRecovered`] to a + /// [`PooledTransactionsElementEcRecovered`] with the given sidecar. /// /// Returns the transaction is not an EIP-4844 transaction. pub fn try_from_blob_transaction( diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index b4c82b35a..da273db36 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -30,10 +30,10 @@ pub struct BlobTransaction { } impl BlobTransaction { - /// Constructs a new [BlobTransaction] from a [TransactionSigned] and a - /// [BlobTransactionSidecar]. + /// Constructs a new [`BlobTransaction`] from a [`TransactionSigned`] and a + /// [`BlobTransactionSidecar`]. /// - /// Returns an error if the signed transaction is not [TxEip4844] + /// Returns an error if the signed transaction is not [`TxEip4844`] pub fn try_from_signed( tx: TransactionSigned, sidecar: BlobTransactionSidecar, @@ -50,7 +50,7 @@ impl BlobTransaction { /// Verifies that the transaction's blob data, commitments, and proofs are all valid. /// - /// See also [TxEip4844::validate_blob] + /// See also [`TxEip4844::validate_blob`] #[cfg(feature = "c-kzg")] pub fn validate( &self, @@ -59,7 +59,7 @@ impl BlobTransaction { self.transaction.validate_blob(&self.sidecar, proof_settings) } - /// Splits the [BlobTransaction] into its [TransactionSigned] and [BlobTransactionSidecar] + /// Splits the [`BlobTransaction`] into its [`TransactionSigned`] and [`BlobTransactionSidecar`] /// components. pub fn into_parts(self) -> (TransactionSigned, BlobTransactionSidecar) { let transaction = TransactionSigned { @@ -71,7 +71,7 @@ impl BlobTransaction { (transaction, self.sidecar) } - /// Encodes the [BlobTransaction] fields as RLP, with a tx type. If `with_header` is `false`, + /// Encodes the [`BlobTransaction`] fields as RLP, with a tx type. If `with_header` is `false`, /// the following will be encoded: /// `tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs])` /// @@ -99,7 +99,7 @@ impl BlobTransaction { self.encode_inner(out); } - /// Encodes the [BlobTransaction] fields as RLP, with the following format: + /// Encodes the [`BlobTransaction`] fields as RLP, with the following format: /// `rlp([transaction_payload_body, blobs, commitments, proofs])` /// /// where `transaction_payload_body` is a list: @@ -201,7 +201,7 @@ impl BlobTransaction { blob_tx_header.length() + blob_tx_header.payload_length } - /// Decodes a [BlobTransaction] from RLP. 
This expects the encoding to be: + /// Decodes a [`BlobTransaction`] from RLP. This expects the encoding to be: /// `rlp([transaction_payload_body, blobs, commitments, proofs])` /// /// where `transaction_payload_body` is a list: diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index c68a46b61..d564c58ab 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -58,13 +58,13 @@ impl Compact for Signature { impl Signature { /// Output the length of the signature without the length of the RLP header, using the legacy - /// scheme with EIP-155 support depends on chain_id. + /// scheme with EIP-155 support depending on `chain_id`. pub(crate) fn payload_len_with_eip155_chain_id(&self, chain_id: Option) -> usize { self.v(chain_id).length() + self.r.length() + self.s.length() } /// Encode the `v`, `r`, `s` values without a RLP header. - /// Encodes the `v` value using the legacy scheme with EIP-155 support depends on chain_id. + /// Encodes the `v` value using the legacy scheme with EIP-155 support depending on `chain_id`. pub(crate) fn encode_with_eip155_chain_id( &self, out: &mut dyn alloy_rlp::BufMut, @@ -75,7 +75,7 @@ impl Signature { self.s.encode(out); } - /// Output the `v` of the signature depends on chain_id + /// Output the `v` of the signature depending on `chain_id` #[inline] #[allow(clippy::missing_const_for_fn)] pub fn v(&self, chain_id: Option) -> u64 { @@ -197,7 +197,7 @@ impl Signature { } } -/// Outputs (odd_y_parity, chain_id) from the `v` value. +/// Outputs (`odd_y_parity`, `chain_id`) from the `v` value. /// This doesn't check validity of the `v` value for optimism. #[inline] pub const fn extract_chain_id(v: u64) -> alloy_rlp::Result<(bool, Option)> { diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 975f70317..7530fda08 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -4,20 +4,20 @@ use bytes::Buf; use reth_codecs::{derive_arbitrary, Compact}; use serde::{Deserialize, Serialize}; -/// Identifier for legacy transaction, however [TxLegacy](crate::TxLegacy) this is technically not +/// Identifier for a legacy transaction, though [`TxLegacy`](crate::TxLegacy) is technically not /// typed. pub const LEGACY_TX_TYPE_ID: u8 = 0; -/// Identifier for [TxEip2930](crate::TxEip2930) transaction. +/// Identifier for [`TxEip2930`](crate::TxEip2930) transaction. pub const EIP2930_TX_TYPE_ID: u8 = 1; -/// Identifier for [TxEip1559](crate::TxEip1559) transaction. +/// Identifier for [`TxEip1559`](crate::TxEip1559) transaction. pub const EIP1559_TX_TYPE_ID: u8 = 2; -/// Identifier for [TxEip4844](crate::TxEip4844) transaction. +/// Identifier for [`TxEip4844`](crate::TxEip4844) transaction. pub const EIP4844_TX_TYPE_ID: u8 = 3; -/// Identifier for [TxDeposit](crate::TxDeposit) transaction. +/// Identifier for [`TxDeposit`](crate::TxDeposit) transaction. #[cfg(feature = "optimism")] pub const DEPOSIT_TX_TYPE_ID: u8 = 126; diff --git a/crates/primitives/src/transaction/variant.rs b/crates/primitives/src/transaction/variant.rs index eb9e541f9..b3f7a00be 100644 --- a/crates/primitives/src/transaction/variant.rs +++ b/crates/primitives/src/transaction/variant.rs @@ -43,7 +43,7 @@ impl TransactionSignedVariant { /// Returns the signer of the transaction. /// - /// If the transaction is of not of [TransactionSignedEcRecovered] it will be recovered.
+ /// If the transaction is not of [`TransactionSignedEcRecovered`] it will be recovered. pub fn signer(&self) -> Option
{ match self { Self::SignedNoHash(tx) => tx.recover_signer(), @@ -52,7 +52,7 @@ impl TransactionSignedVariant { } } - /// Returns [TransactionSigned] type + /// Returns the [`TransactionSigned`] type /// else None pub const fn as_signed(&self) -> Option<&TransactionSigned> { match self { @@ -70,22 +70,22 @@ impl TransactionSignedVariant { } } - /// Returns true if the transaction is of [TransactionSigned] variant + /// Returns true if the transaction is of [`TransactionSigned`] variant pub const fn is_signed(&self) -> bool { matches!(self, Self::Signed(_)) } - /// Returns true if the transaction is of [TransactionSignedNoHash] variant + /// Returns true if the transaction is of [`TransactionSignedNoHash`] variant pub const fn is_signed_no_hash(&self) -> bool { matches!(self, Self::SignedNoHash(_)) } - /// Returns true if the transaction is of [TransactionSignedEcRecovered] variant + /// Returns true if the transaction is of [`TransactionSignedEcRecovered`] variant pub const fn is_signed_ec_recovered(&self) -> bool { matches!(self, Self::SignedEcRecovered(_)) } - /// Consumes the [TransactionSignedVariant] and returns the consumed [Transaction] + /// Consumes the [`TransactionSignedVariant`] and returns the consumed [Transaction] pub fn into_raw(self) -> Transaction { match self { Self::SignedNoHash(tx) => tx.transaction, @@ -94,7 +94,7 @@ impl TransactionSignedVariant { } } - /// Consumes the [TransactionSignedVariant] and returns the consumed [TransactionSigned] + /// Consumes the [`TransactionSignedVariant`] and returns the consumed [`TransactionSigned`] pub fn into_signed(self) -> TransactionSigned { match self { Self::SignedNoHash(tx) => tx.with_hash(), @@ -103,20 +103,20 @@ impl TransactionSignedVariant { } } - /// Consumes the [TransactionSignedVariant] and converts it into a - /// [TransactionSignedEcRecovered] + /// Consumes the [`TransactionSignedVariant`] and converts it into a + /// [`TransactionSignedEcRecovered`] /// - /// If the variants is not a [TransactionSignedEcRecovered] it will recover the sender. + /// If the variant is not a [`TransactionSignedEcRecovered`] it will recover the sender. /// /// Returns `None` if the transaction's signature is invalid pub fn into_signed_ec_recovered(self) -> Option { self.try_into_signed_ec_recovered().ok() } - /// Consumes the [TransactionSignedVariant] and converts it into a - /// [TransactionSignedEcRecovered] + /// Consumes the [`TransactionSignedVariant`] and converts it into a + /// [`TransactionSignedEcRecovered`] /// - /// If the variants is not a [TransactionSignedEcRecovered] it will recover the sender. + /// If the variant is not a [`TransactionSignedEcRecovered`] it will recover the sender. /// /// Returns an error if the transaction's signature is invalid. pub fn try_into_signed_ec_recovered( diff --git a/crates/primitives/src/trie/subnode.rs b/crates/primitives/src/trie/subnode.rs index d151c21ef..556282871 100644 --- a/crates/primitives/src/trie/subnode.rs +++ b/crates/primitives/src/trie/subnode.rs @@ -3,7 +3,7 @@ use bytes::Buf; use reth_codecs::Compact; /// Walker sub node for storing intermediate state root calculation state in the database. -/// See [crate::stage::MerkleCheckpoint]. +/// See [`crate::stage::MerkleCheckpoint`]. #[derive(Debug, Clone, PartialEq, Eq, Default)] pub struct StoredSubNode { /// The key of the current node.
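The signature hunks above describe the EIP-155 round trip between `v` and (`odd_y_parity`, `chain_id`): a legacy signature encodes `v = 27 + odd_y_parity`, while an EIP-155 signature encodes `v = chain_id * 2 + 35 + odd_y_parity`. A standalone sketch of that arithmetic (not reth's code, which also returns RLP errors and special-cases Optimism):

```rust
// EIP-155 `v` encoding and its inverse, per the signature docs above.

/// Encode `v` from the signature's y-parity and an optional chain id.
const fn v(odd_y_parity: bool, chain_id: Option<u64>) -> u64 {
    match chain_id {
        Some(id) => id * 2 + 35 + odd_y_parity as u64, // EIP-155
        None => 27 + odd_y_parity as u64,              // pre-EIP-155 legacy
    }
}

/// Recover `(odd_y_parity, chain_id)` from `v`; `None` for malformed values.
const fn extract_chain_id(v: u64) -> Option<(bool, Option<u64>)> {
    match v {
        27 | 28 => Some((v == 28, None)),
        35.. => Some(((v - 35) % 2 != 0, Some((v - 35) / 2))),
        _ => None,
    }
}

fn main() {
    // Mainnet (chain id 1): v is 37 or 38.
    assert_eq!(v(false, Some(1)), 37);
    assert_eq!(extract_chain_id(38), Some((true, Some(1))));
    assert_eq!(extract_chain_id(27), Some((false, None)));
}
```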
diff --git a/crates/prune/src/builder.rs b/crates/prune/src/builder.rs index 8cf3e11bd..ffdb2954b 100644 --- a/crates/prune/src/builder.rs +++ b/crates/prune/src/builder.rs @@ -21,7 +21,7 @@ pub struct PrunerBuilder { prune_delete_limit: usize, /// Time a pruner job can run before timing out. timeout: Option, - /// The finished height of all ExEx's. + /// The finished height of all `ExEx`'s. finished_exex_height: watch::Receiver, } @@ -29,7 +29,7 @@ impl PrunerBuilder { /// Default timeout for a prune run. pub const DEFAULT_TIMEOUT: Duration = Duration::from_millis(100); - /// Creates a new [PrunerBuilder] from the given [PruneConfig]. + /// Creates a new [`PrunerBuilder`] from the given [`PruneConfig`]. pub fn new(pruner_config: PruneConfig) -> Self { Self::default() .block_interval(pruner_config.block_interval) @@ -69,7 +69,7 @@ impl PrunerBuilder { self } - /// Sets the receiver for the finished height of all ExEx's. + /// Sets the receiver for the finished height of all `ExEx`'s. pub fn finished_exex_height( mut self, finished_exex_height: watch::Receiver, diff --git a/crates/prune/src/metrics.rs b/crates/prune/src/metrics.rs index 82215969c..428c1f784 100644 --- a/crates/prune/src/metrics.rs +++ b/crates/prune/src/metrics.rs @@ -15,8 +15,8 @@ pub(crate) struct Metrics { } impl Metrics { - /// Returns existing or initializes a new instance of [PrunerSegmentMetrics] for the provided - /// [PruneSegment]. + /// Returns existing or initializes a new instance of [`PrunerSegmentMetrics`] for the provided + /// [`PruneSegment`]. pub(crate) fn get_prune_segment_metrics( &mut self, segment: PruneSegment, diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index c6e0fffae..ae6a50131 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -21,15 +21,15 @@ use std::{ use tokio::sync::watch; use tracing::debug; -/// Result of [Pruner::run] execution. +/// Result of [`Pruner::run`] execution. pub type PrunerResult = Result; -/// The pruner type itself with the result of [Pruner::run] +/// The pruner type itself with the result of [`Pruner::run`] pub type PrunerWithResult = (Pruner, PrunerResult); type PrunerStats = BTreeMap; -/// Pruning routine. Main pruning logic happens in [Pruner::run]. +/// Pruning routine. Main pruning logic happens in [`Pruner::run`]. #[derive(Debug)] pub struct Pruner { provider_factory: ProviderFactory, @@ -48,7 +48,7 @@ pub struct Pruner { prune_max_blocks_per_run: usize, /// Maximum time for a one pruner run. timeout: Option, - /// The finished height of all ExEx's. + /// The finished height of all `ExEx`'s. finished_exex_height: watch::Receiver, #[doc(hidden)] metrics: Metrics, @@ -159,10 +159,10 @@ impl Pruner { } /// Prunes the segments that the [Pruner] was initialized with, and the segments that needs to - /// be pruned according to the highest static_files. Segments are parts of the database that + /// be pruned according to the highest `static_files`. Segments are parts of the database that /// represent one or more tables. /// - /// Returns [PrunerStats], total number of entries pruned, and [PruneProgress]. + /// Returns [`PrunerStats`], total number of entries pruned, and [`PruneProgress`]. fn prune_segments( &mut self, provider: &DatabaseProviderRW, @@ -248,8 +248,8 @@ impl Pruner { } /// Returns pre-configured segments that needs to be pruned according to the highest - /// static_files for [PruneSegment::Transactions], [PruneSegment::Headers] and - /// [PruneSegment::Receipts]. 
+ /// `static_files` for [`PruneSegment::Transactions`], [`PruneSegment::Headers`] and + /// [`PruneSegment::Receipts`]. fn static_file_segments(&self) -> Vec>> { let mut segments = Vec::>>::new(); @@ -304,13 +304,13 @@ impl Pruner { } } - /// Adjusts the tip block number to the finished ExEx height. This is needed to not prune more - /// data than ExExs have processed. Depending on the height: - /// - [FinishedExExHeight::NoExExs] returns the tip block number as is as no adjustment for - /// ExExs is needed. - /// - [FinishedExExHeight::NotReady] returns `None` as not all ExExs have emitted a + /// Adjusts the tip block number to the finished `ExEx` height. This is needed to not prune more + /// data than `ExExs` have processed. Depending on the height: + /// - [`FinishedExExHeight::NoExExs`] returns the tip block number as is as no adjustment for + /// `ExExs` is needed. + /// - [`FinishedExExHeight::NotReady`] returns `None` as not all `ExExs` have emitted a /// `FinishedHeight` event yet. - /// - [FinishedExExHeight::Height] returns the finished ExEx height. + /// - [`FinishedExExHeight::Height`] returns the finished `ExEx` height. fn adjust_tip_block_number_to_finished_exex_height( &self, tip_block_number: BlockNumber, diff --git a/crates/prune/src/segments/account_history.rs b/crates/prune/src/segments/account_history.rs index ded3403ac..5830de8dd 100644 --- a/crates/prune/src/segments/account_history.rs +++ b/crates/prune/src/segments/account_history.rs @@ -11,8 +11,8 @@ use tracing::{instrument, trace}; /// Number of account history tables to prune in one step. /// -/// Account History consists of two tables: [tables::AccountChangeSets] and -/// [tables::AccountsHistory]. We want to prune them to the same block number. +/// Account History consists of two tables: [`tables::AccountChangeSets`] and +/// [`tables::AccountsHistory`]. We want to prune them to the same block number. const ACCOUNT_HISTORY_TABLES_TO_PRUNE: usize = 2; #[derive(Debug)] diff --git a/crates/prune/src/segments/mod.rs b/crates/prune/src/segments/mod.rs index f79c1d2c0..c914e1782 100644 --- a/crates/prune/src/segments/mod.rs +++ b/crates/prune/src/segments/mod.rs @@ -33,10 +33,10 @@ pub use transactions::Transactions; /// A segment represents a pruning of some portion of the data. /// /// Segments are called from [Pruner](crate::Pruner) with the following lifecycle: -/// 1. Call [Segment::prune] with `delete_limit` of [PruneInput]. -/// 2. If [Segment::prune] returned a [Some] in `checkpoint` of [PruneOutput], call -/// [Segment::save_checkpoint]. -/// 3. Subtract `pruned` of [PruneOutput] from `delete_limit` of next [PruneInput]. +/// 1. Call [`Segment::prune`] with `delete_limit` of [`PruneInput`]. +/// 2. If [`Segment::prune`] returned a [Some] in `checkpoint` of [`PruneOutput`], call +/// [`Segment::save_checkpoint`]. +/// 3. Subtract `pruned` of [`PruneOutput`] from `delete_limit` of next [`PruneInput`]. pub trait Segment: Debug + Send + Sync { /// Segment of data that's pruned. fn segment(&self) -> PruneSegment; @@ -44,14 +44,14 @@ pub trait Segment: Debug + Send + Sync { /// Prune mode with which the segment was initialized fn mode(&self) -> Option; - /// Prune data for [Self::segment] using the provided input. + /// Prune data for [`Self::segment`] using the provided input. fn prune( &self, provider: &DatabaseProviderRW, input: PruneInput, ) -> Result; - /// Save checkpoint for [Self::segment] to the database. + /// Save checkpoint for [`Self::segment`] to the database. 
fn save_checkpoint( &self, provider: &DatabaseProviderRW, @@ -61,7 +61,7 @@ pub trait Segment: Debug + Send + Sync { } } -/// Segment pruning input, see [Segment::prune]. +/// Segment pruning input, see [`Segment::prune`]. #[derive(Debug)] #[cfg_attr(test, derive(Clone))] pub struct PruneInput { @@ -151,7 +151,7 @@ impl PruneInput { } } -/// Segment pruning output, see [Segment::prune]. +/// Segment pruning output, see [`Segment::prune`]. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct PruneOutput { pub(crate) progress: PruneProgress, @@ -162,13 +162,13 @@ pub struct PruneOutput { } impl PruneOutput { - /// Returns a [PruneOutput] with `done = true`, `pruned = 0` and `checkpoint = None`. + /// Returns a [`PruneOutput`] with `done = true`, `pruned = 0` and `checkpoint = None`. /// Use when no pruning is needed. pub(crate) const fn done() -> Self { Self { progress: PruneProgress::Finished, pruned: 0, checkpoint: None } } - /// Returns a [PruneOutput] with `done = false`, `pruned = 0` and `checkpoint = None`. + /// Returns a [`PruneOutput`] with `done = false`, `pruned = 0` and `checkpoint = None`. /// Use when pruning is needed but cannot be done. pub(crate) const fn not_done( reason: PruneInterruptReason, @@ -187,7 +187,7 @@ pub(crate) struct PruneOutputCheckpoint { } impl PruneOutputCheckpoint { - /// Converts [PruneOutputCheckpoint] to [PruneCheckpoint] with the provided [PruneMode] + /// Converts [`PruneOutputCheckpoint`] to [`PruneCheckpoint`] with the provided [`PruneMode`] pub(crate) const fn as_prune_checkpoint(&self, prune_mode: PruneMode) -> PruneCheckpoint { PruneCheckpoint { block_number: self.block_number, tx_number: self.tx_number, prune_mode } } diff --git a/crates/prune/src/segments/set.rs b/crates/prune/src/segments/set.rs index 0843589ec..2fb64d19e 100644 --- a/crates/prune/src/segments/set.rs +++ b/crates/prune/src/segments/set.rs @@ -12,7 +12,7 @@ pub struct SegmentSet { } impl SegmentSet { - /// Returns empty [SegmentSet] collection. + /// Returns empty [`SegmentSet`] collection. pub fn new() -> Self { Self::default() } @@ -31,12 +31,12 @@ impl SegmentSet { self } - /// Consumes [SegmentSet] and returns a [Vec]. + /// Consumes [`SegmentSet`] and returns a [Vec]. pub fn into_vec(self) -> Vec>> { self.inner } - /// Creates a [SegmentSet] from an existing [PruneModes]. + /// Creates a [`SegmentSet`] from an existing [`PruneModes`]. pub fn from_prune_modes(prune_modes: PruneModes) -> Self { let PruneModes { sender_recovery, diff --git a/crates/prune/src/segments/storage_history.rs b/crates/prune/src/segments/storage_history.rs index 3d0f18d97..fe8ed3b1f 100644 --- a/crates/prune/src/segments/storage_history.rs +++ b/crates/prune/src/segments/storage_history.rs @@ -15,8 +15,8 @@ use tracing::{instrument, trace}; /// Number of storage history tables to prune in one step /// -/// Storage History consists of two tables: [tables::StorageChangeSets] and -/// [tables::StoragesHistory]. We want to prune them to the same block number. +/// Storage History consists of two tables: [`tables::StorageChangeSets`] and +/// [`tables::StoragesHistory`]. We want to prune them to the same block number. 
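The `Segment` lifecycle spelled out above (prune with the remaining `delete_limit`, persist any returned checkpoint, subtract what was pruned) is easiest to see as a loop. A sketch with hypothetical minimal types standing in for reth's `Segment`/`PruneInput`/`PruneOutput`:

```rust
// Sketch of the Pruner/Segment lifecycle described above. All types are
// simplified stand-ins; the real trait also threads a database provider.
struct PruneInput { delete_limit: usize }
struct PruneOutput { pruned: usize, checkpoint: Option<u64> }

trait Segment {
    fn prune(&self, input: &PruneInput) -> PruneOutput;
    fn save_checkpoint(&self, block: u64);
}

fn prune_segments(segments: &[Box<dyn Segment>], mut delete_limit: usize) {
    for segment in segments {
        if delete_limit == 0 {
            break; // budget exhausted; remaining segments wait for the next run
        }
        // 1. Prune with the current delete budget.
        let output = segment.prune(&PruneInput { delete_limit });
        // 2. Persist the checkpoint if the segment produced one.
        if let Some(block) = output.checkpoint {
            segment.save_checkpoint(block);
        }
        // 3. Carry the remaining budget into the next segment.
        delete_limit = delete_limit.saturating_sub(output.pruned);
    }
}

struct Headers;
impl Segment for Headers {
    fn prune(&self, input: &PruneInput) -> PruneOutput {
        PruneOutput { pruned: input.delete_limit.min(10), checkpoint: Some(42) }
    }
    fn save_checkpoint(&self, _block: u64) {}
}

fn main() {
    prune_segments(&[Box::new(Headers)], 100);
}
```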
const STORAGE_HISTORY_TABLES_TO_PRUNE: usize = 2; #[derive(Debug)] diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index bf0661ca3..936dac185 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -93,7 +93,7 @@ impl BlockBatchRecord { std::mem::take(&mut self.requests) } - /// Returns the [BundleRetention] for the given block based on the configured prune modes. + /// Returns the [`BundleRetention`] for the given block based on the configured prune modes. pub fn bundle_retention(&self, block_number: BlockNumber) -> BundleRetention { if self.tip.map_or(true, |tip| { !self diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 02a9cdd2b..fc40f474a 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -61,18 +61,18 @@ impl EvmStateProvider for T { } } -/// A [Database] and [DatabaseRef] implementation that uses [EvmStateProvider] as the underlying +/// A [Database] and [`DatabaseRef`] implementation that uses [`EvmStateProvider`] as the underlying /// data source. #[derive(Debug, Clone)] pub struct StateProviderDatabase(pub DB); impl StateProviderDatabase { - /// Create new State with generic StateProvider. + /// Create new State with generic `StateProvider`. pub const fn new(db: DB) -> Self { Self(db) } - /// Consume State and return inner StateProvider. + /// Consume State and return inner `StateProvider`. pub fn into_inner(self) -> DB { self.0 } diff --git a/crates/revm/src/state_change.rs b/crates/revm/src/state_change.rs index e61a12b24..e3fe4403d 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/revm/src/state_change.rs @@ -139,7 +139,7 @@ fn eip2935_block_hash_slot>( } /// Applies the pre-block call to the [EIP-4788] beacon block root contract, using the given block, -/// [ChainSpec], EVM. +/// [`ChainSpec`], EVM. /// /// If Cancun is not activated or the block is the genesis block, then this is a no-op, and no /// state changes are made. diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 44b24bf01..8990b342f 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -82,7 +82,7 @@ where { /// Start responding to connections requests. /// - /// This will run on the tokio runtime until the server is stopped or the ServerHandle is + /// This will run on the tokio runtime until the server is stopped or the `ServerHandle` is /// dropped. /// /// ``` @@ -273,7 +273,7 @@ pub(crate) struct ServiceData { /// /// This is used for subscriptions. pub(crate) method_sink: MethodSink, - /// ServerConfig + /// `ServerConfig` pub(crate) server_cfg: Settings, } @@ -342,7 +342,7 @@ impl RpcServiceBuilder { } } -/// JsonRPSee service compatible with `tower`. +/// `JsonRPSee` service compatible with `tower`. /// /// # Note /// This is similar to [`hyper::service::service_fn`](https://docs.rs/hyper/latest/hyper/service/fn.service_fn.html). diff --git a/crates/rpc/ipc/src/server/rpc_service.rs b/crates/rpc/ipc/src/server/rpc_service.rs index edc615de3..d98564bda 100644 --- a/crates/rpc/ipc/src/server/rpc_service.rs +++ b/crates/rpc/ipc/src/server/rpc_service.rs @@ -20,7 +20,7 @@ pub struct RpcService { cfg: RpcServiceCfg, } -/// Configuration of the RpcService. +/// Configuration of the `RpcService`. 
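`StateProviderDatabase` above is a classic newtype adapter: wrap a generic provider, expose `new`/`into_inner`, and implement the consumer's trait on the wrapper by delegation. A generic sketch with invented trait names (the real traits are `EvmStateProvider` on one side and revm's `Database`/`DatabaseRef` on the other):

```rust
// Newtype-adapter sketch mirroring StateProviderDatabase above. Both traits
// are hypothetical stand-ins to keep the example dependency-free.
trait StateSource {
    fn balance(&self, account: &str) -> u128;
}

trait KeyValueDb {
    fn get(&self, key: &str) -> u128;
}

struct StateProviderDatabase<DB>(pub DB);

impl<DB> StateProviderDatabase<DB> {
    /// Create a new wrapper around a generic state source.
    const fn new(db: DB) -> Self {
        Self(db)
    }

    /// Consume the wrapper and return the inner source.
    fn into_inner(self) -> DB {
        self.0
    }
}

// The wrapper satisfies the consumer-side trait by delegating to the source.
impl<DB: StateSource> KeyValueDb for StateProviderDatabase<DB> {
    fn get(&self, key: &str) -> u128 {
        self.0.balance(key)
    }
}

struct InMemory;
impl StateSource for InMemory {
    fn balance(&self, _account: &str) -> u128 {
        1_000
    }
}

fn main() {
    let db = StateProviderDatabase::new(InMemory);
    assert_eq!(db.get("alice"), 1_000);
    let _inner: InMemory = db.into_inner();
}
```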
#[allow(dead_code)] #[derive(Clone, Debug)] pub(crate) enum RpcServiceCfg { diff --git a/crates/rpc/rpc-api/src/optimism.rs b/crates/rpc/rpc-api/src/optimism.rs index 3ff7c6ce3..783bd1760 100644 --- a/crates/rpc/rpc-api/src/optimism.rs +++ b/crates/rpc/rpc-api/src/optimism.rs @@ -161,9 +161,9 @@ pub struct PeerInfo { pub enr: String, pub addresses: Vec, pub protocols: Option>, - /// 0: "NotConnected", 1: "Connected", - /// 2: "CanConnect" (gracefully disconnected) - /// 3: "CannotConnect" (tried but failed) + /// 0: "`NotConnected`", 1: "Connected", + /// 2: "`CanConnect`" (gracefully disconnected) + /// 3: "`CannotConnect`" (tried but failed) pub connectedness: u8, /// 0: "Unknown", 1: "Inbound" (if the peer contacted us) /// 2: "Outbound" (if we connected to them) diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 70721ac10..5c2ad2600 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -102,7 +102,7 @@ where launch_with_eth_api(eth_api, eth_filter, engine_api, socket_addr, secret).await } -/// Configure and launch a _standalone_ auth server with existing EthApi implementation. +/// Configure and launch a _standalone_ auth server with existing `EthApi` implementation. pub async fn launch_with_eth_api( eth_api: EthApi, eth_filter: EthFilter, @@ -261,8 +261,8 @@ impl AuthServerConfigBuilder { /// Configures the JSON-RPC server /// - /// Note: this always configures an [EthSubscriptionIdProvider] - /// [IdProvider](jsonrpsee::server::IdProvider) for convenience. + /// Note: this always configures an [`EthSubscriptionIdProvider`] + /// [`IdProvider`](jsonrpsee::server::IdProvider) for convenience. pub fn with_server_config(mut self, config: ServerBuilder) -> Self { self.server_config = Some(config.set_id_provider(EthSubscriptionIdProvider::default())); self @@ -276,7 +276,7 @@ impl AuthServerConfigBuilder { /// Configures the IPC server /// - /// Note: this always configures an [EthSubscriptionIdProvider] + /// Note: this always configures an [`EthSubscriptionIdProvider`] pub fn with_ipc_config(mut self, config: IpcServerBuilder) -> Self { self.ipc_server_config = Some(config.set_id_provider(EthSubscriptionIdProvider::default())); self @@ -363,7 +363,7 @@ impl AuthRpcModule { /// A handle to the spawned auth server. /// -/// When this type is dropped or [AuthServerHandle::stop] has been called the server will be +/// When this type is dropped or [`AuthServerHandle::stop`] has been called the server will be /// stopped. 
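The numeric `connectedness` codes documented on `PeerInfo` above map naturally onto an enum with a fallible conversion; a sketch (the enum is hypothetical, the RPC type keeps the raw `u8` on the wire):

```rust
// Sketch: PeerInfo's `connectedness` byte as a typed enum.
#[derive(Debug, PartialEq)]
enum Connectedness {
    NotConnected,  // 0
    Connected,     // 1
    CanConnect,    // 2: gracefully disconnected
    CannotConnect, // 3: tried but failed
}

impl TryFrom<u8> for Connectedness {
    type Error = u8;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            0 => Ok(Self::NotConnected),
            1 => Ok(Self::Connected),
            2 => Ok(Self::CanConnect),
            3 => Ok(Self::CannotConnect),
            other => Err(other),
        }
    }
}

fn main() {
    assert_eq!(Connectedness::try_from(2), Ok(Connectedness::CanConnect));
    assert!(Connectedness::try_from(9).is_err());
}
```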
#[derive(Clone, Debug)] #[must_use = "Server stops if dropped"] diff --git a/crates/rpc/rpc-builder/src/constants.rs b/crates/rpc/rpc-builder/src/constants.rs index 26eac7535..6b2b54cdb 100644 --- a/crates/rpc/rpc-builder/src/constants.rs +++ b/crates/rpc/rpc-builder/src/constants.rs @@ -43,6 +43,6 @@ pub const DEFAULT_IPC_ENDPOINT: &str = "/tmp/reth.ipc"; #[cfg(windows)] pub const DEFAULT_ENGINE_API_IPC_ENDPOINT: &str = r"\\.\pipe\reth_engine_api.ipc"; -/// The engine_api IPC endpoint +/// The `engine_api` IPC endpoint #[cfg(not(windows))] pub const DEFAULT_ENGINE_API_IPC_ENDPOINT: &str = "/tmp/reth_engine_api.ipc"; diff --git a/crates/rpc/rpc-builder/src/cors.rs b/crates/rpc/rpc-builder/src/cors.rs index 46ff722ac..6124e9e4e 100644 --- a/crates/rpc/rpc-builder/src/cors.rs +++ b/crates/rpc/rpc-builder/src/cors.rs @@ -10,7 +10,7 @@ pub enum CorsDomainError { WildCardNotAllowed { input: String }, } -/// Creates a [CorsLayer] from the given domains +/// Creates a [`CorsLayer`] from the given domains pub(crate) fn create_cors_layer(http_cors_domains: &str) -> Result { let cors = match http_cors_domains.trim() { "*" => CorsLayer::new() diff --git a/crates/rpc/rpc-builder/src/error.rs b/crates/rpc/rpc-builder/src/error.rs index e4c0c76e4..cc55d232c 100644 --- a/crates/rpc/rpc-builder/src/error.rs +++ b/crates/rpc/rpc-builder/src/error.rs @@ -76,7 +76,7 @@ pub enum RpcError { } impl RpcError { - /// Converts an [io::Error] to a more descriptive `RpcError`. + /// Converts an [`io::Error`] to a more descriptive `RpcError`. pub fn server_error(io_error: io::Error, kind: ServerKind) -> Self { if io_error.kind() == ErrorKind::AddrInUse { return Self::AddressAlreadyInUse { kind, error: io_error } diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index bb7523fc1..afa0b9184 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -42,7 +42,7 @@ pub struct EthConfig { pub max_logs_per_response: usize, /// Gas limit for `eth_call` and call tracing RPC methods. /// - /// Defaults to [RPC_DEFAULT_GAS_CAP] + /// Defaults to [`RPC_DEFAULT_GAS_CAP`] pub rpc_gas_cap: u64, /// /// Sets TTL for stale filters diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 0bb398b2a..3e83e9941 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -1,20 +1,20 @@ //! Configure reth RPC. //! //! This crate contains several builder and config types that allow to configure the selection of -//! [RethRpcModule] specific to transports (ws, http, ipc). +//! [`RethRpcModule`] specific to transports (ws, http, ipc). //! -//! The [RpcModuleBuilder] is the main entrypoint for configuring all reth modules. It takes +//! The [`RpcModuleBuilder`] is the main entrypoint for configuring all reth modules. It takes //! instances of components required to start the servers, such as provider impls, network and -//! transaction pool. [RpcModuleBuilder::build] returns a [TransportRpcModules] which contains the -//! transport specific config (what APIs are available via this transport). +//! transaction pool. [`RpcModuleBuilder::build`] returns a [`TransportRpcModules`] which contains +//! the transport specific config (what APIs are available via this transport). //! -//! The [RpcServerConfig] is used to configure the [RpcServer] type which contains all transport -//! implementations (http server, ws server, ipc server). [RpcServer::start] requires the -//! 
[TransportRpcModules] so it can start the servers with the configured modules. +//! The [`RpcServerConfig`] is used to configure the [`RpcServer`] type which contains all transport +//! implementations (http server, ws server, ipc server). [`RpcServer::start`] requires the +//! [`TransportRpcModules`] so it can start the servers with the configured modules. //! //! # Examples //! -//! Configure only an http server with a selection of [RethRpcModule]s +//! Configure only an http server with a selection of [`RethRpcModule`]s //! //! ``` //! use reth_evm::ConfigureEvm; @@ -268,7 +268,7 @@ where .await } -/// A builder type to configure the RPC module: See [RpcModule] +/// A builder type to configure the RPC module: See [`RpcModule`] /// /// This is the main entrypoint and the easiest way to configure an RPC server. #[derive(Debug, Clone)] @@ -328,11 +328,11 @@ impl RpcModuleBuilder { provider, network, pool, executor, events, evm_config } } - /// Configure a [NoopTransactionPool] instance. + /// Configure a [`NoopTransactionPool`] instance. /// /// Caution: This will configure a pool API that does absolutely nothing. - /// This is only intended for allow easier setup of namespaces that depend on the [EthApi] which - /// requires a [TransactionPool] implementation. + /// This is only intended to allow easier setup of namespaces that depend on the [`EthApi`] + /// which requires a [`TransactionPool`] implementation. pub fn with_noop_pool( self, ) -> RpcModuleBuilder { @@ -359,11 +359,11 @@ impl RpcModuleBuilder { provider, network, pool, executor, events, evm_config } } - /// Configure a [NoopNetwork] instance. + /// Configure a [`NoopNetwork`] instance. /// /// Caution: This will configure a network API that does absolutely nothing. - /// This is only intended for allow easier setup of namespaces that depend on the [EthApi] which - /// requires a [NetworkInfo] implementation. + /// This is only intended to allow easier setup of namespaces that depend on the [`EthApi`] + /// which requires a [`NetworkInfo`] implementation. pub fn with_noop_network( self, ) -> RpcModuleBuilder { @@ -390,10 +390,10 @@ impl RpcModuleBuilder { provider, network, pool, executor, events, evm_config } } - /// Configure [TokioTaskExecutor] as the task executor to use for additional tasks. + /// Configure [`TokioTaskExecutor`] as the task executor to use for additional tasks. /// /// This will spawn additional tasks directly via `tokio::task::spawn`, See - /// [TokioTaskExecutor]. + /// [`TokioTaskExecutor`]. pub fn with_tokio_executor( self, ) -> RpcModuleBuilder { @@ -451,11 +451,12 @@ where Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm + 'static, { - /// Configures all [RpcModule]s specific to the given [TransportRpcModuleConfig] which can be - /// used to start the transport server(s). + /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can + /// be used to start the transport server(s). /// - /// This behaves exactly as [RpcModuleBuilder::build] for the [TransportRpcModules], but also - /// configures the auth (engine api) server, which exposes a subset of the `eth_` namespace. + /// This behaves exactly as [`RpcModuleBuilder::build`] for the [`TransportRpcModules`], but + /// also configures the auth (engine api) server, which exposes a subset of the `eth_` + /// namespace.
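`with_noop_pool` and `with_noop_network` above rely on the builder swapping one generic parameter at a time: each setter returns an `RpcModuleBuilder` with a different type in that slot. A reduced sketch of the pattern (two type parameters instead of six, and hypothetical noop types):

```rust
// Sketch of the generic-swapping builder used above: each `with_*` method
// returns a builder whose corresponding type parameter has been replaced.
struct RpcModuleBuilder<Pool, Network> {
    pool: Pool,
    network: Network,
}

struct NoopPool;
struct NoopNetwork;

impl<Pool, Network> RpcModuleBuilder<Pool, Network> {
    /// Swap in a pool that does nothing, replacing the `Pool` type parameter.
    fn with_noop_pool(self) -> RpcModuleBuilder<NoopPool, Network> {
        RpcModuleBuilder { pool: NoopPool, network: self.network }
    }

    /// Swap in a network that does nothing, replacing the `Network` parameter.
    fn with_noop_network(self) -> RpcModuleBuilder<Pool, NoopNetwork> {
        RpcModuleBuilder { pool: self.pool, network: NoopNetwork }
    }
}

fn main() {
    let _builder: RpcModuleBuilder<NoopPool, NoopNetwork> =
        RpcModuleBuilder { pool: (), network: () }
            .with_noop_pool()
            .with_noop_network();
}
```

The payoff is that namespaces needing only some components can be exercised without wiring up the rest.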
pub fn build_with_auth_server( self, module_config: TransportRpcModuleConfig, @@ -483,7 +484,8 @@ where (modules, auth_module, registry) } - /// Converts the builder into a [RethModuleRegistry] which can be used to create all components. + /// Converts the builder into a [`RethModuleRegistry`] which can be used to create all + /// components. /// /// This is useful for getting access to API handlers directly: /// @@ -518,10 +520,10 @@ where RethModuleRegistry::new(provider, pool, network, executor, events, config, evm_config) } - /// Configures all [RpcModule]s specific to the given [TransportRpcModuleConfig] which can be - /// used to start the transport server(s). + /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can + /// be used to start the transport server(s). /// - /// See also [RpcServer::start] + /// See also [`RpcServer::start`] pub fn build(self, module_config: TransportRpcModuleConfig) -> TransportRpcModules<()> { let mut modules = TransportRpcModules::default(); @@ -566,7 +568,7 @@ pub struct RpcModuleConfig { // === impl RpcModuleConfig === impl RpcModuleConfig { - /// Convenience method to create a new [RpcModuleConfigBuilder] + /// Convenience method to create a new [`RpcModuleConfigBuilder`] pub fn builder() -> RpcModuleConfigBuilder { RpcModuleConfigBuilder::default() } @@ -587,7 +589,7 @@ impl RpcModuleConfig { } } -/// Configures [RpcModuleConfig] +/// Configures [`RpcModuleConfig`] #[derive(Clone, Debug, Default)] pub struct RpcModuleConfigBuilder { eth: Option, @@ -602,7 +604,7 @@ impl RpcModuleConfigBuilder { self } - /// Consumes the type and creates the [RpcModuleConfig] + /// Consumes the type and creates the [`RpcModuleConfig`] pub fn build(self) -> RpcModuleConfig { let Self { eth } = self; RpcModuleConfig { eth: eth.unwrap_or_default() } @@ -628,7 +630,7 @@ impl RpcModuleConfigBuilder { /// /// # Example /// -/// Create a [RpcModuleSelection] from a selection. +/// Create a [`RpcModuleSelection`] from a selection. /// /// ``` /// use reth_rpc_builder::{RethRpcModule, RpcModuleSelection}; @@ -652,12 +654,12 @@ impl RpcModuleSelection { pub const STANDARD_MODULES: [RethRpcModule; 3] = [RethRpcModule::Eth, RethRpcModule::Net, RethRpcModule::Web3]; - /// Returns a selection of [RethRpcModule] with all [RethRpcModule::all_variants]. + /// Returns a selection of [`RethRpcModule`] with all [`RethRpcModule::all_variants`]. pub fn all_modules() -> HashSet { RethRpcModule::modules().into_iter().collect() } - /// Returns the [RpcModuleSelection::STANDARD_MODULES] as a selection. + /// Returns the [`RpcModuleSelection::STANDARD_MODULES`] as a selection. pub fn standard_modules() -> HashSet { HashSet::from(Self::STANDARD_MODULES) } @@ -669,7 +671,7 @@ impl RpcModuleSelection { Self::all_modules() } - /// Creates a new _unique_ [RpcModuleSelection::Selection] from the given items. + /// Creates a new _unique_ [`RpcModuleSelection::Selection`] from the given items. 
/// /// # Note /// @@ -677,7 +679,7 @@ impl RpcModuleSelection { /// /// # Example /// - /// Create a selection from the [RethRpcModule] string identifiers + /// Create a selection from the [`RethRpcModule`] string identifiers /// /// ``` /// use reth_rpc_builder::{RethRpcModule, RpcModuleSelection}; @@ -686,7 +688,7 @@ impl RpcModuleSelection { /// assert_eq!(config, RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin])); /// ``` /// - /// Create a unique selection from the [RethRpcModule] string identifiers + /// Create a unique selection from the [`RethRpcModule`] string identifiers /// /// ``` /// use reth_rpc_builder::{RethRpcModule, RpcModuleSelection}; @@ -719,7 +721,7 @@ impl RpcModuleSelection { } } - /// Returns an iterator over all configured [RethRpcModule] + /// Returns an iterator over all configured [`RethRpcModule`] pub fn iter_selection(&self) -> Box + '_> { match self { Self::All => Box::new(RethRpcModule::modules().into_iter()), @@ -728,7 +730,7 @@ impl RpcModuleSelection { } } - /// Clones the set of configured [RethRpcModule]. + /// Clones the set of configured [`RethRpcModule`]. pub fn to_selection(&self) -> HashSet { match self { Self::All => Self::all_modules(), @@ -737,7 +739,7 @@ impl RpcModuleSelection { } } - /// Converts the selection into a [HashSet]. + /// Converts the selection into a [`HashSet`]. pub fn into_selection(self) -> HashSet { match self { Self::All => Self::all_modules(), @@ -878,7 +880,7 @@ pub enum RethRpcModule { Ots, /// For single non-standard `eth_` namespace call `eth_callBundle` /// - /// This is separate from [RethRpcModule::Eth] because it is a non standardized call that + /// This is separate from [`RethRpcModule::Eth`] because it is a non standardized call that /// should be opt-in. EthCallBundle, } @@ -1045,7 +1047,7 @@ impl self.modules.values().cloned().collect() } - /// Returns a merged RpcModule + /// Returns a merged `RpcModule` pub fn module(&self) -> RpcModule<()> { let mut module = RpcModule::new(()); for methods in self.modules.values().cloned() { @@ -1060,12 +1062,12 @@ impl where Network: NetworkInfo + Peers + Clone + 'static, { - /// Instantiates AdminApi + /// Instantiates `AdminApi` pub fn admin_api(&self) -> AdminApi { AdminApi::new(self.network.clone(), self.provider.chain_spec()) } - /// Instantiates Web3Api + /// Instantiates `Web3Api` pub fn web3_api(&self) -> Web3Api { Web3Api::new(self.network.clone()) } @@ -1107,7 +1109,7 @@ where /// /// # Panics /// - /// If called outside of the tokio runtime. See also [Self::eth_api] + /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn register_eth(&mut self) -> &mut Self { let eth_api = self.eth_api(); self.modules.insert(RethRpcModule::Eth, eth_api.into_rpc().into()); @@ -1118,7 +1120,7 @@ where /// /// # Panics /// - /// If called outside of the tokio runtime. See also [Self::eth_api] + /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn register_ots(&mut self) -> &mut Self { let otterscan_api = self.otterscan_api(); self.modules.insert(RethRpcModule::Ots, otterscan_api.into_rpc().into()); @@ -1129,7 +1131,7 @@ where /// /// # Panics /// - /// If called outside of the tokio runtime. See also [Self::eth_api] + /// If called outside of the tokio runtime. 
See also [`Self::eth_api`] pub fn register_debug(&mut self) -> &mut Self { let debug_api = self.debug_api(); self.modules.insert(RethRpcModule::Debug, debug_api.into_rpc().into()); @@ -1140,7 +1142,7 @@ where /// /// # Panics /// - /// If called outside of the tokio runtime. See also [Self::eth_api] + /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn register_trace(&mut self) -> &mut Self { let trace_api = self.trace_api(); self.modules.insert(RethRpcModule::Trace, trace_api.into_rpc().into()); @@ -1171,7 +1173,7 @@ where /// Register Net Namespace /// - /// See also [Self::eth_api] + /// See also [`Self::eth_api`] /// /// # Panics /// @@ -1184,7 +1186,7 @@ where /// Register Reth namespace /// - /// See also [Self::eth_api] + /// See also [`Self::eth_api`] /// /// # Panics /// @@ -1219,8 +1221,8 @@ where modules } - /// Populates a new [RpcModule] based on the selected [RethRpcModule]s in the given - /// [RpcModuleSelection] + /// Populates a new [`RpcModule`] based on the selected [`RethRpcModule`]s in the given + /// [`RpcModuleSelection`] pub fn module_for(&mut self, config: &RpcModuleSelection) -> RpcModule<()> { let mut module = RpcModule::new(()); let all_methods = self.reth_methods(config.iter_selection()); @@ -1230,14 +1232,14 @@ where module } - /// Returns the [Methods] for the given [RethRpcModule] + /// Returns the [Methods] for the given [`RethRpcModule`] /// /// If this is the first time the namespace is requested, a new instance of API implementation /// will be created. /// /// # Panics /// - /// If called outside of the tokio runtime. See also [Self::eth_api] + /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn reth_methods( &mut self, namespaces: impl Iterator, @@ -1318,19 +1320,19 @@ where .collect::>() } - /// Returns the [EthStateCache] frontend + /// Returns the [`EthStateCache`] frontend /// - /// This will spawn exactly one [EthStateCache] service if this is the first time the cache is + /// This will spawn exactly one [`EthStateCache`] service if this is the first time the cache is /// requested. pub fn eth_cache(&mut self) -> EthStateCache { self.with_eth(|handlers| handlers.cache.clone()) } - /// Creates the [EthHandlers] type the first time this is called. + /// Creates the [`EthHandlers`] type the first time this is called. /// - /// This will spawn the required service tasks for [EthApi] for: - /// - [EthStateCache] - /// - [FeeHistoryCache] + /// This will spawn the required service tasks for [`EthApi`] for: + /// - [`EthStateCache`] + /// - [`FeeHistoryCache`] fn with_eth(&mut self, f: F) -> R where F: FnOnce(&EthHandlers) -> R, @@ -1409,18 +1411,18 @@ where EthHandlers { api, cache, filter, pubsub, blocking_task_pool } } - /// Returns the configured [EthHandlers] or creates it if it does not exist yet + /// Returns the configured [`EthHandlers`] or creates it if it does not exist yet /// /// # Panics /// - /// If called outside of the tokio runtime. See also [Self::eth_api] + /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn eth_handlers(&mut self) -> EthHandlers { self.with_eth(|handlers| handlers.clone()) } - /// Returns the configured [EthApi] or creates it if it does not exist yet + /// Returns the configured [`EthApi`] or creates it if it does not exist yet /// - /// Caution: This will spawn the necessary tasks required by the [EthApi]: [EthStateCache]. + /// Caution: This will spawn the necessary tasks required by the [`EthApi`]: [`EthStateCache`]. 
/// /// # Panics /// @@ -1429,57 +1431,57 @@ where self.with_eth(|handlers| handlers.api.clone()) } - /// Instantiates TraceApi + /// Instantiates `TraceApi` /// /// # Panics /// - /// If called outside of the tokio runtime. See also [Self::eth_api] + /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn trace_api(&mut self) -> TraceApi> { let eth = self.eth_handlers(); TraceApi::new(self.provider.clone(), eth.api, self.blocking_pool_guard.clone()) } - /// Instantiates [EthBundle] Api + /// Instantiates [`EthBundle`] Api /// /// # Panics /// - /// If called outside of the tokio runtime. See also [Self::eth_api] + /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn bundle_api(&mut self) -> EthBundle> { let eth_api = self.eth_api(); EthBundle::new(eth_api, self.blocking_pool_guard.clone()) } - /// Instantiates OtterscanApi + /// Instantiates `OtterscanApi` /// /// # Panics /// - /// If called outside of the tokio runtime. See also [Self::eth_api] + /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn otterscan_api(&mut self) -> OtterscanApi> { let eth_api = self.eth_api(); OtterscanApi::new(eth_api) } - /// Instantiates DebugApi + /// Instantiates `DebugApi` /// /// # Panics /// - /// If called outside of the tokio runtime. See also [Self::eth_api] + /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn debug_api(&mut self) -> DebugApi> { let eth_api = self.eth_api(); DebugApi::new(self.provider.clone(), eth_api, self.blocking_pool_guard.clone()) } - /// Instantiates NetApi + /// Instantiates `NetApi` /// /// # Panics /// - /// If called outside of the tokio runtime. See also [Self::eth_api] + /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn net_api(&mut self) -> NetApi> { let eth_api = self.eth_api(); NetApi::new(self.network.clone(), eth_api) } - /// Instantiates RethApi + /// Instantiates `RethApi` pub fn reth_api(&self) -> RethApi { RethApi::new(self.provider.clone(), Box::new(self.executor.clone())) } @@ -1494,8 +1496,8 @@ where /// /// Http and WS share the same settings: [`ServerBuilder`]. /// -/// Once the [RpcModule] is built via [RpcModuleBuilder] the servers can be started, See also -/// [ServerBuilder::build] and [Server::start](jsonrpsee::server::Server::start). +/// Once the [`RpcModule`] is built via [`RpcModuleBuilder`] the servers can be started, See also +/// [`ServerBuilder::build`] and [`Server::start`](jsonrpsee::server::Server::start). #[derive(Default, Debug)] pub struct RpcServerConfig { /// Configs for JSON-RPC Http. @@ -1538,8 +1540,8 @@ impl RpcServerConfig { /// Configures the http server /// - /// Note: this always configures an [EthSubscriptionIdProvider] [IdProvider] for convenience. - /// To set a custom [IdProvider], please use [Self::with_id_provider]. + /// Note: this always configures an [`EthSubscriptionIdProvider`] [`IdProvider`] for + /// convenience. To set a custom [`IdProvider`], please use [`Self::with_id_provider`]. pub fn with_http(mut self, config: ServerBuilder) -> Self { self.http_server_config = Some(config.set_id_provider(EthSubscriptionIdProvider::default())); @@ -1565,24 +1567,24 @@ impl RpcServerConfig { /// Configures the ws server /// - /// Note: this always configures an [EthSubscriptionIdProvider] [IdProvider] for convenience. - /// To set a custom [IdProvider], please use [Self::with_id_provider]. + /// Note: this always configures an [`EthSubscriptionIdProvider`] [`IdProvider`] for + /// convenience. 
To set a custom [`IdProvider`], please use [`Self::with_id_provider`]. pub fn with_ws(mut self, config: ServerBuilder) -> Self { self.ws_server_config = Some(config.set_id_provider(EthSubscriptionIdProvider::default())); self } - /// Configures the [SocketAddr] of the http server + /// Configures the [`SocketAddr`] of the http server /// - /// Default is [Ipv4Addr::LOCALHOST] and [DEFAULT_HTTP_RPC_PORT] + /// Default is [`Ipv4Addr::LOCALHOST`] and [`DEFAULT_HTTP_RPC_PORT`] pub const fn with_http_address(mut self, addr: SocketAddr) -> Self { self.http_addr = Some(addr); self } - /// Configures the [SocketAddr] of the ws server + /// Configures the [`SocketAddr`] of the ws server /// - /// Default is [Ipv4Addr::LOCALHOST] and [DEFAULT_WS_RPC_PORT] + /// Default is [`Ipv4Addr::LOCALHOST`] and [`DEFAULT_WS_RPC_PORT`] pub const fn with_ws_address(mut self, addr: SocketAddr) -> Self { self.ws_addr = Some(addr); self @@ -1590,16 +1592,16 @@ impl RpcServerConfig { /// Configures the ipc server /// - /// Note: this always configures an [EthSubscriptionIdProvider] [IdProvider] for convenience. - /// To set a custom [IdProvider], please use [Self::with_id_provider]. + /// Note: this always configures an [`EthSubscriptionIdProvider`] [`IdProvider`] for + /// convenience. To set a custom [`IdProvider`], please use [`Self::with_id_provider`]. pub fn with_ipc(mut self, config: IpcServerBuilder) -> Self { self.ipc_server_config = Some(config.set_id_provider(EthSubscriptionIdProvider::default())); self } - /// Sets a custom [IdProvider] for all configured transports. + /// Sets a custom [`IdProvider`] for all configured transports. /// - /// By default all transports use [EthSubscriptionIdProvider] + /// By default all transports use [`EthSubscriptionIdProvider`] pub fn with_id_provider(mut self, id_provider: I) -> Self where I: IdProvider + Clone + 'static, @@ -1619,7 +1621,7 @@ impl RpcServerConfig { /// Configures the endpoint of the ipc server /// - /// Default is [DEFAULT_IPC_ENDPOINT] + /// Default is [`DEFAULT_IPC_ENDPOINT`] pub fn with_ipc_endpoint(mut self, path: impl Into) -> Self { self.ipc_endpoint = Some(path.into()); self @@ -1633,19 +1635,19 @@ impl RpcServerConfig { /// Returns true if any server is configured. /// - /// If no server is configured, no server will be be launched on [RpcServerConfig::start]. + /// If no server is configured, no server will be launched on [`RpcServerConfig::start`].
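The address setters above pair an explicit override with documented defaults (localhost plus a default port). A sketch of that defaulting logic, with a placeholder port constant (the real default lives in reth's constants module):

```rust
// Sketch of the http-address defaulting described above. The port value is
// an assumption for illustration, not necessarily reth's constant.
use std::net::{Ipv4Addr, SocketAddr};

const DEFAULT_HTTP_RPC_PORT: u16 = 8545; // placeholder default

struct RpcServerConfig {
    http_addr: Option<SocketAddr>,
}

impl RpcServerConfig {
    const fn with_http_address(mut self, addr: SocketAddr) -> Self {
        self.http_addr = Some(addr);
        self
    }

    /// Resolve the effective address, falling back to the documented default.
    fn effective_http_addr(&self) -> SocketAddr {
        self.http_addr
            .unwrap_or_else(|| SocketAddr::from((Ipv4Addr::LOCALHOST, DEFAULT_HTTP_RPC_PORT)))
    }
}

fn main() {
    let cfg = RpcServerConfig { http_addr: None };
    assert_eq!(cfg.effective_http_addr().port(), DEFAULT_HTTP_RPC_PORT);
    let cfg = cfg.with_http_address(SocketAddr::from((Ipv4Addr::LOCALHOST, 9000)));
    assert_eq!(cfg.effective_http_addr().port(), 9000);
}
```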
pub const fn has_server(&self) -> bool { self.http_server_config.is_some() || self.ws_server_config.is_some() || self.ipc_server_config.is_some() } - /// Returns the [SocketAddr] of the http server + /// Returns the [`SocketAddr`] of the http server pub const fn http_address(&self) -> Option { self.http_addr } - /// Returns the [SocketAddr] of the ws server + /// Returns the [`SocketAddr`] of the ws server pub const fn ws_address(&self) -> Option { self.ws_addr } @@ -1655,17 +1657,17 @@ impl RpcServerConfig { self.ipc_endpoint.clone() } - /// Convenience function to do [RpcServerConfig::build] and [RpcServer::start] in one step + /// Convenience function to do [`RpcServerConfig::build`] and [`RpcServer::start`] in one step pub async fn start(self, modules: TransportRpcModules) -> Result { self.build(&modules).await?.start(modules).await } - /// Creates the [CorsLayer] if any + /// Creates the [`CorsLayer`] if any fn maybe_cors_layer(cors: Option) -> Result, CorsDomainError> { cors.as_deref().map(cors::create_cors_layer).transpose() } - /// Creates the [AuthLayer] if any + /// Creates the [`AuthLayer`] if any fn maybe_jwt_layer(&self) -> Option> { self.jwt_secret.map(|secret| AuthLayer::new(JwtAuthValidator::new(secret))) } @@ -1805,7 +1807,8 @@ impl RpcServerConfig { /// /// This consumes the builder and returns a server. /// - /// Note: The server is not started and does nothing unless polled, See also [RpcServer::start] + /// Note: The server is not started and does nothing unless polled. See also + /// [`RpcServer::start`] pub async fn build(mut self, modules: &TransportRpcModules) -> Result { let mut server = RpcServer::empty(); server.ws_http = self.build_ws_http(modules).await?; @@ -1864,25 +1867,25 @@ impl TransportRpcModuleConfig { Self::default().with_ipc(ipc) } - /// Sets the [RpcModuleSelection] for the http transport. + /// Sets the [`RpcModuleSelection`] for the http transport. pub fn with_http(mut self, http: impl Into) -> Self { self.http = Some(http.into()); self } - /// Sets the [RpcModuleSelection] for the ws transport. + /// Sets the [`RpcModuleSelection`] for the ws transport. pub fn with_ws(mut self, ws: impl Into) -> Self { self.ws = Some(ws.into()); self } - /// Sets the [RpcModuleSelection] for the http transport. + /// Sets the [`RpcModuleSelection`] for the ipc transport. pub fn with_ipc(mut self, ipc: impl Into) -> Self { self.ipc = Some(ipc.into()); self } - /// Sets a custom [RpcModuleConfig] for the configured modules. + /// Sets a custom [`RpcModuleConfig`] for the configured modules.
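`maybe_cors_layer` above shows the `map` + `transpose` idiom: start from `Option<String>`, parse to get `Option<Result<..>>`, then flip it into `Result<Option<..>>` so `?` works at the call site. A dependency-free sketch with stand-in types (the real layer and error come from tower-http and the cors module):

```rust
// Sketch of the maybe_cors_layer idiom above; CorsLayer/CorsDomainError are
// hypothetical stand-ins.
#[derive(Debug)]
struct CorsLayer {
    allow_all: bool,
}

#[derive(Debug)]
struct CorsDomainError(String);

fn create_cors_layer(domains: &str) -> Result<CorsLayer, CorsDomainError> {
    match domains.trim() {
        "*" => Ok(CorsLayer { allow_all: true }),
        // A wildcard mixed with concrete domains is rejected.
        d if d.contains('*') => Err(CorsDomainError(d.to_string())),
        _ => Ok(CorsLayer { allow_all: false }),
    }
}

/// Creates the CORS layer only if a domain string was configured.
fn maybe_cors_layer(cors: Option<String>) -> Result<Option<CorsLayer>, CorsDomainError> {
    // map:       Option<String> -> Option<Result<CorsLayer, _>>
    // transpose:                -> Result<Option<CorsLayer>, _>
    cors.as_deref().map(create_cors_layer).transpose()
}

fn main() {
    assert!(matches!(maybe_cors_layer(None), Ok(None)));
    assert!(maybe_cors_layer(Some("*".into())).unwrap().unwrap().allow_all);
    assert!(maybe_cors_layer(Some("https://*.bad".into())).is_err());
}
```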
pub const fn with_config(mut self, config: RpcModuleConfig) -> Self { self.config = Some(config); self @@ -1913,22 +1916,22 @@ impl TransportRpcModuleConfig { self.http.is_none() && self.ws.is_none() && self.ipc.is_none() } - /// Returns the [RpcModuleSelection] for the http transport + /// Returns the [`RpcModuleSelection`] for the http transport pub const fn http(&self) -> Option<&RpcModuleSelection> { self.http.as_ref() } - /// Returns the [RpcModuleSelection] for the ws transport + /// Returns the [`RpcModuleSelection`] for the ws transport pub const fn ws(&self) -> Option<&RpcModuleSelection> { self.ws.as_ref() } - /// Returns the [RpcModuleSelection] for the ipc transport + /// Returns the [`RpcModuleSelection`] for the ipc transport pub const fn ipc(&self) -> Option<&RpcModuleSelection> { self.ipc.as_ref() } - /// Returns the [RpcModuleConfig] for the configured modules + /// Returns the [`RpcModuleConfig`] for the configured modules pub const fn config(&self) -> Option<&RpcModuleConfig> { self.config.as_ref() } @@ -1973,7 +1976,7 @@ pub struct TransportRpcModules { // === impl TransportRpcModules === impl TransportRpcModules { - /// Returns the [TransportRpcModuleConfig] used to configure this instance. + /// Returns the [`TransportRpcModuleConfig`] used to configure this instance. pub const fn module_config(&self) -> &TransportRpcModuleConfig { &self.config } @@ -2131,7 +2134,7 @@ impl RpcServer { pub const fn http_local_addr(&self) -> Option { self.ws_http.http_local_addr } - /// Return the JwtSecret of the server + /// Return the `JwtSecret` of the server pub const fn jwt(&self) -> Option { self.ws_http.jwt_secret } @@ -2192,7 +2195,8 @@ impl fmt::Debug for RpcServer { /// A handle to the spawned servers. /// -/// When this type is dropped or [RpcServerHandle::stop] has been called the server will be stopped. +/// When this type is dropped or [`RpcServerHandle::stop`] has been called the server will be +/// stopped. #[derive(Clone, Debug)] #[must_use = "Server stops if dropped"] pub struct RpcServerHandle { diff --git a/crates/rpc/rpc-builder/src/metrics.rs b/crates/rpc/rpc-builder/src/metrics.rs index ae13ea26a..4b638a7b3 100644 --- a/crates/rpc/rpc-builder/src/metrics.rs +++ b/crates/rpc/rpc-builder/src/metrics.rs @@ -78,7 +78,7 @@ struct RpcServerMetricsInner { call_metrics: HashMap<&'static str, RpcServerCallMetrics>, } -/// A [RpcServiceT] middleware that captures RPC metrics for the server. +/// A [`RpcServiceT`] middleware that captures RPC metrics for the server. /// /// This is created per connection and captures metrics for each request. #[derive(Clone)] diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 5e6e1833d..d1234973e 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -103,7 +103,7 @@ pub async fn launch_http_ws_same_port(modules: impl Into) -> .unwrap() } -/// Returns an [RpcModuleBuilder] with testing components. +/// Returns an [`RpcModuleBuilder`] with testing components. 
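The `#[must_use = "Server stops if dropped"]` handles above tie server lifetime to handle lifetime. A sketch of that drop-to-stop contract, using a plain channel as a stand-in for the real shutdown machinery:

```rust
// Sketch of the RpcServerHandle/AuthServerHandle drop semantics above: the
// server runs until its handle is dropped or `stop` is called explicitly.
use std::sync::mpsc;
use std::thread;

#[must_use = "Server stops if dropped"]
struct RpcServerHandle {
    shutdown: mpsc::Sender<()>,
}

impl RpcServerHandle {
    /// Explicitly stop the server (equivalent to dropping the handle).
    fn stop(self) {
        drop(self.shutdown); // disconnects the channel below
    }
}

fn spawn_server() -> (RpcServerHandle, thread::JoinHandle<()>) {
    let (tx, rx) = mpsc::channel::<()>();
    let join = thread::spawn(move || {
        // "Serve" until every sender (i.e. the handle) is gone.
        while rx.recv().is_ok() {}
        println!("server stopped");
    });
    (RpcServerHandle { shutdown: tx }, join)
}

fn main() {
    let (handle, join) = spawn_server();
    handle.stop(); // or simply let the handle fall out of scope
    join.join().unwrap();
}
```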
pub fn test_rpc_builder() -> RpcModuleBuilder< NoopProvider, TestPool, diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 4d259ae00..c4143f93f 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -63,7 +63,7 @@ where Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, EngineT: EngineTypes + 'static, { - /// Create new instance of [EngineApi]. + /// Creates a new instance of [`EngineApi`]. pub fn new( provider: Provider, chain_spec: Arc, @@ -364,7 +364,7 @@ where /// Returns the execution payload bodies by the range starting at `start`, containing `count` /// blocks. /// - /// WARNING: This method is associated with the BeaconBlocksByRange message in the consensus + /// WARNING: This method is associated with the `BeaconBlocksByRange` message in the consensus /// layer p2p specification, meaning the input should be treated as untrusted or potentially /// adversarial. /// @@ -507,13 +507,13 @@ where /// /// The payload attributes will be validated according to the engine API rules for the given /// message version: - /// * If the version is [EngineApiMessageVersion::V1], then the payload attributes will be + /// * If the version is [`EngineApiMessageVersion::V1`], then the payload attributes will be /// validated according to the Paris rules. - /// * If the version is [EngineApiMessageVersion::V2], then the payload attributes will be + /// * If the version is [`EngineApiMessageVersion::V2`], then the payload attributes will be /// validated according to the Shanghai rules, as well as the validity changes from cancun: /// /// - /// * If the version above [EngineApiMessageVersion::V3], then the payload attributes will be + /// * If the version is [`EngineApiMessageVersion::V3`] or above, then the payload attributes will be /// validated according to the Cancun rules. async fn validate_and_execute_forkchoice( &self, diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index 57318d0d6..6fc842f06 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -42,7 +42,7 @@ pub enum EngineApiError { /// The length that was requested. len: u64, }, - /// Thrown if engine_getPayloadBodiesByRangeV1 contains an invalid range + /// Thrown if `engine_getPayloadBodiesByRangeV1` contains an invalid range #[error("invalid start ({start}) or count ({count})")] InvalidBodiesRange { /// Start of the range @@ -93,7 +93,7 @@ pub enum EngineApiError { } impl EngineApiError { - /// Crates a new [EngineApiError::Other] variant. + /// Creates a new [`EngineApiError::Other`] variant. pub fn other(err: E) -> Self { Self::Other(Box::new(err)) } diff --git a/crates/rpc/rpc-engine-api/src/metrics.rs b/crates/rpc/rpc-engine-api/src/metrics.rs index 3c7b5c4b2..73489b755 100644 --- a/crates/rpc/rpc-engine-api/src/metrics.rs +++ b/crates/rpc/rpc-engine-api/src/metrics.rs @@ -69,7 +69,7 @@ pub(crate) struct ForkchoiceUpdatedResponseMetrics { /// [Accepted](reth_rpc_types::engine::PayloadStatusEnum#Accepted). pub(crate) forkchoice_updated_accepted: Counter, /// The total count of forkchoice updated messages that were unsuccessful, i.e. we responded - /// with an error type that is not a [PayloadStatusEnum]. + /// with an error type that is not a [`PayloadStatusEnum`].
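The version rules in the list above (V1 for Paris, V2 for Shanghai, V3 and later for Cancun) reduce to a single match; a sketch with stubbed-out rule checks (the enum shape and the stubs are assumptions, not reth's validation logic):

```rust
// Sketch of version-dependent payload-attribute validation as described above.
enum EngineApiMessageVersion {
    V1,
    V2,
    V3,
}

#[derive(Debug)]
struct AttributesError;

fn validate_payload_attributes(
    version: EngineApiMessageVersion,
) -> Result<(), AttributesError> {
    match version {
        EngineApiMessageVersion::V1 => validate_paris(),
        EngineApiMessageVersion::V2 => validate_shanghai(),
        // V3 (and any later version) follows the Cancun rules.
        _ => validate_cancun(),
    }
}

// Rule stubs; the real checks inspect withdrawals, blob fields, etc.
fn validate_paris() -> Result<(), AttributesError> { Ok(()) }
fn validate_shanghai() -> Result<(), AttributesError> { Ok(()) }
fn validate_cancun() -> Result<(), AttributesError> { Ok(()) }

fn main() {
    assert!(validate_payload_attributes(EngineApiMessageVersion::V3).is_ok());
}
```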
pub(crate) forkchoice_updated_error: Counter, } @@ -92,7 +92,7 @@ pub(crate) struct NewPayloadStatusResponseMetrics { /// [Accepted](reth_rpc_types::engine::PayloadStatusEnum#Accepted). pub(crate) new_payload_accepted: Counter, /// The total count of new payload messages that were unsuccessful, i.e. we responded with an - /// error type that is not a [PayloadStatusEnum]. + /// error type that is not a [`PayloadStatusEnum`]. pub(crate) new_payload_error: Counter, /// The total gas of valid new payload messages received. pub(crate) new_payload_total_gas: Histogram, diff --git a/crates/rpc/rpc-layer/src/auth_client_layer.rs b/crates/rpc/rpc-layer/src/auth_client_layer.rs index e1514ed5f..94403ce4b 100644 --- a/crates/rpc/rpc-layer/src/auth_client_layer.rs +++ b/crates/rpc/rpc-layer/src/auth_client_layer.rs @@ -7,14 +7,14 @@ use std::{ }; use tower::Layer; -/// A layer that adds a new JWT token to every request using AuthClientService. +/// A layer that adds a new JWT token to every request using `AuthClientService`. #[derive(Debug)] pub struct AuthClientLayer { secret: JwtSecret, } impl AuthClientLayer { - /// Create a new AuthClientLayer with the given `secret`. + /// Create a new `AuthClientLayer` with the given `secret`. pub const fn new(secret: JwtSecret) -> Self { Self { secret } } diff --git a/crates/rpc/rpc-layer/src/auth_layer.rs b/crates/rpc/rpc-layer/src/auth_layer.rs index bf86d7d63..6b10ec032 100644 --- a/crates/rpc/rpc-layer/src/auth_layer.rs +++ b/crates/rpc/rpc-layer/src/auth_layer.rs @@ -260,7 +260,7 @@ mod tests { (status, body) } - /// Spawn a new RPC server equipped with a JwtLayer auth middleware. + /// Spawn a new RPC server equipped with a `JwtLayer` auth middleware. async fn spawn_server() -> ServerHandle { let secret = JwtSecret::from_hex(SECRET).unwrap(); let addr = format!("{AUTH_ADDR}:{AUTH_PORT}"); diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index de5a4ed2c..ae82415f0 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -32,7 +32,7 @@ pub trait DebugApiExt { /// The provider type that is used to make the requests. type Provider; - /// Same as [DebugApiClient::debug_trace_transaction] but returns the result as json. + /// Same as [`DebugApiClient::debug_trace_transaction`] but returns the result as json. fn debug_trace_transaction_json( &self, hash: B256, @@ -59,14 +59,14 @@ pub trait DebugApiExt { I: IntoIterator, B: Into + Send; - /// method for debug_traceCall + /// method for `debug_traceCall` fn debug_trace_call_json( &self, request: TransactionRequest, opts: GethDebugTracingOptions, ) -> impl Future> + Send; - /// method for debug_traceCall using raw JSON strings for the request and options. + /// method for `debug_traceCall` using raw JSON strings for the request and options. fn debug_trace_call_raw_json( &self, request_json: String, diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index c3df9832e..db8933c0a 100644 --- a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -53,7 +53,7 @@ pub trait TraceApiExt { /// Returns a new stream that yields the traces for the given blocks. /// - /// See also [StreamExt::buffered]. + /// See also [`StreamExt::buffered`]. fn trace_block_buffered(&self, params: I, n: usize) -> TraceBlockStream<'_> where I: IntoIterator, @@ -61,7 +61,7 @@ pub trait TraceApiExt { /// Returns a new stream that yields the traces for the given blocks. 
/// - /// See also [StreamExt::buffer_unordered]. + /// See also [`StreamExt::buffer_unordered`]. fn trace_block_buffered_unordered(&self, params: I, n: usize) -> TraceBlockStream<'_> where I: IntoIterator, diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index d3840b24e..7cfd57e8a 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -8,7 +8,7 @@ use reth_primitives::{ use reth_rpc_types::{Block, BlockError, BlockTransactions, BlockTransactionsKind, Header}; /// Converts the given primitive block into a [Block] response with the given -/// [BlockTransactionsKind] +/// [`BlockTransactionsKind`] /// /// If a `block_hash` is provided, then this is used, otherwise the block hash is computed. pub fn from_block( @@ -29,7 +29,7 @@ pub fn from_block( /// total difficulty to populate its field in the rpc response. /// /// This will populate the `transactions` field with only the hashes of the transactions in the -/// block: [BlockTransactions::Hashes] +/// block: [`BlockTransactions::Hashes`] pub fn from_block_with_tx_hashes( block: BlockWithSenders, total_difficulty: U256, @@ -51,7 +51,7 @@ pub fn from_block_with_tx_hashes( /// total difficulty to populate its field in the rpc response. /// /// This will populate the `transactions` field with the _full_ -/// [Transaction](reth_rpc_types::Transaction) objects: [BlockTransactions::Full] +/// [Transaction](reth_rpc_types::Transaction) objects: [`BlockTransactions::Full`] pub fn from_block_full( mut block: BlockWithSenders, total_difficulty: U256, @@ -90,7 +90,7 @@ pub fn from_block_full( )) } -/// Converts from a [reth_primitives::SealedHeader] to a [reth_rpc_types::Header] +/// Converts from a [`reth_primitives::SealedHeader`] to a [`reth_rpc_types::Header`] /// /// # Note /// diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index e7f4ba046..dbd3e5be9 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -12,7 +12,7 @@ use reth_rpc_types::engine::{ ExecutionPayloadV4, PayloadError, }; -/// Converts [ExecutionPayloadV1] to [Block] +/// Converts [`ExecutionPayloadV1`] to [Block] pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result { if payload.extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE { return Err(PayloadError::ExtraData(payload.extra_data)) @@ -72,7 +72,7 @@ pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result Result { // this performs the same conversion as the underlying V1 payload, but calculates the // withdrawals root and adds withdrawals @@ -83,7 +83,7 @@ pub fn try_payload_v2_to_block(payload: ExecutionPayloadV2) -> Result Result { // this performs the same conversion as the underlying V2 payload, but inserts the blob gas // used and excess blob gas @@ -95,7 +95,7 @@ pub fn try_payload_v3_to_block(payload: ExecutionPayloadV3) -> Result Result { let ExecutionPayloadV4 { payload_inner, deposit_requests, withdrawal_requests } = payload; let mut block = try_payload_v3_to_block(payload_inner)?; @@ -114,7 +114,7 @@ pub fn try_payload_v4_to_block(payload: ExecutionPayloadV4) -> Result (ExecutionPayload, Option) { if value.header.requests_root.is_some() { (ExecutionPayload::V4(block_to_payload_v4(value)), None) @@ -131,7 +131,7 @@ pub fn block_to_payload(value: SealedBlock) -> (ExecutionPayload, Option) } } -/// Converts [SealedBlock] to [ExecutionPayloadV1] +/// Converts [`SealedBlock`] 
to [`ExecutionPayloadV1`] pub fn block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { let transactions = value.raw_transactions(); ExecutionPayloadV1 { @@ -152,7 +152,7 @@ pub fn block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { } } -/// Converts [SealedBlock] to [ExecutionPayloadV2] +/// Converts [`SealedBlock`] to [`ExecutionPayloadV2`] pub fn block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { let transactions = value.raw_transactions(); @@ -177,7 +177,7 @@ pub fn block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { } } -/// Converts [SealedBlock] to [ExecutionPayloadV3], and returns the parent beacon block root. +/// Converts [`SealedBlock`] to [`ExecutionPayloadV3`], and returns the parent beacon block root. pub fn block_to_payload_v3(value: SealedBlock) -> (ExecutionPayloadV3, Option) { let transactions = value.raw_transactions(); @@ -209,7 +209,7 @@ pub fn block_to_payload_v3(value: SealedBlock) -> (ExecutionPayloadV3, Option ExecutionPayloadV4 { let (deposit_requests, withdrawal_requests) = value.requests.take().unwrap_or_default().into_iter().fold( @@ -236,7 +236,7 @@ pub fn block_to_payload_v4(mut value: SealedBlock) -> ExecutionPayloadV4 { } } -/// Converts [SealedBlock] to [ExecutionPayloadFieldV2] +/// Converts [`SealedBlock`] to [`ExecutionPayloadFieldV2`] pub fn convert_block_to_payload_field_v2(value: SealedBlock) -> ExecutionPayloadFieldV2 { // if there are withdrawals, return V2 if value.withdrawals.is_some() { @@ -246,7 +246,7 @@ pub fn convert_block_to_payload_field_v2(value: SealedBlock) -> ExecutionPayload } } -/// Converts [ExecutionPayloadFieldV2] to [ExecutionPayload] +/// Converts [`ExecutionPayloadFieldV2`] to [`ExecutionPayload`] pub fn convert_payload_field_v2_to_payload(value: ExecutionPayloadFieldV2) -> ExecutionPayload { match value { ExecutionPayloadFieldV2::V1(payload) => ExecutionPayload::V1(payload), @@ -254,7 +254,7 @@ pub fn convert_payload_field_v2_to_payload(value: ExecutionPayloadFieldV2) -> Ex } } -/// Converts [ExecutionPayloadInputV2] to [ExecutionPayload] +/// Converts [`ExecutionPayloadInputV2`] to [`ExecutionPayload`] pub fn convert_payload_input_v2_to_payload(value: ExecutionPayloadInputV2) -> ExecutionPayload { match value.withdrawals { Some(withdrawals) => ExecutionPayload::V2(ExecutionPayloadV2 { @@ -265,7 +265,7 @@ pub fn convert_payload_input_v2_to_payload(value: ExecutionPayloadInputV2) -> Ex } } -/// Converts [SealedBlock] to [ExecutionPayloadInputV2] +/// Converts [`SealedBlock`] to [`ExecutionPayloadInputV2`] pub fn convert_block_to_payload_input_v2(value: SealedBlock) -> ExecutionPayloadInputV2 { ExecutionPayloadInputV2 { withdrawals: value.withdrawals.clone().map(Withdrawals::into_inner), @@ -301,11 +301,11 @@ pub fn try_into_block( /// NOTE: Empty ommers, nonce and difficulty values are validated upon computing block hash and /// comparing the value with `payload.block_hash`. /// -/// Uses [try_into_block] to convert from the [ExecutionPayload] to [Block] and seals the block +/// Uses [`try_into_block`] to convert from the [`ExecutionPayload`] to [Block] and seals the block /// with its hash. /// -/// Uses [validate_block_hash] to validate the payload block hash and ultimately return the -/// [SealedBlock]. +/// Uses [`validate_block_hash`] to validate the payload block hash and ultimately return the +/// [`SealedBlock`]. 
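The `block_to_payload` hunk above selects the payload version from fork-specific block fields (`requests_root` implies V4, and so on down the chain). A reduced sketch of that cascade, with the fields flattened onto a stand-in `SealedBlock` even though the real ones live on the header:

```rust
/// Illustrative stand-in: only the fields that drive version selection.
struct SealedBlock {
    /// Prague: EIP-7685 requests root.
    requests_root: Option<[u8; 32]>,
    /// Cancun: EIP-4844 blob accounting.
    excess_blob_gas: Option<u64>,
    /// Shanghai: EIP-4895 withdrawals.
    withdrawals: Option<Vec<u64>>,
}

#[derive(Debug)]
enum PayloadVersion {
    V1,
    V2,
    V3,
    V4,
}

/// Pick the newest payload version whose defining field is present.
fn payload_version(block: &SealedBlock) -> PayloadVersion {
    if block.requests_root.is_some() {
        PayloadVersion::V4
    } else if block.excess_blob_gas.is_some() {
        PayloadVersion::V3
    } else if block.withdrawals.is_some() {
        PayloadVersion::V2
    } else {
        PayloadVersion::V1
    }
}

fn main() {
    let shanghai =
        SealedBlock { requests_root: None, excess_blob_gas: None, withdrawals: Some(vec![]) };
    println!("{:?}", payload_version(&shanghai)); // V2
}
```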
pub fn try_into_sealed_block( payload: ExecutionPayload, parent_beacon_block_root: Option, @@ -318,10 +318,10 @@ pub fn try_into_sealed_block( } /// Takes the expected block hash and [Block], validating the block and converting it into a -/// [SealedBlock]. +/// [`SealedBlock`]. /// /// If the provided block hash does not match the block hash computed from the provided block, this -/// returns [PayloadError::BlockHash]. +/// returns [`PayloadError::BlockHash`]. #[inline] pub fn validate_block_hash( expected_block_hash: B256, @@ -338,7 +338,7 @@ pub fn validate_block_hash( Ok(sealed_block) } -/// Converts [Block] to [ExecutionPayloadBodyV1] +/// Converts [Block] to [`ExecutionPayloadBodyV1`] pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { let transactions = value.body.into_iter().map(|tx| { let mut out = Vec::new(); @@ -351,7 +351,7 @@ pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { } } -/// Transforms a [SealedBlock] into a [ExecutionPayloadV1] +/// Transforms a [`SealedBlock`] into a [`ExecutionPayloadV1`] pub fn execution_payload_from_sealed_block(value: SealedBlock) -> ExecutionPayloadV1 { let transactions = value.raw_transactions(); ExecutionPayloadV1 { diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs index bb945ce8f..802ce490b 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction/mod.rs @@ -114,7 +114,7 @@ fn fill( } } -/// Convert [TransactionSignedEcRecovered] to [TransactionRequest] +/// Convert [`TransactionSignedEcRecovered`] to [`TransactionRequest`] pub fn transaction_to_call_request(tx: TransactionSignedEcRecovered) -> TransactionRequest { let from = tx.signer(); let to = Some(tx.transaction.to().into()); diff --git a/crates/rpc/rpc-types/src/mev.rs b/crates/rpc/rpc-types/src/mev.rs index ed5d79bb4..d95538d1c 100644 --- a/crates/rpc/rpc-types/src/mev.rs +++ b/crates/rpc/rpc-types/src/mev.rs @@ -153,41 +153,41 @@ pub struct PrivacyHint { } impl PrivacyHint { - /// Sets the flag indicating inclusion of calldata and returns the modified PrivacyHint + /// Sets the flag indicating inclusion of calldata and returns the modified `PrivacyHint` /// instance. pub const fn with_calldata(mut self) -> Self { self.calldata = true; self } - /// Sets the flag indicating inclusion of contract address and returns the modified PrivacyHint - /// instance. + /// Sets the flag indicating inclusion of contract address and returns the modified + /// `PrivacyHint` instance. pub const fn with_contract_address(mut self) -> Self { self.contract_address = true; self } - /// Sets the flag indicating inclusion of logs and returns the modified PrivacyHint instance. + /// Sets the flag indicating inclusion of logs and returns the modified `PrivacyHint` instance. pub const fn with_logs(mut self) -> Self { self.logs = true; self } - /// Sets the flag indicating inclusion of function selector and returns the modified PrivacyHint - /// instance. + /// Sets the flag indicating inclusion of function selector and returns the modified + /// `PrivacyHint` instance. pub const fn with_function_selector(mut self) -> Self { self.function_selector = true; self } - /// Sets the flag indicating inclusion of hash and returns the modified PrivacyHint instance. + /// Sets the flag indicating inclusion of hash and returns the modified `PrivacyHint` instance. 
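`validate_block_hash`, referenced in the docs above, recomputes the block hash and compares it against the hash the payload carried before handing back a sealed block. A toy version of that check, with `DefaultHasher` standing in for the real Keccak-256 header hash and `u64` for `B256`:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

#[derive(Hash)]
struct Block {
    number: u64,
    extra_data: Vec<u8>,
}

#[derive(Debug)]
enum PayloadError {
    /// Mismatch between the computed (execution) and claimed (consensus) hash.
    BlockHash { execution: u64, consensus: u64 },
}

fn compute_hash(block: &Block) -> u64 {
    let mut hasher = DefaultHasher::new();
    block.hash(&mut hasher);
    hasher.finish()
}

/// Seal the block only if the recomputed hash matches the expected one.
fn validate_block_hash(expected: u64, block: Block) -> Result<(u64, Block), PayloadError> {
    let computed = compute_hash(&block);
    if computed != expected {
        return Err(PayloadError::BlockHash { execution: computed, consensus: expected });
    }
    Ok((computed, block))
}

fn main() {
    let block = Block { number: 1, extra_data: vec![0xaa] };
    let expected = compute_hash(&block);
    assert!(validate_block_hash(expected, block).is_ok());
}
```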
pub const fn with_hash(mut self) -> Self { self.hash = true; self } - /// Sets the flag indicating inclusion of transaction hash and returns the modified PrivacyHint - /// instance. + /// Sets the flag indicating inclusion of transaction hash and returns the modified + /// `PrivacyHint` instance. pub const fn with_tx_hash(mut self) -> Self { self.tx_hash = true; self @@ -223,7 +223,7 @@ impl PrivacyHint { self.tx_hash } - /// Calculates the number of hints set within the PrivacyHint instance. + /// Calculates the number of hints set within the `PrivacyHint` instance. const fn num_hints(&self) -> usize { let mut num_hints = 0; if self.calldata { @@ -381,12 +381,12 @@ pub struct SimBundleResponse { /// The gas used by the simulated block. #[serde(with = "alloy_rpc_types::serde_helpers::num::u64_via_ruint")] pub gas_used: u64, - /// Logs returned by mev_simBundle. + /// Logs returned by `mev_simBundle`. #[serde(default, skip_serializing_if = "Option::is_none")] pub logs: Option>, } -/// Logs returned by mev_simBundle. +/// Logs returned by `mev_simBundle`. #[derive(Deserialize, Debug, Serialize, Clone, PartialEq, Eq)] #[serde(rename_all = "camelCase")] pub struct SimBundleLogs { diff --git a/crates/rpc/rpc-types/src/rpc.rs b/crates/rpc/rpc-types/src/rpc.rs index 4d7c08f6a..bb5ae5d77 100644 --- a/crates/rpc/rpc-types/src/rpc.rs +++ b/crates/rpc/rpc-types/src/rpc.rs @@ -10,7 +10,7 @@ pub struct RpcModules { } impl RpcModules { - /// Create a new instance of RPCModules + /// Create a new instance of `RPCModules` pub const fn new(module_map: HashMap) -> Self { Self { module_map } } diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 01b3fb101..ac1c56322 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -48,7 +48,7 @@ pub struct DebugApi { // === impl DebugApi === impl DebugApi { - /// Create a new instance of the [DebugApi] + /// Create a new instance of the [`DebugApi`] pub fn new(provider: Provider, eth: Eth, blocking_task_guard: BlockingTaskGuard) -> Self { let inner = Arc::new(DebugApiInner { provider, eth_api: eth, blocking_task_guard }); Self { inner } @@ -256,8 +256,8 @@ where .await } - /// The debug_traceCall method lets you run an `eth_call` within the context of the given block - /// execution using the final state of parent block as the base. + /// The `debug_traceCall` method lets you run an `eth_call` within the context of the given + /// block execution using the final state of parent block as the base. /// /// Differences compare to `eth_call`: /// - `debug_traceCall` executes with __enabled__ basefee check, `eth_call` does not: @@ -395,7 +395,7 @@ where Ok(frame.into()) } - /// The debug_traceCallMany method lets you run an `eth_callMany` within the context of the + /// The `debug_traceCallMany` method lets you run an `eth_callMany` within the context of the /// given block execution using the first n transactions in the given block as base. /// Each following bundle increments block number by 1 and block timestamp by 12 seconds pub async fn debug_trace_call_many( diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index 7024fa35c..9534d1c7b 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -49,7 +49,7 @@ where Network: NetworkInfo + Send + Sync + 'static, EvmConfig: ConfigureEvm + 'static, { - /// Estimate gas needed for execution of the `request` at the [BlockId]. + /// Estimate gas needed for execution of the `request` at the [`BlockId`]. 
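The `PrivacyHint` hunks above use a `const fn` consuming-builder: each `with_*` method takes `self` by value, flips one flag, and returns the modified value, and `num_hints` counts flags with plain `if`s so it stays `const`-callable. A trimmed but compilable sketch (four of the flags instead of six):

```rust
#[derive(Clone, Copy)]
struct PrivacyHint {
    calldata: bool,
    contract_address: bool,
    logs: bool,
    hash: bool,
}

impl PrivacyHint {
    const fn with_calldata(mut self) -> Self {
        self.calldata = true;
        self
    }

    const fn with_logs(mut self) -> Self {
        self.logs = true;
        self
    }

    /// Branchy on purpose: iterators are unavailable in `const fn`.
    const fn num_hints(&self) -> usize {
        let mut n = 0;
        if self.calldata { n += 1; }
        if self.contract_address { n += 1; }
        if self.logs { n += 1; }
        if self.hash { n += 1; }
        n
    }
}

/// Because the builder methods are `const fn`, hints can be assembled at
/// compile time.
const HINT: PrivacyHint =
    PrivacyHint { calldata: false, contract_address: false, logs: false, hash: false }
        .with_calldata()
        .with_logs();

fn main() {
    assert_eq!(HINT.num_hints(), 2);
}
```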
pub async fn estimate_gas_at( &self, request: TransactionRequest, @@ -177,7 +177,7 @@ where /// Estimates the gas usage of the `request` with the state. /// - /// This will execute the [TransactionRequest] and find the best gas limit via binary search + /// This will execute the [`TransactionRequest`] and find the best gas limit via binary search pub fn estimate_gas_with( &self, mut cfg: CfgEnvWithHandlerCfg, @@ -374,7 +374,7 @@ where Ok(U256::from(highest_gas_limit)) } - /// Creates the AccessList for the `request` at the [BlockId] or latest. + /// Creates the `AccessList` for the `request` at the [`BlockId`] or latest. pub(crate) async fn create_access_list_at( &self, request: TransactionRequest, diff --git a/crates/rpc/rpc/src/eth/api/fee_history.rs b/crates/rpc/rpc/src/eth/api/fee_history.rs index 9184607bc..5c1085d51 100644 --- a/crates/rpc/rpc/src/eth/api/fee_history.rs +++ b/crates/rpc/rpc/src/eth/api/fee_history.rs @@ -30,7 +30,7 @@ pub struct FeeHistoryCache { } impl FeeHistoryCache { - /// Creates new FeeHistoryCache instance, initialize it with the more recent data, set bounds + /// Creates new `FeeHistoryCache` instance, initialize it with the more recent data, set bounds pub fn new(eth_cache: EthStateCache, config: FeeHistoryCacheConfig) -> Self { let inner = FeeHistoryCacheInner { lower_bound: Default::default(), @@ -54,7 +54,8 @@ impl FeeHistoryCache { self.config().resolution } - /// Returns all blocks that are missing in the cache in the [lower_bound, upper_bound] range. + /// Returns all blocks that are missing in the cache in the [`lower_bound`, `upper_bound`] + /// range. /// /// This function is used to populate the cache with missing blocks, which can happen if the /// node switched to stage sync node. @@ -112,12 +113,12 @@ impl FeeHistoryCache { self.inner.lower_bound.store(lower_bound, SeqCst); } - /// Get UpperBound value for FeeHistoryCache + /// Get `UpperBound` value for `FeeHistoryCache` pub fn upper_bound(&self) -> u64 { self.inner.upper_bound.load(SeqCst) } - /// Get LowerBound value for FeeHistoryCache + /// Get `LowerBound` value for `FeeHistoryCache` pub fn lower_bound(&self) -> u64 { self.inner.lower_bound.load(SeqCst) } @@ -125,7 +126,7 @@ impl FeeHistoryCache { /// Collect fee history for given range. /// /// This function retrieves fee history entries from the cache for the specified range. - /// If the requested range (start_block to end_block) is within the cache bounds, + /// If the requested range (`start_block` to `end_block`) is within the cache bounds, /// it returns the corresponding entries. /// Otherwise it returns None. pub async fn get_history( @@ -161,14 +162,14 @@ impl FeeHistoryCache { } } -/// Settings for the [FeeHistoryCache]. +/// Settings for the [`FeeHistoryCache`]. #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct FeeHistoryCacheConfig { /// Max number of blocks in cache. 
/// - /// Default is [MAX_HEADER_HISTORY] plus some change to also serve slightly older blocks from - /// cache, since fee_history supports the entire range + /// Default is [`MAX_HEADER_HISTORY`] plus some change to also serve slightly older blocks from + /// cache, since `fee_history` supports the entire range pub max_blocks: u64, /// Percentile approximation resolution /// @@ -182,14 +183,14 @@ impl Default for FeeHistoryCacheConfig { } } -/// Container type for shared state in [FeeHistoryCache] +/// Container type for shared state in [`FeeHistoryCache`] #[derive(Debug)] struct FeeHistoryCacheInner { /// Stores the lower bound of the cache lower_bound: AtomicU64, /// Stores the upper bound of the cache upper_bound: AtomicU64, - /// Config for FeeHistoryCache, consists of resolution for percentile approximation + /// Config for `FeeHistoryCache`, consists of resolution for percentile approximation /// and max number of blocks config: FeeHistoryCacheConfig, /// Stores the entries of the cache @@ -382,7 +383,7 @@ impl FeeHistoryEntry { /// /// Returns `None` if `excess_blob_gas` is None. /// - /// See also [Self::next_block_excess_blob_gas] + /// See also [`Self::next_block_excess_blob_gas`] pub fn next_block_blob_fee(&self) -> Option { self.next_block_excess_blob_gas().map(calc_blob_gasprice) } diff --git a/crates/rpc/rpc/src/eth/api/fees.rs b/crates/rpc/rpc/src/eth/api/fees.rs index da2b846a3..8c12e90e6 100644 --- a/crates/rpc/rpc/src/eth/api/fees.rs +++ b/crates/rpc/rpc/src/eth/api/fees.rs @@ -50,7 +50,7 @@ where /// Reports the fee history, for the given amount of blocks, up until the given newest block. /// - /// If `reward_percentiles` are provided the [FeeHistory] will include the _approximated_ + /// If `reward_percentiles` are provided the [`FeeHistory`] will include the _approximated_ /// rewards for the requested range. pub(crate) async fn fee_history( &self, diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index 90d0b218e..543167ae0 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -78,7 +78,7 @@ pub trait EthApiSpec: EthTransactions + Send + Sync { /// `Eth` API implementation. /// /// This type provides the functionality for handling `eth_` related requests. -/// These are implemented two-fold: Core functionality is implemented as [EthApiSpec] +/// These are implemented two-fold: Core functionality is implemented as [`EthApiSpec`] /// trait. Additionally, the required server implementations (e.g. [`reth_rpc_api::EthApiServer`]) /// are implemented separately in submodules. The rpc handler implementation can then delegate to /// the main impls. This way [`EthApi`] is not limited to [`jsonrpsee`] and can be used standalone @@ -229,14 +229,14 @@ where Provider: BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, { - /// Returns the state at the given [BlockId] enum. + /// Returns the state at the given [`BlockId`] enum. /// - /// Note: if not [BlockNumberOrTag::Pending] then this will only return canonical state. See also + /// Note: if not [`BlockNumberOrTag::Pending`] then this will only return canonical state. See also pub fn state_at_block_id(&self, at: BlockId) -> EthResult { Ok(self.provider().state_by_block_id(at)?) } - /// Returns the state at the given [BlockId] enum or the latest. + /// Returns the state at the given [`BlockId`] enum or the latest. 
/// /// Convenience function to interprets `None` as `BlockId::Number(BlockNumberOrTag::Latest)` pub fn state_at_block_id_or_latest( @@ -269,7 +269,7 @@ where Network: NetworkInfo + Send + Sync + 'static, EvmConfig: ConfigureEvm + Clone + 'static, { - /// Configures the [CfgEnvWithHandlerCfg] and [BlockEnv] for the pending block + /// Configures the [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the pending block /// /// If no pending block is available, this will derive it from the `latest` block pub(crate) fn pending_block_env_and_cfg(&self) -> EthResult { @@ -432,10 +432,10 @@ where } } -/// The default gas limit for eth_call and adjacent calls. +/// The default gas limit for `eth_call` and adjacent calls. /// /// This is different from the default to regular 30M block gas limit -/// [ETHEREUM_BLOCK_GAS_LIMIT](reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT) to allow for +/// [`ETHEREUM_BLOCK_GAS_LIMIT`](reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT) to allow for /// more complex calls. pub const RPC_DEFAULT_GAS_CAP: GasCap = GasCap(50_000_000); diff --git a/crates/rpc/rpc/src/eth/api/optimism.rs b/crates/rpc/rpc/src/eth/api/optimism.rs index b5aa8c725..af5845014 100644 --- a/crates/rpc/rpc/src/eth/api/optimism.rs +++ b/crates/rpc/rpc/src/eth/api/optimism.rs @@ -5,7 +5,7 @@ use revm::L1BlockInfo; /// Optimism Transaction Metadata /// /// Includes the L1 fee and data gas for the tx along with the L1 -/// block info. In order to pass the [OptimismTxMeta] into the +/// block info. In order to pass the [`OptimismTxMeta`] into the /// async colored `build_transaction_receipt_with_block_receipts` /// function, a reference counter for the L1 block info is /// used so the L1 block info can be shared between receipts. @@ -20,7 +20,7 @@ pub(crate) struct OptimismTxMeta { } impl OptimismTxMeta { - /// Creates a new [OptimismTxMeta]. + /// Creates a new [`OptimismTxMeta`]. pub(crate) const fn new( l1_block_info: Option, l1_fee: Option, diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs b/crates/rpc/rpc/src/eth/api/pending_block.rs index b324ff97c..276796b13 100644 --- a/crates/rpc/rpc/src/eth/api/pending_block.rs +++ b/crates/rpc/rpc/src/eth/api/pending_block.rs @@ -26,12 +26,12 @@ use revm::{db::states::bundle_state::BundleRetention, Database, DatabaseCommit, use revm_primitives::EnvWithHandlerCfg; use std::time::Instant; -/// Configured [BlockEnv] and [CfgEnvWithHandlerCfg] for a pending block +/// Configured [`BlockEnv`] and [`CfgEnvWithHandlerCfg`] for a pending block #[derive(Debug, Clone)] pub(crate) struct PendingBlockEnv { - /// Configured [CfgEnvWithHandlerCfg] for the pending block. + /// Configured [`CfgEnvWithHandlerCfg`] for the pending block. pub(crate) cfg: CfgEnvWithHandlerCfg, - /// Configured [BlockEnv] for the pending block. + /// Configured [`BlockEnv`] for the pending block. pub(crate) block_env: BlockEnv, /// Origin block for the config pub(crate) origin: PendingBlockEnvOrigin, @@ -293,10 +293,10 @@ impl PendingBlockEnv { /// Apply the [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) pre block contract call. /// -/// This constructs a new [Evm](revm::Evm) with the given DB, and environment [CfgEnvWithHandlerCfg] -/// and [BlockEnv] to execute the pre block contract call. +/// This constructs a new [Evm](revm::Evm) with the given DB, and environment +/// [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] to execute the pre block contract call. 
/// -/// This uses [apply_beacon_root_contract_call] to ultimately apply the beacon root contract state +/// This uses [`apply_beacon_root_contract_call`] to ultimately apply the beacon root contract state /// change. fn pre_block_beacon_root_contract_call( db: &mut DB, @@ -332,10 +332,10 @@ where /// Apply the [EIP-2935](https://eips.ethereum.org/EIPS/eip-2935) pre block state transitions. /// -/// This constructs a new [Evm](revm::Evm) with the given DB, and environment [CfgEnvWithHandlerCfg] -/// and [BlockEnv]. +/// This constructs a new [Evm](revm::Evm) with the given DB, and environment +/// [`CfgEnvWithHandlerCfg`] and [`BlockEnv`]. /// -/// This uses [apply_blockhashes_update]. +/// This uses [`apply_blockhashes_update`]. fn pre_block_blockhashes_update + DatabaseCommit>( db: &mut DB, chain_spec: &ChainSpec, @@ -356,7 +356,7 @@ where .map_err(|err| EthApiError::Internal(err.into())) } -/// The origin for a configured [PendingBlockEnv] +/// The origin for a configured [`PendingBlockEnv`] #[derive(Clone, Debug)] pub(crate) enum PendingBlockEnvOrigin { /// The pending block as received from the CL. @@ -384,7 +384,7 @@ impl PendingBlockEnvOrigin { } } - /// Returns the [BlockId] that represents the state of the block. + /// Returns the [`BlockId`] that represents the state of the block. /// /// If this is the actual pending block, the state is the "Pending" tag, otherwise we can safely /// identify the block by its hash (latest block). @@ -397,8 +397,9 @@ impl PendingBlockEnvOrigin { /// Returns the hash of the block the pending block should be built on. /// - /// For the [PendingBlockEnvOrigin::ActualPending] this is the parent hash of the block. - /// For the [PendingBlockEnvOrigin::DerivedFromLatest] this is the hash of the _latest_ header. + /// For the [`PendingBlockEnvOrigin::ActualPending`] this is the parent hash of the block. + /// For the [`PendingBlockEnvOrigin::DerivedFromLatest`] this is the hash of the _latest_ + /// header. fn build_target_hash(&self) -> B256 { match self { Self::ActualPending(block) => block.parent_hash, diff --git a/crates/rpc/rpc/src/eth/api/state.rs b/crates/rpc/rpc/src/eth/api/state.rs index 61566a355..d7c1bafac 100644 --- a/crates/rpc/rpc/src/eth/api/state.rs +++ b/crates/rpc/rpc/src/eth/api/state.rs @@ -38,7 +38,7 @@ where /// Returns the number of transactions sent from an address at the given block identifier. /// - /// If this is [BlockNumberOrTag::Pending] then this will look up the highest transaction in + /// If this is [`BlockNumberOrTag::Pending`] then this will look up the highest transaction in /// pool and return the next nonce (highest + 1). pub(crate) fn get_transaction_count( &self, diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index d9be1e91e..64d50e52e 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -52,7 +52,7 @@ use crate::eth::revm_utils::FillableTransaction; use reth_rpc_types::OptimismTransactionReceiptFields; use revm_primitives::db::{Database, DatabaseRef}; -/// Helper alias type for the state's [CacheDB] +/// Helper alias type for the state's [`CacheDB`] pub(crate) type StateCacheDB = CacheDB>; /// Commonly used transaction related functions for the [EthApi] type in the `eth_` namespace. @@ -1402,7 +1402,7 @@ where /// Returns the EIP-1559 fees if they are set, otherwise fetches a suggested gas price for /// EIP-1559 transactions. 
/// - /// Returns (max_fee, priority_fee) + /// Returns (`max_fee`, `priority_fee`) pub(crate) async fn eip1559_fees( &self, max_fee_per_gas: Option, @@ -1457,7 +1457,7 @@ where Err(EthApiError::InvalidTransactionSignature) } - /// Get Transaction by [BlockId] and the index of the transaction within that Block. + /// Get Transaction by [`BlockId`] and the index of the transaction within that Block. /// /// Returns `Ok(None)` if the block does not exist, or the block as fewer transactions pub(crate) async fn transaction_by_block_and_tx_index( @@ -1549,9 +1549,9 @@ where ) } - /// Builds op metadata object using the provided [TransactionSigned], L1 block info and - /// `block_timestamp`. The L1BlockInfo is used to calculate the l1 fee and l1 data gas for the - /// transaction. If the L1BlockInfo is not provided, the meta info will be empty. + /// Builds op metadata object using the provided [`TransactionSigned`], L1 block info and + /// `block_timestamp`. The `L1BlockInfo` is used to calculate the l1 fee and l1 data gas for the + /// transaction. If the `L1BlockInfo` is not provided, the meta info will be empty. #[cfg(feature = "optimism")] pub(crate) fn build_op_tx_meta( &self, diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 2135b43a6..a8e088278 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -242,7 +242,7 @@ impl Clone for EthBundle { } } -/// [EthBundle] specific errors. +/// [`EthBundle`] specific errors. #[derive(Debug, thiserror::Error)] pub enum EthBundleError { /// Thrown if the bundle does not contain any transactions. @@ -252,7 +252,7 @@ pub enum EthBundleError { #[error("bundle missing blockNumber")] BundleMissingBlockNumber, /// Thrown when the blob gas usage of the blob transactions in a bundle exceed - /// [MAX_BLOB_GAS_PER_BLOCK]. + /// [`MAX_BLOB_GAS_PER_BLOCK`]. #[error("blob gas usage exceeds the limit of {MAX_BLOB_GAS_PER_BLOCK} gas per block.")] Eip4844BlobGasExceeded, } diff --git a/crates/rpc/rpc/src/eth/cache/config.rs b/crates/rpc/rpc/src/eth/cache/config.rs index d8a01cfb3..73e1c0d7c 100644 --- a/crates/rpc/rpc/src/eth/cache/config.rs +++ b/crates/rpc/rpc/src/eth/cache/config.rs @@ -24,7 +24,7 @@ pub const DEFAULT_ENV_CACHE_MAX_LEN: u32 = 1000; /// Default number of concurrent database requests. pub const DEFAULT_CONCURRENT_DB_REQUESTS: usize = 512; -/// Settings for the [EthStateCache](crate::eth::cache::EthStateCache). +/// Settings for the [`EthStateCache`](crate::eth::cache::EthStateCache). #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct EthStateCacheConfig { diff --git a/crates/rpc/rpc/src/eth/cache/mod.rs b/crates/rpc/rpc/src/eth/cache/mod.rs index 4dbf30227..5d6ae6508 100644 --- a/crates/rpc/rpc/src/eth/cache/mod.rs +++ b/crates/rpc/rpc/src/eth/cache/mod.rs @@ -38,7 +38,7 @@ pub use multi_consumer::MultiConsumerLruCache; type BlockTransactionsResponseSender = oneshot::Sender>>>; -/// The type that can send the response to a requested [BlockWithSenders] +/// The type that can send the response to a requested [`BlockWithSenders`] type BlockWithSendersResponseSender = oneshot::Sender>>; /// The type that can send the response to the requested receipts of a block. @@ -70,7 +70,7 @@ pub struct EthStateCache { } impl EthStateCache { - /// Creates and returns both [EthStateCache] frontend and the memory bound service. + /// Creates and returns both [`EthStateCache`] frontend and the memory bound service. 
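`eip1559_fees` above fills in whichever of the two fees the caller omitted. A synchronous sketch of that fallback; `suggested_tip` stands in for the async gas-price-oracle query, and the default for the fee cap is one plausible policy rather than reth's exact formula:

```rust
/// Resolve (`max_fee`, `priority_fee`), backfilling missing values.
fn eip1559_fees(
    max_fee_per_gas: Option<u128>,
    max_priority_fee_per_gas: Option<u128>,
    base_fee: u128,
    suggested_tip: u128,
) -> (u128, u128) {
    let priority_fee = max_priority_fee_per_gas.unwrap_or(suggested_tip);
    // Assumed default cap: current base fee plus the tip. Real nodes often
    // add headroom for base-fee growth over the next blocks.
    let max_fee = max_fee_per_gas.unwrap_or(base_fee + priority_fee);
    (max_fee, priority_fee)
}

fn main() {
    // Nothing provided: both values come from chain data.
    assert_eq!(eip1559_fees(None, None, 30, 2), (32, 2));
    // Cap pinned by the caller; the tip still defaults to the suggestion.
    assert_eq!(eip1559_fees(Some(100), None, 30, 2), (100, 2));
}
```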
fn create( provider: Provider, action_task_spawner: Tasks, @@ -97,9 +97,9 @@ impl EthStateCache { } /// Creates a new async LRU backed cache service task and spawns it to a new task via - /// [tokio::spawn]. + /// [`tokio::spawn`]. /// - /// See also [Self::spawn_with] + /// See also [`Self::spawn_with`] pub fn spawn( provider: Provider, config: EthStateCacheConfig, @@ -203,7 +203,7 @@ impl EthStateCache { Ok(transactions.zip(receipts)) } - /// Requests the [BlockWithSenders] for the block hash + /// Requests the [`BlockWithSenders`] for the block hash /// /// Returns `None` if the block does not exist. pub async fn get_block_with_senders( @@ -215,7 +215,7 @@ impl EthStateCache { rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? } - /// Requests the [SealedBlockWithSenders] for the block hash + /// Requests the [`SealedBlockWithSenders`] for the block hash /// /// Returns `None` if the block does not exist. pub async fn get_sealed_block_with_senders( @@ -266,19 +266,19 @@ impl EthStateCache { /// A task than manages caches for data required by the `eth` rpc implementation. /// -/// It provides a caching layer on top of the given [StateProvider](reth_provider::StateProvider) +/// It provides a caching layer on top of the given [`StateProvider`](reth_provider::StateProvider) /// and keeps data fetched via the provider in memory in an LRU cache. If the requested data is /// missing in the cache it is fetched and inserted into the cache afterwards. While fetching data /// from disk is sync, this service is async since requests and data is shared via channels. /// /// This type is an endless future that listens for incoming messages from the user facing -/// [EthStateCache] via a channel. If the requested data is not cached then it spawns a new task +/// [`EthStateCache`] via a channel. If the requested data is not cached then it spawns a new task /// that does the IO and sends the result back to it. This way the caching service only /// handles messages and does LRU lookups and never blocking IO. /// /// Caution: The channel for the data is _unbounded_ it is assumed that this is mainly used by the -/// [EthApi](crate::EthApi) which is typically invoked by the RPC server, which already uses permits -/// to limit concurrent requests. +/// [`EthApi`](crate::EthApi) which is typically invoked by the RPC server, which already uses +/// permits to limit concurrent requests. #[must_use = "Type does nothing unless spawned"] pub(crate) struct EthStateCacheService< Provider, diff --git a/crates/rpc/rpc/src/eth/cache/multi_consumer.rs b/crates/rpc/rpc/src/eth/cache/multi_consumer.rs index 0293840f2..cd02ecc56 100644 --- a/crates/rpc/rpc/src/eth/cache/multi_consumer.rs +++ b/crates/rpc/rpc/src/eth/cache/multi_consumer.rs @@ -83,7 +83,7 @@ where /// /// Can fail if the element is rejected by the limiter or if we fail to grow an empty map. /// - /// See [Schnellru::insert](LruMap::insert) for more info. + /// See [`Schnellru::insert`](LruMap::insert) for more info. pub fn insert<'a>(&mut self, key: L::KeyToInsert<'a>, value: V) -> bool where L::KeyToInsert<'a>: Hash + PartialEq, diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 6e7aeb775..7a6214e52 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -125,7 +125,7 @@ pub enum EthApiError { } impl EthApiError { - /// crates a new [EthApiError::Other] variant. + /// Creates a new [`EthApiError::Other`] variant.
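Both `EngineApiError::other` earlier in this patch and `EthApiError::other` just above follow the same catch-all pattern: box any foreign error into an `Other` variant. A self-contained version; the exact trait bounds are an assumption, since the flattened diff elides the generics:

```rust
use std::error::Error;
use std::fmt;

#[derive(Debug)]
enum ApiError {
    /// Catch-all for errors without a dedicated variant.
    Other(Box<dyn Error + Send + Sync>),
}

impl fmt::Display for ApiError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Other(err) => write!(f, "{err}"),
        }
    }
}

impl ApiError {
    /// Generic constructor so call sites can pass any boxable error.
    fn other<E: Error + Send + Sync + 'static>(err: E) -> Self {
        Self::Other(Box::new(err))
    }
}

fn main() {
    let io = std::io::Error::new(std::io::ErrorKind::Other, "disk unhappy");
    println!("{}", ApiError::other(io));
}
```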
pub fn other(err: E) -> Self { Self::Other(Box::new(err)) } @@ -636,7 +636,7 @@ pub enum SignError { /// Signer for requested account not found. #[error("unknown account")] NoAccount, - /// TypedData has invalid format. + /// `TypedData` has invalid format. #[error("given typed data is not valid")] InvalidTypedData, /// Invalid transaction request in `sign_transaction`. @@ -647,8 +647,8 @@ pub enum SignError { NoChainId, } -/// Converts the evm [ExecutionResult] into a result where `Ok` variant is the output bytes if it is -/// [ExecutionResult::Success]. +/// Converts the evm [`ExecutionResult`] into a result where `Ok` variant is the output bytes if it +/// is [`ExecutionResult::Success`]. pub(crate) fn ensure_success(result: ExecutionResult) -> EthResult { match result { ExecutionResult::Success { output, .. } => Ok(output.into_data()), diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 3a1a651ec..9bd69c9b4 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -53,7 +53,7 @@ where /// This uses the given pool to get notified about new transactions, the provider to interact /// with the blockchain, the cache to fetch cacheable data, like the logs. /// - /// See also [EthFilterConfig]. + /// See also [`EthFilterConfig`]. /// /// This also spawns a task that periodically clears stale filters. pub fn new( @@ -97,7 +97,7 @@ where &self.inner.active_filters } - /// Endless future that [Self::clear_stale_filters] every `stale_filter_ttl` interval. + /// Endless future that [`Self::clear_stale_filters`] every `stale_filter_ttl` interval. /// Nonetheless, this endless future frees the thread at every await point. async fn watch_and_clear_stale_filters(&self) { let mut interval = tokio::time::interval(self.inner.stale_filter_ttl); diff --git a/crates/rpc/rpc/src/eth/gas_oracle.rs b/crates/rpc/rpc/src/eth/gas_oracle.rs index 344020313..b49406cf0 100644 --- a/crates/rpc/rpc/src/eth/gas_oracle.rs +++ b/crates/rpc/rpc/src/eth/gas_oracle.rs @@ -33,7 +33,7 @@ pub const DEFAULT_MAX_GAS_PRICE: U256 = U256::from_limbs([500_000_000_000u64, 0, /// The default minimum gas price, under which the sample will be ignored pub const DEFAULT_IGNORE_GAS_PRICE: U256 = U256::from_limbs([2u64, 0, 0, 0]); -/// Settings for the [GasPriceOracle] +/// Settings for the [`GasPriceOracle`] #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GasPriceOracleConfig { @@ -93,7 +93,7 @@ impl GasPriceOracle where Provider: BlockReaderIdExt + 'static, { - /// Creates and returns the [GasPriceOracle]. + /// Creates and returns the [`GasPriceOracle`]. 
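`ensure_success` above collapses an EVM outcome into `Ok(bytes)` for the success case only. A sketch against a trimmed model of the result type; revm's real `ExecutionResult` also carries gas accounting, logs, and a structured `Output`:

```rust
/// Trimmed stand-in for revm's `ExecutionResult`.
enum ExecutionResult {
    Success { output: Vec<u8> },
    Revert { output: Vec<u8> },
    Halt { reason: &'static str },
}

#[derive(Debug)]
enum EthApiError {
    ExecutionReverted(Vec<u8>),
    EvmHalt(&'static str),
}

/// Only success yields output bytes; revert and halt become RPC errors.
fn ensure_success(result: ExecutionResult) -> Result<Vec<u8>, EthApiError> {
    match result {
        ExecutionResult::Success { output } => Ok(output),
        ExecutionResult::Revert { output } => Err(EthApiError::ExecutionReverted(output)),
        ExecutionResult::Halt { reason } => Err(EthApiError::EvmHalt(reason)),
    }
}

fn main() {
    assert_eq!(ensure_success(ExecutionResult::Success { output: vec![1] }).unwrap(), vec![1]);
    assert!(ensure_success(ExecutionResult::Halt { reason: "out of gas" }).is_err());
}
```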
pub fn new( provider: Provider, mut oracle_config: GasPriceOracleConfig, @@ -269,14 +269,14 @@ where } } -/// Container type for mutable inner state of the [GasPriceOracle] +/// Container type for mutable inner state of the [`GasPriceOracle`] #[derive(Debug)] struct GasPriceOracleInner { last_price: GasPriceOracleResult, lowest_effective_tip_cache: EffectiveTipLruCache, } -/// Wrapper struct for LruMap +/// Wrapper struct for `LruMap` #[derive(Deref, DerefMut)] pub struct EffectiveTipLruCache(LruMap), ByLength>); diff --git a/crates/rpc/rpc/src/eth/id_provider.rs b/crates/rpc/rpc/src/eth/id_provider.rs index 0355d714d..6691e13a9 100644 --- a/crates/rpc/rpc/src/eth/id_provider.rs +++ b/crates/rpc/rpc/src/eth/id_provider.rs @@ -1,7 +1,7 @@ use jsonrpsee::types::SubscriptionId; use std::fmt::Write; -/// An [IdProvider](jsonrpsee::core::traits::IdProvider) for ethereum subscription ids. +/// An [`IdProvider`](jsonrpsee::core::traits::IdProvider) for ethereum subscription ids. /// /// Returns new hex-string [QUANTITY](https://ethereum.org/en/developers/docs/apis/json-rpc/#quantities-encoding) ids #[derive(Debug, Clone, Copy, Default)] diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index eb41c1afb..fdfa836b9 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -1,4 +1,4 @@ -//! `eth_` PubSub RPC handler implementation +//! `eth_` `PubSub` RPC handler implementation use crate::{ eth::logs_utils, @@ -44,7 +44,7 @@ pub struct EthPubSub { impl EthPubSub { /// Creates a new, shareable instance. /// - /// Subscription tasks are spawned via [tokio::task::spawn] + /// Subscription tasks are spawned via [`tokio::task::spawn`] pub fn new(provider: Provider, pool: Pool, chain_events: Events, network: Network) -> Self { Self::with_spawner( provider, diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index 3fc09da34..6943c6e28 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -113,14 +113,14 @@ impl FillableTransaction for TransactionSigned { } } -/// Returns the addresses of the precompiles corresponding to the SpecId. +/// Returns the addresses of the precompiles corresponding to the `SpecId`. #[inline] pub(crate) fn get_precompiles(spec_id: SpecId) -> impl IntoIterator { let spec = PrecompileSpecId::from_spec_id(spec_id); Precompiles::new(spec).addresses().copied().map(Address::from) } -/// Prepares the [EnvWithHandlerCfg] for execution. +/// Prepares the [`EnvWithHandlerCfg`] for execution. /// /// Does not commit any changes to the underlying database. /// @@ -195,7 +195,7 @@ where Ok(env) } -/// Creates a new [EnvWithHandlerCfg] to be used for executing the [TransactionRequest] in +/// Creates a new [`EnvWithHandlerCfg`] to be used for executing the [`TransactionRequest`] in /// `eth_call`. /// /// Note: this does _not_ access the Database to check the sender. @@ -208,10 +208,10 @@ pub(crate) fn build_call_evm_env( Ok(EnvWithHandlerCfg::new_with_cfg_env(cfg, block, tx)) } -/// Configures a new [TxEnv] for the [TransactionRequest] +/// Configures a new [`TxEnv`] for the [`TransactionRequest`] /// -/// All [TxEnv] fields are derived from the given [TransactionRequest], if fields are `None`, they -/// fall back to the [BlockEnv]'s settings. +/// All [`TxEnv`] fields are derived from the given [`TransactionRequest`], if fields are `None`, +/// they fall back to the [`BlockEnv`]'s settings. 
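The fallback rule documented for `create_txn_env` above (request fields that are `None` fall back to the block environment) looks roughly like this with drastically reduced stand-in types; revm's real `BlockEnv`/`TxEnv` carry many more fields:

```rust
struct BlockEnv {
    gas_limit: u64,
    basefee: u128,
}

struct TransactionRequest {
    gas: Option<u64>,
    gas_price: Option<u128>,
    value: Option<u128>,
}

struct TxEnv {
    gas_limit: u64,
    gas_price: u128,
    value: u128,
}

/// Every missing request field is filled from `BlockEnv` or a neutral default.
fn create_txn_env(block_env: &BlockEnv, request: TransactionRequest) -> TxEnv {
    TxEnv {
        gas_limit: request.gas.unwrap_or(block_env.gas_limit),
        gas_price: request.gas_price.unwrap_or(block_env.basefee),
        value: request.value.unwrap_or(0),
    }
}

fn main() {
    let block_env = BlockEnv { gas_limit: 30_000_000, basefee: 7 };
    let empty = TransactionRequest { gas: None, gas_price: None, value: None };
    let tx_env = create_txn_env(&block_env, empty);
    assert_eq!((tx_env.gas_limit, tx_env.gas_price, tx_env.value), (30_000_000, 7, 0));
}
```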
pub(crate) fn create_txn_env( block_env: &BlockEnv, request: TransactionRequest, @@ -277,7 +277,7 @@ pub(crate) fn create_txn_env( Ok(env) } -/// Caps the configured [TxEnv] `gas_limit` with the allowance of the caller. +/// Caps the configured [`TxEnv`] `gas_limit` with the allowance of the caller. pub(crate) fn cap_tx_gas_limit_with_caller_allowance( db: &mut DB, env: &mut TxEnv, @@ -320,7 +320,7 @@ where .unwrap_or_default()) } -/// Helper type for representing the fees of a [TransactionRequest] +/// Helper type for representing the fees of a [`TransactionRequest`] pub(crate) struct CallFees { /// EIP-1559 priority fee max_priority_fee_per_gas: Option, @@ -338,7 +338,7 @@ pub(crate) struct CallFees { // === impl CallFees === impl CallFees { - /// Ensures the fields of a [TransactionRequest] are not conflicting. + /// Ensures the fields of a [`TransactionRequest`] are not conflicting. /// /// # EIP-4844 transactions /// @@ -346,8 +346,8 @@ impl CallFees { /// If the `maxFeePerBlobGas` or `blobVersionedHashes` are set we treat it as an EIP-4844 /// transaction. /// - /// Note: Due to the `Default` impl of [BlockEnv] (Some(0)) this assumes the `block_blob_fee` is - /// always `Some` + /// Note: Due to the `Default` impl of [`BlockEnv`] (Some(0)) this assumes the `block_blob_fee` + /// is always `Some` fn ensure_fees( call_gas_price: Option, call_max_fee: Option, @@ -483,7 +483,7 @@ fn apply_block_overrides(overrides: BlockOverrides, env: &mut BlockEnv) { } } -/// Applies the given state overrides (a set of [AccountOverride]) to the [CacheDB]. +/// Applies the given state overrides (a set of [`AccountOverride`]) to the [`CacheDB`]. pub(crate) fn apply_state_overrides( overrides: StateOverride, db: &mut CacheDB, @@ -498,7 +498,7 @@ where Ok(()) } -/// Applies a single [AccountOverride] to the [CacheDB]. +/// Applies a single [`AccountOverride`] to the [`CacheDB`]. fn apply_account_override( account: Address, account_override: AccountOverride, diff --git a/crates/rpc/rpc/src/eth/signer.rs b/crates/rpc/rpc/src/eth/signer.rs index 9999a7dac..cffaa01f2 100644 --- a/crates/rpc/rpc/src/eth/signer.rs +++ b/crates/rpc/rpc/src/eth/signer.rs @@ -50,14 +50,14 @@ pub(crate) struct DevSigner { #[allow(dead_code)] impl DevSigner { - /// Generates a random dev signer which satisfies [EthSigner] trait + /// Generates a random dev signer which satisfies [`EthSigner`] trait pub(crate) fn random() -> Box { let mut signers = Self::random_signers(1); signers.pop().expect("expect to generate at least one signer") } /// Generates provided number of random dev signers - /// which satisfy [EthSigner] trait + /// which satisfy [`EthSigner`] trait pub(crate) fn random_signers(num: u32) -> Vec> { let mut signers = Vec::new(); for _ in 0..num { diff --git a/crates/rpc/rpc/src/eth/utils.rs b/crates/rpc/rpc/src/eth/utils.rs index b22f970cb..a4291c4b9 100644 --- a/crates/rpc/rpc/src/eth/utils.rs +++ b/crates/rpc/rpc/src/eth/utils.rs @@ -3,9 +3,9 @@ use crate::eth::error::{EthApiError, EthResult}; use reth_primitives::{Bytes, PooledTransactionsElement, PooledTransactionsElementEcRecovered}; -/// Recovers a [PooledTransactionsElementEcRecovered] from an enveloped encoded byte stream. +/// Recovers a [`PooledTransactionsElementEcRecovered`] from an enveloped encoded byte stream. 
/// -/// See [PooledTransactionsElement::decode_enveloped] +/// See [`PooledTransactionsElement::decode_enveloped`] pub(crate) fn recover_raw_transaction( data: Bytes, ) -> EthResult { diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index e562ee5bf..17dc8fcb8 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -11,7 +11,7 @@ //! and can reduce overall performance of all concurrent requests handled via the jsonrpsee server. //! //! To avoid this, all blocking or CPU intensive handlers must be spawned to a separate task. See -//! the [EthApi] handler implementations for examples. The rpc-api traits make no use of the +//! the [`EthApi`] handler implementations for examples. The rpc-api traits make no use of the //! available jsonrpsee `blocking` attribute to give implementers more freedom because the //! `blocking` attribute and async handlers are mutually exclusive. However, as mentioned above, a //! lot of handlers make use of async functions, caching for example, but are also using blocking diff --git a/crates/rpc/rpc/src/result.rs b/crates/rpc/rpc/src/result.rs index 281680d90..f00c9e279 100644 --- a/crates/rpc/rpc/src/result.rs +++ b/crates/rpc/rpc/src/result.rs @@ -6,7 +6,7 @@ use std::fmt::Display; /// Helper trait to easily convert various `Result` types into [`RpcResult`] pub trait ToRpcResult: Sized { - /// Converts the error of the [Result] to an [RpcResult] via the `Err` [Display] impl. + /// Converts the error of the [Result] to an [`RpcResult`] via the `Err` [Display] impl. fn to_rpc_result(self) -> RpcResult where Err: Display, diff --git a/crates/rpc/rpc/src/reth.rs b/crates/rpc/rpc/src/reth.rs index 87865ef31..17925b5ab 100644 --- a/crates/rpc/rpc/src/reth.rs +++ b/crates/rpc/rpc/src/reth.rs @@ -24,7 +24,7 @@ impl RethApi { &self.inner.provider } - /// Create a new instance of the [RethApi] + /// Create a new instance of the [`RethApi`] pub fn new(provider: Provider, task_spawner: Box) -> Self { let inner = Arc::new(RethApiInner { provider, task_spawner }); Self { inner } diff --git a/crates/rpc/rpc/src/rpc.rs b/crates/rpc/rpc/src/rpc.rs index 771987a88..0f74730f7 100644 --- a/crates/rpc/rpc/src/rpc.rs +++ b/crates/rpc/rpc/src/rpc.rs @@ -13,7 +13,7 @@ pub struct RPCApi { } impl RPCApi { - /// Return a new RPCApi struct, with given module_map + /// Return a new `RPCApi` struct, with given `module_map` pub fn new(module_map: HashMap) -> Self { Self { rpc_modules: Arc::new(RpcModules::new(module_map)) } } diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 9c7dac6a8..e73e2adbf 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -48,7 +48,7 @@ impl TraceApi { &self.inner.provider } - /// Create a new instance of the [TraceApi] + /// Create a new instance of the [`TraceApi`] pub fn new(provider: Provider, eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self { let inner = Arc::new(TraceApiInner { provider, eth_api, blocking_task_guard }); Self { inner } @@ -232,7 +232,7 @@ where /// Returns all transaction traces that match the given filter. /// - /// This is similar to [Self::trace_block] but only returns traces for transactions that match + /// This is similar to [`Self::trace_block`] but only returns traces for transactions that match /// the filter. pub async fn trace_filter( &self, @@ -447,7 +447,7 @@ where /// Returns the opcodes of all transactions in the given block. 
/// - /// This is the same as [Self::trace_transaction_opcode_gas] but for all transactions in a + /// This is the same as [`Self::trace_transaction_opcode_gas`] but for all transactions in a /// block. pub async fn trace_block_opcode_gas( &self, diff --git a/crates/stages/api/src/metrics/listener.rs b/crates/stages/api/src/metrics/listener.rs index 39ccc29a3..d92e157ba 100644 --- a/crates/stages/api/src/metrics/listener.rs +++ b/crates/stages/api/src/metrics/listener.rs @@ -49,7 +49,7 @@ pub struct MetricsListener { } impl MetricsListener { - /// Creates a new [MetricsListener] with the provided receiver of [MetricEvent]. + /// Creates a new [`MetricsListener`] with the provided receiver of [`MetricEvent`]. pub fn new(events_rx: UnboundedReceiver) -> Self { Self { events_rx, sync_metrics: SyncMetrics::default() } } diff --git a/crates/stages/api/src/metrics/sync_metrics.rs b/crates/stages/api/src/metrics/sync_metrics.rs index 148368f02..3548f6d91 100644 --- a/crates/stages/api/src/metrics/sync_metrics.rs +++ b/crates/stages/api/src/metrics/sync_metrics.rs @@ -12,7 +12,8 @@ pub(crate) struct SyncMetrics { } impl SyncMetrics { - /// Returns existing or initializes a new instance of [StageMetrics] for the provided [StageId]. + /// Returns existing or initializes a new instance of [`StageMetrics`] for the provided + /// [`StageId`]. pub(crate) fn get_stage_metrics(&mut self, stage_id: StageId) -> &mut StageMetrics { self.stages .entry(stage_id) diff --git a/crates/stages/api/src/pipeline/ctrl.rs b/crates/stages/api/src/pipeline/ctrl.rs index 4d44f02c6..83dbe0cf3 100644 --- a/crates/stages/api/src/pipeline/ctrl.rs +++ b/crates/stages/api/src/pipeline/ctrl.rs @@ -2,7 +2,7 @@ use reth_primitives::{BlockNumber, SealedHeader}; /// Determines the control flow during pipeline execution. /// -/// See [Pipeline::run_loop](crate::Pipeline::run_loop) for more information. +/// See [`Pipeline::run_loop`](crate::Pipeline::run_loop) for more information. #[derive(Debug, Eq, PartialEq)] pub enum ControlFlow { /// An unwind was requested and must be performed before continuing. diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 6b7b65b9b..a2a17e3c7 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -38,10 +38,10 @@ pub use set::*; pub(crate) type BoxedStage = Box>; /// The future that returns the owned pipeline and the result of the pipeline run. See -/// [Pipeline::run_as_fut]. +/// [`Pipeline::run_as_fut`]. pub type PipelineFut = Pin> + Send>>; -/// The pipeline type itself with the result of [Pipeline::run_as_fut] +/// The pipeline type itself with the result of [`Pipeline::run_as_fut`] pub type PipelineWithResult = (Pipeline, Result); #[cfg_attr(doc, aquamarine::aquamarine)] @@ -53,19 +53,19 @@ pub type PipelineWithResult = (Pipeline, Result { /// Provider factory. provider_factory: ProviderFactory, @@ -195,9 +195,9 @@ where /// up to the block that caused the error. /// /// Returns the control flow after it ran the pipeline. - /// This will be [ControlFlow::Continue] or [ControlFlow::NoProgress] of the _last_ stage in the - /// pipeline (for example the `Finish` stage). Or [ControlFlow::Unwind] of the stage that caused - /// the unwind. + /// This will be [`ControlFlow::Continue`] or [`ControlFlow::NoProgress`] of the _last_ stage in + /// the pipeline (for example the `Finish` stage). Or [`ControlFlow::Unwind`] of the stage + /// that caused the unwind. 
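The `ControlFlow` docs above describe how the pipeline run loop reacts to stage outcomes: an unwind from any stage short-circuits the loop, otherwise the flow of the last stage is reported. A pared-down sketch with made-up result plumbing (the real `Unwind` variant also carries the offending header):

```rust
#[derive(Debug, PartialEq, Eq)]
enum ControlFlow {
    /// Stage made progress up to this block.
    Continue { block_number: u64 },
    /// Stage had nothing to do.
    NoProgress { block_number: Option<u64> },
    /// Unwind to this target before syncing can resume.
    Unwind { target: u64 },
}

/// Return the first unwind, or the control flow of the last stage.
fn run_stages(outcomes: Vec<ControlFlow>) -> ControlFlow {
    let mut last = ControlFlow::NoProgress { block_number: None };
    for flow in outcomes {
        if matches!(flow, ControlFlow::Unwind { .. }) {
            return flow;
        }
        last = flow;
    }
    last
}

fn main() {
    let flow = run_stages(vec![
        ControlFlow::Continue { block_number: 10 },
        ControlFlow::Unwind { target: 5 },
        ControlFlow::Continue { block_number: 10 },
    ]);
    assert_eq!(flow, ControlFlow::Unwind { target: 5 });
}
```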
pub async fn run_loop(&mut self) -> Result { self.move_to_static_files()?; @@ -240,12 +240,12 @@ where /// all data from the database to static files for corresponding /// [segments](reth_primitives::static_file::StaticFileSegment), according to their [stage /// checkpoints](StageCheckpoint): - /// - [StaticFileSegment::Headers](reth_primitives::static_file::StaticFileSegment::Headers) -> - /// [StageId::Headers] - /// - [StaticFileSegment::Receipts](reth_primitives::static_file::StaticFileSegment::Receipts) - /// -> [StageId::Execution] - /// - [StaticFileSegment::Transactions](reth_primitives::static_file::StaticFileSegment::Transactions) - /// -> [StageId::Bodies] + /// - [`StaticFileSegment::Headers`](reth_primitives::static_file::StaticFileSegment::Headers) + /// -> [`StageId::Headers`] + /// - [`StaticFileSegment::Receipts`](reth_primitives::static_file::StaticFileSegment::Receipts) + /// -> [`StageId::Execution`] + /// - [`StaticFileSegment::Transactions`](reth_primitives::static_file::StaticFileSegment::Transactions) + /// -> [`StageId::Bodies`] /// /// CAUTION: This method locks the static file producer Mutex, hence can block the thread if the /// lock is occupied. diff --git a/crates/stages/api/src/pipeline/set.rs b/crates/stages/api/src/pipeline/set.rs index 01613f36c..92fb69bb8 100644 --- a/crates/stages/api/src/pipeline/set.rs +++ b/crates/stages/api/src/pipeline/set.rs @@ -201,7 +201,7 @@ where /// Disables the given stage if the given closure returns true. /// - /// See [Self::disable] + /// See [`Self::disable`] pub fn disable_if(self, stage_id: StageId, f: F) -> Self where F: FnOnce() -> bool, @@ -214,7 +214,7 @@ where /// Disables all given stages if the given closure returns true. /// - /// See [Self::disable] + /// See [`Self::disable`] pub fn disable_all_if(self, stages: &[StageId], f: F) -> Self where F: FnOnce() -> bool, diff --git a/crates/stages/api/src/stage.rs b/crates/stages/api/src/stage.rs index b344fff83..835cae45b 100644 --- a/crates/stages/api/src/stage.rs +++ b/crates/stages/api/src/stage.rs @@ -12,7 +12,7 @@ use std::{ task::{Context, Poll}, }; -/// Stage execution input, see [Stage::execute]. +/// Stage execution input, see [`Stage::execute`]. #[derive(Debug, Default, PartialEq, Eq, Clone, Copy)] pub struct ExecInput { /// The target block number the stage needs to execute towards. @@ -121,7 +121,7 @@ impl ExecInput { } } -/// Stage unwind input, see [Stage::unwind]. +/// Stage unwind input, see [`Stage::unwind`]. #[derive(Debug, Default, PartialEq, Eq, Clone, Copy)] pub struct UnwindInput { /// The current highest checkpoint of the stage. @@ -249,7 +249,7 @@ pub trait Stage: Send + Sync { /// [Stage] trait extension. pub trait StageExt: Stage { /// Utility extension for the `Stage` trait that invokes `Stage::poll_execute_ready` - /// with [poll_fn] context. For more information see [Stage::poll_execute_ready]. + /// with [`poll_fn`] context. For more information see [`Stage::poll_execute_ready`]. fn execute_ready( &mut self, input: ExecInput, diff --git a/crates/stages/api/src/test_utils.rs b/crates/stages/api/src/test_utils.rs index 3caba0737..a8e544767 100644 --- a/crates/stages/api/src/test_utils.rs +++ b/crates/stages/api/src/test_utils.rs @@ -8,7 +8,7 @@ use std::collections::VecDeque; /// A test stage that can be used for testing. 
/// -/// This can be used to mock expected outputs of [Stage::execute] and [Stage::unwind] +/// This can be used to mock expected outputs of [`Stage::execute`] and [`Stage::unwind`] #[derive(Debug)] pub struct TestStage { id: StageId, diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index aecd2d5f8..dfce6445d 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -678,7 +678,7 @@ mod tests { ) } - /// A helper struct for running the [BodyStage]. + /// A helper struct for running the [`BodyStage`]. pub(crate) struct BodyTestRunner { responses: HashMap, db: TestStageDB, @@ -892,7 +892,7 @@ mod tests { } } - /// A [BodyDownloader] that is backed by an internal [HashMap] for testing. + /// A [`BodyDownloader`] that is backed by an internal [`HashMap`] for testing. #[derive(Debug)] pub(crate) struct TestBodyDownloader { provider_factory: ProviderFactory>>, diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index e76b21092..9a9a29d38 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -36,30 +36,30 @@ use tracing::*; /// update history indexes. /// /// Input tables: -/// - [tables::CanonicalHeaders] get next block to execute. -/// - [tables::Headers] get for revm environment variables. -/// - [tables::HeaderTerminalDifficulties] -/// - [tables::BlockBodyIndices] to get tx number -/// - [tables::Transactions] to execute +/// - [`tables::CanonicalHeaders`] get next block to execute. +/// - [`tables::Headers`] get for revm environment variables. +/// - [`tables::HeaderTerminalDifficulties`] +/// - [`tables::BlockBodyIndices`] to get tx number +/// - [`tables::Transactions`] to execute /// -/// For state access [LatestStateProviderRef] provides us latest state and history state -/// For latest most recent state [LatestStateProviderRef] would need (Used for execution Stage): -/// - [tables::PlainAccountState] -/// - [tables::Bytecodes] -/// - [tables::PlainStorageState] +/// For state access [`LatestStateProviderRef`] provides us latest state and history state +/// For latest most recent state [`LatestStateProviderRef`] would need (Used for execution Stage): +/// - [`tables::PlainAccountState`] +/// - [`tables::Bytecodes`] +/// - [`tables::PlainStorageState`] /// /// Tables updated after state finishes execution: -/// - [tables::PlainAccountState] -/// - [tables::PlainStorageState] -/// - [tables::Bytecodes] -/// - [tables::AccountChangeSets] -/// - [tables::StorageChangeSets] +/// - [`tables::PlainAccountState`] +/// - [`tables::PlainStorageState`] +/// - [`tables::Bytecodes`] +/// - [`tables::AccountChangeSets`] +/// - [`tables::StorageChangeSets`] /// /// For unwinds we are accessing: -/// - [tables::BlockBodyIndices] get tx index to know what needs to be unwinded -/// - [tables::AccountsHistory] to remove change set and apply old values to -/// - [tables::PlainAccountState] [tables::StoragesHistory] to remove change set and apply old -/// values to [tables::PlainStorageState] +/// - [`tables::BlockBodyIndices`] get tx index to know what needs to be unwinded +/// - [`tables::AccountsHistory`] to remove change set and apply old values to +/// - [`tables::PlainAccountState`] [`tables::StoragesHistory`] to remove change set and apply old +/// values to [`tables::PlainStorageState`] // false positive, we cannot derive it if !DB: Debug. 
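The `TestStage` helper documented above mocks the outputs of `Stage::execute` and `Stage::unwind` from pre-seeded queues. A minimal standalone rendition of the same `VecDeque`-scripted pattern; the real trait is generic over a database provider and has async readiness hooks, which are omitted here:

```rust
use std::collections::VecDeque;

#[derive(Debug, Clone, PartialEq, Eq)]
struct ExecOutput {
    checkpoint: u64,
    done: bool,
}

/// Pops a scripted result per `execute` call; panics if the test forgot one.
struct TestStage {
    exec_outputs: VecDeque<Result<ExecOutput, &'static str>>,
}

impl TestStage {
    fn new() -> Self {
        Self { exec_outputs: VecDeque::new() }
    }

    fn with_execute(mut self, outputs: Vec<Result<ExecOutput, &'static str>>) -> Self {
        self.exec_outputs = outputs.into();
        self
    }

    fn execute(&mut self) -> Result<ExecOutput, &'static str> {
        self.exec_outputs.pop_front().expect("no execute output scripted for TestStage")
    }
}

fn main() {
    let mut stage =
        TestStage::new().with_execute(vec![Ok(ExecOutput { checkpoint: 100, done: true })]);
    assert_eq!(stage.execute(), Ok(ExecOutput { checkpoint: 100, done: true }));
}
```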
#[allow(missing_debug_implementations)] pub struct ExecutionStage { @@ -75,7 +75,7 @@ pub struct ExecutionStage { external_clean_threshold: u64, /// Pruning configuration. prune_modes: PruneModes, - /// Handle to communicate with ExEx manager. + /// Handle to communicate with `ExEx` manager. exex_manager_handle: ExExManagerHandle, } @@ -100,7 +100,7 @@ impl ExecutionStage { /// Create an execution stage with the provided executor. /// - /// The commit threshold will be set to 10_000. + /// The commit threshold will be set to `10_000`. pub fn new_with_executor(executor_provider: E) -> Self { Self::new( executor_provider, @@ -111,7 +111,7 @@ impl ExecutionStage { ) } - /// Create new instance of [ExecutionStage] from configuration. + /// Create new instance of [`ExecutionStage`] from configuration. pub fn from_config( executor_provider: E, config: ExecutionConfig, diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index fc1ab881b..a18e8506c 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -43,7 +43,7 @@ pub struct AccountHashingStage { } impl AccountHashingStage { - /// Create new instance of [AccountHashingStage]. + /// Create new instance of [`AccountHashingStage`]. pub const fn new(config: HashingConfig, etl_config: EtlConfig) -> Self { Self { clean_threshold: config.clean_threshold, @@ -365,8 +365,8 @@ mod tests { self.commit_threshold = threshold; } - /// Iterates over PlainAccount table and checks that the accounts match the ones - /// in the HashedAccounts table + /// Iterates over `PlainAccount` table and checks that the accounts match the ones + /// in the `HashedAccounts` table pub(crate) fn check_hashed_accounts(&self) -> Result<(), TestRunnerError> { self.db.query(|tx| { let mut acc_cursor = tx.cursor_read::()?; @@ -384,7 +384,7 @@ mod tests { Ok(()) } - /// Same as check_hashed_accounts, only that checks with the old account state, + /// Same as `check_hashed_accounts`, only that checks with the old account state, /// namely, the same account with nonce - 1 and balance - 1. pub(crate) fn check_old_hashed_accounts(&self) -> Result<(), TestRunnerError> { self.db.query(|tx| { diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index 0e5f44bbd..5ee2275bc 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -44,7 +44,7 @@ pub struct StorageHashingStage { } impl StorageHashingStage { - /// Create new instance of [StorageHashingStage]. + /// Create new instance of [`StorageHashingStage`]. pub const fn new(config: HashingConfig, etl_config: EtlConfig) -> Self { Self { clean_threshold: config.clean_threshold, diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 6ca02a4aa..eaeada887 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -55,9 +55,9 @@ pub struct HeaderStage { consensus: Arc, /// Current sync gap. sync_gap: Option, - /// ETL collector with HeaderHash -> BlockNumber + /// ETL collector with `HeaderHash` -> `BlockNumber` hash_collector: Collector, - /// ETL collector with BlockNumber -> SealedHeader + /// ETL collector with `BlockNumber` -> `SealedHeader` header_collector: Collector, /// Returns true if the ETL collector has all necessary headers to fill the gap. 
is_etl_ready: bool, diff --git a/crates/stages/stages/src/stages/index_account_history.rs b/crates/stages/stages/src/stages/index_account_history.rs index 7b41334aa..57d724833 100644 --- a/crates/stages/stages/src/stages/index_account_history.rs +++ b/crates/stages/stages/src/stages/index_account_history.rs @@ -29,7 +29,7 @@ pub struct IndexAccountHistoryStage { } impl IndexAccountHistoryStage { - /// Create new instance of [IndexAccountHistoryStage]. + /// Create new instance of [`IndexAccountHistoryStage`]. pub const fn new( config: IndexHistoryConfig, etl_config: EtlConfig, diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs index 0b3556396..e38ec784d 100644 --- a/crates/stages/stages/src/stages/index_storage_history.rs +++ b/crates/stages/stages/src/stages/index_storage_history.rs @@ -33,7 +33,7 @@ pub struct IndexStorageHistoryStage { } impl IndexStorageHistoryStage { - /// Create new instance of [IndexStorageHistoryStage]. + /// Create new instance of [`IndexStorageHistoryStage`]. pub const fn new( config: IndexHistoryConfig, etl_config: EtlConfig, diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index 4286d4069..eb135d21f 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -84,17 +84,17 @@ pub enum MerkleStage { } impl MerkleStage { - /// Stage default for the [MerkleStage::Execution]. + /// Stage default for the [`MerkleStage::Execution`]. pub const fn default_execution() -> Self { Self::Execution { clean_threshold: MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD } } - /// Stage default for the [MerkleStage::Unwind]. + /// Stage default for the [`MerkleStage::Unwind`]. pub const fn default_unwind() -> Self { Self::Unwind } - /// Create new instance of [MerkleStage::Execution]. + /// Create new instance of [`MerkleStage::Execution`]. pub const fn new_execution(clean_threshold: u64) -> Self { Self::Execution { clean_threshold } } diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index e66527106..329f51718 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -24,7 +24,7 @@ use thiserror::Error; use tracing::*; /// Maximum number of transactions to read from disk at one time before we flush their senders to -/// disk. Since each rayon worker will hold at most 100 transactions (WORKER_CHUNK_SIZE), we +/// disk. Since each rayon worker will hold at most 100 transactions (`WORKER_CHUNK_SIZE`), we /// effectively limit each batch to at most 1000 channels in memory. const BATCH_SIZE: usize = 100_000; @@ -42,7 +42,7 @@ pub struct SenderRecoveryStage { } impl SenderRecoveryStage { - /// Create new instance of [SenderRecoveryStage]. + /// Create new instance of [`SenderRecoveryStage`]. pub const fn new(config: SenderRecoveryConfig) -> Self { Self { commit_threshold: config.commit_threshold } } @@ -506,10 +506,10 @@ mod tests { /// # Panics /// - /// 1. If there are any entries in the [tables::TransactionSenders] table above a given + /// 1. If there are any entries in the [`tables::TransactionSenders`] table above a given /// block number. /// 2. If there is no requested block entry in the bodies table, but - /// [tables::TransactionSenders] is not empty. + /// [`tables::TransactionSenders`] is not empty.
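The batching arithmetic in the `sender_recovery.rs` comment above is easy to verify. A minimal sketch, assuming `WORKER_CHUNK_SIZE = 100` as the doc comment states (only `BATCH_SIZE` appears in this hunk):

```rust
// Constants mirroring the doc comment above; WORKER_CHUNK_SIZE is assumed from
// the prose, since its definition sits outside this hunk.
const BATCH_SIZE: usize = 100_000;
const WORKER_CHUNK_SIZE: usize = 100;

fn main() {
    // Each rayon worker holds one chunk at a time, so a batch fans out into at
    // most BATCH_SIZE / WORKER_CHUNK_SIZE result channels held in memory at once.
    let max_channels = BATCH_SIZE / WORKER_CHUNK_SIZE;
    assert_eq!(max_channels, 1_000);
    println!("at most {max_channels} channels per batch");
}
```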
fn ensure_no_senders_by_block(&self, block: BlockNumber) -> Result<(), TestRunnerError> { let body_result = self .db diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index 27e8e68cf..fea8f2c0c 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -44,7 +44,7 @@ impl Default for TransactionLookupStage { } impl TransactionLookupStage { - /// Create new instance of [TransactionLookupStage]. + /// Create new instance of [`TransactionLookupStage`]. pub const fn new( config: TransactionLookupConfig, etl_config: EtlConfig, @@ -427,10 +427,10 @@ mod tests { /// # Panics /// - /// 1. If there are any entries in the [tables::TransactionHashNumbers] table above a given - /// block number. + /// 1. If there are any entries in the [`tables::TransactionHashNumbers`] table above a + /// given block number. /// 2. If there is no requested block entry in the bodies table, but - /// [tables::TransactionHashNumbers] is not empty. + /// [`tables::TransactionHashNumbers`] is not empty. fn ensure_no_hash_by_block(&self, number: BlockNumber) -> Result<(), TestRunnerError> { let body_result = self .db diff --git a/crates/stages/stages/src/test_utils/runner.rs b/crates/stages/stages/src/test_utils/runner.rs index 17650287e..d0003ff12 100644 --- a/crates/stages/stages/src/test_utils/runner.rs +++ b/crates/stages/stages/src/test_utils/runner.rs @@ -42,7 +42,7 @@ pub(crate) trait ExecuteStageTestRunner: StageTestRunner { output: Option, ) -> Result<(), TestRunnerError>; - /// Run [Stage::execute] and return a receiver for the result. + /// Run [`Stage::execute`] and return a receiver for the result. fn execute(&self, input: ExecInput) -> oneshot::Receiver> { let (tx, rx) = oneshot::channel(); let (db, mut stage) = (self.db().factory.clone(), self.stage()); @@ -58,7 +58,7 @@ pub(crate) trait ExecuteStageTestRunner: StageTestRunner { rx } - /// Run a hook after [Stage::execute]. Required for Headers & Bodies stages. + /// Run a hook after [`Stage::execute`]. Required for Headers & Bodies stages. async fn after_execution(&self, _seed: Self::Seed) -> Result<(), TestRunnerError> { Ok(()) } @@ -68,7 +68,7 @@ pub(crate) trait UnwindStageTestRunner: StageTestRunner { /// Validate the unwind fn validate_unwind(&self, input: UnwindInput) -> Result<(), TestRunnerError>; - /// Run [Stage::unwind] and return a receiver for the result. + /// Run [`Stage::unwind`] and return a receiver for the result. async fn unwind(&self, input: UnwindInput) -> Result { let (tx, rx) = oneshot::channel(); let (db, mut stage) = (self.db().factory.clone(), self.stage()); @@ -81,7 +81,7 @@ pub(crate) trait UnwindStageTestRunner: StageTestRunner { rx.await.unwrap() } - /// Run a hook before [Stage::unwind]. Required for MerkleStage. + /// Run a hook before [`Stage::unwind`]. Required for `MerkleStage`.
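The runner's `execute` above hands back a `oneshot::Receiver` instead of awaiting in place, so a test can trigger a stage and drive other machinery (downloaders, hooks) before collecting the result. A minimal self-contained sketch of that pattern, with stand-in types replacing the real `ExecInput`/`ExecOutput`/`StageError`:

```rust
// Assumes tokio = { version = "1", features = ["full"] } as a dependency.
use tokio::sync::oneshot;

// Stand-ins for the real stage types; names are illustrative only.
#[derive(Debug)]
struct ExecInput { target: u64 }
#[derive(Debug)]
struct ExecOutput { done: bool }

// Returning the receiver lets the caller start execution now and await later.
fn execute(input: ExecInput) -> oneshot::Receiver<Result<ExecOutput, String>> {
    let (tx, rx) = oneshot::channel();
    tokio::spawn(async move {
        // A real runner would call `stage.execute(&provider, input)` here.
        let result: Result<ExecOutput, String> = Ok(ExecOutput { done: input.target > 0 });
        let _ = tx.send(result);
    });
    rx
}

#[tokio::main]
async fn main() {
    let rx = execute(ExecInput { target: 42 });
    // ...other test setup could run here before the result is collected...
    println!("{:?}", rx.await.unwrap());
}
```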
fn before_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> { Ok(()) } diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index c92ccc15b..01593eb30 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -32,7 +32,7 @@ pub struct TestStageDB { } impl Default for TestStageDB { - /// Create a new instance of [TestStageDB] + /// Create a new instance of [`TestStageDB`] fn default() -> Self { let (static_dir, static_dir_path) = create_test_static_files_dir(); Self { @@ -101,7 +101,7 @@ impl TestStageDB { } /// Check that there is no table entry above a given - /// number by [Table::Key] + /// number by [`Table::Key`] pub fn ensure_no_entry_above(&self, num: u64, mut selector: F) -> ProviderResult<()> where T: Table, @@ -117,7 +117,7 @@ impl TestStageDB { } /// Check that there is no table entry above a given - /// number by [Table::Value] + /// number by [`Table::Value`] pub fn ensure_no_entry_above_by_value( &self, num: u64, @@ -189,7 +189,7 @@ impl TestStageDB { Ok(()) } - /// Insert ordered collection of [SealedHeader] into the corresponding static file and tables + /// Insert ordered collection of [`SealedHeader`] into the corresponding static file and tables /// that are supposed to be populated by the headers stage. pub fn insert_headers<'a, I>(&self, headers: I) -> ProviderResult<()> where @@ -200,7 +200,7 @@ impl TestStageDB { /// Inserts total difficulty of headers into the corresponding static file and tables. /// - /// Superset functionality of [TestStageDB::insert_headers]. + /// Superset functionality of [`TestStageDB::insert_headers`]. pub fn insert_headers_with_td<'a, I>(&self, headers: I) -> ProviderResult<()> where I: IntoIterator, @@ -208,10 +208,10 @@ impl TestStageDB { self.insert_headers_inner::(headers) } - /// Insert ordered collection of [SealedBlock] into corresponding tables. - /// Superset functionality of [TestStageDB::insert_headers]. + /// Insert ordered collection of [`SealedBlock`] into corresponding tables. + /// Superset functionality of [`TestStageDB::insert_headers`]. /// - /// If tx_offset is set to `None`, then transactions will be stored on static files, otherwise + /// If `tx_offset` is set to `None`, then transactions will be stored on static files, otherwise /// database. /// /// Assumes that there's a single transition for each transaction (i.e. no block rewards). @@ -308,7 +308,7 @@ impl TestStageDB { }) } - /// Insert collection of ([TxNumber], [Receipt]) into the corresponding table. + /// Insert collection of ([`TxNumber`], [Receipt]) into the corresponding table. pub fn insert_receipts(&self, receipts: I) -> ProviderResult<()> where I: IntoIterator, @@ -321,8 +321,8 @@ impl TestStageDB { }) } - /// Insert collection of ([TxNumber], [Receipt]) organized by respective block numbers into the - /// corresponding table or static file segment. + /// Insert collection of ([`TxNumber`], [Receipt]) organized by respective block numbers into + /// the corresponding table or static file segment. pub fn insert_receipts_by_block( &self, receipts: I, @@ -413,7 +413,7 @@ impl TestStageDB { }) } - /// Insert collection of [ChangeSet] into corresponding tables. + /// Insert collection of [`ChangeSet`] into corresponding tables. 
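The `ensure_no_entry_above` helpers above take a selector closure that projects a table key (or value) onto the block number it is ordered by, then check the highest entry. A simplified in-memory sketch of the same idea, with a `BTreeMap` standing in for an MDBX cursor (all names hypothetical):

```rust
use std::collections::BTreeMap;

// Simplified stand-in for TestStageDB::ensure_no_entry_above: `selector` maps a
// table key onto the block number it is indexed by.
fn ensure_no_entry_above<K: Ord, V>(
    table: &BTreeMap<K, V>,
    num: u64,
    selector: impl Fn(&K) -> u64,
) -> Result<(), String> {
    // Walking from the highest key down, the first entry already decides the check.
    if let Some((key, _)) = table.iter().next_back() {
        let block = selector(key);
        if block > num {
            return Err(format!("found entry for block {block} above {num}"));
        }
    }
    Ok(())
}

fn main() {
    let mut senders = BTreeMap::new();
    senders.insert(5u64, "sender-a");
    senders.insert(9u64, "sender-b");
    assert!(ensure_no_entry_above(&senders, 10, |k| *k).is_ok());
    assert!(ensure_no_entry_above(&senders, 8, |k| *k).is_err());
}
```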
pub fn insert_changesets( &self, changesets: I, diff --git a/crates/static-file-types/src/filters.rs b/crates/static-file-types/src/filters.rs index cc844468e..b6935fbb5 100644 --- a/crates/static-file-types/src/filters.rs +++ b/crates/static-file-types/src/filters.rs @@ -3,7 +3,7 @@ use strum::AsRefStr; #[derive(Debug, Copy, Clone)] /// Static File filters. pub enum Filters { - /// Static File uses filters with [InclusionFilter] and [PerfectHashingFunction]. + /// Static File uses filters with [`InclusionFilter`] and [`PerfectHashingFunction`]. WithFilters(InclusionFilter, PerfectHashingFunction), /// Static File doesn't use any filters. WithoutFilters, diff --git a/crates/static-file-types/src/segment.rs b/crates/static-file-types/src/segment.rs index 666d47258..b325e3e9e 100644 --- a/crates/static-file-types/src/segment.rs +++ b/crates/static-file-types/src/segment.rs @@ -103,8 +103,8 @@ impl StaticFileSegment { /// Parses a filename into a `StaticFileSegment` and its expected block range. /// /// The filename is expected to follow the format: - /// "static_file_{segment}_{block_start}_{block_end}". This function checks - /// for the correct prefix ("static_file"), and then parses the segment and the inclusive + /// "`static_file`_{segment}_{`block_start`}_{`block_end`}". This function checks + /// for the correct prefix ("`static_file`"), and then parses the segment and the inclusive /// ranges for blocks. It ensures that the start of each range is less than or equal to the /// end. /// @@ -277,7 +277,7 @@ impl SegmentHeader { }; } - /// Sets a new block_range. + /// Sets a new `block_range`. pub fn set_block_range(&mut self, block_start: BlockNumber, block_end: BlockNumber) { if let Some(block_range) = &mut self.block_range { block_range.start = block_start; @@ -287,7 +287,7 @@ impl SegmentHeader { } } - /// Sets a new tx_range. + /// Sets a new `tx_range`. pub fn set_tx_range(&mut self, tx_start: TxNumber, tx_end: TxNumber) { if let Some(tx_range) = &mut self.tx_range { tx_range.start = tx_start; diff --git a/crates/static-file/src/event.rs b/crates/static-file/src/event.rs index 1a2ca31b2..a11333ce5 100644 --- a/crates/static-file/src/event.rs +++ b/crates/static-file/src/event.rs @@ -1,7 +1,7 @@ use crate::StaticFileTargets; use std::time::Duration; -/// An event emitted by a [StaticFileProducer][crate::StaticFileProducer]. +/// An event emitted by a [`StaticFileProducer`][crate::StaticFileProducer]. #[derive(Debug, PartialEq, Eq, Clone)] pub enum StaticFileProducerEvent { /// Emitted when static file producer started running. diff --git a/crates/static-file/src/segments/headers.rs b/crates/static-file/src/segments/headers.rs index 2c7e62e50..253829772 100644 --- a/crates/static-file/src/segments/headers.rs +++ b/crates/static-file/src/segments/headers.rs @@ -11,7 +11,7 @@ use reth_provider::{ use reth_storage_errors::provider::ProviderResult; use std::{ops::RangeInclusive, path::Path}; -/// Static File segment responsible for [StaticFileSegment::Headers] part of data. +/// Static File segment responsible for [`StaticFileSegment::Headers`] part of data. #[derive(Debug, Default)] pub struct Headers; diff --git a/crates/static-file/src/segments/mod.rs b/crates/static-file/src/segments/mod.rs index 581a2cba7..df6b8da73 100644 --- a/crates/static-file/src/segments/mod.rs +++ b/crates/static-file/src/segments/mod.rs @@ -1,4 +1,4 @@ -//! StaticFile segment implementations and utilities. +//! `StaticFile` segment implementations and utilities. 
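The filename convention documented in `segment.rs` above ("static_file_{segment}_{block_start}_{block_end}", inclusive range, start less than or equal to end) can be sketched as a small parser. This is a minimal illustration of the documented format under those assumptions, not the crate's actual implementation:

```rust
// Parses "static_file_{segment}_{block_start}_{block_end}" into its parts; the
// segment name and error handling here are illustrative only.
fn parse_filename(name: &str) -> Option<(String, u64, u64)> {
    // The prefix check mirrors the doc comment above.
    let rest = name.strip_prefix("static_file_")?;
    // Split from the right so segment names may themselves contain underscores.
    let mut parts = rest.rsplitn(3, '_');
    let block_end: u64 = parts.next()?.parse().ok()?;
    let block_start: u64 = parts.next()?.parse().ok()?;
    let segment = parts.next()?.to_string();
    // The block range is inclusive and must be well-formed.
    (block_start <= block_end).then_some((segment, block_start, block_end))
}

fn main() {
    assert_eq!(
        parse_filename("static_file_headers_0_499999"),
        Some(("headers".to_string(), 0, 499_999))
    );
    // A start above the end is rejected, as the docs above require.
    assert_eq!(parse_filename("static_file_headers_10_5"), None);
}
```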
mod transactions; pub use transactions::Transactions; @@ -33,8 +33,8 @@ pub trait Segment: Send + Sync { /// Returns the [`StaticFileSegment`]. fn segment(&self) -> StaticFileSegment; - /// Move data to static files for the provided block range. [StaticFileProvider] will handle the - /// management of and writing to files. + /// Move data to static files for the provided block range. [`StaticFileProvider`] will handle + /// the management of and writing to files. fn copy_to_static_files( &self, provider: DatabaseProviderRO, diff --git a/crates/static-file/src/segments/receipts.rs b/crates/static-file/src/segments/receipts.rs index 85b4863d8..70ce310db 100644 --- a/crates/static-file/src/segments/receipts.rs +++ b/crates/static-file/src/segments/receipts.rs @@ -14,7 +14,7 @@ use reth_provider::{ use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ops::RangeInclusive, path::Path}; -/// Static File segment responsible for [StaticFileSegment::Receipts] part of data. +/// Static File segment responsible for [`StaticFileSegment::Receipts`] part of data. #[derive(Debug, Default)] pub struct Receipts; diff --git a/crates/static-file/src/segments/transactions.rs b/crates/static-file/src/segments/transactions.rs index ba1745077..2ac9f0fb3 100644 --- a/crates/static-file/src/segments/transactions.rs +++ b/crates/static-file/src/segments/transactions.rs @@ -14,7 +14,7 @@ use reth_provider::{ use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ops::RangeInclusive, path::Path}; -/// Static File segment responsible for [StaticFileSegment::Transactions] part of data. +/// Static File segment responsible for [`StaticFileSegment::Transactions`] part of data. #[derive(Debug, Default)] pub struct Transactions; @@ -23,8 +23,8 @@ impl Segment for Transactions { StaticFileSegment::Transactions } - /// Write transactions from database table [tables::Transactions] to static files with segment - /// [StaticFileSegment::Transactions] for the provided block range. + /// Write transactions from database table [`tables::Transactions`] to static files with segment + /// [`StaticFileSegment::Transactions`] for the provided block range. fn copy_to_static_files( &self, provider: DatabaseProviderRO, diff --git a/crates/static-file/src/static_file_producer.rs b/crates/static-file/src/static_file_producer.rs index 549398e03..982ae7af1 100644 --- a/crates/static-file/src/static_file_producer.rs +++ b/crates/static-file/src/static_file_producer.rs @@ -18,19 +18,19 @@ use std::{ }; use tracing::{debug, trace}; -/// Result of [StaticFileProducerInner::run] execution. +/// Result of [`StaticFileProducerInner::run`] execution. pub type StaticFileProducerResult = ProviderResult; -/// The [StaticFileProducer] instance itself with the result of [StaticFileProducerInner::run] +/// The [`StaticFileProducer`] instance itself with the result of [`StaticFileProducerInner::run`] pub type StaticFileProducerWithResult = (StaticFileProducer, StaticFileProducerResult); -/// Static File producer. It's a wrapper around [StaticFileProducer] that allows to share it +/// Static File producer. It's a wrapper around [`StaticFileProducerInner`] that allows sharing it /// between threads. #[derive(Debug, Clone)] pub struct StaticFileProducer(Arc>>); impl StaticFileProducer { - /// Creates a new [StaticFileProducer]. + /// Creates a new [`StaticFileProducer`].
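The `StaticFileProducer(Arc<Mutex<StaticFileProducerInner>>)` shape above is the standard cloneable-handle pattern: every clone shares one inner producer, and the `Deref` impl exposes the lock directly. A minimal sketch with illustrative type names (the real crate may use `parking_lot` rather than `std::sync`):

```rust
use std::{
    ops::Deref,
    sync::{Arc, Mutex},
};

// Stand-in for the single-threaded inner producer state.
#[derive(Debug, Default)]
struct ProducerInner {
    runs: u64,
}

// Cloneable handle; every clone shares the same inner producer.
#[derive(Clone, Debug, Default)]
struct Producer(Arc<Mutex<ProducerInner>>);

// Deref to the shared lock so callers can write `producer.lock()` directly.
impl Deref for Producer {
    type Target = Arc<Mutex<ProducerInner>>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

fn main() {
    let producer = Producer::default();
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let producer = producer.clone();
            std::thread::spawn(move || {
                // Only one thread runs the producer at a time, matching the
                // CAUTION above about the Mutex blocking the calling thread.
                producer.lock().unwrap().runs += 1;
            })
        })
        .collect();
    for h in handles {
        h.join().unwrap();
    }
    assert_eq!(producer.lock().unwrap().runs, 4);
}
```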
pub fn new( provider_factory: ProviderFactory, static_file_provider: StaticFileProvider, @@ -52,7 +52,8 @@ impl Deref for StaticFileProducer { } } -/// Static File producer routine. See [StaticFileProducerInner::run] for more detailed description. +/// Static File producer routine. See [`StaticFileProducerInner::run`] for more detailed +/// description. #[derive(Debug)] pub struct StaticFileProducerInner { /// Provider factory @@ -60,8 +61,8 @@ pub struct StaticFileProducerInner { /// Static File provider static_file_provider: StaticFileProvider, /// Pruning configuration for every part of the data that can be pruned. Set by user, and - /// needed in [StaticFileProducerInner] to prevent attempting to move prunable data to static - /// files. See [StaticFileProducerInner::get_static_file_targets]. + /// needed in [`StaticFileProducerInner`] to prevent attempting to move prunable data to static + /// files. See [`StaticFileProducerInner::get_static_file_targets`]. prune_modes: PruneModes, event_sender: EventSender, } @@ -114,16 +115,16 @@ impl StaticFileProducerInner { } } - /// Listen for events on the static_file_producer. + /// Listen for events on the `static_file_producer`. pub fn events(&self) -> EventStream { self.event_sender.new_listener() } - /// Run the static_file_producer. + /// Run the `static_file_producer`. /// - /// For each [Some] target in [StaticFileTargets], initializes a corresponding [Segment] and - /// runs it with the provided block range using [StaticFileProvider] and a read-only - /// database transaction from [ProviderFactory]. All segments are run in parallel. + /// For each [Some] target in [`StaticFileTargets`], initializes a corresponding [Segment] and + /// runs it with the provided block range using [`StaticFileProvider`] and a read-only + /// database transaction from [`ProviderFactory`]. All segments are run in parallel. /// /// NOTE: it doesn't delete the data from database, and the actual deleting (aka pruning) logic /// lives in the `prune` crate. @@ -184,8 +185,8 @@ impl StaticFileProducerInner { } /// Returns a static file targets at the provided finalized block numbers per segment. - /// The target is determined by the check against highest static_files using - /// [StaticFileProvider::get_highest_static_files]. + /// The target is determined by the check against highest `static_files` using + /// [`StaticFileProvider::get_highest_static_files`]. pub fn get_static_file_targets( &self, finalized_block_numbers: HighestStaticFiles, diff --git a/crates/storage/codecs/derive/src/compact/enums.rs b/crates/storage/codecs/derive/src/compact/enums.rs index 64de6c99f..97e2f0e63 100644 --- a/crates/storage/codecs/derive/src/compact/enums.rs +++ b/crates/storage/codecs/derive/src/compact/enums.rs @@ -48,8 +48,8 @@ impl<'a> EnumHandler<'a> { /// Generates `from_compact` code for an enum variant. /// - /// `fields_iterator` might look something like \[VariantUnit, VariantUnnamedField, Field, - /// VariantUnit...\]. + /// `fields_iterator` might look something like \[`VariantUnit`, `VariantUnnamedField`, Field, + /// `VariantUnit`...\]. pub fn from(&mut self, variant_name: &str, ident: &Ident) { let variant_name = format_ident!("{variant_name}"); let current_variant_index = self.current_variant_index; @@ -91,8 +91,8 @@ impl<'a> EnumHandler<'a> { /// Generates `to_compact` code for an enum variant. /// - /// `fields_iterator` might look something like [VariantUnit, VariantUnnamedField, Field, - /// VariantUnit...]. 
+ /// `fields_iterator` might look something like [`VariantUnit`, `VariantUnnamedField`, Field, + /// `VariantUnit`...]. pub fn to(&mut self, variant_name: &str, ident: &Ident) { let variant_name = format_ident!("{variant_name}"); let current_variant_index = self.current_variant_index; diff --git a/crates/storage/codecs/derive/src/compact/flags.rs b/crates/storage/codecs/derive/src/compact/flags.rs index 24757d8e6..46a165023 100644 --- a/crates/storage/codecs/derive/src/compact/flags.rs +++ b/crates/storage/codecs/derive/src/compact/flags.rs @@ -25,7 +25,7 @@ pub(crate) fn generate_flag_struct( .iter() .filter_map(|f| { if let FieldTypes::StructField(f) = f { - return Some(f) + return Some(f); } None }) @@ -36,7 +36,7 @@ pub(crate) fn generate_flag_struct( }; if total_bits == 0 { - return placeholder_flag_struct(ident, &flags_ident) + return placeholder_flag_struct(ident, &flags_ident); } let (total_bytes, unused_bits) = pad_flag_struct(total_bits, &mut field_flags); diff --git a/crates/storage/codecs/derive/src/compact/generator.rs b/crates/storage/codecs/derive/src/compact/generator.rs index c28bf8d1a..889c68b00 100644 --- a/crates/storage/codecs/derive/src/compact/generator.rs +++ b/crates/storage/codecs/derive/src/compact/generator.rs @@ -89,7 +89,7 @@ fn generate_from_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> To let ident = format_ident!("{name}"); return Some(quote! { #ident: #ident, - }) + }); } None }); diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index d4732256e..40b7b2f31 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -156,7 +156,7 @@ fn should_use_alt_impl(ftype: &String, segment: &syn::PathSegment) -> bool { ] .contains(&path.ident.to_string().as_str()) { - return true + return true; } } } @@ -179,7 +179,7 @@ pub fn get_bit_size(ftype: &str) -> u8 { } /// Given the field type in a string format, checks if its type should be added to the -/// StructFlags. +/// `StructFlags`. pub fn is_flag_type(ftype: &str) -> bool { get_bit_size(ftype) > 0 } diff --git a/crates/storage/codecs/derive/src/compact/structs.rs b/crates/storage/codecs/derive/src/compact/structs.rs index 899e589db..a4f32364e 100644 --- a/crates/storage/codecs/derive/src/compact/structs.rs +++ b/crates/storage/codecs/derive/src/compact/structs.rs @@ -68,7 +68,7 @@ impl<'a> StructHandler<'a> { }) } - return + return; } let name = format_ident!("{name}"); diff --git a/crates/storage/codecs/derive/src/lib.rs b/crates/storage/codecs/derive/src/lib.rs index 32e793637..7c668a6cb 100644 --- a/crates/storage/codecs/derive/src/lib.rs +++ b/crates/storage/codecs/derive/src/lib.rs @@ -28,9 +28,9 @@ pub fn derive_zstd(input: TokenStream) -> TokenStream { compact::derive(input, is_zstd) } -/// This code implements the main codec. If the codec supports it, it will also provide the [derive_arbitrary()] function, which automatically implements arbitrary traits and roundtrip fuzz tests. +/// This code implements the main codec. If the codec supports it, it will also provide the [`derive_arbitrary()`] function, which automatically implements arbitrary traits and roundtrip fuzz tests. /// -/// If you prefer to manually implement the arbitrary traits, you can still use the [add_arbitrary_tests()] function to add arbitrary fuzz tests. +/// If you prefer to manually implement the arbitrary traits, you can still use the [`add_arbitrary_tests()`] function to add arbitrary fuzz tests. 
/// /// Example usage: /// * `#[main_codec(rlp)]`: will implement `derive_arbitrary(rlp)` or `derive_arbitrary(compact, rlp)`, if `compact` is the `main_codec`. diff --git a/crates/storage/codecs/src/alloy/txkind.rs b/crates/storage/codecs/src/alloy/txkind.rs index 14ab51236..b2d55400f 100644 --- a/crates/storage/codecs/src/alloy/txkind.rs +++ b/crates/storage/codecs/src/alloy/txkind.rs @@ -1,4 +1,4 @@ -//! Native Compact codec impl for primitive alloy [TxKind]. +//! Native Compact codec impl for primitive alloy [`TxKind`]. use crate::Compact; use alloy_primitives::{Address, TxKind}; diff --git a/crates/storage/db/benches/hash_keys.rs b/crates/storage/db/benches/hash_keys.rs index ee21883fe..717dd8b6b 100644 --- a/crates/storage/db/benches/hash_keys.rs +++ b/crates/storage/db/benches/hash_keys.rs @@ -26,7 +26,7 @@ criterion_main!(benches); /// * `put_sorted`: Table is preloaded with rows (same as batch size). Sorts during benchmark. /// * `put_unsorted`: Table is preloaded with rows (same as batch size). /// -/// It does the above steps with different batches of rows. 10_000, 100_000, 1_000_000. In the +/// It does the above steps with different batches of rows. `10_000`, `100_000`, `1_000_000`. In the /// end, the table statistics are shown (eg. number of pages, table size...) pub fn hash_keys(c: &mut Criterion) { let mut group = c.benchmark_group("Hash-Keys Table Insertion"); diff --git a/crates/storage/db/benches/utils.rs b/crates/storage/db/benches/utils.rs index a5f7d4bbd..5fab2a700 100644 --- a/crates/storage/db/benches/utils.rs +++ b/crates/storage/db/benches/utils.rs @@ -13,7 +13,7 @@ use std::{path::Path, sync::Arc}; #[allow(dead_code)] const BENCH_DB_PATH: &str = "/tmp/reth-benches"; -/// Used for RandomRead and RandomWrite benchmarks. +/// Used for `RandomRead` and `RandomWrite` benchmarks. #[allow(dead_code)] const RANDOM_INDEXES: [usize; 10] = [23, 2, 42, 5, 3, 99, 54, 0, 33, 64]; diff --git a/crates/storage/db/src/abstraction/cursor.rs b/crates/storage/db/src/abstraction/cursor.rs index eb7a209da..bada51283 100644 --- a/crates/storage/db/src/abstraction/cursor.rs +++ b/crates/storage/db/src/abstraction/cursor.rs @@ -120,7 +120,7 @@ pub trait DbCursorRW { fn delete_current(&mut self) -> Result<(), DatabaseError>; } -/// Read Write Cursor over DupSorted table. +/// Read Write Cursor over `DupSorted` table. pub trait DbDupCursorRW { /// Delete all duplicate entries for current key. fn delete_current_duplicates(&mut self) -> Result<(), DatabaseError>; @@ -209,7 +209,7 @@ where } impl<'cursor, T: Table, CURSOR: DbCursorRO> ReverseWalker<'cursor, T, CURSOR> { - /// construct ReverseWalker + /// construct `ReverseWalker` pub fn new(cursor: &'cursor mut CURSOR, start: IterPairResult) -> Self { Self { cursor, start } } @@ -300,7 +300,7 @@ impl<'cursor, T: Table, CURSOR: DbCursorRO> Iterator for RangeWalker<'cursor, } impl<'cursor, T: Table, CURSOR: DbCursorRO> RangeWalker<'cursor, T, CURSOR> { - /// construct RangeWalker + /// construct `RangeWalker` pub fn new( cursor: &'cursor mut CURSOR, start: IterPairResult, diff --git a/crates/storage/db/src/abstraction/database_metrics.rs b/crates/storage/db/src/abstraction/database_metrics.rs index f8b5d3058..8ca6b35bc 100644 --- a/crates/storage/db/src/abstraction/database_metrics.rs +++ b/crates/storage/db/src/abstraction/database_metrics.rs @@ -49,7 +49,7 @@ pub struct DatabaseMetadataValue { } impl DatabaseMetadataValue { - /// Creates a new [DatabaseMetadataValue] with the given freelist size. 
+ /// Creates a new [`DatabaseMetadataValue`] with the given freelist size. pub const fn new(freelist_size: Option) -> Self { Self { freelist_size } } @@ -60,10 +60,10 @@ impl DatabaseMetadataValue { } } -/// Includes a method to return a [DatabaseMetadataValue] type, which can be used to dynamically +/// Includes a method to return a [`DatabaseMetadataValue`] type, which can be used to dynamically /// retrieve information about the database. pub trait DatabaseMetadata { - /// Returns a metadata type, [DatabaseMetadataValue] for the database. + /// Returns a metadata type, [`DatabaseMetadataValue`] for the database. fn metadata(&self) -> DatabaseMetadataValue; } diff --git a/crates/storage/db/src/abstraction/mock.rs b/crates/storage/db/src/abstraction/mock.rs index f62746cca..e972821d8 100644 --- a/crates/storage/db/src/abstraction/mock.rs +++ b/crates/storage/db/src/abstraction/mock.rs @@ -14,7 +14,7 @@ use crate::{ use core::ops::Bound; use std::{collections::BTreeMap, ops::RangeBounds}; -/// Mock database used for testing with inner BTreeMap structure +/// Mock database used for testing with inner `BTreeMap` structure // TODO #[derive(Clone, Debug, Default)] pub struct DatabaseMock { diff --git a/crates/storage/db/src/abstraction/table.rs b/crates/storage/db/src/abstraction/table.rs index 862664d70..41ee7522f 100644 --- a/crates/storage/db/src/abstraction/table.rs +++ b/crates/storage/db/src/abstraction/table.rs @@ -98,7 +98,7 @@ pub trait Table: Send + Sync + Debug + 'static { /// Tuple with `T::Key` and `T::Value`. pub type TableRow = (::Key, ::Value); -/// DupSort allows for keys to be repeated in the database. +/// `DupSort` allows for keys to be repeated in the database. /// /// Upstream docs: pub trait DupSort: Table { diff --git a/crates/storage/db/src/abstraction/transaction.rs b/crates/storage/db/src/abstraction/transaction.rs index 4a6f23d80..f39cf92fb 100644 --- a/crates/storage/db/src/abstraction/transaction.rs +++ b/crates/storage/db/src/abstraction/transaction.rs @@ -8,7 +8,7 @@ use crate::{ pub trait DbTx: Send + Sync { /// Cursor type for this read-only transaction type Cursor: DbCursorRO + Send + Sync; - /// DupCursor type for this read-only transaction + /// `DupCursor` type for this read-only transaction type DupCursor: DbDupCursorRO + DbCursorRO + Send + Sync; /// Get value @@ -32,7 +32,7 @@ pub trait DbTx: Send + Sync { pub trait DbTxMut: Send + Sync { /// Read-Write Cursor type type CursorMut: DbCursorRW + DbCursorRO + Send + Sync; - /// Read-Write DupCursor type + /// Read-Write `DupCursor` type type DupCursorMut: DbDupCursorRW + DbCursorRW + DbDupCursorRO @@ -49,6 +49,6 @@ pub trait DbTxMut: Send + Sync { fn clear(&self) -> Result<(), DatabaseError>; /// Cursor mut fn cursor_write(&self) -> Result, DatabaseError>; - /// DupCursor mut. + /// `DupCursor` mut. fn cursor_dup_write(&self) -> Result, DatabaseError>; } diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 947eb6b2b..98d872122 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -38,7 +38,7 @@ const TERABYTE: usize = GIGABYTE * 1024; const DEFAULT_MAX_READERS: u64 = 32_000; /// Space that a read-only transaction can occupy until the warning is emitted. -/// See [reth_libmdbx::EnvironmentBuilder::set_handle_slow_readers] for more information. +/// See [`reth_libmdbx::EnvironmentBuilder::set_handle_slow_readers`] for more information. 
#[cfg(not(windows))] const MAX_SAFE_READER_SPACE: usize = 10 * GIGABYTE; diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs index f930a122c..6009058d0 100644 --- a/crates/storage/db/src/implementation/mdbx/tx.rs +++ b/crates/storage/db/src/implementation/mdbx/tx.rs @@ -32,7 +32,7 @@ pub struct Tx { pub inner: Transaction, /// Handler for metrics with its own [Drop] implementation for cases when the transaction isn't - /// closed by [Tx::commit] or [Tx::abort], but we still need to report it in the metrics. + /// closed by [`Tx::commit`] or [`Tx::abort`], but we still need to report it in the metrics. /// /// If [Some], then metrics are reported. metrics_handler: Option>, @@ -186,13 +186,13 @@ struct MetricsHandler { /// Duration after which we emit the log about long-lived database transactions. long_transaction_duration: Duration, /// If `true`, the metric about transaction closing has already been recorded and we don't need - /// to do anything on [Drop::drop]. + /// to do anything on [`Drop::drop`]. close_recorded: bool, /// If `true`, the backtrace of transaction will be recorded and logged. - /// See [MetricsHandler::log_backtrace_on_long_read_transaction]. + /// See [`MetricsHandler::log_backtrace_on_long_read_transaction`]. record_backtrace: bool, /// If `true`, the backtrace of transaction has already been recorded and logged. - /// See [MetricsHandler::log_backtrace_on_long_read_transaction]. + /// See [`MetricsHandler::log_backtrace_on_long_read_transaction`]. backtrace_recorded: AtomicBool, env_metrics: Arc, _marker: PhantomData, @@ -233,11 +233,11 @@ impl MetricsHandler { } /// Logs the backtrace of current call if the duration that the read transaction has been open - /// is more than [LONG_TRANSACTION_DURATION] and `record_backtrace == true`. + /// is more than [`LONG_TRANSACTION_DURATION`] and `record_backtrace == true`. /// The backtrace is recorded and logged just once, guaranteed by `backtrace_recorded` atomic. /// - /// NOTE: Backtrace is recorded using [Backtrace::force_capture], so `RUST_BACKTRACE` env var is - /// not needed. + /// NOTE: Backtrace is recorded using [`Backtrace::force_capture`], so `RUST_BACKTRACE` env var + /// is not needed. fn log_backtrace_on_long_read_transaction(&self) { if self.record_backtrace && !self.backtrace_recorded.load(Ordering::Relaxed) && diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index 15ceb671a..438f53a9c 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -172,7 +172,7 @@ pub mod test_utils { } } - /// Create static_files path for testing + /// Create `static_files` path for testing pub fn create_test_static_files_dir() -> (TempDir, PathBuf) { let temp_dir = TempDir::with_prefix("reth-test-static-").expect(ERROR_TEMPDIR); let path = temp_dir.path().to_path_buf(); diff --git a/crates/storage/db/src/lockfile.rs b/crates/storage/db/src/lockfile.rs index ebfc240e7..6b2aac3df 100644 --- a/crates/storage/db/src/lockfile.rs +++ b/crates/storage/db/src/lockfile.rs @@ -18,7 +18,7 @@ use sysinfo::System; pub struct StorageLock(Arc); impl StorageLock { - /// Tries to acquires a write lock on the target directory, returning [StorageLockError] if + /// Tries to acquire a write lock on the target directory, returning [`StorageLockError`] if /// unsuccessful. 
pub fn try_acquire(path: &Path) -> Result { let path = path.join("lock"); diff --git a/crates/storage/db/src/mdbx.rs b/crates/storage/db/src/mdbx.rs index 33dd4882b..328b9caab 100644 --- a/crates/storage/db/src/mdbx.rs +++ b/crates/storage/db/src/mdbx.rs @@ -48,7 +48,7 @@ pub fn open_db_read_only(path: &Path, args: DatabaseArguments) -> eyre::Result eyre::Result { let db = DatabaseEnv::open(path, DatabaseEnvKind::RW, args.clone()) diff --git a/crates/storage/db/src/metrics.rs b/crates/storage/db/src/metrics.rs index e38884623..3ae387572 100644 --- a/crates/storage/db/src/metrics.rs +++ b/crates/storage/db/src/metrics.rs @@ -18,12 +18,12 @@ const LARGE_VALUE_THRESHOLD_BYTES: usize = 4096; /// Otherwise, metric recording will no-op. #[derive(Debug)] pub(crate) struct DatabaseEnvMetrics { - /// Caches OperationMetrics handles for each table and operation tuple. + /// Caches `OperationMetrics` handles for each table and operation tuple. operations: FxHashMap<(Tables, Operation), OperationMetrics>, - /// Caches TransactionMetrics handles for counters grouped by only transaction mode. + /// Caches `TransactionMetrics` handles for counters grouped by only transaction mode. /// Updated both at tx open and close. transactions: FxHashMap, - /// Caches TransactionOutcomeMetrics handles for counters grouped by transaction mode and + /// Caches `TransactionOutcomeMetrics` handles for counters grouped by transaction mode and /// outcome. Can only be updated at tx close, as outcome is only known at that point. transaction_outcomes: FxHashMap<(TransactionMode, TransactionOutcome), TransactionOutcomeMetrics>, @@ -336,8 +336,8 @@ impl TransactionOutcomeMetrics { pub(crate) struct OperationMetrics { /// Total number of database operations made calls_total: Counter, - /// The time it took to execute a database operation (put/upsert/insert/append/append_dup) with - /// value larger than [LARGE_VALUE_THRESHOLD_BYTES] bytes. + /// The time it took to execute a database operation (`put/upsert/insert/append/append_dup`) + /// with value larger than [`LARGE_VALUE_THRESHOLD_BYTES`] bytes. large_value_duration_seconds: Histogram, } @@ -345,7 +345,7 @@ impl OperationMetrics { /// Record operation metric. /// /// The duration it took to execute the closure is recorded only if the provided `value_size` is - /// larger than [LARGE_VALUE_THRESHOLD_BYTES]. + /// larger than [`LARGE_VALUE_THRESHOLD_BYTES`]. pub(crate) fn record(&self, value_size: Option, f: impl FnOnce() -> R) -> R { self.calls_total.increment(1); diff --git a/crates/storage/db/src/static_file/mask.rs b/crates/storage/db/src/static_file/mask.rs index de5932ea9..700ef82e0 100644 --- a/crates/storage/db/src/static_file/mask.rs +++ b/crates/storage/db/src/static_file/mask.rs @@ -37,7 +37,7 @@ add_segments!(Header, Receipt, Transaction); pub trait ColumnSelectorOne { /// First desired column value type FIRST: Decompress; - /// Mask to obtain desired values, should correspond to the order of columns in a static_file. + /// Mask to obtain desired values, should correspond to the order of columns in a `static_file`. const MASK: usize; } @@ -47,7 +47,7 @@ pub trait ColumnSelectorTwo { type FIRST: Decompress; /// Second desired column value type SECOND: Decompress; - /// Mask to obtain desired values, should correspond to the order of columns in a static_file. + /// Mask to obtain desired values, should correspond to the order of columns in a `static_file`. 
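The `MASK` constants above select which columns of a static file row a query decodes, with the bits following the on-disk column order. A toy illustration of that bitmask idea; the column layout and names here are hypothetical, not the real segment schema:

```rust
// Hypothetical three-column layout; each bit selects one column by position.
const HEADER: usize = 0b001; // column 1
const TD: usize = 0b010; // column 2
const HASH: usize = 0b100; // column 3

// Keep only the columns whose bit is set in `mask`, in on-disk order.
fn select<'a>(row: &'a [&'a str], mask: usize) -> Vec<&'a str> {
    row.iter()
        .enumerate()
        .filter(|(i, _)| mask & (1 << *i) != 0)
        .map(|(_, v)| *v)
        .collect()
}

fn main() {
    let row = ["header", "td", "hash"];
    // A two-column selector like ColumnSelectorTwo would combine two bits.
    assert_eq!(select(&row, HEADER | HASH), vec!["header", "hash"]);
    let _ = TD; // unused in this example
}
```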
const MASK: usize; } @@ -59,7 +59,7 @@ pub trait ColumnSelectorThree { type SECOND: Decompress; /// Third desired column value type THIRD: Decompress; - /// Mask to obtain desired values, should correspond to the order of columns in a static_file. + /// Mask to obtain desired values, should correspond to the order of columns in a `static_file`. const MASK: usize; } diff --git a/crates/storage/db/src/static_file/mod.rs b/crates/storage/db/src/static_file/mod.rs index 17736b565..3929b8dd8 100644 --- a/crates/storage/db/src/static_file/mod.rs +++ b/crates/storage/db/src/static_file/mod.rs @@ -25,7 +25,7 @@ mod masks; type SortedStaticFiles = HashMap)>>; -/// Given the static_files directory path, it returns a list over the existing static_files +/// Given the `static_files` directory path, it returns a list over the existing `static_files` /// organized by [`StaticFileSegment`]. Each segment has a sorted list of block ranges and /// transaction ranges as presented in the file configuration. pub fn iter_static_files(path: impl AsRef) -> Result { diff --git a/crates/storage/db/src/tables/codecs/compact.rs b/crates/storage/db/src/tables/codecs/compact.rs index 907fb2146..7f67076bc 100644 --- a/crates/storage/db/src/tables/codecs/compact.rs +++ b/crates/storage/db/src/tables/codecs/compact.rs @@ -87,7 +87,7 @@ macro_rules! impl_compression_fixed_compact { impl_compression_fixed_compact!(B256, Address); -/// Adds wrapper structs for some primitive types so they can use StructFlags from Compact, when +/// Adds wrapper structs for some primitive types so they can use `StructFlags` from Compact, when /// used as pure table values. macro_rules! add_wrapper_struct { ($(($name:tt, $wrapper:tt)),+) => { diff --git a/crates/storage/db/src/tables/raw.rs b/crates/storage/db/src/tables/raw.rs index 0ece7b3ba..0bdeb68f5 100644 --- a/crates/storage/db/src/tables/raw.rs +++ b/crates/storage/db/src/tables/raw.rs @@ -21,7 +21,7 @@ impl Table for RawTable { type Value = RawValue; } -/// Raw DupSort table that can be used to access any table and its data in raw mode. +/// Raw `DupSort` table that can be used to access any table and its data in raw mode. /// This is useful for delayed decoding/encoding of data. #[derive(Default, Copy, Clone, Debug)] pub struct RawDupSort { diff --git a/crates/storage/db/src/version.rs b/crates/storage/db/src/version.rs index c7cebaed8..09c1f9467 100644 --- a/crates/storage/db/src/version.rs +++ b/crates/storage/db/src/version.rs @@ -7,11 +7,11 @@ use std::{ /// The name of the file that contains the version of the database. pub const DB_VERSION_FILE_NAME: &str = "database.version"; -/// The version of the database stored in the [DB_VERSION_FILE_NAME] file in the same directory as +/// The version of the database stored in the [`DB_VERSION_FILE_NAME`] file in the same directory as /// database. pub const DB_VERSION: u64 = 2; -/// Error when checking a database version using [check_db_version_file] +/// Error when checking a database version using [`check_db_version_file`] #[derive(thiserror::Error, Debug)] pub enum DatabaseVersionError { /// Unable to determine the version of the database; the file is missing. @@ -41,10 +41,10 @@ pub enum DatabaseVersionError { }, } -/// Checks the database version file with [DB_VERSION_FILE_NAME] name. +/// Checks the database version file with [`DB_VERSION_FILE_NAME`] name. /// -/// Returns [Ok] if file is found and has one line which equals to [DB_VERSION]. -/// Otherwise, returns different [DatabaseVersionError] error variants. 
+/// Returns [Ok] if file is found and has one line which equals to [`DB_VERSION`]. +/// Otherwise, returns different [`DatabaseVersionError`] error variants. pub fn check_db_version_file>(db_path: P) -> Result<(), DatabaseVersionError> { let version = get_db_version(db_path)?; if version != DB_VERSION { @@ -54,10 +54,10 @@ pub fn check_db_version_file>(db_path: P) -> Result<(), DatabaseV Ok(()) } -/// Returns the database version from file with [DB_VERSION_FILE_NAME] name. +/// Returns the database version from file with [`DB_VERSION_FILE_NAME`] name. /// /// Returns [Ok] if file is found and contains a valid version. -/// Otherwise, returns different [DatabaseVersionError] error variants. +/// Otherwise, returns different [`DatabaseVersionError`] error variants. pub fn get_db_version>(db_path: P) -> Result { let version_file_path = db_version_file_path(db_path); match fs::read_to_string(&version_file_path) { @@ -69,7 +69,8 @@ pub fn get_db_version>(db_path: P) -> Result for StorageLockError { fn from(source: FsPathError) -> Self { Self::Other(source.to_string()) diff --git a/crates/storage/libmdbx-rs/src/codec.rs b/crates/storage/libmdbx-rs/src/codec.rs index 15f20204e..26af09990 100644 --- a/crates/storage/libmdbx-rs/src/codec.rs +++ b/crates/storage/libmdbx-rs/src/codec.rs @@ -81,7 +81,7 @@ impl TableObject for ObjectLength { impl TableObject for [u8; LEN] { fn decode(data_val: &[u8]) -> Result { if data_val.len() != LEN { - return Err(Error::DecodeErrorLenDiff) + return Err(Error::DecodeErrorLenDiff); } let mut a = [0; LEN]; a[..].copy_from_slice(data_val); diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs index ae642a52b..30ea03834 100644 --- a/crates/storage/libmdbx-rs/src/cursor.rs +++ b/crates/storage/libmdbx-rs/src/cursor.rs @@ -152,7 +152,7 @@ where self.get_full(None, None, MDBX_FIRST) } - /// [DatabaseFlags::DUP_SORT]-only: Position at first data item of current key. + /// [`DatabaseFlags::DUP_SORT`]-only: Position at first data item of current key. pub fn first_dup(&mut self) -> Result> where Value: TableObject, @@ -160,7 +160,7 @@ where self.get_value(None, None, MDBX_FIRST_DUP) } - /// [DatabaseFlags::DUP_SORT]-only: Position at key/data pair. + /// [`DatabaseFlags::DUP_SORT`]-only: Position at key/data pair. pub fn get_both(&mut self, k: &[u8], v: &[u8]) -> Result> where Value: TableObject, @@ -168,7 +168,7 @@ where self.get_value(Some(k), Some(v), MDBX_GET_BOTH) } - /// [DatabaseFlags::DUP_SORT]-only: Position at given key and at first data greater than or + /// [`DatabaseFlags::DUP_SORT`]-only: Position at given key and at first data greater than or /// equal to specified data. pub fn get_both_range(&mut self, k: &[u8], v: &[u8]) -> Result> where @@ -187,7 +187,7 @@ where } /// DupFixed-only: Return up to a page of duplicate data items from current cursor position. - /// Move cursor to prepare for [Self::next_multiple()]. + /// Move cursor to prepare for [`Self::next_multiple()`]. pub fn get_multiple(&mut self) -> Result> where Value: TableObject, @@ -222,7 +222,7 @@ where self.get_full(None, None, MDBX_NEXT) } - /// [DatabaseFlags::DUP_SORT]-only: Position at next data item of current key. + /// [`DatabaseFlags::DUP_SORT`]-only: Position at next data item of current key. pub fn next_dup(&mut self) -> Result> where Key: TableObject, @@ -231,8 +231,8 @@ where self.get_full(None, None, MDBX_NEXT_DUP) } - /// [DatabaseFlags::DUP_FIXED]-only: Return up to a page of duplicate data items from next - /// cursor position. 
Move cursor to prepare for MDBX_NEXT_MULTIPLE. + /// [`DatabaseFlags::DUP_FIXED`]-only: Return up to a page of duplicate data items from next + /// cursor position. Move cursor to prepare for `MDBX_NEXT_MULTIPLE`. pub fn next_multiple(&mut self) -> Result> where Key: TableObject, @@ -259,7 +259,7 @@ where self.get_full(None, None, MDBX_PREV) } - /// [DatabaseFlags::DUP_SORT]-only: Position at previous data item of current key. + /// [`DatabaseFlags::DUP_SORT`]-only: Position at previous data item of current key. pub fn prev_dup(&mut self) -> Result> where Key: TableObject, @@ -303,7 +303,7 @@ where self.get_full(Some(key), None, MDBX_SET_RANGE) } - /// [DatabaseFlags::DUP_FIXED]-only: Position at previous page and return up to a page of + /// [`DatabaseFlags::DUP_FIXED`]-only: Position at previous page and return up to a page of /// duplicate data items. pub fn prev_multiple(&mut self) -> Result> where @@ -316,7 +316,7 @@ where /// Position at first key-value pair greater than or equal to specified, return both key and /// data, and the return code depends on a exact match. /// - /// For non DupSort-ed collections this works the same as [Self::set_range()], but returns + /// For non DupSort-ed collections this works the same as [`Self::set_range()`], but returns /// [false] if key found exactly and [true] if greater key was found. /// /// For DupSort-ed a data value is taken into account for duplicates, i.e. for a pairs/tuples of @@ -337,7 +337,7 @@ where /// The iterator will begin with item next after the cursor, and continue until the end of the /// database. For new cursors, the iterator will begin with the first item in the database. /// - /// For databases with duplicate data items ([DatabaseFlags::DUP_SORT]), the + /// For databases with duplicate data items ([`DatabaseFlags::DUP_SORT`]), the /// duplicate data items of each key will be returned before moving on to /// the next key. pub fn iter(&mut self) -> Iter<'_, K, Key, Value> @@ -350,7 +350,7 @@ where /// Iterate over database items starting from the beginning of the database. /// - /// For databases with duplicate data items ([DatabaseFlags::DUP_SORT]), the + /// For databases with duplicate data items ([`DatabaseFlags::DUP_SORT`]), the /// duplicate data items of each key will be returned before moving on to /// the next key. pub fn iter_start(&mut self) -> Iter<'_, K, Key, Value> @@ -363,7 +363,7 @@ where /// Iterate over database items starting from the given key. /// - /// For databases with duplicate data items ([DatabaseFlags::DUP_SORT]), the + /// For databases with duplicate data items ([`DatabaseFlags::DUP_SORT`]), the /// duplicate data items of each key will be returned before moving on to /// the next key. 
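Several of the docs above repeat the `DUP_SORT` iteration contract: all duplicate data items of a key are yielded before the cursor advances to the next key. A small in-memory model of that ordering, with made-up data and a `BTreeMap` standing in for the table:

```rust
use std::collections::BTreeMap;

fn main() {
    // Key 1 has two duplicate values, key 2 has one.
    let mut table: BTreeMap<u64, Vec<&str>> = BTreeMap::new();
    table.entry(1).or_default().extend(["a", "b"]);
    table.entry(2).or_default().push("c");

    // Flattening per key reproduces the documented cursor order: every
    // duplicate of a key is returned before moving on to the next key.
    let flat: Vec<(u64, &str)> = table
        .iter()
        .flat_map(|(k, vals)| vals.iter().map(move |v| (*k, *v)))
        .collect();

    assert_eq!(flat, vec![(1, "a"), (1, "b"), (2, "c")]);
}
```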
pub fn iter_from(&mut self, key: &[u8]) -> Iter<'_, K, Key, Value> @@ -373,7 +373,7 @@ where { let res: Result> = self.set_range(key); if let Err(error) = res { - return Iter::Err(Some(error)) + return Iter::Err(Some(error)); }; Iter::new(self, ffi::MDBX_GET_CURRENT, ffi::MDBX_NEXT) } @@ -408,7 +408,7 @@ where { let res: Result> = self.set_range(key); if let Err(error) = res { - return IterDup::Err(Some(error)) + return IterDup::Err(Some(error)); }; IterDup::new(self, ffi::MDBX_GET_CURRENT) } @@ -424,7 +424,7 @@ where Ok(Some(_)) => (), Ok(None) => { let _: Result> = self.last(); - return Iter::new(self, ffi::MDBX_NEXT, ffi::MDBX_NEXT) + return Iter::new(self, ffi::MDBX_NEXT, ffi::MDBX_NEXT); } Err(error) => return Iter::Err(Some(error)), }; @@ -453,8 +453,8 @@ impl Cursor { /// /// ### Flags /// - /// [WriteFlags::NO_DUP_DATA] may be used to delete all data items for the - /// current key, if the database was opened with [DatabaseFlags::DUP_SORT]. + /// [`WriteFlags::NO_DUP_DATA`] may be used to delete all data items for the + /// current key, if the database was opened with [`DatabaseFlags::DUP_SORT`]. pub fn del(&mut self, flags: WriteFlags) -> Result<()> { mdbx_result(unsafe { self.txn.txn_execute(|_| ffi::mdbx_cursor_del(self.cursor, flags.bits()))? @@ -511,14 +511,14 @@ where Key: TableObject, Value: TableObject, { - /// An iterator that returns an error on every call to [Iter::next()]. + /// An iterator that returns an error on every call to [`Iter::next()`]. /// Cursor.iter*() creates an Iter of this type when MDBX returns an error /// on retrieval of a cursor. Using this variant instead of returning - /// an error makes Cursor.iter()* methods infallible, so consumers only - /// need to check the result of Iter.next(). + /// an error makes `Cursor.iter()`* methods infallible, so consumers only + /// need to check the result of `Iter.next()`. Err(Option), - /// An iterator that returns an Item on calls to [Iter::next()]. + /// An iterator that returns an Item on calls to [`Iter::next()`]. /// The Item is a [Result], so this variant /// might still return an error, if retrieval of the key/value pair /// fails for some reason. @@ -526,7 +526,7 @@ where /// The MDBX cursor with which to iterate. cursor: Cursor, - /// The first operation to perform when the consumer calls [Iter::next()]. + /// The first operation to perform when the consumer calls [`Iter::next()`]. op: ffi::MDBX_cursor_op, /// The next and subsequent operations to perform. @@ -602,14 +602,14 @@ where Key: TableObject, Value: TableObject, { - /// An iterator that returns an error on every call to [Iter::next()]. + /// An iterator that returns an error on every call to [`Iter::next()`]. /// Cursor.iter*() creates an Iter of this type when MDBX returns an error /// on retrieval of a cursor. Using this variant instead of returning - /// an error makes Cursor.iter()* methods infallible, so consumers only - /// need to check the result of Iter.next(). + /// an error makes `Cursor.iter()`* methods infallible, so consumers only + /// need to check the result of `Iter.next()`. Err(Option), - /// An iterator that returns an Item on calls to [Iter::next()]. + /// An iterator that returns an Item on calls to [`Iter::next()`]. /// The Item is a [Result], so this variant /// might still return an error, if retrieval of the key/value pair /// fails for some reason. @@ -617,7 +617,7 @@ where /// The MDBX cursor with which to iterate. cursor: &'cur mut Cursor, - /// The first operation to perform when the consumer calls [Iter::next()]. 
+ /// The first operation to perform when the consumer calls [`Iter::next()`]. op: ffi::MDBX_cursor_op, /// The next and subsequent operations to perform. @@ -699,14 +699,14 @@ where Key: TableObject, Value: TableObject, { - /// An iterator that returns an error on every call to Iter.next(). + /// An iterator that returns an error on every call to `Iter.next()`. /// Cursor.iter*() creates an Iter of this type when MDBX returns an error /// on retrieval of a cursor. Using this variant instead of returning - /// an error makes Cursor.iter()* methods infallible, so consumers only - /// need to check the result of Iter.next(). + /// an error makes `Cursor.iter()`* methods infallible, so consumers only + /// need to check the result of `Iter.next()`. Err(Option), - /// An iterator that returns an Item on calls to Iter.next(). + /// An iterator that returns an Item on calls to `Iter.next()`. /// The Item is a Result<(&'txn [u8], &'txn [u8])>, so this variant /// might still return an error, if retrieval of the key/value pair /// fails for some reason. @@ -714,7 +714,7 @@ where /// The MDBX cursor with which to iterate. cursor: &'cur mut Cursor, - /// The first operation to perform when the consumer calls Iter.next(). + /// The first operation to perform when the consumer calls `Iter.next()`. op: MDBX_cursor_op, _marker: PhantomData, diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 32be47949..4a8d64841 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -70,13 +70,13 @@ impl Environment { self.inner.env_kind } - /// Returns true if the environment was opened in [crate::Mode::ReadWrite] mode. + /// Returns true if the environment was opened in [`crate::Mode::ReadWrite`] mode. #[inline] pub fn is_read_write(&self) -> bool { self.inner.env_kind.is_write_map() } - /// Returns true if the environment was opened in [crate::Mode::ReadOnly] mode. + /// Returns true if the environment was opened in [`crate::Mode::ReadOnly`] mode. #[inline] pub fn is_read_only(&self) -> bool { !self.inner.env_kind.is_write_map() @@ -118,10 +118,10 @@ impl Environment { warn!(target: "libmdbx", "Process stalled, awaiting read-write transaction lock."); } sleep(Duration::from_millis(250)); - continue + continue; } - break res + break res; }?; Ok(Transaction::new_from_ptr(self.clone(), txn.0)) } @@ -184,7 +184,7 @@ impl Environment { /// Retrieves the total number of pages on the freelist. /// - /// Along with [Environment::info()], this can be used to calculate the exact number + /// Along with [`Environment::info()`], this can be used to calculate the exact number /// of used pages as well as free pages in this environment. /// /// ``` @@ -216,7 +216,7 @@ impl Environment { for result in cursor.iter_slices() { let (_key, value) = result?; if value.len() < size_of::() { - return Err(Error::Corrupted) + return Err(Error::Corrupted); } let s = &value[..size_of::()]; @@ -230,7 +230,7 @@ impl Environment { /// Container type for Environment internals. /// /// This holds the raw pointer to the MDBX environment and the transaction manager. -/// The env is opened via [mdbx_env_create](ffi::mdbx_env_create) and closed when this type drops. +/// The env is opened via [`mdbx_env_create`](ffi::mdbx_env_create) and closed when this type drops. struct EnvironmentInner { /// The raw pointer to the MDBX environment. 
/// @@ -265,10 +265,10 @@ pub enum EnvironmentKind { #[default] Default, /// Open the environment as mdbx-WRITEMAP. - /// Use a writeable memory map unless the environment is opened as MDBX_RDONLY - /// ([crate::Mode::ReadOnly]). + /// Use a writeable memory map unless the environment is opened as `MDBX_RDONLY` + /// ([`crate::Mode::ReadOnly`]). /// - /// All data will be mapped into memory in the read-write mode [crate::Mode::ReadWrite]. This + /// All data will be mapped into memory in the read-write mode [`crate::Mode::ReadWrite`]. This /// offers a significant performance benefit, since the data will be modified directly in /// mapped memory and then flushed to disk by single system call, without any memory /// management nor copying. @@ -468,7 +468,7 @@ pub struct PageOps { pub fsync: u64, /// Number of prefault write operations pub prefault: u64, - /// Number of mincore() calls + /// Number of `mincore()` calls pub mincore: u64, } @@ -589,7 +589,7 @@ pub struct EnvironmentBuilder { handle_slow_readers: Option, #[cfg(feature = "read-tx-timeouts")] /// The maximum duration of a read transaction. If [None], but the `read-tx-timeout` feature is - /// enabled, the default value of [DEFAULT_MAX_READ_TRANSACTION_DURATION] is used. + /// enabled, the default value of [`DEFAULT_MAX_READ_TRANSACTION_DURATION`] is used. max_read_transaction_duration: Option, } @@ -708,7 +708,7 @@ impl EnvironmentBuilder { })() { ffi::mdbx_env_close_ex(env, false); - return Err(e) + return Err(e); } } @@ -744,7 +744,7 @@ impl EnvironmentBuilder { /// Opens the environment with mdbx WRITEMAP /// - /// See also [EnvironmentKind] + /// See also [`EnvironmentKind`] pub fn write_map(&mut self) -> &mut Self { self.set_kind(EnvironmentKind::WriteMap) } @@ -772,7 +772,7 @@ impl EnvironmentBuilder { /// unnamed database can ignore this option. /// /// Currently a moderate number of slots are cheap but a huge number gets - /// expensive: 7-120 words per transaction, and every [Transaction::open_db()] + /// expensive: 7-120 words per transaction, and every [`Transaction::open_db()`] /// does a linear search of the opened slots. pub fn set_max_dbs(&mut self, v: usize) -> &mut Self { self.max_dbs = Some(v as u64); @@ -832,7 +832,8 @@ impl EnvironmentBuilder { self } - /// Set the Handle-Slow-Readers callback. See [HandleSlowReadersCallback] for more information. + /// Set the Handle-Slow-Readers callback. See [`HandleSlowReadersCallback`] for more + /// information. #[cfg(not(windows))] pub fn set_handle_slow_readers(&mut self, hsr: HandleSlowReadersCallback) -> &mut Self { self.handle_slow_readers = Some(hsr); diff --git a/crates/storage/libmdbx-rs/src/error.rs b/crates/storage/libmdbx-rs/src/error.rs index 27d781e53..b66d94398 100644 --- a/crates/storage/libmdbx-rs/src/error.rs +++ b/crates/storage/libmdbx-rs/src/error.rs @@ -109,12 +109,12 @@ pub enum Error { #[error("invalid parameter specified or active write transaction")] DecodeErrorLenDiff, /// If the [Environment](crate::Environment) was opened with - /// [EnvironmentKind::WriteMap](crate::EnvironmentKind::WriteMap) flag, nested transactions are - /// not supported. + /// [`EnvironmentKind::WriteMap`](crate::EnvironmentKind::WriteMap) flag, nested transactions + /// are not supported. #[error("nested transactions are not supported with WriteMap")] NestedTransactionsUnsupportedWithWriteMap, /// If the [Environment](crate::Environment) was opened with in read-only mode - /// [Mode::ReadOnly](crate::flags::Mode::ReadOnly), write transactions can't be opened. 
+ /// [`Mode::ReadOnly`](crate::flags::Mode::ReadOnly), write transactions can't be opened. #[error("write transactions are not supported in read-only mode")] WriteTransactionUnsupportedInReadOnlyMode, /// Read transaction has been timed out. diff --git a/crates/storage/libmdbx-rs/src/flags.rs b/crates/storage/libmdbx-rs/src/flags.rs index 883ed3faf..d733327ce 100644 --- a/crates/storage/libmdbx-rs/src/flags.rs +++ b/crates/storage/libmdbx-rs/src/flags.rs @@ -13,8 +13,8 @@ pub enum SyncMode { /// /// Flush system buffers to disk only once per transaction commit, omit the metadata flush. /// Defer that until the system flushes files to disk, or next non-read-only commit or - /// [Environment::sync()](crate::Environment::sync). Depending on the platform and - /// hardware, with [SyncMode::NoMetaSync] you may get a doubling of write performance. + /// [`Environment::sync()`](crate::Environment::sync). Depending on the platform and + /// hardware, with [`SyncMode::NoMetaSync`] you may get a doubling of write performance. /// /// This trade-off maintains database integrity, but a system crash may undo the last committed /// transaction. I.e. it preserves the ACPI (atomicity, consistency, isolation) but not D @@ -23,24 +23,24 @@ pub enum SyncMode { /// Don't sync anything but keep previous steady commits. /// - /// [SyncMode::UtterlyNoSync] the [SyncMode::SafeNoSync] flag disable similarly flush system - /// buffers to disk when committing a transaction. But there is a huge difference in how - /// are recycled the MVCC snapshots corresponding to previous "steady" transactions (see - /// below). + /// Like [`SyncMode::UtterlyNoSync`], the [`SyncMode::SafeNoSync`] flag disables flushing + /// system buffers to disk when committing a transaction. But there is a huge difference in + /// how the MVCC snapshots corresponding to previous "steady" transactions are recycled + /// (see below). /// - /// With [crate::EnvironmentKind::WriteMap] the [SyncMode::SafeNoSync] instructs MDBX to use - /// asynchronous mmap-flushes to disk. Asynchronous mmap-flushes means that actually all - /// writes will scheduled and performed by operation system on it own manner, i.e. + /// With [`crate::EnvironmentKind::WriteMap`], the [`SyncMode::SafeNoSync`] flag instructs MDBX + /// to use asynchronous mmap-flushes to disk. Asynchronous mmap-flushes mean that all + /// writes are scheduled and performed by the operating system in its own manner, i.e. /// unordered. MDBX itself just notify operating system that it would be nice to write data /// to disk, but no more. /// - /// Depending on the platform and hardware, with [SyncMode::SafeNoSync] you may get a multiple - /// increase of write performance, even 10 times or more. + /// Depending on the platform and hardware, with [`SyncMode::SafeNoSync`] you may get a + /// multifold increase in write performance, even 10 times or more. /// - /// In contrast to [SyncMode::UtterlyNoSync] mode, with [SyncMode::SafeNoSync] flag MDBX will - /// keeps untouched pages within B-tree of the last transaction "steady" which was synced to - /// disk completely. + /// In contrast to [`SyncMode::UtterlyNoSync`] mode, with the [`SyncMode::SafeNoSync`] flag MDBX + /// will keep untouched the pages within the B-tree of the last "steady" transaction, which + /// was synced to disk completely. This has big implications for both data durability and + /// (unfortunately) performance:
/// - A system crash can't corrupt the database, but you will lose the last transactions; /// because MDBX will rollback to last steady commit since it kept explicitly. /// - The last steady transaction makes an effect similar to "long-lived" read transaction @@ -51,49 +51,49 @@ pub enum SyncMode { /// size of the file on disk. /// /// In other words, with - /// [SyncMode::SafeNoSync] flag MDBX protects you from the whole database corruption, at the - /// cost increasing database size and/or number of disk IOPs. So, [SyncMode::SafeNoSync] - /// flag could be used with [Environment::sync()](crate::Environment::sync) as alternatively + /// the [`SyncMode::SafeNoSync`] flag MDBX protects you from whole-database corruption, at the + /// cost of increasing database size and/or the number of disk IOPs. So, the [`SyncMode::SafeNoSync`] + /// flag could be used with [`Environment::sync()`](crate::Environment::sync) as an alternative /// for batch committing or nested transaction (in some cases). /// - /// The number and volume of of disk IOPs with [SyncMode::SafeNoSync] flag will exactly the as - /// without any no-sync flags. However, you should expect a larger process's work set and - /// significantly worse a locality of reference, due to the more intensive allocation of - /// previously unused pages and increase the size of the database. + /// The number and volume of disk IOPs with the [`SyncMode::SafeNoSync`] flag will be exactly + /// the same as without any no-sync flags. However, you should expect a larger process working + /// set and significantly worse locality of reference, due to the more intensive allocation + /// of previously unused pages and the increased size of the database. SafeNoSync, /// Don't sync anything and wipe previous steady commits. /// /// Don't flush system buffers to disk when committing a transaction. /// This optimization means a system crash can corrupt the database, if buffers are not yet - /// flushed to disk. Depending on the platform and hardware, with [SyncMode::UtterlyNoSync] + /// flushed to disk. Depending on the platform and hardware, with [`SyncMode::UtterlyNoSync`] /// you may get a multiple increase of write performance, even 100 times or more. /// /// If the filesystem preserves write order (which is rare and never provided unless explicitly - /// noted) and the [WriteMap](crate::EnvironmentKind::WriteMap) and - /// [EnvironmentFlags::liforeclaim] flags are not used, then a system crash can't corrupt + /// noted) and the [`WriteMap`](crate::EnvironmentKind::WriteMap) and + /// [`EnvironmentFlags::liforeclaim`] flags are not used, then a system crash can't corrupt /// the database, but you can lose the last transactions, if at least one buffer is not yet /// flushed to disk. The risk is governed by how often the system flushes dirty buffers to - /// disk and how often [Environment::sync()](crate::Environment::sync) is called. So,
+ /// disk and how often [`Environment::sync()`](crate::Environment::sync) is called. So, /// transactions exhibit ACPI (atomicity, consistency, isolation) properties and only lose D /// (durability). I.e. database integrity is maintained, but a system crash may undo the /// final transactions. /// /// Otherwise, if the filesystem not preserves write order (which is typically) or - /// [WriteMap](crate::EnvironmentKind::WriteMap) or [EnvironmentFlags::liforeclaim] flags are - /// used, you should expect the corrupted database after a system crash. + /// [`WriteMap`](crate::EnvironmentKind::WriteMap) or [`EnvironmentFlags::liforeclaim`] flags + /// are used, you should expect a corrupted database after a system crash. /// - /// So, most important thing about [SyncMode::UtterlyNoSync]: + /// So, the most important things about [`SyncMode::UtterlyNoSync`]: /// - A system crash immediately after commit the write transaction high likely lead to /// database corruption. - /// - Successful completion of [Environment::sync(force=true)](crate::Environment::sync) after - /// one or more committed transactions guarantees consistency and durability. + /// - Successful completion of [`Environment::sync(force=true)`](crate::Environment::sync) + /// after one or more committed transactions guarantees consistency and durability. /// - BUT by committing two or more transactions you back database into a weak state, in which /// a system crash may lead to database corruption! In case single transaction after - /// [Environment::sync()](crate::Environment::sync), you may lose transaction itself, but not - /// a whole database. + /// [`Environment::sync()`](crate::Environment::sync), you may lose that transaction itself, + /// but not the whole database. /// - /// Nevertheless, [SyncMode::UtterlyNoSync] provides "weak" durability in + /// Nevertheless, [`SyncMode::UtterlyNoSync`] provides "weak" durability in /// case of an application crash (but no durability on system failure), and therefore may /// be very useful in scenarios where data durability is not required over a system failure /// (e.g for short-lived data), or if you can take such risk. diff --git a/crates/storage/libmdbx-rs/src/lib.rs b/crates/storage/libmdbx-rs/src/lib.rs index ba8c6b062..6ffaf8cb0 100644 --- a/crates/storage/libmdbx-rs/src/lib.rs +++ b/crates/storage/libmdbx-rs/src/lib.rs @@ -42,7 +42,7 @@ mod test_utils { use byteorder::{ByteOrder, LittleEndian}; use tempfile::tempdir; - /// Regression test for https://github.com/danburkert/lmdb-rs/issues/21. + /// Regression test for <https://github.com/danburkert/lmdb-rs/issues/21>. /// This test reliably segfaults when run against lmbdb compiled with opt level -O3 and newer /// GCC compilers. #[test] diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index 1bffc9f47..d7cdab277 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -133,7 +133,7 @@ where /// /// This function retrieves the data associated with the given key in the /// database. If the database supports duplicate keys - /// ([DatabaseFlags::DUP_SORT]) then the first data item for the key will be + /// ([`DatabaseFlags::DUP_SORT`]) then the first data item for the key will be /// returned. Retrieval of other items requires the use of /// [Cursor]. If the item is not in the database, then /// [None] will be returned. @@ -210,7 +210,7 @@ where /// /// If `name` is not [None], then the returned handle will be for a named database. In this /// case the environment must be configured to allow named databases through - /// [EnvironmentBuilder::set_max_dbs()](crate::EnvironmentBuilder::set_max_dbs). + /// [`EnvironmentBuilder::set_max_dbs()`](crate::EnvironmentBuilder::set_max_dbs). /// /// The returned database handle may be shared among any transaction in the environment. /// @@ -366,9 +366,9 @@ impl Transaction { /// /// If `name` is not [None], then the returned handle will be for a named database.
In this /// case the environment must be configured to allow named databases through - /// [EnvironmentBuilder::set_max_dbs()](crate::EnvironmentBuilder::set_max_dbs). + /// [`EnvironmentBuilder::set_max_dbs()`](crate::EnvironmentBuilder::set_max_dbs). /// - /// This function will fail with [Error::BadRslot] if called by a thread with an open + /// This function will fail with [`Error::BadRslot`] if called by a thread with an open /// transaction. pub fn create_db(&self, name: Option<&str>, flags: DatabaseFlags) -> Result { self.open_db_with_flags(name, flags | DatabaseFlags::CREATE) @@ -379,7 +379,7 @@ impl Transaction { /// This function stores key/data pairs in the database. The default /// behavior is to enter the new key/data pair, replacing any previously /// existing key if duplicates are disallowed, or adding a duplicate data - /// item if duplicates are allowed ([DatabaseFlags::DUP_SORT]). + /// item if duplicates are allowed ([`DatabaseFlags::DUP_SORT`]). pub fn put( &self, dbi: ffi::MDBX_dbi, @@ -504,7 +504,7 @@ impl Transaction { /// Begins a new nested transaction inside of this transaction. pub fn begin_nested_txn(&mut self) -> Result { if self.inner.env.is_write_map() { - return Err(Error::NestedTransactionsUnsupportedWithWriteMap) + return Err(Error::NestedTransactionsUnsupportedWithWriteMap); } self.txn_execute(|txn| { let (tx, rx) = sync_channel(0); @@ -576,7 +576,7 @@ impl TransactionPtr { // because we're taking a lock for any actions on the transaction pointer, including a call // to the `mdbx_txn_reset`. if self.is_timed_out() { - return Err(Error::ReadTransactionTimeout) + return Err(Error::ReadTransactionTimeout); } Ok((f)(self.txn)) @@ -611,7 +611,7 @@ impl TransactionPtr { pub struct CommitLatency(ffi::MDBX_commit_latency); impl CommitLatency { - /// Create a new CommitLatency with zero'd inner struct `ffi::MDBX_commit_latency`. + /// Create a new `CommitLatency` with zero'd inner struct `ffi::MDBX_commit_latency`. pub(crate) const fn new() -> Self { unsafe { Self(std::mem::zeroed()) } } diff --git a/crates/storage/libmdbx-rs/src/txn_manager.rs b/crates/storage/libmdbx-rs/src/txn_manager.rs index 38952c2f1..1c9c2ca89 100644 --- a/crates/storage/libmdbx-rs/src/txn_manager.rs +++ b/crates/storage/libmdbx-rs/src/txn_manager.rs @@ -20,8 +20,8 @@ pub(crate) enum TxnManagerMessage { } /// Manages transactions by doing two things: -/// - Opening, aborting, and committing transactions using [TxnManager::send_message] with the -/// corresponding [TxnManagerMessage] +/// - Opening, aborting, and committing transactions using [`TxnManager::send_message`] with the +/// corresponding [`TxnManagerMessage`] /// - Aborting long-lived read transactions (if the `read-tx-timeouts` feature is enabled and /// `TxnManager::with_max_read_transaction_duration` is called) #[derive(Debug)] @@ -45,12 +45,12 @@ impl TxnManager { txn_manager } - /// Spawns a new [std::thread] that listens to incoming [TxnManagerMessage] messages, executes - /// an FFI function, and returns the result on the provided channel. + /// Spawns a new [`std::thread`] that listens to incoming [`TxnManagerMessage`] messages, + /// executes an FFI function, and returns the result on the provided channel. 
/// - /// - [TxnManagerMessage::Begin] opens a new transaction with [ffi::mdbx_txn_begin_ex] - /// - [TxnManagerMessage::Abort] aborts a transaction with [ffi::mdbx_txn_abort] - /// - [TxnManagerMessage::Commit] commits a transaction with [ffi::mdbx_txn_commit_ex] + /// - [`TxnManagerMessage::Begin`] opens a new transaction with [`ffi::mdbx_txn_begin_ex`] + /// - [`TxnManagerMessage::Abort`] aborts a transaction with [`ffi::mdbx_txn_abort`] + /// - [`TxnManagerMessage::Commit`] commits a transaction with [`ffi::mdbx_txn_commit_ex`] fn start_message_listener(&self, env: EnvPtr, rx: Receiver) { let task = move || { #[allow(clippy::redundant_locals)] @@ -163,7 +163,7 @@ mod read_transactions { #[derive(Debug, Default)] pub(super) struct ReadTransactions { /// Maximum duration that a read transaction can be open until the - /// [ReadTransactions::start_monitor] aborts it. + /// [`ReadTransactions::start_monitor`] aborts it. max_duration: Duration, /// List of currently active read transactions. /// @@ -199,7 +199,7 @@ mod read_transactions { self.timed_out_not_aborted.len() } - /// Spawns a new [std::thread] that monitors the list of active read transactions and + /// Spawns a new [`std::thread`] that monitors the list of active read transactions and /// timeouts those that are open for longer than `ReadTransactions.max_duration`. pub(super) fn start_monitor(self: Arc) { let task = move || { diff --git a/crates/storage/nippy-jar/src/compression/lz4.rs b/crates/storage/nippy-jar/src/compression/lz4.rs index 670bed82b..d5169ea54 100644 --- a/crates/storage/nippy-jar/src/compression/lz4.rs +++ b/crates/storage/nippy-jar/src/compression/lz4.rs @@ -43,7 +43,7 @@ impl Compression for Lz4 { Err(err) => { multiplier *= 2; if multiplier == 16 { - return Err(NippyJarError::Custom(err.to_string())) + return Err(NippyJarError::Custom(err.to_string())); } } } diff --git a/crates/storage/nippy-jar/src/compression/zstd.rs b/crates/storage/nippy-jar/src/compression/zstd.rs index c55ca103a..8194538e4 100644 --- a/crates/storage/nippy-jar/src/compression/zstd.rs +++ b/crates/storage/nippy-jar/src/compression/zstd.rs @@ -60,7 +60,7 @@ impl Zstd { pub fn decompressors(&self) -> Result>, NippyJarError> { if let Some(dictionaries) = &self.dictionaries { debug_assert!(dictionaries.len() == self.columns); - return dictionaries.decompressors() + return dictionaries.decompressors(); } Ok(vec![]) @@ -72,12 +72,12 @@ impl Zstd { ZstdState::PendingDictionary => Err(NippyJarError::CompressorNotReady), ZstdState::Ready => { if !self.use_dict { - return Ok(None) + return Ok(None); } if let Some(dictionaries) = &self.dictionaries { debug!(target: "nippy-jar", count=?dictionaries.len(), "Generating ZSTD compressor dictionaries."); - return Ok(Some(dictionaries.compressors()?)) + return Ok(Some(dictionaries.compressors()?)); } Ok(None) } @@ -102,7 +102,7 @@ impl Zstd { buffer.reserve(column_value.len() * multiplier); multiplier += 1; if multiplier == 5 { - return Err(NippyJarError::Disconnect(err)) + return Err(NippyJarError::Disconnect(err)); } } @@ -191,7 +191,7 @@ impl Compression for Zstd { columns: Vec>>, ) -> Result<(), NippyJarError> { if !self.use_dict { - return Ok(()) + return Ok(()); } // There's a per 2GB hard limit on each column data set for training @@ -205,7 +205,7 @@ impl Compression for Zstd { // ``` if columns.len() != self.columns { - return Err(NippyJarError::ColumnLenMismatch(self.columns, columns.len())) + return Err(NippyJarError::ColumnLenMismatch(self.columns, columns.len())); } // TODO: parallel 
calculation @@ -363,7 +363,7 @@ impl<'a> Serialize for ZstdDictionary<'a> { impl<'a> PartialEq for ZstdDictionary<'a> { fn eq(&self, other: &Self) -> bool { if let (Self::Raw(a), Self::Raw(b)) = (self, &other) { - return a == b + return a == b; } unimplemented!("`DecoderDictionary` can't be compared. So comparison should be done after decompressing a value."); } diff --git a/crates/storage/nippy-jar/src/cursor.rs b/crates/storage/nippy-jar/src/cursor.rs index 434c40a9a..be7fc829e 100644 --- a/crates/storage/nippy-jar/src/cursor.rs +++ b/crates/storage/nippy-jar/src/cursor.rs @@ -86,11 +86,11 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { .offsets_index .access(row_index as usize) .expect("built from same set") as u64; - return self.next_row() + return self.next_row(); } } } else { - return Err(NippyJarError::UnsupportedFilterQuery) + return Err(NippyJarError::UnsupportedFilterQuery); } Ok(None) @@ -108,7 +108,7 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { if self.row as usize >= self.jar.rows { // Has reached the end - return Ok(None) + return Ok(None); } let mut row = Vec::with_capacity(self.jar.columns); @@ -154,11 +154,11 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { .offsets_index .access(row_index as usize) .expect("built from same set") as u64; - return self.next_row_with_cols(mask) + return self.next_row_with_cols(mask); } } } else { - return Err(NippyJarError::UnsupportedFilterQuery) + return Err(NippyJarError::UnsupportedFilterQuery); } Ok(None) @@ -182,7 +182,7 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { if self.row as usize >= self.jar.rows { // Has reached the end - return Ok(None) + return Ok(None); } let columns = self.jar.columns; diff --git a/crates/storage/nippy-jar/src/filter/cuckoo.rs b/crates/storage/nippy-jar/src/filter/cuckoo.rs index 0e338a032..f8e5d5d26 100644 --- a/crates/storage/nippy-jar/src/filter/cuckoo.rs +++ b/crates/storage/nippy-jar/src/filter/cuckoo.rs @@ -11,7 +11,7 @@ pub struct Cuckoo { /// This is necessary because the inner implementation will fail on adding an element past capacity, **but it will still add it and remove other**: [source](https://github.com/axiomhq/rust-cuckoofilter/tree/624da891bed1dd5d002c8fa92ce0dcd301975561#notes--todos) remaining: usize, - /// CuckooFilter. + /// `CuckooFilter`. filter: CuckooFilter, // TODO does it need an actual hasher? } @@ -28,7 +28,7 @@ impl Cuckoo { impl InclusionFilter for Cuckoo { fn add(&mut self, element: &[u8]) -> Result<(), NippyJarError> { if self.remaining == 0 { - return Err(NippyJarError::FilterMaxCapacity) + return Err(NippyJarError::FilterMaxCapacity); } self.remaining -= 1; diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index b1da44293..c64b57e7d 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -56,7 +56,7 @@ type RefRow<'a> = Vec<&'a [u8]>; /// Alias type for a column value wrapped in `Result`. pub type ColumnResult = Result>; -/// A trait for the user-defined header of [NippyJar]. +/// A trait for the user-defined header of [`NippyJar`]. pub trait NippyJarHeader: Send + Sync + Serialize + for<'b> Deserialize<'b> + std::fmt::Debug + 'static { @@ -87,15 +87,15 @@ impl NippyJarHeader for T where /// may also produce false positives but not false negatives, necessitating subsequent data /// verification. /// -/// Note: that the key (eg. BlockHash) passed to a filter and phf does not need to actually be +/// Note: that the key (eg. 
`BlockHash`) passed to a filter and phf does not need to actually be /// stored. /// /// Ultimately, the `freeze` function yields two files: a data file containing both the data and its -/// configuration, and an index file that houses the offsets and offsets_index. +/// configuration, and an index file that houses the offsets and `offsets_index`. #[derive(Serialize, Deserialize)] #[cfg_attr(test, derive(PartialEq))] pub struct NippyJar { - /// The version of the NippyJar format. + /// The version of the `NippyJar` format. version: usize, /// User-defined header data. /// Default: zero-sized unit type: no header data @@ -462,7 +462,7 @@ impl PerfectHashingFunction for NippyJar { /// Manages the reading of static file data using memory-mapped files. /// -/// Holds file and mmap descriptors of the data and offsets files of a static_file. +/// Holds file and mmap descriptors of the data and offsets files of a `static_file`. #[derive(Debug)] pub struct DataReader { /// Data file descriptor. Needs to be kept alive as long as `data_mmap` handle. @@ -844,7 +844,7 @@ mod tests { } } - /// Tests NippyJar with everything enabled: compression, filter, offset list and offset index. + /// Tests `NippyJar` with everything enabled: compression, filter, offset list and offset index. #[test] fn test_full_nippy_jar() { let (col1, col2) = test_data(None); diff --git a/crates/storage/nippy-jar/src/phf/fmph.rs b/crates/storage/nippy-jar/src/phf/fmph.rs index a332c40cf..20e1d9ac0 100644 --- a/crates/storage/nippy-jar/src/phf/fmph.rs +++ b/crates/storage/nippy-jar/src/phf/fmph.rs @@ -28,7 +28,7 @@ impl PerfectHashingFunction for Fmph { fn get_index(&self, key: &[u8]) -> Result, NippyJarError> { if let Some(f) = &self.function { - return Ok(f.get(key)) + return Ok(f.get(key)); } Err(NippyJarError::PHFMissingKeys) } @@ -92,7 +92,7 @@ impl<'de> Deserialize<'de> for Fmph { function: Some( Function::read(&mut std::io::Cursor::new(buffer)).map_err(D::Error::custom)?, ), - }) + }); } Ok(Self { function: None }) } diff --git a/crates/storage/nippy-jar/src/phf/go_fmph.rs b/crates/storage/nippy-jar/src/phf/go_fmph.rs index 328ddcb4d..8898c8be1 100644 --- a/crates/storage/nippy-jar/src/phf/go_fmph.rs +++ b/crates/storage/nippy-jar/src/phf/go_fmph.rs @@ -28,7 +28,7 @@ impl PerfectHashingFunction for GoFmph { fn get_index(&self, key: &[u8]) -> Result, NippyJarError> { if let Some(f) = &self.function { - return Ok(f.get(key)) + return Ok(f.get(key)); } Err(NippyJarError::PHFMissingKeys) } @@ -93,7 +93,7 @@ impl<'de> Deserialize<'de> for GoFmph { GOFunction::read(&mut std::io::Cursor::new(buffer)) .map_err(D::Error::custom)?, ), - }) + }); } Ok(Self { function: None }) } diff --git a/crates/storage/nippy-jar/src/writer.rs b/crates/storage/nippy-jar/src/writer.rs index 5c405856d..ee9b0cef0 100644 --- a/crates/storage/nippy-jar/src/writer.rs +++ b/crates/storage/nippy-jar/src/writer.rs @@ -45,7 +45,7 @@ impl NippyJarWriter { /// Creates a [`NippyJarWriter`] from [`NippyJar`]. /// /// If `read_only` is set to `true`, any inconsistency issue won't be healed, and will return - /// [NippyJarError::InconsistentState] instead. + /// [`NippyJarError::InconsistentState`] instead. pub fn new( jar: NippyJar, check_mode: ConsistencyFailStrategy, @@ -127,7 +127,7 @@ impl NippyJarWriter { } /// Performs consistency checks on the [`NippyJar`] file and might self-heal or throw an error - /// according to [ConsistencyFailStrategy]. + /// according to [`ConsistencyFailStrategy`]. /// * Is the offsets file size expected? 
/// * Is the data file size expected? /// @@ -142,7 +142,7 @@ impl NippyJarWriter { // When an offset size is smaller than the initial (8), we are dealing with immutable // data. if reader.offset_size() != OFFSET_SIZE_BYTES { - return Err(NippyJarError::FrozenJar) + return Err(NippyJarError::FrozenJar); } let expected_offsets_file_size: u64 = (1 + // first byte is the size of one offset @@ -153,7 +153,7 @@ impl NippyJarWriter { if check_mode.should_err() && expected_offsets_file_size.cmp(&actual_offsets_file_size) != Ordering::Equal { - return Err(NippyJarError::InconsistentState) + return Err(NippyJarError::InconsistentState); } // Offsets configuration wasn't properly committed @@ -184,7 +184,7 @@ impl NippyJarWriter { let data_file_len = self.data_file.get_ref().metadata()?.len(); if check_mode.should_err() && last_offset.cmp(&data_file_len) != Ordering::Equal { - return Err(NippyJarError::InconsistentState) + return Err(NippyJarError::InconsistentState); } // Offset list wasn't properly committed @@ -214,7 +214,7 @@ impl NippyJarWriter { // Since we decrease the offset list, we need to check the consistency of // `self.jar.rows` again self.ensure_file_consistency(ConsistencyFailStrategy::Heal)?; - break + break; } } } @@ -347,7 +347,7 @@ impl NippyJarWriter { return Err(NippyJarError::InvalidPruning( num_offsets, remaining_to_prune as u64, - )) + )); } let new_num_offsets = num_offsets.saturating_sub(remaining_to_prune as u64); @@ -373,7 +373,7 @@ impl NippyJarWriter { self.data_file.get_mut().set_len(last_offset)?; } } else { - return Err(NippyJarError::InvalidPruning(0, remaining_to_prune as u64)) + return Err(NippyJarError::InvalidPruning(0, remaining_to_prune as u64)); } } @@ -463,7 +463,7 @@ impl NippyJarWriter { for offset in self.offsets.drain(..) { if let Some(last_offset_ondisk) = last_offset_ondisk.take() { if last_offset_ondisk == offset { - continue + continue; } } self.offsets_file.write_all(&offset.to_le_bytes())?; @@ -514,7 +514,7 @@ impl NippyJarWriter { } } -/// Strategy on encountering an inconsistent state when creating a [NippyJarWriter]. +/// Strategy on encountering an inconsistent state when creating a [`NippyJarWriter`]. #[derive(Debug, Copy, Clone)] pub enum ConsistencyFailStrategy { /// Writer should heal. diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index 1fe7a3481..e069978da 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -23,7 +23,7 @@ impl From for StateReverts { impl StateReverts { /// Write reverts to database. /// - /// Note:: Reverts will delete all wiped storage from plain state. + /// Note: Reverts will delete all wiped storage from plain state. pub fn write_to_db( self, tx: &TX, @@ -93,7 +93,7 @@ impl StateReverts { } /// Iterator over storage reverts. -/// See [StorageRevertsIter::next] for more details. +/// See [`StorageRevertsIter::next`] for more details. #[allow(missing_debug_implementations)] pub struct StorageRevertsIter { reverts: Peekable, diff --git a/crates/storage/provider/src/providers/chain_info.rs b/crates/storage/provider/src/providers/chain_info.rs index 7bde9ac24..905be1287 100644 --- a/crates/storage/provider/src/providers/chain_info.rs +++ b/crates/storage/provider/src/providers/chain_info.rs @@ -29,7 +29,7 @@ impl ChainInfoTracker { } } - /// Returns the [ChainInfo] for the canonical head. + /// Returns the [`ChainInfo`] for the canonical head.
pub(crate) fn chain_info(&self) -> ChainInfo { let inner = self.inner.canonical_head.read(); ChainInfo { best_hash: inner.hash(), best_number: inner.number } diff --git a/crates/storage/provider/src/providers/consistent_view.rs b/crates/storage/provider/src/providers/consistent_view.rs index 18035f889..82be1e1e8 100644 --- a/crates/storage/provider/src/providers/consistent_view.rs +++ b/crates/storage/provider/src/providers/consistent_view.rs @@ -15,10 +15,10 @@ pub use reth_storage_errors::provider::ConsistentViewError; /// ## Usage /// /// The view should only be used outside of staged-sync. -/// Otherwise, any attempt to create a provider will result in [ConsistentViewError::Syncing]. +/// Otherwise, any attempt to create a provider will result in [`ConsistentViewError::Syncing`]. /// /// When using the view, the consumer should either -/// 1) have a failover for when the state changes and handle [ConsistentViewError::Inconsistent] +/// 1) have a failover for when the state changes and handle [`ConsistentViewError::Inconsistent`] /// appropriately. /// 2) be sure that the state does not change. #[derive(Clone, Debug)] diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 8cde416d7..d6506742c 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -374,10 +374,10 @@ impl DatabaseProvider { /// /// If UNWIND is false we will just read the state/blocks and return them. /// - /// 1. Iterate over the [BlockBodyIndices][tables::BlockBodyIndices] table to get all the + /// 1. Iterate over the [`BlockBodyIndices`][tables::BlockBodyIndices] table to get all the /// transaction ids. - /// 2. Iterate over the [StorageChangeSets][tables::StorageChangeSets] table and the - /// [AccountChangeSets][tables::AccountChangeSets] tables in reverse order to reconstruct + /// 2. Iterate over the [`StorageChangeSets`][tables::StorageChangeSets] table and the + /// [`AccountChangeSets`][tables::AccountChangeSets] tables in reverse order to reconstruct /// the changesets. /// - In order to have both the old and new values in the changesets, we also access the /// plain state tables. diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 9b72b3bd9..8233e1cb4 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -129,12 +129,12 @@ where { /// Ensures that the given block number is canonical (synced) /// - /// This is a helper for guarding the [HistoricalStateProvider] against block numbers that are + /// This is a helper for guarding the [`HistoricalStateProvider`] against block numbers that are /// out of range and would lead to invalid results, mainly during initial sync. /// - /// Verifying the block_number would be expensive since we need to lookup sync table + /// Verifying the `block_number` would be expensive since we need to lookup sync table /// Instead, we ensure that the `block_number` is within the range of the - /// [Self::best_block_number] which is updated when a block is synced. + /// [`Self::best_block_number`] which is updated when a block is synced. #[inline] fn ensure_canonical_block(&self, block_number: BlockNumber) -> ProviderResult<()> { let latest = self.best_block_number()?; @@ -315,7 +315,7 @@ where /// Returns the block with senders with matching number or hash from database. 
/// - /// **NOTE: If [TransactionVariant::NoHash] is provided then the transactions have invalid + /// **NOTE: If [`TransactionVariant::NoHash`] is provided then the transactions have invalid /// hashes, since they would need to be calculated on the spot, and we want fast querying.** /// /// Returns `None` if block is not found. @@ -634,7 +634,7 @@ where /// Returns the state provider for pending state. /// /// If there's no pending block available then the latest state provider is returned: - /// [Self::latest] + /// [`Self::latest`] fn pending(&self) -> ProviderResult { trace!(target: "providers::blockchain", "Getting provider for pending state"); diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index e3a72077f..5b9da66ff 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -25,11 +25,11 @@ use std::fmt::Debug; /// It means that all changes made in the provided block number are not included. /// /// Historical state provider reads the following tables: -/// - [tables::AccountsHistory] -/// - [tables::Bytecodes] -/// - [tables::StoragesHistory] -/// - [tables::AccountChangeSets] -/// - [tables::StorageChangeSets] +/// - [`tables::AccountsHistory`] +/// - [`tables::Bytecodes`] +/// - [`tables::StoragesHistory`] +/// - [`tables::AccountChangeSets`] +/// - [`tables::StorageChangeSets`] #[derive(Debug)] pub struct HistoricalStateProviderRef<'b, TX: DbTx> { /// Transaction @@ -51,7 +51,7 @@ pub enum HistoryInfo { } impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { - /// Create new StateProvider for historical block number + /// Create new `StateProvider` for historical block number pub fn new( tx: &'b TX, block_number: BlockNumber, @@ -60,7 +60,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { Self { tx, block_number, lowest_available_blocks: Default::default(), static_file_provider } } - /// Create new StateProvider for historical block number and lowest block numbers at which + /// Create new `StateProvider` for historical block number and lowest block numbers at which /// account & storage histories are available. pub const fn new_with_lowest_available_blocks( tx: &'b TX, @@ -71,7 +71,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { Self { tx, block_number, lowest_available_blocks, static_file_provider } } - /// Lookup an account in the AccountsHistory table + /// Lookup an account in the `AccountsHistory` table pub fn account_history_lookup(&self, address: Address) -> ProviderResult { if !self.lowest_available_blocks.is_account_history_available(self.block_number) { return Err(ProviderError::StateAtBlockPruned(self.block_number)) @@ -86,7 +86,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { ) } - /// Lookup a storage key in the StoragesHistory table + /// Lookup a storage key in the `StoragesHistory` table pub fn storage_history_lookup( &self, address: Address, @@ -315,7 +315,7 @@ impl<'b, TX: DbTx> StateProvider for HistoricalStateProviderRef<'b, TX> { } /// State provider for a given block number. -/// For more detailed description, see [HistoricalStateProviderRef]. +/// For more detailed description, see [`HistoricalStateProviderRef`]. 
#[derive(Debug)] pub struct HistoricalStateProvider { /// Database transaction @@ -329,7 +329,7 @@ pub struct HistoricalStateProvider { } impl HistoricalStateProvider { - /// Create new StateProvider for historical block number + /// Create new `StateProvider` for historical block number pub fn new( tx: TX, block_number: BlockNumber, @@ -376,12 +376,12 @@ delegate_provider_impls!(HistoricalStateProvider where [TX: DbTx]); #[derive(Clone, Copy, Debug, Default)] pub struct LowestAvailableBlocks { /// Lowest block number at which the account history is available. It may not be available if - /// [reth_primitives::PruneSegment::AccountHistory] was pruned. - /// [Option::None] means all history is available. + /// [`reth_primitives::PruneSegment::AccountHistory`] was pruned. + /// [`Option::None`] means all history is available. pub account_history_block_number: Option, /// Lowest block number at which the storage history is available. It may not be available if - /// [reth_primitives::PruneSegment::StorageHistory] was pruned. - /// [Option::None] means all history is available. + /// [`reth_primitives::PruneSegment::StorageHistory`] was pruned. + /// [`Option::None`] means all history is available. pub storage_history_block_number: Option, } diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index 0efd8d9c7..0bbd0f4dd 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -1,4 +1,4 @@ -//! Helper macros for implementing traits for various [StateProvider](crate::StateProvider) +//! Helper macros for implementing traits for various [`StateProvider`](crate::StateProvider) //! implementations /// A macro that delegates trait implementations to the `as_ref` function of the type. @@ -23,9 +23,9 @@ pub(crate) use delegate_impls_to_as_ref; /// Delegates the provider trait implementations to the `as_ref` function of the type: /// -/// [AccountReader](crate::AccountReader) -/// [BlockHashReader](crate::BlockHashReader) -/// [StateProvider](crate::StateProvider) +/// [`AccountReader`](crate::AccountReader) +/// [`BlockHashReader`](crate::BlockHashReader) +/// [`StateProvider`](crate::StateProvider) macro_rules! delegate_provider_impls { ($target:ty $(where [$($generics:tt)*])?) => { $crate::providers::state::macros::delegate_impls_to_as_ref!( diff --git a/crates/storage/provider/src/providers/state/mod.rs b/crates/storage/provider/src/providers/state/mod.rs index f04523647..06a5fefb4 100644 --- a/crates/storage/provider/src/providers/state/mod.rs +++ b/crates/storage/provider/src/providers/state/mod.rs @@ -1,4 +1,4 @@ -//! [StateProvider](crate::StateProvider) implementations +//! [`StateProvider`](crate::StateProvider) implementations pub(crate) mod historical; pub(crate) mod latest; pub(crate) mod macros; diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 0a29436cb..c6287dd8b 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -107,17 +107,17 @@ pub struct StaticFileProviderInner { static_files_max_block: RwLock>, /// Available static file block ranges on disk indexed by max transactions. 
static_files_tx_index: RwLock, - /// Directory where static_files are located + /// Directory where `static_files` are located path: PathBuf, /// Whether [`StaticFileJarProvider`] loads filters into memory. If not, `by_hash` queries /// won't be able to be queried directly. load_filters: bool, - /// Maintains a map of StaticFile writers for each [`StaticFileSegment`] + /// Maintains a map of `StaticFile` writers for each [`StaticFileSegment`] writers: DashMap, metrics: Option>, /// Access rights of the provider. access: StaticFileAccess, - /// Write lock for when access is [StaticFileAccess::RW]. + /// Write lock for when access is [`StaticFileAccess::RW`]. _lock_file: Option, } @@ -516,7 +516,7 @@ impl StaticFileProvider { /// /// For each static file segment: /// * the corresponding database table should overlap or have continuity in their keys - /// ([TxNumber] or [BlockNumber]). + /// ([`TxNumber`] or [`BlockNumber`]). /// * its highest block should match the stage checkpoint block number if it's equal or higher /// than the corresponding database table last entry. /// @@ -626,7 +626,7 @@ impl StaticFileProvider { /// Check invariants for each corresponding table and static file segment: /// /// * the corresponding database table should overlap or have continuity in their keys - /// ([TxNumber] or [BlockNumber]). + /// ([`TxNumber`] or [`BlockNumber`]). /// * its highest block should match the stage checkpoint block number if it's equal or higher /// than the corresponding database table last entry. /// * If the checkpoint block is higher, then request a pipeline unwind to the static file @@ -726,7 +726,7 @@ impl StaticFileProvider { } } - /// Iterates through segment static_files in reverse order, executing a function until it + /// Iterates through segment `static_files` in reverse order, executing a function until it /// returns some object. Useful for finding objects by [`TxHash`] or [`BlockHash`]. pub fn find_static_file( &self, @@ -856,7 +856,7 @@ impl StaticFileProvider { })) } - /// Returns directory where static_files are located. + /// Returns directory where `static_files` are located. pub fn directory(&self) -> &Path { &self.path } @@ -897,13 +897,13 @@ impl StaticFileProvider { fetch_from_database() } - /// Gets data within a specified range, potentially spanning different static_files and + /// Gets data within a specified range, potentially spanning different `static_files` and /// database. /// /// # Arguments /// * `segment` - The segment of the static file to query. /// * `block_range` - The range of data to fetch. - /// * `fetch_from_static_file` - A function to fetch data from the static_file. + /// * `fetch_from_static_file` - A function to fetch data from the `static_file`. /// * `fetch_from_database` - A function to fetch data from the database. /// * `predicate` - A function used to evaluate each item in the fetched data. 
Fetching is /// terminated when this function returns false, thereby filtering the data based on the @@ -949,7 +949,7 @@ impl StaticFileProvider { } #[cfg(any(test, feature = "test-utils"))] - /// Returns static_files directory + /// Returns `static_files` directory pub fn path(&self) -> &Path { &self.path } diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 3b88812da..7ca6f62f9 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -26,9 +26,9 @@ pub type StaticFileProviderRWRefMut<'a> = RefMut<'a, StaticFileSegment, StaticFi #[derive(Debug)] /// Extends `StaticFileProvider` with writing capabilities pub struct StaticFileProviderRW { - /// Reference back to the provider. We need [Weak] here because [StaticFileProviderRW] is - /// stored in a [dashmap::DashMap] inside the parent [StaticFileProvider].which is an [Arc]. - /// If we were to use an [Arc] here, we would create a reference cycle. + /// Reference back to the provider. We need [Weak] here because [`StaticFileProviderRW`] is + /// stored in a [`dashmap::DashMap`] inside the parent [`StaticFileProvider`], which is an + /// [Arc]. If we were to use an [Arc] here, we would create a reference cycle. reader: Weak, /// A [`NippyJarWriter`] instance. writer: NippyJarWriter, @@ -120,10 +120,10 @@ impl StaticFileProviderRW { /// Checks the consistency of the file and heals it if necessary and `read_only` is set to /// false. If the check fails, it will return an error. /// - /// If healing does happen, it will update the end range on the [SegmentHeader]. However, for + /// If healing does happen, it will update the end range on the [`SegmentHeader`]. However, for /// transaction based segments, the block end range has to be found and healed externally. /// - /// Check [NippyJarWriter::ensure_file_consistency] for more on healing. + /// Check [`NippyJarWriter::ensure_file_consistency`] for more on healing. pub fn ensure_file_consistency(&mut self, read_only: bool) -> ProviderResult<()> { let inconsistent_error = || { ProviderError::NippyJar( @@ -333,7 +333,7 @@ impl StaticFileProviderRW { /// Truncates a number of rows from disk. It deletes and loads an older static file if block /// goes beyond the start of the current block range. /// - /// **last_block** should be passed only with transaction based segments. + /// **`last_block`** should be passed only with transaction based segments. /// /// # Note /// Commits to the configuration file at the end.
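Aside: the mechanical fix running through all of these hunks is the same one, wrapping identifiers in backticks so `clippy::doc_markdown` stops warning and rustdoc renders them as code (bracketed names additionally resolve as intra-doc links). A minimal sketch of the rule in action, using a crate-level attribute purely for illustration (the PR itself enables the lint once for the whole workspace via the root `Cargo.toml`):

```
#![warn(clippy::doc_markdown)] // illustrative; the PR turns this on workspace-wide

pub struct ChainInfo;

// Would warn under `clippy::doc_markdown` (roughly: "item in documentation is
// missing backticks"):
//
//     /// Returns the ChainInfo for the canonical head.
//
/// Returns the [`ChainInfo`] for the canonical head.
///
/// With backticks, rustdoc renders the name as code, and the bracketed form
/// resolves as an intra-doc link to the struct above.
pub fn chain_info() -> ChainInfo {
    ChainInfo
}
```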
diff --git a/crates/storage/provider/src/test_utils/events.rs b/crates/storage/provider/src/test_utils/events.rs index baa6bc470..39e53772c 100644 --- a/crates/storage/provider/src/test_utils/events.rs +++ b/crates/storage/provider/src/test_utils/events.rs @@ -3,7 +3,7 @@ use tokio::sync::broadcast::{self, Sender}; use crate::{CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions, Chain}; -/// A test ChainEventSubscriptions +/// A test `ChainEventSubscriptions` #[derive(Clone, Debug, Default)] pub struct TestCanonStateSubscriptions { canon_notif_tx: Arc>>>, diff --git a/crates/storage/provider/src/traits/chain.rs b/crates/storage/provider/src/traits/chain.rs index 6ed498a38..00a36e820 100644 --- a/crates/storage/provider/src/traits/chain.rs +++ b/crates/storage/provider/src/traits/chain.rs @@ -12,10 +12,10 @@ use tokio::sync::broadcast; use tokio_stream::{wrappers::BroadcastStream, Stream}; use tracing::debug; -/// Type alias for a receiver that receives [CanonStateNotification] +/// Type alias for a receiver that receives [`CanonStateNotification`] pub type CanonStateNotifications = broadcast::Receiver; -/// Type alias for a sender that sends [CanonStateNotification] +/// Type alias for a sender that sends [`CanonStateNotification`] pub type CanonStateNotificationSender = broadcast::Sender; /// A type that allows to register chain related event subscriptions. @@ -104,7 +104,7 @@ impl CanonStateNotification { /// Get the new chain if any. /// - /// Returns the new committed [Chain] for [Self::Reorg] and [Self::Commit] variants. + /// Returns the new committed [Chain] for [`Self::Reorg`] and [`Self::Commit`] variants. pub fn committed(&self) -> Arc { match self { Self::Commit { new } => new.clone(), @@ -114,8 +114,8 @@ impl CanonStateNotification { /// Returns the new tip of the chain. /// - /// Returns the new tip for [Self::Reorg] and [Self::Commit] variants which commit at least 1 - /// new block. + /// Returns the new tip for [`Self::Reorg`] and [`Self::Commit`] variants which commit at least + /// 1 new block. pub fn tip(&self) -> &SealedBlockWithSenders { match self { Self::Commit { new } => new.tip(), diff --git a/crates/storage/provider/src/traits/chain_info.rs b/crates/storage/provider/src/traits/chain_info.rs index 76eb7fd3f..39f8639dd 100644 --- a/crates/storage/provider/src/traits/chain_info.rs +++ b/crates/storage/provider/src/traits/chain_info.rs @@ -8,14 +8,14 @@ pub trait CanonChainTracker: Send + Sync { fn on_forkchoice_update_received(&self, update: &ForkchoiceState); /// Returns the last time a fork choice update was received from the CL - /// ([CanonChainTracker::on_forkchoice_update_received]) + /// ([`CanonChainTracker::on_forkchoice_update_received`]) fn last_received_update_timestamp(&self) -> Option; /// Notify the tracker about a transition configuration exchange. fn on_transition_configuration_exchanged(&self); /// Returns the last time a transition configuration was exchanged with the CL - /// ([CanonChainTracker::on_transition_configuration_exchanged]) + /// ([`CanonChainTracker::on_transition_configuration_exchanged`]) fn last_exchanged_transition_configuration_timestamp(&self) -> Option; /// Sets the canonical head of the chain. 
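Aside: `CanonStateNotifications` above is a plain `tokio::sync::broadcast::Receiver`, so a subscriber has to cope with the channel's lossy behavior when it falls behind (the `EventStream` wrapper further below exists for exactly this reason). A minimal sketch of such a subscriber loop, with a hypothetical unit-struct payload standing in for reth's richer notification enum and an arbitrary capacity of 16:

```
use tokio::sync::broadcast::{self, error::RecvError};

// Stand-in for reth's richer notification type.
#[derive(Clone, Debug)]
struct CanonStateNotification;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = broadcast::channel::<CanonStateNotification>(16);
    tx.send(CanonStateNotification).unwrap();
    drop(tx); // close the channel so the loop below terminates

    loop {
        match rx.recv().await {
            Ok(_notification) => { /* react to the newly committed chain */ }
            // A slow subscriber may be overrun; skip what was missed and resume.
            Err(RecvError::Lagged(missed)) => eprintln!("skipped {missed} notifications"),
            Err(RecvError::Closed) => break,
        }
    }
}
```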
diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index b7450bd4f..5cd14542c 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -3,7 +3,7 @@ use reth_db::transaction::{DbTx, DbTxMut}; use reth_storage_errors::provider::ProviderResult; use revm::db::OriginalValuesKnown; -/// A helper trait for [BundleStateWithReceipts](reth_execution_types::BundleStateWithReceipts) to +/// A helper trait for [`BundleStateWithReceipts`](reth_execution_types::BundleStateWithReceipts) to /// write state and receipts to storage. pub trait StateWriter { /// Write the data and receipts to the database or static files if `static_file_producer` is diff --git a/crates/storage/provider/src/traits/tree_viewer.rs b/crates/storage/provider/src/traits/tree_viewer.rs index cd5eaab18..a8eea44a6 100644 --- a/crates/storage/provider/src/traits/tree_viewer.rs +++ b/crates/storage/provider/src/traits/tree_viewer.rs @@ -1,7 +1,7 @@ use crate::{BlockchainTreePendingStateProvider, CanonStateSubscriptions}; use reth_blockchain_tree_api::{BlockchainTreeEngine, BlockchainTreeViewer}; -/// Helper trait to combine all the traits we need for the BlockchainProvider +/// Helper trait to combine all the traits we need for the `BlockchainProvider` /// /// This is a temporary solution pub trait TreeViewer: diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 102716bb5..db16455ab 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -154,7 +154,7 @@ pub trait BlockReaderIdExt: BlockReader + BlockIdReader + ReceiptProviderIdExt { /// Returns the pending block header if available /// - /// Note: This returns a [SealedHeader] because it's expected that this is sealed by the + /// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. fn pending_header(&self) -> ProviderResult> { self.sealed_header_by_id(BlockNumberOrTag::Pending.into()) @@ -162,7 +162,7 @@ pub trait BlockReaderIdExt: BlockReader + BlockIdReader + ReceiptProviderIdExt { /// Returns the latest block header if available /// - /// Note: This returns a [SealedHeader] because it's expected that this is sealed by the + /// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. fn latest_header(&self) -> ProviderResult> { self.sealed_header_by_id(BlockNumberOrTag::Latest.into()) @@ -170,7 +170,7 @@ pub trait BlockReaderIdExt: BlockReader + BlockIdReader + ReceiptProviderIdExt { /// Returns the safe block header if available /// - /// Note: This returns a [SealedHeader] because it's expected that this is sealed by the + /// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. fn safe_header(&self) -> ProviderResult> { self.sealed_header_by_id(BlockNumberOrTag::Safe.into()) @@ -178,7 +178,7 @@ pub trait BlockReaderIdExt: BlockReader + BlockIdReader + ReceiptProviderIdExt { /// Returns the finalized block header if available /// - /// Note: This returns a [SealedHeader] because it's expected that this is sealed by the + /// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. 
fn finalized_header(&self) -> ProviderResult> { self.sealed_header_by_id(BlockNumberOrTag::Finalized.into()) diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index 28d721eb6..ad71ab724 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -7,7 +7,7 @@ use reth_primitives::{ }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; -/// Type alias of boxed [StateProvider]. +/// Type alias of boxed [`StateProvider`]. pub type StateProviderBox = Box; /// An abstraction for a type that provides state data. diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index e0741fa09..ee5222e91 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -47,13 +47,13 @@ pub mod pool; /// A type that can spawn tasks. /// -/// The main purpose of this type is to abstract over [TaskExecutor] so it's more convenient to +/// The main purpose of this type is to abstract over [`TaskExecutor`] so it's more convenient to /// provide default impls for testing. /// /// /// # Examples /// -/// Use the [TokioTaskExecutor] that spawns with [tokio::task::spawn] +/// Use the [`TokioTaskExecutor`] that spawns with [`tokio::task::spawn`] /// /// ``` /// # async fn t() { @@ -67,7 +67,7 @@ pub mod pool; /// # } /// ``` /// -/// Use the [TaskExecutor] that spawns task directly onto the tokio runtime via the [Handle]. +/// Use the [`TaskExecutor`] that spawns tasks directly onto the tokio runtime via the [Handle]. /// /// ``` /// # use reth_tasks::TaskManager; /// # fn t() { @@ -83,7 +83,7 @@ pub mod pool; /// # } /// ``` /// -/// The [TaskSpawner] trait is [DynClone] so `Box` are also `Clone`. +/// The [`TaskSpawner`] trait is [`DynClone`] so `Box<dyn TaskSpawner>` are also `Clone`. pub trait TaskSpawner: Send + Sync + Unpin + std::fmt::Debug + DynClone { /// Spawns the task onto the runtime. /// See also [`Handle::spawn`]. @@ -105,7 +105,7 @@ pub trait TaskSpawner: Send + Sync + Unpin + std::fmt::Debug + DynClone { dyn_clone::clone_trait_object!(TaskSpawner); -/// An [TaskSpawner] that uses [tokio::task::spawn] to execute tasks +/// A [`TaskSpawner`] that uses [`tokio::task::spawn`] to execute tasks #[derive(Debug, Clone, Default)] #[non_exhaustive] pub struct TokioTaskExecutor; @@ -161,14 +161,14 @@ pub struct TaskManager { signal: Option, /// Receiver of the shutdown signal. on_shutdown: Shutdown, - /// How many [GracefulShutdown] tasks are currently active + /// How many [`GracefulShutdown`] tasks are currently active graceful_tasks: Arc, } // === impl TaskManager === impl TaskManager { - /// Returns a a [TaskManager] over the currently running Runtime. + /// Returns a [`TaskManager`] over the currently running Runtime.
/// /// # Panics /// @@ -289,7 +289,7 @@ pub struct TaskExecutor { panicked_tasks_tx: UnboundedSender, // Task Executor Metrics metrics: TaskExecutorMetrics, - /// How many [GracefulShutdown] tasks are currently active + /// How many [`GracefulShutdown`] tasks are currently active graceful_tasks: Arc, } @@ -306,7 +306,7 @@ impl TaskExecutor { &self.on_shutdown } - /// Spawns a future on the tokio runtime depending on the [TaskKind] + /// Spawns a future on the tokio runtime depending on the [`TaskKind`] fn spawn_on_rt(&self, fut: F, task_kind: TaskKind) -> JoinHandle<()> where F: Future + Send + 'static, @@ -320,7 +320,7 @@ impl TaskExecutor { } } - /// Spawns a regular task depending on the given [TaskKind] + /// Spawns a regular task depending on the given [`TaskKind`] fn spawn_task_as(&self, fut: F, task_kind: TaskKind) -> JoinHandle<()> where F: Future + Send + 'static, @@ -383,7 +383,7 @@ impl TaskExecutor { self.handle.spawn(task) } - /// Spawns a critical task depending on the given [TaskKind] + /// Spawns a critical task depending on the given [`TaskKind`] fn spawn_critical_as( &self, name: &'static str, @@ -472,8 +472,8 @@ impl TaskExecutor { /// This spawns a critical task onto the runtime. /// - /// If this task panics, the [TaskManager] is notified. - /// The [TaskManager] will wait until the given future has completed before shutting down. + /// If this task panics, the [`TaskManager`] is notified. + /// The [`TaskManager`] will wait until the given future has completed before shutting down. /// /// # Example /// @@ -521,7 +521,7 @@ impl TaskExecutor { /// This spawns a regular task onto the runtime. /// - /// The [TaskManager] will wait until the given future has completed before shutting down. + /// The [`TaskManager`] will wait until the given future has completed before shutting down. /// /// # Example /// @@ -579,12 +579,12 @@ impl TaskSpawner for TaskExecutor { } } -/// TaskSpawner with extended behaviour +/// `TaskSpawner` with extended behaviour pub trait TaskSpawnerExt: Send + Sync + Unpin + std::fmt::Debug + DynClone { /// This spawns a critical task onto the runtime. /// - /// If this task panics, the [TaskManager] is notified. - /// The [TaskManager] will wait until the given future has completed before shutting down. + /// If this task panics, the [`TaskManager`] is notified. + /// The [`TaskManager`] will wait until the given future has completed before shutting down. fn spawn_critical_with_graceful_shutdown_signal( &self, name: &'static str, @@ -595,7 +595,7 @@ pub trait TaskSpawnerExt: Send + Sync + Unpin + std::fmt::Debug + DynClone { /// This spawns a regular task onto the runtime. /// - /// The [TaskManager] will wait until the given future has completed before shutting down. + /// The [`TaskManager`] will wait until the given future has completed before shutting down. 
fn spawn_with_graceful_shutdown_signal( &self, f: impl FnOnce(GracefulShutdown) -> F, @@ -629,9 +629,9 @@ impl TaskSpawnerExt for TaskExecutor { /// Determines how a task is spawned enum TaskKind { - /// Spawn the task to the default executor [Handle::spawn] + /// Spawn the task to the default executor [`Handle::spawn`] Default, - /// Spawn the task to the blocking executor [Handle::spawn_blocking] + /// Spawn the task to the blocking executor [`Handle::spawn_blocking`] Blocking, } diff --git a/crates/tasks/src/pool.rs b/crates/tasks/src/pool.rs index a96b53b82..dbb4e19de 100644 --- a/crates/tasks/src/pool.rs +++ b/crates/tasks/src/pool.rs @@ -16,7 +16,7 @@ use tokio::sync::{oneshot, AcquireError, OwnedSemaphorePermit, Semaphore}; /// `debug_traceTransaction` as well as `eth_getProof` because they can consume a lot of /// memory and CPU. /// -/// This types serves as an entry guard for the [BlockingTaskPool] and is used to rate limit +/// This type serves as an entry guard for the [`BlockingTaskPool`] and is used to rate limit /// parallel blocking tasks in the pool. #[derive(Clone, Debug)] pub struct BlockingTaskGuard(Arc); @@ -28,12 +28,12 @@ impl BlockingTaskGuard { Self(Arc::new(Semaphore::new(max_blocking_tasks))) } - /// See also [Semaphore::acquire_owned] + /// See also [`Semaphore::acquire_owned`] pub async fn acquire_owned(self) -> Result { self.0.acquire_owned().await } - /// See also [Semaphore::acquire_many_owned] + /// See also [`Semaphore::acquire_many_owned`] pub async fn acquire_many_owned(self, n: u32) -> Result { self.0.acquire_many_owned(n).await } diff --git a/crates/tasks/src/shutdown.rs b/crates/tasks/src/shutdown.rs index 5cc012d8e..ec63450e6 100644 --- a/crates/tasks/src/shutdown.rs +++ b/crates/tasks/src/shutdown.rs @@ -14,7 +14,7 @@ use tokio::sync::oneshot; /// A Future that resolves when the shutdown event has been fired. /// -/// The [TaskManager](crate) +/// The [`TaskManager`](crate) #[derive(Debug)] pub struct GracefulShutdown { shutdown: Shutdown, @@ -45,8 +45,8 @@ impl Clone for GracefulShutdown { } } -/// A guard that fires once dropped to signal the [TaskManager](crate::TaskManager) that the -/// [GracefulShutdown] has completed. +/// A guard that fires once dropped to signal the [`TaskManager`](crate::TaskManager) that the +/// [`GracefulShutdown`] has completed. #[derive(Debug)] #[must_use = "if unused the task will not be gracefully shutdown"] pub struct GracefulShutdownGuard(Arc); diff --git a/crates/tokio-util/src/event_stream.rs b/crates/tokio-util/src/event_stream.rs index 3faaece6d..0e041f8d4 100644 --- a/crates/tokio-util/src/event_stream.rs +++ b/crates/tokio-util/src/event_stream.rs @@ -7,7 +7,7 @@ use std::{ use tokio_stream::Stream; use tracing::warn; -/// Thin wrapper around tokio's BroadcastStream to allow skipping broadcast errors. +/// Thin wrapper around tokio's `BroadcastStream` to allow skipping broadcast errors. #[derive(Debug)] pub struct EventStream { inner: tokio_stream::wrappers::BroadcastStream, diff --git a/crates/tracing/src/layers.rs b/crates/tracing/src/layers.rs index 545ae8afa..05e051f95 100644 --- a/crates/tracing/src/layers.rs +++ b/crates/tracing/src/layers.rs @@ -17,7 +17,7 @@ pub(crate) type BoxedLayer = Box + Send + Sync>; const RETH_LOG_FILE_NAME: &str = "reth.log"; -/// Default [directives](Directive) for [EnvFilter] which disables high-frequency debug logs from +/// Default [directives](Directive) for [`EnvFilter`] which disables high-frequency debug logs from /// `hyper`, `trust-dns` and `discv5`.
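// Sketch of the semaphore-gated rate limiting described above for heavy RPC
// work, built on plain tokio; the guard type here is a hypothetical stand-in.
use std::sync::Arc;
use tokio::sync::{OwnedSemaphorePermit, Semaphore};

#[derive(Clone, Debug)]
struct TracingGuard(Arc<Semaphore>);

impl TracingGuard {
    fn new(max_parallel: usize) -> Self {
        Self(Arc::new(Semaphore::new(max_parallel)))
    }

    // Waits until one of the `max_parallel` slots is free.
    async fn acquire(&self) -> OwnedSemaphorePermit {
        self.0.clone().acquire_owned().await.expect("semaphore not closed")
    }
}

#[tokio::main]
async fn main() {
    let guard = TracingGuard::new(2);
    let _permit = guard.acquire().await;
    // run the expensive `debug_traceTransaction`-style work here;
    // the slot is released when `_permit` is dropped
}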
const DEFAULT_ENV_FILTER_DIRECTIVES: [&str; 4] = ["hyper::proto::h1=off", "trust_dns_proto=off", "trust_dns_resolver=off", "discv5=off"]; diff --git a/crates/transaction-pool/benches/reorder.rs b/crates/transaction-pool/benches/reorder.rs index d3a8f34e9..d70a26cf6 100644 --- a/crates/transaction-pool/benches/reorder.rs +++ b/crates/transaction-pool/benches/reorder.rs @@ -119,7 +119,7 @@ mod implementations { use reth_transaction_pool::PoolTransaction; use std::collections::BinaryHeap; - /// This implementation appends the transactions and uses [Vec::sort_by] function for sorting. + /// This implementation appends the transactions and uses the [`Vec::sort_by`] function for sorting. #[derive(Default)] pub(crate) struct VecTxPoolSortStable { inner: Vec, @@ -139,7 +139,7 @@ mod implementations { } } - /// This implementation appends the transactions and uses [Vec::sort_unstable_by] function for + /// This implementation appends the transactions and uses the [`Vec::sort_unstable_by`] function for /// sorting. #[derive(Default)] pub(crate) struct VecTxPoolSortUnstable { @@ -185,7 +185,7 @@ mod implementations { } } - /// This implementation uses BinaryHeap which is drained and reconstructed on each reordering. + /// This implementation uses `BinaryHeap` which is drained and reconstructed on each reordering. #[derive(Default)] pub(crate) struct BinaryHeapTxPool { inner: BinaryHeap, diff --git a/crates/transaction-pool/benches/truncate.rs b/crates/transaction-pool/benches/truncate.rs index 25aaf28bd..cae8d02ab 100644 --- a/crates/transaction-pool/benches/truncate.rs +++ b/crates/transaction-pool/benches/truncate.rs @@ -60,7 +60,7 @@ fn create_transactions_for_sender( /// Because this uses [Arbitrary], the number of transactions per sender needs to be bounded. This /// is done by using the `max_depth` parameter. /// -/// This uses [create_transactions_for_sender] to generate the transactions. +/// This uses [`create_transactions_for_sender`] to generate the transactions. fn generate_many_transactions(senders: usize, max_depth: usize) -> Vec { let config = ProptestConfig::default(); let rng = TestRng::from_seed(RngAlgorithm::ChaCha, &SEED); diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index 862c4923d..5799297cc 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -8,13 +8,13 @@ use schnellru::{ByLength, LruMap}; use std::{collections::HashSet, fmt, fs, io, path::PathBuf, sync::Arc}; use tracing::{debug, trace}; -/// How many [BlobTransactionSidecar] to cache in memory. +/// How many [`BlobTransactionSidecar`] to cache in memory. pub const DEFAULT_MAX_CACHED_BLOBS: u32 = 100; /// A blob store that stores blob data on disk. /// /// The type uses deferred deletion, meaning that blobs are not immediately deleted from disk, but -/// it's expected that the maintenance task will call [BlobStore::cleanup] to remove the deleted +/// it's expected that the maintenance task will call [`BlobStore::cleanup`] to remove the deleted /// blobs from disk. #[derive(Clone, Debug)] pub struct DiskFileBlobStore { @@ -385,7 +385,7 @@ impl fmt::Debug for DiskFileBlobStoreInner { /// Errors that can occur when interacting with a disk file blob store. #[derive(Debug, thiserror::Error)] pub enum DiskFileBlobStoreError { - /// Thrown during [DiskFileBlobStore::open] if the blob store directory cannot be opened. + /// Thrown during [`DiskFileBlobStore::open`] if the blob store directory cannot be opened.
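// Sketch contrasting the two Vec-based strategies benchmarked above:
// `sort_by` is stable (equal keys keep their insertion order), while
// `sort_unstable_by` may reorder equal keys but avoids the stable sort's
// extra allocation.
fn main() {
    let mut stable = vec![(3, "a"), (1, "b"), (3, "c")];
    let mut unstable = stable.clone();

    stable.sort_by(|x, y| x.0.cmp(&y.0));
    unstable.sort_unstable_by(|x, y| x.0.cmp(&y.0));

    // the stable sort must keep "a" before "c" for the equal key 3
    assert_eq!(stable, vec![(1, "b"), (3, "a"), (3, "c")]);
    // the unstable sort is only guaranteed to be sorted by key
    assert!(unstable.windows(2).all(|w| w[0].0 <= w[1].0));
}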
#[error("failed to open blobstore at {0}: {1}")] /// Indicates a failure to open the blob store directory. Open(PathBuf, io::Error), diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index af2d7efb5..bba4b8533 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -38,7 +38,7 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { /// the number of successfully deleted blobs and the number of failed deletions. /// /// This is intended to be called in the background to clean up any old or unused data, in case - /// the store uses deferred cleanup: [DiskFileBlobStore] + /// the store uses deferred cleanup: [`DiskFileBlobStore`] fn cleanup(&self) -> BlobStoreCleanupStat; /// Retrieves the decoded blob data for the given transaction hash. @@ -58,7 +58,7 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { txs: Vec, ) -> Result, BlobStoreError>; - /// Returns the exact [BlobTransactionSidecar] for the given transaction hashes in the exact + /// Returns the exact [`BlobTransactionSidecar`] for the given transaction hashes in the exact /// order they were requested. /// /// Returns an error if any of the blobs are not found in the blob store. diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index fd8b238e3..a58c1a285 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -34,7 +34,7 @@ pub struct PoolConfig { /// Price bump (in %) for the transaction pool underpriced check. pub price_bumps: PriceBumpConfig, /// How to handle locally received transactions: - /// [TransactionOrigin::Local](crate::TransactionOrigin). + /// [`TransactionOrigin::Local`](crate::TransactionOrigin). pub local_transactions_config: LocalTransactionConfig, } @@ -125,13 +125,13 @@ impl Default for PriceBumpConfig { } /// Configuration options for the locally received transactions: -/// [TransactionOrigin::Local](crate::TransactionOrigin) +/// [`TransactionOrigin::Local`](crate::TransactionOrigin) #[derive(Debug, Clone, Eq, PartialEq)] pub struct LocalTransactionConfig { /// Apply no exemptions to the locally received transactions. /// /// This includes: - /// - available slots are limited to the configured `max_account_slots` of [PoolConfig] + /// - available slots are limited to the configured `max_account_slots` of [`PoolConfig`] /// - no price exemptions /// - no eviction exemptions pub no_exemptions: bool, @@ -176,11 +176,11 @@ impl LocalTransactionConfig { } /// Sets toggle to propagate transactions received locally by this client (e.g - /// transactions from eth_sendTransaction to this nodes' RPC server) + /// transactions from `eth_sendTransaction` to this node's RPC server) /// /// If set to false, only transactions received by network peers (via /// p2p) will be marked as propagated in the local transaction pool and returned on a - /// GetPooledTransactions p2p request + /// `GetPooledTransactions` p2p request pub const fn set_propagate_local_transactions(mut self, propagate_local_txs: bool) -> Self { self.propagate_local_transactions = propagate_local_txs; self diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index e20624801..b670cdf95 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -8,12 +8,12 @@ pub type PoolResult = Result; /// A trait for additional errors that can be thrown by the transaction pool.
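// Sketch of the const builder-style toggle shown above; the struct is a
// hypothetical stand-in for LocalTransactionConfig, not the reth type itself.
#[derive(Debug, Clone)]
struct LocalTxConfig {
    no_exemptions: bool,
    propagate_local_transactions: bool,
}

impl LocalTxConfig {
    // `const fn` setters keep the builder usable in const contexts
    const fn set_propagate_local_transactions(mut self, propagate: bool) -> Self {
        self.propagate_local_transactions = propagate;
        self
    }
}

fn main() {
    let cfg = LocalTxConfig { no_exemptions: false, propagate_local_transactions: true }
        .set_propagate_local_transactions(false);
    // locally received transactions will no longer be marked for propagation
    assert!(!cfg.propagate_local_transactions);
}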
/// /// For example during validation -/// [TransactionValidator::validate_transaction](crate::validate::TransactionValidator::validate_transaction) +/// [`TransactionValidator::validate_transaction`](crate::validate::TransactionValidator::validate_transaction) pub trait PoolTransactionError: std::error::Error + Send + Sync { /// Returns `true` if the error was caused by a transaction that is considered bad in the /// context of the transaction pool and warrants peer penalization. /// - /// See [PoolError::is_bad_transaction]. + /// See [`PoolError::is_bad_transaction`]. fn is_bad_transaction(&self) -> bool; } @@ -82,7 +82,7 @@ impl PoolError { /// context of the transaction pool and warrants peer penalization. /// /// Not all error variants are caused by the incorrect composition of the transaction (See also - /// [InvalidPoolTransactionError]) and can be caused by the current state of the transaction + /// [`InvalidPoolTransactionError`]) and can be caused by the current state of the transaction /// pool. For example the transaction pool is already full or the error was caused by an /// internal error, such as database errors. /// @@ -169,7 +169,7 @@ pub enum Eip4844PoolTransactionError { /// Represents errors that can happen when validating transactions for the pool /// -/// See [TransactionValidator](crate::TransactionValidator). +/// See [`TransactionValidator`](crate::TransactionValidator). #[derive(Debug, thiserror::Error)] pub enum InvalidPoolTransactionError { /// Hard consensus errors @@ -180,7 +180,7 @@ pub enum InvalidPoolTransactionError { #[error("transaction's gas limit {0} exceeds block's gas limit {1}")] ExceedsGasLimit(u64, u64), /// Thrown when a new transaction is added to the pool, but then immediately discarded to - /// respect the max_init_code_size. + /// respect the `max_init_code_size`. #[error("transaction's size {0} exceeds max_init_code_size {1}")] ExceedsMaxInitCodeSize(usize, usize), /// Thrown if the input data of a transaction is greater @@ -212,7 +212,7 @@ impl InvalidPoolTransactionError { /// Returns `true` if the error was caused by a transaction that is considered bad in the /// context of the transaction pool and warrants peer penalization. /// - /// See [PoolError::is_bad_transaction]. + /// See [`PoolError::is_bad_transaction`]. #[inline] fn is_bad_transaction(&self) -> bool { match self { diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index f81977d67..e02e49ca9 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -71,7 +71,7 @@ //! //! Blob transactions can be quite large, hence they are stored in a separate blobstore. The pool is //! responsible for inserting blob data for new transactions into the blobstore. -//! See also [ValidTransaction](validate::ValidTransaction) +//! See also [`ValidTransaction`](validate::ValidTransaction) //! //! //!
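// Sketch of the penalization split described above: only errors caused by the
// transaction itself should count against the peer that sent it. The error
// type is hypothetical; the real hook is PoolTransactionError.
use std::{error::Error, fmt};

#[derive(Debug)]
enum SketchPoolError {
    // the sender composed an invalid transaction: worth penalizing the peer
    OversizedInput(usize, usize),
    // pool-local condition: the peer did nothing wrong
    PoolFull,
}

impl fmt::Display for SketchPoolError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::OversizedInput(got, max) => write!(f, "input {got} exceeds max {max}"),
            Self::PoolFull => write!(f, "pool capacity exceeded"),
        }
    }
}

impl Error for SketchPoolError {}

impl SketchPoolError {
    fn is_bad_transaction(&self) -> bool {
        matches!(self, Self::OversizedInput(..))
    }
}

fn main() {
    assert!(SketchPoolError::OversizedInput(200_000, 131_072).is_bad_transaction());
    assert!(!SketchPoolError::PoolFull.is_bad_transaction());
}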
## Examples @@ -279,8 +279,8 @@ where Client: StateProviderFactory + reth_provider::BlockReaderIdExt + Clone + 'static, S: BlobStore, { - /// Returns a new [Pool] that uses the default [TransactionValidationTaskExecutor] when - /// validating [EthPooledTransaction]s and ords via [CoinbaseTipOrdering] + /// Returns a new [Pool] that uses the default [`TransactionValidationTaskExecutor`] when + /// validating [`EthPooledTransaction`]s and orders via [`CoinbaseTipOrdering`] /// /// # Example /// diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 5da2e4e76..830252460 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -489,7 +489,7 @@ impl MaintainedPoolState { } } -/// A unique ChangedAccount identified by its address that can be used for deduplication +/// A unique `ChangedAccount` identified by its address that can be used for deduplication #[derive(Eq)] struct ChangedAccountEntry(ChangedAccount); @@ -554,7 +554,7 @@ where Ok(res) } -/// Extracts all changed accounts from the BundleState +/// Extracts all changed accounts from the `BundleState` fn changed_accounts_iter( state: &BundleStateWithReceipts, ) -> impl Iterator + '_ { diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 35ab9e5cc..a595b9f4d 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -131,7 +131,7 @@ impl BestTransactions { } } - /// Checks for new transactions that have come into the PendingPool after this iterator was + /// Checks for new transactions that have come into the `PendingPool` after this iterator was /// created and inserts them fn add_new_transactions(&mut self) { while let Some(pending_tx) = self.try_recv() { @@ -204,7 +204,7 @@ impl Iterator for BestTransactions { /// transactions of iter with predicate. /// /// Filter out transactions that are marked as invalid: -/// [BestTransactions::mark_invalid](crate::traits::BestTransactions::mark_invalid). +/// [`BestTransactions::mark_invalid`](crate::traits::BestTransactions::mark_invalid). pub struct BestTransactionFilter { pub(crate) best: I, pub(crate) predicate: P, diff --git a/crates/transaction-pool/src/pool/blob.rs b/crates/transaction-pool/src/pool/blob.rs index b49b0db98..1683f23fd 100644 --- a/crates/transaction-pool/src/pool/blob.rs +++ b/crates/transaction-pool/src/pool/blob.rs @@ -168,7 +168,7 @@ impl BlobTransactions { transactions } - /// Resorts the transactions in the pool based on the pool's current [PendingFees]. + /// Resorts the transactions in the pool based on the pool's current [`PendingFees`]. pub(crate) fn reprioritize(&mut self) { // mem::take to modify without allocating, then collect to rebuild the BTreeSet self.all = std::mem::take(&mut self.all) @@ -190,7 +190,7 @@ impl BlobTransactions { /// * have a `max_fee_per_blob_gas` greater than or equal to the given `blob_fee`, _and_ /// * have a `max_fee_per_gas` greater than or equal to the given `base_fee` /// - /// This also sets the [PendingFees] for the pool, resorting transactions based on their + /// This also sets the [`PendingFees`] for the pool, resorting transactions based on their /// updated priority. /// /// Note: the transactions are not returned in a particular order. @@ -212,10 +212,10 @@ impl BlobTransactions { removed } - /// Removes transactions until the pool satisfies its [SubPoolLimit]. + /// Removes transactions until the pool satisfies its [`SubPoolLimit`].
/// /// This is done by removing transactions according to their ordering in the pool, defined by - /// the [BlobOrd] struct. + /// the [`BlobOrd`] struct. /// /// Removed transactions are returned in the order they were removed. pub(crate) fn truncate_pool( @@ -407,7 +407,7 @@ pub fn blob_tx_priority( /// A struct used to determine the ordering for a specific blob transaction in the pool. This uses /// a `priority` value to determine the ordering, and uses the `submission_id` to break ties. /// -/// The `priority` value is calculated using the [blob_tx_priority] function, and should be +/// The `priority` value is calculated using the [`blob_tx_priority`] function, and should be /// re-calculated on each block. #[derive(Debug, Clone)] struct BlobOrd { diff --git a/crates/transaction-pool/src/pool/listener.rs b/crates/transaction-pool/src/pool/listener.rs index 1b3230d51..b9d6c46b5 100644 --- a/crates/transaction-pool/src/pool/listener.rs +++ b/crates/transaction-pool/src/pool/listener.rs @@ -20,7 +20,7 @@ use tokio::sync::mpsc::{ /// The size of the event channel used to propagate transaction events. const TX_POOL_EVENT_CHANNEL_SIZE: usize = 1024; -/// A Stream that receives [TransactionEvent] only for the transaction with the given hash. +/// A Stream that receives [`TransactionEvent`] only for the transaction with the given hash. #[derive(Debug)] #[must_use = "streams do nothing unless polled"] pub struct TransactionEvents { @@ -46,7 +46,7 @@ impl Stream for TransactionEvents { } } -/// A Stream that receives [FullTransactionEvent] for _all_ transaction. +/// A Stream that receives [`FullTransactionEvent`] for _all_ transactions. #[derive(Debug)] #[must_use = "streams do nothing unless polled"] pub struct AllTransactionsEvents { @@ -184,7 +184,7 @@ impl PoolEventBroadcast { /// All Sender half(s) of the event channels for all transactions. /// -/// This mimics [tokio::sync::broadcast] but uses separate channels. +/// This mimics [`tokio::sync::broadcast`] but uses separate channels. #[derive(Debug)] struct AllPoolEventsBroadcaster { /// Corresponding sender half(s) for event listener channel @@ -209,7 +209,7 @@ impl AllPoolEventsBroadcaster { /// All Sender half(s) of the event channels for a specific transaction. /// -/// This mimics [tokio::sync::broadcast] but uses separate channels and is unbounded. +/// This mimics [`tokio::sync::broadcast`] but uses separate channels and is unbounded. #[derive(Default, Debug)] struct PoolEventBroadcaster { /// Corresponding sender half(s) for event listener channel diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index a23db3964..3fc8f9779 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -296,7 +296,7 @@ where self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).collect() } - /// Returns the [BlobTransaction] for the given transaction if the sidecar exists. + /// Returns the [`BlobTransaction`] for the given transaction if the sidecar exists. /// /// Caution: this assumes the given transaction is eip-4844 fn get_blob_transaction(&self, transaction: TransactionSigned) -> Option { @@ -308,7 +308,7 @@ where None } - /// Returns converted [PooledTransactionsElement] for the given transaction hashes. + /// Returns converted [`PooledTransactionsElement`] for the given transaction hashes.
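// Sketch of the "broadcast over separate channels" pattern noted above:
// one unbounded sender per subscriber; senders whose receivers were dropped
// are pruned on the next broadcast. Hypothetical helper, plain tokio.
use tokio::sync::mpsc;

struct Broadcaster<T: Clone> {
    senders: Vec<mpsc::UnboundedSender<T>>,
}

impl<T: Clone> Broadcaster<T> {
    fn new() -> Self {
        Self { senders: Vec::new() }
    }

    fn subscribe(&mut self) -> mpsc::UnboundedReceiver<T> {
        let (tx, rx) = mpsc::unbounded_channel();
        self.senders.push(tx);
        rx
    }

    fn broadcast(&mut self, event: T) {
        // `send` fails once the receiver half is gone; retain drops those
        self.senders.retain(|tx| tx.send(event.clone()).is_ok());
    }
}

#[tokio::main]
async fn main() {
    let mut broadcaster = Broadcaster::new();
    let mut rx = broadcaster.subscribe();
    broadcaster.broadcast("pending");
    assert_eq!(rx.recv().await, Some("pending"));
}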
pub(crate) fn get_pooled_transaction_elements( &self, tx_hashes: Vec, @@ -351,7 +351,7 @@ where elements } - /// Returns converted [PooledTransactionsElement] for the given transaction hash. + /// Returns converted [`PooledTransactionsElement`] for the given transaction hash. pub(crate) fn get_pooled_transaction_element( &self, tx_hash: TxHash, @@ -725,7 +725,7 @@ where self.get_pool_data().get_transactions_by_sender(sender_id) } - /// Returns all transactions that where submitted with the given [TransactionOrigin] + /// Returns all transactions that were submitted with the given [`TransactionOrigin`] pub(crate) fn get_transactions_by_origin( &self, origin: TransactionOrigin, @@ -934,9 +934,9 @@ pub struct AddedPendingTransaction { impl AddedPendingTransaction { /// Returns all transactions that were promoted to the pending pool and adhere to the given - /// [TransactionListenerKind]. + /// [`TransactionListenerKind`]. /// - /// If the kind is [TransactionListenerKind::PropagateOnly], then only transactions that + /// If the kind is [`TransactionListenerKind::PropagateOnly`], then only transactions that /// are allowed to be propagated are returned. pub(crate) fn pending_transactions( &self, @@ -1078,7 +1078,7 @@ impl AddedTransaction { } } - /// Returns the [TransactionId] of the added transaction + /// Returns the [`TransactionId`] of the added transaction #[cfg(test)] pub(crate) fn id(&self) -> &TransactionId { match self { @@ -1103,9 +1103,9 @@ pub(crate) struct OnNewCanonicalStateOutcome { impl OnNewCanonicalStateOutcome { /// Returns all transactions that were promoted to the pending pool and adhere to the given - /// [TransactionListenerKind]. + /// [`TransactionListenerKind`]. /// - /// If the kind is [TransactionListenerKind::PropagateOnly], then only transactions that + /// If the kind is [`TransactionListenerKind::PropagateOnly`], then only transactions that /// are allowed to be propagated are returned. pub(crate) fn pending_transactions( &self, @@ -1116,9 +1116,9 @@ impl OnNewCanonicalStateOutcome { } /// Returns all FULL transactions that were promoted to the pending pool and adhere to the given - /// [TransactionListenerKind]. + /// [`TransactionListenerKind`]. /// - /// If the kind is [TransactionListenerKind::PropagateOnly], then only transactions that + /// If the kind is [`TransactionListenerKind::PropagateOnly`], then only transactions that /// are allowed to be propagated are returned. pub(crate) fn full_pending_transactions( &self, diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index ef0766bed..c36f3bd6c 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -19,8 +19,8 @@ use std::{ /// This pool is a bijection: at all times each set (`best`, `by_id`) contains the same /// transactions. /// -/// Note: This type is generic over [ParkedPool] which enforces that the underlying transaction type -/// is [ValidPoolTransaction] wrapped in an [Arc]. +/// Note: This type is generic over [`ParkedPool`] which enforces that the underlying transaction +/// type is [`ValidPoolTransaction`] wrapped in an [Arc]. #[derive(Debug, Clone)] pub struct ParkedPool { /// Keeps track of transactions inserted in the pool. @@ -171,7 +171,7 @@ impl ParkedPool { self.last_sender_submission.iter().cloned() } - /// Truncates the pool by removing transactions, until the given [SubPoolLimit] has been met.
+ /// Truncates the pool by removing transactions, until the given [`SubPoolLimit`] has been met. /// /// This is done by first ordering senders by the last time they have submitted a transaction /// @@ -384,7 +384,7 @@ impl Ord for ParkedPoolTransaction { } } -/// Includes a [SenderId] and `submission_id`. This is used to sort senders by their last +/// Includes a [`SenderId`] and `submission_id`. This is used to sort senders by their last /// submission id. #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub(crate) struct SubmissionSenderId { @@ -395,7 +395,7 @@ pub(crate) struct SubmissionSenderId { } impl SubmissionSenderId { - /// Creates a new [SubmissionSenderId] based on the [SenderId] and `submission_id`. + /// Creates a new [`SubmissionSenderId`] based on the [`SenderId`] and `submission_id`. const fn new(sender_id: SenderId, submission_id: u64) -> Self { Self { sender_id, submission_id } } diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index d78af7908..2e1154bfb 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -50,8 +50,8 @@ pub struct PendingPool { /// /// See also [`PoolTransaction::size`](crate::traits::PoolTransaction::size). size_of: SizeTracker, - /// Used to broadcast new transactions that have been added to the PendingPool to existing - /// static_files of this pool. + /// Used to broadcast new transactions that have been added to the `PendingPool` to existing + /// `static_files` of this pool. new_transaction_notifier: broadcast::Sender>, } @@ -126,7 +126,7 @@ impl PendingPool { /// Same as `best` but also includes the given unlocked transactions. /// - /// This mimics the [Self::add_transaction] method, but does not insert the transactions into + /// This mimics the [`Self::add_transaction`] method, but does not insert the transactions into /// pool but only into the returned iterator. /// /// Note: this does not insert the unlocked transactions into the pool. @@ -443,12 +443,12 @@ impl PendingPool { } } - /// Truncates the pool to the given [SubPoolLimit], removing transactions until the subpool + /// Truncates the pool to the given [`SubPoolLimit`], removing transactions until the subpool /// limits are met. /// /// This attempts to remove transactions by roughly the same amount for each sender. For more /// information on this exact process see docs for - /// [remove_to_limit](PendingPool::remove_to_limit). + /// [`remove_to_limit`](PendingPool::remove_to_limit). /// /// This first truncates all of the non-local transactions in the pool. If the subpool is still /// not under the limit, this truncates the entire pool, including local transactions. The diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 28d0df38a..0fe999e09 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -41,7 +41,7 @@ use tracing::trace; /// /// This pool maintains the state of all transactions and stores them accordingly. /// -/// include_mmd!("docs/mermaid/txpool.mmd") +/// `include_mmd!("docs/mermaid/txpool.mmd")` pub struct TxPool { /// Contains the currently known information about the senders. sender_info: FxHashMap, @@ -647,7 +647,7 @@ impl TxPool { /// subpool.
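// Sketch of truncation-to-limit as described above. The limit type and the
// "worst transactions last" ordering are hypothetical; the real pools order
// removals by sender submission id (parked) or BlobOrd (blob).
struct Limit {
    max_txs: usize,
    max_total_size: usize,
}

fn truncate_to_limit(txs: &mut Vec<(u64, usize)>, limit: &Limit) -> Vec<(u64, usize)> {
    let mut total: usize = txs.iter().map(|(_, size)| size).sum();
    let mut removed = Vec::new();
    // assume `txs` is sorted best-first, so removals pop from the back
    while txs.len() > limit.max_txs || total > limit.max_total_size {
        match txs.pop() {
            Some(tx) => {
                total -= tx.1;
                removed.push(tx);
            }
            None => break,
        }
    }
    removed
}

fn main() {
    let mut pool = vec![(1, 100), (2, 100), (3, 100)];
    let removed = truncate_to_limit(&mut pool, &Limit { max_txs: 2, max_total_size: 150 });
    // both limits must hold afterwards: only the best transaction survives
    assert_eq!(removed.len(), 2);
    assert_eq!(pool, vec![(1, 100)]);
}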
/// /// This is intended to be used when a transaction is included in a block, - /// [Self::on_canonical_state_change] + /// [`Self::on_canonical_state_change`] fn prune_transaction_by_hash( &mut self, tx_hash: &B256, @@ -912,7 +912,7 @@ pub(crate) struct AllTransactions { pending_fees: PendingFees, /// Configured price bump settings for replacements price_bumps: PriceBumpConfig, - /// How to handle [TransactionOrigin::Local](crate::TransactionOrigin) transactions. + /// How to handle [`TransactionOrigin::Local`](crate::TransactionOrigin) transactions. local_transactions_config: LocalTransactionConfig, /// All Transactions metrics metrics: AllTransactionsMetrics, @@ -1272,7 +1272,7 @@ impl AllTransactions { /// Checks if the given transaction's type conflicts with an existing transaction. /// - /// See also [ValidPoolTransaction::tx_type_conflicts_with]. + /// See also [`ValidPoolTransaction::tx_type_conflicts_with`]. /// /// Caution: This assumes that mutually exclusive invariant is always true for the same sender. #[inline] diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 171c29d91..e116a63d0 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -23,12 +23,12 @@ use reth_primitives::{ }; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; -/// A transaction pool implementation using [MockOrdering] for transaction ordering. +/// A transaction pool implementation using [`MockOrdering`] for transaction ordering. /// /// This type is an alias for [`TxPool`]. pub type MockTxPool = TxPool; -/// A validated transaction in the transaction pool, using [MockTransaction] as the transaction +/// A validated transaction in the transaction pool, using [`MockTransaction`] as the transaction /// type. /// /// This type is an alias for [`ValidPoolTransaction`]. @@ -113,7 +113,7 @@ pub enum MockTransaction { value: U256, /// The transaction input data. input: Bytes, - /// The size of the transaction, returned in the implementation of [PoolTransaction]. + /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, }, /// EIP-2930 transaction type. @@ -138,7 +138,7 @@ pub enum MockTransaction { gas_price: u128, /// The access list associated with the transaction. access_list: AccessList, - /// The size of the transaction, returned in the implementation of [PoolTransaction]. + /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, }, /// EIP-1559 transaction type. @@ -165,7 +165,7 @@ pub enum MockTransaction { access_list: AccessList, /// The transaction input data. input: Bytes, - /// The size of the transaction, returned in the implementation of [PoolTransaction]. + /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, }, /// EIP-4844 transaction type. @@ -198,7 +198,7 @@ pub enum MockTransaction { input: Bytes, /// The sidecar information for the transaction. sidecar: BlobTransactionSidecar, - /// The size of the transaction, returned in the implementation of [PoolTransaction]. + /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, }, } @@ -297,14 +297,14 @@ impl MockTransaction { transaction } - /// Creates a new transaction with the given [TxType]. + /// Creates a new transaction with the given [`TxType`]. 
/// /// See the default constructors for each of the transaction types: /// - /// * [MockTransaction::legacy] - /// * [MockTransaction::eip2930] - /// * [MockTransaction::eip1559] - /// * [MockTransaction::eip4844] + /// * [`MockTransaction::legacy`] + /// * [`MockTransaction::eip2930`] + /// * [`MockTransaction::eip1559`] + /// * [`MockTransaction::eip4844`] pub fn new_from_type(tx_type: TxType) -> Self { #[allow(unreachable_patterns)] match tx_type { @@ -526,7 +526,7 @@ impl MockTransaction { this } - /// Returns the transaction type identifier associated with the current [MockTransaction]. + /// Returns the transaction type identifier associated with the current [`MockTransaction`]. pub const fn tx_type(&self) -> u8 { match self { Self::Legacy { .. } => LEGACY_TX_TYPE_ID, @@ -1108,18 +1108,18 @@ pub struct MockTransactionFactory { // === impl MockTransactionFactory === impl MockTransactionFactory { - /// Generates a transaction ID for the given [MockTransaction]. + /// Generates a transaction ID for the given [`MockTransaction`]. pub fn tx_id(&mut self, tx: &MockTransaction) -> TransactionId { let sender = self.ids.sender_id_or_create(tx.get_sender()); TransactionId::new(sender, tx.get_nonce()) } - /// Validates a [MockTransaction] and returns a [MockValidTx]. + /// Validates a [`MockTransaction`] and returns a [`MockValidTx`]. pub fn validated(&mut self, transaction: MockTransaction) -> MockValidTx { self.validated_with_origin(TransactionOrigin::External, transaction) } - /// Validates a [MockTransaction] and returns a shared [`Arc`]. + /// Validates a [`MockTransaction`] and returns a shared [`Arc`]. pub fn validated_arc(&mut self, transaction: MockTransaction) -> Arc { Arc::new(self.validated(transaction)) } @@ -1139,27 +1139,27 @@ impl MockTransactionFactory { } } - /// Creates a validated legacy [MockTransaction]. + /// Creates a validated legacy [`MockTransaction`]. pub fn create_legacy(&mut self) -> MockValidTx { self.validated(MockTransaction::legacy()) } - /// Creates a validated EIP-1559 [MockTransaction]. + /// Creates a validated EIP-1559 [`MockTransaction`]. pub fn create_eip1559(&mut self) -> MockValidTx { self.validated(MockTransaction::eip1559()) } - /// Creates a validated EIP-4844 [MockTransaction]. + /// Creates a validated EIP-4844 [`MockTransaction`]. pub fn create_eip4844(&mut self) -> MockValidTx { self.validated(MockTransaction::eip4844()) } } -/// MockOrdering is just a CoinbaseTipOrdering with MockTransaction +/// `MockOrdering` is just a `CoinbaseTipOrdering` with `MockTransaction` pub type MockOrdering = CoinbaseTipOrdering; /// A ratio of each of the configured transaction types. The percentages sum up to 100, this is -/// enforced in [MockTransactionRatio::new] by an assert. +/// enforced in [`MockTransactionRatio::new`] by an assert. #[derive(Debug, Clone)] pub struct MockTransactionRatio { /// Percent of transactions that are legacy transactions @@ -1173,7 +1173,7 @@ pub struct MockTransactionRatio { } impl MockTransactionRatio { - /// Creates a new [MockTransactionRatio] with the given percentages. + /// Creates a new [`MockTransactionRatio`] with the given percentages. /// /// Each argument is treated as a full percent, for example `30u32` is `30%`. /// @@ -1189,7 +1189,7 @@ impl MockTransactionRatio { Self { legacy_pct, access_list_pct, dynamic_fee_pct, blob_pct } } - /// Create a [WeightedIndex] from this transaction ratio. + /// Create a [`WeightedIndex`] from this transaction ratio. 
/// /// This index will sample in the following order: /// * Legacy transaction => 0 @@ -1210,7 +1210,7 @@ impl MockTransactionRatio { /// The range of each type of fee, for the different transaction types #[derive(Debug, Clone)] pub struct MockFeeRange { - /// The range of gas_price or legacy and access list transactions + /// The range of `gas_price` for legacy and access list transactions pub gas_price: Uniform, /// The range of priority fees for EIP-1559 and EIP-4844 transactions pub priority_fee: Uniform, @@ -1221,7 +1221,7 @@ pub struct MockFeeRange { } impl MockFeeRange { - /// Creates a new [MockFeeRange] with the given ranges. + /// Creates a new [`MockFeeRange`] with the given ranges. /// /// Expects the bottom of the `priority_fee_range` to be greater than the top of the /// `max_fee_range`. @@ -1412,7 +1412,7 @@ pub enum NonConflictingSetOutcome { } impl NonConflictingSetOutcome { - /// Returns the inner [MockTransactionSet] + /// Returns the inner [`MockTransactionSet`] pub fn into_inner(self) -> MockTransactionSet { match self { Self::BlobsOnly(set) | Self::Mixed(set) => set, @@ -1422,10 +1422,10 @@ impl NonConflictingSetOutcome { /// Introduces artificial nonce gaps into the transaction set, at random, with a range of gap /// sizes. /// - /// If this is a [NonConflictingSetOutcome::BlobsOnly], then nonce gaps will not be introduced. - /// Otherwise, the nonce gaps will be introduced to the mixed transaction set. + /// If this is a [`NonConflictingSetOutcome::BlobsOnly`], then nonce gaps will not be + /// introduced. Otherwise, the nonce gaps will be introduced to the mixed transaction set. /// - /// See [MockTransactionSet::with_nonce_gaps] for more information on the generation process. + /// See [`MockTransactionSet::with_nonce_gaps`] for more information on the generation process.
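// Sketch of sampling transaction types from the percentage weights above,
// using rand's WeightedIndex (rand 0.8 API assumed).
use rand::distributions::{Distribution, WeightedIndex};

fn main() {
    // legacy, access list (EIP-2930), dynamic fee (EIP-1559), blob (EIP-4844)
    let weights = [30u32, 10, 50, 10]; // the ratio type asserts these sum to 100
    let dist = WeightedIndex::new(weights).expect("weights are non-zero");
    let mut rng = rand::thread_rng();

    let label = match dist.sample(&mut rng) {
        0 => "legacy",
        1 => "eip2930",
        2 => "eip1559",
        _ => "eip4844",
    };
    println!("sampled transaction type: {label}");
}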
pub fn with_nonce_gaps( &mut self, gap_pct: u32, @@ -1439,14 +1439,14 @@ impl NonConflictingSetOutcome { } } -/// A set of [MockTransaction]s that can be modified at once +/// A set of [`MockTransaction`]s that can be modified at once #[derive(Debug, Clone)] pub struct MockTransactionSet { pub(crate) transactions: Vec, } impl MockTransactionSet { - /// Create a new [MockTransactionSet] from a list of transactions + /// Create a new [`MockTransactionSet`] from a list of transactions fn new(transactions: Vec) -> Self { Self { transactions } } @@ -1514,12 +1514,12 @@ impl MockTransactionSet { } } - /// Add transactions to the [MockTransactionSet] + /// Add transactions to the [`MockTransactionSet`] pub fn extend>(&mut self, txs: T) { self.transactions.extend(txs); } - /// Extract the inner [Vec] of [MockTransaction]s + /// Extract the inner [Vec] of [`MockTransaction`]s pub fn into_vec(self) -> Vec { self.transactions } diff --git a/crates/transaction-pool/src/test_utils/mod.rs b/crates/transaction-pool/src/test_utils/mod.rs index e2f08634c..363d6e2a4 100644 --- a/crates/transaction-pool/src/test_utils/mod.rs +++ b/crates/transaction-pool/src/test_utils/mod.rs @@ -15,7 +15,7 @@ mod pool; pub type TestPool = Pool, MockOrdering, InMemoryBlobStore>; -/// Structure encapsulating a [TestPool] used for testing +/// Structure encapsulating a [`TestPool`] used for testing #[derive(Debug, Clone)] pub struct TestPoolBuilder(TestPool); @@ -31,7 +31,7 @@ impl Default for TestPoolBuilder { } impl TestPoolBuilder { - /// Returns a new [TestPoolBuilder] with a custom validator used for testing purposes + /// Returns a new [`TestPoolBuilder`] with a custom validator used for testing purposes pub fn with_validator(self, validator: MockTransactionValidator) -> Self { Self(Pool::new( validator, @@ -41,7 +41,7 @@ impl TestPoolBuilder { )) } - /// Returns a new [TestPoolBuilder] with a custom ordering used for testing purposes + /// Returns a new [`TestPoolBuilder`] with a custom ordering used for testing purposes pub fn with_ordering(self, ordering: MockOrdering) -> Self { Self(Pool::new( self.pool.validator().clone(), @@ -51,7 +51,7 @@ impl TestPoolBuilder { )) } - /// Returns a new [TestPoolBuilder] with a custom blob store used for testing purposes + /// Returns a new [`TestPoolBuilder`] with a custom blob store used for testing purposes pub fn with_blob_store(self, blob_store: InMemoryBlobStore) -> Self { Self(Pool::new( self.pool.validator().clone(), @@ -61,7 +61,7 @@ impl TestPoolBuilder { )) } - /// Returns a new [TestPoolBuilder] with a custom configuration used for testing purposes + /// Returns a new [`TestPoolBuilder`] with a custom configuration used for testing purposes pub fn with_config(self, config: PoolConfig) -> Self { Self(Pool::new( self.pool.validator().clone(), diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 58377a19e..b8bd64e44 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -28,7 +28,7 @@ use std::{ }; use tokio::sync::mpsc::Receiver; -/// The PeerId type. +/// The `PeerId` type. pub type PeerId = reth_primitives::B512; /// General purpose abstraction of a transaction-pool. @@ -432,7 +432,7 @@ pub enum TransactionListenerKind { All, /// Only transactions that are allowed to be propagated. 
/// - /// See also [ValidPoolTransaction] + /// See also [`ValidPoolTransaction`] PropagateOnly, } @@ -458,12 +458,12 @@ pub struct AllPoolTransactions { // === impl AllPoolTransactions === impl AllPoolTransactions { - /// Returns an iterator over all pending [TransactionSignedEcRecovered] transactions. + /// Returns an iterator over all pending [`TransactionSignedEcRecovered`] transactions. pub fn pending_recovered(&self) -> impl Iterator + '_ { self.pending.iter().map(|tx| tx.transaction.to_recovered_transaction()) } - /// Returns an iterator over all queued [TransactionSignedEcRecovered] transactions. + /// Returns an iterator over all queued [`TransactionSignedEcRecovered`] transactions. pub fn queued_recovered(&self) -> impl Iterator + '_ { self.queued.iter().map(|tx| tx.transaction.to_recovered_transaction()) } @@ -526,7 +526,7 @@ impl Clone for NewTransactionEvent { } /// This type represents a new blob sidecar that has been stored in the transaction pool's -/// blobstore; it includes the TransactionHash of the blob transaction along with the assoc. +/// blobstore; it includes the `TransactionHash` of the blob transaction along with the associated /// sidecar (blobs, commitments, proofs) #[derive(Debug, Clone)] pub struct NewBlobSidecar { @@ -784,12 +784,12 @@ pub trait PoolTransaction: /// Returns the EIP-1559 maximum fee per gas the caller is willing to pay. /// - /// For legacy transactions this is gas_price. + /// For legacy transactions this is `gas_price`. /// /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). fn max_fee_per_gas(&self) -> u128; - /// Returns the access_list for the particular transaction type. + /// Returns the `access_list` for the particular transaction type. /// For Legacy transactions, returns default. fn access_list(&self) -> Option<&AccessList>; @@ -817,7 +817,7 @@ pub trait PoolTransaction: /// [`TxKind::Create`] if the transaction is a contract creation. fn kind(&self) -> TxKind; - /// Returns the recipient of the transaction if it is not a [TxKind::Create] + /// Returns the recipient of the transaction if it is not a [`TxKind::Create`] /// transaction. fn to(&self) -> Option
{ self.kind().to().copied() @@ -847,12 +847,12 @@ pub trait PoolTransaction: /// Note: Implementations should cache this value. fn encoded_length(&self) -> usize; - /// Returns chain_id + /// Returns `chain_id` fn chain_id(&self) -> Option; } /// An extension trait that provides additional interfaces for the -/// [EthTransactionValidator](crate::EthTransactionValidator). +/// [`EthTransactionValidator`](crate::EthTransactionValidator). pub trait EthPoolTransaction: PoolTransaction { /// Extracts the blob sidecar from the transaction. fn take_blob(&mut self) -> EthBlobTransactionSidecar; @@ -868,13 +868,13 @@ pub trait EthPoolTransaction: PoolTransaction { ) -> Result<(), BlobTransactionValidationError>; } -/// The default [PoolTransaction] for the [Pool](crate::Pool) for Ethereum. +/// The default [`PoolTransaction`] for the [Pool](crate::Pool) for Ethereum. /// -/// This type is essentially a wrapper around [TransactionSignedEcRecovered] with additional fields -/// derived from the transaction that are frequently used by the pools for ordering. +/// This type is essentially a wrapper around [`TransactionSignedEcRecovered`] with additional +/// fields derived from the transaction that are frequently used by the pools for ordering. #[derive(Debug, Clone, PartialEq, Eq)] pub struct EthPooledTransaction { - /// EcRecovered transaction info + /// `EcRecovered` transaction info pub(crate) transaction: TransactionSignedEcRecovered, /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. @@ -891,7 +891,7 @@ pub struct EthPooledTransaction { pub(crate) blob_sidecar: EthBlobTransactionSidecar, } -/// Represents the blob sidecar of the [EthPooledTransaction]. +/// Represents the blob sidecar of the [`EthPooledTransaction`]. #[derive(Debug, Clone, PartialEq, Eq)] pub enum EthBlobTransactionSidecar { /// This transaction does not have a blob sidecar @@ -919,7 +919,7 @@ impl EthPooledTransaction { /// Create new instance of [Self]. /// /// Caution: In case of blob transactions, this marks the blob sidecar as - /// [EthBlobTransactionSidecar::Missing] + /// [`EthBlobTransactionSidecar::Missing`] pub fn new(transaction: TransactionSignedEcRecovered, encoded_length: usize) -> Self { let mut blob_sidecar = EthBlobTransactionSidecar::None; @@ -1014,7 +1014,7 @@ impl PoolTransaction for EthPooledTransaction { /// Returns the EIP-1559 Max base fee the caller is willing to pay. /// - /// For legacy transactions this is gas_price. + /// For legacy transactions this is `gas_price`. /// /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). fn max_fee_per_gas(&self) -> u128 { @@ -1088,7 +1088,7 @@ impl PoolTransaction for EthPooledTransaction { self.encoded_length } - /// Returns chain_id + /// Returns `chain_id` fn chain_id(&self) -> Option { self.transaction.chain_id() } @@ -1217,7 +1217,7 @@ pub struct BlockInfo { pub pending_blob_fee: Option, } -/// The limit to enforce for [TransactionPool::get_pooled_transaction_elements]. +/// The limit to enforce for [`TransactionPool::get_pooled_transaction_elements`]. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum GetPooledTransactionLimit { /// No limit, return all transactions.
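// Worked sketch of the conservative cost precomputation mentioned above for
// EIP-1559-style transactions (`max_fee_per_gas * gas_limit + tx_value`);
// u128 stands in for the U256 the real pool uses.
fn conservative_cost(max_fee_per_gas: u128, gas_limit: u64, value: u128) -> u128 {
    max_fee_per_gas
        .saturating_mul(gas_limit as u128)
        .saturating_add(value)
}

fn main() {
    // 2 gwei fee cap, 21_000 gas, 1 ether (in wei) transferred
    let cost = conservative_cost(2_000_000_000, 21_000, 1_000_000_000_000_000_000);
    // worst-case balance the sender must hold for this transaction
    assert_eq!(cost, 1_000_042_000_000_000_000);
}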
diff --git a/crates/transaction-pool/src/validate/constants.rs b/crates/transaction-pool/src/validate/constants.rs index e1d44cf01..489677057 100644 --- a/crates/transaction-pool/src/validate/constants.rs +++ b/crates/transaction-pool/src/validate/constants.rs @@ -1,5 +1,5 @@ /// [`TX_SLOT_BYTE_SIZE`] is used to calculate how many data slots a single transaction -/// takes up based on its byte size. The slots are used as DoS protection, ensuring +/// takes up based on its byte size. The slots are used as `DoS` protection, ensuring /// that validating a new transaction remains a constant operation (in reality /// O(maxslots), where max slots are 4 currently). pub const TX_SLOT_BYTE_SIZE: usize = 32 * 1024; diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 057154d2f..bfb9157fb 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -53,7 +53,7 @@ where { /// Validates a single transaction. /// - /// See also [TransactionValidator::validate_transaction] + /// See also [`TransactionValidator::validate_transaction`] pub fn validate_one( &self, origin: TransactionOrigin, @@ -66,7 +66,7 @@ where /// /// Returns all outcomes for the given transactions in the same order. /// - /// See also [Self::validate_one] + /// See also [`Self::validate_one`] pub fn validate_all( &self, transactions: Vec<(TransactionOrigin, Tx)>, @@ -102,7 +102,7 @@ where } } -/// A [TransactionValidator] implementation that validates ethereum transaction. +/// A [`TransactionValidator`] implementation that validates Ethereum transactions. #[derive(Debug)] pub(crate) struct EthTransactionValidatorInner { /// Spec of the chain chain_spec: Arc, @@ -125,7 +125,7 @@ pub(crate) struct EthTransactionValidatorInner { minimum_priority_fee: Option, /// Stores the setup and parameters needed for validating KZG proofs. kzg_settings: Arc, - /// How to handle [TransactionOrigin::Local](TransactionOrigin) transactions. + /// How to handle [`TransactionOrigin::Local`](TransactionOrigin) transactions. local_transactions_config: LocalTransactionConfig, /// Maximum size in bytes a single transaction can have in order to be accepted into the pool. max_tx_input_bytes: usize, @@ -410,7 +410,7 @@ where } } -/// A builder for [TransactionValidationTaskExecutor] +/// A builder for [`TransactionValidationTaskExecutor`] #[derive(Debug, Clone)] pub struct EthTransactionValidatorBuilder { chain_spec: Arc, @@ -435,14 +435,14 @@ pub struct EthTransactionValidatorBuilder { /// Stores the setup and parameters needed for validating KZG proofs. kzg_settings: Arc, - /// How to handle [TransactionOrigin::Local](TransactionOrigin) transactions. + /// How to handle [`TransactionOrigin::Local`](TransactionOrigin) transactions. local_transactions_config: LocalTransactionConfig, /// Max size in bytes of a single transaction allowed max_tx_input_bytes: usize, } impl EthTransactionValidatorBuilder { - /// Creates a new builder for the given [ChainSpec] + /// Creates a new builder for the given [`ChainSpec`] /// /// By default this assumes the network is on the `Cancun` hardfork and the following /// transactions are allowed: @@ -537,7 +537,7 @@ impl EthTransactionValidatorBuilder { self } - /// Sets the [KzgSettings] to use for validating KZG proofs. + /// Sets the [`KzgSettings`] to use for validating KZG proofs.
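// Sketch of the slot accounting behind TX_SLOT_BYTE_SIZE above: a transaction
// occupies one slot per started 32 KiB of encoded size (the rounding-up rule
// is an assumption here, mirroring the DoS-protection rationale).
const TX_SLOT_BYTE_SIZE: usize = 32 * 1024;

fn tx_slots(encoded_len: usize) -> usize {
    // ceiling division; no overflow risk for realistic transaction sizes
    (encoded_len + TX_SLOT_BYTE_SIZE - 1) / TX_SLOT_BYTE_SIZE
}

fn main() {
    assert_eq!(tx_slots(100), 1);
    assert_eq!(tx_slots(32 * 1024), 1);
    assert_eq!(tx_slots(32 * 1024 + 1), 2);
}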
pub fn kzg_settings(mut self, kzg_settings: Arc) -> Self { self.kzg_settings = kzg_settings; self @@ -570,7 +570,7 @@ impl EthTransactionValidatorBuilder { self } - /// Builds a the [EthTransactionValidator] without spawning validator tasks. + /// Builds the [`EthTransactionValidator`] without spawning validator tasks. pub fn build( self, client: Client, @@ -616,8 +616,8 @@ impl EthTransactionValidatorBuilder { EthTransactionValidator { inner: Arc::new(inner) } } - /// Builds a the [EthTransactionValidator] and spawns validation tasks via the - /// [TransactionValidationTaskExecutor] + /// Builds the [`EthTransactionValidator`] and spawns validation tasks via the + /// [`TransactionValidationTaskExecutor`] /// /// The validator will spawn `additional_tasks` additional tasks for validation. /// @@ -699,7 +699,7 @@ pub fn ensure_max_init_code_size( /// Ensures that gas limit of the transaction exceeds the intrinsic gas of the transaction. /// -/// See also [calculate_intrinsic_gas_after_merge] +/// See also [`calculate_intrinsic_gas_after_merge`] pub fn ensure_intrinsic_gas( transaction: &T, is_shanghai: bool, diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 89e7f4812..9cadec256 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -37,7 +37,7 @@ pub enum TransactionValidationOutcome { state_nonce: u64, /// The validated transaction. /// - /// See also [ValidTransaction]. + /// See also [`ValidTransaction`]. /// /// If this is a _new_ EIP-4844 blob transaction, then this must contain the extracted /// sidecar. @@ -86,14 +86,14 @@ impl TransactionValidationOutcome { /// /// Note: Since blob transactions can be re-injected without their sidecar (after reorg), the /// validator can omit the sidecar if it is still in the blob store and return a -/// [ValidTransaction::Valid] instead. +/// [`ValidTransaction::Valid`] instead. #[derive(Debug)] pub enum ValidTransaction { /// A valid transaction without a sidecar. Valid(T), /// A valid transaction for which a sidecar should be stored. /// - /// Caution: The [TransactionValidator] must ensure that this is only returned for EIP-4844 + /// Caution: The [`TransactionValidator`] must ensure that this is only returned for EIP-4844 /// transactions. ValidWithSidecar { /// The valid EIP-4844 transaction. @@ -169,7 +169,7 @@ pub trait TransactionValidator: Send + Sync { /// * nonce >= next nonce of the sender /// * ... /// - /// See [InvalidTransactionError](reth_primitives::InvalidTransactionError) for common errors + /// See [`InvalidTransactionError`](reth_primitives::InvalidTransactionError) for common error /// variants. /// /// The transaction pool makes no additional assumptions about the validity of the transaction /// example nonce or balance changes. Hence, any validation checks must be applied in this /// function. /// - /// See [TransactionValidationTaskExecutor] for a reference implementation. + /// See [`TransactionValidationTaskExecutor`] for a reference implementation. fn validate_transaction( &self, origin: TransactionOrigin, @@ -189,7 +189,7 @@ pub trait TransactionValidator: Send + Sync { /// /// Must return all outcomes for the given transactions in the same order. /// - /// See also [Self::validate_transaction]. + /// See also [`Self::validate_transaction`].
fn validate_transactions( &self, transactions: Vec<(TransactionOrigin, Self::Transaction)>, @@ -213,7 +213,7 @@ pub trait TransactionValidator: Send + Sync { /// This is used as the internal representation of a transaction inside the pool. /// /// For EIP-4844 blob transactions this will _not_ contain the blob sidecar which is stored -/// separately in the [BlobStore](crate::blobstore::BlobStore). +/// separately in the [`BlobStore`](crate::blobstore::BlobStore). pub struct ValidPoolTransaction { /// The transaction pub transaction: T, diff --git a/crates/transaction-pool/src/validate/task.rs b/crates/transaction-pool/src/validate/task.rs index a18cdad84..f8f98e3f4 100644 --- a/crates/transaction-pool/src/validate/task.rs +++ b/crates/transaction-pool/src/validate/task.rs @@ -27,7 +27,7 @@ type ValidationStream = ReceiverStream; /// /// This listens for incoming validation jobs and executes them. /// -/// This should be spawned as a task: [ValidationTask::run] +/// This should be spawned as a task: [`ValidationTask::run`] #[derive(Clone)] pub struct ValidationTask { validation_jobs: Arc>, @@ -61,7 +61,7 @@ impl std::fmt::Debug for ValidationTask { } } -/// A sender new type for sending validation jobs to [ValidationTask]. +/// A sender newtype for sending validation jobs to [`ValidationTask`]. #[derive(Debug)] pub struct ValidationJobSender { tx: mpsc::Sender + Send>>>, @@ -77,7 +77,7 @@ impl ValidationJobSender { } } -/// A [TransactionValidator] implementation that validates ethereum transaction. +/// A [`TransactionValidator`] implementation that validates Ethereum transactions. /// /// This validator is non-blocking, all validation work is done in a separate task. #[derive(Debug, Clone)] @@ -91,7 +91,7 @@ pub struct TransactionValidationTaskExecutor { // === impl TransactionValidationTaskExecutor === impl TransactionValidationTaskExecutor<()> { - /// Convenience method to create a [EthTransactionValidatorBuilder] + /// Convenience method to create an [`EthTransactionValidatorBuilder`] pub fn eth_builder(chain_spec: Arc) -> EthTransactionValidatorBuilder { EthTransactionValidatorBuilder::new(chain_spec) } @@ -114,10 +114,10 @@ impl TransactionValidationTaskExecutor( client: Client, chain_spec: Arc, @@ -130,7 +130,7 @@ where Self::eth_with_additional_tasks(client, chain_spec, blob_store, tasks, 0) } - /// Creates a new instance for the given [ChainSpec] + /// Creates a new instance for the given [`ChainSpec`] /// /// By default this will enable support for: /// - shanghai diff --git a/crates/trie/parallel/src/async_root.rs b/crates/trie/parallel/src/async_root.rs index dec284bee..6c0d1c95e 100644 --- a/crates/trie/parallel/src/async_root.rs +++ b/crates/trie/parallel/src/async_root.rs @@ -31,10 +31,10 @@ use crate::metrics::ParallelStateRootMetrics; /// nodes in the process. Upon encountering a leaf node, it will poll the storage root /// task for the corresponding hashed address. /// -/// Internally, the calculator uses [ConsistentDbView] since +/// Internally, the calculator uses [`ConsistentDbView`] since /// it needs to rely on database state staying the same until /// the last transaction is open. -/// See docs of using [ConsistentDbView] for caveats. +/// See the docs of [`ConsistentDbView`] for usage caveats. /// /// For sync usage, take a look at `ParallelStateRoot`.
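// Sketch of the pre-computation strategy described above: compute per-account
// storage roots in parallel, then consume them while walking the account trie.
// std threads and the xor "root" are stand-ins for the real spawner and trie math.
use std::collections::HashMap;
use std::thread;

fn storage_root(slots: &[(u64, u64)]) -> u64 {
    // placeholder for the real storage trie root calculation
    slots.iter().fold(0, |acc, (k, v)| acc ^ k.rotate_left(17) ^ v)
}

fn main() {
    let accounts: HashMap<&'static str, Vec<(u64, u64)>> =
        HashMap::from([("alice", vec![(1, 2)]), ("bob", vec![(3, 4), (5, 6)])]);

    let handles: Vec<_> = accounts
        .into_iter()
        .map(|(addr, slots)| thread::spawn(move || (addr, storage_root(&slots))))
        .collect();

    let roots: HashMap<_, _> = handles.into_iter().map(|h| h.join().unwrap()).collect();
    // the account trie walk would now look up `roots[addr]` at each leaf
    assert_eq!(roots.len(), 2);
}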
#[derive(Debug)] diff --git a/crates/trie/parallel/src/parallel_root.rs b/crates/trie/parallel/src/parallel_root.rs index 513ae1577..35da558a6 100644 --- a/crates/trie/parallel/src/parallel_root.rs +++ b/crates/trie/parallel/src/parallel_root.rs @@ -29,10 +29,10 @@ use crate::metrics::ParallelStateRootMetrics; /// accounts in parallel. Once that's done, it proceeds to walking the state /// trie retrieving the pre-computed storage roots when needed. /// -/// Internally, the calculator uses [ConsistentDbView] since +/// Internally, the calculator uses [`ConsistentDbView`] since /// it needs to rely on database state staying the same until /// the last transaction is open. -/// See docs of using [ConsistentDbView] for caveats. +/// See the docs of [`ConsistentDbView`] for usage caveats. /// /// If possible, use the more optimized `AsyncStateRoot` instead. #[derive(Debug)] diff --git a/crates/trie/trie/src/hashed_cursor/default.rs b/crates/trie/trie/src/hashed_cursor/default.rs index e26dc48bf..4fb88d0b2 100644 --- a/crates/trie/trie/src/hashed_cursor/default.rs +++ b/crates/trie/trie/src/hashed_cursor/default.rs @@ -42,7 +42,7 @@ where } /// The structure wrapping a database cursor for hashed storage and -/// a target hashed address. Implements [HashedCursor] and [HashedStorageCursor] +/// a target hashed address. Implements [`HashedCursor`] and [`HashedStorageCursor`] /// for iterating over hashed storage. #[derive(Debug)] pub struct DatabaseHashedStorageCursor { @@ -53,7 +53,7 @@ pub struct DatabaseHashedStorageCursor { } impl DatabaseHashedStorageCursor { - /// Create new [DatabaseHashedStorageCursor]. + /// Create new [`DatabaseHashedStorageCursor`]. pub const fn new(cursor: C, hashed_address: B256) -> Self { Self { cursor, hashed_address } } diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs index 039ad429d..3f52fdc38 100644 --- a/crates/trie/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/trie/src/hashed_cursor/post_state.rs @@ -40,7 +40,7 @@ impl<'a, CF: HashedCursorFactory> HashedCursorFactory for HashedPostStateCursorF pub struct HashedPostStateAccountCursor<'b, C> { /// The database cursor. cursor: C, - /// The reference to the in-memory [HashedPostStateSorted]. + /// The reference to the in-memory [`HashedPostStateSorted`]. post_state: &'b HashedPostStateSorted, /// The post state account index where the cursor is currently at. post_state_account_index: usize, @@ -50,7 +50,7 @@ pub struct HashedPostStateAccountCursor<'b, C> { } impl<'b, C> HashedPostStateAccountCursor<'b, C> { - /// Create new instance of [HashedPostStateAccountCursor]. + /// Create new instance of [`HashedPostStateAccountCursor`]. pub const fn new(cursor: C, post_state: &'b HashedPostStateSorted) -> Self { Self { cursor, post_state, last_account: None, post_state_account_index: 0 } } @@ -101,7 +101,7 @@ where /// database and the post state. The two entries are compared and the lowest is returned. /// /// The returned account key is memoized and the cursor remains positioned at that key until - /// [HashedCursor::seek] or [HashedCursor::next] are called. + /// [`HashedCursor::seek`] or [`HashedCursor::next`] are called. fn seek(&mut self, key: B256) -> Result, reth_db::DatabaseError> { self.last_account = None; @@ -144,7 +144,7 @@ where /// If the cursor is positioned at the entry, return the entry with next greater key. /// Returns [None] if the previous memoized or the next greater entries are missing.
     ///
-    /// NOTE: This function will not return any entry unless [HashedCursor::seek] has been
+    /// NOTE: This function will not return any entry unless [`HashedCursor::seek`] has been
     /// called.
     fn next(&mut self) -> Result, reth_db::DatabaseError> {
         let last_account = match self.last_account.as_ref() {
@@ -194,7 +194,7 @@ pub struct HashedPostStateStorageCursor<'b, C> {
 }
 
 impl<'b, C> HashedPostStateStorageCursor<'b, C> {
-    /// Create new instance of [HashedPostStateStorageCursor] for the given hashed address.
+    /// Create a new instance of [`HashedPostStateStorageCursor`] for the given hashed address.
     pub const fn new(
         cursor: C,
         post_state: &'b HashedPostStateSorted,
@@ -304,7 +304,7 @@ where
     ///
     /// # Panics
     ///
-    /// If the account key is not set. [HashedCursor::seek] must be called first in order to
+    /// If the account key is not set. [`HashedCursor::seek`] must be called first in order to
     /// position the cursor.
     fn next(&mut self) -> Result, reth_db::DatabaseError> {
         let last_slot = match self.last_slot.as_ref() {
@@ -353,8 +353,8 @@ where
 {
     /// Returns `true` if the account has no storage entries.
     ///
-    /// This function should be called before attempting to call [HashedCursor::seek] or
-    /// [HashedCursor::next].
+    /// This function should be called before attempting to call [`HashedCursor::seek`] or
+    /// [`HashedCursor::next`].
     fn is_storage_empty(&mut self) -> Result {
         let is_empty = match self.post_state.storages.get(&self.hashed_address) {
             Some(storage) => {
diff --git a/crates/trie/trie/src/node_iter.rs b/crates/trie/trie/src/node_iter.rs
index ca544f9bb..03d80d34c 100644
--- a/crates/trie/trie/src/node_iter.rs
+++ b/crates/trie/trie/src/node_iter.rs
@@ -47,7 +47,7 @@ pub struct TrieNodeIter {
 }
 
 impl TrieNodeIter {
-    /// Creates a new [TrieNodeIter].
+    /// Creates a new [`TrieNodeIter`].
     pub const fn new(walker: TrieWalker, hashed_cursor: H) -> Self {
         Self {
             walker,
@@ -58,7 +58,7 @@ impl TrieNodeIter {
         }
     }
 
-    /// Sets the last iterated hashed key and returns the modified [TrieNodeIter].
+    /// Sets the last iterated hashed key and returns the modified [`TrieNodeIter`].
     /// This is used to resume iteration from the last checkpoint.
     pub const fn with_last_hashed_key(mut self, previous_hashed_key: B256) -> Self {
         self.previous_hashed_key = Some(previous_hashed_key);
diff --git a/crates/trie/trie/src/prefix_set/mod.rs b/crates/trie/trie/src/prefix_set/mod.rs
index c37b90d4a..ea4ebde94 100644
--- a/crates/trie/trie/src/prefix_set/mod.rs
+++ b/crates/trie/trie/src/prefix_set/mod.rs
@@ -63,7 +63,7 @@ where
 }
 
 impl PrefixSetMut {
-    /// Create [PrefixSetMut] with pre-allocated capacity.
+    /// Create a [`PrefixSetMut`] with pre-allocated capacity.
     pub fn with_capacity(capacity: usize) -> Self {
         Self { keys: Vec::with_capacity(capacity), ..Default::default() }
     }
@@ -130,7 +130,7 @@ impl PrefixSetMut {
 
 /// A sorted prefix set that has an immutable _sorted_ list of unique keys.
 ///
-/// See also [PrefixSetMut::freeze].
+/// See also [`PrefixSetMut::freeze`].
 #[derive(Debug, Default, Clone)]
 pub struct PrefixSet {
     keys: Arc>,
diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs
index 9c8886017..6637e9dd9 100644
--- a/crates/trie/trie/src/state.rs
+++ b/crates/trie/trie/src/state.rs
@@ -32,7 +32,7 @@ pub struct HashedPostState {
 }
 
 impl HashedPostState {
-    /// Initialize [HashedPostState] from bundle state.
+    /// Initialize [`HashedPostState`] from bundle state.
     /// Hashes all changed accounts and storage entries that are currently stored in the bundle
     /// state.
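`PrefixSetMut::freeze` above turns the mutable builder into an immutable `PrefixSet` holding a sorted list of unique keys behind an `Arc`, so clones are cheap. A rough sketch of that builder/freeze split, with a simplified key type and illustrative names:

```rust
use std::sync::Arc;

/// Collects keys in any order; only useful once frozen.
#[derive(Default)]
struct PrefixSetBuilder {
    keys: Vec<u64>,
}

/// Immutable, sorted, deduplicated view; cheap to clone via the Arc.
struct FrozenPrefixSet {
    keys: Arc<Vec<u64>>,
}

impl PrefixSetBuilder {
    fn insert(&mut self, key: u64) {
        self.keys.push(key);
    }

    /// Sorts and deduplicates the collected keys, then freezes them.
    fn freeze(mut self) -> FrozenPrefixSet {
        self.keys.sort_unstable();
        self.keys.dedup();
        FrozenPrefixSet { keys: Arc::new(self.keys) }
    }
}

fn main() {
    let mut builder = PrefixSetBuilder::default();
    for key in [3, 1, 3, 2] {
        builder.insert(key);
    }
    let frozen = builder.freeze();
    assert_eq!(frozen.keys.as_slice(), &[1, 2, 3]);
}
```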
     pub fn from_bundle_state<'a>(
@@ -54,11 +54,11 @@ impl HashedPostState {
         this
     }
 
-    /// Initialize [HashedPostState] from revert range.
+    /// Initialize [`HashedPostState`] from revert range.
     /// Iterate over state reverts in the specified block range and
     /// apply them to hashed state in reverse.
     ///
-    /// NOTE: In order to have the resulting [HashedPostState] be a correct
+    /// NOTE: In order to have the resulting [`HashedPostState`] be a correct
     /// overlay of the plain state, the end of the range must be the current tip.
     pub fn from_revert_range(
         tx: &TX,
@@ -142,7 +142,7 @@ impl HashedPostState {
         }
     }
 
-    /// Converts hashed post state into [HashedPostStateSorted].
+    /// Converts hashed post state into [`HashedPostStateSorted`].
     pub fn into_sorted(self) -> HashedPostStateSorted {
         let mut accounts = Vec::new();
         let mut destroyed_accounts = HashSet::default();
@@ -164,7 +164,7 @@ impl HashedPostState {
         HashedPostStateSorted { accounts, destroyed_accounts, storages }
     }
 
-    /// Construct [TriePrefixSets] from hashed post state.
+    /// Construct [`TriePrefixSets`] from hashed post state.
     /// The prefix sets contain the hashed account and storage keys that have been changed in the
     /// post state.
     pub fn construct_prefix_sets(&self) -> TriePrefixSets {
@@ -198,7 +198,7 @@ impl HashedPostState {
         }
     }
 
-    /// Calculate the state root for this [HashedPostState].
+    /// Calculate the state root for this [`HashedPostState`].
     /// Internally, this method retrieves prefix sets and uses them
     /// to calculate incremental state root.
     ///
@@ -226,7 +226,7 @@ impl HashedPostState {
     ///
     /// # Returns
     ///
-    /// The state root for this [HashedPostState].
+    /// The state root for this [`HashedPostState`].
     pub fn state_root(&self, tx: &TX) -> Result {
         let sorted = self.clone().into_sorted();
         let prefix_sets = self.construct_prefix_sets();
@@ -236,8 +236,8 @@ impl HashedPostState {
             .root()
     }
 
-    /// Calculates the state root for this [HashedPostState] and returns it alongside trie updates.
-    /// See [Self::state_root] for more info.
+    /// Calculates the state root for this [`HashedPostState`] and returns it alongside trie
+    /// updates. See [`Self::state_root`] for more info.
     pub fn state_root_with_updates(
         &self,
         tx: &TX,
@@ -261,7 +261,7 @@ pub struct HashedStorage {
 }
 
 impl HashedStorage {
-    /// Create new instance of [HashedStorage].
+    /// Create a new instance of [`HashedStorage`].
     pub fn new(wiped: bool) -> Self {
         Self { wiped, storage: HashMap::default() }
     }
@@ -283,7 +283,7 @@ impl HashedStorage {
         }
     }
 
-    /// Converts hashed storage into [HashedStorageSorted].
+    /// Converts hashed storage into [`HashedStorageSorted`].
     pub fn into_sorted(self) -> HashedStorageSorted {
         let mut non_zero_valued_slots = Vec::new();
         let mut zero_valued_slots = HashSet::default();
diff --git a/crates/trie/trie/src/test_utils.rs b/crates/trie/trie/src/test_utils.rs
index 336448327..67a392e20 100644
--- a/crates/trie/trie/src/test_utils.rs
+++ b/crates/trie/trie/src/test_utils.rs
@@ -6,7 +6,7 @@ use reth_primitives::{
 /// Re-export of [triehash].
 pub use triehash;
 
-/// Compute the state root of a given set of accounts using [triehash::sec_trie_root].
+/// Compute the state root of a given set of accounts using [`triehash::sec_trie_root`].
 pub fn state_root(accounts: I) -> B256
 where
     I: IntoIterator,
@@ -20,14 +20,14 @@ where
     triehash::sec_trie_root::(encoded_accounts)
 }
 
-/// Compute the storage root for a given account using [triehash::sec_trie_root].
+/// Compute the storage root for a given account using [`triehash::sec_trie_root`].
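`into_sorted` above splits the changed accounts into two buckets: accounts that still have info go into a key-sorted list, while accounts whose info was removed land in a destroyed set. A simplified illustration of that partition, with toy types standing in for the hashed addresses and account info:

```rust
use std::collections::{HashMap, HashSet};

/// Partition a post state into (sorted updated accounts, destroyed set).
fn into_sorted(
    accounts: HashMap<u64, Option<String>>,
) -> (Vec<(u64, String)>, HashSet<u64>) {
    let mut updated = Vec::new();
    let mut destroyed = HashSet::new();
    for (hashed_address, info) in accounts {
        match info {
            Some(info) => updated.push((hashed_address, info)),
            None => {
                destroyed.insert(hashed_address);
            }
        }
    }
    // Sorting by key is what makes the later cursor merge possible.
    updated.sort_unstable_by_key(|(address, _)| *address);
    (updated, destroyed)
}

fn main() {
    let mut state = HashMap::new();
    state.insert(2, Some("account-2".to_string()));
    state.insert(1, None); // account destroyed in the post state
    let (updated, destroyed) = into_sorted(state);
    assert_eq!(updated, vec![(2, "account-2".to_string())]);
    assert!(destroyed.contains(&1));
}
```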
 pub fn storage_root>(storage: I) -> B256 {
     let encoded_storage = storage.into_iter().map(|(k, v)| (k, encode_fixed_size(&v)));
     triehash::sec_trie_root::(encoded_storage)
 }
 
 /// Compute the state root of a given set of accounts with prehashed keys using
-/// [triehash::trie_root].
+/// [`triehash::trie_root`].
 pub fn state_root_prehashed(accounts: I) -> B256
 where
     I: IntoIterator,
@@ -42,7 +42,7 @@ where
     triehash::trie_root::(encoded_accounts)
 }
 
-/// Compute the storage root for a given account with prehashed slots using [triehash::trie_root].
+/// Compute the storage root for a given account with prehashed slots using [`triehash::trie_root`].
 pub fn storage_root_prehashed>(storage: I) -> B256 {
     let encoded_storage = storage.into_iter().map(|(k, v)| (k, encode_fixed_size(&v)));
     triehash::trie_root::(encoded_storage)
diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs
index 6a054167d..012ab0b08 100644
--- a/crates/trie/trie/src/trie.rs
+++ b/crates/trie/trie/src/trie.rs
@@ -23,7 +23,7 @@ use tracing::{debug, trace};
 #[cfg(feature = "metrics")]
 use crate::metrics::{StateRootMetrics, TrieRootMetrics, TrieType};
 
-/// StateRoot is used to compute the root node of a state trie.
+/// `StateRoot` is used to compute the root node of a state trie.
 #[derive(Debug)]
 pub struct StateRoot {
     /// The factory for trie cursors.
@@ -94,7 +94,7 @@ impl StateRoot {
 }
 
 impl<'a, TX: DbTx> StateRoot<&'a TX, &'a TX> {
-    /// Create a new [StateRoot] instance.
+    /// Create a new [`StateRoot`] instance.
     pub fn from_tx(tx: &'a TX) -> Self {
         Self {
             trie_cursor_factory: tx,
@@ -341,7 +341,7 @@ where
     }
 }
 
-/// StorageRoot is used to compute the root node of an account storage trie.
+/// `StorageRoot` is used to compute the root node of an account storage trie.
 #[derive(Debug)]
 pub struct StorageRoot {
     /// A reference to the database transaction.
diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs
index 62e48d4e3..ec4761693 100644
--- a/crates/trie/trie/src/walker.rs
+++ b/crates/trie/trie/src/walker.rs
@@ -29,7 +29,7 @@ pub struct TrieWalker {
 }
 
 impl TrieWalker {
-    /// Constructs a new TrieWalker from existing stack and a cursor.
+    /// Constructs a new `TrieWalker` from an existing stack and a cursor.
     pub fn from_stack(cursor: C, stack: Vec, changes: PrefixSet) -> Self {
         let mut this =
             Self { cursor, changes, stack, can_skip_current_node: false, trie_updates: None };
@@ -111,7 +111,7 @@ impl TrieWalker {
 }
 
 impl TrieWalker {
-    /// Constructs a new TrieWalker, setting up the initial state of the stack and cursor.
+    /// Constructs a new `TrieWalker`, setting up the initial state of the stack and cursor.
     pub fn new(cursor: C, changes: PrefixSet) -> Self {
         // Initialize the walker with a single empty stack element.
         let mut this = Self {
diff --git a/docs/crates/eth-wire.md b/docs/crates/eth-wire.md
index 534090eb5..f1997e2c1 100644
--- a/docs/crates/eth-wire.md
+++ b/docs/crates/eth-wire.md
@@ -1,7 +1,7 @@
 # eth-wire
 
-The `eth-wire` crate provides abstractions over the [RLPx](https://github.com/ethereum/devp2p/blob/master/rlpx.md) and
-[Eth wire](https://github.com/ethereum/devp2p/blob/master/caps/eth.md) protocols.
+The `eth-wire` crate provides abstractions over the [`RLPx`](https://github.com/ethereum/devp2p/blob/master/rlpx.md) and
+[Eth wire](https://github.com/ethereum/devp2p/blob/master/caps/eth.md) protocols.
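The test utilities above wrap the external `triehash` crate's `trie_root` and `sec_trie_root` helpers. A small usage sketch, assuming the `triehash` and `keccak-hasher` crates as dependencies (the concrete hasher and encoding reth plugs in may differ):

```rust
// `KeccakHasher` implements the `hash_db::Hasher` trait that the
// triehash functions are generic over.
use keccak_hasher::KeccakHasher;

fn main() {
    let pairs = vec![
        (b"do".to_vec(), b"verb".to_vec()),
        (b"dog".to_vec(), b"puppy".to_vec()),
    ];

    // `trie_root` treats the keys as already-prepared trie paths,
    // while `sec_trie_root` keccak-hashes each key first ("secure trie").
    let root = triehash::trie_root::<KeccakHasher, _, _, _>(pairs.clone());
    let sec_root = triehash::sec_trie_root::<KeccakHasher, _, _, _>(pairs);
    println!("trie_root: {root:?}");
    println!("sec_trie_root: {sec_root:?}");
}
```

This is why the `_prehashed` variants above call `trie_root` directly: their inputs are already keyed by hash, so the extra keccak pass in `sec_trie_root` must be skipped.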
 This crate can be thought of as having 2 components:
@@ -47,7 +47,7 @@ pub enum EthMessageID {
 }
 ```
 
-Messages can either be broadcast to the network, or can be a request/response message to a single peer. This 2nd type of message is 
+Messages can either be broadcast to the network, or can be a request/response message to a single peer. This 2nd type of message is
 described using a `RequestPair` struct, which is simply a concatenation of the underlying message with a request id.
 
 [File: crates/net/eth-wire/src/types/message.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/message.rs)
@@ -85,8 +85,8 @@ Let's understand how an `EthMessage` is implemented by taking a look at the `Tra
 Transactions (0x02)
 [tx₁, tx₂, ...]
 
-Specify transactions that the peer should make sure is included on its transaction queue. 
-The items in the list are transactions in the format described in the main Ethereum specification. 
+Specify transactions that the peer should make sure is included on its transaction queue.
+The items in the list are transactions in the format described in the main Ethereum specification.
 ...
 ```
@@ -140,11 +140,11 @@ The lowest level stream to communicate with other peers is the P2P stream. It ta
 
 - Tracks and Manages Ping and pong messages and sends them when needed.
 - Keeps track of the SharedCapabilities between the reth node and its peers.
-- Receives bytes from peers, decompresses and forwards them to its parent stream. 
+- Receives bytes from peers, decompresses and forwards them to its parent stream.
 - Receives bytes from its parent stream, compresses them and sends it to peers.
 
-Decompression/Compression of bytes is done with snappy algorithm ([EIP 706](https://eips.ethereum.org/EIPS/eip-706)) 
-using the external `snap` crate. 
+Decompression/Compression of bytes is done with the snappy algorithm ([EIP 706](https://eips.ethereum.org/EIPS/eip-706))
+using the external `snap` crate.
 
 [File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs)
 ```rust,ignore
@@ -220,7 +220,7 @@ pub(crate) fn poll_ping(
 ### Sending and receiving data
 
 To send and receive data, the P2PStream itself is a future which implements the `Stream` and `Sink` traits from the `futures` crate.
-For the `Stream` trait, the `inner` stream is polled, decompressed and returned. Most of the code is just 
+For the `Stream` trait, the `inner` stream is polled, decompressed and returned. Most of the code is just
 error handling and is omitted here for clarity.
 
 [File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs)
@@ -296,7 +296,7 @@ pub struct EthStream {
 }
 ```
 EthStream's only job is to perform the RLP decoding/encoding, using the `ProtocolMessage::decode()` and `ProtocolMessage::encode()`
-functions we looked at earlier. 
+functions we looked at earlier.
 
 [File: crates/net/eth-wire/src/ethstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/ethstream.rs)
 ```rust,ignore
@@ -334,8 +334,8 @@ impl Sink for EthStream {
 }
 ```
 ## Unauthed streams
-For a session to be established, peers in the Ethereum network must first exchange a `Hello` message in the RLPx layer and then a 
-`Status` message in the eth-wire layer. 
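The P2P stream docs above note that payloads are snappy-compressed per EIP-706 via the external `snap` crate. A minimal round-trip sketch with that crate, mirroring the compress-on-send / decompress-on-receive split (error handling collapsed to `expect` for brevity):

```rust
fn main() {
    let payload = b"hello reth".to_vec();

    // Outgoing bytes are compressed before hitting the wire...
    let compressed = snap::raw::Encoder::new()
        .compress_vec(&payload)
        .expect("compression failed");

    // ...and incoming bytes are decompressed before being forwarded
    // to the parent stream.
    let decompressed = snap::raw::Decoder::new()
        .decompress_vec(&compressed)
        .expect("decompression failed");

    assert_eq!(payload, decompressed);
}
```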
+For a session to be established, peers in the Ethereum network must first exchange a `Hello` message in the `RLPx` layer and then a
+`Status` message in the eth-wire layer.
 
 To perform these, reth has special `Unauthed` versions of streams described above.
 
@@ -368,5 +368,3 @@ impl UnauthedP2PStream {
 ```
 Similarly, UnauthedEthStream does the `Status` handshake and returns an `EthStream`. The code is [here](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/ethstream.rs)
-
-
diff --git a/docs/crates/network.md b/docs/crates/network.md
index adcbc24b3..9e381877f 100644
--- a/docs/crates/network.md
+++ b/docs/crates/network.md
@@ -145,7 +145,7 @@ pub struct NetworkConfig {
     pub executor: Option,
     /// The `Status` message to send to peers at the beginning.
     pub status: Status,
-    /// Sets the hello message for the p2p handshake in RLPx
+    /// Sets the hello message for the p2p handshake in `RLPx`
     pub hello_message: HelloMessage,
 }
 ```
@@ -312,7 +312,7 @@ pub struct NetworkState {
     genesis_hash: B256,
     /// The type that handles requests.
     ///
-    /// The fetcher streams RLPx related requests on a per-peer basis to this type. This type will
+    /// The fetcher streams `RLPx` related requests on a per-peer basis to this type. This type will
     /// then queue in the request and notify the fetcher once the result has been received.
     state_fetcher: StateFetcher,
 }
@@ -648,7 +648,7 @@ fn on_bodies_request(
 
 ## Transactions Task
 
-The transactions task listens for, requests, and propagates transactions both from the node's peers, and those that are added locally (e.g., submitted via RPC). Note that this task focuses solely on the network communication involved with Ethereum transactions, we will talk more about the structure of the transaction pool itself 
+The transactions task listens for, requests, and propagates transactions both from the node's peers, and those that are added locally (e.g., submitted via RPC). Note that this task focuses solely on the network communication involved with Ethereum transactions; we will talk more about the structure of the transaction pool itself
 in the [transaction-pool](../../../ethereum/transaction-pool/README.md) chapter.
 
 Again, like the network management and ETH requests tasks, the transactions task is implemented as an endless future that runs as a background task on a standalone `tokio::task`. It's represented by the `TransactionsManager` struct:
@@ -832,7 +832,7 @@ struct Peer {
 }
 ```
 
-Note that the `Peer` struct contains a field `transactions`, which is an [LRU cache](https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU)) of the transactions this peer is aware of. 
+Note that the `Peer` struct contains a field `transactions`, which is an [LRU cache](https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU)) of the transactions this peer is aware of.
 
 The `request_tx` field on the `Peer` is used as the sender end of a channel to send requests to the session with the peer.
 
diff --git a/docs/repo/layout.md b/docs/repo/layout.md
index 9d6b4e0f9..c3c53321f 100644
--- a/docs/repo/layout.md
+++ b/docs/repo/layout.md
@@ -68,8 +68,8 @@ The networking component mainly lives in [`net/network`](../../crates/net/networ
 
 #### Protocol
 
-- [`net/eth-wire`](../../crates/net/eth-wire): Implements the `eth` wire protocol and the RLPx networking stack.
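The `Peer` struct above tracks which transactions a peer already knows via a bounded LRU cache, so the node can skip re-announcing them. An illustrative sketch of that idea using the external `lru` crate (not reth's actual cache type; the hash type is simplified to a raw 32-byte array):

```rust
use std::num::NonZeroUsize;

use lru::LruCache;

struct PeerState {
    /// Transaction hashes this peer is known to be aware of.
    seen_transactions: LruCache<[u8; 32], ()>,
}

impl PeerState {
    fn new(capacity: usize) -> Self {
        Self {
            seen_transactions: LruCache::new(
                NonZeroUsize::new(capacity).expect("non-zero capacity"),
            ),
        }
    }

    /// Records a hash and reports whether the peer already knew it.
    fn mark_seen(&mut self, hash: [u8; 32]) -> bool {
        self.seen_transactions.put(hash, ()).is_some()
    }
}

fn main() {
    let mut peer = PeerState::new(2);
    assert!(!peer.mark_seen([1; 32])); // new hash: propagate it
    assert!(peer.mark_seen([1; 32])); // already known: skip propagation
}
```

The bounded capacity is the point of the design: memory per peer stays constant while the least recently seen hashes age out.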
-- [`net/ecies`](../../crates/net/ecies): Implementation of the Elliptic Curve Integrated Encryption Scheme used in the RLPx handshake.
+- [`net/eth-wire`](../../crates/net/eth-wire): Implements the `eth` wire protocol and the `RLPx` networking stack.
+- [`net/ecies`](../../crates/net/ecies): Implementation of the Elliptic Curve Integrated Encryption Scheme used in the `RLPx` handshake.
 
 #### Downloaders
 
diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs
index 551aa80e4..e7c854459 100644
--- a/testing/testing-utils/src/generators.rs
+++ b/testing/testing-utils/src/generators.rs
@@ -29,7 +29,7 @@ pub fn rng() -> StdRng {
     }
 }
 
-/// Generates a range of random [SealedHeader]s.
+/// Generates a range of random [`SealedHeader`]s.
 ///
 /// The parent hash of the first header
 /// in the result will be equal to `head`.
@@ -51,7 +51,7 @@ pub fn random_header_range(
     headers
 }
 
-/// Generate a random [SealedHeader].
+/// Generate a random [`SealedHeader`].
 ///
 /// The header is assumed to not be correct if validated.
 pub fn random_header(rng: &mut R, number: u64, parent: Option) -> SealedHeader {
@@ -85,7 +85,7 @@ pub fn random_tx(rng: &mut R) -> Transaction {
 
 /// Generates a random legacy [Transaction] that is signed.
 ///
-/// On top of the considerations of [random_tx], these apply as well:
+/// On top of the considerations of [`random_tx`], these apply as well:
 ///
 /// - There is no guarantee that the nonce is not used twice for the same account
 pub fn random_signed_tx(rng: &mut R) -> TransactionSigned {
@@ -109,7 +109,7 @@ pub fn generate_keys(rng: &mut R, count: usize) -> Vec {
 }
 
 /// Generate a random block filled with signed transactions (generated using
-/// [random_signed_tx]). If no transaction count is provided, the number of transactions
+/// [`random_signed_tx`]). If no transaction count is provided, the number of transactions
 /// will be random, otherwise the provided count will be used.
 ///
 /// All fields use the default values (and are assumed to be invalid) except for:
@@ -168,7 +168,7 @@ pub fn random_block(
 /// The parent hash of the first block
 /// in the result will be equal to `head`.
 ///
-/// See [random_block] for considerations when validating the generated blocks.
+/// See [`random_block`] for considerations when validating the generated blocks.
 pub fn random_block_range(
     rng: &mut R,
     block_numbers: RangeInclusive,
@@ -274,7 +274,7 @@ where
 
 /// Generate a random account change.
 ///
-/// Returns two addresses, a balance_change, and a Vec of new storage entries.
+/// Returns two addresses, a `balance_change`, and a Vec of new storage entries.
 pub fn random_account_change(
     rng: &mut R,
     valid_addresses: &[Address],
diff --git a/testing/testing-utils/src/genesis_allocator.rs b/testing/testing-utils/src/genesis_allocator.rs
index 067f68343..a146401f0 100644
--- a/testing/testing-utils/src/genesis_allocator.rs
+++ b/testing/testing-utils/src/genesis_allocator.rs
@@ -157,7 +157,7 @@ impl<'a> GenesisAllocator<'a> {
         self.alloc.insert(address, GenesisAccount::default().with_balance(balance));
     }
 
-    /// Adds the given [GenesisAccount] to the genesis alloc.
+    /// Adds the given [`GenesisAccount`] to the genesis alloc.
     ///
     /// Returns the key pair for the account and the account's address.
     pub fn add_account(&mut self, account: GenesisAccount) -> Address {
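`random_header_range` above promises that the first generated header's parent hash equals `head` and that each later header chains to its predecessor. A simplified sketch of that contract, with a toy hash function and a minimal header type so the example stays self-contained (purely illustrative, not reth's `SealedHeader`):

```rust
#[derive(Debug, Clone)]
struct Header {
    number: u64,
    parent_hash: u64,
    hash: u64,
}

/// Stand-in for a real block hash; only needs to be deterministic here.
fn toy_hash(number: u64, parent_hash: u64) -> u64 {
    number.wrapping_mul(31).wrapping_add(parent_hash)
}

/// Generates `count` headers starting at `start`, chained onto `head`.
fn header_range(start: u64, count: u64, head: u64) -> Vec<Header> {
    let mut parent_hash = head;
    (start..start + count)
        .map(|number| {
            let hash = toy_hash(number, parent_hash);
            let header = Header { number, parent_hash, hash };
            parent_hash = hash; // the next header chains onto this one
            header
        })
        .collect()
}

fn main() {
    let headers = header_range(1, 3, 42);
    assert_eq!(headers[0].parent_hash, 42);
    assert_eq!(headers[1].parent_hash, headers[0].hash);
    assert_eq!(headers[2].parent_hash, headers[1].hash);
}
```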