Mirror of https://github.com/hl-archive-node/nanoreth.git (synced 2025-12-06 10:59:55 +00:00)

Compare commits: 239ee5f8e8...nb-2025082 (34 commits)
| SHA1 |
|---|
| 3ffd7bb351 |
| 52909eea3f |
| 0f9c2c5897 |
| ad4a8cd365 |
| 80506a7a43 |
| 2af312b628 |
| 1908e9f414 |
| 65cdc27b51 |
| 4f430487d6 |
| 19f35a6b54 |
| d61020e996 |
| 657df240f4 |
| 73a34a4bc1 |
| d8eef6305b |
| bae68ef8db |
| f576dddfa6 |
| 894ebcbfa5 |
| b91fa639f7 |
| cf4e76db20 |
| 5af7182919 |
| b6d5031865 |
| 7daf203bc2 |
| 20610ccc82 |
| 6543fac314 |
| 26c1973503 |
| 095ad0f65d |
| 67cc8b8360 |
| ff67ae87c8 |
| 8cebe6db10 |
| 78b9028ded |
| 7f0f7c94a6 |
| 2712cbb413 |
| 4be1aa83de |
| dd455d3a41 |
Cargo.lock (generated, 1850 lines changed): diff suppressed because it is too large.
Cargo.toml (122 lines changed)

@@ -25,86 +25,86 @@ lto = "fat"
 codegen-units = 1
 
 [dependencies]
-reth = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-cli = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-cli = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-cli-commands = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-cli-commands = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-basic-payload-builder = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-basic-payload-builder = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-db = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-db = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-db-api = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-db-api = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-chainspec = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-chainspec = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-cli-util = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-cli-util = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-discv4 = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-discv4 = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-engine-primitives = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-engine-primitives = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-ethereum-forks = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-ethereum-forks = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-ethereum-payload-builder = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-ethereum-payload-builder = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-ethereum-primitives = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-ethereum-primitives = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-eth-wire = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-eth-wire = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-eth-wire-types = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-eth-wire-types = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-evm = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-evm = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-evm-ethereum = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-evm-ethereum = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-node-core = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-node-core = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-revm = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-revm = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-network = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-network = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-network-p2p = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-network-p2p = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-network-api = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-network-api = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-node-ethereum = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-node-ethereum = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-network-peers = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-network-peers = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-payload-primitives = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-payload-primitives = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-primitives = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-primitives = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-primitives-traits = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-primitives-traits = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-provider = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc", features = ["test-utils"] }
+reth-provider = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb", features = ["test-utils"] }
-reth-rpc = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-rpc = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-rpc-eth-api = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-rpc-eth-api = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-rpc-engine-api = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-rpc-engine-api = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-tracing = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-tracing = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-trie-common = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-trie-common = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-trie-db = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-trie-db = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-codecs = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-codecs = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-transaction-pool = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-transaction-pool = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-reth-stages-types = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
+reth-stages-types = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
-revm = { version = "26.0.1" }
+revm = { version = "28.0.1", default-features = false }
 
 # alloy dependencies
-alloy-genesis = "1.0.13"
+alloy-genesis = { version = "1.0.23", default-features = false }
-alloy-consensus = { version = "1.0.13", features = ["serde"] }
+alloy-consensus = { version = "1.0.23", default-features = false }
-alloy-chains = "0.2.0"
+alloy-chains = { version = "0.2.5", default-features = false }
-alloy-eips = "1.0.13"
+alloy-eips = { version = "1.0.23", default-features = false }
-alloy-evm = "0.12"
+alloy-evm = { version = "0.18.2", default-features = false }
-alloy-json-abi = { version = "1.0.0", default-features = false }
+alloy-json-abi = { version = "1.3.1", default-features = false }
-alloy-json-rpc = { version = "1.0.13", default-features = false }
+alloy-json-rpc = { version = "1.0.23", default-features = false }
-alloy-dyn-abi = "1.2.0"
+alloy-dyn-abi = "1.3.1"
-alloy-network = "1.0.13"
+alloy-network = { version = "1.0.23", default-features = false }
-alloy-primitives = { version = "1.2.0", default-features = false, features = ["map-foldhash"] }
+alloy-primitives = { version = "1.3.1", default-features = false, features = ["map-foldhash"] }
 alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] }
-alloy-rpc-types = { version = "1.0.13", features = ["engine"] }
+alloy-rpc-types = { version = "1.0.23", features = ["eth"], default-features = false }
-alloy-rpc-types-eth = "1.0.13"
+alloy-rpc-types-eth = { version = "1.0.23", default-features = false }
-alloy-rpc-types-engine = "1.0.13"
+alloy-rpc-types-engine = { version = "1.0.23", default-features = false }
-alloy-signer = "1.0.13"
+alloy-signer = { version = "1.0.23", default-features = false }
-alloy-sol-macro = "1.2.0"
+alloy-sol-macro = "1.3.1"
-alloy-sol-types = { version = "1.2.0", default-features = false }
+alloy-sol-types = { version = "1.3.1", default-features = false }
 
 jsonrpsee = "0.25.1"
-jsonrpsee-core = { version = "0.25.1" }
+jsonrpsee-core = "0.25.1"
 jsonrpsee-types = "0.25.1"
 
 # misc dependencies
 auto_impl = "1"
-async-trait = "0.1"
+async-trait = "0.1.68"
-bytes = "1.5"
+bytes = { version = "1.5", default-features = false }
 clap = { version = "4", features = ["derive"] }
 cfg-if = { version = "1.0", default-features = false }
 derive_more = { version = "2", default-features = false, features = ["full"] }
 eyre = "0.6"
 futures = "0.3"
 lazy_static = "1.4.0"
-once_cell = { version = "1.19", default-features = false, features = ["alloc"] }
+once_cell = { version = "1.19", default-features = false, features = ["critical-section"] }
 parking_lot = "0.12"
 serde = { version = "1.0", features = ["derive"], default-features = false }
-serde_json = "1.0"
+serde_json = { version = "1.0", default-features = false, features = ["alloc"] }
 thiserror = { version = "2.0.0", default-features = false }
 tokio = { version = "1.44.2", features = ["full"] }
-tokio-stream = "0.1"
+tokio-stream = "0.1.11"
-tracing = "0.1"
+tracing = { version = "0.1.0", default-features = false }
 rmp-serde = "1.3"
 lz4_flex = "0.11"
 ureq = "3.0.12"

Dockerfile (new file, 56 lines)

@@ -0,0 +1,56 @@
# syntax=docker.io/docker/dockerfile:1.7-labs

FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef
WORKDIR /app
LABEL org.opencontainers.image.source=https://github.com/hl-archive-node/nanoreth
LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0"

# Install system dependencies
RUN apt-get update && apt-get -y upgrade && apt-get install -y libclang-dev pkg-config

# Builds a cargo-chef plan
FROM chef AS planner
COPY --exclude=.git --exclude=dist . .
RUN cargo chef prepare --recipe-path recipe.json

FROM chef AS builder
COPY --from=planner /app/recipe.json recipe.json

# Build profile, release by default
ARG BUILD_PROFILE=release
ENV BUILD_PROFILE=$BUILD_PROFILE

# Extra Cargo flags
ARG RUSTFLAGS=""
ENV RUSTFLAGS="$RUSTFLAGS"

# Extra Cargo features
ARG FEATURES=""
ENV FEATURES=$FEATURES

# Builds dependencies
RUN cargo chef cook --profile $BUILD_PROFILE --features "$FEATURES" --recipe-path recipe.json

# Build application
COPY --exclude=dist . .
RUN cargo build --profile $BUILD_PROFILE --features "$FEATURES" --locked --bin reth-hl

# ARG is not resolved in COPY so we have to hack around it by copying the
# binary to a temporary location
RUN cp /app/target/$BUILD_PROFILE/reth-hl /app/reth-hl

# Use Ubuntu as the release image
FROM ubuntu AS runtime
WORKDIR /app

# Install root certificates for aws sdk to work
RUN apt-get update && apt-get install -y ca-certificates && update-ca-certificates

# Copy reth over from the build stage
COPY --from=builder /app/reth-hl /usr/local/bin

# Copy licenses
COPY LICENSE-* ./

EXPOSE 9001 8545 8546
ENTRYPOINT ["/usr/local/bin/reth-hl"]

@@ -1,8 +1,8 @@
 use alloy_eips::BlockId;
+use alloy_json_rpc::RpcObject;
 use alloy_primitives::{Bytes, U256};
 use alloy_rpc_types_eth::{
     state::{EvmOverrides, StateOverride},
-    transaction::TransactionRequest,
     BlockOverrides,
 };
 use jsonrpsee::{
@@ -12,16 +12,17 @@ use jsonrpsee::{
     types::{error::INTERNAL_ERROR_CODE, ErrorObject},
 };
 use jsonrpsee_core::{async_trait, client::ClientT, ClientError, RpcResult};
-use reth_rpc_eth_api::helpers::EthCall;
+use reth_rpc::eth::EthApiTypes;
+use reth_rpc_eth_api::{helpers::EthCall, RpcTxReq};
 
 #[rpc(server, namespace = "eth")]
-pub(crate) trait CallForwarderApi {
+pub(crate) trait CallForwarderApi<TxReq: RpcObject> {
     /// Executes a new message call immediately without creating a transaction on the block chain.
     #[method(name = "call")]
     async fn call(
         &self,
-        request: TransactionRequest,
-        block_number: Option<BlockId>,
+        request: TxReq,
+        block_id: Option<BlockId>,
         state_overrides: Option<StateOverride>,
         block_overrides: Option<Box<BlockOverrides>>,
     ) -> RpcResult<Bytes>;
@@ -31,8 +32,8 @@ pub(crate) trait CallForwarderApi {
     #[method(name = "estimateGas")]
     async fn estimate_gas(
         &self,
-        request: TransactionRequest,
-        block_number: Option<BlockId>,
+        request: TxReq,
+        block_id: Option<BlockId>,
         state_override: Option<StateOverride>,
     ) -> RpcResult<U256>;
 }
@@ -52,23 +53,24 @@ impl<EthApi> CallForwarderExt<EthApi> {
 }
 
 #[async_trait]
-impl<EthApi> CallForwarderApiServer for CallForwarderExt<EthApi>
+impl<EthApi> CallForwarderApiServer<RpcTxReq<<EthApi as EthApiTypes>::NetworkTypes>>
+    for CallForwarderExt<EthApi>
 where
     EthApi: EthCall + Send + Sync + 'static,
 {
     async fn call(
         &self,
-        request: TransactionRequest,
-        block_number: Option<BlockId>,
+        request: RpcTxReq<<EthApi as EthApiTypes>::NetworkTypes>,
+        block_id: Option<BlockId>,
         state_overrides: Option<StateOverride>,
         block_overrides: Option<Box<BlockOverrides>>,
     ) -> RpcResult<Bytes> {
-        let is_latest = block_number.as_ref().map(|b| b.is_latest()).unwrap_or(true);
+        let is_latest = block_id.as_ref().map(|b| b.is_latest()).unwrap_or(true);
         let result = if is_latest {
             self.upstream_client
                 .request(
                     "eth_call",
-                    rpc_params![request, block_number, state_overrides, block_overrides],
+                    rpc_params![request, block_id, state_overrides, block_overrides],
                 )
                 .await
                 .map_err(|e| match e {
@@ -83,7 +85,7 @@ where
             EthCall::call(
                 &self.eth_api,
                 request,
-                block_number,
+                block_id,
                 EvmOverrides::new(state_overrides, block_overrides),
             )
             .await
@@ -97,14 +99,14 @@ where
 
     async fn estimate_gas(
         &self,
-        request: TransactionRequest,
-        block_number: Option<BlockId>,
+        request: RpcTxReq<<EthApi as EthApiTypes>::NetworkTypes>,
+        block_id: Option<BlockId>,
         state_override: Option<StateOverride>,
     ) -> RpcResult<U256> {
-        let is_latest = block_number.as_ref().map(|b| b.is_latest()).unwrap_or(true);
+        let is_latest = block_id.as_ref().map(|b| b.is_latest()).unwrap_or(true);
         let result = if is_latest {
             self.upstream_client
-                .request("eth_estimateGas", rpc_params![request, block_number, state_override])
+                .request("eth_estimateGas", rpc_params![request, block_id, state_override])
                 .await
                 .map_err(|e| match e {
                     ClientError::Call(e) => e,
@@ -118,7 +120,7 @@ where
             EthCall::estimate_gas_at(
                 &self.eth_api,
                 request,
-                block_number.unwrap_or_default(),
+                block_id.unwrap_or_default(),
                 state_override,
             )
             .await

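The forwarding rule above turns on `is_latest`: requests with no block id, or with the `latest` tag, are proxied to the upstream RPC, while requests pinned to a historical block are executed against local state. A minimal sketch of that decision, assuming alloy's `BlockId::latest` and `BlockId::number` constructors (illustration only, not repository code):

```rust
use alloy_eips::BlockId;

fn forwarded_upstream(block_id: Option<BlockId>) -> bool {
    // Mirrors the diff's check: an absent block id defaults to "latest".
    block_id.as_ref().map(|b| b.is_latest()).unwrap_or(true)
}

fn main() {
    assert!(forwarded_upstream(None)); // no id: proxied upstream
    assert!(forwarded_upstream(Some(BlockId::latest()))); // latest tag: proxied upstream
    assert!(!forwarded_upstream(Some(BlockId::number(100)))); // pinned block: served locally
}
```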
src/addons/hl_node_compliance.rs (new file, 477 lines)

@@ -0,0 +1,477 @@
//! Overrides for RPC methods to post-filter system transactions and logs.
//!
//! System transactions are always at the beginning of the block,
//! so we can use the transaction index to determine if the log is from a system transaction,
//! and if it is, we can exclude it.
//!
//! For non-system transactions, we can just return the log as is, and the client will
//! adjust the transaction index accordingly.

use alloy_consensus::{transaction::TransactionMeta, TxReceipt};
use alloy_eips::{BlockId, BlockNumberOrTag};
use alloy_json_rpc::RpcObject;
use alloy_primitives::{B256, U256};
use alloy_rpc_types::{
    pubsub::{Params, SubscriptionKind},
    BlockTransactions, Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind,
};
use jsonrpsee::{proc_macros::rpc, PendingSubscriptionSink, SubscriptionMessage, SubscriptionSink};
use jsonrpsee_core::{async_trait, RpcResult};
use jsonrpsee_types::ErrorObject;
use reth::{api::FullNodeComponents, builder::rpc::RpcContext, tasks::TaskSpawner};
use reth_primitives_traits::{BlockBody as _, SignedTransaction};
use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, ReceiptProvider};
use reth_rpc::{eth::pubsub::SubscriptionSerializeError, EthFilter, EthPubSub, RpcTypes};
use reth_rpc_eth_api::{
    helpers::{EthBlocks, EthTransactions, LoadReceipt},
    transaction::ConvertReceiptInput,
    EthApiServer, EthApiTypes, EthFilterApiServer, EthPubSubApiServer, FullEthApiTypes, RpcBlock,
    RpcConvert, RpcHeader, RpcNodeCoreExt, RpcReceipt, RpcTransaction, RpcTxReq,
};
use serde::Serialize;
use std::{borrow::Cow, marker::PhantomData, sync::Arc};
use tokio_stream::{Stream, StreamExt};
use tracing::{trace, Instrument};

use crate::{node::primitives::HlPrimitives, HlBlock};

pub trait EthWrapper:
    EthApiServer<
        RpcTxReq<Self::NetworkTypes>,
        RpcTransaction<Self::NetworkTypes>,
        RpcBlock<Self::NetworkTypes>,
        RpcReceipt<Self::NetworkTypes>,
        RpcHeader<Self::NetworkTypes>,
    > + FullEthApiTypes<
        Primitives = HlPrimitives,
        NetworkTypes: RpcTypes<TransactionResponse = alloy_rpc_types_eth::Transaction>,
    > + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
    + EthBlocks
    + EthTransactions
    + LoadReceipt
    + 'static
{
}

impl<T> EthWrapper for T where
    T: EthApiServer<
        RpcTxReq<Self::NetworkTypes>,
        RpcTransaction<Self::NetworkTypes>,
        RpcBlock<Self::NetworkTypes>,
        RpcReceipt<Self::NetworkTypes>,
        RpcHeader<Self::NetworkTypes>,
    > + FullEthApiTypes<
        Primitives = HlPrimitives,
        NetworkTypes: RpcTypes<TransactionResponse = alloy_rpc_types_eth::Transaction>,
    > + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
    + EthBlocks
    + EthTransactions
    + LoadReceipt
    + 'static
{
}

pub struct HlNodeFilterHttp<Eth: EthWrapper> {
    filter: Arc<EthFilter<Eth>>,
    provider: Arc<Eth::Provider>,
}

impl<Eth: EthWrapper> HlNodeFilterHttp<Eth> {
    pub fn new(filter: Arc<EthFilter<Eth>>, provider: Arc<Eth::Provider>) -> Self {
        Self { filter, provider }
    }
}

#[async_trait]
impl<Eth: EthWrapper> EthFilterApiServer<RpcTransaction<Eth::NetworkTypes>>
    for HlNodeFilterHttp<Eth>
{
    async fn new_filter(&self, filter: Filter) -> RpcResult<FilterId> {
        trace!(target: "rpc::eth", "Serving eth_newFilter");
        self.filter.new_filter(filter).await
    }

    async fn new_block_filter(&self) -> RpcResult<FilterId> {
        trace!(target: "rpc::eth", "Serving eth_newBlockFilter");
        self.filter.new_block_filter().await
    }

    async fn new_pending_transaction_filter(
        &self,
        kind: Option<PendingTransactionFilterKind>,
    ) -> RpcResult<FilterId> {
        trace!(target: "rpc::eth", "Serving eth_newPendingTransactionFilter");
        self.filter.new_pending_transaction_filter(kind).await
    }

    async fn filter_changes(
        &self,
        id: FilterId,
    ) -> RpcResult<FilterChanges<RpcTransaction<Eth::NetworkTypes>>> {
        trace!(target: "rpc::eth", "Serving eth_getFilterChanges");
        self.filter.filter_changes(id).await.map_err(ErrorObject::from)
    }

    async fn filter_logs(&self, id: FilterId) -> RpcResult<Vec<Log>> {
        trace!(target: "rpc::eth", "Serving eth_getFilterLogs");
        self.filter.filter_logs(id).await.map_err(ErrorObject::from)
    }

    async fn uninstall_filter(&self, id: FilterId) -> RpcResult<bool> {
        trace!(target: "rpc::eth", "Serving eth_uninstallFilter");
        self.filter.uninstall_filter(id).await
    }

    async fn logs(&self, filter: Filter) -> RpcResult<Vec<Log>> {
        trace!(target: "rpc::eth", "Serving eth_getLogs");
        let logs = EthFilterApiServer::logs(&*self.filter, filter).await?;
        Ok(logs.into_iter().filter_map(|log| adjust_log::<Eth>(log, &self.provider)).collect())
    }
}

pub struct HlNodeFilterWs<Eth: EthWrapper> {
    pubsub: Arc<EthPubSub<Eth>>,
    provider: Arc<Eth::Provider>,
    subscription_task_spawner: Box<dyn TaskSpawner + 'static>,
}

impl<Eth: EthWrapper> HlNodeFilterWs<Eth> {
    pub fn new(
        pubsub: Arc<EthPubSub<Eth>>,
        provider: Arc<Eth::Provider>,
        subscription_task_spawner: Box<dyn TaskSpawner + 'static>,
    ) -> Self {
        Self { pubsub, provider, subscription_task_spawner }
    }
}

#[async_trait]
impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>>
    for HlNodeFilterWs<Eth>
{
    async fn subscribe(
        &self,
        pending: PendingSubscriptionSink,
        kind: SubscriptionKind,
        params: Option<Params>,
    ) -> jsonrpsee::core::SubscriptionResult {
        let sink = pending.accept().await?;
        let (pubsub, provider) = (self.pubsub.clone(), self.provider.clone());
        self.subscription_task_spawner.spawn(Box::pin(async move {
            if kind == SubscriptionKind::Logs {
                let filter = match params {
                    Some(Params::Logs(f)) => *f,
                    Some(Params::Bool(_)) => return,
                    _ => Default::default(),
                };
                let _ = pipe_from_stream(
                    sink,
                    pubsub.log_stream(filter).filter_map(|log| adjust_log::<Eth>(log, &provider)),
                )
                .await;
            } else {
                let _ = pubsub.handle_accepted(sink, kind, params).await;
            }
        }));
        Ok(())
    }
}

fn adjust_log<Eth: EthWrapper>(mut log: Log, provider: &Eth::Provider) -> Option<Log> {
    let (tx_idx, log_idx) = (log.transaction_index?, log.log_index?);
    let receipts = provider.receipts_by_block(log.block_number?.into()).unwrap()?;
    let (mut sys_tx_count, mut sys_log_count) = (0u64, 0u64);
    for receipt in receipts {
        if receipt.cumulative_gas_used() == 0 {
            sys_tx_count += 1;
            sys_log_count += receipt.logs().len() as u64;
        }
    }
    if sys_tx_count > tx_idx {
        return None;
    }
    log.transaction_index = Some(tx_idx - sys_tx_count);
    log.log_index = Some(log_idx - sys_log_count);
    Some(log)
}

async fn pipe_from_stream<T: Serialize, St: Stream<Item = T> + Unpin>(
    sink: SubscriptionSink,
    mut stream: St,
) -> Result<(), ErrorObject<'static>> {
    loop {
        tokio::select! {
            _ = sink.closed() => break Ok(()),
            maybe_item = stream.next() => {
                let Some(item) = maybe_item else { break Ok(()) };
                let msg = SubscriptionMessage::new(sink.method_name(), sink.subscription_id(), &item)
                    .map_err(SubscriptionSerializeError::from)?;
                if sink.send(msg).await.is_err() { break Ok(()); }
            }
        }
    }
}

pub struct HlNodeBlockFilterHttp<Eth: EthWrapper> {
    eth_api: Arc<Eth>,
    _marker: PhantomData<Eth>,
}

impl<Eth: EthWrapper> HlNodeBlockFilterHttp<Eth> {
    pub fn new(eth_api: Arc<Eth>) -> Self {
        Self { eth_api, _marker: PhantomData }
    }
}

#[rpc(server, namespace = "eth")]
pub trait EthBlockApi<B: RpcObject, R: RpcObject> {
    /// Returns information about a block by hash.
    #[method(name = "getBlockByHash")]
    async fn block_by_hash(&self, hash: B256, full: bool) -> RpcResult<Option<B>>;

    /// Returns information about a block by number.
    #[method(name = "getBlockByNumber")]
    async fn block_by_number(&self, number: BlockNumberOrTag, full: bool) -> RpcResult<Option<B>>;

    /// Returns all transaction receipts for a given block.
    #[method(name = "getBlockReceipts")]
    async fn block_receipts(&self, block_id: BlockId) -> RpcResult<Option<Vec<R>>>;

    #[method(name = "getBlockTransactionCountByHash")]
    async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult<Option<U256>>;

    #[method(name = "getBlockTransactionCountByNumber")]
    async fn block_transaction_count_by_number(
        &self,
        number: BlockNumberOrTag,
    ) -> RpcResult<Option<U256>>;

    #[method(name = "getTransactionReceipt")]
    async fn transaction_receipt(&self, hash: B256) -> RpcResult<Option<R>>;
}

macro_rules! engine_span {
    () => {
        tracing::trace_span!(target: "rpc", "engine")
    };
}

fn adjust_block<Eth: EthWrapper>(
    recovered_block: &RpcBlock<Eth::NetworkTypes>,
    eth_api: &Eth,
) -> RpcBlock<Eth::NetworkTypes> {
    let system_tx_count = system_tx_count_for_block(eth_api, recovered_block.number().into());
    let mut new_block = recovered_block.clone();

    new_block.transactions = match new_block.transactions {
        BlockTransactions::Full(mut transactions) => {
            transactions.drain(..system_tx_count);
            transactions.iter_mut().for_each(|tx| {
                if let Some(idx) = &mut tx.transaction_index {
                    *idx -= system_tx_count as u64;
                }
            });
            BlockTransactions::Full(transactions)
        }
        BlockTransactions::Hashes(mut hashes) => {
            hashes.drain(..system_tx_count);
            BlockTransactions::Hashes(hashes)
        }
        BlockTransactions::Uncle => BlockTransactions::Uncle,
    };
    new_block
}

async fn adjust_block_receipts<Eth: EthWrapper>(
    block_id: BlockId,
    eth_api: &Eth,
) -> Result<Option<(usize, Vec<RpcReceipt<Eth::NetworkTypes>>)>, Eth::Error> {
    // Modified from EthBlocks::block_receipt. See `NOTE` comment below.
    let system_tx_count = system_tx_count_for_block(eth_api, block_id);
    if let Some((block, receipts)) = EthBlocks::load_block_and_receipts(eth_api, block_id).await? {
        let block_number = block.number;
        let base_fee = block.base_fee_per_gas;
        let block_hash = block.hash();
        let excess_blob_gas = block.excess_blob_gas;
        let timestamp = block.timestamp;
        let mut gas_used = 0;
        let mut next_log_index = 0;

        let inputs = block
            .transactions_recovered()
            .zip(receipts.iter())
            .enumerate()
            .filter_map(|(idx, (tx, receipt))| {
                if receipt.cumulative_gas_used() == 0 {
                    // NOTE: modified to exclude system tx
                    return None;
                }
                let meta = TransactionMeta {
                    tx_hash: *tx.tx_hash(),
                    index: (idx - system_tx_count) as u64,
                    block_hash,
                    block_number,
                    base_fee,
                    excess_blob_gas,
                    timestamp,
                };

                let input = ConvertReceiptInput {
                    receipt: Cow::Borrowed(receipt),
                    tx,
                    gas_used: receipt.cumulative_gas_used() - gas_used,
                    next_log_index,
                    meta,
                };

                gas_used = receipt.cumulative_gas_used();
                next_log_index += receipt.logs().len();

                Some(input)
            })
            .collect::<Vec<_>>();

        return eth_api
            .tx_resp_builder()
            .convert_receipts(inputs)
            .map(|receipts| Some((system_tx_count, receipts)));
    }

    Ok(None)
}

async fn adjust_transaction_receipt<Eth: EthWrapper>(
    tx_hash: B256,
    eth_api: &Eth,
) -> Result<Option<RpcReceipt<Eth::NetworkTypes>>, Eth::Error> {
    match eth_api.load_transaction_and_receipt(tx_hash).await? {
        Some((_, meta, _)) => {
            // LoadReceipt::block_transaction_receipt loads the block again, so loading blocks again
            // doesn't hurt performance much
            let Some((system_tx_count, block_receipts)) =
                adjust_block_receipts(meta.block_hash.into(), eth_api).await?
            else {
                unreachable!();
            };
            Ok(Some(block_receipts.into_iter().nth(meta.index as usize - system_tx_count).unwrap()))
        }
        None => Ok(None),
    }
}

// This function assumes that `block_id` is already validated by the caller.
fn system_tx_count_for_block<Eth: EthWrapper>(eth_api: &Eth, block_id: BlockId) -> usize {
    let provider = eth_api.provider();
    let block = provider.block_by_id(block_id).unwrap().unwrap();
    let system_tx_count =
        block.body.transactions().iter().filter(|tx| tx.is_system_transaction()).count();
    system_tx_count
}

#[async_trait]
impl<Eth: EthWrapper> EthBlockApiServer<RpcBlock<Eth::NetworkTypes>, RpcReceipt<Eth::NetworkTypes>>
    for HlNodeBlockFilterHttp<Eth>
where
    Eth: EthApiTypes + 'static,
    ErrorObject<'static>: From<Eth::Error>,
{
    /// Handler for: `eth_getBlockByHash`
    async fn block_by_hash(
        &self,
        hash: B256,
        full: bool,
    ) -> RpcResult<Option<RpcBlock<Eth::NetworkTypes>>> {
        let res = self.eth_api.block_by_hash(hash, full).instrument(engine_span!()).await?;
        Ok(res.map(|block| adjust_block(&block, &*self.eth_api)))
    }

    /// Handler for: `eth_getBlockByNumber`
    async fn block_by_number(
        &self,
        number: BlockNumberOrTag,
        full: bool,
    ) -> RpcResult<Option<RpcBlock<Eth::NetworkTypes>>> {
        trace!(target: "rpc::eth", ?number, ?full, "Serving eth_getBlockByNumber");
        let res = self.eth_api.block_by_number(number, full).instrument(engine_span!()).await?;
        Ok(res.map(|block| adjust_block(&block, &*self.eth_api)))
    }

    /// Handler for: `eth_getBlockTransactionCountByHash`
    async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult<Option<U256>> {
        trace!(target: "rpc::eth", ?hash, "Serving eth_getBlockTransactionCountByHash");
        let res =
            self.eth_api.block_transaction_count_by_hash(hash).instrument(engine_span!()).await?;
        Ok(res.map(|count| {
            let sys_tx_count =
                system_tx_count_for_block(&*self.eth_api, BlockId::Hash(hash.into()));
            count - U256::from(sys_tx_count)
        }))
    }

    /// Handler for: `eth_getBlockTransactionCountByNumber`
    async fn block_transaction_count_by_number(
        &self,
        number: BlockNumberOrTag,
    ) -> RpcResult<Option<U256>> {
        trace!(target: "rpc::eth", ?number, "Serving eth_getBlockTransactionCountByNumber");
        let res = self
            .eth_api
            .block_transaction_count_by_number(number)
            .instrument(engine_span!())
            .await?;
        Ok(res.map(|count| {
            count - U256::from(system_tx_count_for_block(&*self.eth_api, number.into()))
        }))
    }

    async fn transaction_receipt(
        &self,
        hash: B256,
    ) -> RpcResult<Option<RpcReceipt<Eth::NetworkTypes>>> {
        trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionReceipt");
        let eth_api = &*self.eth_api;
        Ok(adjust_transaction_receipt(hash, eth_api).instrument(engine_span!()).await?)
    }

    /// Handler for: `eth_getBlockReceipts`
    async fn block_receipts(
        &self,
        block_id: BlockId,
    ) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
        trace!(target: "rpc::eth", ?block_id, "Serving eth_getBlockReceipts");
        let result =
            adjust_block_receipts(block_id, &*self.eth_api).instrument(engine_span!()).await?;
        Ok(result.map(|(_, receipts)| receipts))
    }
}

pub fn install_hl_node_compliance<Node, EthApi>(
    ctx: RpcContext<Node, EthApi>,
) -> Result<(), eyre::Error>
where
    Node: FullNodeComponents,
    Node::Provider: BlockIdReader + BlockReader<Block = crate::HlBlock>,
    EthApi: EthWrapper,
    ErrorObject<'static>: From<EthApi::Error>,
{
    ctx.modules.replace_configured(
        HlNodeFilterHttp::new(
            Arc::new(ctx.registry.eth_handlers().filter.clone()),
            Arc::new(ctx.registry.eth_api().provider().clone()),
        )
        .into_rpc(),
    )?;
    ctx.modules.replace_configured(
        HlNodeFilterWs::new(
            Arc::new(ctx.registry.eth_handlers().pubsub.clone()),
            Arc::new(ctx.registry.eth_api().provider().clone()),
            Box::new(ctx.node().task_executor().clone()),
        )
        .into_rpc(),
    )?;

    ctx.modules.replace_configured(
        HlNodeBlockFilterHttp::new(Arc::new(ctx.registry.eth_api().clone())).into_rpc(),
    )?;
    Ok(())
}

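To make the index arithmetic behind `adjust_log` concrete: system transactions (those with `cumulative_gas_used == 0`) sit at the front of the block, so user-facing indices shift down by the system counts. A worked example with hypothetical numbers, not repository code:

```rust
// Assume a block whose first 2 transactions are system transactions
// emitting 3 logs between them.
fn main() {
    let (sys_tx_count, sys_log_count) = (2u64, 3u64);

    // A log from the block's 4th transaction (index 3), 5th log overall (index 4):
    let (tx_idx, log_idx) = (3u64, 4u64);

    // Logs emitted by the system transactions themselves (tx_idx < 2) are dropped;
    // everything else shifts down by the system tx/log counts.
    assert!(tx_idx >= sys_tx_count);
    assert_eq!(tx_idx - sys_tx_count, 1); // adjusted transaction_index
    assert_eq!(log_idx - sys_log_count, 1); // adjusted log_index
}
```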
src/addons/mod.rs (new file, 3 lines)

@@ -0,0 +1,3 @@
pub mod call_forwarder;
pub mod hl_node_compliance;
pub mod tx_forwarder;

@@ -37,7 +37,7 @@ impl EthForwarderExt {
         Self { client }
     }
 
-fn from_client_error(e: ClientError, internal_error_prefix: &str) -> ErrorObject {
+fn from_client_error(e: ClientError, internal_error_prefix: &str) -> ErrorObject<'static> {
     match e {
         ClientError::Call(e) => e,
         _ => ErrorObject::owned(

@@ -7,7 +7,6 @@ use std::sync::LazyLock;
 static GENESIS_HASH: B256 =
     b256!("d8fcc13b6a195b88b7b2da3722ff6cad767b13a8c1e9ffb1c73aa9d216d895f0");
 
-/// Dev hardforks
 pub static HL_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| {
     ChainHardforks::new(vec![
         (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
@@ -34,12 +33,10 @@ pub static HL_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| {
     ])
 });
 
-/// The Hyperliquid Mainnet spec
-pub fn hl_mainnet() -> ChainSpec {
+pub fn hl_chainspec(chain: Chain, genesis: &'static str) -> ChainSpec {
     ChainSpec {
-        chain: Chain::from_named(NamedChain::Hyperliquid),
-        genesis: serde_json::from_str(include_str!("genesis.json"))
-            .expect("Can't deserialize Hyperliquid Mainnet genesis json"),
+        chain,
+        genesis: serde_json::from_str(genesis).expect("Can't deserialize Hyperliquid genesis json"),
         genesis_header: empty_genesis_header(),
         paris_block_and_final_difficulty: Some((0, U256::from(0))),
         hardforks: HL_HARDFORKS.clone(),
@@ -48,6 +45,18 @@ pub fn hl_mainnet() -> ChainSpec {
     }
 }
 
+/// The Hyperliquid Mainnet spec
+pub fn hl_mainnet() -> ChainSpec {
+    hl_chainspec(Chain::from_named(NamedChain::Hyperliquid), include_str!("genesis.json"))
+}
+
+/// The Hyperliquid Testnet spec
+pub fn hl_testnet() -> ChainSpec {
+    // Note: Testnet sync starts from snapshotted state [1] instead of genesis block.
+    // So the `alloc` field is not used, which makes it fine to reuse mainnet genesis file.
+    hl_chainspec(Chain::from_id_unchecked(998), include_str!("genesis.json"))
+}
+
 /// Empty genesis header for Hyperliquid Mainnet.
 ///
 /// The exact value is not known per se, but the parent hash of block 1 is known to be

@@ -1,8 +1,7 @@
-//! Chain specification for HyperEVM.
 pub mod hl;
 pub mod parser;
 
-use crate::hardforks::{hl::HlHardfork, HlHardforks};
+use crate::hardforks::HlHardforks;
 use alloy_consensus::Header;
 use alloy_eips::eip7840::BlobParams;
 use alloy_genesis::Genesis;
@@ -13,12 +12,13 @@ use reth_chainspec::{
 };
 use reth_discv4::NodeRecord;
 use reth_evm::eth::spec::EthExecutorSpec;
-use std::{fmt::Display, sync::Arc};
+use std::fmt::Display;
 
+pub const MAINNET_CHAIN_ID: u64 = 999;
+pub const TESTNET_CHAIN_ID: u64 = 998;
+
-/// Hl chain spec type.
 #[derive(Debug, Default, Clone, PartialEq, Eq)]
 pub struct HlChainSpec {
-    /// [`ChainSpec`].
     pub inner: ChainSpec,
 }
 
@@ -72,10 +72,6 @@ impl EthChainSpec for HlChainSpec {
     fn bootnodes(&self) -> Option<Vec<NodeRecord>> {
         self.inner.bootnodes()
     }
-
-    fn is_optimism(&self) -> bool {
-        false
-    }
 }
 
 impl Hardforks for HlChainSpec {
@@ -102,23 +98,13 @@ impl Hardforks for HlChainSpec {
     }
 }
 
-impl From<ChainSpec> for HlChainSpec {
-    fn from(value: ChainSpec) -> Self {
-        Self { inner: value }
-    }
-}
-
 impl EthereumHardforks for HlChainSpec {
     fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition {
         self.inner.ethereum_fork_activation(fork)
     }
 }
 
-impl HlHardforks for HlChainSpec {
-    fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition {
-        self.fork(fork)
-    }
-}
+impl HlHardforks for HlChainSpec {}
 
 impl EthExecutorSpec for HlChainSpec {
     fn deposit_contract_address(&self) -> Option<Address> {
@@ -126,26 +112,22 @@ impl EthExecutorSpec for HlChainSpec {
     }
 }
 
-impl From<HlChainSpec> for ChainSpec {
-    fn from(value: HlChainSpec) -> Self {
-        value.inner
-    }
-}
-
-impl HlHardforks for Arc<HlChainSpec> {
-    fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition {
-        self.as_ref().hl_fork_activation(fork)
-    }
-}
-
 impl HlChainSpec {
     pub const MAINNET_RPC_URL: &str = "https://rpc.hyperliquid.xyz/evm";
     pub const TESTNET_RPC_URL: &str = "https://rpc.hyperliquid-testnet.xyz/evm";
 
     pub fn official_rpc_url(&self) -> &'static str {
         match self.inner.chain().id() {
-            999 => Self::MAINNET_RPC_URL,
-            998 => Self::TESTNET_RPC_URL,
+            MAINNET_CHAIN_ID => Self::MAINNET_RPC_URL,
+            TESTNET_CHAIN_ID => Self::TESTNET_RPC_URL,
+            _ => unreachable!("Unreachable since ChainSpecParser won't return other chains"),
+        }
+    }
+
+    pub fn official_s3_bucket(self) -> &'static str {
+        match self.inner.chain().id() {
+            MAINNET_CHAIN_ID => "hl-mainnet-evm-blocks",
+            TESTNET_CHAIN_ID => "hl-testnet-evm-blocks",
             _ => unreachable!("Unreachable since ChainSpecParser won't return other chains"),
         }
     }
 }

@@ -1,11 +1,11 @@
-use crate::chainspec::HlChainSpec;
+use crate::chainspec::{hl::hl_testnet, HlChainSpec};
 
 use super::hl::hl_mainnet;
 use reth_cli::chainspec::ChainSpecParser;
 use std::sync::Arc;
 
 /// Chains supported by HyperEVM. First value should be used as the default.
-pub const SUPPORTED_CHAINS: &[&str] = &["mainnet"];
+pub const SUPPORTED_CHAINS: &[&str] = &["mainnet", "testnet"];
 
 /// Hyperliquid chain specification parser.
 #[derive(Debug, Clone, Default)]
@@ -27,6 +27,7 @@ impl ChainSpecParser for HlChainSpecParser {
 pub fn chain_value_parser(s: &str) -> eyre::Result<Arc<HlChainSpec>> {
     match s {
         "mainnet" => Ok(Arc::new(HlChainSpec { inner: hl_mainnet() })),
+        "testnet" => Ok(Arc::new(HlChainSpec { inner: hl_testnet() })),
         _ => Err(eyre::eyre!("Unsupported chain: {}", s)),
     }
 }

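A usage sketch for the extended parser; the `reth_hl` crate path here is an assumption for illustration, not repository code:

```rust
// Hypothetical usage; the crate path is assumed.
use reth_hl::chainspec::parser::chain_value_parser;

fn main() -> eyre::Result<()> {
    // "mainnet" maps to chain id 999, the new "testnet" arm to 998.
    assert_eq!(chain_value_parser("mainnet")?.inner.chain().id(), 999);
    assert_eq!(chain_value_parser("testnet")?.inner.chain().id(), 998);
    // Anything else is rejected.
    assert!(chain_value_parser("sepolia").is_err());
    Ok(())
}
```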
@@ -1,4 +1,5 @@
 use revm::{
+    bytecode::opcode::BLOCKHASH,
     context::{ContextSetters, Evm, FrameStack},
     context_interface::ContextTr,
     handler::{
@@ -7,13 +8,16 @@ use revm::{
         EthFrame, EthPrecompiles, EvmTr, FrameInitOrResult, FrameTr, PrecompileProvider,
     },
     inspector::{InspectorEvmTr, JournalExt},
-    interpreter::{interpreter::EthInterpreter, InterpreterResult},
+    interpreter::{interpreter::EthInterpreter, Instruction, InterpreterResult},
     Inspector,
 };
 
+use crate::chainspec::MAINNET_CHAIN_ID;
+
 pub mod builder;
 pub mod ctx;
 mod exec;
+mod patch;
 
 pub struct HlEvmInner<
     CTX: ContextTr,
@@ -26,10 +30,22 @@ impl<CTX: ContextTr, INSP>
     HlEvmInner<CTX, INSP, EthInstructions<EthInterpreter, CTX>, EthPrecompiles>
 {
     pub fn new(ctx: CTX, inspector: INSP) -> Self {
+        let mut instruction = EthInstructions::new_mainnet();
+
+        const NON_PLACEHOLDER_BLOCK_HASH_HEIGHT: u64 = 243_538;
+        if ctx.chain_id() == MAINNET_CHAIN_ID &&
+            ctx.block_number() < NON_PLACEHOLDER_BLOCK_HASH_HEIGHT
+        {
+            instruction.insert_instruction(
+                BLOCKHASH,
+                Instruction::new(patch::blockhash_returning_placeholder, 20),
+            );
+        }
+
         Self(Evm {
             ctx,
             inspector,
-            instruction: EthInstructions::new_mainnet(),
+            instruction,
             precompiles: EthPrecompiles::default(),
             frame_stack: FrameStack::new(),
         })
@@ -125,23 +141,3 @@ where
         self.0.frame_return_result(result)
     }
 }
-
-// #[cfg(test)]
-// mod test {
-//     use super::{builder::HlBuilder, ctx::DefaultHl};
-//     use revm::{
-//         inspector::{InspectEvm, NoOpInspector},
-//         Context, ExecuteEvm,
-//     };
-
-//     #[test]
-//     fn default_run_bsc() {
-//         let ctx = Context::bsc();
-//         let mut evm = ctx.build_bsc_with_inspector(NoOpInspector {});
-
-//         // execute
-//         let _ = evm.replay();
-//         // inspect
-//         let _ = evm.inspect_replay();
-//     }
-// }

73
src/evm/api/patch.rs
Normal file
73
src/evm/api/patch.rs
Normal file
@ -0,0 +1,73 @@
|
|||||||
|
//! Modified version of `blockhash` instruction before block `243538`.
|
||||||
|
//!
|
||||||
|
//! This is a mainnet-specific fix for the `blockhash` instruction,
|
||||||
|
//! copied and modified from revm-interpreter-25.0.1/src/instructions/host.rs.
|
||||||
|
|
||||||
|
use alloy_primitives::keccak256;
|
||||||
|
use revm::{
|
||||||
|
context::Host,
|
||||||
|
interpreter::{
|
||||||
|
as_u64_saturated, interpreter_types::StackTr, popn_top, InstructionContext,
|
||||||
|
InterpreterTypes,
|
||||||
|
},
|
||||||
|
primitives::{BLOCK_HASH_HISTORY, U256},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[doc(hidden)]
|
||||||
|
#[macro_export]
|
||||||
|
#[collapse_debuginfo(yes)]
|
||||||
|
macro_rules! _count {
|
||||||
|
(@count) => { 0 };
|
||||||
|
(@count $head:tt $($tail:tt)*) => { 1 + _count!(@count $($tail)*) };
|
||||||
|
($($arg:tt)*) => { _count!(@count $($arg)*) };
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Pops n values from the stack and returns the top value. Fails the instruction if n values can't
|
||||||
|
/// be popped.
|
||||||
|
#[macro_export]
|
||||||
|
#[collapse_debuginfo(yes)]
|
||||||
|
macro_rules! popn_top {
|
||||||
|
([ $($x:ident),* ], $top:ident, $interpreter:expr $(,$ret:expr)? ) => {
|
||||||
|
// Workaround for https://github.com/rust-lang/rust/issues/144329.
|
||||||
|
if $interpreter.stack.len() < (1 + $crate::_count!($($x)*)) {
|
||||||
|
$interpreter.halt_underflow();
|
||||||
|
return $($ret)?;
|
||||||
|
}
|
||||||
|
let ([$( $x ),*], $top) = unsafe { $interpreter.stack.popn_top().unwrap_unchecked() };
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Implements the BLOCKHASH instruction.
|
||||||
|
///
|
||||||
|
/// Gets the hash of one of the 256 most recent complete blocks.
|
||||||
|
pub fn blockhash_returning_placeholder<WIRE: InterpreterTypes, H: Host + ?Sized>(
|
||||||
|
context: InstructionContext<'_, H, WIRE>,
|
||||||
|
) {
|
||||||
|
//gas!(context.interpreter, gas::BLOCKHASH);
|
||||||
|
popn_top!([], number, context.interpreter);
|
||||||
|
|
||||||
|
let requested_number = *number;
|
||||||
|
let block_number = context.host.block_number();
|
||||||
|
|
||||||
|
let Some(diff) = block_number.checked_sub(requested_number) else {
|
||||||
|
*number = U256::ZERO;
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
|
||||||
|
let diff = as_u64_saturated!(diff);
|
||||||
|
|
||||||
|
// blockhash should push zero if number is same as current block number.
|
||||||
|
if diff == 0 {
|
||||||
|
*number = U256::ZERO;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
*number = if diff <= BLOCK_HASH_HISTORY {
|
||||||
|
// NOTE: This is HL-specific modifcation that returns the placeholder hash before specific
|
||||||
|
// block.
|
||||||
|
let hash = keccak256(as_u64_saturated!(requested_number).to_string().as_bytes());
|
||||||
|
U256::from_be_bytes(hash.0)
|
||||||
|
} else {
|
||||||
|
U256::ZERO
|
||||||
|
}
|
||||||
|
}
|
||||||
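Note: the placeholder value introduced by this new file can be reproduced outside the interpreter. A minimal standalone sketch, mirroring only the keccak256/U256 calls the patch itself uses (the placeholder_blockhash helper is hypothetical):

    use alloy_primitives::{keccak256, U256};

    // Before the fix block, blockhash(n) resolves to keccak256 of the decimal
    // string of n rather than a real block hash.
    fn placeholder_blockhash(requested_number: u64) -> U256 {
        let hash = keccak256(requested_number.to_string().as_bytes());
        U256::from_be_bytes(hash.0)
    }

    fn main() {
        // e.g. the placeholder for block 1000 is keccak256(b"1000")
        println!("{:#x}", placeholder_blockhash(1000));
    }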
@@ -1 +0,0 @@
-
@@ -1,4 +1,3 @@
 pub mod api;
-mod handler;
 pub mod spec;
 pub mod transaction;
@@ -1,22 +1,15 @@
 use revm::primitives::hardfork::SpecId;
-use std::str::FromStr;

 #[repr(u8)]
 #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Default)]
 #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
-#[allow(non_camel_case_types)]
-#[allow(clippy::upper_case_acronyms)]
 pub enum HlSpecId {
+    /// Placeholder for evm cancun fork
     #[default]
-    V1, // V1
+    V1,
 }

 impl HlSpecId {
-    pub const fn is_enabled_in(self, other: HlSpecId) -> bool {
-        other as u8 <= self as u8
-    }
-
-    /// Converts the [`HlSpecId`] into a [`SpecId`].
     pub const fn into_eth_spec(self) -> SpecId {
         match self {
             Self::V1 => SpecId::CANCUN,
@@ -25,31 +18,8 @@ impl HlSpecId {
 }

 impl From<HlSpecId> for SpecId {
+    /// Converts the [`HlSpecId`] into a [`SpecId`].
     fn from(spec: HlSpecId) -> Self {
         spec.into_eth_spec()
     }
 }
-
-/// String identifiers for HL hardforks
-pub mod name {
-    pub const V1: &str = "V1";
-}
-
-impl FromStr for HlSpecId {
-    type Err = String;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        Ok(match s {
-            name::V1 => Self::V1,
-            _ => return Err(format!("Unknown HL spec: {s}")),
-        })
-    }
-}
-
-impl From<HlSpecId> for &'static str {
-    fn from(spec_id: HlSpecId) -> Self {
-        match spec_id {
-            HlSpecId::V1 => name::V1,
-        }
-    }
-}
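Note: the removed is_enabled_in helper relied on #[repr(u8)] discriminant ordering. A standalone sketch of that pattern under assumed names (the two-variant Spec enum is illustrative; the real enum now keeps only V1):

    // With #[repr(u8)], a later spec "enables" every spec at or below it.
    #[repr(u8)]
    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    enum Spec { V1, V2 }

    const fn is_enabled_in(this: Spec, other: Spec) -> bool {
        other as u8 <= this as u8
    }

    fn main() {
        assert!(is_enabled_in(Spec::V2, Spec::V1)); // V2 includes V1 behaviour
        assert!(!is_enabled_in(Spec::V1, Spec::V2)); // V1 predates V2
    }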
@@ -13,7 +13,7 @@ use revm::{
 #[auto_impl(&, &mut, Box, Arc)]
 pub trait HlTxTr: Transaction {}

-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, Default, PartialEq, Eq)]
 #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
 pub struct HlTxEnv<T: Transaction> {
     pub base: T,
@@ -25,12 +25,6 @@ impl<T: Transaction> HlTxEnv<T> {
     }
 }

-impl Default for HlTxEnv<TxEnv> {
-    fn default() -> Self {
-        Self { base: TxEnv::default() }
-    }
-}
-
 impl<T: Transaction> Transaction for HlTxEnv<T> {
     type AccessListItem<'a>
         = T::AccessListItem<'a>
@@ -130,12 +124,13 @@ impl FromRecoveredTx<TransactionSigned> for HlTxEnv<TxEnv> {

 impl FromTxWithEncoded<TransactionSigned> for HlTxEnv<TxEnv> {
     fn from_encoded_tx(tx: &TransactionSigned, sender: Address, _encoded: Bytes) -> Self {
+        use reth_primitives::Transaction;
         let base = match tx.clone().into_inner().into_typed_transaction() {
-            reth_primitives::Transaction::Legacy(tx) => TxEnv::from_recovered_tx(&tx, sender),
-            reth_primitives::Transaction::Eip2930(tx) => TxEnv::from_recovered_tx(&tx, sender),
-            reth_primitives::Transaction::Eip1559(tx) => TxEnv::from_recovered_tx(&tx, sender),
-            reth_primitives::Transaction::Eip4844(tx) => TxEnv::from_recovered_tx(&tx, sender),
-            reth_primitives::Transaction::Eip7702(tx) => TxEnv::from_recovered_tx(&tx, sender),
+            Transaction::Legacy(tx) => TxEnv::from_recovered_tx(&tx, sender),
+            Transaction::Eip2930(tx) => TxEnv::from_recovered_tx(&tx, sender),
+            Transaction::Eip1559(tx) => TxEnv::from_recovered_tx(&tx, sender),
+            Transaction::Eip4844(tx) => TxEnv::from_recovered_tx(&tx, sender),
+            Transaction::Eip7702(tx) => TxEnv::from_recovered_tx(&tx, sender),
         };

         Self { base }
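Note: the Default derive that replaces the hand-written impl is equivalent whenever the inner type is itself Default, because deriving on a generic struct adds a T: Default bound. A standalone sketch with illustrative stand-in types:

    #[derive(Default, Debug, PartialEq)]
    struct TxEnv {
        gas_limit: u64,
    }

    // Derived Default produces the same value as the removed manual impl.
    #[derive(Default, Debug, PartialEq)]
    struct HlTxEnv<T> {
        base: T,
    }

    fn main() {
        assert_eq!(HlTxEnv::<TxEnv>::default(), HlTxEnv { base: TxEnv::default() });
    }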
@@ -13,88 +13,5 @@ hardfork!(
     HlHardfork {
         /// Initial version
         V1,
-        /// block.number bugfix
-        V2,
-        /// gas mismatch bugfix
-        V3,
     }
 );
-
-impl HlHardfork {
-    /// Retrieves the activation block for the specified hardfork on the given chain.
-    pub fn activation_block<H: Hardfork>(self, fork: H, chain: Chain) -> Option<u64> {
-        if chain == Chain::from_named(NamedChain::Hyperliquid) {
-            return Self::hl_mainnet_activation_block(fork);
-        }
-
-        None
-    }
-
-    /// Retrieves the activation timestamp for the specified hardfork on the given chain.
-    pub fn activation_timestamp<H: Hardfork>(self, fork: H, chain: Chain) -> Option<u64> {
-        None
-    }
-
-    /// Retrieves the activation block for the specified hardfork on the HyperLiquid mainnet.
-    pub fn hl_mainnet_activation_block<H: Hardfork>(fork: H) -> Option<u64> {
-        match_hardfork(
-            fork,
-            |fork| match fork {
-                EthereumHardfork::Frontier |
-                EthereumHardfork::Homestead |
-                EthereumHardfork::Tangerine |
-                EthereumHardfork::SpuriousDragon |
-                EthereumHardfork::Byzantium |
-                EthereumHardfork::Constantinople |
-                EthereumHardfork::Petersburg |
-                EthereumHardfork::Istanbul |
-                EthereumHardfork::MuirGlacier |
-                EthereumHardfork::Berlin |
-                EthereumHardfork::London |
-                EthereumHardfork::Shanghai |
-                EthereumHardfork::Cancun => Some(0),
-                _ => None,
-            },
-            |fork| match fork {
-                Self::V1 | Self::V2 | Self::V3 => Some(0),
-                _ => None,
-            },
-        )
-    }
-
-    /// Hl mainnet list of hardforks.
-    pub fn hl_mainnet() -> ChainHardforks {
-        ChainHardforks::new(vec![
-            (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::London.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Shanghai.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Cancun.boxed(), ForkCondition::Block(0)),
-            (Self::V1.boxed(), ForkCondition::Block(0)),
-            (Self::V2.boxed(), ForkCondition::Block(0)),
-            (Self::V3.boxed(), ForkCondition::Block(0)),
-        ])
-    }
-}
-
-/// Match helper method since it's not possible to match on `dyn Hardfork`
-fn match_hardfork<H, HF, HHF>(fork: H, hardfork_fn: HF, hl_hardfork_fn: HHF) -> Option<u64>
-where
-    H: Hardfork,
-    HF: Fn(&EthereumHardfork) -> Option<u64>,
-    HHF: Fn(&HlHardfork) -> Option<u64>,
-{
-    let fork: &dyn Any = &fork;
-    if let Some(fork) = fork.downcast_ref::<EthereumHardfork>() {
-        return hardfork_fn(fork);
-    }
-    fork.downcast_ref::<HlHardfork>().and_then(hl_hardfork_fn)
-}
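Note: the removed match_hardfork helper works around the fact that a `dyn Hardfork` trait object cannot be matched on directly. A standalone sketch of the same downcast-dispatch pattern (both enums are illustrative stand-ins):

    use std::any::Any;

    #[derive(Debug)]
    enum EthFork { Cancun }
    #[derive(Debug)]
    enum HlFork { V1 }

    // Try each concrete hardfork enum in turn via Any::downcast_ref.
    fn activation_block(fork: &dyn Any) -> Option<u64> {
        if let Some(f) = fork.downcast_ref::<EthFork>() {
            return match f { EthFork::Cancun => Some(0) };
        }
        fork.downcast_ref::<HlFork>().map(|f| match f { HlFork::V1 => 0 })
    }

    fn main() {
        assert_eq!(activation_block(&EthFork::Cancun), Some(0));
        assert_eq!(activation_block(&HlFork::V1), Some(0));
    }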
@@ -1,13 +1,14 @@
-//! Hard forks of hl protocol.
+//! Hard forks of HyperEVM.
 #![allow(unused)]
-use hl::HlHardfork;
-use reth_chainspec::{EthereumHardforks, ForkCondition};

 pub mod hl;

+use hl::HlHardfork;
+use reth_chainspec::{EthereumHardforks, ForkCondition};
+use std::sync::Arc;
+
 /// Extends [`EthereumHardforks`] with hl helper methods.
-pub trait HlHardforks: EthereumHardforks {
-    /// Retrieves [`ForkCondition`] by an [`HlHardfork`]. If `fork` is not present, returns
-    /// [`ForkCondition::Never`].
-    fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition;
-}
+///
+/// Currently a placeholder for future use.
+pub trait HlHardforks: EthereumHardforks {}
+impl<T: HlHardforks> HlHardforks for Arc<T> {}
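Note: the new blanket impl keeps Arc-wrapped chain specs usable wherever the marker trait is required. A minimal sketch (ChainSpec and requires_forks are illustrative):

    use std::sync::Arc;

    trait HlHardforks {}

    struct ChainSpec;
    impl HlHardforks for ChainSpec {}
    // Blanket impl: Arc<T> satisfies the same bound as T.
    impl<T: HlHardforks> HlHardforks for Arc<T> {}

    fn requires_forks<T: HlHardforks>(_spec: &T) {}

    fn main() {
        requires_forks(&ChainSpec);
        requires_forks(&Arc::new(ChainSpec)); // works thanks to the blanket impl
    }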
@@ -1,310 +0,0 @@
-/// We need to override the following methods:
-/// Filter:
-/// - eth_getLogs
-/// - eth_subscribe
-///
-/// Block (handled by HlEthApi already):
-/// - eth_getBlockByNumber/eth_getBlockByHash
-/// - eth_getBlockReceipts
-use crate::HlBlock;
-use alloy_consensus::TxReceipt;
-use alloy_rpc_types::{
-    pubsub::{Params, SubscriptionKind},
-    Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind,
-};
-use jsonrpsee::{PendingSubscriptionSink, SubscriptionMessage, SubscriptionSink};
-use jsonrpsee_core::{async_trait, RpcResult};
-use jsonrpsee_types::ErrorObject;
-use reth::{
-    api::FullNodeComponents, builder::rpc::RpcContext, rpc::result::internal_rpc_err,
-    tasks::TaskSpawner,
-};
-use reth_network::NetworkInfo;
-use reth_primitives::NodePrimitives;
-use reth_provider::{BlockIdReader, BlockReader, ReceiptProvider, TransactionsProvider};
-use reth_rpc::{EthFilter, EthPubSub};
-use reth_rpc_eth_api::{
-    EthApiServer, EthFilterApiServer, EthPubSubApiServer, FullEthApiTypes, RpcBlock, RpcHeader,
-    RpcNodeCore, RpcNodeCoreExt, RpcReceipt, RpcTransaction, RpcTxReq,
-};
-use serde::Serialize;
-use std::sync::Arc;
-use tokio_stream::{Stream, StreamExt};
-use tracing::trace;
-
-pub trait EthWrapper:
-    EthApiServer<
-        RpcTxReq<Self::NetworkTypes>,
-        RpcTransaction<Self::NetworkTypes>,
-        RpcBlock<Self::NetworkTypes>,
-        RpcReceipt<Self::NetworkTypes>,
-        RpcHeader<Self::NetworkTypes>,
-    > + FullEthApiTypes
-    + RpcNodeCoreExt<
-        Provider: BlockIdReader + BlockReader<Block = HlBlock>,
-        Primitives: NodePrimitives<
-            SignedTx = <<Self as RpcNodeCore>::Provider as TransactionsProvider>::Transaction,
-        >,
-        Network: NetworkInfo,
-    > + 'static
-{
-}
-
-impl<
-    T:
-        EthApiServer<
-            RpcTxReq<Self::NetworkTypes>,
-            RpcTransaction<Self::NetworkTypes>,
-            RpcBlock<Self::NetworkTypes>,
-            RpcReceipt<Self::NetworkTypes>,
-            RpcHeader<Self::NetworkTypes>,
-        > + FullEthApiTypes
-        + RpcNodeCoreExt<
-            Provider: BlockIdReader + BlockReader<Block = HlBlock>,
-            Primitives: NodePrimitives<
-                SignedTx = <<Self as RpcNodeCore>::Provider as TransactionsProvider>::Transaction,
-            >,
-            Network: NetworkInfo,
-        > + 'static,
-> EthWrapper for T {
-}
-
-pub struct HlNodeFilterHttp<Eth: EthWrapper> {
-    filter: Arc<EthFilter<Eth>>,
-    provider: Arc<Eth::Provider>,
-}
-
-impl<Eth: EthWrapper> HlNodeFilterHttp<Eth> {
-    pub fn new(filter: Arc<EthFilter<Eth>>, provider: Arc<Eth::Provider>) -> Self {
-        Self { filter, provider }
-    }
-}
-
-#[async_trait]
-impl<Eth: EthWrapper> EthFilterApiServer<RpcTransaction<Eth::NetworkTypes>>
-    for HlNodeFilterHttp<Eth>
-{
-    /// Handler for `eth_newFilter`
-    async fn new_filter(&self, filter: Filter) -> RpcResult<FilterId> {
-        trace!(target: "rpc::eth", "Serving eth_newFilter");
-        self.filter.new_filter(filter).await
-    }
-
-    /// Handler for `eth_newBlockFilter`
-    async fn new_block_filter(&self) -> RpcResult<FilterId> {
-        trace!(target: "rpc::eth", "Serving eth_newBlockFilter");
-        self.filter.new_block_filter().await
-    }
-
-    /// Handler for `eth_newPendingTransactionFilter`
-    async fn new_pending_transaction_filter(
-        &self,
-        kind: Option<PendingTransactionFilterKind>,
-    ) -> RpcResult<FilterId> {
-        trace!(target: "rpc::eth", "Serving eth_newPendingTransactionFilter");
-        self.filter.new_pending_transaction_filter(kind).await
-    }
-
-    /// Handler for `eth_getFilterChanges`
-    async fn filter_changes(
-        &self,
-        id: FilterId,
-    ) -> RpcResult<FilterChanges<RpcTransaction<Eth::NetworkTypes>>> {
-        trace!(target: "rpc::eth", "Serving eth_getFilterChanges");
-        self.filter.filter_changes(id).await.map_err(ErrorObject::from)
-    }
-
-    /// Returns an array of all logs matching filter with given id.
-    ///
-    /// Returns an error if no matching log filter exists.
-    ///
-    /// Handler for `eth_getFilterLogs`
-    async fn filter_logs(&self, id: FilterId) -> RpcResult<Vec<Log>> {
-        trace!(target: "rpc::eth", "Serving eth_getFilterLogs");
-        self.filter.filter_logs(id).await.map_err(ErrorObject::from)
-    }
-
-    /// Handler for `eth_uninstallFilter`
-    async fn uninstall_filter(&self, id: FilterId) -> RpcResult<bool> {
-        trace!(target: "rpc::eth", "Serving eth_uninstallFilter");
-        self.filter.uninstall_filter(id).await
-    }
-
-    /// Returns logs matching given filter object.
-    ///
-    /// Handler for `eth_getLogs`
-    async fn logs(&self, filter: Filter) -> RpcResult<Vec<Log>> {
-        trace!(target: "rpc::eth", "Serving eth_getLogs");
-        let logs = EthFilterApiServer::logs(&*self.filter, filter).await?;
-        let provider = self.provider.clone();
-
-        Ok(logs.into_iter().filter_map(|log| exclude_system_tx::<Eth>(log, &provider)).collect())
-    }
-}
-
-pub struct HlNodeFilterWs<Eth: EthWrapper> {
-    pubsub: Arc<EthPubSub<Eth>>,
-    provider: Arc<Eth::Provider>,
-    subscription_task_spawner: Box<dyn TaskSpawner + 'static>,
-}
-
-impl<Eth: EthWrapper> HlNodeFilterWs<Eth> {
-    pub fn new(
-        pubsub: Arc<EthPubSub<Eth>>,
-        provider: Arc<Eth::Provider>,
-        subscription_task_spawner: Box<dyn TaskSpawner + 'static>,
-    ) -> Self {
-        Self { pubsub, provider, subscription_task_spawner }
-    }
-}
-
-#[async_trait]
-impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>>
-    for HlNodeFilterWs<Eth>
-{
-    /// Handler for `eth_subscribe`
-    async fn subscribe(
-        &self,
-        pending: PendingSubscriptionSink,
-        kind: SubscriptionKind,
-        params: Option<Params>,
-    ) -> jsonrpsee::core::SubscriptionResult {
-        let sink = pending.accept().await?;
-        let pubsub = self.pubsub.clone();
-        let provider = self.provider.clone();
-        self.subscription_task_spawner.spawn(Box::pin(async move {
-            if kind == SubscriptionKind::Logs {
-                // if no params are provided, used default filter params
-                let filter = match params {
-                    Some(Params::Logs(filter)) => *filter,
-                    Some(Params::Bool(_)) => {
-                        return;
-                    }
-                    _ => Default::default(),
-                };
-                let _ = pipe_from_stream(
-                    sink,
-                    pubsub
-                        .log_stream(filter)
-                        .filter_map(|log| exclude_system_tx::<Eth>(log, &provider)),
-                )
-                .await;
-            } else {
-                let _ = pubsub.handle_accepted(sink, kind, params).await;
-            };
-        }));
-
-        Ok(())
-    }
-}
-
-fn exclude_system_tx<Eth: EthWrapper>(mut log: Log, provider: &Eth::Provider) -> Option<Log> {
-    let transaction_index = log.transaction_index?;
-    let log_index = log.log_index?;
-
-    let receipts = provider.receipts_by_block(log.block_number?.into()).unwrap()?;
-
-    // System transactions are always at the beginning of the block,
-    // so we can use the transaction index to determine if the log is from a system transaction,
-    // and if it is, we can exclude it.
-    //
-    // For non-system transactions, we can just return the log as is, and the client will
-    // adjust the transaction index accordingly.
-    let mut system_tx_count = 0u64;
-    let mut system_tx_logs_count = 0u64;
-
-    for receipt in receipts {
-        let is_system_tx = receipt.cumulative_gas_used() == 0;
-        if is_system_tx {
-            system_tx_count += 1;
-            system_tx_logs_count += receipt.logs().len() as u64;
-        }
-    }
-
-    if system_tx_count > transaction_index {
-        return None;
-    }
-
-    log.transaction_index = Some(transaction_index - system_tx_count);
-    log.log_index = Some(log_index - system_tx_logs_count);
-    Some(log)
-}
-
-/// Helper to convert a serde error into an [`ErrorObject`]
-#[derive(Debug, thiserror::Error)]
-#[error("Failed to serialize subscription item: {0}")]
-pub struct SubscriptionSerializeError(#[from] serde_json::Error);
-
-impl SubscriptionSerializeError {
-    const fn new(err: serde_json::Error) -> Self {
-        Self(err)
-    }
-}
-
-impl From<SubscriptionSerializeError> for ErrorObject<'static> {
-    fn from(value: SubscriptionSerializeError) -> Self {
-        internal_rpc_err(value.to_string())
-    }
-}
-
-async fn pipe_from_stream<T, St>(
-    sink: SubscriptionSink,
-    mut stream: St,
-) -> Result<(), ErrorObject<'static>>
-where
-    St: Stream<Item = T> + Unpin,
-    T: Serialize,
-{
-    loop {
-        tokio::select! {
-            _ = sink.closed() => {
-                // connection dropped
-                break Ok(())
-            },
-            maybe_item = stream.next() => {
-                let item = match maybe_item {
-                    Some(item) => item,
-                    None => {
-                        // stream ended
-                        break Ok(())
-                    },
-                };
-                let msg = SubscriptionMessage::new(
-                    sink.method_name(),
-                    sink.subscription_id(),
-                    &item
-                ).map_err(SubscriptionSerializeError::new)?;
-
-                if sink.send(msg).await.is_err() {
-                    break Ok(());
-                }
-            }
-        }
-    }
-}
-
-pub fn install_hl_node_compliance<Node, EthApi>(
-    ctx: RpcContext<Node, EthApi>,
-) -> Result<(), eyre::Error>
-where
-    Node: FullNodeComponents,
-    Node::Provider: BlockIdReader + BlockReader<Block = crate::HlBlock>,
-    EthApi: EthWrapper,
-{
-    ctx.modules.replace_configured(
-        HlNodeFilterHttp::new(
-            Arc::new(ctx.registry.eth_handlers().filter.clone()),
-            Arc::new(ctx.registry.eth_api().provider().clone()),
-        )
-        .into_rpc(),
-    )?;
-    ctx.modules.replace_configured(
-        HlNodeFilterWs::new(
-            Arc::new(ctx.registry.eth_handlers().pubsub.clone()),
-            Arc::new(ctx.registry.eth_api().provider().clone()),
-            Box::new(ctx.node().task_executor().clone()),
-        )
-        .into_rpc(),
-    )?;
-    Ok(())
-}
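Note: the index arithmetic in the deleted exclude_system_tx is the core of this file: system transactions sit at the front of a block and are detected by cumulative_gas_used == 0. A standalone sketch of the adjustment (the Receipt struct and adjust_indices are stand-ins):

    struct Receipt {
        cumulative_gas_used: u64,
        log_count: u64,
    }

    // Subtract system-tx transaction/log counts from user-facing indices;
    // logs emitted by the system transactions themselves are dropped.
    fn adjust_indices(tx_index: u64, log_index: u64, receipts: &[Receipt]) -> Option<(u64, u64)> {
        let system: Vec<&Receipt> =
            receipts.iter().filter(|r| r.cumulative_gas_used == 0).collect();
        let system_txs = system.len() as u64;
        let system_logs: u64 = system.iter().map(|r| r.log_count).sum();

        if system_txs > tx_index {
            return None; // the log belongs to a system transaction
        }
        Some((tx_index - system_txs, log_index - system_logs))
    }

    fn main() {
        let receipts = [
            Receipt { cumulative_gas_used: 0, log_count: 2 },
            Receipt { cumulative_gas_used: 21_000, log_count: 1 },
        ];
        assert_eq!(adjust_indices(0, 1, &receipts), None); // system tx log dropped
        assert_eq!(adjust_indices(1, 2, &receipts), Some((0, 0)));
    }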
@@ -1,11 +1,9 @@
-pub mod call_forwarder;
+pub mod addons;
 pub mod chainspec;
 pub mod consensus;
 mod evm;
 mod hardforks;
-pub mod hl_node_compliance;
 pub mod node;
 pub mod pseudo_peer;
-pub mod tx_forwarder;

 pub use node::primitives::{HlBlock, HlBlockBody, HlPrimitives};
88  src/main.rs
@@ -1,15 +1,20 @@
+use std::sync::Arc;
+
 use clap::Parser;
-use reth::builder::NodeHandle;
+use reth::builder::{NodeBuilder, NodeHandle, WithLaunchContext};
+use reth_db::DatabaseEnv;
 use reth_hl::{
-    call_forwarder::{self, CallForwarderApiServer},
-    chainspec::parser::HlChainSpecParser,
+    addons::{
+        call_forwarder::{self, CallForwarderApiServer},
         hl_node_compliance::install_hl_node_compliance,
+        tx_forwarder::{self, EthForwarderApiServer},
+    },
+    chainspec::{parser::HlChainSpecParser, HlChainSpec},
     node::{
         cli::{Cli, HlNodeArgs},
         storage::tables::Tables,
         HlNode,
     },
-    tx_forwarder::{self, EthForwarderApiServer},
 };
 use tracing::info;

@@ -26,47 +31,52 @@ fn main() -> eyre::Result<()> {
         std::env::set_var("RUST_BACKTRACE", "1");
     }

-    Cli::<HlChainSpecParser, HlNodeArgs>::parse().run(|builder, ext| async move {
-        let default_upstream_rpc_url = builder.config().chain.official_rpc_url();
-        builder.builder.database.create_tables_for::<Tables>()?;
+    Cli::<HlChainSpecParser, HlNodeArgs>::parse().run(
+        |builder: WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, HlChainSpec>>,
+         ext: HlNodeArgs| async move {
+            let default_upstream_rpc_url = builder.config().chain.official_rpc_url();

-        let (node, engine_handle_tx) =
-            HlNode::new(ext.block_source_args.parse().await?, ext.hl_node_compliant);
-        let NodeHandle { node, node_exit_future: exit_future } = builder
-            .node(node)
-            .extend_rpc_modules(move |ctx| {
-                let upstream_rpc_url =
-                    ext.upstream_rpc_url.unwrap_or_else(|| default_upstream_rpc_url.to_owned());
+            let (node, engine_handle_tx) = HlNode::new(ext.block_source_args.parse().await?);
+            let NodeHandle { node, node_exit_future: exit_future } = builder
+                .node(node)
+                .extend_rpc_modules(move |ctx| {
+                    let upstream_rpc_url =
+                        ext.upstream_rpc_url.unwrap_or_else(|| default_upstream_rpc_url.to_owned());

-                ctx.modules.replace_configured(
-                    tx_forwarder::EthForwarderExt::new(upstream_rpc_url.clone()).into_rpc(),
-                )?;
-                info!("Transaction will be forwarded to {}", upstream_rpc_url);
-
-                if ext.forward_call {
-                    ctx.modules.replace_configured(
-                        call_forwarder::CallForwarderExt::new(
-                            upstream_rpc_url.clone(),
-                            ctx.registry.eth_api().clone(),
-                        )
-                        .into_rpc(),
-                    )?;
-                    info!("Call/gas estimation will be forwarded to {}", upstream_rpc_url);
-                }
+                    ctx.modules.replace_configured(
+                        tx_forwarder::EthForwarderExt::new(upstream_rpc_url.clone()).into_rpc(),
+                    )?;
+                    info!("Transaction will be forwarded to {}", upstream_rpc_url);
+
+                    if ext.forward_call {
+                        ctx.modules.replace_configured(
+                            call_forwarder::CallForwarderExt::new(
+                                upstream_rpc_url.clone(),
+                                ctx.registry.eth_api().clone(),
+                            )
+                            .into_rpc(),
+                        )?;
+                        info!("Call/gas estimation will be forwarded to {}", upstream_rpc_url);
+                    }

-                if ext.hl_node_compliant {
-                    install_hl_node_compliance(ctx)?;
-                    info!("hl-node compliant mode enabled");
-                }
+                    if ext.hl_node_compliant {
+                        install_hl_node_compliance(ctx)?;
+                        info!("hl-node compliant mode enabled");
+                    }

-                Ok(())
-            })
-            .launch()
-            .await?;
+                    Ok(())
+                })
+                .apply(|builder| {
+                    builder.db().create_tables_for::<Tables>().expect("create tables");
+                    builder
+                })
+                .launch()
+                .await?;

-        engine_handle_tx.send(node.beacon_engine_handle.clone()).unwrap();
-
-        exit_future.await
-    })?;
+            engine_handle_tx.send(node.beacon_engine_handle.clone()).unwrap();
+
+            exit_future.await
+        },
+    )?;
     Ok(())
 }
@@ -1,8 +1,6 @@
 use crate::{
     chainspec::{parser::HlChainSpecParser, HlChainSpec},
-    node::{
-        consensus::HlConsensus, evm::config::HlEvmConfig, network::HlNetworkPrimitives, HlNode,
-    },
+    node::{consensus::HlConsensus, evm::config::HlEvmConfig, storage::tables::Tables, HlNode},
     pseudo_peer::BlockSourceArgs,
 };
 use clap::{Args, Parser};
@@ -11,21 +9,26 @@ use reth::{
     builder::{NodeBuilder, WithLaunchContext},
     cli::Commands,
     prometheus_exporter::install_prometheus_recorder,
-    version::{LONG_VERSION, SHORT_VERSION},
+    version::version_metadata,
     CliRunner,
 };
 use reth_chainspec::EthChainSpec;
 use reth_cli::chainspec::ChainSpecParser;
-use reth_cli_commands::launcher::FnLauncher;
-use reth_db::DatabaseEnv;
+use reth_cli_commands::{common::EnvironmentArgs, launcher::FnLauncher};
+use reth_db::{init_db, mdbx::init_db_for, DatabaseEnv};
 use reth_tracing::FileWorkerGuard;
 use std::{
     fmt::{self},
-    future::Future,
     sync::Arc,
 };
 use tracing::info;

+macro_rules! not_applicable {
+    ($command:ident) => {
+        todo!("{} is not applicable for HL", stringify!($command))
+    };
+}
+
 #[derive(Debug, Clone, Args)]
 #[non_exhaustive]
 pub struct HlNodeArgs {
@@ -58,7 +61,7 @@ pub struct HlNodeArgs {
 ///
 /// This is the entrypoint to the executable.
 #[derive(Debug, Parser)]
-#[command(author, version = SHORT_VERSION, long_version = LONG_VERSION, about = "Reth", long_about = None)]
+#[command(author, version = version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)]
 pub struct Cli<Spec: ChainSpecParser = HlChainSpecParser, Ext: clap::Args + fmt::Debug = HlNodeArgs>
 {
     /// The command to run
@@ -78,20 +81,25 @@ where
     ///
     /// This accepts a closure that is used to launch the node via the
     /// [`NodeCommand`](reth_cli_commands::node::NodeCommand).
-    pub fn run<L, Fut>(self, launcher: L) -> eyre::Result<()>
-    where
-        L: FnOnce(WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>, Ext) -> Fut,
-        Fut: Future<Output = eyre::Result<()>>,
-    {
+    pub fn run(
+        self,
+        launcher: impl AsyncFnOnce(
+            WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>,
+            Ext,
+        ) -> eyre::Result<()>,
+    ) -> eyre::Result<()> {
         self.with_runner(CliRunner::try_default_runtime()?, launcher)
     }

     /// Execute the configured cli command with the provided [`CliRunner`].
-    pub fn with_runner<L, Fut>(mut self, runner: CliRunner, launcher: L) -> eyre::Result<()>
-    where
-        L: FnOnce(WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>, Ext) -> Fut,
-        Fut: Future<Output = eyre::Result<()>>,
-    {
+    pub fn with_runner(
+        mut self,
+        runner: CliRunner,
+        launcher: impl AsyncFnOnce(
+            WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>,
+            Ext,
+        ) -> eyre::Result<()>,
+    ) -> eyre::Result<()> {
         // Add network name if available to the logs dir
         if let Some(chain_spec) = self.command.chain_spec() {
             self.logs.log_file_directory =
@@ -115,15 +123,14 @@ where
                 runner.run_blocking_until_ctrl_c(command.execute::<HlNode>())
             }
             Commands::InitState(command) => {
+                // Need to invoke `init_db_for` to create `BlockReadPrecompileCalls` table
+                Self::init_db(&command.env)?;
                 runner.run_blocking_until_ctrl_c(command.execute::<HlNode>())
             }
             Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()),
             Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::<HlNode>()),
-            Commands::Stage(command) => runner.run_command_until_exit(|ctx| {
-                command.execute::<HlNode, _, _, HlNetworkPrimitives>(ctx, components)
-            }),
-            Commands::P2P(command) => {
-                runner.run_until_ctrl_c(command.execute::<HlNetworkPrimitives>())
+            Commands::Stage(command) => {
+                runner.run_command_until_exit(|ctx| command.execute::<HlNode, _>(ctx, components))
             }
             Commands::Config(command) => runner.run_until_ctrl_c(command.execute()),
             Commands::Recover(command) => {
@@ -131,17 +138,15 @@ where
             }
             Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::<HlNode>()),
             Commands::Import(command) => {
-                runner.run_blocking_until_ctrl_c(command.execute::<HlNode, _, _>(components))
+                runner.run_blocking_until_ctrl_c(command.execute::<HlNode, _>(components))
             }
-            Commands::Debug(_command) => todo!(),
+            Commands::P2P(_command) => not_applicable!(P2P),
+            Commands::ImportEra(_command) => not_applicable!(ImportEra),
+            Commands::Download(_command) => not_applicable!(Download),
+            Commands::ExportEra(_) => not_applicable!(ExportEra),
+            Commands::ReExecute(_) => not_applicable!(ReExecute),
             #[cfg(feature = "dev")]
-            Commands::TestVectors(_command) => todo!(),
-            Commands::ImportEra(_command) => {
-                todo!()
-            }
-            Commands::Download(_command) => {
-                todo!()
-            }
+            Commands::TestVectors(_command) => not_applicable!(TestVectors),
         }
     }

@@ -153,4 +158,12 @@ where
         let guard = self.logs.init_tracing()?;
         Ok(guard)
     }
+
+    fn init_db(env: &EnvironmentArgs<C>) -> eyre::Result<()> {
+        let data_dir = env.datadir.clone().resolve_datadir(env.chain.chain());
+        let db_path = data_dir.db();
+        init_db(db_path.clone(), env.db.database_args())?;
+        init_db_for::<_, Tables>(db_path, env.db.database_args())?;
+        Ok(())
+    }
 }
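Note: the run/with_runner signature change above collapses the FnOnce + Future generic pair into AsyncFnOnce (stable since Rust 1.85). A standalone before/after sketch, with a u32 argument standing in for the builder/ext pair:

    use std::future::Future;

    // Old shape: a closure returning a nameable future.
    async fn run_old<L, Fut>(launcher: L) -> Result<(), String>
    where
        L: FnOnce(u32) -> Fut,
        Fut: Future<Output = Result<(), String>>,
    {
        launcher(42).await
    }

    // New shape: the same contract spelled with AsyncFnOnce.
    async fn run_new(launcher: impl AsyncFnOnce(u32) -> Result<(), String>) -> Result<(), String> {
        launcher(42).await
    }

    fn main() {
        // Both accept an async closure; drive the future with any executor.
        let _fut = run_new(async |n| if n == 42 { Ok(()) } else { Err("boom".into()) });
    }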
@@ -1,4 +1,5 @@
 use crate::{hardforks::HlHardforks, node::HlNode, HlBlock, HlBlockBody, HlPrimitives};
+use alloy_consensus::Header;
 use reth::{
     api::FullNodeTypes,
     beacon_consensus::EthBeaconConsensus,
@@ -39,7 +40,10 @@ pub struct HlConsensus<ChainSpec> {
     chain_spec: Arc<ChainSpec>,
 }

-impl<ChainSpec: EthChainSpec + HlHardforks> HlConsensus<ChainSpec> {
+impl<ChainSpec> HlConsensus<ChainSpec>
+where
+    ChainSpec: EthChainSpec + HlHardforks,
+{
     /// Create a new instance of [`HlConsensus`]
     pub fn new(chain_spec: Arc<ChainSpec>) -> Self {
         Self { inner: EthBeaconConsensus::new(chain_spec.clone()), chain_spec }
@@ -62,15 +66,19 @@ pub fn validate_against_parent_timestamp<H: BlockHeader>(
     Ok(())
 }

-impl<ChainSpec: EthChainSpec + HlHardforks> HeaderValidator for HlConsensus<ChainSpec> {
-    fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> {
+impl<H, ChainSpec> HeaderValidator<H> for HlConsensus<ChainSpec>
+where
+    H: BlockHeader,
+    ChainSpec: EthChainSpec<Header = H> + HlHardforks,
+{
+    fn validate_header(&self, header: &SealedHeader<H>) -> Result<(), ConsensusError> {
         self.inner.validate_header(header)
     }

     fn validate_header_against_parent(
         &self,
-        header: &SealedHeader,
-        parent: &SealedHeader,
+        header: &SealedHeader<H>,
+        parent: &SealedHeader<H>,
     ) -> Result<(), ConsensusError> {
         validate_against_parent_hash_number(header.header(), parent)?;

@@ -83,7 +91,7 @@ impl HeaderValidator for HlConsensus
         // )?;

         // ensure that the blob gas fields for this block
-        if let Some(blob_params) = self.chain_spec.blob_params_at_timestamp(header.timestamp) {
+        if let Some(blob_params) = self.chain_spec.blob_params_at_timestamp(header.timestamp()) {
             validate_against_parent_4844(header.header(), parent.header(), blob_params)?;
         }

@@ -91,7 +99,10 @@ impl HeaderValidator for HlConsensus
     }
 }

-impl<ChainSpec: EthChainSpec + HlHardforks> Consensus<HlBlock> for HlConsensus<ChainSpec> {
+impl<ChainSpec> Consensus<HlBlock> for HlConsensus<ChainSpec>
+where
+    ChainSpec: EthChainSpec<Header = Header> + HlHardforks,
+{
     type Error = ConsensusError;

     fn validate_body_against_header(
@@ -135,8 +146,9 @@ impl Consensus<HlBlock> for HlConsensus

 mod reth_copy;

-impl<ChainSpec: EthChainSpec<Header = alloy_consensus::Header> + HlHardforks>
-    FullConsensus<HlPrimitives> for HlConsensus<ChainSpec>
+impl<ChainSpec> FullConsensus<HlPrimitives> for HlConsensus<ChainSpec>
+where
+    ChainSpec: EthChainSpec<Header = Header> + HlHardforks,
 {
     fn validate_block_post_execution(
         &self,
@@ -1,22 +1,9 @@
-use std::sync::Arc;
+use crate::{HlBlock, HlPrimitives};

-use crate::{
-    node::{rpc::engine_api::payload::HlPayloadTypes, HlNode},
-    HlBlock, HlPrimitives,
-};
 use alloy_eips::eip7685::Requests;
 use alloy_primitives::U256;
-use reth::{
-    api::FullNodeTypes,
-    builder::{components::PayloadServiceBuilder, BuilderContext},
-    payload::{PayloadBuilderHandle, PayloadServiceCommand},
-    transaction_pool::TransactionPool,
-};
-use reth_evm::ConfigureEvm;
 use reth_payload_primitives::BuiltPayload;
 use reth_primitives::SealedBlock;
-use tokio::sync::{broadcast, mpsc};
-use tracing::warn;
+use std::sync::Arc;

 /// Built payload for Hl. This is similar to [`EthBuiltPayload`] but without sidecars as those
 /// included into [`HlBlock`].
@@ -45,73 +32,3 @@ impl BuiltPayload for HlBuiltPayload {
         self.requests.clone()
     }
 }
-
-#[derive(Debug, Clone, Copy, Default)]
-#[non_exhaustive]
-pub struct HlPayloadServiceBuilder;
-
-impl<Node, Pool, Evm> PayloadServiceBuilder<Node, Pool, Evm> for HlPayloadServiceBuilder
-where
-    Node: FullNodeTypes<Types = HlNode>,
-    Pool: TransactionPool,
-    Evm: ConfigureEvm,
-{
-    async fn spawn_payload_builder_service(
-        self,
-        ctx: &BuilderContext<Node>,
-        _pool: Pool,
-        _evm_config: Evm,
-    ) -> eyre::Result<PayloadBuilderHandle<HlPayloadTypes>> {
-        let (tx, mut rx) = mpsc::unbounded_channel();
-
-        ctx.task_executor().spawn_critical("payload builder", async move {
-            let mut subscriptions = Vec::new();
-
-            while let Some(message) = rx.recv().await {
-                match message {
-                    PayloadServiceCommand::Subscribe(tx) => {
-                        let (events_tx, events_rx) = broadcast::channel(100);
-                        // Retain senders to make sure that channels are not getting closed
-                        subscriptions.push(events_tx);
-                        let _ = tx.send(events_rx);
-                    }
-                    message => warn!(?message, "Noop payload service received a message"),
-                }
-            }
-        });
-
-        Ok(PayloadBuilderHandle::new(tx))
-    }
-}
-
-// impl From<EthBuiltPayload> for HlBuiltPayload {
-//     fn from(value: EthBuiltPayload) -> Self {
-//         let EthBuiltPayload { id, block, fees, sidecars, requests } = value;
-//         HlBuiltPayload {
-//             id,
-//             block: block.into(),
-//             fees,
-//             requests,
-//         }
-//     }
-// }
-
-// pub struct HlPayloadBuilder<Inner> {
-//     inner: Inner,
-// }
-
-// impl<Inner> PayloadBuilder for HlPayloadBuilder<Inner>
-// where
-//     Inner: PayloadBuilder<BuiltPayload = EthBuiltPayload>,
-// {
-//     type Attributes = Inner::Attributes;
-//     type BuiltPayload = HlBuiltPayload;
-//     type Error = Inner::Error;
-
-//     fn try_build(
-//         &self,
-//         args: BuildArguments<Self::Attributes, Self::BuiltPayload>,
-//     ) -> Result<BuildOutcome<Self::BuiltPayload>, PayloadBuilderError> {
-//         let outcome = self.inner.try_build(args)?;
-//     }
-// }
@@ -6,12 +6,13 @@ use crate::{
     node::{
         evm::{executor::is_system_transaction, receipt_builder::RethReceiptBuilder},
         primitives::{BlockBody, TransactionSigned},
+        rpc::engine_api::validator::HlExecutionData,
         types::HlExtras,
     },
     HlBlock, HlBlockBody, HlPrimitives,
 };
 use alloy_consensus::{BlockHeader, Header, Transaction as _, TxReceipt, EMPTY_OMMER_ROOT_HASH};
-use alloy_eips::merge::BEACON_NONCE;
+use alloy_eips::{merge::BEACON_NONCE, Encodable2718};
 use alloy_primitives::{Log, U256};
 use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
 use reth_evm::{
@@ -19,12 +20,13 @@ use reth_evm::{
     eth::{receipt_builder::ReceiptBuilder, EthBlockExecutionCtx},
     execute::{BlockAssembler, BlockAssemblerInput},
     precompiles::PrecompilesMap,
-    ConfigureEvm, EvmEnv, EvmFactory, ExecutionCtxFor, FromRecoveredTx, FromTxWithEncoded,
-    IntoTxEnv, NextBlockEnvAttributes,
+    ConfigureEngineEvm, ConfigureEvm, EvmEnv, EvmEnvFor, EvmFactory, ExecutableTxIterator,
+    ExecutionCtxFor, FromRecoveredTx, FromTxWithEncoded, IntoTxEnv, NextBlockEnvAttributes,
 };
 use reth_evm_ethereum::EthBlockAssembler;
+use reth_payload_primitives::NewPayloadError;
 use reth_primitives::{logs_bloom, BlockTy, HeaderTy, Receipt, SealedBlock, SealedHeader};
-use reth_primitives_traits::proofs;
+use reth_primitives_traits::{proofs, SignerRecoverable, WithEncoded};
 use reth_provider::BlockExecutionResult;
 use reth_revm::State;
 use revm::{
@@ -69,10 +71,10 @@ where
         let timestamp = evm_env.block_env.timestamp.saturating_to();

         // Filter out system tx receipts
-        let transactions_for_root: Vec<TransactionSigned> =
-            transactions.iter().filter(|t| !is_system_transaction(t)).cloned().collect::<Vec<_>>();
-        let receipts_for_root: Vec<Receipt> =
-            receipts.iter().filter(|r| r.cumulative_gas_used() != 0).cloned().collect::<Vec<_>>();
+        let transactions_for_root: Vec<_> =
+            transactions.iter().filter(|t| !is_system_transaction(t)).cloned().collect();
+        let receipts_for_root: Vec<_> =
+            receipts.iter().filter(|r| r.cumulative_gas_used() != 0).cloned().collect();

         let transactions_root = proofs::calculate_transaction_root(&transactions_for_root);
         let receipts_root = Receipt::calculate_receipt_root_no_memo(&receipts_for_root);
@@ -293,7 +295,6 @@ where
         // configure evm env based on parent block
         let mut cfg_env =
             CfgEnv::new().with_chain_id(self.chain_spec().chain().id()).with_spec(spec);
-
         if let Some(blob_params) = &blob_params {
             cfg_env.set_max_blobs_per_tx(blob_params.max_blobs_per_tx);
         }
@@ -374,10 +375,6 @@ where
         block: &'a SealedBlock<BlockTy<Self::Primitives>>,
     ) -> ExecutionCtxFor<'a, Self> {
         let block_body = block.body();
-        let extras = HlExtras {
-            read_precompile_calls: block_body.read_precompile_calls.clone(),
-            highest_precompile_address: block_body.highest_precompile_address,
-        };
         HlBlockExecutionCtx {
             ctx: EthBlockExecutionCtx {
                 parent_hash: block.header().parent_hash,
@@ -385,7 +382,10 @@ where
                 ommers: &block.body().ommers,
                 withdrawals: block.body().withdrawals.as_ref().map(Cow::Borrowed),
             },
-            extras,
+            extras: HlExtras {
+                read_precompile_calls: block_body.read_precompile_calls.clone(),
+                highest_precompile_address: block_body.highest_precompile_address,
+            },
         }
     }

@@ -401,12 +401,43 @@ where
                 ommers: &[],
                 withdrawals: attributes.withdrawals.map(Cow::Owned),
             },
-            // TODO: hacky, double check if this is correct
-            extras: HlExtras::default(),
+            extras: HlExtras::default(), // TODO: hacky, double check if this is correct
         }
     }
 }

+impl ConfigureEngineEvm<HlExecutionData> for HlEvmConfig {
+    fn evm_env_for_payload(&self, payload: &HlExecutionData) -> EvmEnvFor<Self> {
+        self.evm_env(&payload.0.header)
+    }
+
+    fn context_for_payload<'a>(&self, payload: &'a HlExecutionData) -> ExecutionCtxFor<'a, Self> {
+        let block = &payload.0;
+        HlBlockExecutionCtx {
+            ctx: EthBlockExecutionCtx {
+                parent_hash: block.header.parent_hash,
+                parent_beacon_block_root: block.header.parent_beacon_block_root,
+                ommers: &block.body.ommers,
+                withdrawals: block.body.withdrawals.as_ref().map(Cow::Borrowed),
+            },
+            extras: HlExtras {
+                read_precompile_calls: block.body.read_precompile_calls.clone(),
+                highest_precompile_address: block.body.highest_precompile_address,
+            },
+        }
+    }
+
+    fn tx_iterator_for_payload(
+        &self,
+        payload: &HlExecutionData,
+    ) -> impl ExecutableTxIterator<Self> {
+        payload.0.body.transactions.clone().into_iter().map(move |tx| {
+            let recovered = tx.try_into_recovered().map_err(NewPayloadError::other)?;
+            Ok::<_, NewPayloadError>(WithEncoded::new(recovered.encoded_2718().into(), recovered))
+        })
+    }
+}
+
 /// Map the latest active hardfork at the given timestamp or block number to a [`HlSpecId`].
 pub fn revm_spec_by_timestamp_and_block_number(
     _chain_spec: impl HlHardforks,
@@ -72,7 +72,7 @@ fn run_precompile(

     match *get {
         ReadPrecompileResult::Ok { gas_used, ref bytes } => {
-            Ok(PrecompileOutput { gas_used, bytes: bytes.clone() })
+            Ok(PrecompileOutput { gas_used, bytes: bytes.clone(), reverted: false })
         }
         ReadPrecompileResult::OutOfGas => {
             // Use all the gas passed to this precompile
@@ -181,7 +181,7 @@ where
         // Execute transaction.
         let ResultAndState { result, mut state } = self
             .evm
-            .transact(tx)
+            .transact(&tx)
             .map_err(|err| BlockExecutionError::evm(err, tx.tx().trie_hash()))?;

         if !f(&result).should_commit() {
@@ -12,7 +12,7 @@ use reth::{
     api::FullNodeTypes,
     builder::{components::ExecutorBuilder, BuilderContext},
 };
-use reth_evm::{Evm, EvmEnv};
+use reth_evm::{Database, Evm, EvmEnv};
 use revm::{
     context::{
         result::{EVMError, ExecutionResult, HaltReason, Output, ResultAndState, SuccessReason},
@@ -21,7 +21,7 @@ use revm::{
     handler::{instructions::EthInstructions, EthPrecompiles, PrecompileProvider},
     interpreter::{interpreter::EthInterpreter, InterpreterResult},
     state::EvmState,
-    Context, Database, ExecuteEvm, InspectEvm, Inspector,
+    Context, ExecuteEvm, InspectEvm, Inspector,
 };
 use std::ops::{Deref, DerefMut};

@@ -75,7 +75,6 @@ where
     DB: Database,
     I: Inspector<HlContext<DB>>,
     P: PrecompileProvider<HlContext<DB>, Output = InterpreterResult>,
-    <DB as revm::Database>::Error: std::marker::Send + std::marker::Sync + 'static,
 {
     type DB = DB;
     type Tx = HlTxEnv<TxEnv>;
@@ -127,10 +126,6 @@ where
         ))
     }

-    fn db_mut(&mut self) -> &mut Self::DB {
-        &mut self.journaled_state.database
-    }
-
     fn finish(self) -> (Self::DB, EvmEnv<Self::Spec>) {
         let Context { block: block_env, cfg: cfg_env, journaled_state, .. } = self.inner.0.ctx;

@@ -141,20 +136,20 @@ where
         self.inspect = enabled;
     }

-    fn precompiles_mut(&mut self) -> &mut Self::Precompiles {
-        &mut self.inner.0.precompiles
+    fn components(&self) -> (&Self::DB, &Self::Inspector, &Self::Precompiles) {
+        (
+            &self.inner.0.ctx.journaled_state.database,
+            &self.inner.0.inspector,
+            &self.inner.0.precompiles,
+        )
     }

-    fn inspector_mut(&mut self) -> &mut Self::Inspector {
-        &mut self.inner.0.inspector
-    }
-
-    fn precompiles(&self) -> &Self::Precompiles {
-        &self.inner.0.precompiles
-    }
-
-    fn inspector(&self) -> &Self::Inspector {
-        &self.inner.0.inspector
+    fn components_mut(&mut self) -> (&mut Self::DB, &mut Self::Inspector, &mut Self::Precompiles) {
+        (
+            &mut self.inner.0.ctx.journaled_state.database,
+            &mut self.inner.0.inspector,
+            &mut self.inner.0.precompiles,
+        )
     }
 }

@@ -170,7 +165,6 @@ where
     type EVM = HlEvmConfig;

     async fn build_evm(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::EVM> {
-        let evm_config = HlEvmConfig::hl(ctx.chain_spec());
-        Ok(evm_config)
+        Ok(HlEvmConfig::hl(ctx.chain_spec()))
     }
 }
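Note: replacing the individual db_mut/inspector/precompiles getters with paired components/components_mut accessors hands back all three parts through one borrow, a split-borrow pattern. A standalone sketch with stand-in types:

    struct Db;
    struct Inspector;
    struct Precompiles;

    struct Evm {
        db: Db,
        inspector: Inspector,
        precompiles: Precompiles,
    }

    impl Evm {
        // One &mut self borrow destructured into three disjoint &mut fields,
        // so callers can mutate db, inspector and precompiles at the same time.
        fn components_mut(&mut self) -> (&mut Db, &mut Inspector, &mut Precompiles) {
            (&mut self.db, &mut self.inspector, &mut self.precompiles)
        }
    }

    fn main() {
        let mut evm = Evm { db: Db, inspector: Inspector, precompiles: Precompiles };
        let (_db, _insp, _pre) = evm.components_mut(); // three live &mut at once
    }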
@@ -2,11 +2,11 @@ use crate::{
     chainspec::HlChainSpec,
     node::{
         pool::HlPoolBuilder,
-        primitives::{BlockBody, HlBlock, HlBlockBody, HlPrimitives, TransactionSigned},
+        primitives::{HlBlock, HlPrimitives},
         rpc::{
             engine_api::{
                 builder::HlEngineApiBuilder, payload::HlPayloadTypes,
-                validator::HlEngineValidatorBuilder,
+                validator::HlPayloadValidatorBuilder,
             },
             HlEthApiBuilder,
         },
@@ -15,19 +15,18 @@ use crate::{
     pseudo_peer::BlockSourceConfig,
 };
 use consensus::HlConsensusBuilder;
-use engine::HlPayloadServiceBuilder;
 use evm::HlExecutorBuilder;
 use network::HlNetworkBuilder;
 use reth::{
-    api::{FullNodeComponents, FullNodeTypes, NodeTypes},
+    api::{FullNodeTypes, NodeTypes},
     builder::{
-        components::ComponentsBuilder, rpc::RpcAddOns, DebugNode, Node, NodeAdapter,
-        NodeComponentsBuilder,
+        components::{ComponentsBuilder, NoopPayloadServiceBuilder},
+        rpc::RpcAddOns,
+        Node, NodeAdapter,
     },
 };
-use reth_engine_primitives::BeaconConsensusEngineHandle;
-use reth_trie_db::MerklePatriciaTrie;
-use std::sync::Arc;
+use reth_engine_primitives::ConsensusEngineHandle;
+use std::{marker::PhantomData, sync::Arc};
 use tokio::sync::{oneshot, Mutex};

 pub mod cli;
@@ -43,31 +42,21 @@ pub mod types;

 /// Hl addons configuring RPC types
 pub type HlNodeAddOns<N> =
-    RpcAddOns<N, HlEthApiBuilder, HlEngineValidatorBuilder, HlEngineApiBuilder>;
+    RpcAddOns<N, HlEthApiBuilder, HlPayloadValidatorBuilder, HlEngineApiBuilder>;

 /// Type configuration for a regular Hl node.
 #[derive(Debug, Clone)]
 pub struct HlNode {
-    engine_handle_rx:
-        Arc<Mutex<Option<oneshot::Receiver<BeaconConsensusEngineHandle<HlPayloadTypes>>>>>,
+    engine_handle_rx: Arc<Mutex<Option<oneshot::Receiver<ConsensusEngineHandle<HlPayloadTypes>>>>>,
     block_source_config: BlockSourceConfig,
-    hl_node_compliant: bool,
 }

 impl HlNode {
     pub fn new(
         block_source_config: BlockSourceConfig,
-        hl_node_compliant: bool,
-    ) -> (Self, oneshot::Sender<BeaconConsensusEngineHandle<HlPayloadTypes>>) {
+    ) -> (Self, oneshot::Sender<ConsensusEngineHandle<HlPayloadTypes>>) {
         let (tx, rx) = oneshot::channel();
-        (
-            Self {
-                engine_handle_rx: Arc::new(Mutex::new(Some(rx))),
-                block_source_config,
-                hl_node_compliant,
-            },
-            tx,
-        )
+        (Self { engine_handle_rx: Arc::new(Mutex::new(Some(rx))), block_source_config }, tx)
     }
 }

@@ -79,7 +68,7 @@ impl HlNode {
     ) -> ComponentsBuilder<
         Node,
         HlPoolBuilder,
-        HlPayloadServiceBuilder,
+        NoopPayloadServiceBuilder,
         HlNetworkBuilder,
         HlExecutorBuilder,
         HlConsensusBuilder,
@@ -91,7 +80,7 @@ impl HlNode {
         .node_types::<Node>()
         .pool(HlPoolBuilder)
         .executor(HlExecutorBuilder::default())
-        .payload(HlPayloadServiceBuilder::default())
+        .payload(NoopPayloadServiceBuilder::default())
         .network(HlNetworkBuilder {
             engine_handle_rx: self.engine_handle_rx.clone(),
             block_source_config: self.block_source_config.clone(),
@@ -103,7 +92,6 @@ impl HlNode {
 impl NodeTypes for HlNode {
     type Primitives = HlPrimitives;
     type ChainSpec = HlChainSpec;
-    type StateCommitment = MerklePatriciaTrie;
     type Storage = HlStorage;
|
type Storage = HlStorage;
|
||||||
type Payload = HlPayloadTypes;
|
type Payload = HlPayloadTypes;
|
||||||
}
|
}
|
||||||
@ -115,15 +103,13 @@ where
|
|||||||
type ComponentsBuilder = ComponentsBuilder<
|
type ComponentsBuilder = ComponentsBuilder<
|
||||||
N,
|
N,
|
||||||
HlPoolBuilder,
|
HlPoolBuilder,
|
||||||
HlPayloadServiceBuilder,
|
NoopPayloadServiceBuilder,
|
||||||
HlNetworkBuilder,
|
HlNetworkBuilder,
|
||||||
HlExecutorBuilder,
|
HlExecutorBuilder,
|
||||||
HlConsensusBuilder,
|
HlConsensusBuilder,
|
||||||
>;
|
>;
|
||||||
|
|
||||||
type AddOns = HlNodeAddOns<
|
type AddOns = HlNodeAddOns<NodeAdapter<N>>;
|
||||||
NodeAdapter<N, <Self::ComponentsBuilder as NodeComponentsBuilder<N>>::Components>,
|
|
||||||
>;
|
|
||||||
|
|
||||||
fn components_builder(&self) -> Self::ComponentsBuilder {
|
fn components_builder(&self) -> Self::ComponentsBuilder {
|
||||||
Self::components(self)
|
Self::components(self)
|
||||||
@ -131,37 +117,11 @@ where
|
|||||||
|
|
||||||
fn add_ons(&self) -> Self::AddOns {
|
fn add_ons(&self) -> Self::AddOns {
|
||||||
HlNodeAddOns::new(
|
HlNodeAddOns::new(
|
||||||
HlEthApiBuilder { hl_node_compliant: self.hl_node_compliant },
|
HlEthApiBuilder { _nt: PhantomData },
|
||||||
|
Default::default(),
|
||||||
Default::default(),
|
Default::default(),
|
||||||
Default::default(),
|
Default::default(),
|
||||||
Default::default(),
|
Default::default(),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N> DebugNode<N> for HlNode
|
|
||||||
where
|
|
||||||
N: FullNodeComponents<Types = Self>,
|
|
||||||
{
|
|
||||||
type RpcBlock = alloy_rpc_types::Block;
|
|
||||||
|
|
||||||
fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> HlBlock {
|
|
||||||
let alloy_rpc_types::Block { header, transactions, withdrawals, .. } = rpc_block;
|
|
||||||
HlBlock {
|
|
||||||
header: header.inner,
|
|
||||||
body: HlBlockBody {
|
|
||||||
inner: BlockBody {
|
|
||||||
transactions: transactions
|
|
||||||
.into_transactions()
|
|
||||||
.map(|tx| TransactionSigned::Default(tx.inner.into_inner().into()))
|
|
||||||
.collect(),
|
|
||||||
ommers: Default::default(),
|
|
||||||
withdrawals,
|
|
||||||
},
|
|
||||||
sidecars: None,
|
|
||||||
read_precompile_calls: None,
|
|
||||||
highest_precompile_address: None,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
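`HlNode::new` now hands back a `oneshot::Sender<ConsensusEngineHandle<HlPayloadTypes>>` while stashing the receiver behind `Arc<Mutex<Option<…>>>`. A minimal sketch of that claim-once handoff, assuming tokio with the `sync`, `macros`, and `rt` features; `EngineHandle` is a stand-in for the real handle type:

```rust
use std::sync::Arc;
use tokio::sync::{oneshot, Mutex};

// Stand-in for ConsensusEngineHandle<HlPayloadTypes>.
#[derive(Debug)]
struct EngineHandle(u32);

#[tokio::main]
async fn main() {
    let (tx, rx) = oneshot::channel::<EngineHandle>();
    // The node keeps the receiver in a shared slot...
    let slot = Arc::new(Mutex::new(Some(rx)));

    // ...the launcher sends the handle once the engine service exists...
    tx.send(EngineHandle(1)).unwrap();

    // ...and the network component claims it exactly once, mirroring the
    // diff's `.take().expect("node should only be launched once")`.
    let rx = slot.lock().await.take().expect("only claimed once");
    let handle = rx.await.unwrap();
    println!("{handle:?}");
}
```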
@@ -12,7 +12,7 @@ use alloy_consensus::{BlockBody, Header};
 use alloy_primitives::U128;
 use alloy_rpc_types::engine::{ForkchoiceState, PayloadStatusEnum};
 use futures::{future::Either, stream::FuturesUnordered, StreamExt};
-use reth_engine_primitives::{BeaconConsensusEngineHandle, EngineTypes};
+use reth_engine_primitives::{ConsensusEngineHandle, EngineTypes};
 use reth_eth_wire::NewBlock;
 use reth_network::{
     import::{BlockImportError, BlockImportEvent, BlockImportOutcome, BlockValidation},
@@ -55,7 +55,7 @@ where
     Provider: BlockNumReader + Clone,
 {
     /// The handle to communicate with the engine service
-    engine: BeaconConsensusEngineHandle<HlPayloadTypes>,
+    engine: ConsensusEngineHandle<HlPayloadTypes>,
     /// The consensus implementation
     consensus: Arc<HlConsensus<Provider>>,
     /// Receive the new block from the network
@@ -73,7 +73,7 @@ where
     /// Create a new block import service
     pub fn new(
         consensus: Arc<HlConsensus<Provider>>,
-        engine: BeaconConsensusEngineHandle<HlPayloadTypes>,
+        engine: ConsensusEngineHandle<HlPayloadTypes>,
         from_network: UnboundedReceiver<IncomingBlock>,
         to_network: UnboundedSender<ImportEvent>,
     ) -> Self {
@@ -89,7 +89,6 @@ where
     /// Process a new payload and return the outcome
     fn new_payload(&self, block: BlockMsg, peer_id: PeerId) -> ImportFut {
         let engine = self.engine.clone();
-
         Box::pin(async move {
             let sealed_block = block.block.0.block.clone().seal();
             let payload = HlPayloadTypes::block_to_payload(sealed_block);
@@ -107,7 +106,7 @@ where
                     .into(),
                     _ => None,
                 },
-                Err(err) => None,
+                Err(_) => None,
             }
         })
     }
@@ -117,15 +116,10 @@ where
         let engine = self.engine.clone();
         let consensus = self.consensus.clone();
         let sealed_block = block.block.0.block.clone().seal();
-        let hash = sealed_block.hash();
-        let number = sealed_block.number();
+        let (hash, number) = (sealed_block.hash(), sealed_block.number());
 
         Box::pin(async move {
-            let (head_block_hash, current_hash) = match consensus.canonical_head(hash, number) {
-                Ok(hash) => hash,
-                Err(_) => return None,
-            };
+            let (head_block_hash, _) = consensus.canonical_head(hash, number).ok()?;
 
             let state = ForkchoiceState {
                 head_block_hash,
                 safe_block_hash: head_block_hash,
@@ -146,18 +140,15 @@ where
                     .into(),
                     _ => None,
                 },
-                Err(err) => None,
+                Err(_) => None,
             }
         })
     }
 
     /// Add a new block import task to the pending imports
     fn on_new_block(&mut self, block: BlockMsg, peer_id: PeerId) {
-        let payload_fut = self.new_payload(block.clone(), peer_id);
-        self.pending_imports.push(payload_fut);
-
-        let fcu_fut = self.update_fork_choice(block, peer_id);
-        self.pending_imports.push(fcu_fut);
+        self.pending_imports.push(self.new_payload(block.clone(), peer_id));
+        self.pending_imports.push(self.update_fork_choice(block, peer_id));
     }
 }
 
@@ -176,11 +167,9 @@ where
         }
 
         // Process completed imports and send events to network
-        while let Poll::Ready(Some(outcome)) = this.pending_imports.poll_next_unpin(cx) {
-            if let Some(outcome) = outcome {
-                if let Err(e) = this.to_network.send(BlockImportEvent::Outcome(outcome)) {
-                    return Poll::Ready(Err(Box::new(e)));
-                }
+        while let Poll::Ready(Some(Some(outcome))) = this.pending_imports.poll_next_unpin(cx) {
+            if let Err(e) = this.to_network.send(BlockImportEvent::Outcome(outcome)) {
+                return Poll::Ready(Err(Box::new(e)));
             }
         }
 
@@ -188,22 +177,6 @@ where
     }
 }
 
-pub(crate) fn collect_block(height: u64) -> Option<BlockAndReceipts> {
-    let ingest_dir = "/home/user/personal/evm-blocks";
-    let f = ((height - 1) / 1_000_000) * 1_000_000;
-    let s = ((height - 1) / 1_000) * 1_000;
-    let path = format!("{ingest_dir}/{f}/{s}/{height}.rmp.lz4");
-    if std::path::Path::new(&path).exists() {
-        let file = std::fs::File::open(path).unwrap();
-        let file = std::io::BufReader::new(file);
-        let mut decoder = lz4_flex::frame::FrameDecoder::new(file);
-        let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder).unwrap();
-        Some(blocks[0].clone())
-    } else {
-        None
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use crate::chainspec::hl::hl_mainnet;
@@ -277,15 +250,12 @@ mod tests {
         fn chain_info(&self) -> Result<ChainInfo, ProviderError> {
             unimplemented!()
         }
 
         fn best_block_number(&self) -> Result<u64, ProviderError> {
             Ok(0)
         }
 
         fn last_block_number(&self) -> Result<u64, ProviderError> {
             Ok(0)
         }
 
         fn block_number(&self, _hash: B256) -> Result<Option<u64>, ProviderError> {
             Ok(None)
         }
@@ -295,7 +265,6 @@ mod tests {
         fn block_hash(&self, _number: u64) -> Result<Option<B256>, ProviderError> {
             Ok(Some(B256::ZERO))
         }
 
         fn canonical_hashes_range(
             &self,
             _start: u64,
@@ -315,14 +284,12 @@ mod tests {
         fn both_valid() -> Self {
             Self { new_payload: PayloadStatusEnum::Valid, fcu: PayloadStatusEnum::Valid }
         }
 
         fn invalid_new_payload() -> Self {
             Self {
                 new_payload: PayloadStatusEnum::Invalid { validation_error: "test error".into() },
                 fcu: PayloadStatusEnum::Valid,
             }
         }
 
         fn invalid_fcu() -> Self {
             Self {
                 new_payload: PayloadStatusEnum::Valid,
@@ -341,20 +308,16 @@ mod tests {
         async fn new(responses: EngineResponses) -> Self {
             let consensus = Arc::new(HlConsensus { provider: MockProvider });
             let (to_engine, from_engine) = mpsc::unbounded_channel();
-            let engine_handle = BeaconConsensusEngineHandle::new(to_engine);
+            let engine_handle = ConsensusEngineHandle::new(to_engine);
 
             handle_engine_msg(from_engine, responses).await;
 
             let (to_import, from_network) = mpsc::unbounded_channel();
             let (to_network, import_outcome) = mpsc::unbounded_channel();
 
             let handle = ImportHandle::new(to_import, import_outcome);
 
             let service = ImportService::new(consensus, engine_handle, from_network, to_network);
             tokio::spawn(Box::pin(async move {
                 service.await.unwrap();
             }));
 
             Self { handle }
         }
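The `update_fork_choice` change above collapses a `match` on a `Result` into `.ok()?` inside a future that yields `Option`. A small self-contained sketch of the equivalence; the names are illustrative:

```rust
// Stand-in for consensus.canonical_head(hash, number).
fn canonical_head(ok: bool) -> Result<(u64, u64), ()> {
    if ok { Ok((7, 7)) } else { Err(()) }
}

fn before(ok: bool) -> Option<u64> {
    // The old form: explicit match with an early return.
    let (head, _current) = match canonical_head(ok) {
        Ok(pair) => pair,
        Err(_) => return None,
    };
    Some(head)
}

fn after(ok: bool) -> Option<u64> {
    // The new form: Result -> Option, then `?` propagates None.
    let (head, _) = canonical_head(ok).ok()?;
    Some(head)
}

fn main() {
    assert_eq!(before(true), after(true));
    assert_eq!(before(false), after(false));
}
```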
@@ -12,14 +12,13 @@ use crate::{
     HlBlock,
 };
 use alloy_rlp::{Decodable, Encodable};
-// use handshake::HlHandshake;
 use reth::{
     api::{FullNodeTypes, TxTy},
     builder::{components::NetworkBuilder, BuilderContext},
     transaction_pool::{PoolTransaction, TransactionPool},
 };
 use reth_discv4::NodeRecord;
-use reth_engine_primitives::BeaconConsensusEngineHandle;
+use reth_engine_primitives::ConsensusEngineHandle;
 use reth_eth_wire::{BasicNetworkPrimitives, NewBlock, NewBlockPayload};
 use reth_ethereum_primitives::PooledTransactionVariant;
 use reth_network::{NetworkConfig, NetworkHandle, NetworkManager};
@@ -31,8 +30,7 @@ use tokio::sync::{mpsc, oneshot, Mutex};
 use tracing::info;
 
 pub mod block_import;
-// pub mod handshake;
-// pub(crate) mod upgrade_status;
+
 /// HL `NewBlock` message value.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct HlNewBlock(pub NewBlock<HlBlock>);
@@ -70,32 +68,22 @@ mod rlp {
 
     impl<'a> From<&'a HlNewBlock> for HlNewBlockHelper<'a> {
         fn from(value: &'a HlNewBlock) -> Self {
-            let HlNewBlock(NewBlock {
-                block:
-                    HlBlock {
-                        header,
-                        body:
-                            HlBlockBody {
-                                inner: BlockBody { transactions, ommers, withdrawals },
-                                sidecars,
-                                read_precompile_calls,
-                                highest_precompile_address,
-                            },
-                    },
-                td,
-            }) = value;
+            let b = &value.0.block;
 
             Self {
                 block: BlockHelper {
-                    header: Cow::Borrowed(header),
-                    transactions: Cow::Borrowed(transactions),
-                    ommers: Cow::Borrowed(ommers),
-                    withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
+                    header: Cow::Borrowed(&b.header),
+                    transactions: Cow::Borrowed(&b.body.inner.transactions),
+                    ommers: Cow::Borrowed(&b.body.inner.ommers),
+                    withdrawals: b.body.inner.withdrawals.as_ref().map(Cow::Borrowed),
                 },
-                td: *td,
-                sidecars: sidecars.as_ref().map(Cow::Borrowed),
-                read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
-                highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
+                td: value.0.td,
+                sidecars: b.body.sidecars.as_ref().map(Cow::Borrowed),
+                read_precompile_calls: b.body.read_precompile_calls.as_ref().map(Cow::Borrowed),
+                highest_precompile_address: b
+                    .body
+                    .highest_precompile_address
+                    .as_ref()
+                    .map(Cow::Borrowed),
             }
         }
     }
@@ -112,30 +100,24 @@ mod rlp {
 
     impl Decodable for HlNewBlock {
         fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
-            let HlNewBlockHelper {
-                block: BlockHelper { header, transactions, ommers, withdrawals },
-                td,
-                sidecars,
-                read_precompile_calls,
-                highest_precompile_address,
-            } = HlNewBlockHelper::decode(buf)?;
+            let h = HlNewBlockHelper::decode(buf)?;
 
             Ok(HlNewBlock(NewBlock {
                 block: HlBlock {
-                    header: header.into_owned(),
+                    header: h.block.header.into_owned(),
                     body: HlBlockBody {
                         inner: BlockBody {
-                            transactions: transactions.into_owned(),
-                            ommers: ommers.into_owned(),
-                            withdrawals: withdrawals.map(|w| w.into_owned()),
+                            transactions: h.block.transactions.into_owned(),
+                            ommers: h.block.ommers.into_owned(),
+                            withdrawals: h.block.withdrawals.map(|w| w.into_owned()),
                         },
-                        sidecars: sidecars.map(|s| s.into_owned()),
-                        read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
-                        highest_precompile_address: highest_precompile_address
+                        sidecars: h.sidecars.map(|s| s.into_owned()),
+                        read_precompile_calls: h.read_precompile_calls.map(|s| s.into_owned()),
+                        highest_precompile_address: h
+                            .highest_precompile_address
                            .map(|s| s.into_owned()),
                     },
                 },
-                td,
+                td: h.td,
             }))
         }
     }
@@ -157,7 +139,7 @@ pub type HlNetworkPrimitives =
 #[derive(Debug)]
 pub struct HlNetworkBuilder {
     pub(crate) engine_handle_rx:
-        Arc<Mutex<Option<oneshot::Receiver<BeaconConsensusEngineHandle<HlPayloadTypes>>>>>,
+        Arc<Mutex<Option<oneshot::Receiver<ConsensusEngineHandle<HlPayloadTypes>>>>>,
 
     pub(crate) block_source_config: BlockSourceConfig,
 }
@@ -173,41 +155,32 @@ impl HlNetworkBuilder {
     where
         Node: FullNodeTypes<Types = HlNode>,
     {
-        let Self { engine_handle_rx, .. } = self;
-
-        let network_builder = ctx.network_config_builder()?;
-
         let (to_import, from_network) = mpsc::unbounded_channel();
         let (to_network, import_outcome) = mpsc::unbounded_channel();
 
         let handle = ImportHandle::new(to_import, import_outcome);
         let consensus = Arc::new(HlConsensus { provider: ctx.provider().clone() });
 
         ctx.task_executor().spawn_critical("block import", async move {
-            let handle = engine_handle_rx
+            let handle = self
+                .engine_handle_rx
                 .lock()
                 .await
                 .take()
                 .expect("node should only be launched once")
                 .await
                 .unwrap();
 
             ImportService::new(consensus, handle, from_network, to_network).await.unwrap();
         });
 
-        let network_builder = network_builder
-            .disable_dns_discovery()
-            .disable_nat()
-            .boot_nodes(boot_nodes())
-            .set_head(ctx.head())
-            .with_pow()
-            .block_import(Box::new(HlBlockImport::new(handle)));
-        // .discovery(discv4)
-        // .eth_rlpx_handshake(Arc::new(HlHandshake::default()));
-
-        let network_config = ctx.build_network_config(network_builder);
-
-        Ok(network_config)
+        Ok(ctx.build_network_config(
+            ctx.network_config_builder()?
+                .disable_dns_discovery()
+                .disable_nat()
+                .boot_nodes(boot_nodes())
+                .set_head(ctx.head())
+                .with_pow()
+                .block_import(Box::new(HlBlockImport::new(handle))),
+        ))
     }
 }
 
@@ -230,23 +203,29 @@ where
         pool: Pool,
     ) -> eyre::Result<Self::Network> {
         let block_source_config = self.block_source_config.clone();
-        let network_config = self.network_config(ctx)?;
-        let network = NetworkManager::builder(network_config).await?;
-        let handle = ctx.start_network(network, pool);
+        let handle =
+            ctx.start_network(NetworkManager::builder(self.network_config(ctx)?).await?, pool);
         let local_node_record = handle.local_node_record();
-        let chain_spec = ctx.chain_spec();
         info!(target: "reth::cli", enode=%local_node_record, "P2P networking initialized");
 
-        let next_block_number =
-            ctx.provider().get_stage_checkpoint(StageId::Finish)?.unwrap_or_default().block_number
-                + 1;
+        let next_block_number = ctx
+            .provider()
+            .get_stage_checkpoint(StageId::Finish)?
+            .unwrap_or_default()
+            .block_number +
+            1;
 
+        let chain_spec = ctx.chain_spec();
         ctx.task_executor().spawn_critical("pseudo peer", async move {
-            let block_source =
-                block_source_config.create_cached_block_source(next_block_number).await;
-            start_pseudo_peer(chain_spec, local_node_record.to_string(), block_source)
-                .await
-                .unwrap();
+            start_pseudo_peer(
+                chain_spec.clone(),
+                local_node_record.to_string(),
+                block_source_config
+                    .create_cached_block_source((*chain_spec).clone(), next_block_number)
+                    .await,
+            )
+            .await
+            .unwrap();
        });
 
         Ok(handle)
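The `rlp` helpers above encode by borrowing (`Cow::Borrowed`) and decode into owned values (`into_owned`). A minimal sketch of that pattern with stand-in types:

```rust
use std::borrow::Cow;

struct Block {
    txs: Vec<u8>,
}

// Helper mirrors the wire layout and can either borrow or own its data.
struct Helper<'a> {
    txs: Cow<'a, [u8]>,
}

impl<'a> From<&'a Block> for Helper<'a> {
    fn from(b: &'a Block) -> Self {
        Self { txs: Cow::Borrowed(&b.txs) } // zero-copy on the encode path
    }
}

impl Helper<'_> {
    fn into_block(self) -> Block {
        Block { txs: self.txs.into_owned() } // allocate only when decoding
    }
}

fn main() {
    let block = Block { txs: vec![1, 2, 3] };
    let round_trip = Helper::from(&block).into_block();
    assert_eq!(round_trip.txs, vec![1, 2, 3]);
}
```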
282
src/node/pool.rs
@@ -10,43 +10,30 @@ use crate::node::{primitives::TransactionSigned, HlNode};
 use alloy_consensus::{
     error::ValueError, EthereumTxEnvelope, Transaction as TransactionTrait, TxEip4844,
 };
-use alloy_eips::{
-    eip4844::BlobAndProofV2, eip7594::BlobTransactionSidecarVariant, eip7702::SignedAuthorization,
-    Typed2718,
-};
+use alloy_eips::{eip7702::SignedAuthorization, Typed2718};
 use alloy_primitives::{Address, Bytes, ChainId, TxHash, TxKind, B256, U256};
 use alloy_rpc_types::AccessList;
-use alloy_rpc_types_engine::BlobAndProofV1;
 use reth::{
-    api::FullNodeTypes,
-    builder::components::PoolBuilder,
-    transaction_pool::{PoolResult, PoolSize, PoolTransaction, TransactionOrigin, TransactionPool},
+    api::FullNodeTypes, builder::components::PoolBuilder, transaction_pool::PoolTransaction,
 };
-use reth_eth_wire::HandleMempoolData;
 use reth_ethereum_primitives::PooledTransactionVariant;
 use reth_primitives::Recovered;
 use reth_primitives_traits::InMemorySize;
-use reth_transaction_pool::{
-    error::InvalidPoolTransactionError, AllPoolTransactions, AllTransactionsEvents,
-    BestTransactions, BestTransactionsAttributes, BlobStoreError, BlockInfo, EthPoolTransaction,
-    GetPooledTransactionLimit, NewBlobSidecar, NewTransactionEvent, PropagatedTransactions,
-    TransactionEvents, TransactionListenerKind, ValidPoolTransaction,
-};
-use std::{collections::HashSet, sync::Arc};
-use tokio::sync::mpsc::{self, Receiver};
+use reth_transaction_pool::{noop::NoopTransactionPool, EthPoolTransaction};
+use std::sync::Arc;
 
 pub struct HlPoolBuilder;
 impl<Node> PoolBuilder<Node> for HlPoolBuilder
 where
     Node: FullNodeTypes<Types = HlNode>,
 {
-    type Pool = HlTransactionPool;
+    type Pool = NoopTransactionPool<HlPooledTransaction>;
 
     async fn build_pool(
         self,
         _ctx: &reth::builder::BuilderContext<Node>,
     ) -> eyre::Result<Self::Pool> {
-        Ok(HlTransactionPool)
+        Ok(NoopTransactionPool::new())
     }
 }
@@ -124,16 +111,6 @@ impl PoolTransaction for HlPooledTransaction {
     type Consensus = TransactionSigned;
     type Pooled = PooledTransactionVariant;
 
-    fn try_from_consensus(
-        _tx: Recovered<Self::Consensus>,
-    ) -> Result<Self, Self::TryFromConsensusError> {
-        unreachable!()
-    }
-
-    fn clone_into_consensus(&self) -> Recovered<Self::Consensus> {
-        unreachable!()
-    }
-
     fn into_consensus(self) -> Recovered<Self::Consensus> {
         unreachable!()
     }
@@ -161,13 +138,6 @@ impl PoolTransaction for HlPooledTransaction {
     fn encoded_length(&self) -> usize {
         0
     }
-
-    fn ensure_max_init_code_size(
-        &self,
-        _max_init_code_size: usize,
-    ) -> Result<(), InvalidPoolTransactionError> {
-        Ok(())
-    }
 }
@@ -197,243 +167,3 @@ impl EthPoolTransaction for HlPooledTransaction {
         Ok(())
     }
 }
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
-pub struct HlTransactionPool;
-impl TransactionPool for HlTransactionPool {
-    type Transaction = HlPooledTransaction;
-
-    fn pool_size(&self) -> PoolSize {
-        PoolSize::default()
-    }
-
-    fn block_info(&self) -> BlockInfo {
-        BlockInfo::default()
-    }
-
-    async fn add_transaction_and_subscribe(
-        &self,
-        _origin: TransactionOrigin,
-        _transaction: Self::Transaction,
-    ) -> PoolResult<TransactionEvents> {
-        unreachable!()
-    }
-
-    async fn add_transaction(
-        &self,
-        _origin: TransactionOrigin,
-        _transaction: Self::Transaction,
-    ) -> PoolResult<TxHash> {
-        Ok(TxHash::default())
-    }
-
-    async fn add_transactions(
-        &self,
-        _origin: TransactionOrigin,
-        _transactions: Vec<Self::Transaction>,
-    ) -> Vec<PoolResult<TxHash>> {
-        vec![]
-    }
-
-    fn transaction_event_listener(&self, _tx_hash: TxHash) -> Option<TransactionEvents> {
-        None
-    }
-
-    fn all_transactions_event_listener(&self) -> AllTransactionsEvents<Self::Transaction> {
-        unreachable!()
-    }
-
-    fn pending_transactions_listener_for(
-        &self,
-        _kind: TransactionListenerKind,
-    ) -> Receiver<TxHash> {
-        mpsc::channel(1).1
-    }
-
-    fn blob_transaction_sidecars_listener(&self) -> Receiver<NewBlobSidecar> {
-        mpsc::channel(1).1
-    }
-
-    fn new_transactions_listener_for(
-        &self,
-        _kind: TransactionListenerKind,
-    ) -> Receiver<NewTransactionEvent<Self::Transaction>> {
-        mpsc::channel(1).1
-    }
-    fn pooled_transaction_hashes(&self) -> Vec<TxHash> {
-        vec![]
-    }
-    fn pooled_transaction_hashes_max(&self, _max: usize) -> Vec<TxHash> {
-        vec![]
-    }
-    fn pooled_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        vec![]
-    }
-    fn pooled_transactions_max(
-        &self,
-        _max: usize,
-    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        vec![]
-    }
-    fn get_pooled_transaction_elements(
-        &self,
-        _tx_hashes: Vec<TxHash>,
-        _limit: GetPooledTransactionLimit,
-    ) -> Vec<<Self::Transaction as PoolTransaction>::Pooled> {
-        vec![]
-    }
-    fn get_pooled_transaction_element(
-        &self,
-        _tx_hash: TxHash,
-    ) -> Option<Recovered<<Self::Transaction as PoolTransaction>::Pooled>> {
-        None
-    }
-    fn best_transactions(
-        &self,
-    ) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>> {
-        Box::new(std::iter::empty())
-    }
-    fn best_transactions_with_attributes(
-        &self,
-        _best_transactions_attributes: BestTransactionsAttributes,
-    ) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>> {
-        Box::new(std::iter::empty())
-    }
-    fn pending_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        vec![]
-    }
-    fn pending_transactions_max(
-        &self,
-        _max: usize,
-    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        vec![]
-    }
-    fn queued_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        vec![]
-    }
-    fn all_transactions(&self) -> AllPoolTransactions<Self::Transaction> {
-        AllPoolTransactions::default()
-    }
-    fn remove_transactions(
-        &self,
-        _hashes: Vec<TxHash>,
-    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        vec![]
-    }
-    fn remove_transactions_and_descendants(
-        &self,
-        _hashes: Vec<TxHash>,
-    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        vec![]
-    }
-    fn remove_transactions_by_sender(
-        &self,
-        _sender: Address,
-    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        vec![]
-    }
-    fn retain_unknown<A>(&self, _announcement: &mut A)
-    where
-        A: HandleMempoolData,
-    {
-        // do nothing
-    }
-    fn get(&self, _tx_hash: &TxHash) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        None
-    }
-    fn get_all(&self, _txs: Vec<TxHash>) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        vec![]
-    }
-    fn on_propagated(&self, _txs: PropagatedTransactions) {
-        // do nothing
-    }
-    fn get_transactions_by_sender(
-        &self,
-        _sender: Address,
-    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        vec![]
-    }
-    fn get_pending_transactions_with_predicate(
-        &self,
-        _predicate: impl FnMut(&ValidPoolTransaction<Self::Transaction>) -> bool,
-    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        vec![]
-    }
-    fn get_pending_transactions_by_sender(
-        &self,
-        _sender: Address,
-    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        vec![]
-    }
-    fn get_queued_transactions_by_sender(
-        &self,
-        _sender: Address,
-    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        unreachable!()
-    }
-    fn get_highest_transaction_by_sender(
-        &self,
-        _sender: Address,
-    ) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        None
-    }
-    fn get_highest_consecutive_transaction_by_sender(
-        &self,
-        _sender: Address,
-        _on_chain_nonce: u64,
-    ) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        None
-    }
-    fn get_transaction_by_sender_and_nonce(
-        &self,
-        _sender: Address,
-        _nonce: u64,
-    ) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        None
-    }
-    fn get_transactions_by_origin(
-        &self,
-        _origin: TransactionOrigin,
-    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        unreachable!()
-    }
-    fn get_pending_transactions_by_origin(
-        &self,
-        _origin: TransactionOrigin,
-    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
-        unreachable!()
-    }
-    fn unique_senders(&self) -> HashSet<Address> {
-        unreachable!()
-    }
-    fn get_blob(
-        &self,
-        _tx_hash: TxHash,
-    ) -> Result<Option<Arc<BlobTransactionSidecarVariant>>, BlobStoreError> {
-        unreachable!()
-    }
-    fn get_all_blobs(
-        &self,
-        _tx_hashes: Vec<TxHash>,
-    ) -> Result<Vec<(TxHash, Arc<BlobTransactionSidecarVariant>)>, BlobStoreError> {
-        unreachable!()
-    }
-    fn get_all_blobs_exact(
-        &self,
-        _tx_hashes: Vec<TxHash>,
-    ) -> Result<Vec<Arc<BlobTransactionSidecarVariant>>, BlobStoreError> {
-        unreachable!()
-    }
-    fn get_blobs_for_versioned_hashes_v1(
-        &self,
-        _versioned_hashes: &[B256],
-    ) -> Result<Vec<Option<BlobAndProofV1>>, BlobStoreError> {
-        unreachable!()
-    }
-    fn get_blobs_for_versioned_hashes_v2(
-        &self,
-        _versioned_hashes: &[B256],
-    ) -> Result<Option<Vec<BlobAndProofV2>>, BlobStoreError> {
-        unreachable!()
-    }
-}
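The hand-rolled stub pool is dropped in favor of upstream `reth_transaction_pool::noop::NoopTransactionPool`, parameterized by `HlPooledTransaction`. A sketch of the generic no-op pattern that replaces roughly 240 lines of stubs; the trait and types here are illustrative, not reth's:

```rust
use std::marker::PhantomData;

trait Pool {
    type Tx;
    fn pending(&self) -> Vec<Self::Tx>;
}

// One generic stub satisfies the trait for any transaction type.
struct NoopPool<T>(PhantomData<T>);

impl<T> NoopPool<T> {
    fn new() -> Self {
        Self(PhantomData)
    }
}

impl<T> Pool for NoopPool<T> {
    type Tx = T;
    fn pending(&self) -> Vec<T> {
        // Never holds anything; blocks arrive fully formed from the network.
        Vec::new()
    }
}

fn main() {
    let pool: NoopPool<u64> = NoopPool::new();
    assert!(pool.pending().is_empty());
}
```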
@@ -68,19 +68,15 @@ impl BlockBodyTrait for HlBlockBody {
     fn transactions(&self) -> &[Self::Transaction] {
         BlockBodyTrait::transactions(&self.inner)
     }
 
     fn into_ethereum_body(self) -> BlockBody {
         self.inner
     }
 
     fn into_transactions(self) -> Vec<Self::Transaction> {
         self.inner.into_transactions()
     }
 
     fn withdrawals(&self) -> Option<&alloy_rpc_types::Withdrawals> {
         self.inner.withdrawals()
     }
 
     fn ommers(&self) -> Option<&[Self::OmmerHeader]> {
         self.inner.ommers()
     }
@@ -116,15 +112,12 @@ impl Block for HlBlock {
     fn new(header: Self::Header, body: Self::Body) -> Self {
         Self { header, body }
     }
 
     fn header(&self) -> &Self::Header {
         &self.header
     }
 
     fn body(&self) -> &Self::Body {
         &self.body
     }
 
     fn split(self) -> (Self::Header, Self::Body) {
         (self.header, self.body)
     }
@@ -179,7 +172,6 @@ mod rlp {
             read_precompile_calls,
             highest_precompile_address,
         } = value;
 
         Self {
             transactions: Cow::Borrowed(transactions),
             ommers: Cow::Borrowed(ommers),
@@ -203,7 +195,6 @@ mod rlp {
                 highest_precompile_address,
             },
         } = value;
 
         Self {
             header: Cow::Borrowed(header),
             transactions: Cow::Borrowed(transactions),
@@ -220,7 +211,6 @@ mod rlp {
         fn encode(&self, out: &mut dyn bytes::BufMut) {
             BlockBodyHelper::from(self).encode(out);
         }
 
         fn length(&self) -> usize {
             BlockBodyHelper::from(self).length()
         }
@@ -253,7 +243,6 @@ mod rlp {
         fn encode(&self, out: &mut dyn bytes::BufMut) {
             BlockHelper::from(self).encode(out);
         }
 
         fn length(&self) -> usize {
             BlockHelper::from(self).length()
         }
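`HlBlockBody` and `HlBlock` above mostly forward their trait methods to the wrapped ethereum types. A minimal sketch of that newtype-delegation pattern; the trait and types are illustrative:

```rust
trait Body {
    fn len(&self) -> usize;
}

struct Inner(Vec<u8>);

impl Body for Inner {
    fn len(&self) -> usize {
        self.0.len()
    }
}

struct Wrapper {
    inner: Inner,
    extra: Option<u8>, // side data the inner type knows nothing about
}

impl Body for Wrapper {
    fn len(&self) -> usize {
        // Delegate; only the extra fields ever need new logic.
        self.inner.len()
    }
}

fn main() {
    let w = Wrapper { inner: Inner(vec![1, 2]), extra: None };
    assert_eq!(w.len(), 2);
    assert!(w.extra.is_none());
}
```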
@@ -1,11 +1,12 @@
 //! HlNodePrimitives::TransactionSigned; it's the same as ethereum transaction type,
 //! except that it supports pseudo signer for system transactions.
 use alloy_consensus::{
-    crypto::RecoveryError, error::ValueError, EthereumTxEnvelope, SignableTransaction, Signed,
-    Transaction as TransactionTrait, TransactionEnvelope, TxEip1559, TxEip2930, TxEip4844,
-    TxEip4844WithSidecar, TxEip7702, TxLegacy, TxType, TypedTransaction,
+    crypto::RecoveryError, error::ValueError, EthereumTxEnvelope, EthereumTypedTransaction,
+    SignableTransaction, Signed, Transaction as TransactionTrait, TransactionEnvelope, TxEip1559,
+    TxEip2930, TxEip4844, TxEip4844WithSidecar, TxEip7702, TxLegacy, TxType, TypedTransaction,
 };
 use alloy_eips::{eip7594::BlobTransactionSidecarVariant, Encodable2718};
+use alloy_network::TxSigner;
 use alloy_primitives::{address, Address, TxHash, U256};
 use alloy_rpc_types::{Transaction, TransactionInfo, TransactionRequest};
 use alloy_signer::Signature;
@@ -21,7 +22,7 @@ use reth_primitives_traits::{
 };
 use reth_rpc_eth_api::{
     transaction::{FromConsensusTx, TryIntoTxEnv},
-    EthTxEnvError, TryIntoSimTx,
+    EthTxEnvError, SignTxRequestError, SignableTxRequest, TryIntoSimTx,
 };
 use revm::context::{BlockEnv, CfgEnv, TxEnv};
 
@@ -59,22 +60,19 @@ impl SignerRecoverable for TransactionSigned {
         }
         self.inner().recover_signer_unchecked()
     }
+
+    fn recover_unchecked_with_buf(&self, buf: &mut Vec<u8>) -> Result<Address, RecoveryError> {
+        if self.is_system_transaction() {
+            return Ok(s_to_address(self.signature().s()));
+        }
+        self.inner().recover_unchecked_with_buf(buf)
+    }
 }
 
 impl SignedTransaction for TransactionSigned {
     fn tx_hash(&self) -> &TxHash {
         self.inner().tx_hash()
     }
-
-    fn recover_signer_unchecked_with_buf(
-        &self,
-        buf: &mut Vec<u8>,
-    ) -> Result<Address, RecoveryError> {
-        if self.is_system_transaction() {
-            return Ok(s_to_address(self.signature().s()));
-        }
-        self.inner().recover_signer_unchecked_with_buf(buf)
-    }
 }
 
 // ------------------------------------------------------------
@@ -116,11 +114,6 @@ impl reth_codecs::Compact for TransactionSigned {
     }
 }
 
-pub fn convert_recovered(value: Recovered<TransactionSigned>) -> Recovered<InnerType> {
-    let (tx, signer) = value.into_parts();
-    Recovered::new_unchecked(tx.into_inner(), signer)
-}
-
 impl FromRecoveredTx<TransactionSigned> for TxEnv {
     fn from_recovered_tx(tx: &TransactionSigned, sender: Address) -> Self {
         TxEnv::from_recovered_tx(&tx.inner(), sender)
@@ -194,20 +187,6 @@ impl SerdeBincodeCompat for TransactionSigned {
 
 pub type BlockBody = alloy_consensus::BlockBody<TransactionSigned>;
 
-impl From<TransactionSigned> for EthereumTxEnvelope<TxEip4844> {
-    fn from(value: TransactionSigned) -> Self {
-        value.into_inner()
-    }
-}
-
-impl TryFrom<TransactionSigned> for EthereumTxEnvelope<TxEip4844WithSidecar> {
-    type Error = <InnerType as TryInto<EthereumTxEnvelope<TxEip4844WithSidecar>>>::Error;
-
-    fn try_from(value: TransactionSigned) -> Result<Self, Self::Error> {
-        value.into_inner().try_into()
-    }
-}
-
 impl TryFrom<TransactionSigned>
     for EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>
 {
@@ -296,3 +275,32 @@ impl FromConsensusTx<TransactionSigned> for Transaction {
         Self::from_transaction(Recovered::new_unchecked(tx.into_inner().into(), signer), tx_info)
     }
 }
+
+impl SignableTxRequest<TransactionSigned> for TransactionRequest {
+    async fn try_build_and_sign(
+        self,
+        signer: impl TxSigner<Signature> + Send,
+    ) -> Result<TransactionSigned, SignTxRequestError> {
+        let mut tx =
+            self.build_typed_tx().map_err(|_| SignTxRequestError::InvalidTransactionRequest)?;
+        let signature = signer.sign_transaction(&mut tx).await?;
+        let signed = match tx {
+            EthereumTypedTransaction::Legacy(tx) => {
+                EthereumTxEnvelope::Legacy(tx.into_signed(signature))
+            }
+            EthereumTypedTransaction::Eip2930(tx) => {
+                EthereumTxEnvelope::Eip2930(tx.into_signed(signature))
+            }
+            EthereumTypedTransaction::Eip1559(tx) => {
+                EthereumTxEnvelope::Eip1559(tx.into_signed(signature))
+            }
+            EthereumTypedTransaction::Eip4844(tx) => {
+                EthereumTxEnvelope::Eip4844(TxEip4844::from(tx).into_signed(signature))
+            }
+            EthereumTypedTransaction::Eip7702(tx) => {
+                EthereumTxEnvelope::Eip7702(tx.into_signed(signature))
+            }
+        };
+        Ok(TransactionSigned::Default(signed))
+    }
+}
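System transactions in this file recover their "signer" from the signature's `s` value via `s_to_address`, skipping ecrecover entirely. A simplified sketch of the idea; the crate's real helper maps a `U256` `s` into an `Address`, while this stand-in uses `u128` and a raw byte array:

```rust
// Hypothetical, simplified stand-in for the crate's s_to_address helper.
fn s_to_address(s: u128) -> [u8; 20] {
    let mut addr = [0u8; 20];
    addr[4..].copy_from_slice(&s.to_be_bytes()); // embed s big-endian
    addr
}

fn recover(is_system: bool, s: u128) -> [u8; 20] {
    if is_system {
        // No elliptic-curve recovery needed; the address is embedded.
        s_to_address(s)
    } else {
        // Real transactions would go through signature recovery here.
        [0xaa; 20]
    }
}

fn main() {
    let addr = recover(true, 0x2222);
    assert_eq!(&addr[..4], &[0, 0, 0, 0]); // high bytes stay zero
}
```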
@@ -1,263 +1,56 @@
-use std::{future::Future, sync::Arc};
-
-use crate::{
-    chainspec::HlChainSpec,
-    node::{
-        primitives::TransactionSigned,
-        rpc::{HlEthApi, HlNodeCore},
-    },
-    HlBlock,
-};
-use alloy_consensus::{BlockHeader, ReceiptEnvelope, TxType};
-use alloy_primitives::B256;
-use reth::{
-    api::NodeTypes,
-    builder::FullNodeComponents,
-    primitives::{Receipt, SealedHeader, TransactionMeta},
-    providers::{BlockReaderIdExt, ProviderHeader, ReceiptProvider, TransactionsProvider},
-    rpc::{
-        eth::EthApiTypes,
-        server_types::eth::{
-            error::FromEvmError, receipt::build_receipt, EthApiError, PendingBlock,
-        },
-        types::{BlockId, TransactionReceipt},
-    },
-    transaction_pool::{PoolTransaction, TransactionPool},
+use crate::node::rpc::HlEthApi;
+use reth::rpc::server_types::eth::{
+    builder::config::PendingBlockKind, error::FromEvmError, EthApiError, PendingBlock,
 };
-use reth_chainspec::{EthChainSpec, EthereumHardforks};
-use reth_evm::{ConfigureEvm, NextBlockEnvAttributes};
-use reth_primitives::{NodePrimitives, SealedBlock};
-use reth_primitives_traits::{BlockBody as _, RecoveredBlock, SignedTransaction as _};
-use reth_provider::{
-    BlockIdReader, BlockReader, ChainSpecProvider, HeaderProvider, ProviderBlock, ProviderReceipt,
-    ProviderTx, StateProviderFactory,
-};
 use reth_rpc_eth_api::{
-    helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking},
-    types::RpcTypes,
-    FromEthApiError, RpcConvert, RpcNodeCore, RpcNodeCoreExt, RpcReceipt,
+    helpers::{
+        pending_block::PendingEnvBuilder, EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt,
+    },
+    RpcConvert, RpcNodeCore,
 };
 
-fn is_system_tx(tx: &TransactionSigned) -> bool {
-    tx.is_system_transaction()
-}
-
-impl<N> EthBlocks for HlEthApi<N>
+impl<N, Rpc> EthBlocks for HlEthApi<N, Rpc>
 where
-    Self: LoadBlock<
-        Error = EthApiError,
-        NetworkTypes: RpcTypes<Receipt = TransactionReceipt>,
-        Provider: BlockReader<Transaction = TransactionSigned, Receipt = Receipt>,
-    >,
-    N: HlNodeCore<Provider: ChainSpecProvider<ChainSpec = HlChainSpec> + HeaderProvider>,
+    N: RpcNodeCore,
+    EthApiError: FromEvmError<N::Evm>,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
 {
-    async fn block_receipts(
-        &self,
-        block_id: BlockId,
-    ) -> Result<Option<Vec<RpcReceipt<Self::NetworkTypes>>>, Self::Error>
-    where
-        Self: LoadReceipt,
-    {
-        if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? {
-            let block_number = block.number();
-            let base_fee = block.base_fee_per_gas();
-            let block_hash = block.hash();
-            let excess_blob_gas = block.excess_blob_gas();
-            let timestamp = block.timestamp();
-            let blob_params = self.provider().chain_spec().blob_params_at_timestamp(timestamp);
-
-            return block
-                .body()
-                .transactions()
-                .iter()
-                .zip(receipts.iter())
-                .filter(|(tx, _)| !is_system_tx(tx))
-                .enumerate()
-                .map(|(idx, (tx, receipt))| {
-                    let meta = TransactionMeta {
-                        tx_hash: *tx.tx_hash(),
-                        index: idx as u64,
-                        block_hash,
-                        block_number,
-                        base_fee,
-                        excess_blob_gas,
-                        timestamp,
-                    };
-                    build_receipt(tx, meta, receipt, &receipts, blob_params, |receipt_with_bloom| {
-                        match receipt.tx_type {
-                            TxType::Legacy => ReceiptEnvelope::Legacy(receipt_with_bloom),
-                            TxType::Eip2930 => ReceiptEnvelope::Eip2930(receipt_with_bloom),
-                            TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt_with_bloom),
-                            TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt_with_bloom),
-                            TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt_with_bloom),
-                        }
-                    })
-                })
-                .collect::<Result<Vec<_>, Self::Error>>()
-                .map(Some);
-        }
-
-        Ok(None)
-    }
 }
 
-impl<N> LoadBlock for HlEthApi<N>
+impl<N, Rpc> LoadBlock for HlEthApi<N, Rpc>
 where
-    Self: LoadPendingBlock
-        + SpawnBlocking
-        + RpcNodeCoreExt<
-            Pool: TransactionPool<
-                Transaction: PoolTransaction<Consensus = ProviderTx<Self::Provider>>,
-            >,
-        > + RpcNodeCore<Provider: BlockReader<Block = crate::HlBlock>>,
-    N: HlNodeCore,
+    N: RpcNodeCore,
+    EthApiError: FromEvmError<N::Evm>,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
 {
-    fn recovered_block(
-        &self,
-        block_id: BlockId,
-    ) -> impl Future<
-        Output = Result<
-            Option<Arc<RecoveredBlock<<Self::Provider as BlockReader>::Block>>>,
-            Self::Error,
-        >,
-    > + Send {
-        let hl_node_compliant = self.hl_node_compliant;
-        async move {
-            // Copy of LoadBlock::recovered_block, but with --hl-node-compliant support
-            if block_id.is_pending() {
-                return Ok(None);
-            }
-
-            let block_hash = match self
-                .provider()
-                .block_hash_for_id(block_id)
-                .map_err(Self::Error::from_eth_err)?
-            {
-                Some(block_hash) => block_hash,
-                None => return Ok(None),
-            };
-
-            let recovered_block = self
-                .cache()
-                .get_recovered_block(block_hash)
-                .await
-                .map_err(Self::Error::from_eth_err)?;
-
-            if let Some(recovered_block) = recovered_block {
-                let recovered_block = if hl_node_compliant {
-                    filter_if_hl_node_compliant(&recovered_block)
-                } else {
-                    (*recovered_block).clone()
-                };
-                return Ok(Some(std::sync::Arc::new(recovered_block)));
-            }
-
-            Ok(None)
-        }
-    }
 }
 
-fn filter_if_hl_node_compliant(
-    recovered_block: &RecoveredBlock<HlBlock>,
-) -> RecoveredBlock<HlBlock> {
-    let sealed_block = recovered_block.sealed_block();
-    let transactions = sealed_block.body().transactions();
-    let to_skip = transactions
-        .iter()
-        .position(|tx| !tx.is_system_transaction())
-        .unwrap_or(transactions.len());
-
-    let mut new_block: HlBlock = sealed_block.clone_block();
-    new_block.body.transactions.drain(..to_skip);
-    let new_sealed_block = SealedBlock::new_unchecked(new_block, sealed_block.hash());
-    let new_senders = recovered_block.senders()[to_skip..].to_vec();
-
-    RecoveredBlock::new_sealed(new_sealed_block, new_senders)
-}
-
-impl<N> LoadPendingBlock for HlEthApi<N>
+impl<N, Rpc> LoadPendingBlock for HlEthApi<N, Rpc>
 where
-    Self: SpawnBlocking
-        + EthApiTypes<
-            NetworkTypes: RpcTypes<
-                Header = alloy_rpc_types_eth::Header<ProviderHeader<Self::Provider>>,
-            >,
-            Error: FromEvmError<Self::Evm>,
-            RpcConvert: RpcConvert<Network = Self::NetworkTypes>,
-        >,
-    N: RpcNodeCore<
-        Provider: BlockReaderIdExt
-            + ChainSpecProvider<ChainSpec: EthChainSpec + EthereumHardforks>
-            + StateProviderFactory,
-        Pool: TransactionPool<Transaction: PoolTransaction<Consensus = ProviderTx<N::Provider>>>,
-        Evm: ConfigureEvm<
-            Primitives = <Self as RpcNodeCore>::Primitives,
-            NextBlockEnvCtx: From<NextBlockEnvAttributes>,
-        >,
-        Primitives: NodePrimitives<
-            BlockHeader = ProviderHeader<Self::Provider>,
-            SignedTx = ProviderTx<Self::Provider>,
-            Receipt = ProviderReceipt<Self::Provider>,
-            Block = ProviderBlock<Self::Provider>,
-        >,
-    >,
+    N: RpcNodeCore,
+    EthApiError: FromEvmError<N::Evm>,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
 {
     #[inline]
-    fn pending_block(
-        &self,
-    ) -> &tokio::sync::Mutex<
-        Option<PendingBlock<ProviderBlock<Self::Provider>, ProviderReceipt<Self::Provider>>>,
-    > {
+    fn pending_block(&self) -> &tokio::sync::Mutex<Option<PendingBlock<N::Primitives>>> {
         self.inner.eth_api.pending_block()
     }
 
-    fn next_env_attributes(
-        &self,
-        parent: &SealedHeader<ProviderHeader<Self::Provider>>,
-    ) -> Result<<Self::Evm as reth_evm::ConfigureEvm>::NextBlockEnvCtx, Self::Error> {
-        Ok(NextBlockEnvAttributes {
-            timestamp: parent.timestamp().saturating_add(12),
-            suggested_fee_recipient: parent.beneficiary(),
-            prev_randao: B256::random(),
-            gas_limit: parent.gas_limit(),
-            parent_beacon_block_root: parent.parent_beacon_block_root(),
-            withdrawals: None,
-        }
-        .into())
+    #[inline]
+    fn pending_env_builder(&self) -> &dyn PendingEnvBuilder<Self::Evm> {
+        self.inner.eth_api.pending_env_builder()
     }
+
+    #[inline]
+    fn pending_block_kind(&self) -> PendingBlockKind {
+        self.inner.eth_api.pending_block_kind()
+    }
 }
 
-impl<N> LoadReceipt for HlEthApi<N>
+impl<N, Rpc> LoadReceipt for HlEthApi<N, Rpc>
 where
-    Self: Send + Sync,
-    N: FullNodeComponents<Types: NodeTypes<ChainSpec = HlChainSpec>>,
-    Self::Provider:
-        TransactionsProvider<Transaction = TransactionSigned> + ReceiptProvider<Receipt = Receipt>,
+    N: RpcNodeCore,
+    EthApiError: FromEvmError<N::Evm>,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
 {
-    async fn build_transaction_receipt(
-        &self,
-        tx: TransactionSigned,
-        meta: TransactionMeta,
-        receipt: Receipt,
-    ) -> Result<RpcReceipt<Self::NetworkTypes>, Self::Error> {
-        let hash = meta.block_hash;
-        // get all receipts for the block
-        let all_receipts = self
-            .cache()
-            .get_receipts(hash)
-            .await
-            .map_err(Self::Error::from_eth_err)?
-            .ok_or(EthApiError::HeaderNotFound(hash.into()))?;
-        let blob_params = self.provider().chain_spec().blob_params_at_timestamp(meta.timestamp);
-
-        build_receipt(&tx, meta, &receipt, &all_receipts, blob_params, |receipt_with_bloom| {
-            match receipt.tx_type {
-                TxType::Legacy => ReceiptEnvelope::Legacy(receipt_with_bloom),
-                TxType::Eip2930 => ReceiptEnvelope::Eip2930(receipt_with_bloom),
-                TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt_with_bloom),
-                TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt_with_bloom),
-                TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt_with_bloom),
-            }
-        })
-    }
 }
|
||||||
|
|||||||
@@ -1,51 +1,32 @@
-use super::{HlEthApi, HlNodeCore};
-use crate::evm::transaction::HlTxEnv;
-use alloy_rpc_types::TransactionRequest;
+use super::HlEthApi;
 use reth::rpc::server_types::eth::EthApiError;
-use reth_evm::{block::BlockExecutorFactory, ConfigureEvm, EvmFactory, TxEnvFor};
-use reth_primitives::NodePrimitives;
-use reth_provider::{ProviderError, ProviderHeader, ProviderTx};
+use reth_evm::TxEnvFor;
 use reth_rpc_eth_api::{
-    helpers::{estimate::EstimateCall, Call, EthCall, LoadBlock, LoadState, SpawnBlocking},
-    FromEvmError, FullEthApiTypes, RpcConvert, RpcTypes,
+    helpers::{estimate::EstimateCall, Call, EthCall},
+    FromEvmError, RpcConvert, RpcNodeCore,
 };
-use revm::context::TxEnv;
 
-impl<N> EthCall for HlEthApi<N>
+impl<N, Rpc> EthCall for HlEthApi<N, Rpc>
 where
-    Self: EstimateCall + LoadBlock + FullEthApiTypes,
-    N: HlNodeCore,
+    N: RpcNodeCore,
+    EthApiError: FromEvmError<N::Evm>,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, TxEnv = TxEnvFor<N::Evm>>,
 {
 }
 
-impl<N> EstimateCall for HlEthApi<N>
+impl<N, Rpc> EstimateCall for HlEthApi<N, Rpc>
 where
-    Self: Call,
-    Self::Error: From<EthApiError>,
-    N: HlNodeCore,
+    N: RpcNodeCore,
+    EthApiError: FromEvmError<N::Evm>,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, TxEnv = TxEnvFor<N::Evm>>,
 {
 }
 
-impl<N> Call for HlEthApi<N>
+impl<N, Rpc> Call for HlEthApi<N, Rpc>
 where
-    Self: LoadState<
-        Evm: ConfigureEvm<
-            Primitives: NodePrimitives<
-                BlockHeader = ProviderHeader<Self::Provider>,
-                SignedTx = ProviderTx<Self::Provider>,
-            >,
-            BlockExecutorFactory: BlockExecutorFactory<
-                EvmFactory: EvmFactory<Tx = HlTxEnv<TxEnv>>,
-            >,
-        >,
-        RpcConvert: RpcConvert<TxEnv = TxEnvFor<Self::Evm>, Network = Self::NetworkTypes>,
-        NetworkTypes: RpcTypes<TransactionRequest: From<TransactionRequest>>,
-        Error: FromEvmError<Self::Evm>
-            + From<<Self::RpcConvert as RpcConvert>::Error>
-            + From<ProviderError>,
-    > + SpawnBlocking,
-    Self::Error: From<EthApiError>,
-    N: HlNodeCore,
+    N: RpcNodeCore,
+    EthApiError: FromEvmError<N::Evm>,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, TxEnv = TxEnvFor<N::Evm>>,
 {
     #[inline]
     fn call_gas_limit(&self) -> u64 {
@@ -6,20 +6,15 @@ use crate::{
 use alloy_consensus::BlockHeader;
 use alloy_eips::eip4895::Withdrawal;
 use alloy_primitives::B256;
-use alloy_rpc_types_engine::{PayloadAttributes, PayloadError};
+use alloy_rpc_types_engine::PayloadError;
 use reth::{
     api::{FullNodeComponents, NodeTypes},
-    builder::{rpc::EngineValidatorBuilder, AddOnsContext},
-    consensus::ConsensusError,
-};
-use reth_engine_primitives::{EngineValidator, ExecutionPayload, PayloadValidator};
-use reth_payload_primitives::{
-    EngineApiMessageVersion, EngineObjectValidationError, NewPayloadError, PayloadOrAttributes,
-    PayloadTypes,
+    builder::{rpc::PayloadValidatorBuilder, AddOnsContext},
 };
+use reth_engine_primitives::{ExecutionPayload, PayloadValidator};
+use reth_payload_primitives::NewPayloadError;
 use reth_primitives::{RecoveredBlock, SealedBlock};
 use reth_primitives_traits::Block as _;
-use reth_trie_common::HashedPostState;
 use serde::{Deserialize, Serialize};
 use std::sync::Arc;
 
@@ -27,27 +22,27 @@ use super::payload::HlPayloadTypes;
 
 #[derive(Debug, Default, Clone)]
 #[non_exhaustive]
-pub struct HlEngineValidatorBuilder;
+pub struct HlPayloadValidatorBuilder;
 
-impl<Node, Types> EngineValidatorBuilder<Node> for HlEngineValidatorBuilder
+impl<Node, Types> PayloadValidatorBuilder<Node> for HlPayloadValidatorBuilder
 where
     Types: NodeTypes<ChainSpec = HlChainSpec, Payload = HlPayloadTypes, Primitives = HlPrimitives>,
     Node: FullNodeComponents<Types = Types>,
 {
-    type Validator = HlEngineValidator;
+    type Validator = HlPayloadValidator;
 
     async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result<Self::Validator> {
-        Ok(HlEngineValidator::new(Arc::new(ctx.config.chain.clone().as_ref().clone())))
+        Ok(HlPayloadValidator::new(Arc::new(ctx.config.chain.clone().as_ref().clone())))
     }
 }
 
-/// Validator for Optimism engine API.
+/// Validator for HyperEVM engine API.
 #[derive(Debug, Clone)]
-pub struct HlEngineValidator {
+pub struct HlPayloadValidator {
     inner: HlExecutionPayloadValidator<HlChainSpec>,
 }
 
-impl HlEngineValidator {
+impl HlPayloadValidator {
     /// Instantiates a new validator.
     pub fn new(chain_spec: Arc<HlChainSpec>) -> Self {
         Self { inner: HlExecutionPayloadValidator { inner: chain_spec } }
@@ -87,47 +82,17 @@ impl ExecutionPayload for HlExecutionData {
     }
 }
 
-impl PayloadValidator for HlEngineValidator {
+impl PayloadValidator<HlPayloadTypes> for HlPayloadValidator {
     type Block = HlBlock;
-    type ExecutionData = HlExecutionData;
 
     fn ensure_well_formed_payload(
         &self,
-        payload: Self::ExecutionData,
+        payload: HlExecutionData,
     ) -> Result<RecoveredBlock<Self::Block>, NewPayloadError> {
         let sealed_block =
             self.inner.ensure_well_formed_payload(payload).map_err(NewPayloadError::other)?;
         sealed_block.try_recover().map_err(|e| NewPayloadError::Other(e.into()))
     }
-
-    fn validate_block_post_execution_with_hashed_state(
-        &self,
-        _state_updates: &HashedPostState,
-        _block: &RecoveredBlock<Self::Block>,
-    ) -> Result<(), ConsensusError> {
-        Ok(())
-    }
-}
-
-impl<Types> EngineValidator<Types> for HlEngineValidator
-where
-    Types: PayloadTypes<PayloadAttributes = PayloadAttributes, ExecutionData = HlExecutionData>,
-{
-    fn validate_version_specific_fields(
-        &self,
-        _version: EngineApiMessageVersion,
-        _payload_or_attrs: PayloadOrAttributes<'_, Self::ExecutionData, PayloadAttributes>,
-    ) -> Result<(), EngineObjectValidationError> {
-        Ok(())
-    }
-
-    fn ensure_well_formed_attributes(
-        &self,
-        _version: EngineApiMessageVersion,
-        _attributes: &PayloadAttributes,
-    ) -> Result<(), EngineObjectValidationError> {
-        Ok(())
-    }
 }
 
 /// Execution payload validator.
@@ -158,7 +123,7 @@ where
             return Err(PayloadError::BlockHash {
                 execution: sealed_block.hash(),
                 consensus: expected_hash,
-            })?;
+            });
         }
 
         Ok(sealed_block)
@@ -1,104 +1,92 @@
 use alloy_network::Ethereum;
 use alloy_primitives::U256;
 use reth::{
+    api::{FullNodeTypes, HeaderTy, NodeTypes, PrimitivesTy},
     builder::{
         rpc::{EthApiBuilder, EthApiCtx},
         FullNodeComponents,
     },
-    chainspec::EthChainSpec,
-    primitives::EthereumHardforks,
-    providers::ChainSpecProvider,
     rpc::{
         eth::{core::EthApiInner, DevSigner, FullEthApiServer},
-        server_types::eth::{EthApiError, EthStateCache, FeeHistoryCache, GasPriceOracle},
+        server_types::eth::{
+            receipt::EthReceiptConverter, EthApiError, EthStateCache, FeeHistoryCache,
+            GasPriceOracle,
+        },
     },
     tasks::{
         pool::{BlockingTaskGuard, BlockingTaskPool},
         TaskSpawner,
     },
-    transaction_pool::TransactionPool,
 };
 use reth_evm::ConfigureEvm;
-use reth_network::NetworkInfo;
-use reth_primitives::NodePrimitives;
-use reth_provider::{
-    BlockNumReader, BlockReader, BlockReaderIdExt, ProviderBlock, ProviderHeader, ProviderReceipt,
-    ProviderTx, StageCheckpointReader, StateProviderFactory,
-};
+use reth_provider::{ChainSpecProvider, ProviderHeader, ProviderTx};
+use reth_rpc::RpcTypes;
 use reth_rpc_eth_api::{
     helpers::{
-        AddDevSigners, EthApiSpec, EthFees, EthSigner, EthState, LoadBlock, LoadFee, LoadState,
-        SpawnBlocking, Trace,
+        pending_block::BuildPendingEnv, spec::SignersForApi, AddDevSigners, EthApiSpec, EthFees,
+        EthState, LoadFee, LoadState, SpawnBlocking, Trace,
     },
-    EthApiTypes, FromEvmError, RpcConverter, RpcNodeCore, RpcNodeCoreExt,
+    EthApiTypes, FromEvmError, RpcConvert, RpcConverter, RpcNodeCore, RpcNodeCoreExt,
+    SignableTxRequest,
 };
-use std::{fmt, sync::Arc};
+use std::{fmt, marker::PhantomData, sync::Arc};
 
+use crate::chainspec::HlChainSpec;
+
 mod block;
 mod call;
 pub mod engine_api;
 mod transaction;
 
-/// A helper trait with requirements for [`RpcNodeCore`] to be used in [`HlEthApi`].
-pub trait HlNodeCore: RpcNodeCore<Provider: BlockReader> {}
-impl<T> HlNodeCore for T where T: RpcNodeCore<Provider: BlockReader> {}
-
-/// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API.
-pub type EthApiNodeBackend<N> = EthApiInner<
-    <N as RpcNodeCore>::Provider,
-    <N as RpcNodeCore>::Pool,
-    <N as RpcNodeCore>::Network,
-    <N as RpcNodeCore>::Evm,
->;
-
 /// Container type `HlEthApi`
-#[allow(missing_debug_implementations)]
-pub(crate) struct HlEthApiInner<N: HlNodeCore> {
+pub(crate) struct HlEthApiInner<N: RpcNodeCore, Rpc: RpcConvert> {
     /// Gateway to node's core components.
-    pub(crate) eth_api: EthApiNodeBackend<N>,
+    pub(crate) eth_api: EthApiInner<N, Rpc>,
 }
 
+type HlRpcConvert<N, NetworkT> =
+    RpcConverter<NetworkT, <N as FullNodeComponents>::Evm, EthReceiptConverter<HlChainSpec>>;
+
 #[derive(Clone)]
-pub struct HlEthApi<N: HlNodeCore> {
+pub struct HlEthApi<N: RpcNodeCore, Rpc: RpcConvert> {
     /// Gateway to node's core components.
-    pub(crate) inner: Arc<HlEthApiInner<N>>,
-    /// Converter for RPC types.
-    tx_resp_builder: RpcConverter<Ethereum, N::Evm, EthApiError, ()>,
-    /// Whether the node is in HL node compliant mode.
-    pub(crate) hl_node_compliant: bool,
+    pub(crate) inner: Arc<HlEthApiInner<N, Rpc>>,
 }
 
-impl<N: HlNodeCore> fmt::Debug for HlEthApi<N> {
+impl<N, Rpc> fmt::Debug for HlEthApi<N, Rpc>
+where
+    N: RpcNodeCore,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
+{
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("HlEthApi").finish_non_exhaustive()
     }
 }
 
-impl<N> EthApiTypes for HlEthApi<N>
+impl<N, Rpc> EthApiTypes for HlEthApi<N, Rpc>
 where
-    Self: Send + Sync,
-    N: HlNodeCore,
-    N::Evm: std::fmt::Debug,
+    N: RpcNodeCore,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
 {
     type Error = EthApiError;
-    type NetworkTypes = Ethereum;
-    type RpcConvert = RpcConverter<Ethereum, N::Evm, EthApiError, ()>;
+    type NetworkTypes = Rpc::Network;
+    type RpcConvert = Rpc;
 
     fn tx_resp_builder(&self) -> &Self::RpcConvert {
-        &self.tx_resp_builder
+        self.inner.eth_api.tx_resp_builder()
     }
 }
 
-impl<N> RpcNodeCore for HlEthApi<N>
+impl<N, Rpc> RpcNodeCore for HlEthApi<N, Rpc>
 where
-    N: HlNodeCore,
+    N: RpcNodeCore,
+    Rpc: RpcConvert<Primitives = N::Primitives>,
 {
     type Primitives = N::Primitives;
     type Provider = N::Provider;
     type Pool = N::Pool;
-    type Evm = <N as RpcNodeCore>::Evm;
-    type Network = <N as RpcNodeCore>::Network;
-    type PayloadBuilder = ();
+    type Evm = N::Evm;
+    type Network = N::Network;
 
     #[inline]
     fn pool(&self) -> &Self::Pool {
@@ -115,37 +103,30 @@ where
         self.inner.eth_api.network()
     }
 
-    #[inline]
-    fn payload_builder(&self) -> &Self::PayloadBuilder {
-        &()
-    }
-
     #[inline]
     fn provider(&self) -> &Self::Provider {
         self.inner.eth_api.provider()
     }
 }
 
-impl<N> RpcNodeCoreExt for HlEthApi<N>
+impl<N, Rpc> RpcNodeCoreExt for HlEthApi<N, Rpc>
 where
-    N: HlNodeCore,
+    N: RpcNodeCore,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
 {
     #[inline]
-    fn cache(&self) -> &EthStateCache<ProviderBlock<N::Provider>, ProviderReceipt<N::Provider>> {
+    fn cache(&self) -> &EthStateCache<N::Primitives> {
         self.inner.eth_api.cache()
     }
 }
 
-impl<N> EthApiSpec for HlEthApi<N>
+impl<N, Rpc> EthApiSpec for HlEthApi<N, Rpc>
 where
-    N: HlNodeCore<
-        Provider: ChainSpecProvider<ChainSpec: EthereumHardforks>
-            + BlockNumReader
-            + StageCheckpointReader,
-        Network: NetworkInfo,
-    >,
+    N: RpcNodeCore,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
 {
     type Transaction = ProviderTx<Self::Provider>;
+    type Rpc = Rpc::Network;
 
     #[inline]
     fn starting_block(&self) -> U256 {
@@ -153,16 +134,15 @@ where
     }
 
     #[inline]
-    fn signers(&self) -> &parking_lot::RwLock<Vec<Box<dyn EthSigner<ProviderTx<Self::Provider>>>>> {
+    fn signers(&self) -> &SignersForApi<Self> {
         self.inner.eth_api.signers()
     }
 }
 
-impl<N> SpawnBlocking for HlEthApi<N>
+impl<N, Rpc> SpawnBlocking for HlEthApi<N, Rpc>
 where
-    Self: Send + Sync + Clone + 'static,
-    N: HlNodeCore,
-    N::Evm: std::fmt::Debug,
+    N: RpcNodeCore,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
 {
     #[inline]
     fn io_task_spawner(&self) -> impl TaskSpawner {
@@ -180,14 +160,11 @@ where
     }
 }
 
-impl<N> LoadFee for HlEthApi<N>
+impl<N, Rpc> LoadFee for HlEthApi<N, Rpc>
 where
-    Self: LoadBlock<Provider = N::Provider>,
-    N: HlNodeCore<
-        Provider: BlockReaderIdExt
-            + ChainSpecProvider<ChainSpec: EthChainSpec + EthereumHardforks>
-            + StateProviderFactory,
-    >,
+    N: RpcNodeCore,
+    EthApiError: FromEvmError<N::Evm>,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
 {
     #[inline]
     fn gas_oracle(&self) -> &GasPriceOracle<Self::Provider> {
@@ -195,25 +172,22 @@ where
     }
 
     #[inline]
-    fn fee_history_cache(&self) -> &FeeHistoryCache {
+    fn fee_history_cache(&self) -> &FeeHistoryCache<ProviderHeader<N::Provider>> {
         self.inner.eth_api.fee_history_cache()
     }
 }
 
-impl<N> LoadState for HlEthApi<N>
+impl<N, Rpc> LoadState for HlEthApi<N, Rpc>
 where
-    N: HlNodeCore<
-        Provider: StateProviderFactory + ChainSpecProvider<ChainSpec: EthereumHardforks>,
-        Pool: TransactionPool,
-    >,
-    N::Evm: std::fmt::Debug,
+    N: RpcNodeCore,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
 {
 }
 
-impl<N> EthState for HlEthApi<N>
+impl<N, Rpc> EthState for HlEthApi<N, Rpc>
 where
-    Self: LoadState + SpawnBlocking,
-    N: HlNodeCore,
+    N: RpcNodeCore,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
 {
     #[inline]
     fn max_proof_window(&self) -> u64 {
@@ -221,36 +195,28 @@ where
     }
 }
 
-impl<N> EthFees for HlEthApi<N>
+impl<N, Rpc> EthFees for HlEthApi<N, Rpc>
 where
-    Self: LoadFee<
-        Provider: ChainSpecProvider<
-            ChainSpec: EthChainSpec<Header = ProviderHeader<Self::Provider>>,
-        >,
+    N: RpcNodeCore,
+    EthApiError: FromEvmError<N::Evm>,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
+{
+}
+
+impl<N, Rpc> Trace for HlEthApi<N, Rpc>
+where
+    N: RpcNodeCore,
+    EthApiError: FromEvmError<N::Evm>,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
+{
+}
+
+impl<N, Rpc> AddDevSigners for HlEthApi<N, Rpc>
+where
+    N: RpcNodeCore,
+    Rpc: RpcConvert<
+        Network: RpcTypes<TransactionRequest: SignableTxRequest<ProviderTx<N::Provider>>>,
     >,
-    N: HlNodeCore,
-{
-}
-
-impl<N> Trace for HlEthApi<N>
-where
-    Self: RpcNodeCore<Provider: BlockReader>
-        + LoadState<
-            Evm: ConfigureEvm<
-                Primitives: NodePrimitives<
-                    BlockHeader = ProviderHeader<Self::Provider>,
-                    SignedTx = ProviderTx<Self::Provider>,
-                >,
-            >,
-            Error: FromEvmError<Self::Evm>,
-        >,
-    N: HlNodeCore,
 {
     fn with_dev_accounts(&self) {
         *self.inner.eth_api.signers().write() = DevSigner::random_signers(20)
@@ -258,40 +224,41 @@ where
     }
 }
 
 /// Builds [`HlEthApi`] for HL.
-#[derive(Debug, Default)]
+#[derive(Debug)]
 #[non_exhaustive]
-pub struct HlEthApiBuilder {
-    /// Whether the node is in HL node compliant mode.
-    pub(crate) hl_node_compliant: bool,
+pub struct HlEthApiBuilder<NetworkT = Ethereum> {
+    /// Marker for network types.
+    pub(crate) _nt: PhantomData<NetworkT>,
 }
 
-impl<N> EthApiBuilder<N> for HlEthApiBuilder
-where
-    N: FullNodeComponents,
-    HlEthApi<N>: FullEthApiServer<Provider = N::Provider, Pool = N::Pool>,
-{
-    type EthApi = HlEthApi<N>;
-
-    async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result<Self::EthApi> {
-        let eth_api = reth::rpc::eth::EthApiBuilder::new(
-            ctx.components.provider().clone(),
-            ctx.components.pool().clone(),
-            ctx.components.network().clone(),
-            ctx.components.evm_config().clone(),
-        )
-        .eth_cache(ctx.cache)
-        .task_spawner(ctx.components.task_executor().clone())
-        .gas_cap(ctx.config.rpc_gas_cap.into())
-        .max_simulate_blocks(ctx.config.rpc_max_simulate_blocks)
-        .eth_proof_window(ctx.config.eth_proof_window)
-        .fee_history_cache_config(ctx.config.fee_history_cache)
-        .proof_permits(ctx.config.proof_permits)
-        .build_inner();
-
-        Ok(HlEthApi {
-            inner: Arc::new(HlEthApiInner { eth_api }),
-            tx_resp_builder: Default::default(),
-            hl_node_compliant: self.hl_node_compliant,
-        })
+impl<NetworkT> Default for HlEthApiBuilder<NetworkT> {
+    fn default() -> Self {
+        Self { _nt: PhantomData }
+    }
+}
+
+impl<N, NetworkT> EthApiBuilder<N> for HlEthApiBuilder<NetworkT>
+where
+    N: FullNodeComponents<Types: NodeTypes<ChainSpec = HlChainSpec>>
+        + RpcNodeCore<
+            Primitives = PrimitivesTy<N::Types>,
+            Evm: ConfigureEvm<NextBlockEnvCtx: BuildPendingEnv<HeaderTy<N::Types>>>,
+        >,
+    NetworkT: RpcTypes,
+    HlRpcConvert<N, NetworkT>: RpcConvert<Network = NetworkT, Primitives = PrimitivesTy<N::Types>>,
+    HlEthApi<N, HlRpcConvert<N, NetworkT>>: FullEthApiServer<
+        Provider = <N as FullNodeTypes>::Provider,
+        Pool = <N as FullNodeComponents>::Pool,
+    > + AddDevSigners,
+{
+    type EthApi = HlEthApi<N, HlRpcConvert<N, NetworkT>>;
+
+    async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result<Self::EthApi> {
+        let provider = FullNodeComponents::provider(ctx.components);
+        let rpc_converter =
+            RpcConverter::new(EthReceiptConverter::<HlChainSpec>::new(provider.chain_spec()));
+        let eth_api = ctx.eth_api_builder().with_rpc_converter(rpc_converter).build_inner();
+
+        Ok(HlEthApi { inner: Arc::new(HlEthApiInner { eth_api }) })
     }
 }
@@ -1,51 +1,28 @@
-use super::HlNodeCore;
 use crate::node::rpc::HlEthApi;
 use alloy_primitives::{Bytes, B256};
-use reth::{
-    rpc::server_types::eth::utils::recover_raw_transaction,
-    transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool},
-};
-use reth_provider::{BlockReader, BlockReaderIdExt, ProviderTx, TransactionsProvider};
+use reth::rpc::server_types::eth::EthApiError;
 use reth_rpc_eth_api::{
-    helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking},
-    FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt,
+    helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction},
+    RpcConvert, RpcNodeCore,
 };
 
-impl<N> LoadTransaction for HlEthApi<N>
+impl<N, Rpc> LoadTransaction for HlEthApi<N, Rpc>
 where
-    Self: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt,
-    N: HlNodeCore<Provider: TransactionsProvider, Pool: TransactionPool>,
-    Self::Pool: TransactionPool,
+    N: RpcNodeCore,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
 {
 }
 
-impl<N> EthTransactions for HlEthApi<N>
+impl<N, Rpc> EthTransactions for HlEthApi<N, Rpc>
 where
-    Self: LoadTransaction<Provider: BlockReaderIdExt>,
-    N: HlNodeCore<Provider: BlockReader<Transaction = ProviderTx<Self::Provider>>>,
+    N: RpcNodeCore,
+    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
 {
-    fn signers(&self) -> &parking_lot::RwLock<Vec<Box<dyn EthSigner<ProviderTx<Self::Provider>>>>> {
+    fn signers(&self) -> &SignersForRpc<Self::Provider, Self::NetworkTypes> {
         self.inner.eth_api.signers()
     }
 
-    /// Decodes and recovers the transaction and submits it to the pool.
-    ///
-    /// Returns the hash of the transaction.
-    async fn send_raw_transaction(&self, tx: Bytes) -> Result<B256, Self::Error> {
-        let recovered = recover_raw_transaction(&tx)?;
-
-        // broadcast raw transaction to subscribers if there is any.
-        self.inner.eth_api.broadcast_raw_transaction(tx);
-
-        let pool_transaction = <Self::Pool as TransactionPool>::Transaction::from_pooled(recovered);
-
-        // submit the transaction to the pool with a `Local` origin
-        let hash = self
-            .pool()
-            .add_transaction(TransactionOrigin::Local, pool_transaction)
-            .await
-            .map_err(Self::Error::from_eth_err)?;
-
-        Ok(hash)
+    async fn send_raw_transaction(&self, _tx: Bytes) -> Result<B256, Self::Error> {
+        unreachable!()
     }
 }
@@ -3,8 +3,7 @@ use eyre::{Error, Result};
 use serde::{Deserialize, Serialize};
 use std::collections::BTreeMap;
 
-pub(crate) const MAINNET_CHAIN_ID: u64 = 999;
-pub(crate) const TESTNET_CHAIN_ID: u64 = 998;
+use crate::chainspec::{MAINNET_CHAIN_ID, TESTNET_CHAIN_ID};
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
 struct EvmContract {
@@ -7,7 +7,7 @@ use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable};
 use bytes::BufMut;
 use serde::{Deserialize, Serialize};
 
-use crate::{node::spot_meta::MAINNET_CHAIN_ID, HlBlock};
+use crate::HlBlock;
 
 pub type ReadPrecompileCall = (Address, Vec<(ReadPrecompileInput, ReadPrecompileResult)>);
 
@@ -50,13 +50,13 @@ pub struct BlockAndReceipts {
 }
 
 impl BlockAndReceipts {
-    pub fn to_reth_block(self) -> HlBlock {
+    pub fn to_reth_block(self, chain_id: u64) -> HlBlock {
         let EvmBlock::Reth115(block) = self.block;
         block.to_reth_block(
             self.read_precompile_calls.clone(),
             self.highest_precompile_address,
             self.system_txs.clone(),
-            MAINNET_CHAIN_ID,
+            chain_id,
         )
     }
 
@@ -1,3 +1,7 @@
+use std::time::Duration;
+
+use crate::pseudo_peer::HlNodeBlockSourceArgs;
+
 use super::config::BlockSourceConfig;
 use clap::{Args, Parser};
 use reth_node_core::args::LogArgs;
@@ -13,7 +17,7 @@ pub struct BlockSourceArgs {
     block_source: Option<String>,
 
     #[arg(long, alias = "local-ingest-dir")]
-    block_source_from_node: Option<String>,
+    local_ingest_dir: Option<String>,
 
     /// Shorthand of --block-source=s3://hl-mainnet-evm-blocks
     #[arg(long, default_value_t = false)]
@@ -22,6 +26,19 @@ pub struct BlockSourceArgs {
     /// Shorthand of --block-source-from-node=~/hl/data/evm_blocks_and_receipts
     #[arg(long)]
     local: bool,
+
+    /// Interval for polling new blocks in S3 in milliseconds.
+    #[arg(id = "s3.polling-interval", long = "s3.polling-interval", default_value = "25")]
+    s3_polling_interval: u64,
+
+    /// Maximum allowed delay for the hl-node block source in milliseconds.
+    /// If this threshold is exceeded, the client falls back to other sources.
+    #[arg(
+        id = "local.fallback-threshold",
+        long = "local.fallback-threshold",
+        default_value = "5000"
+    )]
+    local_fallback_threshold: u64,
 }
 
 impl BlockSourceArgs {
@@ -33,7 +50,10 @@ impl BlockSourceArgs {
 
     async fn create_base_config(&self) -> eyre::Result<BlockSourceConfig> {
         if self.s3 {
-            return Ok(BlockSourceConfig::s3_default().await);
+            return Ok(BlockSourceConfig::s3_default(Duration::from_millis(
+                self.s3_polling_interval,
+            ))
+            .await);
         }
 
         if self.local {
@@ -47,18 +67,25 @@ impl BlockSourceArgs {
         };
 
         if let Some(bucket) = value.strip_prefix("s3://") {
-            Ok(BlockSourceConfig::s3(bucket.to_string()).await)
+            Ok(BlockSourceConfig::s3(
+                bucket.to_string(),
+                Duration::from_millis(self.s3_polling_interval),
+            )
+            .await)
         } else {
             Ok(BlockSourceConfig::local(value.into()))
         }
     }
 
     fn apply_node_source_config(&self, config: BlockSourceConfig) -> BlockSourceConfig {
-        let Some(block_source_from_node) = self.block_source_from_node.as_ref() else {
+        let Some(local_ingest_dir) = self.local_ingest_dir.as_ref() else {
            return config;
        };
 
-        config.with_block_source_from_node(block_source_from_node.to_string())
+        config.with_block_source_from_node(HlNodeBlockSourceArgs {
+            root: local_ingest_dir.into(),
+            fallback_threshold: Duration::from_millis(self.local_fallback_threshold),
+        })
    }
 }
@@ -1,34 +1,38 @@
-use super::{
-    consts::DEFAULT_S3_BUCKET,
-    sources::{
-        BlockSourceBoxed, CachedBlockSource, HlNodeBlockSource, LocalBlockSource, S3BlockSource,
-    },
+use crate::chainspec::HlChainSpec;
+
+use super::sources::{
+    BlockSourceBoxed, CachedBlockSource, HlNodeBlockSource, HlNodeBlockSourceArgs,
+    LocalBlockSource, S3BlockSource,
 };
 use aws_config::BehaviorVersion;
-use std::{env::home_dir, path::PathBuf, sync::Arc};
+use std::{env::home_dir, path::PathBuf, sync::Arc, time::Duration};
 
 #[derive(Debug, Clone)]
 pub struct BlockSourceConfig {
     pub source_type: BlockSourceType,
-    pub block_source_from_node: Option<String>,
+    pub block_source_from_node: Option<HlNodeBlockSourceArgs>,
 }
 
 #[derive(Debug, Clone)]
 pub enum BlockSourceType {
-    S3 { bucket: String },
+    S3Default { polling_interval: Duration },
+    S3 { bucket: String, polling_interval: Duration },
     Local { path: PathBuf },
 }
 
 impl BlockSourceConfig {
-    pub async fn s3_default() -> Self {
+    pub async fn s3_default(polling_interval: Duration) -> Self {
         Self {
-            source_type: BlockSourceType::S3 { bucket: DEFAULT_S3_BUCKET.to_string() },
+            source_type: BlockSourceType::S3Default { polling_interval },
             block_source_from_node: None,
         }
     }
 
-    pub async fn s3(bucket: String) -> Self {
-        Self { source_type: BlockSourceType::S3 { bucket }, block_source_from_node: None }
+    pub async fn s3(bucket: String, polling_interval: Duration) -> Self {
+        Self {
+            source_type: BlockSourceType::S3 { bucket, polling_interval },
+            block_source_from_node: None,
+        }
     }
 
     pub fn local(path: PathBuf) -> Self {
@@ -48,21 +52,21 @@ impl BlockSourceConfig {
         }
     }
 
-    pub fn with_block_source_from_node(mut self, block_source_from_node: String) -> Self {
+    pub fn with_block_source_from_node(
+        mut self,
+        block_source_from_node: HlNodeBlockSourceArgs,
+    ) -> Self {
         self.block_source_from_node = Some(block_source_from_node);
         self
     }
 
-    pub async fn create_block_source(&self) -> BlockSourceBoxed {
+    pub async fn create_block_source(&self, chain_spec: HlChainSpec) -> BlockSourceBoxed {
         match &self.source_type {
-            BlockSourceType::S3 { bucket } => {
-                let client = aws_sdk_s3::Client::new(
-                    &aws_config::defaults(BehaviorVersion::latest())
-                        .region("ap-northeast-1")
-                        .load()
-                        .await,
-                );
-                Arc::new(Box::new(S3BlockSource::new(client, bucket.clone())))
+            BlockSourceType::S3Default { polling_interval } => {
+                s3_block_source(chain_spec.official_s3_bucket(), *polling_interval).await
+            }
+            BlockSourceType::S3 { bucket, polling_interval } => {
+                s3_block_source(bucket, *polling_interval).await
             }
             BlockSourceType::Local { path } => {
                 Arc::new(Box::new(LocalBlockSource::new(path.clone())))
@@ -82,16 +86,28 @@ impl BlockSourceConfig {
         Arc::new(Box::new(
             HlNodeBlockSource::new(
                 fallback_block_source,
-                PathBuf::from(block_source_from_node.clone()),
+                block_source_from_node.clone(),
                 next_block_number,
             )
             .await,
         ))
     }
 
-    pub async fn create_cached_block_source(&self, next_block_number: u64) -> BlockSourceBoxed {
-        let block_source = self.create_block_source().await;
-        let block_source = self.create_block_source_from_node(next_block_number, block_source).await;
+    pub async fn create_cached_block_source(
+        &self,
+        chain_spec: HlChainSpec,
+        next_block_number: u64,
+    ) -> BlockSourceBoxed {
+        let block_source = self.create_block_source(chain_spec).await;
+        let block_source =
+            self.create_block_source_from_node(next_block_number, block_source).await;
         Arc::new(Box::new(CachedBlockSource::new(block_source)))
     }
 }
+
+async fn s3_block_source(bucket: impl AsRef<str>, polling_interval: Duration) -> BlockSourceBoxed {
+    let client = aws_sdk_s3::Client::new(
+        &aws_config::defaults(BehaviorVersion::latest()).region("ap-northeast-1").load().await,
+    );
+    Arc::new(Box::new(S3BlockSource::new(client, bucket.as_ref().to_string(), polling_interval)))
+}
@@ -1,2 +0,0 @@
-pub const MAX_CONCURRENCY: usize = 100;
-pub const DEFAULT_S3_BUCKET: &str = "hl-mainnet-evm-blocks";
@@ -1,36 +0,0 @@
-use thiserror::Error;
-
-#[derive(Error, Debug)]
-pub enum PseudoPeerError {
-    #[error("Block source error: {0}")]
-    BlockSource(String),
-
-    #[error("Network error: {0}")]
-    Network(#[from] reth_network::error::NetworkError),
-
-    #[error("Configuration error: {0}")]
-    Config(String),
-
-    #[error("AWS S3 error: {0}")]
-    S3(#[from] aws_sdk_s3::Error),
-
-    #[error("IO error: {0}")]
-    Io(#[from] std::io::Error),
-
-    #[error("Serialization error: {0}")]
-    Serialization(#[from] rmp_serde::encode::Error),
-
-    #[error("Deserialization error: {0}")]
-    Deserialization(#[from] rmp_serde::decode::Error),
-
-    #[error("Compression error: {0}")]
-    Compression(String),
-}
-
-impl From<eyre::Error> for PseudoPeerError {
-    fn from(err: eyre::Error) -> Self {
-        PseudoPeerError::Config(err.to_string())
-    }
-}
-
-pub type Result<T> = std::result::Result<T, PseudoPeerError>;
@@ -5,33 +5,25 @@
 
 pub mod cli;
 pub mod config;
-pub mod consts;
-pub mod error;
 pub mod network;
 pub mod service;
 pub mod sources;
 pub mod utils;
 
 use std::sync::Arc;
+use tokio::sync::mpsc;
+use tracing::info;
 
 pub use cli::*;
 pub use config::*;
-pub use error::*;
 pub use network::*;
 pub use service::*;
 pub use sources::*;
 
-#[cfg(test)]
-mod tests;
-
-use tokio::sync::mpsc;
-use tracing::info;
-
 /// Re-export commonly used types
 pub mod prelude {
     pub use super::{
         config::BlockSourceConfig,
-        error::{PseudoPeerError, Result},
         service::{BlockPoller, PseudoPeer},
         sources::{BlockSource, CachedBlockSource, LocalBlockSource, S3BlockSource},
     };
@@ -50,6 +42,7 @@ pub async fn start_pseudo_peer(
 
     // Create network manager
     let (mut network, start_tx) = create_network_manager::<BlockSourceBoxed>(
+        (*chain_spec).clone(),
         destination_peer,
         block_source.clone(),
         blockhash_cache.clone(),
@@ -1,16 +1,16 @@
 use super::service::{BlockHashCache, BlockPoller};
-use crate::{
-    chainspec::{parser::chain_value_parser, HlChainSpec},
-    node::network::HlNetworkPrimitives,
-    HlPrimitives,
-};
+use crate::{chainspec::HlChainSpec, node::network::HlNetworkPrimitives, HlPrimitives};
 use reth_network::{
     config::{rng_secret_key, SecretKey},
     NetworkConfig, NetworkManager, PeersConfig,
 };
 use reth_network_peers::TrustedPeer;
 use reth_provider::test_utils::NoopProvider;
-use std::{str::FromStr, sync::Arc};
+use std::{
+    net::{Ipv4Addr, SocketAddr},
+    str::FromStr,
+    sync::Arc,
+};
 use tokio::sync::mpsc;
 
 pub struct NetworkBuilder {
@@ -19,6 +19,7 @@ pub struct NetworkBuilder {
     boot_nodes: Vec<TrustedPeer>,
     discovery_port: u16,
     listener_port: u16,
+    chain_spec: HlChainSpec,
 }
 
 impl Default for NetworkBuilder {
@@ -29,29 +30,19 @@ impl Default for NetworkBuilder {
             boot_nodes: vec![],
             discovery_port: 0,
             listener_port: 0,
+            chain_spec: HlChainSpec::default(),
         }
     }
 }
 
 impl NetworkBuilder {
-    pub fn with_secret(mut self, secret: SecretKey) -> Self {
-        self.secret = secret;
-        self
-    }
-
-    pub fn with_peer_config(mut self, peer_config: PeersConfig) -> Self {
-        self.peer_config = peer_config;
-        self
-    }
-
     pub fn with_boot_nodes(mut self, boot_nodes: Vec<TrustedPeer>) -> Self {
         self.boot_nodes = boot_nodes;
         self
     }
 
-    pub fn with_ports(mut self, discovery_port: u16, listener_port: u16) -> Self {
-        self.discovery_port = discovery_port;
-        self.listener_port = listener_port;
+    pub fn with_chain_spec(mut self, chain_spec: HlChainSpec) -> Self {
+        self.chain_spec = chain_spec;
         self
     }
 
@@ -63,15 +54,17 @@ impl NetworkBuilder {
         let builder = NetworkConfig::<(), HlNetworkPrimitives>::builder(self.secret)
             .boot_nodes(self.boot_nodes)
             .peer_config(self.peer_config)
-            .discovery_port(self.discovery_port)
-            .listener_port(self.listener_port);
+            .discovery_addr(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), self.discovery_port))
+            .listener_addr(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), self.listener_port));
+        let chain_id = self.chain_spec.inner.chain().id();
 
-        let (block_poller, start_tx) = BlockPoller::new_suspended(block_source, blockhash_cache);
+        let (block_poller, start_tx) =
+            BlockPoller::new_suspended(chain_id, block_source, blockhash_cache);
         let config = builder.block_import(Box::new(block_poller)).build(Arc::new(NoopProvider::<
             HlChainSpec,
             HlPrimitives,
         >::new(
-            chain_value_parser("mainnet").unwrap(),
+            self.chain_spec.into(),
         )));
 
         let network = NetworkManager::new(config).await.map_err(|e| eyre::eyre!(e))?;
@@ -80,12 +73,14 @@ impl NetworkBuilder {
 }
 
 pub async fn create_network_manager<BS>(
+    chain_spec: HlChainSpec,
     destination_peer: String,
     block_source: Arc<Box<dyn super::sources::BlockSource>>,
     blockhash_cache: BlockHashCache,
 ) -> eyre::Result<(NetworkManager<HlNetworkPrimitives>, mpsc::Sender<()>)> {
     NetworkBuilder::default()
         .with_boot_nodes(vec![TrustedPeer::from_str(&destination_peer).unwrap()])
+        .with_chain_spec(chain_spec)
         .build::<BS>(block_source, blockhash_cache)
         .await
 }
@ -26,7 +26,6 @@ use std::{
|
|||||||
pin::Pin,
|
pin::Pin,
|
||||||
sync::{Arc, Mutex},
|
sync::{Arc, Mutex},
|
||||||
task::{Context, Poll},
|
task::{Context, Poll},
|
||||||
time::Duration,
|
|
||||||
};
|
};
|
||||||
use tokio::{sync::mpsc, task::JoinHandle};
|
use tokio::{sync::mpsc, task::JoinHandle};
|
||||||
use tracing::{debug, info};
|
use tracing::{debug, info};
|
||||||
@ -42,15 +41,15 @@ pub fn new_blockhash_cache() -> BlockHashCache {
|
|||||||
/// A block poller that polls blocks from `BlockSource` and sends them to the `block_tx`
|
/// A block poller that polls blocks from `BlockSource` and sends them to the `block_tx`
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct BlockPoller {
|
pub struct BlockPoller {
|
||||||
|
chain_id: u64,
|
||||||
block_rx: mpsc::Receiver<(u64, BlockAndReceipts)>,
|
block_rx: mpsc::Receiver<(u64, BlockAndReceipts)>,
|
||||||
task: JoinHandle<eyre::Result<()>>,
|
task: JoinHandle<eyre::Result<()>>,
|
||||||
blockhash_cache: BlockHashCache,
|
blockhash_cache: BlockHashCache,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl BlockPoller {
|
impl BlockPoller {
|
||||||
const POLL_INTERVAL: Duration = Duration::from_millis(25);
|
|
||||||
|
|
||||||
pub fn new_suspended<BS: BlockSource>(
|
pub fn new_suspended<BS: BlockSource>(
|
||||||
|
chain_id: u64,
|
||||||
block_source: BS,
|
block_source: BS,
|
||||||
blockhash_cache: BlockHashCache,
|
blockhash_cache: BlockHashCache,
|
||||||
) -> (Self, mpsc::Sender<()>) {
|
) -> (Self, mpsc::Sender<()>) {
|
||||||
@ -59,7 +58,7 @@ impl BlockPoller {
|
|||||||
let (block_tx, block_rx) = mpsc::channel(100);
|
let (block_tx, block_rx) = mpsc::channel(100);
|
||||||
let block_tx_clone = block_tx.clone();
|
let block_tx_clone = block_tx.clone();
|
||||||
let task = tokio::spawn(Self::task(start_rx, block_source, block_tx_clone));
|
let task = tokio::spawn(Self::task(start_rx, block_source, block_tx_clone));
|
||||||
(Self { block_rx, task, blockhash_cache: blockhash_cache.clone() }, start_tx)
|
(Self { chain_id, block_rx, task, blockhash_cache: blockhash_cache.clone() }, start_tx)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(unused)]
|
#[allow(unused)]
|
||||||
@ -75,19 +74,20 @@ impl BlockPoller {
|
|||||||
start_rx.recv().await.ok_or(eyre::eyre!("Failed to receive start signal"))?;
|
start_rx.recv().await.ok_or(eyre::eyre!("Failed to receive start signal"))?;
|
||||||
info!("Starting block poller");
|
info!("Starting block poller");
|
||||||
|
|
||||||
let latest_block_number = block_source
|
let polling_interval = block_source.polling_interval();
|
||||||
|
let mut next_block_number = block_source
|
||||||
.find_latest_block_number()
|
.find_latest_block_number()
|
||||||
.await
|
.await
|
||||||
.ok_or(eyre::eyre!("Failed to find latest block number"))?;
|
.ok_or(eyre::eyre!("Failed to find latest block number"))?;
|
||||||
|
|
||||||
let mut next_block_number = latest_block_number;
|
|
||||||
loop {
|
loop {
|
||||||
let Ok(block) = block_source.collect_block(next_block_number).await else {
|
match block_source.collect_block(next_block_number).await {
|
||||||
tokio::time::sleep(Self::POLL_INTERVAL).await;
|
Ok(block) => {
|
||||||
continue;
|
block_tx_clone.send((next_block_number, block)).await?;
|
||||||
};
|
next_block_number += 1;
|
||||||
block_tx_clone.send((next_block_number, block)).await?;
|
}
|
||||||
next_block_number += 1;
|
Err(_) => tokio::time::sleep(polling_interval).await,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -98,7 +98,7 @@ impl BlockImport<HlNewBlock> for BlockPoller {
|
|||||||
match Pin::new(&mut self.block_rx).poll_recv(_cx) {
|
match Pin::new(&mut self.block_rx).poll_recv(_cx) {
|
||||||
Poll::Ready(Some((number, block))) => {
|
Poll::Ready(Some((number, block))) => {
|
||||||
debug!("Polled block: {}", number);
|
debug!("Polled block: {}", number);
|
||||||
let reth_block = block.to_reth_block();
|
let reth_block = block.to_reth_block(self.chain_id);
|
||||||
let hash = reth_block.header.hash_slow();
|
let hash = reth_block.header.hash_slow();
|
||||||
self.blockhash_cache.write().insert(hash, number);
|
self.blockhash_cache.write().insert(hash, number);
|
||||||
let td = U128::from(reth_block.header.difficulty);
|
let td = U128::from(reth_block.header.difficulty);
|
||||||
@ -109,8 +109,7 @@ impl BlockImport<HlNewBlock> for BlockPoller {
|
|||||||
},
|
},
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
Poll::Ready(None) => Poll::Pending,
|
Poll::Ready(None) | Poll::Pending => Poll::Pending,
|
||||||
Poll::Pending => Poll::Pending,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -155,18 +154,18 @@ impl<BS: BlockSource> PseudoPeer<BS> {
|
|||||||
block_numbers: impl IntoIterator<Item = u64>,
|
block_numbers: impl IntoIterator<Item = u64>,
|
||||||
) -> Vec<BlockAndReceipts> {
|
) -> Vec<BlockAndReceipts> {
|
||||||
let block_numbers = block_numbers.into_iter().collect::<Vec<_>>();
|
let block_numbers = block_numbers.into_iter().collect::<Vec<_>>();
|
||||||
let blocks = futures::stream::iter(block_numbers)
|
futures::stream::iter(block_numbers)
|
||||||
.map(async |number| self.collect_block(number).await.unwrap())
|
.map(async |number| self.collect_block(number).await.unwrap())
|
||||||
.buffered(self.block_source.recommended_chunk_size() as usize)
|
.buffered(self.block_source.recommended_chunk_size() as usize)
|
||||||
.collect::<Vec<_>>()
|
.collect::<Vec<_>>()
|
||||||
.await;
|
.await
|
||||||
blocks
|
|
||||||
}
|
}
|
||||||
|
|
||||||
    pub async fn process_eth_request(
        &mut self,
        eth_req: IncomingEthRequest<HlNetworkPrimitives>,
    ) -> eyre::Result<()> {
+       let chain_id = self.chain_spec.inner.chain().id();
        match eth_req {
            IncomingEthRequest::GetBlockHeaders {
                peer_id: _,
@@ -176,7 +175,6 @@ impl<BS: BlockSource> PseudoPeer<BS> {
                debug!(
                    "GetBlockHeaders request: {start_block:?}, {limit:?}, {skip:?}, {direction:?}"
                );

                let number = match start_block {
                    HashOrNumber::Hash(hash) => self.hash_to_block_number(hash).await,
                    HashOrNumber::Number(number) => number,
@@ -189,7 +187,7 @@ impl<BS: BlockSource> PseudoPeer<BS> {
                    }
                }
                .into_par_iter()
-               .map(|block| block.to_reth_block().header.clone())
+               .map(|block| block.to_reth_block(chain_id).header.clone())
                .collect::<Vec<_>>();

                let _ = response.send(Ok(BlockHeaders(block_headers)));
@@ -207,17 +205,13 @@ impl<BS: BlockSource> PseudoPeer<BS> {
                    .collect_blocks(numbers)
                    .await
                    .into_iter()
-                   .map(|block| block.to_reth_block().body)
+                   .map(|block| block.to_reth_block(chain_id).body)
                    .collect::<Vec<_>>();

                let _ = response.send(Ok(BlockBodies(block_bodies)));
            }
-           IncomingEthRequest::GetNodeData { .. } => {
-               debug!("GetNodeData request: {eth_req:?}");
-           }
-           eth_req => {
-               debug!("New eth protocol request: {eth_req:?}");
-           }
+           IncomingEthRequest::GetNodeData { .. } => debug!("GetNodeData request: {eth_req:?}"),
+           eth_req => debug!("New eth protocol request: {eth_req:?}"),
        }
        Ok(())
    }
@@ -248,7 +242,6 @@ impl<BS: BlockSource> PseudoPeer<BS> {
        // This is tricky because Raw EVM files (BlockSource) do not have a hash-to-number mapping,
        // so we can either enumerate all blocks to build the mapping, or fall back to an
        // official RPC. The latter is much easier but has a 300/day rate limit.

        use jsonrpsee::http_client::HttpClientBuilder;
        use jsonrpsee_core::client::ClientT;

@@ -256,7 +249,6 @@ impl<BS: BlockSource> PseudoPeer<BS> {
        let client =
            HttpClientBuilder::default().build(self.chain_spec.official_rpc_url()).unwrap();
        let target_block: Block = client.request("eth_getBlockByHash", (hash, false)).await?;

        debug!("From official RPC: {:?} for {hash:?}", target_block.header.number);
        self.cache_blocks([(hash, target_block.header.number)]);
        Ok(target_block.header.number)
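The comment above is the crux: each hash-to-number lookup either enumerates local blocks or spends one of roughly 300 daily requests against the official RPC, so every answer is cached before it is returned. The memoization pattern in isolation, with a HashMap and a placeholder lookup standing in for the node's LruMap and the jsonrpsee call:

use std::collections::HashMap;

struct HashToNumber {
    cache: HashMap<[u8; 32], u64>,
}

impl HashToNumber {
    fn resolve(&mut self, hash: [u8; 32]) -> Option<u64> {
        if let Some(&number) = self.cache.get(&hash) {
            return Some(number); // served locally, no RPC budget spent
        }
        let number = self.official_rpc_lookup(&hash)?; // one unit of the daily quota
        self.cache.insert(hash, number);
        Some(number)
    }

    fn official_rpc_lookup(&self, _hash: &[u8; 32]) -> Option<u64> {
        // Placeholder for eth_getBlockByHash against the official RPC endpoint.
        Some(0)
    }
}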
@@ -269,9 +261,10 @@ impl<BS: BlockSource> PseudoPeer<BS> {
            if self.if_hit_then_warm_around.lock().unwrap().contains(&block_number) {
                self.warm_cache_around_blocks(block_number, self.warm_cache_size).await;
            }
-           return Some(block_number);
+           Some(block_number)
+       } else {
+           None
        }
-       None
    }

    /// Backfill the cache with blocks to find the target hash
@@ -316,10 +309,11 @@ impl<BS: BlockSource> PseudoPeer<BS> {
    async fn warm_cache_around_blocks(&mut self, block_number: u64, chunk_size: u64) {
        let start = std::cmp::max(block_number.saturating_sub(chunk_size), 1);
        let end = std::cmp::min(block_number + chunk_size, self.known_latest_block_number);
-       self.if_hit_then_warm_around.lock().unwrap().insert(start);
-       self.if_hit_then_warm_around.lock().unwrap().insert(end);
+       {
+           let mut guard = self.if_hit_then_warm_around.lock().unwrap();
+           guard.insert(start);
+           guard.insert(end);
+       }
        const IMPOSSIBLE_HASH: B256 = B256::ZERO;
        let _ = self.try_block_range_for_hash(start, end, IMPOSSIBLE_HASH).await;
    }
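The refactor above is about lock hygiene rather than behavior: instead of locking the mutex twice, the new code takes one guard, performs both inserts atomically, and lets a block scope drop the guard before the subsequent `.await` (a `std::sync::MutexGuard` is not `Send`, so it must not live across an await point in a spawned task). The same pattern in isolation:

use std::collections::BTreeSet;
use std::sync::Mutex;

fn mark_warm_edges(set: &Mutex<BTreeSet<u64>>, start: u64, end: u64) {
    {
        let mut guard = set.lock().unwrap(); // single acquisition
        guard.insert(start);
        guard.insert(end);
    } // guard dropped here, so the caller is free to .await afterwards
}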
@@ -345,15 +339,12 @@ impl<BS: BlockSource> PseudoPeer<BS> {
        }

        debug!("Backfilling from {start_number} to {end_number}");

        // Collect blocks and cache them
        let blocks = self.collect_blocks(uncached_block_numbers).await;
        let block_map: HashMap<B256, u64> =
            blocks.into_iter().map(|block| (block.hash(), block.number())).collect();

        let maybe_block_number = block_map.get(&target_hash).copied();
        self.cache_blocks(block_map);

        Ok(maybe_block_number)
    }
48 src/pseudo_peer/sources/cached.rs Normal file
@@ -0,0 +1,48 @@
use super::{BlockSource, BlockSourceBoxed};
use crate::node::types::BlockAndReceipts;
use futures::{future::BoxFuture, FutureExt};
use reth_network::cache::LruMap;
use std::sync::{Arc, RwLock};

/// Block source wrapper that caches blocks in memory
#[derive(Debug, Clone)]
pub struct CachedBlockSource {
    block_source: BlockSourceBoxed,
    cache: Arc<RwLock<LruMap<u64, BlockAndReceipts>>>,
}

impl CachedBlockSource {
    const CACHE_LIMIT: u32 = 100000;

    pub fn new(block_source: BlockSourceBoxed) -> Self {
        Self { block_source, cache: Arc::new(RwLock::new(LruMap::new(Self::CACHE_LIMIT))) }
    }
}

impl BlockSource for CachedBlockSource {
    fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
        let block_source = self.block_source.clone();
        let cache = self.cache.clone();
        async move {
            if let Some(block) = cache.write().unwrap().get(&height) {
                return Ok(block.clone());
            }
            let block = block_source.collect_block(height).await?;
            cache.write().unwrap().insert(height, block.clone());
            Ok(block)
        }
        .boxed()
    }

    fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
        self.block_source.find_latest_block_number()
    }

    fn recommended_chunk_size(&self) -> u64 {
        self.block_source.recommended_chunk_size()
    }

    fn polling_interval(&self) -> std::time::Duration {
        self.block_source.polling_interval()
    }
}
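CachedBlockSource is a read-through cache: look up under the lock, fall through to the wrapped source on a miss, then populate. Note that even the lookup above takes the write lock, because `LruMap::get` mutates recency order. The same shape with std types only, a sketch where a HashMap stands in for LruMap (so a read lock suffices for the hit path) and a closure stands in for the inner async source:

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

#[derive(Clone)]
struct ReadThroughCache {
    cache: Arc<RwLock<HashMap<u64, String>>>,
}

impl ReadThroughCache {
    fn collect_block(&self, height: u64, fetch: impl Fn(u64) -> String) -> String {
        if let Some(block) = self.cache.read().unwrap().get(&height) {
            return block.clone(); // cache hit
        }
        let block = fetch(height); // miss: delegate to the wrapped source
        self.cache.write().unwrap().insert(height, block.clone());
        block
    }
}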
@@ -1,634 +0,0 @@
use super::{BlockSource, BlockSourceBoxed};
use crate::node::types::{BlockAndReceipts, EvmBlock};
use futures::future::BoxFuture;
use rangemap::RangeInclusiveMap;
use reth_network::cache::LruMap;
use serde::{Deserialize, Serialize};
use std::{
    fs::File,
    io::{BufRead, BufReader, Read, Seek, SeekFrom},
    ops::RangeInclusive,
    path::{Path, PathBuf},
    sync::Arc,
};
use time::{macros::format_description, Date, Duration, OffsetDateTime, Time};
use tokio::sync::Mutex;
use tracing::{info, warn};

const TAIL_INTERVAL: std::time::Duration = std::time::Duration::from_millis(25);
const HOURLY_SUBDIR: &str = "hourly";

#[derive(Debug)]
pub struct LocalBlocksCache {
    cache: LruMap<u64, BlockAndReceipts>,
    // Lightweight range map to track the ranges of blocks in the local ingest directory
    ranges: RangeInclusiveMap<u64, PathBuf>,
}

impl LocalBlocksCache {
    // 3660 blocks per hour
    const CACHE_SIZE: u32 = 8000;

    fn new() -> Self {
        Self { cache: LruMap::new(Self::CACHE_SIZE), ranges: RangeInclusiveMap::new() }
    }

    fn load_scan_result(&mut self, scan_result: ScanResult) {
        for blk in scan_result.new_blocks {
            let EvmBlock::Reth115(b) = &blk.block;
            self.cache.insert(b.header.header.number, blk);
        }
        for range in scan_result.new_block_ranges {
            self.ranges.insert(range, scan_result.path.clone());
        }
    }
}

#[derive(Serialize, Deserialize, Debug, Clone)]
struct LocalBlockAndReceipts(String, BlockAndReceipts);

struct ScanResult {
    path: PathBuf,
    next_expected_height: u64,
    new_blocks: Vec<BlockAndReceipts>,
    new_block_ranges: Vec<RangeInclusive<u64>>,
}

struct ScanOptions {
    start_height: u64,
    only_load_ranges: bool,
}

fn line_to_evm_block(line: &str) -> serde_json::Result<(BlockAndReceipts, u64)> {
    let LocalBlockAndReceipts(_block_timestamp, parsed_block): LocalBlockAndReceipts =
        serde_json::from_str(line)?;
    let height = match &parsed_block.block {
        EvmBlock::Reth115(b) => b.header.header.number,
    };
    Ok((parsed_block, height))
}

fn scan_hour_file(path: &Path, last_line: &mut usize, options: ScanOptions) -> ScanResult {
    let file = File::open(path).expect("Failed to open hour file path");
    let reader = BufReader::new(file);

    let ScanOptions { start_height, only_load_ranges } = options;

    let mut new_blocks = Vec::new();
    let mut last_height = start_height;
    let lines: Vec<String> = reader.lines().collect::<Result<_, _>>().unwrap();
    let skip = if *last_line == 0 { 0 } else { *last_line - 1 };

    let mut block_ranges = Vec::new();
    let mut current_range: Option<(u64, u64)> = None;

    for (line_idx, line) in lines.iter().enumerate().skip(skip) {
        if line_idx < *last_line || line.trim().is_empty() {
            continue;
        }

        match line_to_evm_block(line) {
            Ok((parsed_block, height)) => {
                if height >= start_height {
                    last_height = last_height.max(height);
                    if !only_load_ranges {
                        new_blocks.push(parsed_block);
                    }
                    *last_line = line_idx;
                }

                match current_range {
                    Some((start, end)) if end + 1 == height => {
                        current_range = Some((start, height));
                    }
                    _ => {
                        if let Some((start, end)) = current_range.take() {
                            block_ranges.push(start..=end);
                        }
                        current_range = Some((height, height));
                    }
                }
            }
            Err(_) => {
                warn!("Failed to parse line: {}...", line.get(0..50).unwrap_or(line));
                continue;
            }
        }
    }

    if let Some((start, end)) = current_range {
        block_ranges.push(start..=end);
    }

    ScanResult {
        path: path.to_path_buf(),
        next_expected_height: last_height + 1,
        new_blocks,
        new_block_ranges: block_ranges,
    }
}

fn date_from_datetime(dt: OffsetDateTime) -> String {
    dt.format(&format_description!("[year][month][day]")).unwrap()
}

/// Block source that monitors the local ingest directory for the HL node.
#[derive(Debug, Clone)]
pub struct HlNodeBlockSource {
    pub fallback: BlockSourceBoxed,
    pub local_ingest_dir: PathBuf,
    pub local_blocks_cache: Arc<Mutex<LocalBlocksCache>>, // height → block
    pub last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>, // for rate limiting requests to fallback
}

impl BlockSource for HlNodeBlockSource {
    fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
        Box::pin(async move {
            let now = OffsetDateTime::now_utc();

            if let Some(block) = self.try_collect_local_block(height).await {
                self.update_last_fetch(height, now).await;
                return Ok(block);
            }

            if let Some((last_height, last_poll_time)) = *self.last_local_fetch.lock().await {
                let more_recent = last_height < height;
                let too_soon = now - last_poll_time < Self::MAX_ALLOWED_THRESHOLD_BEFORE_FALLBACK;
                if more_recent && too_soon {
                    return Err(eyre::eyre!(
                        "Not found locally; limiting polling rate before fallback so that hl-node has chance to catch up"
                    ));
                }
            }

            let block = self.fallback.collect_block(height).await?;
            self.update_last_fetch(height, now).await;
            Ok(block)
        })
    }

    fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
        Box::pin(async move {
            let Some(dir) = Self::find_latest_hourly_file(&self.local_ingest_dir) else {
                warn!(
                    "No EVM blocks from hl-node found at {:?}; fallback to s3/ingest-dir",
                    self.local_ingest_dir
                );
                return self.fallback.find_latest_block_number().await;
            };

            let mut file = File::open(&dir).expect("Failed to open hour file path");
            if let Some((_, height)) = read_last_complete_line(&mut file) {
                info!("Latest block number: {} with path {}", height, dir.display());
                Some(height)
            } else {
                warn!(
                    "Failed to parse the hl-node hourly file at {:?}; fallback to s3/ingest-dir",
                    file
                );
                self.fallback.find_latest_block_number().await
            }
        })
    }

    fn recommended_chunk_size(&self) -> u64 {
        self.fallback.recommended_chunk_size()
    }
}

fn read_last_complete_line<R: Read + Seek>(read: &mut R) -> Option<(BlockAndReceipts, u64)> {
    const CHUNK_SIZE: u64 = 50000;
    let mut buf = Vec::with_capacity(CHUNK_SIZE as usize);
    let mut pos = read.seek(SeekFrom::End(0)).unwrap();
    let mut last_line = Vec::new();

    while pos > 0 {
        let read_size = std::cmp::min(pos, CHUNK_SIZE);
        buf.resize(read_size as usize, 0);

        read.seek(SeekFrom::Start(pos - read_size)).unwrap();
        read.read_exact(&mut buf).unwrap();

        last_line = [buf.clone(), last_line].concat();

        if last_line.ends_with(b"\n") {
            last_line.pop();
        }

        if let Some(idx) = last_line.iter().rposition(|&b| b == b'\n') {
            let candidate = &last_line[idx + 1..];
            if let Ok((evm_block, height)) =
                line_to_evm_block(str::from_utf8(candidate).unwrap())
            {
                return Some((evm_block, height));
            }
            // Incomplete line; truncate and continue
            last_line.truncate(idx);
        }

        if pos < read_size {
            break;
        }
        pos -= read_size;
    }

    line_to_evm_block(&String::from_utf8(last_line).unwrap()).ok()
}

impl HlNodeBlockSource {
    /// [HlNodeBlockSource] picks the faster one between local ingest directory and s3/ingest-dir.
    /// But if we immediately fall back to s3/ingest-dir, in the S3 case it may cause unnecessary
    /// requests to S3 that will only return 404.
    ///
    /// To avoid unnecessary fallback, we set a short threshold period.
    /// This threshold is several times longer than the expected block time, reducing redundant fallback attempts.
    pub(crate) const MAX_ALLOWED_THRESHOLD_BEFORE_FALLBACK: Duration = Duration::milliseconds(5000);

    async fn update_last_fetch(&self, height: u64, now: OffsetDateTime) {
        let mut last_fetch = self.last_local_fetch.lock().await;
        if let Some((last_height, _)) = *last_fetch {
            if last_height >= height {
                return;
            }
        }
        *last_fetch = Some((height, now));
    }

    async fn try_collect_local_block(&self, height: u64) -> Option<BlockAndReceipts> {
        let mut u_cache = self.local_blocks_cache.lock().await;
        if let Some(block) = u_cache.cache.remove(&height) {
            return Some(block);
        }

        let path = u_cache.ranges.get(&height).cloned()?;

        info!("Loading block data from {:?}", path);
        u_cache.load_scan_result(scan_hour_file(
            &path,
            &mut 0,
            ScanOptions { start_height: 0, only_load_ranges: false },
        ));
        u_cache.cache.get(&height).cloned()
    }

    fn datetime_from_path(path: &Path) -> Option<OffsetDateTime> {
        let dt_part = path.parent()?.file_name()?.to_str()?;
        let hour_part = path.file_name()?.to_str()?;

        let hour: u8 = hour_part.parse().ok()?;
        Some(OffsetDateTime::new_utc(
            Date::parse(dt_part, &format_description!("[year][month][day]")).ok()?,
            Time::from_hms(hour, 0, 0).ok()?,
        ))
    }

    fn all_hourly_files(root: &Path) -> Option<Vec<PathBuf>> {
        let dir = root.join(HOURLY_SUBDIR);
        let mut files = Vec::new();

        for entry in std::fs::read_dir(dir).ok()? {
            let file = entry.ok()?.path();
            let subfiles: Vec<_> = std::fs::read_dir(&file)
                .ok()?
                .filter_map(|f| f.ok().map(|f| f.path()))
                .filter(|p| Self::datetime_from_path(p).is_some())
                .collect();
            files.extend(subfiles);
        }

        files.sort();
        Some(files)
    }

    fn find_latest_hourly_file(root: &Path) -> Option<PathBuf> {
        Self::all_hourly_files(root)?.last().cloned()
    }

    async fn try_backfill_local_blocks(
        root: &Path,
        cache: &Arc<Mutex<LocalBlocksCache>>,
        cutoff_height: u64,
    ) -> eyre::Result<()> {
        let mut u_cache = cache.lock().await;

        for subfile in Self::all_hourly_files(root).unwrap_or_default() {
            let mut file = File::open(&subfile).expect("Failed to open hour file path");

            if let Some((_, height)) = read_last_complete_line(&mut file) {
                if height < cutoff_height {
                    continue;
                }
            } else {
                warn!("Failed to parse last line of file, fallback to slow path: {:?}", subfile);
            }

            let mut scan_result = scan_hour_file(
                &subfile,
                &mut 0,
                ScanOptions { start_height: cutoff_height, only_load_ranges: true },
            );
            // Only store the block ranges for now; actual block data will be loaded lazily later to optimize memory usage
            scan_result.new_blocks.clear();
            u_cache.load_scan_result(scan_result);
        }

        if u_cache.ranges.is_empty() {
            warn!("No ranges found in {:?}", root);
        } else {
            let (min, _) = u_cache.ranges.first_range_value().unwrap();
            let (max, _) = u_cache.ranges.last_range_value().unwrap();
            info!(
                "Populated {} ranges (min: {}, max: {})",
                u_cache.ranges.len(),
                min.start(),
                max.end()
            );
        }

        Ok(())
    }

    async fn start_local_ingest_loop(&self, current_head: u64) {
        let root = self.local_ingest_dir.to_owned();
        let cache = self.local_blocks_cache.clone();

        tokio::spawn(async move {
            let mut next_height = current_head;

            // Wait for the first hourly file to be created
            let mut dt = loop {
                if let Some(latest_file) = Self::find_latest_hourly_file(&root) {
                    break Self::datetime_from_path(&latest_file).unwrap();
                }
                tokio::time::sleep(TAIL_INTERVAL).await;
            };

            let mut hour = dt.hour();
            let mut day_str = date_from_datetime(dt);
            let mut last_line = 0;

            info!("Starting local ingest loop from height: {:?}", current_head);

            loop {
                let hour_file = root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"));

                if hour_file.exists() {
                    let scan_result = scan_hour_file(
                        &hour_file,
                        &mut last_line,
                        ScanOptions { start_height: next_height, only_load_ranges: false },
                    );
                    next_height = scan_result.next_expected_height;

                    let mut u_cache = cache.lock().await;
                    u_cache.load_scan_result(scan_result);
                }

                let now = OffsetDateTime::now_utc();

                if dt + Duration::HOUR < now {
                    dt += Duration::HOUR;
                    hour = dt.hour();
                    day_str = date_from_datetime(dt);
                    last_line = 0;
                    info!(
                        "Moving to a new file. {:?}",
                        root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"))
                    );
                    continue;
                }

                tokio::time::sleep(TAIL_INTERVAL).await;
            }
        });
    }

    pub(crate) async fn run(&self, next_block_number: u64) -> eyre::Result<()> {
        let _ = Self::try_backfill_local_blocks(
            &self.local_ingest_dir,
            &self.local_blocks_cache,
            next_block_number,
        )
        .await;

        self.start_local_ingest_loop(next_block_number).await;
        Ok(())
    }

    pub async fn new(
        fallback: BlockSourceBoxed,
        local_ingest_dir: PathBuf,
        next_block_number: u64,
    ) -> Self {
        let block_source = HlNodeBlockSource {
            fallback,
            local_ingest_dir,
            local_blocks_cache: Arc::new(Mutex::new(LocalBlocksCache::new())),
            last_local_fetch: Arc::new(Mutex::new(None)),
        };
        block_source.run(next_block_number).await.unwrap();
        block_source
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::node::types::reth_compat;
    use crate::node::types::ReadPrecompileCalls;
    use crate::pseudo_peer::sources::LocalBlockSource;
    use alloy_consensus::{BlockBody, Header};
    use alloy_primitives::{Address, Bloom, Bytes, B256, B64, U256};
    use std::io::Write;
    use std::time::Duration;

    #[test]
    fn test_datetime_from_path() {
        let path = Path::new("/home/username/hl/data/evm_block_and_receipts/hourly/20250731/4");
        let dt = HlNodeBlockSource::datetime_from_path(path).unwrap();
        println!("{dt:?}");
    }

    #[tokio::test]
    async fn test_backfill() {
        let test_path = Path::new("/root/evm_block_and_receipts");
        if !test_path.exists() {
            return;
        }

        let cache = Arc::new(Mutex::new(LocalBlocksCache::new()));
        HlNodeBlockSource::try_backfill_local_blocks(test_path, &cache, 1000000).await.unwrap();

        let u_cache = cache.lock().await;
        println!("{:?}", u_cache.ranges);
        assert_eq!(
            u_cache.ranges.get(&9735058),
            Some(&test_path.join(HOURLY_SUBDIR).join("20250729").join("22"))
        );
    }

    fn scan_result_from_single_block(block: BlockAndReceipts) -> ScanResult {
        let height = match &block.block {
            EvmBlock::Reth115(b) => b.header.header.number,
        };
        ScanResult {
            path: PathBuf::from("/nonexistent-block"),
            next_expected_height: height + 1,
            new_blocks: vec![block],
            new_block_ranges: vec![height..=height],
        }
    }

    fn empty_block(
        number: u64,
        timestamp: u64,
        extra_data: &'static [u8],
    ) -> LocalBlockAndReceipts {
        let extra_data = Bytes::from_static(extra_data);
        let res = BlockAndReceipts {
            block: EvmBlock::Reth115(reth_compat::SealedBlock {
                header: reth_compat::SealedHeader {
                    header: Header {
                        parent_hash: B256::ZERO,
                        ommers_hash: B256::ZERO,
                        beneficiary: Address::ZERO,
                        state_root: B256::ZERO,
                        transactions_root: B256::ZERO,
                        receipts_root: B256::ZERO,
                        logs_bloom: Bloom::ZERO,
                        difficulty: U256::ZERO,
                        number,
                        gas_limit: 0,
                        gas_used: 0,
                        timestamp,
                        extra_data,
                        mix_hash: B256::ZERO,
                        nonce: B64::ZERO,
                        base_fee_per_gas: None,
                        withdrawals_root: None,
                        blob_gas_used: None,
                        excess_blob_gas: None,
                        parent_beacon_block_root: None,
                        requests_hash: None,
                    },
                    hash: B256::ZERO,
                },
                body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None },
            }),
            receipts: vec![],
            system_txs: vec![],
            read_precompile_calls: ReadPrecompileCalls(vec![]),
            highest_precompile_address: None,
        };
        LocalBlockAndReceipts(timestamp.to_string(), res)
    }

    fn setup_temp_dir_and_file() -> eyre::Result<(tempfile::TempDir, File)> {
        let now = OffsetDateTime::now_utc();
        let day_str = date_from_datetime(now);
        let hour = now.hour();

        let temp_dir = tempfile::tempdir()?;
        let path = temp_dir.path().join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"));
        std::fs::create_dir_all(path.parent().unwrap())?;

        Ok((temp_dir, File::create(path)?))
    }

    struct BlockSourceHierarchy {
        block_source: HlNodeBlockSource,
        _temp_dir: tempfile::TempDir,
        file1: File,
        current_block: LocalBlockAndReceipts,
        future_block_hl_node: LocalBlockAndReceipts,
        future_block_fallback: LocalBlockAndReceipts,
    }

    async fn setup_block_source_hierarchy() -> eyre::Result<BlockSourceHierarchy> {
        // Setup fallback block source
        let block_source_fallback = HlNodeBlockSource::new(
            BlockSourceBoxed::new(Box::new(LocalBlockSource::new("/nonexistent"))),
            PathBuf::from("/nonexistent"),
            1000000,
        )
        .await;
        let block_hl_node_0 = empty_block(1000000, 1722633600, b"hl-node");
        let block_hl_node_1 = empty_block(1000001, 1722633600, b"hl-node");
        let block_fallback_1 = empty_block(1000001, 1722633600, b"fallback");

        let (temp_dir1, mut file1) = setup_temp_dir_and_file()?;
        writeln!(&mut file1, "{}", serde_json::to_string(&block_hl_node_0)?)?;

        let block_source = HlNodeBlockSource::new(
            BlockSourceBoxed::new(Box::new(block_source_fallback.clone())),
            temp_dir1.path().to_path_buf(),
            1000000,
        )
        .await;

        block_source_fallback
            .local_blocks_cache
            .lock()
            .await
            .load_scan_result(scan_result_from_single_block(block_fallback_1.1.clone()));

        Ok(BlockSourceHierarchy {
            block_source,
            _temp_dir: temp_dir1,
            file1,
            current_block: block_hl_node_0,
            future_block_hl_node: block_hl_node_1,
            future_block_fallback: block_fallback_1,
        })
    }

    #[tokio::test]
    async fn test_update_last_fetch_no_fallback() -> eyre::Result<()> {
        let hierarchy = setup_block_source_hierarchy().await?;
        let BlockSourceHierarchy {
            block_source,
            current_block,
            future_block_hl_node,
            mut file1,
            ..
        } = hierarchy;

        let block = block_source.collect_block(1000000).await.unwrap();
        assert_eq!(block, current_block.1);

        let block = block_source.collect_block(1000001).await;
        assert!(block.is_err());

        writeln!(&mut file1, "{}", serde_json::to_string(&future_block_hl_node)?)?;
        tokio::time::sleep(Duration::from_millis(100)).await;

        let block = block_source.collect_block(1000001).await.unwrap();
        assert_eq!(block, future_block_hl_node.1);

        Ok(())
    }

    #[tokio::test]
    async fn test_update_last_fetch_fallback() -> eyre::Result<()> {
        let hierarchy = setup_block_source_hierarchy().await?;
        let BlockSourceHierarchy {
            block_source,
            current_block,
            future_block_fallback,
            mut file1,
            ..
        } = hierarchy;

        let block = block_source.collect_block(1000000).await.unwrap();
        assert_eq!(block, current_block.1);

        tokio::time::sleep(HlNodeBlockSource::MAX_ALLOWED_THRESHOLD_BEFORE_FALLBACK.unsigned_abs())
            .await;

        writeln!(&mut file1, "{}", serde_json::to_string(&future_block_fallback)?)?;
        let block = block_source.collect_block(1000001).await.unwrap();
        assert_eq!(block, future_block_fallback.1);

        Ok(())
    }
}
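The throttle described in the doc comment deleted above (and recreated in hl_node/mod.rs below) is worth isolating: the fallback is skipped only while the request is ahead of the last local success and that success is still fresh, which gives hl-node time to catch up before S3 is hit with requests that would 404. A standalone sketch of the predicate, with std `Instant` standing in for `time::OffsetDateTime`:

use std::time::{Duration, Instant};

/// Returns true when it is acceptable to query the fallback source.
fn may_fall_back(last_local: Option<(u64, Instant)>, wanted: u64, threshold: Duration) -> bool {
    match last_local {
        Some((last_height, at)) => {
            let more_recent = last_height < wanted; // asking past the local tip
            let too_soon = at.elapsed() < threshold; // local source served us recently
            !(more_recent && too_soon)
        }
        None => true, // nothing known locally yet
    }
}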
51 src/pseudo_peer/sources/hl_node/cache.rs Normal file
@@ -0,0 +1,51 @@
use super::scan::ScanResult;
use crate::node::types::{BlockAndReceipts, EvmBlock};
use rangemap::RangeInclusiveMap;
use reth_network::cache::LruMap;
use std::path::{Path, PathBuf};
use tracing::{info, warn};

#[derive(Debug)]
pub struct LocalBlocksCache {
    cache: LruMap<u64, BlockAndReceipts>,
    ranges: RangeInclusiveMap<u64, PathBuf>,
}

impl LocalBlocksCache {
    pub fn new(cache_size: u32) -> Self {
        Self { cache: LruMap::new(cache_size), ranges: RangeInclusiveMap::new() }
    }

    pub fn load_scan_result(&mut self, scan_result: ScanResult) {
        for blk in scan_result.new_blocks {
            let EvmBlock::Reth115(b) = &blk.block;
            self.cache.insert(b.header.header.number, blk);
        }
        for range in scan_result.new_block_ranges {
            self.ranges.insert(range, scan_result.path.clone());
        }
    }

    pub fn get_block(&mut self, height: u64) -> Option<BlockAndReceipts> {
        self.cache.remove(&height)
    }

    pub fn get_path_for_height(&self, height: u64) -> Option<PathBuf> {
        self.ranges.get(&height).cloned()
    }

    pub fn log_range_summary(&self, root: &Path) {
        if self.ranges.is_empty() {
            warn!("No ranges found in {:?}", root);
        } else {
            let (min, max) =
                (self.ranges.first_range_value().unwrap(), self.ranges.last_range_value().unwrap());
            info!(
                "Populated {} ranges (min: {}, max: {})",
                self.ranges.len(),
                min.0.start(),
                max.0.end()
            );
        }
    }
}
67 src/pseudo_peer/sources/hl_node/file_ops.rs Normal file
@@ -0,0 +1,67 @@
use super::{scan::Scanner, time_utils::TimeUtils, HOURLY_SUBDIR};
use crate::node::types::BlockAndReceipts;
use std::{
    fs::File,
    io::{Read, Seek, SeekFrom},
    path::{Path, PathBuf},
};

pub struct FileOperations;

impl FileOperations {
    pub fn all_hourly_files(root: &Path) -> Option<Vec<PathBuf>> {
        let mut files = Vec::new();
        for entry in std::fs::read_dir(root.join(HOURLY_SUBDIR)).ok()? {
            let dir = entry.ok()?.path();
            if let Ok(subentries) = std::fs::read_dir(&dir) {
                files.extend(
                    subentries
                        .filter_map(|f| f.ok().map(|f| f.path()))
                        .filter(|p| TimeUtils::datetime_from_path(p).is_some()),
                );
            }
        }
        files.sort();
        Some(files)
    }

    pub fn find_latest_hourly_file(root: &Path) -> Option<PathBuf> {
        Self::all_hourly_files(root)?.into_iter().last()
    }

    pub fn read_last_block_from_file(path: &Path) -> Option<(BlockAndReceipts, u64)> {
        let mut file = File::open(path).ok()?;
        Self::read_last_complete_line(&mut file)
    }

    fn read_last_complete_line<R: Read + Seek>(read: &mut R) -> Option<(BlockAndReceipts, u64)> {
        const CHUNK_SIZE: u64 = 50000;
        let mut buf = Vec::with_capacity(CHUNK_SIZE as usize);
        let mut pos = read.seek(SeekFrom::End(0)).unwrap();
        let mut last_line = Vec::new();

        while pos > 0 {
            let read_size = pos.min(CHUNK_SIZE);
            buf.resize(read_size as usize, 0);
            read.seek(SeekFrom::Start(pos - read_size)).unwrap();
            read.read_exact(&mut buf).unwrap();
            last_line = [buf.clone(), last_line].concat();
            if last_line.ends_with(b"\n") {
                last_line.pop();
            }

            if let Some(idx) = last_line.iter().rposition(|&b| b == b'\n') {
                let candidate = &last_line[idx + 1..];
                if let Ok(result) = Scanner::line_to_evm_block(str::from_utf8(candidate).unwrap()) {
                    return Some(result);
                }
                last_line.truncate(idx);
            }
            if pos < read_size {
                break;
            }
            pos -= read_size;
        }
        Scanner::line_to_evm_block(&String::from_utf8(last_line).unwrap()).ok()
    }
}
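read_last_complete_line exists so find_latest_block_number can get the newest height without parsing a whole hourly file: it walks backwards from EOF in 50 kB chunks until the final newline-terminated, parseable line is assembled. The core idea over an in-memory buffer, a sketch without the chunking or re-parse retries that the real function adds:

fn last_complete_line(data: &[u8]) -> Option<String> {
    let mut end = data.len();
    if data.ends_with(b"\n") {
        end -= 1; // a trailing newline means the last line is complete
    }
    let start = data[..end].iter().rposition(|&b| b == b'\n').map_or(0, |i| i + 1);
    String::from_utf8(data[start..end].to_vec()).ok()
}

fn main() {
    assert_eq!(last_complete_line(b"a\nb\nc\n").as_deref(), Some("c"));
    assert_eq!(last_complete_line(b"a\nstill-being-written").as_deref(), Some("still-being-written"));
}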
231 src/pseudo_peer/sources/hl_node/mod.rs Normal file
@@ -0,0 +1,231 @@
mod cache;
mod file_ops;
mod scan;
#[cfg(test)]
mod tests;
mod time_utils;

use self::{
    cache::LocalBlocksCache,
    file_ops::FileOperations,
    scan::{ScanOptions, Scanner},
    time_utils::TimeUtils,
};
use super::{BlockSource, BlockSourceBoxed};
use crate::node::types::BlockAndReceipts;
use futures::future::BoxFuture;
use std::{
    path::{Path, PathBuf},
    sync::Arc,
    time::Duration,
};
use time::OffsetDateTime;
use tokio::sync::Mutex;
use tracing::{info, warn};

const HOURLY_SUBDIR: &str = "hourly";
const CACHE_SIZE: u32 = 8000; // 3660 blocks per hour
const ONE_HOUR: Duration = Duration::from_secs(60 * 60);
const TAIL_INTERVAL: Duration = Duration::from_millis(25);

#[derive(Debug, Clone)]
pub struct HlNodeBlockSourceArgs {
    pub root: PathBuf,
    pub fallback_threshold: Duration,
}

/// Block source that monitors the local ingest directory for the HL node.
#[derive(Debug, Clone)]
pub struct HlNodeBlockSource {
    pub fallback: BlockSourceBoxed,
    pub local_blocks_cache: Arc<Mutex<LocalBlocksCache>>,
    pub last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>,
    pub args: HlNodeBlockSourceArgs,
}

impl BlockSource for HlNodeBlockSource {
    fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
        let fallback = self.fallback.clone();
        let args = self.args.clone();
        let local_blocks_cache = self.local_blocks_cache.clone();
        let last_local_fetch = self.last_local_fetch.clone();
        Box::pin(async move {
            let now = OffsetDateTime::now_utc();

            if let Some(block) = Self::try_collect_local_block(local_blocks_cache, height).await {
                Self::update_last_fetch(last_local_fetch, height, now).await;
                return Ok(block);
            }

            if let Some((last_height, last_poll_time)) = *last_local_fetch.lock().await {
                let more_recent = last_height < height;
                let too_soon = now - last_poll_time < args.fallback_threshold;
                if more_recent && too_soon {
                    return Err(eyre::eyre!(
                        "Not found locally; limiting polling rate before fallback so that hl-node has chance to catch up"
                    ));
                }
            }

            let block = fallback.collect_block(height).await?;
            Self::update_last_fetch(last_local_fetch, height, now).await;
            Ok(block)
        })
    }

    fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
        let fallback = self.fallback.clone();
        let args = self.args.clone();
        Box::pin(async move {
            let Some(dir) = FileOperations::find_latest_hourly_file(&args.root) else {
                warn!(
                    "No EVM blocks from hl-node found at {:?}; fallback to s3/ingest-dir",
                    args.root
                );
                return fallback.find_latest_block_number().await;
            };

            match FileOperations::read_last_block_from_file(&dir) {
                Some((_, height)) => {
                    info!("Latest block number: {} with path {}", height, dir.display());
                    Some(height)
                }
                None => {
                    warn!(
                        "Failed to parse the hl-node hourly file at {:?}; fallback to s3/ingest-dir",
                        dir
                    );
                    fallback.find_latest_block_number().await
                }
            }
        })
    }

    fn recommended_chunk_size(&self) -> u64 {
        self.fallback.recommended_chunk_size()
    }
}

impl HlNodeBlockSource {
    async fn update_last_fetch(
        last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>,
        height: u64,
        now: OffsetDateTime,
    ) {
        let mut last_fetch = last_local_fetch.lock().await;
        if last_fetch.is_none_or(|(h, _)| h < height) {
            *last_fetch = Some((height, now));
        }
    }

    async fn try_collect_local_block(
        local_blocks_cache: Arc<Mutex<LocalBlocksCache>>,
        height: u64,
    ) -> Option<BlockAndReceipts> {
        let mut u_cache = local_blocks_cache.lock().await;
        if let Some(block) = u_cache.get_block(height) {
            return Some(block);
        }
        let path = u_cache.get_path_for_height(height)?;
        info!("Loading block data from {:?}", path);
        let scan_result = Scanner::scan_hour_file(
            &path,
            &mut 0,
            ScanOptions { start_height: 0, only_load_ranges: false },
        );
        u_cache.load_scan_result(scan_result);
        u_cache.get_block(height)
    }

    async fn try_backfill_local_blocks(
        root: &Path,
        cache: &Arc<Mutex<LocalBlocksCache>>,
        cutoff_height: u64,
    ) -> eyre::Result<()> {
        let mut u_cache = cache.lock().await;
        for subfile in FileOperations::all_hourly_files(root).unwrap_or_default() {
            if let Some((_, height)) = FileOperations::read_last_block_from_file(&subfile) {
                if height < cutoff_height {
                    continue;
                }
            } else {
                warn!("Failed to parse last line of file: {:?}", subfile);
            }
            let mut scan_result = Scanner::scan_hour_file(
                &subfile,
                &mut 0,
                ScanOptions { start_height: cutoff_height, only_load_ranges: true },
            );
            scan_result.new_blocks.clear(); // Only store ranges, load data lazily
            u_cache.load_scan_result(scan_result);
        }
        u_cache.log_range_summary(root);
        Ok(())
    }

    async fn start_local_ingest_loop(&self, current_head: u64) {
        let root = self.args.root.to_owned();
        let cache = self.local_blocks_cache.clone();
        tokio::spawn(async move {
            let mut next_height = current_head;
            let mut dt = loop {
                if let Some(f) = FileOperations::find_latest_hourly_file(&root) {
                    break TimeUtils::datetime_from_path(&f).unwrap();
                }
                tokio::time::sleep(TAIL_INTERVAL).await;
            };
            let (mut hour, mut day_str, mut last_line) =
                (dt.hour(), TimeUtils::date_from_datetime(dt), 0);
            info!("Starting local ingest loop from height: {}", current_head);
            loop {
                let hour_file = root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"));
                if hour_file.exists() {
                    let scan_result = Scanner::scan_hour_file(
                        &hour_file,
                        &mut last_line,
                        ScanOptions { start_height: next_height, only_load_ranges: false },
                    );
                    next_height = scan_result.next_expected_height;
                    cache.lock().await.load_scan_result(scan_result);
                }
                let now = OffsetDateTime::now_utc();
                if dt + ONE_HOUR < now {
                    dt += ONE_HOUR;
                    (hour, day_str, last_line) = (dt.hour(), TimeUtils::date_from_datetime(dt), 0);
                    info!(
                        "Moving to new file: {:?}",
                        root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"))
                    );
                    continue;
                }
                tokio::time::sleep(TAIL_INTERVAL).await;
            }
        });
    }

    pub(crate) async fn run(&self, next_block_number: u64) -> eyre::Result<()> {
        let _ = Self::try_backfill_local_blocks(
            &self.args.root,
            &self.local_blocks_cache,
            next_block_number,
        )
        .await;
        self.start_local_ingest_loop(next_block_number).await;
        Ok(())
    }

    pub async fn new(
        fallback: BlockSourceBoxed,
        args: HlNodeBlockSourceArgs,
        next_block_number: u64,
    ) -> Self {
        let block_source = Self {
            fallback,
            args,
            local_blocks_cache: Arc::new(Mutex::new(LocalBlocksCache::new(CACHE_SIZE))),
            last_local_fetch: Arc::new(Mutex::new(None)),
        };
        block_source.run(next_block_number).await.unwrap();
        block_source
    }
}
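Call sites now pass the ingest directory and throttle through HlNodeBlockSourceArgs instead of bare arguments. The wiring below mirrors what this diff's tests do; it is a snippet rather than a full program, and the path, the 5 s threshold, and the in-scope `fallback`/`next_block_number` values are illustrative, not defaults defined by the module:

let source = HlNodeBlockSource::new(
    fallback, // any BlockSourceBoxed, e.g. an S3 or --ingest-dir source
    HlNodeBlockSourceArgs {
        root: PathBuf::from("/home/username/hl/data/evm_block_and_receipts"),
        fallback_threshold: Duration::from_millis(5000),
    },
    next_block_number,
)
.await;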
91 src/pseudo_peer/sources/hl_node/scan.rs Normal file
@@ -0,0 +1,91 @@
use crate::node::types::{BlockAndReceipts, EvmBlock};
use serde::{Deserialize, Serialize};
use std::{
    fs::File,
    io::{BufRead, BufReader},
    ops::RangeInclusive,
    path::{Path, PathBuf},
};
use tracing::warn;

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LocalBlockAndReceipts(pub String, pub BlockAndReceipts);

pub struct ScanResult {
    pub path: PathBuf,
    pub next_expected_height: u64,
    pub new_blocks: Vec<BlockAndReceipts>,
    pub new_block_ranges: Vec<RangeInclusive<u64>>,
}

pub struct ScanOptions {
    pub start_height: u64,
    pub only_load_ranges: bool,
}

pub struct Scanner;

impl Scanner {
    pub fn line_to_evm_block(line: &str) -> serde_json::Result<(BlockAndReceipts, u64)> {
        let LocalBlockAndReceipts(_, parsed_block): LocalBlockAndReceipts =
            serde_json::from_str(line)?;
        let height = match &parsed_block.block {
            EvmBlock::Reth115(b) => b.header.header.number,
        };
        Ok((parsed_block, height))
    }

    pub fn scan_hour_file(path: &Path, last_line: &mut usize, options: ScanOptions) -> ScanResult {
        let lines: Vec<String> =
            BufReader::new(File::open(path).expect("Failed to open hour file"))
                .lines()
                .collect::<Result<_, _>>()
                .unwrap();
        let skip = if *last_line == 0 { 0 } else { *last_line - 1 };
        let mut new_blocks = Vec::new();
        let mut last_height = options.start_height;
        let mut block_ranges = Vec::new();
        let mut current_range: Option<(u64, u64)> = None;

        for (line_idx, line) in lines.iter().enumerate().skip(skip) {
            if line_idx < *last_line || line.trim().is_empty() {
                continue;
            }

            match Self::line_to_evm_block(line) {
                Ok((parsed_block, height)) => {
                    if height >= options.start_height {
                        last_height = last_height.max(height);
                        if !options.only_load_ranges {
                            new_blocks.push(parsed_block);
                        }
                        *last_line = line_idx;
                    }

                    match current_range {
                        Some((start, end)) if end + 1 == height => {
                            current_range = Some((start, height))
                        }
                        _ => {
                            if let Some((start, end)) = current_range.take() {
                                block_ranges.push(start..=end);
                            }
                            current_range = Some((height, height));
                        }
                    }
                }
                Err(_) => warn!("Failed to parse line: {}...", line.get(0..50).unwrap_or(line)),
            }
        }

        if let Some((start, end)) = current_range {
            block_ranges.push(start..=end);
        }
        ScanResult {
            path: path.to_path_buf(),
            next_expected_height: last_height + 1,
            new_blocks,
            new_block_ranges: block_ranges,
        }
    }
}
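The heart of scan_hour_file is the range bookkeeping: a consecutive height extends the open range, a gap closes it and starts a new one, and the tail range is flushed after the loop. The same logic isolated into a testable function:

use std::ops::RangeInclusive;

fn to_ranges(heights: &[u64]) -> Vec<RangeInclusive<u64>> {
    let mut ranges = Vec::new();
    let mut current: Option<(u64, u64)> = None;
    for &height in heights {
        current = match current {
            // Contiguous: extend the open range.
            Some((start, end)) if end + 1 == height => Some((start, height)),
            // Gap (or first item): flush the open range and start a new one.
            open => {
                if let Some((start, end)) = open {
                    ranges.push(start..=end);
                }
                Some((height, height))
            }
        };
    }
    if let Some((start, end)) = current {
        ranges.push(start..=end); // flush the tail
    }
    ranges
}

fn main() {
    assert_eq!(to_ranges(&[5, 6, 7, 10, 11]), vec![5..=7, 10..=11]);
}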
195 src/pseudo_peer/sources/hl_node/tests.rs Normal file
@@ -0,0 +1,195 @@
use super::*;
use crate::{
    node::types::{reth_compat, ReadPrecompileCalls},
    pseudo_peer::sources::{hl_node::scan::LocalBlockAndReceipts, LocalBlockSource},
};
use alloy_consensus::{BlockBody, Header};
use alloy_primitives::{Address, Bloom, Bytes, B256, B64, U256};
use std::{io::Write, time::Duration};

const DEFAULT_FALLBACK_THRESHOLD_FOR_TEST: Duration = Duration::from_millis(5000);

#[test]
fn test_datetime_from_path() {
    let path = Path::new("/home/username/hl/data/evm_block_and_receipts/hourly/20250731/4");
    let dt = TimeUtils::datetime_from_path(path).unwrap();
    println!("{dt:?}");
}

#[tokio::test]
async fn test_backfill() {
    let test_path = Path::new("/root/evm_block_and_receipts");
    if !test_path.exists() {
        return;
    }

    let cache = Arc::new(Mutex::new(LocalBlocksCache::new(CACHE_SIZE)));
    HlNodeBlockSource::try_backfill_local_blocks(test_path, &cache, 1000000).await.unwrap();

    let u_cache = cache.lock().await;
    assert_eq!(
        u_cache.get_path_for_height(9735058),
        Some(test_path.join(HOURLY_SUBDIR).join("20250729").join("22"))
    );
}

fn scan_result_from_single_block(block: BlockAndReceipts) -> scan::ScanResult {
    use crate::node::types::EvmBlock;
    let height = match &block.block {
        EvmBlock::Reth115(b) => b.header.header.number,
    };
    scan::ScanResult {
        path: PathBuf::from("/nonexistent-block"),
        next_expected_height: height + 1,
        new_blocks: vec![block],
        new_block_ranges: vec![height..=height],
    }
}

fn empty_block(number: u64, timestamp: u64, extra_data: &'static [u8]) -> LocalBlockAndReceipts {
    use crate::node::types::EvmBlock;
    LocalBlockAndReceipts(
        timestamp.to_string(),
        BlockAndReceipts {
            block: EvmBlock::Reth115(reth_compat::SealedBlock {
                header: reth_compat::SealedHeader {
                    header: Header {
                        parent_hash: B256::ZERO,
                        ommers_hash: B256::ZERO,
                        beneficiary: Address::ZERO,
                        state_root: B256::ZERO,
                        transactions_root: B256::ZERO,
                        receipts_root: B256::ZERO,
                        logs_bloom: Bloom::ZERO,
                        difficulty: U256::ZERO,
                        number,
                        gas_limit: 0,
                        gas_used: 0,
                        timestamp,
                        extra_data: Bytes::from_static(extra_data),
                        mix_hash: B256::ZERO,
                        nonce: B64::ZERO,
                        base_fee_per_gas: None,
                        withdrawals_root: None,
                        blob_gas_used: None,
                        excess_blob_gas: None,
                        parent_beacon_block_root: None,
                        requests_hash: None,
                    },
                    hash: B256::ZERO,
                },
                body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None },
            }),
            receipts: vec![],
            system_txs: vec![],
            read_precompile_calls: ReadPrecompileCalls(vec![]),
            highest_precompile_address: None,
        },
    )
}

fn setup_temp_dir_and_file() -> eyre::Result<(tempfile::TempDir, std::fs::File)> {
    let now = OffsetDateTime::now_utc();
    let temp_dir = tempfile::tempdir()?;
    let path = temp_dir
        .path()
        .join(HOURLY_SUBDIR)
        .join(TimeUtils::date_from_datetime(now))
        .join(format!("{}", now.hour()));
    std::fs::create_dir_all(path.parent().unwrap())?;
    Ok((temp_dir, std::fs::File::create(path)?))
}

struct BlockSourceHierarchy {
    block_source: HlNodeBlockSource,
    _temp_dir: tempfile::TempDir,
    file1: std::fs::File,
    current_block: LocalBlockAndReceipts,
    future_block_hl_node: LocalBlockAndReceipts,
    future_block_fallback: LocalBlockAndReceipts,
}

async fn setup_block_source_hierarchy() -> eyre::Result<BlockSourceHierarchy> {
    // Setup fallback block source
    let block_source_fallback = HlNodeBlockSource::new(
        BlockSourceBoxed::new(Box::new(LocalBlockSource::new("/nonexistent"))),
        HlNodeBlockSourceArgs {
            root: PathBuf::from("/nonexistent"),
            fallback_threshold: DEFAULT_FALLBACK_THRESHOLD_FOR_TEST,
        },
        1000000,
    )
    .await;
    let block_hl_node_0 = empty_block(1000000, 1722633600, b"hl-node");
    let block_hl_node_1 = empty_block(1000001, 1722633600, b"hl-node");
    let block_fallback_1 = empty_block(1000001, 1722633600, b"fallback");

    let (temp_dir1, mut file1) = setup_temp_dir_and_file()?;
    writeln!(&mut file1, "{}", serde_json::to_string(&block_hl_node_0)?)?;

    let block_source = HlNodeBlockSource::new(
        BlockSourceBoxed::new(Box::new(block_source_fallback.clone())),
        HlNodeBlockSourceArgs {
            root: temp_dir1.path().to_path_buf(),
            fallback_threshold: DEFAULT_FALLBACK_THRESHOLD_FOR_TEST,
        },
        1000000,
    )
    .await;

    block_source_fallback
        .local_blocks_cache
        .lock()
        .await
        .load_scan_result(scan_result_from_single_block(block_fallback_1.1.clone()));

    Ok(BlockSourceHierarchy {
        block_source,
        _temp_dir: temp_dir1,
        file1,
        current_block: block_hl_node_0,
        future_block_hl_node: block_hl_node_1,
        future_block_fallback: block_fallback_1,
    })
}

#[tokio::test]
async fn test_update_last_fetch_no_fallback() -> eyre::Result<()> {
    let hierarchy = setup_block_source_hierarchy().await?;
    let BlockSourceHierarchy {
        block_source, current_block, future_block_hl_node, mut file1, ..
    } = hierarchy;

    let block = block_source.collect_block(1000000).await.unwrap();
    assert_eq!(block, current_block.1);

    let block = block_source.collect_block(1000001).await;
    assert!(block.is_err());

    writeln!(&mut file1, "{}", serde_json::to_string(&future_block_hl_node)?)?;
    tokio::time::sleep(Duration::from_millis(100)).await;

    let block = block_source.collect_block(1000001).await.unwrap();
    assert_eq!(block, future_block_hl_node.1);

    Ok(())
}

#[tokio::test]
async fn test_update_last_fetch_fallback() -> eyre::Result<()> {
    let hierarchy = setup_block_source_hierarchy().await?;
    let BlockSourceHierarchy {
        block_source, current_block, future_block_fallback, mut file1, ..
    } = hierarchy;

    let block = block_source.collect_block(1000000).await.unwrap();
    assert_eq!(block, current_block.1);

    tokio::time::sleep(DEFAULT_FALLBACK_THRESHOLD_FOR_TEST).await;

    writeln!(&mut file1, "{}", serde_json::to_string(&future_block_fallback)?)?;
    let block = block_source.collect_block(1000001).await.unwrap();
    assert_eq!(block, future_block_fallback.1);

    Ok(())
}
19 src/pseudo_peer/sources/hl_node/time_utils.rs Normal file
@@ -0,0 +1,19 @@
use std::path::Path;
use time::{macros::format_description, Date, OffsetDateTime, Time};

pub struct TimeUtils;

impl TimeUtils {
    pub fn datetime_from_path(path: &Path) -> Option<OffsetDateTime> {
        let (dt_part, hour_part) =
            (path.parent()?.file_name()?.to_str()?, path.file_name()?.to_str()?);
        Some(OffsetDateTime::new_utc(
            Date::parse(dt_part, &format_description!("[year][month][day]")).ok()?,
            Time::from_hms(hour_part.parse().ok()?, 0, 0).ok()?,
        ))
    }

    pub fn date_from_datetime(dt: OffsetDateTime) -> String {
        dt.format(&format_description!("[year][month][day]")).unwrap()
    }
}
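A quick sanity sketch of the layout these helpers assume (the /data prefix is a placeholder, not from the diff): hourly files live under <yyyymmdd>/<hour> directories, so "20250101/5" parses as 2025-01-01 05:00 UTC and the formatter round-trips the date part.

use std::path::Path;

fn time_utils_roundtrip() {
    // Parent dir carries the date, file name carries the hour.
    let dt = TimeUtils::datetime_from_path(Path::new("/data/20250101/5")).unwrap();
    assert_eq!(dt.hour(), 5);
    assert_eq!(TimeUtils::date_from_datetime(dt), "20250101");
}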
64 src/pseudo_peer/sources/local.rs Normal file
@@ -0,0 +1,64 @@
use super::{utils, BlockSource};
use crate::node::types::BlockAndReceipts;
use eyre::Context;
use futures::{future::BoxFuture, FutureExt};
use std::path::PathBuf;
use tracing::info;

/// Block source that reads blocks from local filesystem (--ingest-dir)
#[derive(Debug, Clone)]
pub struct LocalBlockSource {
    dir: PathBuf,
}

impl LocalBlockSource {
    pub fn new(dir: impl Into<PathBuf>) -> Self {
        Self { dir: dir.into() }
    }

    async fn pick_path_with_highest_number(dir: PathBuf, is_dir: bool) -> Option<(u64, String)> {
        let files = std::fs::read_dir(&dir).unwrap().collect::<Vec<_>>();
        let files = files
            .into_iter()
            .filter(|path| path.as_ref().unwrap().path().is_dir() == is_dir)
            .map(|entry| entry.unwrap().path().to_string_lossy().to_string())
            .collect::<Vec<_>>();

        utils::name_with_largest_number(&files, is_dir)
    }
}

impl BlockSource for LocalBlockSource {
    fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
        let dir = self.dir.clone();
        async move {
            let path = dir.join(utils::rmp_path(height));
            let file = tokio::fs::read(&path)
                .await
                .wrap_err_with(|| format!("Failed to read block from {path:?}"))?;
            let mut decoder = lz4_flex::frame::FrameDecoder::new(&file[..]);
            let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
            Ok(blocks[0].clone())
        }
        .boxed()
    }

    fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
        let dir = self.dir.clone();
        async move {
            let (_, first_level) = Self::pick_path_with_highest_number(dir.clone(), true).await?;
            let (_, second_level) =
                Self::pick_path_with_highest_number(dir.join(first_level), true).await?;
            let (block_number, third_level) =
                Self::pick_path_with_highest_number(dir.join(second_level), false).await?;

            info!("Latest block number: {} with path {}", block_number, third_level);
            Some(block_number)
        }
        .boxed()
    }

    fn recommended_chunk_size(&self) -> u64 {
        1000
    }
}
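A minimal usage sketch (the directory is a placeholder for whatever --ingest-dir points at): probe the newest height on disk, then fetch that block.

async fn newest_local_block() -> eyre::Result<()> {
    let source = LocalBlockSource::new("/path/to/ingest-dir");
    if let Some(latest) = source.find_latest_block_number().await {
        let _block = source.collect_block(latest).await?;
    }
    Ok(())
}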
@@ -1,269 +1,40 @@
 use crate::node::types::BlockAndReceipts;
-use aws_sdk_s3::types::RequestPayer;
-use eyre::Context;
-use futures::{future::BoxFuture, FutureExt};
-use reth_network::cache::LruMap;
-use std::{
-    path::PathBuf,
-    sync::{Arc, RwLock},
-};
-use tracing::info;
+use auto_impl::auto_impl;
+use futures::future::BoxFuture;
+use std::{sync::Arc, time::Duration};
 
+// Module declarations
+mod cached;
 mod hl_node;
-pub use hl_node::HlNodeBlockSource;
+mod local;
+mod s3;
+mod utils;
+
+// Public exports
+pub use cached::CachedBlockSource;
+pub use hl_node::{HlNodeBlockSource, HlNodeBlockSourceArgs};
+pub use local::LocalBlockSource;
+pub use s3::S3BlockSource;
+
+const DEFAULT_POLLING_INTERVAL: Duration = Duration::from_millis(25);
+
+/// Trait for block sources that can retrieve blocks from various sources
+#[auto_impl(&, &mut, Box, Arc)]
 pub trait BlockSource: Send + Sync + std::fmt::Debug + Unpin + 'static {
-    fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>>;
-    fn find_latest_block_number(&self) -> BoxFuture<Option<u64>>;
+    /// Retrieves a block at the specified height
+    fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>>;
+
+    /// Finds the latest block number available from this source
+    fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>>;
+
+    /// Returns the recommended chunk size for batch operations
     fn recommended_chunk_size(&self) -> u64;
+
+    /// Returns the polling interval
+    fn polling_interval(&self) -> Duration {
+        DEFAULT_POLLING_INTERVAL
+    }
 }
+
+/// Type alias for a boxed block source
 pub type BlockSourceBoxed = Arc<Box<dyn BlockSource>>;
 
-fn name_with_largest_number(files: &[String], is_dir: bool) -> Option<(u64, String)> {
-    let mut files = files
-        .iter()
-        .filter_map(|file_raw| {
-            let file = file_raw.strip_suffix("/").unwrap_or(file_raw).split("/").last().unwrap();
-            let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
-            stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
-        })
-        .collect::<Vec<_>>();
-    if files.is_empty() {
-        return None;
-    }
-    files.sort_by_key(|(number, _)| *number);
-    files.last().cloned()
-}
-
-#[derive(Debug, Clone)]
-pub struct S3BlockSource {
-    client: aws_sdk_s3::Client,
-    bucket: String,
-}
-
-impl S3BlockSource {
-    pub fn new(client: aws_sdk_s3::Client, bucket: String) -> Self {
-        Self { client, bucket }
-    }
-
-    async fn pick_path_with_highest_number(
-        client: aws_sdk_s3::Client,
-        bucket: String,
-        dir: String,
-        is_dir: bool,
-    ) -> Option<(u64, String)> {
-        let request = client
-            .list_objects()
-            .bucket(&bucket)
-            .prefix(dir)
-            .delimiter("/")
-            .request_payer(RequestPayer::Requester);
-        let response = request.send().await.ok()?;
-        let files: Vec<String> = if is_dir {
-            response
-                .common_prefixes
-                .unwrap()
-                .iter()
-                .map(|object| object.prefix.as_ref().unwrap().to_string())
-                .collect()
-        } else {
-            response
-                .contents
-                .unwrap()
-                .iter()
-                .map(|object| object.key.as_ref().unwrap().to_string())
-                .collect()
-        };
-        name_with_largest_number(&files, is_dir)
-    }
-}
-
-impl BlockSource for S3BlockSource {
-    fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
-        let client = self.client.clone();
-        let bucket = self.bucket.clone();
-        async move {
-            let path = rmp_path(height);
-            let request = client
-                .get_object()
-                .request_payer(RequestPayer::Requester)
-                .bucket(&bucket)
-                .key(path);
-            let response = request.send().await?;
-            let bytes = response.body.collect().await?.into_bytes();
-            let mut decoder = lz4_flex::frame::FrameDecoder::new(&bytes[..]);
-            let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
-            Ok(blocks[0].clone())
-        }
-        .boxed()
-    }
-
-    fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
-        let client = self.client.clone();
-        let bucket = self.bucket.clone();
-        async move {
-            let (_, first_level) = Self::pick_path_with_highest_number(
-                client.clone(),
-                bucket.clone(),
-                "".to_string(),
-                true,
-            )
-            .await?;
-            let (_, second_level) = Self::pick_path_with_highest_number(
-                client.clone(),
-                bucket.clone(),
-                first_level,
-                true,
-            )
-            .await?;
-            let (block_number, third_level) = Self::pick_path_with_highest_number(
-                client.clone(),
-                bucket.clone(),
-                second_level,
-                false,
-            )
-            .await?;
-
-            info!("Latest block number: {} with path {}", block_number, third_level);
-            Some(block_number)
-        }
-        .boxed()
-    }
-
-    fn recommended_chunk_size(&self) -> u64 {
-        1000
-    }
-}
-
-impl BlockSource for LocalBlockSource {
-    fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
-        let dir = self.dir.clone();
-        async move {
-            let path = dir.join(rmp_path(height));
-            let file = tokio::fs::read(&path)
-                .await
-                .wrap_err_with(|| format!("Failed to read block from {path:?}"))?;
-            let mut decoder = lz4_flex::frame::FrameDecoder::new(&file[..]);
-            let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
-            Ok(blocks[0].clone())
-        }
-        .boxed()
-    }
-
-    fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
-        let dir = self.dir.clone();
-        async move {
-            let (_, first_level) = Self::pick_path_with_highest_number(dir.clone(), true).await?;
-            let (_, second_level) =
-                Self::pick_path_with_highest_number(dir.join(first_level), true).await?;
-            let (block_number, third_level) =
-                Self::pick_path_with_highest_number(dir.join(second_level), false).await?;
-
-            info!("Latest block number: {} with path {}", block_number, third_level);
-            Some(block_number)
-        }
-        .boxed()
-    }
-
-    fn recommended_chunk_size(&self) -> u64 {
-        1000
-    }
-}
-
-#[derive(Debug, Clone)]
-pub struct LocalBlockSource {
-    dir: PathBuf,
-}
-
-impl LocalBlockSource {
-    pub fn new(dir: impl Into<PathBuf>) -> Self {
-        Self { dir: dir.into() }
-    }
-
-    fn name_with_largest_number_static(files: &[String], is_dir: bool) -> Option<(u64, String)> {
-        let mut files = files
-            .iter()
-            .filter_map(|file_raw| {
-                let file = file_raw.strip_suffix("/").unwrap_or(file_raw);
-                let file = file.split("/").last().unwrap();
-                let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
-                stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
-            })
-            .collect::<Vec<_>>();
-        if files.is_empty() {
-            return None;
-        }
-        files.sort_by_key(|(number, _)| *number);
-        files.last().map(|(number, file)| (*number, file.to_string()))
-    }
-
-    async fn pick_path_with_highest_number(dir: PathBuf, is_dir: bool) -> Option<(u64, String)> {
-        let files = std::fs::read_dir(&dir).unwrap().collect::<Vec<_>>();
-        let files = files
-            .into_iter()
-            .filter(|path| path.as_ref().unwrap().path().is_dir() == is_dir)
-            .map(|entry| entry.unwrap().path().to_string_lossy().to_string())
-            .collect::<Vec<_>>();
-
-        Self::name_with_largest_number_static(&files, is_dir)
-    }
-}
-
-fn rmp_path(height: u64) -> String {
-    let f = ((height - 1) / 1_000_000) * 1_000_000;
-    let s = ((height - 1) / 1_000) * 1_000;
-    let path = format!("{f}/{s}/{height}.rmp.lz4");
-    path
-}
-
-impl BlockSource for BlockSourceBoxed {
-    fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
-        self.as_ref().collect_block(height)
-    }
-
-    fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
-        self.as_ref().find_latest_block_number()
-    }
-
-    fn recommended_chunk_size(&self) -> u64 {
-        self.as_ref().recommended_chunk_size()
-    }
-}
-
-#[derive(Debug, Clone)]
-pub struct CachedBlockSource {
-    block_source: BlockSourceBoxed,
-    cache: Arc<RwLock<LruMap<u64, BlockAndReceipts>>>,
-}
-
-impl CachedBlockSource {
-    const CACHE_LIMIT: u32 = 100000;
-    pub fn new(block_source: BlockSourceBoxed) -> Self {
-        Self { block_source, cache: Arc::new(RwLock::new(LruMap::new(Self::CACHE_LIMIT))) }
-    }
-}
-
-impl BlockSource for CachedBlockSource {
-    fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
-        let block_source = self.block_source.clone();
-        let cache = self.cache.clone();
-        async move {
-            if let Some(block) = cache.write().unwrap().get(&height) {
-                return Ok(block.clone());
-            }
-            let block = block_source.collect_block(height).await?;
-            cache.write().unwrap().insert(height, block.clone());
-            Ok(block)
-        }
-        .boxed()
-    }
-
-    fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
-        self.block_source.find_latest_block_number()
-    }
-
-    fn recommended_chunk_size(&self) -> u64 {
-        self.block_source.recommended_chunk_size()
-    }
-}
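Two notes on this refactor, with a sketch (anything not visible in the diff above is an assumption). The #[auto_impl(&, &mut, Box, Arc)] attribute is what makes the hand-written impl BlockSource for BlockSourceBoxed deletable: it derives forwarding impls for references and smart pointers, so Arc<Box<dyn BlockSource>> implements the trait automatically. The LRU wrapper now lives in cached.rs (not shown in this hunk); assuming its constructor still takes a BlockSourceBoxed like the deleted in-module version did, the layering looks like:

// Sketch: CachedBlockSource::new's signature is assumed unchanged from the
// deleted in-module version above.
fn build_cached_local(dir: &str) -> CachedBlockSource {
    let local = LocalBlockSource::new(dir);
    CachedBlockSource::new(BlockSourceBoxed::new(Box::new(local)))
}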
95 src/pseudo_peer/sources/s3.rs Normal file
@@ -0,0 +1,95 @@
use super::{utils, BlockSource};
use crate::node::types::BlockAndReceipts;
use aws_sdk_s3::types::RequestPayer;
use futures::{future::BoxFuture, FutureExt};
use std::{sync::Arc, time::Duration};
use tracing::info;

/// Block source that reads blocks from S3 (--s3)
#[derive(Debug, Clone)]
pub struct S3BlockSource {
    client: Arc<aws_sdk_s3::Client>,
    bucket: String,
    polling_interval: Duration,
}

impl S3BlockSource {
    pub fn new(client: aws_sdk_s3::Client, bucket: String, polling_interval: Duration) -> Self {
        Self { client: client.into(), bucket, polling_interval }
    }

    async fn pick_path_with_highest_number(
        client: &aws_sdk_s3::Client,
        bucket: &str,
        dir: &str,
        is_dir: bool,
    ) -> Option<(u64, String)> {
        let request = client
            .list_objects()
            .bucket(bucket)
            .prefix(dir)
            .delimiter("/")
            .request_payer(RequestPayer::Requester);
        let response = request.send().await.ok()?;
        let files: Vec<String> = if is_dir {
            response
                .common_prefixes?
                .iter()
                .map(|object| object.prefix.as_ref().unwrap().to_string())
                .collect()
        } else {
            response
                .contents?
                .iter()
                .map(|object| object.key.as_ref().unwrap().to_string())
                .collect()
        };
        utils::name_with_largest_number(&files, is_dir)
    }
}

impl BlockSource for S3BlockSource {
    fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
        let client = self.client.clone();
        let bucket = self.bucket.clone();
        async move {
            let path = utils::rmp_path(height);
            let request = client
                .get_object()
                .request_payer(RequestPayer::Requester)
                .bucket(&bucket)
                .key(path);
            let response = request.send().await?;
            let bytes = response.body.collect().await?.into_bytes();
            let mut decoder = lz4_flex::frame::FrameDecoder::new(&bytes[..]);
            let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
            Ok(blocks[0].clone())
        }
        .boxed()
    }

    fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
        let client = self.client.clone();
        let bucket = self.bucket.clone();
        async move {
            let (_, first_level) =
                Self::pick_path_with_highest_number(&client, &bucket, "", true).await?;
            let (_, second_level) =
                Self::pick_path_with_highest_number(&client, &bucket, &first_level, true).await?;
            let (block_number, third_level) =
                Self::pick_path_with_highest_number(&client, &bucket, &second_level, false).await?;

            info!("Latest block number: {} with path {}", block_number, third_level);
            Some(block_number)
        }
        .boxed()
    }

    fn recommended_chunk_size(&self) -> u64 {
        1000
    }

    fn polling_interval(&self) -> Duration {
        self.polling_interval
    }
}
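A wiring sketch for completeness; the bucket name is a placeholder and credentials come from whatever the environment provides.

// Sketch: build the client from ambient AWS config, then poll the bucket.
async fn newest_s3_block() -> eyre::Result<()> {
    let config = aws_config::load_from_env().await;
    let client = aws_sdk_s3::Client::new(&config);
    let source =
        S3BlockSource::new(client, "example-bucket".to_string(), Duration::from_millis(250));
    if let Some(latest) = source.find_latest_block_number().await {
        let _block = source.collect_block(latest).await?;
    }
    Ok(())
}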
26 src/pseudo_peer/sources/utils.rs Normal file
@@ -0,0 +1,26 @@
//! Shared utilities for block sources

/// Finds the file/directory with the largest number in its name from a list of files
pub fn name_with_largest_number(files: &[String], is_dir: bool) -> Option<(u64, String)> {
    let mut files = files
        .iter()
        .filter_map(|file_raw| {
            let file = file_raw.strip_suffix("/").unwrap_or(file_raw);
            let file = file.split("/").last().unwrap();
            let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
            stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
        })
        .collect::<Vec<_>>();
    if files.is_empty() {
        return None;
    }
    files.sort_by_key(|(number, _)| *number);
    files.last().cloned()
}

/// Generates the RMP file path for a given block height
pub fn rmp_path(height: u64) -> String {
    let f = ((height - 1) / 1_000_000) * 1_000_000;
    let s = ((height - 1) / 1_000) * 1_000;
    format!("{f}/{s}/{height}.rmp.lz4")
}
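A worked example of the bucketing: rmp_path floors height - 1 to the nearest million and thousand, so exact multiples land in the preceding bucket.

#[test]
fn rmp_path_bucketing() {
    assert_eq!(rmp_path(1_234_567), "1000000/1234000/1234567.rmp.lz4");
    // The `- 1` puts exact multiples in the bucket below.
    assert_eq!(rmp_path(1_000_000), "0/999000/1000000.rmp.lz4");
}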
@@ -1,30 +0,0 @@
-use std::path::Path;
-
-use crate::pseudo_peer::{prelude::*, BlockSourceType};
-
-#[tokio::test]
-async fn test_block_source_config_s3() {
-    let config = BlockSourceConfig::s3("test-bucket".to_string()).await;
-    assert!(
-        matches!(config.source_type, BlockSourceType::S3 { bucket } if bucket == "test-bucket")
-    );
-}
-
-#[tokio::test]
-async fn test_block_source_config_local() {
-    let config = BlockSourceConfig::local("/test/path".into());
-    assert!(
-        matches!(config.source_type, BlockSourceType::Local { path } if path == Path::new("/test/path"))
-    );
-}
-
-#[test]
-fn test_error_types() {
-    let io_error = std::io::Error::new(std::io::ErrorKind::NotFound, "File not found");
-    let benchmark_error: PseudoPeerError = io_error.into();
-
-    match benchmark_error {
-        PseudoPeerError::Io(_) => (),
-        _ => panic!("Expected Io error"),
-    }
-}