48 Commits

Author SHA1 Message Date
7d223a464e Merge pull request #63 from hl-archive-node/feat/nb-release
feat: Add nb tag to docker releases
2025-09-11 19:36:43 -04:00
afcc551f67 feat: Add nb tag to docker releases 2025-09-11 19:35:50 -04:00
0dfd7a4c7f Merge pull request #62 from hl-archive-node/doc/testnet
doc: Update testnet instruction, add support channel
2025-09-11 19:33:50 -04:00
8faac526b7 doc: Add support channel 2025-09-11 19:32:55 -04:00
acfabf969c doc: Update testnet block number 2025-09-11 19:31:37 -04:00
fccf877a3a Merge pull request #61 from hl-archive-node/chore/v1.7.0
chore: Upgrade to reth v1.7.0
2025-09-11 19:26:47 -04:00
9e3f0c722e chore: Upgrade to reth v1.7.0 2025-09-11 19:25:48 -04:00
cd5bcc4cb0 chore: Add issue templates from reth 2025-09-11 19:00:09 -04:00
d831a459bb Merge pull request #60 from hl-archive-node/feat/block-metrics
feat: Add block source metrics
2025-09-11 18:56:18 -04:00
66c2ee654c feat: Add block source metrics 2025-09-11 18:50:22 -04:00
701e6a25e6 refactor: Remove duplications 2025-09-11 18:47:58 -04:00
ab11ce513f Merge pull request #57 from Quertyy/chore/reth-hl-version
chore(build): add reth-hl version output
2025-09-09 09:43:12 -04:00
37b852e810 chore(build): add reth-hl version output 2025-09-09 20:19:52 +07:00
51c43d6dbd Create a docker release github action (#54)
* create docker release action

2025-09-08 10:26:20 -04:00
3f08b0a4e6 Merge pull request #55 from hl-archive-node/fix/txenv-on-trace
fix: Fill precompiles when tracing
2025-09-04 20:39:16 -04:00
d7992ab8ff remove: Remove unnecessary trait implementation 2025-09-04 20:38:41 -04:00
b37a30fb37 fix: Fill precompiles in tracing APIs 2025-09-04 20:37:10 -04:00
f6432498d8 refactor: Relax apply_precompiles and expose 2025-09-04 20:37:07 -04:00
772ff250ce Merge pull request #52 from hl-archive-node/fix/avoid-crash-on-eth-failure
fix: Do not crash when collect_block failed
2025-08-29 02:51:10 +09:00
5ee9053286 fix: Do not crash when collect_block failed
Gracefully return it as an error and log it instead
2025-08-28 13:47:44 -04:00
29e6972d58 Merge pull request #51 from hl-archive-node/feat/no-eth-proof
fix: Disable eth_getProof by default
2025-08-29 02:07:24 +09:00
e87b9232cc fix: Disable eth_getProof by default
No need to expose a malfunctioning feature by default. Issue #15 affects the
StoragesTrie and AccountsTrie tables, which are used only for state root and
proof generation.
Clearing these tables also does not affect any other part of the reth node.

Meanwhile, add an --experimental-eth-get-proof flag to forcibly enable
eth_getProof.
2025-08-28 10:27:32 -04:00
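For operators who still need proofs, a sketch of opting back in, assuming a locally installed `reth-hl` (the flag and its `EXPERIMENTAL_ETH_GET_PROOF` env var are defined in the CLI args diff below):

```sh
# Forcibly re-enable eth_getProof despite the issue #15 caveats
reth-hl node --http --experimental-eth-get-proof

# Equivalent via the environment variable (assuming clap's env handling)
EXPERIMENTAL_ETH_GET_PROOF=true reth-hl node --http
```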
b004263f82 Merge pull request #50 from Quertyy/feat/rpc-system-tx-receipts
chore(rpc): add eth_getEvmSystemTxsReceiptsByBlockHash and eth_getEvmSystemTxsReceiptsByBlockNumber rpc methods
2025-08-28 23:26:05 +09:00
74e27b5ee2 refactor(rpc): extract common logic for getting system txs 2025-08-28 16:10:41 +02:00
09fcf0751f chore(rpc): add eth_getSystemTxsReceiptsByBlockNumber and eth_getSystemTxsReceiptsByBlockHash rpc methods 2025-08-28 15:39:37 +02:00
8f2eca4754 Merge pull request #48 from Quertyy/feat/rpc-block-system-tx
chore(rpc): add eth_getEvmSystemTxsByBlockNumber and eth_getEvmSystemTxsByBlockHash rpc methods
2025-08-28 17:45:43 +09:00
707b4fb709 chore(rpc): return types compliance 2025-08-27 10:34:34 +02:00
62dd5a71b5 chore(rpc): change methods name 2025-08-26 22:03:40 +02:00
412c38a8cd chore(rpc): add eth_getSystemTxsByBlockNumber and eth_getSystemTxsByBlockHash rpc methods 2025-08-26 21:24:28 +02:00
796ea518bd Merge pull request #47 from hl-archive-node/fix/issue-46
fix: Sort hl-node files correctly
2025-08-27 02:49:16 +09:00
dd2c925af2 fix: Sort hl-node files correctly 2025-08-26 13:47:34 -04:00
3ffd7bb351 Merge pull request #45 from hl-archive-node/feat/add-cli-params-for-sources
feat: Add --local.fallback-threshold, --s3.polling-interval
2025-08-26 11:29:34 +09:00
52909eea3f feat: Add --local.fallback-threshold, --s3.polling-interval 2025-08-25 22:27:26 -04:00
0f9c2c5897 chore: Code style 2025-08-25 21:12:57 -04:00
ad4a8cd365 remove: Remove unnecessary tests 2025-08-25 21:12:34 -04:00
80506a7a43 fix(hl-node-compliance): Fix transaction index on block response 2025-08-25 10:00:43 -04:00
2af312b628 remove: Remove unused code 2025-08-25 10:00:43 -04:00
1908e9f414 Merge pull request #40 from sentioxyz/node-builder
fix: correctly ingest local blocks
2025-08-24 18:13:41 +09:00
65cdc27b51 fix: line_to_evm_block doesn't hold equivalent semantics after refactor 2025-08-24 16:46:45 +08:00
4f430487d6 refactor: Move RPC addons to addons/ 2025-08-24 01:18:52 -04:00
19f35a6b54 chore: clippy, fmt 2025-08-24 01:15:36 -04:00
d61020e996 refactor: Split files for block sources
By claude code
2025-08-24 01:14:33 -04:00
657df240f4 fix: Avoid unnecessarily exposing pseudo peer 2025-08-23 22:17:03 -04:00
73a34a4bc1 chore: clippy 2025-08-23 22:17:03 -04:00
d8eef6305b remove: Reduce unnecessary LoC 2025-08-23 22:17:03 -04:00
bae68ef8db refactor: Reduce unnecessary LoC
By claude code
2025-08-23 04:21:23 -04:00
f576dddfa6 remove: Remove unused code 2025-08-23 03:10:05 -04:00
894ebcbfa5 Merge pull request #36 from hl-archive-node/fix/support-new-api
fix: Support new reth API
2025-08-23 01:51:36 +09:00
61 changed files with 2532 additions and 2149 deletions

.github/ISSUE_TEMPLATE/bug.yml (new file, 127 lines)

@ -0,0 +1,127 @@
name: Bug Report
description: Create a bug report
labels: ["C-bug", "S-needs-triage"]
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to fill out this bug report! Please provide as much detail as possible.
        If you believe you have found a vulnerability, please provide details [here](mailto:georgios@paradigm.xyz) instead.
  - type: textarea
    id: what-happened
    attributes:
      label: Describe the bug
      description: |
        A clear and concise description of what the bug is.
        If the bug is in a crate you are using (i.e. you are not running the standard `reth` binary) please mention that as well.
    validations:
      required: true
  - type: textarea
    id: reproduction-steps
    attributes:
      label: Steps to reproduce
      description: Please provide any steps you think might be relevant to reproduce the bug.
      placeholder: |
        Steps to reproduce:
        1. Start '...'
        2. Then '...'
        3. Check '...'
        4. See error
    validations:
      required: true
  - type: textarea
    id: logs
    attributes:
      label: Node logs
      description: |
        If applicable, please provide the node logs leading up to the bug.
        **Please also provide debug logs.** By default, these can be found in:
        - `~/.cache/reth/logs` on Linux
        - `~/Library/Caches/reth/logs` on macOS
        - `%localAppData%/reth/logs` on Windows
      render: text
    validations:
      required: false
  - type: dropdown
    id: platform
    attributes:
      label: Platform(s)
      description: What platform(s) did this occur on?
      multiple: true
      options:
        - Linux (x86)
        - Linux (ARM)
        - Mac (Intel)
        - Mac (Apple Silicon)
        - Windows (x86)
        - Windows (ARM)
  - type: dropdown
    id: container_type
    attributes:
      label: Container Type
      description: Were you running it in a container?
      multiple: true
      options:
        - Not running in a container
        - Docker
        - Kubernetes
        - LXC/LXD
        - Other
    validations:
      required: true
  - type: textarea
    id: client-version
    attributes:
      label: What version/commit are you on?
      description: This can be obtained with `reth --version`
    validations:
      required: true
  - type: textarea
    id: database-version
    attributes:
      label: What database version are you on?
      description: This can be obtained with `reth db version`
    validations:
      required: true
  - type: textarea
    id: network
    attributes:
      label: Which chain / network are you on?
      description: This is the argument you pass to `reth --chain`. If you are using `--dev`, type in 'dev' here. If you are not running with `--chain` or `--dev` then it is mainnet.
    validations:
      required: true
  - type: dropdown
    id: node-type
    attributes:
      label: What type of node are you running?
      options:
        - Archive (default)
        - Full via --full flag
        - Pruned with custom reth.toml config
    validations:
      required: true
  - type: textarea
    id: prune-config
    attributes:
      label: What prune config do you use, if any?
      description: The `[prune]` section in `reth.toml` file
    validations:
      required: false
  - type: input
    attributes:
      label: If you've built Reth from source, provide the full command you used
    validations:
      required: false
  - type: checkboxes
    id: terms
    attributes:
      label: Code of Conduct
      description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/paradigmxyz/reth/blob/main/CONTRIBUTING.md#code-of-conduct)
      options:
        - label: I agree to follow the Code of Conduct
          required: true

.github/ISSUE_TEMPLATE/config.yml (new file, 5 lines)

@ -0,0 +1,5 @@
blank_issues_enabled: false
contact_links:
  - name: GitHub Discussions
    url: https://github.com/paradigmxyz/reth/discussions
    about: Please ask and answer questions here to keep the issue tracker clean.

.github/ISSUE_TEMPLATE/docs.yml (new file, 19 lines)

@ -0,0 +1,19 @@
name: Documentation
description: Suggest a change to our documentation
labels: ["C-docs", "S-needs-triage"]
body:
  - type: markdown
    attributes:
      value: |
        If you are unsure if the docs are relevant or needed, please open up a discussion first.
  - type: textarea
    attributes:
      label: Describe the change
      description: |
        Please describe the documentation you want to change or add, and if it is for end-users or contributors.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Additional context
      description: Add any other context to the feature (like screenshots, resources)

.github/ISSUE_TEMPLATE/feature.yml (new file, 21 lines)

@ -0,0 +1,21 @@
name: Feature request
description: Suggest a feature
labels: ["C-enhancement", "S-needs-triage"]
body:
  - type: markdown
    attributes:
      value: |
        Please ensure that the feature has not already been requested in the issue tracker.
  - type: textarea
    attributes:
      label: Describe the feature
      description: |
        Please describe the feature and what it is aiming to solve, if relevant.
        If the feature is for a crate, please include a proposed API surface.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Additional context
      description: Add any other context to the feature (like screenshots, resources)

.github/workflows/docker.yml (new file, 38 lines)

@ -0,0 +1,38 @@
# Publishes the Docker image.
name: docker

on:
  push:
    tags:
      - v*
      - nb-????????

env:
  IMAGE_NAME: ${{ github.repository_owner }}/nanoreth
  CARGO_TERM_COLOR: always
  DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/nanoreth
  DOCKER_USERNAME: ${{ github.actor }}

jobs:
  build:
    name: build and push as latest
    runs-on: ubuntu-24.04
    permissions:
      packages: write
      contents: read
    steps:
      - uses: actions/checkout@v5
      - uses: rui314/setup-mold@v1
      - uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2
        with:
          cache-on-failure: true
      - name: Log in to Docker
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin
      - name: Set up Docker builder
        run: |
          docker buildx create --use --name builder
      - name: Build and push nanoreth image
        run: make IMAGE_NAME=$IMAGE_NAME DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME PROFILE=maxperf docker-build-push-latest
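For reference, the workflow is triggered by pushing a tag in one of the two formats above; a minimal sketch, assuming push access to the repository:

```sh
# A version tag...
git tag v1.7.0 && git push origin v1.7.0

# ...or an nb- tag (eight trailing characters, e.g. a date stamp)
git tag nb-20250911 && git push origin nb-20250911
```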

Cargo.lock (generated; 776-line diff suppressed because it is too large)

Cargo.toml

@ -2,6 +2,7 @@
name = "reth_hl"
version = "0.1.0"
edition = "2021"
build = "build.rs"
[lib]
name = "reth_hl"
@ -25,67 +26,68 @@ lto = "fat"
codegen-units = 1
[dependencies]
reth = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-cli = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-cli-commands = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-basic-payload-builder = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-db = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-db-api = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-chainspec = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-cli-util = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-discv4 = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-engine-primitives = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-ethereum-forks = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-ethereum-payload-builder = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-ethereum-primitives = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-eth-wire = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-eth-wire-types = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-evm = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-evm-ethereum = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-node-core = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-revm = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-network = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-network-p2p = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-network-api = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-node-ethereum = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-network-peers = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-payload-primitives = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-primitives = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-primitives-traits = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-provider = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb", features = ["test-utils"] }
reth-rpc = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-rpc-eth-api = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-rpc-engine-api = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-tracing = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-trie-common = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-trie-db = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-codecs = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-transaction-pool = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-stages-types = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
revm = { version = "28.0.1", default-features = false }
reth = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-cli = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-cli-commands = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-basic-payload-builder = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-db = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-db-api = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-chainspec = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-cli-util = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-discv4 = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-engine-primitives = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-ethereum-forks = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-ethereum-payload-builder = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-ethereum-primitives = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-eth-wire = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-eth-wire-types = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-evm = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-evm-ethereum = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-node-core = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-revm = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-network = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-network-p2p = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-network-api = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-node-ethereum = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-network-peers = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-payload-primitives = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-primitives = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-primitives-traits = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-provider = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505", features = ["test-utils"] }
reth-rpc = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-rpc-eth-api = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-rpc-engine-api = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-tracing = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-trie-common = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-trie-db = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-codecs = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-transaction-pool = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-stages-types = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
reth-metrics = { git = "https://github.com/sprites0/reth", rev = "d26fd2e25b57d695aa453c93f15a8cd158a1f505" }
revm = { version = "29.0.0", default-features = false }
# alloy dependencies
alloy-genesis = { version = "1.0.23", default-features = false }
alloy-consensus = { version = "1.0.23", default-features = false }
alloy-genesis = { version = "1.0.30", default-features = false }
alloy-consensus = { version = "1.0.30", default-features = false }
alloy-chains = { version = "0.2.5", default-features = false }
alloy-eips = { version = "1.0.23", default-features = false }
alloy-evm = { version = "0.18.2", default-features = false }
alloy-eips = { version = "1.0.30", default-features = false }
alloy-evm = { version = "0.20.1", default-features = false }
alloy-json-abi = { version = "1.3.1", default-features = false }
alloy-json-rpc = { version = "1.0.23", default-features = false }
alloy-json-rpc = { version = "1.0.30", default-features = false }
alloy-dyn-abi = "1.3.1"
alloy-network = { version = "1.0.23", default-features = false }
alloy-network = { version = "1.0.30", default-features = false }
alloy-primitives = { version = "1.3.1", default-features = false, features = ["map-foldhash"] }
alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] }
alloy-rpc-types = { version = "1.0.23", features = ["eth"], default-features = false }
alloy-rpc-types-eth = { version = "1.0.23", default-features = false }
alloy-rpc-types-engine = { version = "1.0.23", default-features = false }
alloy-signer = { version = "1.0.23", default-features = false }
alloy-rpc-types = { version = "1.0.30", features = ["eth"], default-features = false }
alloy-rpc-types-eth = { version = "1.0.30", default-features = false }
alloy-rpc-types-engine = { version = "1.0.30", default-features = false }
alloy-signer = { version = "1.0.30", default-features = false }
alloy-sol-macro = "1.3.1"
alloy-sol-types = { version = "1.3.1", default-features = false }
jsonrpsee = "0.25.1"
jsonrpsee-core = "0.25.1"
jsonrpsee-types = "0.25.1"
jsonrpsee = "0.26.0"
jsonrpsee-core = "0.26.0"
jsonrpsee-types = "0.26.0"
# misc dependencies
auto_impl = "1"
@ -166,3 +168,7 @@ client = [
[dev-dependencies]
tempfile = "3.20.0"
[build-dependencies]
vergen = { version = "9.0.4", features = ["build", "cargo", "emit_and_set"] }
vergen-git2 = "1.0.5"

Makefile

@ -1,6 +1,8 @@
# Modified from reth Makefile
.DEFAULT_GOAL := help
GIT_SHA ?= $(shell git rev-parse HEAD)
GIT_TAG ?= $(shell git describe --tags --abbrev=0 2>/dev/null)
BIN_DIR = "dist/bin"
# List of features to use when building. Can be overridden via the environment.
@ -17,6 +19,9 @@ PROFILE ?= release
# Extra flags for Cargo
CARGO_INSTALL_EXTRA_FLAGS ?=
# The docker image name
DOCKER_IMAGE_NAME ?= ghcr.io/hl-archive-node/nanoreth
##@ Help
.PHONY: help
@ -207,3 +212,50 @@ check-features:
	--package reth-primitives-traits \
	--package reth-primitives \
	--feature-powerset
##@ Docker

# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --driver docker-container --name cross-builder`
.PHONY: docker-build-push
docker-build-push: ## Build and push a cross-arch Docker image tagged with the latest git tag.
	$(call docker_build_push,$(GIT_TAG),$(GIT_TAG))

# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --driver docker-container --name cross-builder`
.PHONY: docker-build-push-git-sha
docker-build-push-git-sha: ## Build and push a cross-arch Docker image tagged with the latest git sha.
	$(call docker_build_push,$(GIT_SHA),$(GIT_SHA))

# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --driver docker-container --name cross-builder`
.PHONY: docker-build-push-latest
docker-build-push-latest: ## Build and push a cross-arch Docker image tagged with the latest git tag and `latest`.
	$(call docker_build_push,$(GIT_TAG),latest)

# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --name cross-builder`
.PHONY: docker-build-push-nightly
docker-build-push-nightly: ## Build and push cross-arch Docker image tagged with the latest git tag with a `-nightly` suffix, and `latest-nightly`.
	$(call docker_build_push,nightly,nightly)

# Create a Docker image using the main Dockerfile
define docker_build_push
	docker buildx build --file ./Dockerfile . \
		--platform linux/amd64 \
		--tag $(DOCKER_IMAGE_NAME):$(1) \
		--tag $(DOCKER_IMAGE_NAME):$(2) \
		--build-arg BUILD_PROFILE="$(PROFILE)" \
		--build-arg FEATURES="jemalloc,asm-keccak" \
		--build-arg RUSTFLAGS="-C target-cpu=native" \
		--provenance=false \
		--push
endef
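For illustration, the same target the CI invokes can be run locally; a minimal sketch, assuming a buildx builder is already configured as in the notes above:

```sh
# Build and push <image>:<latest git tag> and <image>:latest using the maxperf profile
make DOCKER_IMAGE_NAME=ghcr.io/hl-archive-node/nanoreth PROFILE=maxperf docker-build-push-latest
```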

README.md

@ -3,6 +3,8 @@
HyperEVM archive node implementation based on [reth](https://github.com/paradigmxyz/reth).
NodeBuilder API version is heavily inspired by [reth-bsc](https://github.com/loocapro/reth-bsc).
Got questions? Drop by the [Hyperliquid Discord](https://discord.gg/hyperliquid) #node-operators channel.
## ⚠️ IMPORTANT: System Transactions Appear as Pseudo Transactions
Deposit transactions from [System Addresses](https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/hyperevm/hypercore-less-than-greater-than-hyperevm-transfers#system-addresses) like `0x222..22` / `0x200..xx` to user addresses are intentionally recorded as pseudo transactions.
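These pseudo transactions can also be queried on their own through the `eth_getEvmSystemTxs*` methods added in this release; a hedged example, assuming a node serving HTTP JSON-RPC on localhost:8545:

```sh
# List the system (pseudo) transactions in the latest block
curl -s -X POST http://localhost:8545 \
  -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"eth_getEvmSystemTxsByBlockNumber","params":["latest"]}'
```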
@ -58,19 +60,19 @@ $ reth-hl node --http --http.addr 0.0.0.0 --http.api eth,ots,net,web3 \
## How to run (testnet)
Testnet is supported since block 21304281.
Testnet is supported since block 30281484.
```sh
# Get testnet genesis at block 21304281
# Get testnet genesis at block 30281484
$ cd ~
$ git clone https://github.com/sprites0/hl-testnet-genesis
$ zstd --rm -d ~/hl-testnet-genesis/*.zst
# Init node
$ make install
$ reth-hl init-state --without-evm --chain testnet --header ~/hl-testnet-genesis/21304281.rlp \
--header-hash 0x5b10856d2b1ad241c9bd6136bcc60ef7e8553560ca53995a590db65f809269b4 \
~/hl-testnet-genesis/21304281.jsonl --total-difficulty 0
$ reth-hl init-state --without-evm --chain testnet --header ~/hl-testnet-genesis/30281484.rlp \
--header-hash 0x147cc3c09e9ddbb11799c826758db284f77099478ab5f528d3a57a6105516c21 \
~/hl-testnet-genesis/30281484.jsonl --total-difficulty 0
# Run node
$ reth-hl node --chain testnet --http --http.addr 0.0.0.0 --http.api eth,ots,net,web3 \

build.rs (new file, 91 lines)

@ -0,0 +1,91 @@
use std::{env, error::Error};

use vergen::{BuildBuilder, CargoBuilder, Emitter};
use vergen_git2::Git2Builder;

fn main() -> Result<(), Box<dyn Error>> {
    let mut emitter = Emitter::default();

    let build_builder = BuildBuilder::default().build_timestamp(true).build()?;
    emitter.add_instructions(&build_builder)?;

    let cargo_builder = CargoBuilder::default().features(true).target_triple(true).build()?;
    emitter.add_instructions(&cargo_builder)?;

    let git_builder =
        Git2Builder::default().describe(false, true, None).dirty(true).sha(false).build()?;
    emitter.add_instructions(&git_builder)?;

    emitter.emit_and_set()?;

    let sha = env::var("VERGEN_GIT_SHA")?;
    let sha_short = &sha[0..7];
    let is_dirty = env::var("VERGEN_GIT_DIRTY")? == "true";

    // > git describe --always --tags
    // if not on a tag: v0.2.0-beta.3-82-g1939939b
    // if on a tag: v0.2.0-beta.3
    let not_on_tag = env::var("VERGEN_GIT_DESCRIBE")?.ends_with(&format!("-g{sha_short}"));
    let version_suffix = if is_dirty || not_on_tag { "-dev" } else { "" };
    println!("cargo:rustc-env=RETH_HL_VERSION_SUFFIX={version_suffix}");

    // Set short SHA
    println!("cargo:rustc-env=VERGEN_GIT_SHA_SHORT={}", &sha[..8]);

    // Set the build profile
    let out_dir = env::var("OUT_DIR").unwrap();
    let profile = out_dir.rsplit(std::path::MAIN_SEPARATOR).nth(3).unwrap();
    println!("cargo:rustc-env=RETH_HL_BUILD_PROFILE={profile}");

    // Set formatted version strings
    let pkg_version = env!("CARGO_PKG_VERSION");

    // The short version information for reth.
    // - The latest version from Cargo.toml
    // - The short SHA of the latest commit.
    // Example: 0.1.0 (defa64b2)
    println!("cargo:rustc-env=RETH_HL_SHORT_VERSION={pkg_version}{version_suffix} ({sha_short})");

    // LONG_VERSION
    // The long version information for reth.
    //
    // - The latest version from Cargo.toml + version suffix (if any)
    // - The full SHA of the latest commit
    // - The build datetime
    // - The build features
    // - The build profile
    //
    // Example:
    //
    // ```text
    // Version: 0.1.0
    // Commit SHA: defa64b2
    // Build Timestamp: 2023-05-19T01:47:19.815651705Z
    // Build Features: jemalloc
    // Build Profile: maxperf
    // ```
    println!("cargo:rustc-env=RETH_HL_LONG_VERSION_0=Version: {pkg_version}{version_suffix}");
    println!("cargo:rustc-env=RETH_HL_LONG_VERSION_1=Commit SHA: {sha}");
    println!(
        "cargo:rustc-env=RETH_HL_LONG_VERSION_2=Build Timestamp: {}",
        env::var("VERGEN_BUILD_TIMESTAMP")?
    );
    println!(
        "cargo:rustc-env=RETH_HL_LONG_VERSION_3=Build Features: {}",
        env::var("VERGEN_CARGO_FEATURES")?
    );
    println!("cargo:rustc-env=RETH_HL_LONG_VERSION_4=Build Profile: {profile}");

    // The version information for reth formatted for P2P (devp2p).
    // - The latest version from Cargo.toml
    // - The target triple
    //
    // Example: reth/v0.1.0-alpha.1-428a6dc2f/aarch64-apple-darwin
    println!(
        "cargo:rustc-env=RETH_HL_P2P_CLIENT_VERSION={}",
        format_args!("reth/v{pkg_version}-{sha_short}/{}", env::var("VERGEN_CARGO_TARGET_TRIPLE")?)
    );

    Ok(())
}

src/addons/hl_node_compliance.rs

@ -1,21 +1,28 @@
use alloy_consensus::{transaction::TransactionMeta, TxReceipt};
//! Overrides for RPC methods to post-filter system transactions and logs.
//!
//! System transactions are always at the beginning of the block,
//! so we can use the transaction index to determine if the log is from a system transaction,
//! and if it is, we can exclude it.
//!
//! For non-system transactions, we can just return the log as is, and the client will
//! adjust the transaction index accordingly.
use alloy_consensus::{transaction::TransactionMeta, BlockHeader, TxReceipt};
use alloy_eips::{BlockId, BlockNumberOrTag};
use alloy_json_rpc::RpcObject;
use alloy_primitives::{B256, U256};
use alloy_rpc_types::{
pubsub::{Params, SubscriptionKind},
BlockTransactions, Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind,
TransactionInfo,
};
use jsonrpsee::{proc_macros::rpc, PendingSubscriptionSink, SubscriptionMessage, SubscriptionSink};
use jsonrpsee_core::{async_trait, RpcResult};
use jsonrpsee_types::ErrorObject;
use reth::{
api::FullNodeComponents, builder::rpc::RpcContext, rpc::result::internal_rpc_err,
tasks::TaskSpawner,
};
use jsonrpsee_types::{error::INTERNAL_ERROR_CODE, ErrorObject};
use reth::{api::FullNodeComponents, builder::rpc::RpcContext, tasks::TaskSpawner};
use reth_primitives_traits::{BlockBody as _, SignedTransaction};
use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, ReceiptProvider};
use reth_rpc::{EthFilter, EthPubSub};
use reth_rpc::{eth::pubsub::SubscriptionSerializeError, EthFilter, EthPubSub, RpcTypes};
use reth_rpc_eth_api::{
helpers::{EthBlocks, EthTransactions, LoadReceipt},
transaction::ConvertReceiptInput,
@ -25,12 +32,9 @@ use reth_rpc_eth_api::{
use serde::Serialize;
use std::{borrow::Cow, marker::PhantomData, sync::Arc};
use tokio_stream::{Stream, StreamExt};
use tracing::{info, trace, Instrument};
use tracing::{trace, Instrument};
use crate::{
node::primitives::{HlPrimitives, TransactionSigned},
HlBlock,
};
use crate::{node::primitives::HlPrimitives, HlBlock};
pub trait EthWrapper:
EthApiServer<
@ -39,8 +43,10 @@ pub trait EthWrapper:
RpcBlock<Self::NetworkTypes>,
RpcReceipt<Self::NetworkTypes>,
RpcHeader<Self::NetworkTypes>,
> + FullEthApiTypes<Primitives = HlPrimitives>
+ RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
> + FullEthApiTypes<
Primitives = HlPrimitives,
NetworkTypes: RpcTypes<TransactionResponse = alloy_rpc_types_eth::Transaction>,
> + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
+ EthBlocks
+ EthTransactions
+ LoadReceipt
@ -48,23 +54,235 @@ pub trait EthWrapper:
{
}
impl<
T: EthApiServer<
RpcTxReq<Self::NetworkTypes>,
RpcTransaction<Self::NetworkTypes>,
RpcBlock<Self::NetworkTypes>,
RpcReceipt<Self::NetworkTypes>,
RpcHeader<Self::NetworkTypes>,
> + FullEthApiTypes<Primitives = HlPrimitives>
+ RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
+ EthBlocks
+ EthTransactions
+ LoadReceipt
+ 'static,
> EthWrapper for T
impl<T> EthWrapper for T where
T: EthApiServer<
RpcTxReq<Self::NetworkTypes>,
RpcTransaction<Self::NetworkTypes>,
RpcBlock<Self::NetworkTypes>,
RpcReceipt<Self::NetworkTypes>,
RpcHeader<Self::NetworkTypes>,
> + FullEthApiTypes<
Primitives = HlPrimitives,
NetworkTypes: RpcTypes<TransactionResponse = alloy_rpc_types_eth::Transaction>,
> + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
+ EthBlocks
+ EthTransactions
+ LoadReceipt
+ 'static
{
}
#[rpc(server, namespace = "eth")]
#[async_trait]
pub trait EthSystemTransactionApi<T: RpcObject, R: RpcObject> {
#[method(name = "getEvmSystemTxsByBlockHash")]
async fn get_evm_system_txs_by_block_hash(&self, hash: B256) -> RpcResult<Option<Vec<T>>>;
#[method(name = "getEvmSystemTxsByBlockNumber")]
async fn get_evm_system_txs_by_block_number(
&self,
block_id: Option<BlockId>,
) -> RpcResult<Option<Vec<T>>>;
#[method(name = "getEvmSystemTxsReceiptsByBlockHash")]
async fn get_evm_system_txs_receipts_by_block_hash(
&self,
hash: B256,
) -> RpcResult<Option<Vec<R>>>;
#[method(name = "getEvmSystemTxsReceiptsByBlockNumber")]
async fn get_evm_system_txs_receipts_by_block_number(
&self,
block_id: Option<BlockId>,
) -> RpcResult<Option<Vec<R>>>;
}
pub struct HlSystemTransactionExt<Eth: EthWrapper> {
eth_api: Eth,
_marker: PhantomData<Eth>,
}
impl<Eth: EthWrapper> HlSystemTransactionExt<Eth> {
pub fn new(eth_api: Eth) -> Self {
Self { eth_api, _marker: PhantomData }
}
async fn get_system_txs_by_block_id(
&self,
block_id: BlockId,
) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>>
where
jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
if let Some(block) = self.eth_api.recovered_block(block_id).await? {
let block_hash = block.hash();
let block_number = block.number();
let base_fee_per_gas = block.base_fee_per_gas();
let system_txs = block
.transactions_with_sender()
.enumerate()
.filter_map(|(index, (signer, tx))| {
if tx.is_system_transaction() {
let tx_info = TransactionInfo {
hash: Some(*tx.tx_hash()),
block_hash: Some(block_hash),
block_number: Some(block_number),
base_fee: base_fee_per_gas,
index: Some(index as u64),
};
self.eth_api
.tx_resp_builder()
.fill(tx.clone().with_signer(*signer), tx_info)
.ok()
} else {
None
}
})
.collect();
Ok(Some(system_txs))
} else {
Ok(None)
}
}
async fn get_system_txs_receipts_by_block_id(
&self,
block_id: BlockId,
) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>>
where
jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
if let Some((block, receipts)) =
EthBlocks::load_block_and_receipts(&self.eth_api, block_id).await?
{
let block_number = block.number;
let base_fee = block.base_fee_per_gas;
let block_hash = block.hash();
let excess_blob_gas = block.excess_blob_gas;
let timestamp = block.timestamp;
let mut gas_used = 0;
let mut next_log_index = 0;
let mut inputs = Vec::new();
for (idx, (tx, receipt)) in
block.transactions_recovered().zip(receipts.iter()).enumerate()
{
if receipt.cumulative_gas_used() != 0 {
break;
}
let meta = TransactionMeta {
tx_hash: *tx.tx_hash(),
index: idx as u64,
block_hash,
block_number,
base_fee,
excess_blob_gas,
timestamp,
};
let input = ConvertReceiptInput {
receipt: Cow::Borrowed(receipt),
tx,
gas_used: receipt.cumulative_gas_used() - gas_used,
next_log_index,
meta,
};
gas_used = receipt.cumulative_gas_used();
next_log_index += receipt.logs().len();
inputs.push(input);
}
let receipts = self.eth_api.tx_resp_builder().convert_receipts(inputs)?;
Ok(Some(receipts))
} else {
Ok(None)
}
}
}
#[async_trait]
impl<Eth: EthWrapper>
EthSystemTransactionApiServer<RpcTransaction<Eth::NetworkTypes>, RpcReceipt<Eth::NetworkTypes>>
for HlSystemTransactionExt<Eth>
where
jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
/// Returns the system transactions for a given block hash.
/// Semi-compliance with the `eth_getSystemTxsByBlockHash` RPC method introduced by hl-node.
/// https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/hyperevm/json-rpc
///
/// NOTE: Method name differs from hl-node because we retrieve transaction data from EVM
/// (signature recovery for 'from' address, EVM hash calculation) rather than HyperCore.
async fn get_evm_system_txs_by_block_hash(
&self,
hash: B256,
) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?hash, "Serving eth_getEvmSystemTxsByBlockHash");
match self.get_system_txs_by_block_id(BlockId::Hash(hash.into())).await {
Ok(txs) => Ok(txs),
// hl-node returns none if the block is not found
Err(_) => Ok(None),
}
}
/// Returns the system transactions for a given block number, or the latest block if no block
/// number is provided. Semi-compliance with the `eth_getSystemTxsByBlockNumber` RPC method
/// introduced by hl-node. https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/hyperevm/json-rpc
///
/// NOTE: Method name differs from hl-node because we retrieve transaction data from EVM
/// (signature recovery for 'from' address, EVM hash calculation) rather than HyperCore.
async fn get_evm_system_txs_by_block_number(
&self,
id: Option<BlockId>,
) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?id, "Serving eth_getEvmSystemTxsByBlockNumber");
match self.get_system_txs_by_block_id(id.unwrap_or_default()).await? {
Some(txs) => Ok(Some(txs)),
None => {
// hl-node returns an error if the block is not found
Err(ErrorObject::owned(
INTERNAL_ERROR_CODE,
format!("invalid block height: {id:?}"),
Some(()),
))
}
}
}
/// Returns the receipts for the system transactions for a given block hash.
async fn get_evm_system_txs_receipts_by_block_hash(
&self,
hash: B256,
) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?hash, "Serving eth_getEvmSystemTxsReceiptsByBlockHash");
match self.get_system_txs_receipts_by_block_id(BlockId::Hash(hash.into())).await {
Ok(receipts) => Ok(receipts),
// hl-node returns none if the block is not found
Err(_) => Ok(None),
}
}
/// Returns the receipts for the system transactions for a given block number, or the latest
/// block if no block number is provided.
async fn get_evm_system_txs_receipts_by_block_number(
&self,
block_id: Option<BlockId>,
) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?block_id, "Serving eth_getEvmSystemTxsReceiptsByBlockNumber");
match self.get_system_txs_receipts_by_block_id(block_id.unwrap_or_default()).await? {
Some(receipts) => Ok(Some(receipts)),
None => Err(ErrorObject::owned(
INTERNAL_ERROR_CODE,
format!("invalid block height: {block_id:?}"),
Some(()),
)),
}
}
}
pub struct HlNodeFilterHttp<Eth: EthWrapper> {
filter: Arc<EthFilter<Eth>>,
provider: Arc<Eth::Provider>,
@ -80,19 +298,16 @@ impl<Eth: EthWrapper> HlNodeFilterHttp<Eth> {
impl<Eth: EthWrapper> EthFilterApiServer<RpcTransaction<Eth::NetworkTypes>>
for HlNodeFilterHttp<Eth>
{
/// Handler for `eth_newFilter`
async fn new_filter(&self, filter: Filter) -> RpcResult<FilterId> {
trace!(target: "rpc::eth", "Serving eth_newFilter");
self.filter.new_filter(filter).await
}
/// Handler for `eth_newBlockFilter`
async fn new_block_filter(&self) -> RpcResult<FilterId> {
trace!(target: "rpc::eth", "Serving eth_newBlockFilter");
self.filter.new_block_filter().await
}
/// Handler for `eth_newPendingTransactionFilter`
async fn new_pending_transaction_filter(
&self,
kind: Option<PendingTransactionFilterKind>,
@ -101,7 +316,6 @@ impl<Eth: EthWrapper> EthFilterApiServer<RpcTransaction<Eth::NetworkTypes>>
self.filter.new_pending_transaction_filter(kind).await
}
/// Handler for `eth_getFilterChanges`
async fn filter_changes(
&self,
id: FilterId,
@ -110,31 +324,20 @@ impl<Eth: EthWrapper> EthFilterApiServer<RpcTransaction<Eth::NetworkTypes>>
self.filter.filter_changes(id).await.map_err(ErrorObject::from)
}
/// Returns an array of all logs matching filter with given id.
///
/// Returns an error if no matching log filter exists.
///
/// Handler for `eth_getFilterLogs`
async fn filter_logs(&self, id: FilterId) -> RpcResult<Vec<Log>> {
trace!(target: "rpc::eth", "Serving eth_getFilterLogs");
self.filter.filter_logs(id).await.map_err(ErrorObject::from)
}
/// Handler for `eth_uninstallFilter`
async fn uninstall_filter(&self, id: FilterId) -> RpcResult<bool> {
trace!(target: "rpc::eth", "Serving eth_uninstallFilter");
self.filter.uninstall_filter(id).await
}
/// Returns logs matching given filter object.
///
/// Handler for `eth_getLogs`
async fn logs(&self, filter: Filter) -> RpcResult<Vec<Log>> {
trace!(target: "rpc::eth", "Serving eth_getLogs");
let logs = EthFilterApiServer::logs(&*self.filter, filter).await?;
let provider = self.provider.clone();
Ok(logs.into_iter().filter_map(|log| adjust_log::<Eth>(log, &provider)).collect())
Ok(logs.into_iter().filter_map(|log| adjust_log::<Eth>(log, &self.provider)).collect())
}
}
@ -155,10 +358,10 @@ impl<Eth: EthWrapper> HlNodeFilterWs<Eth> {
}
#[async_trait]
impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>>
for HlNodeFilterWs<Eth>
impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>> for HlNodeFilterWs<Eth>
where
jsonrpsee_types::error::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
/// Handler for `eth_subscribe`
async fn subscribe(
&self,
pending: PendingSubscriptionSink,
@ -166,16 +369,12 @@ impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>>
params: Option<Params>,
) -> jsonrpsee::core::SubscriptionResult {
let sink = pending.accept().await?;
let pubsub = self.pubsub.clone();
let provider = self.provider.clone();
let (pubsub, provider) = (self.pubsub.clone(), self.provider.clone());
self.subscription_task_spawner.spawn(Box::pin(async move {
if kind == SubscriptionKind::Logs {
// if no params are provided, used default filter params
let filter = match params {
Some(Params::Logs(filter)) => *filter,
Some(Params::Bool(_)) => {
return;
}
Some(Params::Logs(f)) => *f,
Some(Params::Bool(_)) => return,
_ => Default::default(),
};
let _ = pipe_from_stream(
@ -185,93 +384,42 @@ impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>>
.await;
} else {
let _ = pubsub.handle_accepted(sink, kind, params).await;
};
}
}));
Ok(())
}
}
fn adjust_log<Eth: EthWrapper>(mut log: Log, provider: &Eth::Provider) -> Option<Log> {
let transaction_index = log.transaction_index?;
let log_index = log.log_index?;
let (tx_idx, log_idx) = (log.transaction_index?, log.log_index?);
let receipts = provider.receipts_by_block(log.block_number?.into()).unwrap()?;
// System transactions are always at the beginning of the block,
// so we can use the transaction index to determine if the log is from a system transaction,
// and if it is, we can exclude it.
//
// For non-system transactions, we can just return the log as is, and the client will
// adjust the transaction index accordingly.
let mut system_tx_count = 0u64;
let mut system_tx_logs_count = 0u64;
let (mut sys_tx_count, mut sys_log_count) = (0u64, 0u64);
for receipt in receipts {
let is_system_tx = receipt.cumulative_gas_used() == 0;
if is_system_tx {
system_tx_count += 1;
system_tx_logs_count += receipt.logs().len() as u64;
if receipt.cumulative_gas_used() == 0 {
sys_tx_count += 1;
sys_log_count += receipt.logs().len() as u64;
}
}
if system_tx_count > transaction_index {
if sys_tx_count > tx_idx {
return None;
}
log.transaction_index = Some(transaction_index - system_tx_count);
log.log_index = Some(log_index - system_tx_logs_count);
log.transaction_index = Some(tx_idx - sys_tx_count);
log.log_index = Some(log_idx - sys_log_count);
Some(log)
}
/// Helper to convert a serde error into an [`ErrorObject`]
#[derive(Debug, thiserror::Error)]
#[error("Failed to serialize subscription item: {0}")]
pub struct SubscriptionSerializeError(#[from] serde_json::Error);
impl SubscriptionSerializeError {
const fn new(err: serde_json::Error) -> Self {
Self(err)
}
}
impl From<SubscriptionSerializeError> for ErrorObject<'static> {
fn from(value: SubscriptionSerializeError) -> Self {
internal_rpc_err(value.to_string())
}
}
async fn pipe_from_stream<T, St>(
async fn pipe_from_stream<T: Serialize, St: Stream<Item = T> + Unpin>(
sink: SubscriptionSink,
mut stream: St,
) -> Result<(), ErrorObject<'static>>
where
St: Stream<Item = T> + Unpin,
T: Serialize,
{
) -> Result<(), ErrorObject<'static>> {
loop {
tokio::select! {
_ = sink.closed() => {
// connection dropped
break Ok(())
},
_ = sink.closed() => break Ok(()),
maybe_item = stream.next() => {
let item = match maybe_item {
Some(item) => item,
None => {
// stream ended
break Ok(())
},
};
let msg = SubscriptionMessage::new(
sink.method_name(),
sink.subscription_id(),
&item
).map_err(SubscriptionSerializeError::new)?;
if sink.send(msg).await.is_err() {
break Ok(());
}
let Some(item) = maybe_item else { break Ok(()) };
let msg = SubscriptionMessage::new(sink.method_name(), sink.subscription_id(), &item)
.map_err(SubscriptionSerializeError::from)?;
if sink.send(msg).await.is_err() { break Ok(()); }
}
}
}
@ -321,10 +469,6 @@ macro_rules! engine_span {
};
}
fn is_system_tx(tx: &TransactionSigned) -> bool {
tx.is_system_transaction()
}
fn adjust_block<Eth: EthWrapper>(
recovered_block: &RpcBlock<Eth::NetworkTypes>,
eth_api: &Eth,
@ -335,6 +479,11 @@ fn adjust_block<Eth: EthWrapper>(
new_block.transactions = match new_block.transactions {
BlockTransactions::Full(mut transactions) => {
transactions.drain(..system_tx_count);
transactions.iter_mut().for_each(|tx| {
if let Some(idx) = &mut tx.transaction_index {
*idx -= system_tx_count as u64;
}
});
BlockTransactions::Full(transactions)
}
BlockTransactions::Hashes(mut hashes) => {
@ -410,8 +559,8 @@ async fn adjust_transaction_receipt<Eth: EthWrapper>(
) -> Result<Option<RpcReceipt<Eth::NetworkTypes>>, Eth::Error> {
match eth_api.load_transaction_and_receipt(tx_hash).await? {
Some((_, meta, _)) => {
// LoadReceipt::block_transaction_receipt loads the block again, so loading blocks again doesn't hurt performance much
info!("block hash: {:?}", meta.block_hash);
// LoadReceipt::block_transaction_receipt loads the block again, so loading blocks again
// doesn't hurt performance much
let Some((system_tx_count, block_receipts)) =
adjust_block_receipts(meta.block_hash.into(), eth_api).await?
else {
@ -423,10 +572,12 @@ async fn adjust_transaction_receipt<Eth: EthWrapper>(
}
}
// This function assumes that `block_id` is already validated by the caller.
fn system_tx_count_for_block<Eth: EthWrapper>(eth_api: &Eth, block_id: BlockId) -> usize {
let provider = eth_api.provider();
let block = provider.block_by_id(block_id).unwrap().unwrap();
let system_tx_count = block.body.transactions().iter().filter(|tx| is_system_tx(tx)).count();
let system_tx_count =
block.body.transactions().iter().filter(|tx| tx.is_system_transaction()).count();
system_tx_count
}
@ -464,8 +615,9 @@ where
let res =
self.eth_api.block_transaction_count_by_hash(hash).instrument(engine_span!()).await?;
Ok(res.map(|count| {
count
- U256::from(system_tx_count_for_block(&*self.eth_api, BlockId::Hash(hash.into())))
let sys_tx_count =
system_tx_count_for_block(&*self.eth_api, BlockId::Hash(hash.into()));
count - U256::from(sys_tx_count)
}))
}
@ -507,7 +659,7 @@ where
}
pub fn install_hl_node_compliance<Node, EthApi>(
ctx: RpcContext<Node, EthApi>,
ctx: &mut RpcContext<Node, EthApi>,
) -> Result<(), eyre::Error>
where
Node: FullNodeComponents,
@ -534,5 +686,9 @@ where
ctx.modules.replace_configured(
HlNodeBlockFilterHttp::new(Arc::new(ctx.registry.eth_api().clone())).into_rpc(),
)?;
ctx.modules
.merge_configured(HlSystemTransactionExt::new(ctx.registry.eth_api().clone()).into_rpc())?;
Ok(())
}

src/addons/mod.rs (new file, 3 lines)

@ -0,0 +1,3 @@
pub mod call_forwarder;
pub mod hl_node_compliance;
pub mod tx_forwarder;

src/addons/tx_forwarder.rs

@ -37,7 +37,7 @@ impl EthForwarderExt {
Self { client }
}
fn from_client_error(e: ClientError, internal_error_prefix: &str) -> ErrorObject {
fn from_client_error(e: ClientError, internal_error_prefix: &str) -> ErrorObject<'static> {
match e {
ClientError::Call(e) => e,
_ => ErrorObject::owned(

src/chainspec/hl.rs

@ -7,7 +7,6 @@ use std::sync::LazyLock;
static GENESIS_HASH: B256 =
b256!("d8fcc13b6a195b88b7b2da3722ff6cad767b13a8c1e9ffb1c73aa9d216d895f0");
/// Dev hardforks
pub static HL_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| {
ChainHardforks::new(vec![
(EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),

src/chainspec/mod.rs

@ -1,8 +1,7 @@
//! Chain specification for HyperEVM.
pub mod hl;
pub mod parser;
use crate::hardforks::{hl::HlHardfork, HlHardforks};
use crate::hardforks::HlHardforks;
use alloy_consensus::Header;
use alloy_eips::eip7840::BlobParams;
use alloy_genesis::Genesis;
@ -13,15 +12,13 @@ use reth_chainspec::{
};
use reth_discv4::NodeRecord;
use reth_evm::eth::spec::EthExecutorSpec;
use std::{fmt::Display, sync::Arc};
use std::fmt::Display;
pub const MAINNET_CHAIN_ID: u64 = 999;
pub const TESTNET_CHAIN_ID: u64 = 998;
/// Hl chain spec type.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct HlChainSpec {
/// [`ChainSpec`].
pub inner: ChainSpec,
}
@ -40,10 +37,6 @@ impl EthChainSpec for HlChainSpec {
self.inner.chain()
}
fn base_fee_params_at_block(&self, block_number: u64) -> BaseFeeParams {
self.inner.base_fee_params_at_block(block_number)
}
fn base_fee_params_at_timestamp(&self, timestamp: u64) -> BaseFeeParams {
self.inner.base_fee_params_at_timestamp(timestamp)
}
@ -75,10 +68,6 @@ impl EthChainSpec for HlChainSpec {
fn bootnodes(&self) -> Option<Vec<NodeRecord>> {
self.inner.bootnodes()
}
fn is_optimism(&self) -> bool {
false
}
}
impl Hardforks for HlChainSpec {
@ -105,23 +94,13 @@ impl Hardforks for HlChainSpec {
}
}
impl From<ChainSpec> for HlChainSpec {
fn from(value: ChainSpec) -> Self {
Self { inner: value }
}
}
impl EthereumHardforks for HlChainSpec {
fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition {
self.inner.ethereum_fork_activation(fork)
}
}
impl HlHardforks for HlChainSpec {
fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition {
self.fork(fork)
}
}
impl HlHardforks for HlChainSpec {}
impl EthExecutorSpec for HlChainSpec {
fn deposit_contract_address(&self) -> Option<Address> {
@ -129,18 +108,6 @@ impl EthExecutorSpec for HlChainSpec {
}
}
impl From<HlChainSpec> for ChainSpec {
fn from(value: HlChainSpec) -> Self {
value.inner
}
}
impl HlHardforks for Arc<HlChainSpec> {
fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition {
self.as_ref().hl_fork_activation(fork)
}
}
impl HlChainSpec {
pub const MAINNET_RPC_URL: &str = "https://rpc.hyperliquid.xyz/evm";
pub const TESTNET_RPC_URL: &str = "https://rpc.hyperliquid-testnet.xyz/evm";

src/evm/mod.rs

@ -1,4 +1,3 @@
pub mod api;
mod handler;
pub mod spec;
pub mod transaction;

src/evm/spec.rs

@ -1,20 +1,15 @@
use revm::primitives::hardfork::SpecId;
use std::str::FromStr;
#[repr(u8)]
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum HlSpecId {
/// Placeholder for evm cancun fork
#[default]
V1, // V1
V1,
}
impl HlSpecId {
pub const fn is_enabled_in(self, other: HlSpecId) -> bool {
other as u8 <= self as u8
}
/// Converts the [`HlSpecId`] into a [`SpecId`].
pub const fn into_eth_spec(self) -> SpecId {
match self {
Self::V1 => SpecId::CANCUN,
@ -23,31 +18,8 @@ impl HlSpecId {
}
impl From<HlSpecId> for SpecId {
/// Converts the [`HlSpecId`] into a [`SpecId`].
fn from(spec: HlSpecId) -> Self {
spec.into_eth_spec()
}
}
/// String identifiers for HL hardforks
pub mod name {
pub const V1: &str = "V1";
}
impl FromStr for HlSpecId {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s {
name::V1 => Self::V1,
_ => return Err(format!("Unknown HL spec: {s}")),
})
}
}
impl From<HlSpecId> for &'static str {
fn from(spec_id: HlSpecId) -> Self {
match spec_id {
HlSpecId::V1 => name::V1,
}
}
}

src/evm/transaction.rs

@ -124,12 +124,13 @@ impl FromRecoveredTx<TransactionSigned> for HlTxEnv<TxEnv> {
impl FromTxWithEncoded<TransactionSigned> for HlTxEnv<TxEnv> {
fn from_encoded_tx(tx: &TransactionSigned, sender: Address, _encoded: Bytes) -> Self {
use reth_primitives::Transaction;
let base = match tx.clone().into_inner().into_typed_transaction() {
reth_primitives::Transaction::Legacy(tx) => TxEnv::from_recovered_tx(&tx, sender),
reth_primitives::Transaction::Eip2930(tx) => TxEnv::from_recovered_tx(&tx, sender),
reth_primitives::Transaction::Eip1559(tx) => TxEnv::from_recovered_tx(&tx, sender),
reth_primitives::Transaction::Eip4844(tx) => TxEnv::from_recovered_tx(&tx, sender),
reth_primitives::Transaction::Eip7702(tx) => TxEnv::from_recovered_tx(&tx, sender),
Transaction::Legacy(tx) => TxEnv::from_recovered_tx(&tx, sender),
Transaction::Eip2930(tx) => TxEnv::from_recovered_tx(&tx, sender),
Transaction::Eip1559(tx) => TxEnv::from_recovered_tx(&tx, sender),
Transaction::Eip4844(tx) => TxEnv::from_recovered_tx(&tx, sender),
Transaction::Eip7702(tx) => TxEnv::from_recovered_tx(&tx, sender),
};
Self { base }

src/hardforks/hl.rs

@ -13,88 +13,5 @@ hardfork!(
HlHardfork {
/// Initial version
V1,
/// block.number bugfix
V2,
/// gas mismatch bugfix
V3,
}
);
impl HlHardfork {
/// Retrieves the activation block for the specified hardfork on the given chain.
pub fn activation_block<H: Hardfork>(self, fork: H, chain: Chain) -> Option<u64> {
if chain == Chain::from_named(NamedChain::Hyperliquid) {
return Self::hl_mainnet_activation_block(fork);
}
None
}
/// Retrieves the activation timestamp for the specified hardfork on the given chain.
pub fn activation_timestamp<H: Hardfork>(self, fork: H, chain: Chain) -> Option<u64> {
None
}
/// Retrieves the activation block for the specified hardfork on the HyperLiquid mainnet.
pub fn hl_mainnet_activation_block<H: Hardfork>(fork: H) -> Option<u64> {
match_hardfork(
fork,
|fork| match fork {
EthereumHardfork::Frontier |
EthereumHardfork::Homestead |
EthereumHardfork::Tangerine |
EthereumHardfork::SpuriousDragon |
EthereumHardfork::Byzantium |
EthereumHardfork::Constantinople |
EthereumHardfork::Petersburg |
EthereumHardfork::Istanbul |
EthereumHardfork::MuirGlacier |
EthereumHardfork::Berlin |
EthereumHardfork::London |
EthereumHardfork::Shanghai |
EthereumHardfork::Cancun => Some(0),
_ => None,
},
|fork| match fork {
Self::V1 | Self::V2 | Self::V3 => Some(0),
_ => None,
},
)
}
/// Hl mainnet list of hardforks.
pub fn hl_mainnet() -> ChainHardforks {
ChainHardforks::new(vec![
(EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::London.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Shanghai.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Cancun.boxed(), ForkCondition::Block(0)),
(Self::V1.boxed(), ForkCondition::Block(0)),
(Self::V2.boxed(), ForkCondition::Block(0)),
(Self::V3.boxed(), ForkCondition::Block(0)),
])
}
}
/// Match helper method since it's not possible to match on `dyn Hardfork`
fn match_hardfork<H, HF, HHF>(fork: H, hardfork_fn: HF, hl_hardfork_fn: HHF) -> Option<u64>
where
H: Hardfork,
HF: Fn(&EthereumHardfork) -> Option<u64>,
HHF: Fn(&HlHardfork) -> Option<u64>,
{
let fork: &dyn Any = &fork;
if let Some(fork) = fork.downcast_ref::<EthereumHardfork>() {
return hardfork_fn(fork);
}
fork.downcast_ref::<HlHardfork>().and_then(hl_hardfork_fn)
}

src/hardforks/mod.rs

@ -1,13 +1,14 @@
//! Hard forks of hl protocol.
//! Hard forks of HyperEVM.
#![allow(unused)]
use hl::HlHardfork;
use reth_chainspec::{EthereumHardforks, ForkCondition};
pub mod hl;
use hl::HlHardfork;
use reth_chainspec::{EthereumHardforks, ForkCondition};
use std::sync::Arc;
/// Extends [`EthereumHardforks`] with hl helper methods.
pub trait HlHardforks: EthereumHardforks {
/// Retrieves [`ForkCondition`] by an [`HlHardfork`]. If `fork` is not present, returns
/// [`ForkCondition::Never`].
fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition;
}
///
/// Currently a placeholder for future use.
pub trait HlHardforks: EthereumHardforks {}
impl<T: HlHardforks> HlHardforks for Arc<T> {}

View File

@ -1,11 +1,10 @@
pub mod call_forwarder;
pub mod addons;
pub mod chainspec;
pub mod consensus;
mod evm;
mod hardforks;
pub mod hl_node_compliance;
pub mod node;
pub mod pseudo_peer;
pub mod tx_forwarder;
pub mod version;
pub use node::primitives::{HlBlock, HlBlockBody, HlPrimitives};

View File

@ -4,15 +4,17 @@ use clap::Parser;
use reth::builder::{NodeBuilder, NodeHandle, WithLaunchContext};
use reth_db::DatabaseEnv;
use reth_hl::{
call_forwarder::{self, CallForwarderApiServer},
addons::{
call_forwarder::{self, CallForwarderApiServer},
hl_node_compliance::install_hl_node_compliance,
tx_forwarder::{self, EthForwarderApiServer},
},
chainspec::{parser::HlChainSpecParser, HlChainSpec},
hl_node_compliance::install_hl_node_compliance,
node::{
cli::{Cli, HlNodeArgs},
storage::tables::Tables,
HlNode,
},
tx_forwarder::{self, EthForwarderApiServer},
};
use tracing::info;
@ -29,6 +31,9 @@ fn main() -> eyre::Result<()> {
std::env::set_var("RUST_BACKTRACE", "1");
}
// Initialize custom version metadata before parsing CLI so --version uses reth-hl values
reth_hl::version::init_reth_hl_version();
Cli::<HlChainSpecParser, HlNodeArgs>::parse().run(
|builder: WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, HlChainSpec>>,
ext: HlNodeArgs| async move {
@ -37,7 +42,7 @@ fn main() -> eyre::Result<()> {
let (node, engine_handle_tx) = HlNode::new(ext.block_source_args.parse().await?);
let NodeHandle { node, node_exit_future: exit_future } = builder
.node(node)
.extend_rpc_modules(move |ctx| {
.extend_rpc_modules(move |mut ctx| {
let upstream_rpc_url =
ext.upstream_rpc_url.unwrap_or_else(|| default_upstream_rpc_url.to_owned());
@ -58,10 +63,15 @@ fn main() -> eyre::Result<()> {
}
if ext.hl_node_compliant {
install_hl_node_compliance(ctx)?;
install_hl_node_compliance(&mut ctx)?;
info!("hl-node compliant mode enabled");
}
if !ext.experimental_eth_get_proof {
ctx.modules.remove_method_from_configured("eth_getProof");
info!("eth_getProof is disabled by default");
}
Ok(())
})
.apply(|builder| {

View File

@ -55,6 +55,24 @@ pub struct HlNodeArgs {
/// This is useful when read precompiles are needed for gas estimation.
#[arg(long, env = "FORWARD_CALL")]
pub forward_call: bool,
/// Experimental: enables the eth_getProof RPC method.
///
/// Note: Due to the state root difference, trie updates* may not function correctly in all
/// scenarios. For example, incremental root updates are not possible, which can cause
/// eth_getProof to malfunction in some cases.
///
/// This limitation does not impact normal node functionality, except for state root (which is
/// unused) and eth_getProof. The archival state is maintained by block order, not by trie
/// updates. As a precaution, nanoreth disables eth_getProof by default to prevent
/// potential issues.
///
/// Use --experimental-eth-get-proof to forcibly enable eth_getProof, assuming trie updates are
/// working as intended. Enabling this by default is tracked in #15.
///
/// * Refers to the Merkle trie used for eth_getProof and state root, not actual state values.
#[arg(long, env = "EXPERIMENTAL_ETH_GET_PROOF")]
pub experimental_eth_get_proof: bool,
}
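A usage sketch for the flag (hedged: the `reth-hl node` invocation is assumed from the CLI wiring above, not confirmed here):
# enable eth_getProof despite the known trie-update caveat
reth-hl node --experimental-eth-get-proof
# equivalently, via the environment
EXPERIMENTAL_ETH_GET_PROOF=true reth-hl node
When the flag is absent, the launcher above removes eth_getProof from the configured RPC modules, so clients get a method-not-found error instead of a potentially wrong proof.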
/// The main reth_hl cli interface.

View File

@ -1,22 +1,9 @@
use std::sync::Arc;
use crate::{
node::{rpc::engine_api::payload::HlPayloadTypes, HlNode},
HlBlock, HlPrimitives,
};
use crate::{HlBlock, HlPrimitives};
use alloy_eips::eip7685::Requests;
use alloy_primitives::U256;
use reth::{
api::FullNodeTypes,
builder::{components::PayloadServiceBuilder, BuilderContext},
payload::{PayloadBuilderHandle, PayloadServiceCommand},
transaction_pool::TransactionPool,
};
use reth_evm::ConfigureEvm;
use reth_payload_primitives::BuiltPayload;
use reth_primitives::SealedBlock;
use tokio::sync::{broadcast, mpsc};
use tracing::warn;
use std::sync::Arc;
/// Built payload for Hl. This is similar to [`EthBuiltPayload`] but without sidecars, as those
/// are included in [`HlBlock`].
@ -45,73 +32,3 @@ impl BuiltPayload for HlBuiltPayload {
self.requests.clone()
}
}
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
pub struct HlPayloadServiceBuilder;
impl<Node, Pool, Evm> PayloadServiceBuilder<Node, Pool, Evm> for HlPayloadServiceBuilder
where
Node: FullNodeTypes<Types = HlNode>,
Pool: TransactionPool,
Evm: ConfigureEvm,
{
async fn spawn_payload_builder_service(
self,
ctx: &BuilderContext<Node>,
_pool: Pool,
_evm_config: Evm,
) -> eyre::Result<PayloadBuilderHandle<HlPayloadTypes>> {
let (tx, mut rx) = mpsc::unbounded_channel();
ctx.task_executor().spawn_critical("payload builder", async move {
let mut subscriptions = Vec::new();
while let Some(message) = rx.recv().await {
match message {
PayloadServiceCommand::Subscribe(tx) => {
let (events_tx, events_rx) = broadcast::channel(100);
// Retain senders to make sure that channels are not getting closed
subscriptions.push(events_tx);
let _ = tx.send(events_rx);
}
message => warn!(?message, "Noop payload service received a message"),
}
}
});
Ok(PayloadBuilderHandle::new(tx))
}
}
// impl From<EthBuiltPayload> for HlBuiltPayload {
// fn from(value: EthBuiltPayload) -> Self {
// let EthBuiltPayload { id, block, fees, sidecars, requests } = value;
// HlBuiltPayload {
// id,
// block: block.into(),
// fees,
// requests,
// }
// }
// }
// pub struct HlPayloadBuilder<Inner> {
// inner: Inner,
// }
// impl<Inner> PayloadBuilder for HlPayloadBuilder<Inner>
// where
// Inner: PayloadBuilder<BuiltPayload = EthBuiltPayload>,
// {
// type Attributes = Inner::Attributes;
// type BuiltPayload = HlBuiltPayload;
// type Error = Inner::Error;
// fn try_build(
// &self,
// args: BuildArguments<Self::Attributes, Self::BuiltPayload>,
// ) -> Result<BuildOutcome<Self::BuiltPayload>, PayloadBuilderError> {
// let outcome = self.inner.try_build(args)?;
// }
// }

View File

@ -71,10 +71,10 @@ where
let timestamp = evm_env.block_env.timestamp.saturating_to();
// Filter out system tx receipts
let transactions_for_root: Vec<TransactionSigned> =
transactions.iter().filter(|t| !is_system_transaction(t)).cloned().collect::<Vec<_>>();
let receipts_for_root: Vec<Receipt> =
receipts.iter().filter(|r| r.cumulative_gas_used() != 0).cloned().collect::<Vec<_>>();
let transactions_for_root: Vec<_> =
transactions.iter().filter(|t| !is_system_transaction(t)).cloned().collect();
let receipts_for_root: Vec<_> =
receipts.iter().filter(|r| r.cumulative_gas_used() != 0).cloned().collect();
let transactions_root = proofs::calculate_transaction_root(&transactions_for_root);
let receipts_root = Receipt::calculate_receipt_root_no_memo(&receipts_for_root);
@ -295,7 +295,6 @@ where
// configure evm env based on parent block
let mut cfg_env =
CfgEnv::new().with_chain_id(self.chain_spec().chain().id()).with_spec(spec);
if let Some(blob_params) = &blob_params {
cfg_env.set_max_blobs_per_tx(blob_params.max_blobs_per_tx);
}
@ -376,10 +375,6 @@ where
block: &'a SealedBlock<BlockTy<Self::Primitives>>,
) -> ExecutionCtxFor<'a, Self> {
let block_body = block.body();
let extras = HlExtras {
read_precompile_calls: block_body.read_precompile_calls.clone(),
highest_precompile_address: block_body.highest_precompile_address,
};
HlBlockExecutionCtx {
ctx: EthBlockExecutionCtx {
parent_hash: block.header().parent_hash,
@ -387,7 +382,10 @@ where
ommers: &block.body().ommers,
withdrawals: block.body().withdrawals.as_ref().map(Cow::Borrowed),
},
extras,
extras: HlExtras {
read_precompile_calls: block_body.read_precompile_calls.clone(),
highest_precompile_address: block_body.highest_precompile_address,
},
}
}
@ -403,8 +401,7 @@ where
ommers: &[],
withdrawals: attributes.withdrawals.map(Cow::Owned),
},
// TODO: hacky, double check if this is correct
extras: HlExtras::default(),
extras: HlExtras::default(), // TODO: hacky, double check if this is correct
}
}
}
@ -416,10 +413,6 @@ impl ConfigureEngineEvm<HlExecutionData> for HlEvmConfig {
fn context_for_payload<'a>(&self, payload: &'a HlExecutionData) -> ExecutionCtxFor<'a, Self> {
let block = &payload.0;
let extras = HlExtras {
read_precompile_calls: block.body.read_precompile_calls.clone(),
highest_precompile_address: block.body.highest_precompile_address,
};
HlBlockExecutionCtx {
ctx: EthBlockExecutionCtx {
parent_hash: block.header.parent_hash,
@ -427,7 +420,10 @@ impl ConfigureEngineEvm<HlExecutionData> for HlEvmConfig {
ommers: &block.body.ommers,
withdrawals: block.body.withdrawals.as_ref().map(Cow::Borrowed),
},
extras,
extras: HlExtras {
read_precompile_calls: block.body.read_precompile_calls.clone(),
highest_precompile_address: block.body.highest_precompile_address,
},
}
}

View File

@ -4,7 +4,7 @@ use crate::{
hardforks::HlHardforks,
node::{
primitives::TransactionSigned,
types::{ReadPrecompileInput, ReadPrecompileResult},
types::{HlExtras, ReadPrecompileInput, ReadPrecompileResult},
},
};
use alloy_consensus::{Transaction, TxReceipt};
@ -102,7 +102,7 @@ where
{
/// Creates a new HlBlockExecutor.
pub fn new(mut evm: EVM, ctx: HlBlockExecutionCtx<'a>, spec: Spec, receipt_builder: R) -> Self {
apply_precompiles(&mut evm, &ctx);
apply_precompiles(&mut evm, &ctx.extras);
Self { spec, evm, gas_used: 0, receipts: vec![], receipt_builder, ctx }
}
@ -155,7 +155,7 @@ where
type Evm = E;
fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> {
apply_precompiles(&mut self.evm, &self.ctx);
apply_precompiles(&mut self.evm, &self.ctx.extras);
self.deploy_corewriter_contract()?;
Ok(())
@ -240,10 +240,9 @@ where
}
}
fn apply_precompiles<'a, DB, EVM>(evm: &mut EVM, ctx: &HlBlockExecutionCtx<'a>)
pub fn apply_precompiles<EVM>(evm: &mut EVM, extras: &HlExtras)
where
EVM: Evm<DB = &'a mut State<DB>, Precompiles = PrecompilesMap>,
DB: Database + 'a,
EVM: Evm<Precompiles = PrecompilesMap>,
{
let block_number = evm.block().number;
let precompiles_mut = evm.precompiles_mut();
@ -255,9 +254,7 @@ where
precompiles_mut.apply_precompile(&address, |_| None);
}
}
for (address, precompile) in
ctx.extras.read_precompile_calls.clone().unwrap_or_default().0.iter()
{
for (address, precompile) in extras.read_precompile_calls.clone().unwrap_or_default().0.iter() {
let precompile = precompile.clone();
precompiles_mut.apply_precompile(address, |_| {
let precompiles_map: HashMap<ReadPrecompileInput, ReadPrecompileResult> =
@ -271,7 +268,7 @@ where
// NOTE: This is adapted from hyperliquid-dex/hyper-evm-sync#5
const WARM_PRECOMPILES_BLOCK_NUMBER: u64 = 8_197_684;
if block_number >= U256::from(WARM_PRECOMPILES_BLOCK_NUMBER) {
fill_all_precompiles(ctx, precompiles_mut);
fill_all_precompiles(extras, precompiles_mut);
}
}
@ -279,9 +276,9 @@ fn address_to_u64(address: Address) -> u64 {
address.into_u256().try_into().unwrap()
}
fn fill_all_precompiles<'a>(ctx: &HlBlockExecutionCtx<'a>, precompiles_mut: &mut PrecompilesMap) {
fn fill_all_precompiles(extras: &HlExtras, precompiles_mut: &mut PrecompilesMap) {
let lowest_address = 0x800;
let highest_address = ctx.extras.highest_precompile_address.map_or(0x80D, address_to_u64);
let highest_address = extras.highest_precompile_address.map_or(0x80D, address_to_u64);
for address in lowest_address..=highest_address {
let address = Address::from(U160::from(address));
precompiles_mut.apply_precompile(&address, |f| {

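Exposing apply_precompiles, and keying it on HlExtras rather than the full execution context, is what lets the RPC layer reuse it outside block execution. A minimal call sketch (assumes `evm_config`, `db`, `evm_env`, and `extras` in scope, and an EVM whose Precompiles type is PrecompilesMap, mirroring the tracing override later in this change):
// Install the block's recorded read-precompile results before replaying.
let mut evm = evm_config.evm_with_env(db, evm_env);
apply_precompiles(&mut evm, &extras);
// The EVM can now transact/trace with per-block precompile responses in place.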
View File

@ -32,6 +32,8 @@ mod factory;
mod patch;
pub mod receipt_builder;
pub use executor::apply_precompiles;
/// HL EVM implementation.
///
/// This is a wrapper type around the `revm` evm with optional [`Inspector`] (tracing)
@ -165,7 +167,6 @@ where
type EVM = HlEvmConfig;
async fn build_evm(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::EVM> {
let evm_config = HlEvmConfig::hl(ctx.chain_spec());
Ok(evm_config)
Ok(HlEvmConfig::hl(ctx.chain_spec()))
}
}

View File

@ -1,5 +1,6 @@
use crate::node::primitives::TransactionSigned;
use alloy_evm::eth::receipt_builder::{ReceiptBuilder, ReceiptBuilderCtx};
use reth_codecs::alloy::transaction::Envelope;
use reth_evm::Evm;
use reth_primitives::Receipt;

View File

@ -15,12 +15,15 @@ use crate::{
pseudo_peer::BlockSourceConfig,
};
use consensus::HlConsensusBuilder;
use engine::HlPayloadServiceBuilder;
use evm::HlExecutorBuilder;
use network::HlNetworkBuilder;
use reth::{
api::{FullNodeTypes, NodeTypes},
builder::{components::ComponentsBuilder, rpc::RpcAddOns, Node, NodeAdapter},
builder::{
components::{ComponentsBuilder, NoopPayloadServiceBuilder},
rpc::RpcAddOns,
Node, NodeAdapter,
},
};
use reth_engine_primitives::ConsensusEngineHandle;
use std::{marker::PhantomData, sync::Arc};
@ -65,7 +68,7 @@ impl HlNode {
) -> ComponentsBuilder<
Node,
HlPoolBuilder,
HlPayloadServiceBuilder,
NoopPayloadServiceBuilder,
HlNetworkBuilder,
HlExecutorBuilder,
HlConsensusBuilder,
@ -77,7 +80,7 @@ impl HlNode {
.node_types::<Node>()
.pool(HlPoolBuilder)
.executor(HlExecutorBuilder::default())
.payload(HlPayloadServiceBuilder::default())
.payload(NoopPayloadServiceBuilder::default())
.network(HlNetworkBuilder {
engine_handle_rx: self.engine_handle_rx.clone(),
block_source_config: self.block_source_config.clone(),
@ -100,7 +103,7 @@ where
type ComponentsBuilder = ComponentsBuilder<
N,
HlPoolBuilder,
HlPayloadServiceBuilder,
NoopPayloadServiceBuilder,
HlNetworkBuilder,
HlExecutorBuilder,
HlConsensusBuilder,

View File

@ -89,7 +89,6 @@ where
/// Process a new payload and return the outcome
fn new_payload(&self, block: BlockMsg, peer_id: PeerId) -> ImportFut {
let engine = self.engine.clone();
Box::pin(async move {
let sealed_block = block.block.0.block.clone().seal();
let payload = HlPayloadTypes::block_to_payload(sealed_block);
@ -107,7 +106,7 @@ where
.into(),
_ => None,
},
Err(err) => None,
Err(_) => None,
}
})
}
@ -117,15 +116,10 @@ where
let engine = self.engine.clone();
let consensus = self.consensus.clone();
let sealed_block = block.block.0.block.clone().seal();
let hash = sealed_block.hash();
let number = sealed_block.number();
let (hash, number) = (sealed_block.hash(), sealed_block.number());
Box::pin(async move {
let (head_block_hash, current_hash) = match consensus.canonical_head(hash, number) {
Ok(hash) => hash,
Err(_) => return None,
};
let (head_block_hash, _) = consensus.canonical_head(hash, number).ok()?;
let state = ForkchoiceState {
head_block_hash,
safe_block_hash: head_block_hash,
@ -146,18 +140,15 @@ where
.into(),
_ => None,
},
Err(err) => None,
Err(_) => None,
}
})
}
/// Add a new block import task to the pending imports
fn on_new_block(&mut self, block: BlockMsg, peer_id: PeerId) {
let payload_fut = self.new_payload(block.clone(), peer_id);
self.pending_imports.push(payload_fut);
let fcu_fut = self.update_fork_choice(block, peer_id);
self.pending_imports.push(fcu_fut);
self.pending_imports.push(self.new_payload(block.clone(), peer_id));
self.pending_imports.push(self.update_fork_choice(block, peer_id));
}
}
@ -176,11 +167,9 @@ where
}
// Process completed imports and send events to network
while let Poll::Ready(Some(outcome)) = this.pending_imports.poll_next_unpin(cx) {
if let Some(outcome) = outcome {
if let Err(e) = this.to_network.send(BlockImportEvent::Outcome(outcome)) {
return Poll::Ready(Err(Box::new(e)));
}
while let Poll::Ready(Some(Some(outcome))) = this.pending_imports.poll_next_unpin(cx) {
if let Err(e) = this.to_network.send(BlockImportEvent::Outcome(outcome)) {
return Poll::Ready(Err(Box::new(e)));
}
}
@ -188,22 +177,6 @@ where
}
}
pub(crate) fn collect_block(height: u64) -> Option<BlockAndReceipts> {
let ingest_dir = "/home/user/personal/evm-blocks";
let f = ((height - 1) / 1_000_000) * 1_000_000;
let s = ((height - 1) / 1_000) * 1_000;
let path = format!("{ingest_dir}/{f}/{s}/{height}.rmp.lz4");
if std::path::Path::new(&path).exists() {
let file = std::fs::File::open(path).unwrap();
let file = std::io::BufReader::new(file);
let mut decoder = lz4_flex::frame::FrameDecoder::new(file);
let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder).unwrap();
Some(blocks[0].clone())
} else {
None
}
}
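The removed helper also documents the on-disk layout shared with the local ingest sources: blocks are bucketed by million, then by thousand. A worked example with a hypothetical height, using the same arithmetic:
// height = 123_456:
//   f = ((123_456 - 1) / 1_000_000) * 1_000_000 = 0
//   s = ((123_456 - 1) / 1_000) * 1_000 = 123_000
//   path = {ingest_dir}/0/123000/123456.rmp.lz4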
#[cfg(test)]
mod tests {
use crate::chainspec::hl::hl_mainnet;
@ -277,15 +250,12 @@ mod tests {
fn chain_info(&self) -> Result<ChainInfo, ProviderError> {
unimplemented!()
}
fn best_block_number(&self) -> Result<u64, ProviderError> {
Ok(0)
}
fn last_block_number(&self) -> Result<u64, ProviderError> {
Ok(0)
}
fn block_number(&self, _hash: B256) -> Result<Option<u64>, ProviderError> {
Ok(None)
}
@ -295,7 +265,6 @@ mod tests {
fn block_hash(&self, _number: u64) -> Result<Option<B256>, ProviderError> {
Ok(Some(B256::ZERO))
}
fn canonical_hashes_range(
&self,
_start: u64,
@ -315,14 +284,12 @@ mod tests {
fn both_valid() -> Self {
Self { new_payload: PayloadStatusEnum::Valid, fcu: PayloadStatusEnum::Valid }
}
fn invalid_new_payload() -> Self {
Self {
new_payload: PayloadStatusEnum::Invalid { validation_error: "test error".into() },
fcu: PayloadStatusEnum::Valid,
}
}
fn invalid_fcu() -> Self {
Self {
new_payload: PayloadStatusEnum::Valid,
@ -342,19 +309,15 @@ mod tests {
let consensus = Arc::new(HlConsensus { provider: MockProvider });
let (to_engine, from_engine) = mpsc::unbounded_channel();
let engine_handle = ConsensusEngineHandle::new(to_engine);
handle_engine_msg(from_engine, responses).await;
let (to_import, from_network) = mpsc::unbounded_channel();
let (to_network, import_outcome) = mpsc::unbounded_channel();
let handle = ImportHandle::new(to_import, import_outcome);
let service = ImportService::new(consensus, engine_handle, from_network, to_network);
tokio::spawn(Box::pin(async move {
service.await.unwrap();
}));
Self { handle }
}

View File

@ -12,7 +12,6 @@ use crate::{
HlBlock,
};
use alloy_rlp::{Decodable, Encodable};
// use handshake::HlHandshake;
use reth::{
api::{FullNodeTypes, TxTy},
builder::{components::NetworkBuilder, BuilderContext},
@ -69,32 +68,22 @@ mod rlp {
impl<'a> From<&'a HlNewBlock> for HlNewBlockHelper<'a> {
fn from(value: &'a HlNewBlock) -> Self {
let HlNewBlock(NewBlock {
block:
HlBlock {
header,
body:
HlBlockBody {
inner: BlockBody { transactions, ommers, withdrawals },
sidecars,
read_precompile_calls,
highest_precompile_address,
},
},
td,
}) = value;
let b = &value.0.block;
Self {
block: BlockHelper {
header: Cow::Borrowed(header),
transactions: Cow::Borrowed(transactions),
ommers: Cow::Borrowed(ommers),
withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
header: Cow::Borrowed(&b.header),
transactions: Cow::Borrowed(&b.body.inner.transactions),
ommers: Cow::Borrowed(&b.body.inner.ommers),
withdrawals: b.body.inner.withdrawals.as_ref().map(Cow::Borrowed),
},
td: *td,
sidecars: sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
td: value.0.td,
sidecars: b.body.sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: b.body.read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: b
.body
.highest_precompile_address
.as_ref()
.map(Cow::Borrowed),
}
}
}
@ -111,30 +100,24 @@ mod rlp {
impl Decodable for HlNewBlock {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let HlNewBlockHelper {
block: BlockHelper { header, transactions, ommers, withdrawals },
td,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = HlNewBlockHelper::decode(buf)?;
let h = HlNewBlockHelper::decode(buf)?;
Ok(HlNewBlock(NewBlock {
block: HlBlock {
header: header.into_owned(),
header: h.block.header.into_owned(),
body: HlBlockBody {
inner: BlockBody {
transactions: transactions.into_owned(),
ommers: ommers.into_owned(),
withdrawals: withdrawals.map(|w| w.into_owned()),
transactions: h.block.transactions.into_owned(),
ommers: h.block.ommers.into_owned(),
withdrawals: h.block.withdrawals.map(|w| w.into_owned()),
},
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address
sidecars: h.sidecars.map(|s| s.into_owned()),
read_precompile_calls: h.read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: h
.highest_precompile_address
.map(|s| s.into_owned()),
},
},
td,
td: h.td,
}))
}
}
@ -172,41 +155,32 @@ impl HlNetworkBuilder {
where
Node: FullNodeTypes<Types = HlNode>,
{
let Self { engine_handle_rx, .. } = self;
let network_builder = ctx.network_config_builder()?;
let (to_import, from_network) = mpsc::unbounded_channel();
let (to_network, import_outcome) = mpsc::unbounded_channel();
let handle = ImportHandle::new(to_import, import_outcome);
let consensus = Arc::new(HlConsensus { provider: ctx.provider().clone() });
ctx.task_executor().spawn_critical("block import", async move {
let handle = engine_handle_rx
let handle = self
.engine_handle_rx
.lock()
.await
.take()
.expect("node should only be launched once")
.await
.unwrap();
ImportService::new(consensus, handle, from_network, to_network).await.unwrap();
});
let network_builder = network_builder
.disable_dns_discovery()
.disable_nat()
.boot_nodes(boot_nodes())
.set_head(ctx.head())
.with_pow()
.block_import(Box::new(HlBlockImport::new(handle)));
// .discovery(discv4)
// .eth_rlpx_handshake(Arc::new(HlHandshake::default()));
let network_config = ctx.build_network_config(network_builder);
Ok(network_config)
Ok(ctx.build_network_config(
ctx.network_config_builder()?
.disable_dns_discovery()
.disable_nat()
.boot_nodes(boot_nodes())
.set_head(ctx.head())
.with_pow()
.block_import(Box::new(HlBlockImport::new(handle))),
))
}
}
@ -229,11 +203,9 @@ where
pool: Pool,
) -> eyre::Result<Self::Network> {
let block_source_config = self.block_source_config.clone();
let network_config = self.network_config(ctx)?;
let network = NetworkManager::builder(network_config).await?;
let handle = ctx.start_network(network, pool);
let handle =
ctx.start_network(NetworkManager::builder(self.network_config(ctx)?).await?, pool);
let local_node_record = handle.local_node_record();
let chain_spec = ctx.chain_spec();
info!(target: "reth::cli", enode=%local_node_record, "P2P networking initialized");
let next_block_number = ctx
@ -243,12 +215,17 @@ where
.block_number +
1;
let chain_spec = ctx.chain_spec();
ctx.task_executor().spawn_critical("pseudo peer", async move {
let block_source =
block_source_config.create_cached_block_source((&*chain_spec).clone(), next_block_number).await;
start_pseudo_peer(chain_spec, local_node_record.to_string(), block_source)
.await
.unwrap();
start_pseudo_peer(
chain_spec.clone(),
local_node_record.to_string(),
block_source_config
.create_cached_block_source((*chain_spec).clone(), next_block_number)
.await,
)
.await
.unwrap();
});
Ok(handle)

View File

@ -68,19 +68,15 @@ impl BlockBodyTrait for HlBlockBody {
fn transactions(&self) -> &[Self::Transaction] {
BlockBodyTrait::transactions(&self.inner)
}
fn into_ethereum_body(self) -> BlockBody {
self.inner
}
fn into_transactions(self) -> Vec<Self::Transaction> {
self.inner.into_transactions()
}
fn withdrawals(&self) -> Option<&alloy_rpc_types::Withdrawals> {
self.inner.withdrawals()
}
fn ommers(&self) -> Option<&[Self::OmmerHeader]> {
self.inner.ommers()
}
@ -116,15 +112,12 @@ impl Block for HlBlock {
fn new(header: Self::Header, body: Self::Body) -> Self {
Self { header, body }
}
fn header(&self) -> &Self::Header {
&self.header
}
fn body(&self) -> &Self::Body {
&self.body
}
fn split(self) -> (Self::Header, Self::Body) {
(self.header, self.body)
}
@ -179,7 +172,6 @@ mod rlp {
read_precompile_calls,
highest_precompile_address,
} = value;
Self {
transactions: Cow::Borrowed(transactions),
ommers: Cow::Borrowed(ommers),
@ -203,7 +195,6 @@ mod rlp {
highest_precompile_address,
},
} = value;
Self {
header: Cow::Borrowed(header),
transactions: Cow::Borrowed(transactions),
@ -220,7 +211,6 @@ mod rlp {
fn encode(&self, out: &mut dyn bytes::BufMut) {
BlockBodyHelper::from(self).encode(out);
}
fn length(&self) -> usize {
BlockBodyHelper::from(self).length()
}
@ -253,7 +243,6 @@ mod rlp {
fn encode(&self, out: &mut dyn bytes::BufMut) {
BlockHelper::from(self).encode(out);
}
fn length(&self) -> usize {
BlockHelper::from(self).length()
}

View File

@ -1,20 +1,22 @@
//! HlNodePrimitives::TransactionSigned; it's the same as the Ethereum transaction type,
//! except that it supports a pseudo signer for system transactions.
use crate::evm::transaction::HlTxEnv;
use alloy_consensus::{
crypto::RecoveryError, error::ValueError, EthereumTxEnvelope, EthereumTypedTransaction,
SignableTransaction, Signed, Transaction as TransactionTrait, TransactionEnvelope, TxEip1559,
TxEip2930, TxEip4844, TxEip4844WithSidecar, TxEip7702, TxLegacy, TxType, TypedTransaction,
crypto::RecoveryError, error::ValueError, SignableTransaction, Signed,
Transaction as TransactionTrait, TransactionEnvelope, TxEip1559, TxEip2930, TxEip4844,
TxEip7702, TxLegacy, TxType, TypedTransaction,
};
use alloy_eips::{eip7594::BlobTransactionSidecarVariant, Encodable2718};
use alloy_eips::Encodable2718;
use alloy_network::TxSigner;
use alloy_primitives::{address, Address, TxHash, U256};
use alloy_rpc_types::{Transaction, TransactionInfo, TransactionRequest};
use alloy_signer::Signature;
use reth_codecs::alloy::transaction::FromTxCompact;
use reth_codecs::alloy::transaction::{Envelope, FromTxCompact};
use reth_db::{
table::{Compress, Decompress},
DatabaseError,
};
use reth_ethereum_primitives::PooledTransactionVariant;
use reth_evm::FromRecoveredTx;
use reth_primitives::Recovered;
use reth_primitives_traits::{
@ -26,8 +28,6 @@ use reth_rpc_eth_api::{
};
use revm::context::{BlockEnv, CfgEnv, TxEnv};
use crate::evm::transaction::HlTxEnv;
type InnerType = alloy_consensus::EthereumTxEnvelope<TxEip4844>;
#[derive(Debug, Clone, TransactionEnvelope)]
@ -114,11 +114,6 @@ impl reth_codecs::Compact for TransactionSigned {
}
}
pub fn convert_recovered(value: Recovered<TransactionSigned>) -> Recovered<InnerType> {
let (tx, signer) = value.into_parts();
Recovered::new_unchecked(tx.into_inner(), signer)
}
impl FromRecoveredTx<TransactionSigned> for TxEnv {
fn from_recovered_tx(tx: &TransactionSigned, sender: Address) -> Self {
TxEnv::from_recovered_tx(&tx.inner(), sender)
@ -162,16 +157,8 @@ impl TransactionSigned {
}
}
pub fn signature(&self) -> &Signature {
self.inner().signature()
}
pub const fn tx_type(&self) -> TxType {
self.inner().tx_type()
}
pub fn is_system_transaction(&self) -> bool {
self.gas_price().is_some() && self.gas_price().unwrap() == 0
matches!(self.gas_price(), Some(0))
}
}
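The zero-gas-price rule is the same predicate the block assembly code above relies on when filtering system transactions out of the transaction root. A standalone equivalence sketch (gas price inlined as Option<u128>, as in alloy's Transaction trait):
// The rewritten matches! form is equivalent to the old
// is_some() && unwrap() == 0 chain, without the double lookup.
fn is_system(gas_price: Option<u128>) -> bool {
    matches!(gas_price, Some(0))
}
assert!(is_system(Some(0)) && !is_system(Some(1)) && !is_system(None));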
@ -192,38 +179,16 @@ impl SerdeBincodeCompat for TransactionSigned {
pub type BlockBody = alloy_consensus::BlockBody<TransactionSigned>;
impl From<TransactionSigned> for EthereumTxEnvelope<TxEip4844> {
fn from(value: TransactionSigned) -> Self {
value.into_inner()
}
}
impl TryFrom<TransactionSigned> for EthereumTxEnvelope<TxEip4844WithSidecar> {
type Error = <InnerType as TryInto<EthereumTxEnvelope<TxEip4844WithSidecar>>>::Error;
impl TryFrom<TransactionSigned> for PooledTransactionVariant {
type Error = <InnerType as TryInto<PooledTransactionVariant>>::Error;
fn try_from(value: TransactionSigned) -> Result<Self, Self::Error> {
value.into_inner().try_into()
}
}
impl TryFrom<TransactionSigned>
for EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>
{
type Error = <InnerType as TryInto<
EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>,
>>::Error;
fn try_from(value: TransactionSigned) -> Result<Self, Self::Error> {
value.into_inner().try_into()
}
}
impl From<EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>>
for TransactionSigned
{
fn from(
value: EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>,
) -> Self {
impl From<PooledTransactionVariant> for TransactionSigned {
fn from(value: PooledTransactionVariant) -> Self {
Self::Default(value.into())
}
}
@ -231,10 +196,6 @@ impl From<EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>
impl Compress for TransactionSigned {
type Compressed = Vec<u8>;
fn compress(self) -> Self::Compressed {
self.into_inner().compress()
}
fn compress_to_buf<B: bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
self.inner().compress_to_buf(buf);
}
@ -300,26 +261,7 @@ impl SignableTxRequest<TransactionSigned> for TransactionRequest {
self,
signer: impl TxSigner<Signature> + Send,
) -> Result<TransactionSigned, SignTxRequestError> {
let mut tx =
self.build_typed_tx().map_err(|_| SignTxRequestError::InvalidTransactionRequest)?;
let signature = signer.sign_transaction(&mut tx).await?;
let signed = match tx {
EthereumTypedTransaction::Legacy(tx) => {
EthereumTxEnvelope::Legacy(tx.into_signed(signature))
}
EthereumTypedTransaction::Eip2930(tx) => {
EthereumTxEnvelope::Eip2930(tx.into_signed(signature))
}
EthereumTypedTransaction::Eip1559(tx) => {
EthereumTxEnvelope::Eip1559(tx.into_signed(signature))
}
EthereumTypedTransaction::Eip4844(tx) => {
EthereumTxEnvelope::Eip4844(TxEip4844::from(tx).into_signed(signature))
}
EthereumTypedTransaction::Eip7702(tx) => {
EthereumTxEnvelope::Eip7702(tx.into_signed(signature))
}
};
let signed = SignableTxRequest::<InnerType>::try_build_and_sign(self, signer).await?;
Ok(TransactionSigned::Default(signed))
}
}

View File

@ -1,4 +1,4 @@
use crate::node::rpc::HlEthApi;
use crate::node::rpc::{HlEthApi, HlRpcNodeCore};
use reth::rpc::server_types::eth::{
builder::config::PendingBlockKind, error::FromEvmError, EthApiError, PendingBlock,
};
@ -6,12 +6,12 @@ use reth_rpc_eth_api::{
helpers::{
pending_block::PendingEnvBuilder, EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt,
},
RpcConvert, RpcNodeCore,
RpcConvert,
};
impl<N, Rpc> EthBlocks for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
@ -19,7 +19,7 @@ where
impl<N, Rpc> LoadBlock for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
@ -27,7 +27,7 @@ where
impl<N, Rpc> LoadPendingBlock for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
@ -49,7 +49,7 @@ where
impl<N, Rpc> LoadReceipt for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{

View File

@ -1,32 +1,56 @@
use super::HlEthApi;
use super::{HlEthApi, HlRpcNodeCore};
use crate::{node::evm::apply_precompiles, HlBlock};
use alloy_evm::Evm;
use alloy_primitives::B256;
use reth::rpc::server_types::eth::EthApiError;
use reth_evm::TxEnvFor;
use reth_evm::{ConfigureEvm, Database, EvmEnvFor, SpecFor, TxEnvFor};
use reth_primitives::{NodePrimitives, Recovered};
use reth_primitives_traits::SignedTransaction;
use reth_provider::{ProviderError, ProviderTx};
use reth_rpc_eth_api::{
helpers::{estimate::EstimateCall, Call, EthCall},
FromEvmError, RpcConvert, RpcNodeCore,
};
use revm::DatabaseCommit;
impl<N> HlRpcNodeCore for N where N: RpcNodeCore<Primitives: NodePrimitives<Block = HlBlock>> {}
impl<N, Rpc> EthCall for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, TxEnv = TxEnvFor<N::Evm>>,
Rpc: RpcConvert<
Primitives = N::Primitives,
Error = EthApiError,
TxEnv = TxEnvFor<N::Evm>,
Spec = SpecFor<N::Evm>,
>,
{
}
impl<N, Rpc> EstimateCall for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, TxEnv = TxEnvFor<N::Evm>>,
Rpc: RpcConvert<
Primitives = N::Primitives,
Error = EthApiError,
TxEnv = TxEnvFor<N::Evm>,
Spec = SpecFor<N::Evm>,
>,
{
}
impl<N, Rpc> Call for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, TxEnv = TxEnvFor<N::Evm>>,
Rpc: RpcConvert<
Primitives = N::Primitives,
Error = EthApiError,
TxEnv = TxEnvFor<N::Evm>,
Spec = SpecFor<N::Evm>,
>,
{
#[inline]
fn call_gas_limit(&self) -> u64 {
@ -37,4 +61,35 @@ where
fn max_simulate_blocks(&self) -> u64 {
self.inner.eth_api.max_simulate_blocks()
}
fn replay_transactions_until<'a, DB, I>(
&self,
db: &mut DB,
evm_env: EvmEnvFor<Self::Evm>,
transactions: I,
target_tx_hash: B256,
) -> Result<usize, Self::Error>
where
DB: Database<Error = ProviderError> + DatabaseCommit + core::fmt::Debug,
I: IntoIterator<Item = Recovered<&'a ProviderTx<Self::Provider>>>,
{
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.try_into().unwrap())?;
let mut evm = self.evm_config().evm_with_env(db, evm_env);
apply_precompiles(&mut evm, &hl_extras);
let mut index = 0;
for tx in transactions {
if *tx.tx_hash() == target_tx_hash {
// reached the target transaction
break;
}
let tx_env = self.evm_config().tx_env(tx);
evm.transact_commit(tx_env).map_err(Self::Error::from_evm_err)?;
index += 1;
}
Ok(index)
}
}

View File

@ -36,7 +36,7 @@ where
}
}
/// Validator for Optimism engine API.
/// Validator for HyperEVM engine API.
#[derive(Debug, Clone)]
pub struct HlPayloadValidator {
inner: HlExecutionPayloadValidator<HlChainSpec>,
@ -123,7 +123,7 @@ where
return Err(PayloadError::BlockHash {
execution: sealed_block.hash(),
consensus: expected_hash,
})?;
});
}
Ok(sealed_block)

View File

@ -1,3 +1,9 @@
use crate::{
chainspec::HlChainSpec,
node::{evm::apply_precompiles, types::HlExtras},
HlBlock, HlPrimitives,
};
use alloy_evm::Evm;
use alloy_network::Ethereum;
use alloy_primitives::U256;
use reth::{
@ -18,28 +24,30 @@ use reth::{
TaskSpawner,
},
};
use reth_evm::ConfigureEvm;
use reth_provider::{ChainSpecProvider, ProviderHeader, ProviderTx};
use reth_evm::{ConfigureEvm, Database, EvmEnvFor, HaltReasonFor, InspectorFor, TxEnvFor};
use reth_primitives::NodePrimitives;
use reth_provider::{BlockReader, ChainSpecProvider, ProviderError, ProviderHeader, ProviderTx};
use reth_rpc::RpcTypes;
use reth_rpc_eth_api::{
helpers::{
pending_block::BuildPendingEnv, spec::SignersForApi, AddDevSigners, EthApiSpec, EthFees,
EthState, LoadFee, LoadState, SpawnBlocking, Trace,
EthState, LoadFee, LoadPendingBlock, LoadState, SpawnBlocking, Trace,
},
EthApiTypes, FromEvmError, RpcConvert, RpcConverter, RpcNodeCore, RpcNodeCoreExt,
SignableTxRequest,
};
use revm::context::result::ResultAndState;
use std::{fmt, marker::PhantomData, sync::Arc};
use crate::chainspec::HlChainSpec;
mod block;
mod call;
pub mod engine_api;
mod transaction;
pub trait HlRpcNodeCore: RpcNodeCore<Primitives: NodePrimitives<Block = HlBlock>> {}
/// Container type `HlEthApi`
pub(crate) struct HlEthApiInner<N: RpcNodeCore, Rpc: RpcConvert> {
pub(crate) struct HlEthApiInner<N: HlRpcNodeCore, Rpc: RpcConvert> {
/// Gateway to node's core components.
pub(crate) eth_api: EthApiInner<N, Rpc>,
}
@ -48,14 +56,14 @@ type HlRpcConvert<N, NetworkT> =
RpcConverter<NetworkT, <N as FullNodeComponents>::Evm, EthReceiptConverter<HlChainSpec>>;
#[derive(Clone)]
pub struct HlEthApi<N: RpcNodeCore, Rpc: RpcConvert> {
pub struct HlEthApi<N: HlRpcNodeCore, Rpc: RpcConvert> {
/// Gateway to node's core components.
pub(crate) inner: Arc<HlEthApiInner<N, Rpc>>,
}
impl<N, Rpc> fmt::Debug for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@ -65,7 +73,7 @@ where
impl<N, Rpc> EthApiTypes for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
type Error = EthApiError;
@ -79,7 +87,7 @@ where
impl<N, Rpc> RpcNodeCore for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
type Primitives = N::Primitives;
@ -111,7 +119,7 @@ where
impl<N, Rpc> RpcNodeCoreExt for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
#[inline]
@ -122,7 +130,7 @@ where
impl<N, Rpc> EthApiSpec for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
type Transaction = ProviderTx<Self::Provider>;
@ -141,7 +149,7 @@ where
impl<N, Rpc> SpawnBlocking for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
#[inline]
@ -162,7 +170,7 @@ where
impl<N, Rpc> LoadFee for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
@ -179,15 +187,17 @@ where
impl<N, Rpc> LoadState for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
Self: LoadPendingBlock,
{
}
impl<N, Rpc> EthState for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
Self: LoadPendingBlock,
{
#[inline]
fn max_proof_window(&self) -> u64 {
@ -197,7 +207,7 @@ where
impl<N, Rpc> EthFees for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
@ -205,15 +215,50 @@ where
impl<N, Rpc> Trace for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn inspect<DB, I>(
&self,
db: DB,
evm_env: EvmEnvFor<Self::Evm>,
tx_env: TxEnvFor<Self::Evm>,
inspector: I,
) -> Result<ResultAndState<HaltReasonFor<Self::Evm>>, Self::Error>
where
DB: Database<Error = ProviderError>,
I: InspectorFor<Self::Evm, DB>,
{
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.try_into().unwrap())?;
let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, inspector);
apply_precompiles(&mut evm, &hl_extras);
evm.transact(tx_env).map_err(Self::Error::from_evm_err)
}
}
impl<N, Rpc> HlEthApi<N, Rpc>
where
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn get_hl_extras(&self, block_number: u64) -> Result<HlExtras, ProviderError> {
Ok(self
.provider()
.block_by_number(block_number)?
.map(|block| HlExtras {
read_precompile_calls: block.body.read_precompile_calls.clone(),
highest_precompile_address: block.body.highest_precompile_address,
})
.unwrap_or_default())
}
}
impl<N, Rpc> AddDevSigners for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<
Network: RpcTypes<TransactionRequest: SignableTxRequest<ProviderTx<N::Provider>>>,
>,
@ -239,7 +284,7 @@ impl<NetworkT> Default for HlEthApiBuilder<NetworkT> {
impl<N, NetworkT> EthApiBuilder<N> for HlEthApiBuilder<NetworkT>
where
N: FullNodeComponents<Types: NodeTypes<ChainSpec = HlChainSpec>>
N: FullNodeComponents<Types: NodeTypes<ChainSpec = HlChainSpec, Primitives = HlPrimitives>>
+ RpcNodeCore<
Primitives = PrimitivesTy<N::Types>,
Evm: ConfigureEvm<NextBlockEnvCtx: BuildPendingEnv<HeaderTy<N::Types>>>,

View File

@ -1,21 +1,21 @@
use crate::node::rpc::HlEthApi;
use crate::node::rpc::{HlEthApi, HlRpcNodeCore};
use alloy_primitives::{Bytes, B256};
use reth::rpc::server_types::eth::EthApiError;
use reth_rpc_eth_api::{
helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction},
RpcConvert, RpcNodeCore,
RpcConvert,
};
impl<N, Rpc> LoadTransaction for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
}
impl<N, Rpc> EthTransactions for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn signers(&self) -> &SignersForRpc<Self::Provider, Self::NetworkTypes> {

View File

@ -1,3 +1,7 @@
use std::time::Duration;
use crate::pseudo_peer::HlNodeBlockSourceArgs;
use super::config::BlockSourceConfig;
use clap::{Args, Parser};
use reth_node_core::args::LogArgs;
@ -13,7 +17,7 @@ pub struct BlockSourceArgs {
block_source: Option<String>,
#[arg(long, alias = "local-ingest-dir")]
block_source_from_node: Option<String>,
local_ingest_dir: Option<String>,
/// Shorthand for --block-source=s3://hl-mainnet-evm-blocks
#[arg(long, default_value_t = false)]
@ -22,6 +26,19 @@ pub struct BlockSourceArgs {
/// Shorthand for --block-source-from-node=~/hl/data/evm_blocks_and_receipts
#[arg(long)]
local: bool,
/// Interval, in milliseconds, for polling new blocks from S3.
#[arg(id = "s3.polling-interval", long = "s3.polling-interval", default_value = "25")]
s3_polling_interval: u64,
/// Maximum allowed delay for the hl-node block source in milliseconds.
/// If this threshold is exceeded, the client falls back to other sources.
#[arg(
id = "local.fallback-threshold",
long = "local.fallback-threshold",
default_value = "5000"
)]
local_fallback_threshold: u64,
}
impl BlockSourceArgs {
@ -33,7 +50,10 @@ impl BlockSourceArgs {
async fn create_base_config(&self) -> eyre::Result<BlockSourceConfig> {
if self.s3 {
return Ok(BlockSourceConfig::s3_default().await);
return Ok(BlockSourceConfig::s3_default(Duration::from_millis(
self.s3_polling_interval,
))
.await);
}
if self.local {
@ -47,18 +67,25 @@ impl BlockSourceArgs {
};
if let Some(bucket) = value.strip_prefix("s3://") {
Ok(BlockSourceConfig::s3(bucket.to_string()).await)
Ok(BlockSourceConfig::s3(
bucket.to_string(),
Duration::from_millis(self.s3_polling_interval),
)
.await)
} else {
Ok(BlockSourceConfig::local(value.into()))
}
}
fn apply_node_source_config(&self, config: BlockSourceConfig) -> BlockSourceConfig {
let Some(block_source_from_node) = self.block_source_from_node.as_ref() else {
let Some(local_ingest_dir) = self.local_ingest_dir.as_ref() else {
return config;
};
config.with_block_source_from_node(block_source_from_node.to_string())
config.with_block_source_from_node(HlNodeBlockSourceArgs {
root: local_ingest_dir.into(),
fallback_threshold: Duration::from_millis(self.local_fallback_threshold),
})
}
}
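A usage sketch for the new knobs (flag spellings taken from the arg ids above; the binary name is assumed):
# poll S3 every 10ms instead of the 25ms default
reth-hl node --s3 --s3.polling-interval 10
# fall back from the hl-node source once it lags more than 2 seconds
reth-hl node --local --local.fallback-threshold 2000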

View File

@ -1,31 +1,38 @@
use crate::chainspec::HlChainSpec;
use super::sources::{
BlockSourceBoxed, CachedBlockSource, HlNodeBlockSource, LocalBlockSource, S3BlockSource,
BlockSourceBoxed, CachedBlockSource, HlNodeBlockSource, HlNodeBlockSourceArgs,
LocalBlockSource, S3BlockSource,
};
use aws_config::BehaviorVersion;
use std::{env::home_dir, path::PathBuf, sync::Arc};
use std::{env::home_dir, path::PathBuf, sync::Arc, time::Duration};
#[derive(Debug, Clone)]
pub struct BlockSourceConfig {
pub source_type: BlockSourceType,
pub block_source_from_node: Option<String>,
pub block_source_from_node: Option<HlNodeBlockSourceArgs>,
}
#[derive(Debug, Clone)]
pub enum BlockSourceType {
S3Default,
S3 { bucket: String },
S3Default { polling_interval: Duration },
S3 { bucket: String, polling_interval: Duration },
Local { path: PathBuf },
}
impl BlockSourceConfig {
pub async fn s3_default() -> Self {
Self { source_type: BlockSourceType::S3Default, block_source_from_node: None }
pub async fn s3_default(polling_interval: Duration) -> Self {
Self {
source_type: BlockSourceType::S3Default { polling_interval },
block_source_from_node: None,
}
}
pub async fn s3(bucket: String) -> Self {
Self { source_type: BlockSourceType::S3 { bucket }, block_source_from_node: None }
pub async fn s3(bucket: String, polling_interval: Duration) -> Self {
Self {
source_type: BlockSourceType::S3 { bucket, polling_interval },
block_source_from_node: None,
}
}
pub fn local(path: PathBuf) -> Self {
@ -45,15 +52,22 @@ impl BlockSourceConfig {
}
}
pub fn with_block_source_from_node(mut self, block_source_from_node: String) -> Self {
pub fn with_block_source_from_node(
mut self,
block_source_from_node: HlNodeBlockSourceArgs,
) -> Self {
self.block_source_from_node = Some(block_source_from_node);
self
}
pub async fn create_block_source(&self, chain_spec: HlChainSpec) -> BlockSourceBoxed {
match &self.source_type {
BlockSourceType::S3Default => s3_block_source(chain_spec.official_s3_bucket()).await,
BlockSourceType::S3 { bucket } => s3_block_source(bucket).await,
BlockSourceType::S3Default { polling_interval } => {
s3_block_source(chain_spec.official_s3_bucket(), *polling_interval).await
}
BlockSourceType::S3 { bucket, polling_interval } => {
s3_block_source(bucket, *polling_interval).await
}
BlockSourceType::Local { path } => {
Arc::new(Box::new(LocalBlockSource::new(path.clone())))
}
@ -72,7 +86,7 @@ impl BlockSourceConfig {
Arc::new(Box::new(
HlNodeBlockSource::new(
fallback_block_source,
PathBuf::from(block_source_from_node.clone()),
block_source_from_node.clone(),
next_block_number,
)
.await,
@ -91,9 +105,9 @@ impl BlockSourceConfig {
}
}
async fn s3_block_source(bucket: impl AsRef<str>) -> BlockSourceBoxed {
async fn s3_block_source(bucket: impl AsRef<str>, polling_interval: Duration) -> BlockSourceBoxed {
let client = aws_sdk_s3::Client::new(
&aws_config::defaults(BehaviorVersion::latest()).region("ap-northeast-1").load().await,
);
Arc::new(Box::new(S3BlockSource::new(client, bucket.as_ref().to_string())))
Arc::new(Box::new(S3BlockSource::new(client, bucket.as_ref().to_string(), polling_interval)))
}
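Putting the pieces together, a construction sketch (the ingest path is illustrative; field types follow the definitions above):
// Poll the default S3 bucket every 25ms, preferring a local hl-node
// ingest dir and falling back after 5s of silence.
let config = BlockSourceConfig::s3_default(Duration::from_millis(25))
    .await
    .with_block_source_from_node(HlNodeBlockSourceArgs {
        // illustrative path; "~" would not be expanded by PathBuf
        root: PathBuf::from("/home/user/hl/data/evm_blocks_and_receipts"),
        fallback_threshold: Duration::from_secs(5),
    });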

View File

@ -1 +0,0 @@
pub const MAX_CONCURRENCY: usize = 100;

View File

@ -1,36 +0,0 @@
use thiserror::Error;
#[derive(Error, Debug)]
pub enum PseudoPeerError {
#[error("Block source error: {0}")]
BlockSource(String),
#[error("Network error: {0}")]
Network(#[from] reth_network::error::NetworkError),
#[error("Configuration error: {0}")]
Config(String),
#[error("AWS S3 error: {0}")]
S3(#[from] aws_sdk_s3::Error),
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("Serialization error: {0}")]
Serialization(#[from] rmp_serde::encode::Error),
#[error("Deserialization error: {0}")]
Deserialization(#[from] rmp_serde::decode::Error),
#[error("Compression error: {0}")]
Compression(String),
}
impl From<eyre::Error> for PseudoPeerError {
fn from(err: eyre::Error) -> Self {
PseudoPeerError::Config(err.to_string())
}
}
pub type Result<T> = std::result::Result<T, PseudoPeerError>;

View File

@ -5,33 +5,25 @@
pub mod cli;
pub mod config;
pub mod consts;
pub mod error;
pub mod network;
pub mod service;
pub mod sources;
pub mod utils;
use std::sync::Arc;
use tokio::sync::mpsc;
use tracing::{error, info};
pub use cli::*;
pub use config::*;
pub use error::*;
pub use network::*;
pub use service::*;
pub use sources::*;
#[cfg(test)]
mod tests;
use tokio::sync::mpsc;
use tracing::info;
/// Re-export commonly used types
pub mod prelude {
pub use super::{
config::BlockSourceConfig,
error::{PseudoPeerError, Result},
service::{BlockPoller, PseudoPeer},
sources::{BlockSource, CachedBlockSource, LocalBlockSource, S3BlockSource},
};
@ -86,8 +78,11 @@ pub async fn start_pseudo_peer(
_ = transaction_rx.recv() => {}
Some(eth_req) = eth_rx.recv() => {
service.process_eth_request(eth_req).await?;
info!("Processed eth request");
if let Err(e) = service.process_eth_request(eth_req).await {
error!("Error processing eth request: {e:?}");
} else {
info!("Processed eth request");
}
}
}
}

View File

@ -6,7 +6,11 @@ use reth_network::{
};
use reth_network_peers::TrustedPeer;
use reth_provider::test_utils::NoopProvider;
use std::{str::FromStr, sync::Arc};
use std::{
net::{Ipv4Addr, SocketAddr},
str::FromStr,
sync::Arc,
};
use tokio::sync::mpsc;
pub struct NetworkBuilder {
@ -32,27 +36,11 @@ impl Default for NetworkBuilder {
}
impl NetworkBuilder {
pub fn with_secret(mut self, secret: SecretKey) -> Self {
self.secret = secret;
self
}
pub fn with_peer_config(mut self, peer_config: PeersConfig) -> Self {
self.peer_config = peer_config;
self
}
pub fn with_boot_nodes(mut self, boot_nodes: Vec<TrustedPeer>) -> Self {
self.boot_nodes = boot_nodes;
self
}
pub fn with_ports(mut self, discovery_port: u16, listener_port: u16) -> Self {
self.discovery_port = discovery_port;
self.listener_port = listener_port;
self
}
pub fn with_chain_spec(mut self, chain_spec: HlChainSpec) -> Self {
self.chain_spec = chain_spec;
self
@ -66,8 +54,8 @@ impl NetworkBuilder {
let builder = NetworkConfig::<(), HlNetworkPrimitives>::builder(self.secret)
.boot_nodes(self.boot_nodes)
.peer_config(self.peer_config)
.discovery_port(self.discovery_port)
.listener_port(self.listener_port);
.discovery_addr(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), self.discovery_port))
.listener_addr(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), self.listener_port));
let chain_id = self.chain_spec.inner.chain().id();
let (block_poller, start_tx) =

View File

@ -26,7 +26,6 @@ use std::{
pin::Pin,
sync::{Arc, Mutex},
task::{Context, Poll},
time::Duration,
};
use tokio::{sync::mpsc, task::JoinHandle};
use tracing::{debug, info};
@ -49,8 +48,6 @@ pub struct BlockPoller {
}
impl BlockPoller {
const POLL_INTERVAL: Duration = Duration::from_millis(25);
pub fn new_suspended<BS: BlockSource>(
chain_id: u64,
block_source: BS,
@ -77,19 +74,20 @@ impl BlockPoller {
start_rx.recv().await.ok_or(eyre::eyre!("Failed to receive start signal"))?;
info!("Starting block poller");
let latest_block_number = block_source
let polling_interval = block_source.polling_interval();
let mut next_block_number = block_source
.find_latest_block_number()
.await
.ok_or(eyre::eyre!("Failed to find latest block number"))?;
let mut next_block_number = latest_block_number;
loop {
let Ok(block) = block_source.collect_block(next_block_number).await else {
tokio::time::sleep(Self::POLL_INTERVAL).await;
continue;
};
block_tx_clone.send((next_block_number, block)).await?;
next_block_number += 1;
match block_source.collect_block(next_block_number).await {
Ok(block) => {
block_tx_clone.send((next_block_number, block)).await?;
next_block_number += 1;
}
Err(_) => tokio::time::sleep(polling_interval).await,
}
}
}
}
@ -111,8 +109,7 @@ impl BlockImport<HlNewBlock> for BlockPoller {
},
}))
}
Poll::Ready(None) => Poll::Pending,
Poll::Pending => Poll::Pending,
Poll::Ready(None) | Poll::Pending => Poll::Pending,
}
}
@ -155,14 +152,14 @@ impl<BS: BlockSource> PseudoPeer<BS> {
async fn collect_blocks(
&self,
block_numbers: impl IntoIterator<Item = u64>,
) -> Vec<BlockAndReceipts> {
) -> eyre::Result<Vec<BlockAndReceipts>> {
let block_numbers = block_numbers.into_iter().collect::<Vec<_>>();
let blocks = futures::stream::iter(block_numbers)
.map(async |number| self.collect_block(number).await.unwrap())
let res = futures::stream::iter(block_numbers)
.map(async |number| self.collect_block(number).await)
.buffered(self.block_source.recommended_chunk_size() as usize)
.collect::<Vec<_>>()
.await;
blocks
res.into_iter().collect()
}
pub async fn process_eth_request(
@ -179,7 +176,6 @@ impl<BS: BlockSource> PseudoPeer<BS> {
debug!(
"GetBlockHeaders request: {start_block:?}, {limit:?}, {skip:?}, {direction:?}"
);
let number = match start_block {
HashOrNumber::Hash(hash) => self.hash_to_block_number(hash).await,
HashOrNumber::Number(number) => number,
@ -190,7 +186,7 @@ impl<BS: BlockSource> PseudoPeer<BS> {
HeadersDirection::Falling => {
self.collect_blocks((number + 1 - limit..number + 1).rev()).await
}
}
}?
.into_par_iter()
.map(|block| block.to_reth_block(chain_id).header.clone())
.collect::<Vec<_>>();
@ -208,19 +204,15 @@ impl<BS: BlockSource> PseudoPeer<BS> {
let block_bodies = self
.collect_blocks(numbers)
.await
.await?
.into_iter()
.map(|block| block.to_reth_block(chain_id).body)
.collect::<Vec<_>>();
let _ = response.send(Ok(BlockBodies(block_bodies)));
}
IncomingEthRequest::GetNodeData { .. } => {
debug!("GetNodeData request: {eth_req:?}");
}
eth_req => {
debug!("New eth protocol request: {eth_req:?}");
}
IncomingEthRequest::GetNodeData { .. } => debug!("GetNodeData request: {eth_req:?}"),
eth_req => debug!("New eth protocol request: {eth_req:?}"),
}
Ok(())
}
@ -251,7 +243,6 @@ impl<BS: BlockSource> PseudoPeer<BS> {
// This is tricky because raw EVM files (BlockSource) do not have a hash-to-number mapping,
// so we can either enumerate all blocks to build that mapping, or fall back to an
// official RPC. The latter is much easier but has a 300/day rate limit.
use jsonrpsee::http_client::HttpClientBuilder;
use jsonrpsee_core::client::ClientT;
@ -259,7 +250,6 @@ impl<BS: BlockSource> PseudoPeer<BS> {
let client =
HttpClientBuilder::default().build(self.chain_spec.official_rpc_url()).unwrap();
let target_block: Block = client.request("eth_getBlockByHash", (hash, false)).await?;
debug!("From official RPC: {:?} for {hash:?}", target_block.header.number);
self.cache_blocks([(hash, target_block.header.number)]);
Ok(target_block.header.number)
@ -272,9 +262,10 @@ impl<BS: BlockSource> PseudoPeer<BS> {
if self.if_hit_then_warm_around.lock().unwrap().contains(&block_number) {
self.warm_cache_around_blocks(block_number, self.warm_cache_size).await;
}
return Some(block_number);
Some(block_number)
} else {
None
}
None
}
/// Backfill the cache with blocks to find the target hash
@ -319,10 +310,11 @@ impl<BS: BlockSource> PseudoPeer<BS> {
async fn warm_cache_around_blocks(&mut self, block_number: u64, chunk_size: u64) {
let start = std::cmp::max(block_number.saturating_sub(chunk_size), 1);
let end = std::cmp::min(block_number + chunk_size, self.known_latest_block_number);
self.if_hit_then_warm_around.lock().unwrap().insert(start);
self.if_hit_then_warm_around.lock().unwrap().insert(end);
{
let mut guard = self.if_hit_then_warm_around.lock().unwrap();
guard.insert(start);
guard.insert(end);
}
const IMPOSSIBLE_HASH: B256 = B256::ZERO;
let _ = self.try_block_range_for_hash(start, end, IMPOSSIBLE_HASH).await;
}
@ -348,15 +340,12 @@ impl<BS: BlockSource> PseudoPeer<BS> {
}
debug!("Backfilling from {start_number} to {end_number}");
// Collect blocks and cache them
let blocks = self.collect_blocks(uncached_block_numbers).await;
let blocks = self.collect_blocks(uncached_block_numbers).await?;
let block_map: HashMap<B256, u64> =
blocks.into_iter().map(|block| (block.hash(), block.number())).collect();
let maybe_block_number = block_map.get(&target_hash).copied();
self.cache_blocks(block_map);
Ok(maybe_block_number)
}

View File

@ -0,0 +1,48 @@
use super::{BlockSource, BlockSourceBoxed};
use crate::node::types::BlockAndReceipts;
use futures::{future::BoxFuture, FutureExt};
use reth_network::cache::LruMap;
use std::sync::{Arc, RwLock};
/// Block source wrapper that caches blocks in memory
#[derive(Debug, Clone)]
pub struct CachedBlockSource {
block_source: BlockSourceBoxed,
cache: Arc<RwLock<LruMap<u64, BlockAndReceipts>>>,
}
impl CachedBlockSource {
const CACHE_LIMIT: u32 = 100000;
pub fn new(block_source: BlockSourceBoxed) -> Self {
Self { block_source, cache: Arc::new(RwLock::new(LruMap::new(Self::CACHE_LIMIT))) }
}
}
impl BlockSource for CachedBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
let block_source = self.block_source.clone();
let cache = self.cache.clone();
async move {
if let Some(block) = cache.write().unwrap().get(&height) {
return Ok(block.clone());
}
let block = block_source.collect_block(height).await?;
cache.write().unwrap().insert(height, block.clone());
Ok(block)
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
self.block_source.find_latest_block_number()
}
fn recommended_chunk_size(&self) -> u64 {
self.block_source.recommended_chunk_size()
}
fn polling_interval(&self) -> std::time::Duration {
self.block_source.polling_interval()
}
}
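A usage sketch (assuming an existing `inner: BlockSourceBoxed`):
// The first call hits the wrapped source; the second is served from the
// 100_000-entry LRU, so repeated header/body requests stay cheap.
let cached = CachedBlockSource::new(inner);
let block = cached.collect_block(42).await?;
let same = cached.collect_block(42).await?;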

View File

@ -1,635 +0,0 @@
use super::{BlockSource, BlockSourceBoxed};
use crate::node::types::{BlockAndReceipts, EvmBlock};
use futures::future::BoxFuture;
use rangemap::RangeInclusiveMap;
use reth_network::cache::LruMap;
use serde::{Deserialize, Serialize};
use std::{
fs::File,
io::{BufRead, BufReader, Read, Seek, SeekFrom},
ops::RangeInclusive,
path::{Path, PathBuf},
sync::Arc,
};
use time::{macros::format_description, Date, Duration, OffsetDateTime, Time};
use tokio::sync::Mutex;
use tracing::{info, warn};
const TAIL_INTERVAL: std::time::Duration = std::time::Duration::from_millis(25);
const HOURLY_SUBDIR: &str = "hourly";
#[derive(Debug)]
pub struct LocalBlocksCache {
cache: LruMap<u64, BlockAndReceipts>,
// Lightweight range map to track the ranges of blocks in the local ingest directory
ranges: RangeInclusiveMap<u64, PathBuf>,
}
impl LocalBlocksCache {
// 3660 blocks per hour
const CACHE_SIZE: u32 = 8000;
fn new() -> Self {
Self { cache: LruMap::new(Self::CACHE_SIZE), ranges: RangeInclusiveMap::new() }
}
fn load_scan_result(&mut self, scan_result: ScanResult) {
for blk in scan_result.new_blocks {
let EvmBlock::Reth115(b) = &blk.block;
self.cache.insert(b.header.header.number, blk);
}
for range in scan_result.new_block_ranges {
self.ranges.insert(range, scan_result.path.clone());
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct LocalBlockAndReceipts(String, BlockAndReceipts);
struct ScanResult {
path: PathBuf,
next_expected_height: u64,
new_blocks: Vec<BlockAndReceipts>,
new_block_ranges: Vec<RangeInclusive<u64>>,
}
struct ScanOptions {
start_height: u64,
only_load_ranges: bool,
}
fn line_to_evm_block(line: &str) -> serde_json::Result<(BlockAndReceipts, u64)> {
let LocalBlockAndReceipts(_block_timestamp, parsed_block): LocalBlockAndReceipts =
serde_json::from_str(line)?;
let height = match &parsed_block.block {
EvmBlock::Reth115(b) => b.header.header.number,
};
Ok((parsed_block, height))
}
fn scan_hour_file(path: &Path, last_line: &mut usize, options: ScanOptions) -> ScanResult {
let file = File::open(path).expect("Failed to open hour file path");
let reader = BufReader::new(file);
let ScanOptions { start_height, only_load_ranges } = options;
let mut new_blocks = Vec::new();
let mut last_height = start_height;
let lines: Vec<String> = reader.lines().collect::<Result<_, _>>().unwrap();
let skip = if *last_line == 0 { 0 } else { *last_line - 1 };
let mut block_ranges = Vec::new();
let mut current_range: Option<(u64, u64)> = None;
for (line_idx, line) in lines.iter().enumerate().skip(skip) {
if line_idx < *last_line || line.trim().is_empty() {
continue;
}
match line_to_evm_block(line) {
Ok((parsed_block, height)) => {
if height >= start_height {
last_height = last_height.max(height);
if !only_load_ranges {
new_blocks.push(parsed_block);
}
*last_line = line_idx;
}
match current_range {
Some((start, end)) if end + 1 == height => {
current_range = Some((start, height));
}
_ => {
if let Some((start, end)) = current_range.take() {
block_ranges.push(start..=end);
}
current_range = Some((height, height));
}
}
}
Err(_) => {
warn!("Failed to parse line: {}...", line.get(0..50).unwrap_or(line));
continue;
}
}
}
if let Some((start, end)) = current_range {
block_ranges.push(start..=end);
}
ScanResult {
path: path.to_path_buf(),
next_expected_height: last_height + 1,
new_blocks,
new_block_ranges: block_ranges,
}
}
fn date_from_datetime(dt: OffsetDateTime) -> String {
dt.format(&format_description!("[year][month][day]")).unwrap()
}
/// Block source that monitors the local ingest directory for the HL node.
#[derive(Debug, Clone)]
pub struct HlNodeBlockSource {
pub fallback: BlockSourceBoxed,
pub local_ingest_dir: PathBuf,
pub local_blocks_cache: Arc<Mutex<LocalBlocksCache>>, // height → block
// for rate limiting requests to fallback
pub last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>,
}
impl BlockSource for HlNodeBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
Box::pin(async move {
let now = OffsetDateTime::now_utc();
if let Some(block) = self.try_collect_local_block(height).await {
self.update_last_fetch(height, now).await;
return Ok(block);
}
if let Some((last_height, last_poll_time)) = *self.last_local_fetch.lock().await {
let more_recent = last_height < height;
let too_soon = now - last_poll_time < Self::MAX_ALLOWED_THRESHOLD_BEFORE_FALLBACK;
if more_recent && too_soon {
return Err(eyre::eyre!(
"Not found locally; limiting polling rate before fallback so that hl-node has chance to catch up"
));
}
}
let block = self.fallback.collect_block(height).await?;
self.update_last_fetch(height, now).await;
Ok(block)
})
}
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
Box::pin(async move {
let Some(dir) = Self::find_latest_hourly_file(&self.local_ingest_dir) else {
warn!(
"No EVM blocks from hl-node found at {:?}; fallback to s3/ingest-dir",
self.local_ingest_dir
);
return self.fallback.find_latest_block_number().await;
};
let mut file = File::open(&dir).expect("Failed to open hour file path");
if let Some((_, height)) = read_last_complete_line(&mut file) {
info!("Latest block number: {} with path {}", height, dir.display());
Some(height)
} else {
warn!(
"Failed to parse the hl-node hourly file at {:?}; fallback to s3/ingest-dir",
file
);
self.fallback.find_latest_block_number().await
}
})
}
fn recommended_chunk_size(&self) -> u64 {
self.fallback.recommended_chunk_size()
}
}
fn read_last_complete_line<R: Read + Seek>(read: &mut R) -> Option<(BlockAndReceipts, u64)> {
const CHUNK_SIZE: u64 = 50000;
let mut buf = Vec::with_capacity(CHUNK_SIZE as usize);
let mut pos = read.seek(SeekFrom::End(0)).unwrap();
let mut last_line = Vec::new();
while pos > 0 {
let read_size = std::cmp::min(pos, CHUNK_SIZE);
buf.resize(read_size as usize, 0);
read.seek(SeekFrom::Start(pos - read_size)).unwrap();
read.read_exact(&mut buf).unwrap();
last_line = [buf.clone(), last_line].concat();
if last_line.ends_with(b"\n") {
last_line.pop();
}
if let Some(idx) = last_line.iter().rposition(|&b| b == b'\n') {
let candidate = &last_line[idx + 1..];
if let Ok((evm_block, height)) = line_to_evm_block(str::from_utf8(candidate).unwrap()) {
return Some((evm_block, height));
}
// Incomplete line; truncate and continue
last_line.truncate(idx);
}
if pos < read_size {
break;
}
pos -= read_size;
}
line_to_evm_block(&String::from_utf8(last_line).unwrap()).ok()
}
impl HlNodeBlockSource {
/// [HlNodeBlockSource] picks whichever is faster between the local ingest directory and
/// s3/ingest-dir. Falling back to s3/ingest-dir immediately, however, can fire off
/// unnecessary requests that, in the S3 case, would only return 404.
///
/// To avoid that, we wait for a short threshold period before falling back.
/// The threshold is several times the expected block time, which keeps redundant
/// fallback attempts rare.
pub(crate) const MAX_ALLOWED_THRESHOLD_BEFORE_FALLBACK: Duration = Duration::milliseconds(5000);
async fn update_last_fetch(&self, height: u64, now: OffsetDateTime) {
let mut last_fetch = self.last_local_fetch.lock().await;
if let Some((last_height, _)) = *last_fetch {
if last_height >= height {
return;
}
}
*last_fetch = Some((height, now));
}
async fn try_collect_local_block(&self, height: u64) -> Option<BlockAndReceipts> {
let mut u_cache = self.local_blocks_cache.lock().await;
if let Some(block) = u_cache.cache.remove(&height) {
return Some(block);
}
let path = u_cache.ranges.get(&height).cloned()?;
info!("Loading block data from {:?}", path);
u_cache.load_scan_result(scan_hour_file(
&path,
&mut 0,
ScanOptions { start_height: 0, only_load_ranges: false },
));
u_cache.cache.get(&height).cloned()
}
fn datetime_from_path(path: &Path) -> Option<OffsetDateTime> {
let dt_part = path.parent()?.file_name()?.to_str()?;
let hour_part = path.file_name()?.to_str()?;
let hour: u8 = hour_part.parse().ok()?;
Some(OffsetDateTime::new_utc(
Date::parse(dt_part, &format_description!("[year][month][day]")).ok()?,
Time::from_hms(hour, 0, 0).ok()?,
))
}
fn all_hourly_files(root: &Path) -> Option<Vec<PathBuf>> {
let dir = root.join(HOURLY_SUBDIR);
let mut files = Vec::new();
for entry in std::fs::read_dir(dir).ok()? {
let file = entry.ok()?.path();
let subfiles: Vec<_> = std::fs::read_dir(&file)
.ok()?
.filter_map(|f| f.ok().map(|f| f.path()))
.filter(|p| Self::datetime_from_path(p).is_some())
.collect();
files.extend(subfiles);
}
files.sort();
Some(files)
}
fn find_latest_hourly_file(root: &Path) -> Option<PathBuf> {
Self::all_hourly_files(root)?.last().cloned()
}
async fn try_backfill_local_blocks(
root: &Path,
cache: &Arc<Mutex<LocalBlocksCache>>,
cutoff_height: u64,
) -> eyre::Result<()> {
let mut u_cache = cache.lock().await;
for subfile in Self::all_hourly_files(root).unwrap_or_default() {
let mut file = File::open(&subfile).expect("Failed to open hour file path");
if let Some((_, height)) = read_last_complete_line(&mut file) {
if height < cutoff_height {
continue;
}
} else {
warn!("Failed to parse last line of file, fallback to slow path: {:?}", subfile);
}
let mut scan_result = scan_hour_file(
&subfile,
&mut 0,
ScanOptions { start_height: cutoff_height, only_load_ranges: true },
);
// Only store the block ranges for now; actual block data will be loaded lazily later to
// optimize memory usage
scan_result.new_blocks.clear();
u_cache.load_scan_result(scan_result);
}
if u_cache.ranges.is_empty() {
warn!("No ranges found in {:?}", root);
} else {
let (min, _) = u_cache.ranges.first_range_value().unwrap();
let (max, _) = u_cache.ranges.last_range_value().unwrap();
info!(
"Populated {} ranges (min: {}, max: {})",
u_cache.ranges.len(),
min.start(),
max.end()
);
}
Ok(())
}
async fn start_local_ingest_loop(&self, current_head: u64) {
let root = self.local_ingest_dir.to_owned();
let cache = self.local_blocks_cache.clone();
tokio::spawn(async move {
let mut next_height = current_head;
// Wait for the first hourly file to be created
let mut dt = loop {
if let Some(latest_file) = Self::find_latest_hourly_file(&root) {
break Self::datetime_from_path(&latest_file).unwrap();
}
tokio::time::sleep(TAIL_INTERVAL).await;
};
let mut hour = dt.hour();
let mut day_str = date_from_datetime(dt);
let mut last_line = 0;
info!("Starting local ingest loop from height: {:?}", current_head);
loop {
let hour_file = root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"));
if hour_file.exists() {
let scan_result = scan_hour_file(
&hour_file,
&mut last_line,
ScanOptions { start_height: next_height, only_load_ranges: false },
);
next_height = scan_result.next_expected_height;
let mut u_cache = cache.lock().await;
u_cache.load_scan_result(scan_result);
}
let now = OffsetDateTime::now_utc();
if dt + Duration::HOUR < now {
dt += Duration::HOUR;
hour = dt.hour();
day_str = date_from_datetime(dt);
last_line = 0;
info!(
"Moving to a new file. {:?}",
root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"))
);
continue;
}
tokio::time::sleep(TAIL_INTERVAL).await;
}
});
}
pub(crate) async fn run(&self, next_block_number: u64) -> eyre::Result<()> {
let _ = Self::try_backfill_local_blocks(
&self.local_ingest_dir,
&self.local_blocks_cache,
next_block_number,
)
.await;
self.start_local_ingest_loop(next_block_number).await;
Ok(())
}
pub async fn new(
fallback: BlockSourceBoxed,
local_ingest_dir: PathBuf,
next_block_number: u64,
) -> Self {
let block_source = HlNodeBlockSource {
fallback,
local_ingest_dir,
local_blocks_cache: Arc::new(Mutex::new(LocalBlocksCache::new())),
last_local_fetch: Arc::new(Mutex::new(None)),
};
block_source.run(next_block_number).await.unwrap();
block_source
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
node::types::{reth_compat, ReadPrecompileCalls},
pseudo_peer::sources::LocalBlockSource,
};
use alloy_consensus::{BlockBody, Header};
use alloy_primitives::{Address, Bloom, Bytes, B256, B64, U256};
use std::{io::Write, time::Duration};
#[test]
fn test_datetime_from_path() {
let path = Path::new("/home/username/hl/data/evm_block_and_receipts/hourly/20250731/4");
let dt = HlNodeBlockSource::datetime_from_path(path).unwrap();
println!("{dt:?}");
}
#[tokio::test]
async fn test_backfill() {
let test_path = Path::new("/root/evm_block_and_receipts");
if !test_path.exists() {
return;
}
let cache = Arc::new(Mutex::new(LocalBlocksCache::new()));
HlNodeBlockSource::try_backfill_local_blocks(test_path, &cache, 1000000).await.unwrap();
let u_cache = cache.lock().await;
println!("{:?}", u_cache.ranges);
assert_eq!(
u_cache.ranges.get(&9735058),
Some(&test_path.join(HOURLY_SUBDIR).join("20250729").join("22"))
);
}
fn scan_result_from_single_block(block: BlockAndReceipts) -> ScanResult {
let height = match &block.block {
EvmBlock::Reth115(b) => b.header.header.number,
};
ScanResult {
path: PathBuf::from("/nonexistent-block"),
next_expected_height: height + 1,
new_blocks: vec![block],
new_block_ranges: vec![height..=height],
}
}
fn empty_block(
number: u64,
timestamp: u64,
extra_data: &'static [u8],
) -> LocalBlockAndReceipts {
let extra_data = Bytes::from_static(extra_data);
let res = BlockAndReceipts {
block: EvmBlock::Reth115(reth_compat::SealedBlock {
header: reth_compat::SealedHeader {
header: Header {
parent_hash: B256::ZERO,
ommers_hash: B256::ZERO,
beneficiary: Address::ZERO,
state_root: B256::ZERO,
transactions_root: B256::ZERO,
receipts_root: B256::ZERO,
logs_bloom: Bloom::ZERO,
difficulty: U256::ZERO,
number,
gas_limit: 0,
gas_used: 0,
timestamp,
extra_data,
mix_hash: B256::ZERO,
nonce: B64::ZERO,
base_fee_per_gas: None,
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
requests_hash: None,
},
hash: B256::ZERO,
},
body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None },
}),
receipts: vec![],
system_txs: vec![],
read_precompile_calls: ReadPrecompileCalls(vec![]),
highest_precompile_address: None,
};
LocalBlockAndReceipts(timestamp.to_string(), res)
}
fn setup_temp_dir_and_file() -> eyre::Result<(tempfile::TempDir, File)> {
let now = OffsetDateTime::now_utc();
let day_str = date_from_datetime(now);
let hour = now.hour();
let temp_dir = tempfile::tempdir()?;
let path = temp_dir.path().join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"));
std::fs::create_dir_all(path.parent().unwrap())?;
Ok((temp_dir, File::create(path)?))
}
struct BlockSourceHierarchy {
block_source: HlNodeBlockSource,
_temp_dir: tempfile::TempDir,
file1: File,
current_block: LocalBlockAndReceipts,
future_block_hl_node: LocalBlockAndReceipts,
future_block_fallback: LocalBlockAndReceipts,
}
async fn setup_block_source_hierarchy() -> eyre::Result<BlockSourceHierarchy> {
// Setup fallback block source
let block_source_fallback = HlNodeBlockSource::new(
BlockSourceBoxed::new(Box::new(LocalBlockSource::new("/nonexistent"))),
PathBuf::from("/nonexistent"),
1000000,
)
.await;
let block_hl_node_0 = empty_block(1000000, 1722633600, b"hl-node");
let block_hl_node_1 = empty_block(1000001, 1722633600, b"hl-node");
let block_fallback_1 = empty_block(1000001, 1722633600, b"fallback");
let (temp_dir1, mut file1) = setup_temp_dir_and_file()?;
writeln!(&mut file1, "{}", serde_json::to_string(&block_hl_node_0)?)?;
let block_source = HlNodeBlockSource::new(
BlockSourceBoxed::new(Box::new(block_source_fallback.clone())),
temp_dir1.path().to_path_buf(),
1000000,
)
.await;
block_source_fallback
.local_blocks_cache
.lock()
.await
.load_scan_result(scan_result_from_single_block(block_fallback_1.1.clone()));
Ok(BlockSourceHierarchy {
block_source,
_temp_dir: temp_dir1,
file1,
current_block: block_hl_node_0,
future_block_hl_node: block_hl_node_1,
future_block_fallback: block_fallback_1,
})
}
#[tokio::test]
async fn test_update_last_fetch_no_fallback() -> eyre::Result<()> {
let hierarchy = setup_block_source_hierarchy().await?;
let BlockSourceHierarchy {
block_source,
current_block,
future_block_hl_node,
mut file1,
..
} = hierarchy;
let block = block_source.collect_block(1000000).await.unwrap();
assert_eq!(block, current_block.1);
let block = block_source.collect_block(1000001).await;
assert!(block.is_err());
writeln!(&mut file1, "{}", serde_json::to_string(&future_block_hl_node)?)?;
tokio::time::sleep(Duration::from_millis(100)).await;
let block = block_source.collect_block(1000001).await.unwrap();
assert_eq!(block, future_block_hl_node.1);
Ok(())
}
#[tokio::test]
async fn test_update_last_fetch_fallback() -> eyre::Result<()> {
let hierarchy = setup_block_source_hierarchy().await?;
let BlockSourceHierarchy {
block_source,
current_block,
future_block_fallback,
mut file1,
..
} = hierarchy;
let block = block_source.collect_block(1000000).await.unwrap();
assert_eq!(block, current_block.1);
tokio::time::sleep(HlNodeBlockSource::MAX_ALLOWED_THRESHOLD_BEFORE_FALLBACK.unsigned_abs())
.await;
writeln!(&mut file1, "{}", serde_json::to_string(&future_block_fallback)?)?;
let block = block_source.collect_block(1000001).await.unwrap();
assert_eq!(block, future_block_fallback.1);
Ok(())
}
}


@ -0,0 +1,51 @@
use super::scan::ScanResult;
use crate::node::types::{BlockAndReceipts, EvmBlock};
use rangemap::RangeInclusiveMap;
use reth_network::cache::LruMap;
use std::path::{Path, PathBuf};
use tracing::{info, warn};
#[derive(Debug)]
pub struct LocalBlocksCache {
cache: LruMap<u64, BlockAndReceipts>,
ranges: RangeInclusiveMap<u64, PathBuf>,
}
impl LocalBlocksCache {
pub fn new(cache_size: u32) -> Self {
Self { cache: LruMap::new(cache_size), ranges: RangeInclusiveMap::new() }
}
pub fn load_scan_result(&mut self, scan_result: ScanResult) {
for blk in scan_result.new_blocks {
let EvmBlock::Reth115(b) = &blk.block;
self.cache.insert(b.header.header.number, blk);
}
for range in scan_result.new_block_ranges {
self.ranges.insert(range, scan_result.path.clone());
}
}
pub fn get_block(&mut self, height: u64) -> Option<BlockAndReceipts> {
self.cache.remove(&height)
}
pub fn get_path_for_height(&self, height: u64) -> Option<PathBuf> {
self.ranges.get(&height).cloned()
}
pub fn log_range_summary(&self, root: &Path) {
if self.ranges.is_empty() {
warn!("No ranges found in {:?}", root);
} else {
let (min, max) =
(self.ranges.first_range_value().unwrap(), self.ranges.last_range_value().unwrap());
info!(
"Populated {} ranges (min: {}, max: {})",
self.ranges.len(),
min.0.start(),
max.0.end()
);
}
}
}
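The two-level design above, an LRU of decoded blocks plus a range map from heights to hour files, is what keeps lazy loading cheap: a cache miss only tells us which file to rescan. A standalone sketch of the range-map half, using the same rangemap crate (paths are illustrative):

use rangemap::RangeInclusiveMap;
use std::path::PathBuf;

fn main() {
    let mut ranges: RangeInclusiveMap<u64, PathBuf> = RangeInclusiveMap::new();
    // Each scanned hour file contributes one or more contiguous height ranges.
    ranges.insert(1_000_000..=1_003_599, PathBuf::from("hourly/20250729/21"));
    ranges.insert(1_003_600..=1_007_199, PathBuf::from("hourly/20250729/22"));
    // A point query maps a missed height back to the file that contains it.
    assert_eq!(ranges.get(&1_003_700), Some(&PathBuf::from("hourly/20250729/22")));
}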


@ -0,0 +1,67 @@
use super::{scan::Scanner, time_utils::TimeUtils, HOURLY_SUBDIR};
use crate::node::types::BlockAndReceipts;
use std::{
fs::File,
io::{Read, Seek, SeekFrom},
path::{Path, PathBuf},
};
pub struct FileOperations;
impl FileOperations {
pub fn all_hourly_files(root: &Path) -> Option<Vec<PathBuf>> {
let mut files = Vec::new();
for entry in std::fs::read_dir(root.join(HOURLY_SUBDIR)).ok()? {
let dir = entry.ok()?.path();
if let Ok(subentries) = std::fs::read_dir(&dir) {
files.extend(
subentries
.filter_map(|f| f.ok().map(|f| f.path()))
.filter_map(|p| TimeUtils::datetime_from_path(&p).map(|dt| (dt, p))),
);
}
}
files.sort();
Some(files.into_iter().map(|(_, p)| p).collect())
}
pub fn find_latest_hourly_file(root: &Path) -> Option<PathBuf> {
Self::all_hourly_files(root)?.into_iter().last()
}
pub fn read_last_block_from_file(path: &Path) -> Option<(BlockAndReceipts, u64)> {
let mut file = File::open(path).ok()?;
Self::read_last_complete_line(&mut file)
}
fn read_last_complete_line<R: Read + Seek>(read: &mut R) -> Option<(BlockAndReceipts, u64)> {
const CHUNK_SIZE: u64 = 50000;
let mut buf = Vec::with_capacity(CHUNK_SIZE as usize);
let mut pos = read.seek(SeekFrom::End(0)).unwrap();
let mut last_line = Vec::new();
while pos > 0 {
let read_size = pos.min(CHUNK_SIZE);
buf.resize(read_size as usize, 0);
read.seek(SeekFrom::Start(pos - read_size)).unwrap();
read.read_exact(&mut buf).unwrap();
last_line = [buf.clone(), last_line].concat();
if last_line.ends_with(b"\n") {
last_line.pop();
}
if let Some(idx) = last_line.iter().rposition(|&b| b == b'\n') {
let candidate = &last_line[idx + 1..];
if let Ok(result) = Scanner::line_to_evm_block(str::from_utf8(candidate).unwrap()) {
return Some(result);
}
last_line.truncate(idx);
}
if pos < read_size {
break;
}
pos -= read_size;
}
Scanner::line_to_evm_block(&String::from_utf8(last_line).unwrap()).ok()
}
}
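read_last_complete_line only requires Read + Seek, so the backwards chunked scan is easy to exercise against an in-memory buffer. A simplified standalone sketch of the same technique (a tiny chunk size stands in for the real 50000-byte one, and plain lines stand in for JSON block records):

use std::io::{Cursor, Read, Seek, SeekFrom};

// Find the last newline-terminated line without reading the whole stream.
fn last_line<R: Read + Seek>(read: &mut R) -> Option<String> {
    const CHUNK: u64 = 8;
    let mut pos = read.seek(SeekFrom::End(0)).ok()?;
    let mut tail = Vec::new();
    while pos > 0 {
        let n = pos.min(CHUNK);
        let mut buf = vec![0u8; n as usize];
        read.seek(SeekFrom::Start(pos - n)).ok()?;
        read.read_exact(&mut buf).ok()?;
        tail = [buf, tail].concat();
        if tail.ends_with(b"\n") {
            tail.pop(); // drop the trailing newline once
        }
        if let Some(idx) = tail.iter().rposition(|&b| b == b'\n') {
            return String::from_utf8(tail[idx + 1..].to_vec()).ok();
        }
        pos -= n;
    }
    String::from_utf8(tail).ok()
}

fn main() {
    let mut data = Cursor::new(b"first\nsecond\nthird\n".to_vec());
    assert_eq!(last_line(&mut data).as_deref(), Some("third"));
}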


@ -0,0 +1,246 @@
mod cache;
mod file_ops;
mod scan;
#[cfg(test)]
mod tests;
mod time_utils;
use self::{
cache::LocalBlocksCache,
file_ops::FileOperations,
scan::{ScanOptions, Scanner},
time_utils::TimeUtils,
};
use super::{BlockSource, BlockSourceBoxed};
use crate::node::types::BlockAndReceipts;
use futures::future::BoxFuture;
use reth_metrics::{metrics, metrics::Counter, Metrics};
use std::{
path::{Path, PathBuf},
sync::Arc,
time::Duration,
};
use time::OffsetDateTime;
use tokio::sync::Mutex;
use tracing::{info, warn};
const HOURLY_SUBDIR: &str = "hourly";
const CACHE_SIZE: u32 = 8000; // 3660 blocks per hour
const ONE_HOUR: Duration = Duration::from_secs(60 * 60);
const TAIL_INTERVAL: Duration = Duration::from_millis(25);
#[derive(Debug, Clone)]
pub struct HlNodeBlockSourceArgs {
pub root: PathBuf,
pub fallback_threshold: Duration,
}
/// Block source that monitors the local ingest directory for the HL node.
#[derive(Debug, Clone)]
pub struct HlNodeBlockSource {
pub fallback: BlockSourceBoxed,
pub local_blocks_cache: Arc<Mutex<LocalBlocksCache>>,
pub last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>,
pub args: HlNodeBlockSourceArgs,
pub metrics: HlNodeBlockSourceMetrics,
}
#[derive(Metrics, Clone)]
#[metrics(scope = "block_source.hl_node")]
pub struct HlNodeBlockSourceMetrics {
/// How many times a block was served from the hl-node local ingest data
pub fetched_from_hl_node: Counter,
/// How many times a block was fetched from the fallback source
pub fetched_from_fallback: Counter,
}
impl BlockSource for HlNodeBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
let fallback = self.fallback.clone();
let args = self.args.clone();
let local_blocks_cache = self.local_blocks_cache.clone();
let last_local_fetch = self.last_local_fetch.clone();
let metrics = self.metrics.clone();
Box::pin(async move {
let now = OffsetDateTime::now_utc();
if let Some(block) = Self::try_collect_local_block(local_blocks_cache, height).await {
Self::update_last_fetch(last_local_fetch, height, now).await;
metrics.fetched_from_hl_node.increment(1);
return Ok(block);
}
if let Some((last_height, last_poll_time)) = *last_local_fetch.lock().await {
let more_recent = last_height < height;
let too_soon = now - last_poll_time < args.fallback_threshold;
if more_recent && too_soon {
return Err(eyre::eyre!(
"Not found locally; limiting polling rate before fallback so that hl-node has chance to catch up"
));
}
}
let block = fallback.collect_block(height).await?;
metrics.fetched_from_fallback.increment(1);
Self::update_last_fetch(last_local_fetch, height, now).await;
Ok(block)
})
}
fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
let fallback = self.fallback.clone();
let args = self.args.clone();
Box::pin(async move {
let Some(dir) = FileOperations::find_latest_hourly_file(&args.root) else {
warn!(
"No EVM blocks from hl-node found at {:?}; fallback to s3/ingest-dir",
args.root
);
return fallback.find_latest_block_number().await;
};
match FileOperations::read_last_block_from_file(&dir) {
Some((_, height)) => {
info!("Latest block number: {} with path {}", height, dir.display());
Some(height)
}
None => {
warn!(
"Failed to parse the hl-node hourly file at {:?}; fallback to s3/ingest-dir",
dir
);
fallback.find_latest_block_number().await
}
}
})
}
fn recommended_chunk_size(&self) -> u64 {
self.fallback.recommended_chunk_size()
}
}
impl HlNodeBlockSource {
async fn update_last_fetch(
last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>,
height: u64,
now: OffsetDateTime,
) {
let mut last_fetch = last_local_fetch.lock().await;
if last_fetch.is_none_or(|(h, _)| h < height) {
*last_fetch = Some((height, now));
}
}
async fn try_collect_local_block(
local_blocks_cache: Arc<Mutex<LocalBlocksCache>>,
height: u64,
) -> Option<BlockAndReceipts> {
let mut u_cache = local_blocks_cache.lock().await;
if let Some(block) = u_cache.get_block(height) {
return Some(block);
}
let path = u_cache.get_path_for_height(height)?;
info!("Loading block data from {:?}", path);
let scan_result = Scanner::scan_hour_file(
&path,
&mut 0,
ScanOptions { start_height: 0, only_load_ranges: false },
);
u_cache.load_scan_result(scan_result);
u_cache.get_block(height)
}
async fn try_backfill_local_blocks(
root: &Path,
cache: &Arc<Mutex<LocalBlocksCache>>,
cutoff_height: u64,
) -> eyre::Result<()> {
let mut u_cache = cache.lock().await;
for subfile in FileOperations::all_hourly_files(root).unwrap_or_default() {
if let Some((_, height)) = FileOperations::read_last_block_from_file(&subfile) {
if height < cutoff_height {
continue;
}
} else {
warn!("Failed to parse last line of file: {:?}", subfile);
}
let mut scan_result = Scanner::scan_hour_file(
&subfile,
&mut 0,
ScanOptions { start_height: cutoff_height, only_load_ranges: true },
);
scan_result.new_blocks.clear(); // Only store ranges, load data lazily
u_cache.load_scan_result(scan_result);
}
u_cache.log_range_summary(root);
Ok(())
}
async fn start_local_ingest_loop(&self, current_head: u64) {
let root = self.args.root.to_owned();
let cache = self.local_blocks_cache.clone();
tokio::spawn(async move {
let mut next_height = current_head;
let mut dt = loop {
if let Some(f) = FileOperations::find_latest_hourly_file(&root) {
break TimeUtils::datetime_from_path(&f).unwrap();
}
tokio::time::sleep(TAIL_INTERVAL).await;
};
let (mut hour, mut day_str, mut last_line) =
(dt.hour(), TimeUtils::date_from_datetime(dt), 0);
info!("Starting local ingest loop from height: {}", current_head);
loop {
let hour_file = root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"));
if hour_file.exists() {
let scan_result = Scanner::scan_hour_file(
&hour_file,
&mut last_line,
ScanOptions { start_height: next_height, only_load_ranges: false },
);
next_height = scan_result.next_expected_height;
cache.lock().await.load_scan_result(scan_result);
}
let now = OffsetDateTime::now_utc();
if dt + ONE_HOUR < now {
dt += ONE_HOUR;
(hour, day_str, last_line) = (dt.hour(), TimeUtils::date_from_datetime(dt), 0);
info!(
"Moving to new file: {:?}",
root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"))
);
continue;
}
tokio::time::sleep(TAIL_INTERVAL).await;
}
});
}
pub(crate) async fn run(&self, next_block_number: u64) -> eyre::Result<()> {
let _ = Self::try_backfill_local_blocks(
&self.args.root,
&self.local_blocks_cache,
next_block_number,
)
.await;
self.start_local_ingest_loop(next_block_number).await;
Ok(())
}
pub async fn new(
fallback: BlockSourceBoxed,
args: HlNodeBlockSourceArgs,
next_block_number: u64,
) -> Self {
let block_source = Self {
fallback,
args,
local_blocks_cache: Arc::new(Mutex::new(LocalBlocksCache::new(CACHE_SIZE))),
last_local_fetch: Arc::new(Mutex::new(None)),
metrics: HlNodeBlockSourceMetrics::default(),
};
block_source.run(next_block_number).await.unwrap();
block_source
}
}
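Wiring it up end to end looks roughly like this; the crate import path, directories, and threshold are assumptions for illustration (the hl-node writes hourly files under root/hourly/yyyymmdd/hour):

use reth_hl::pseudo_peer::sources::{
    BlockSource, BlockSourceBoxed, HlNodeBlockSource, HlNodeBlockSourceArgs, LocalBlockSource,
};
use std::{path::PathBuf, sync::Arc, time::Duration};

#[tokio::main]
async fn main() -> eyre::Result<()> {
    // Serve from hl-node hourly files first, fall back to --ingest-dir blocks.
    let fallback: BlockSourceBoxed = Arc::new(Box::new(LocalBlockSource::new("/data/ingest")));
    let source = HlNodeBlockSource::new(
        fallback,
        HlNodeBlockSourceArgs {
            root: PathBuf::from("/home/user/hl/data/evm_block_and_receipts"),
            fallback_threshold: Duration::from_millis(5000),
        },
        1_000_000, // next block number to ingest
    )
    .await;
    let _block = source.collect_block(1_000_000).await?;
    Ok(())
}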


@ -0,0 +1,91 @@
use crate::node::types::{BlockAndReceipts, EvmBlock};
use serde::{Deserialize, Serialize};
use std::{
fs::File,
io::{BufRead, BufReader},
ops::RangeInclusive,
path::{Path, PathBuf},
};
use tracing::warn;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LocalBlockAndReceipts(pub String, pub BlockAndReceipts);
pub struct ScanResult {
pub path: PathBuf,
pub next_expected_height: u64,
pub new_blocks: Vec<BlockAndReceipts>,
pub new_block_ranges: Vec<RangeInclusive<u64>>,
}
pub struct ScanOptions {
pub start_height: u64,
pub only_load_ranges: bool,
}
pub struct Scanner;
impl Scanner {
pub fn line_to_evm_block(line: &str) -> serde_json::Result<(BlockAndReceipts, u64)> {
let LocalBlockAndReceipts(_, parsed_block): LocalBlockAndReceipts =
serde_json::from_str(line)?;
let height = match &parsed_block.block {
EvmBlock::Reth115(b) => b.header.header.number,
};
Ok((parsed_block, height))
}
pub fn scan_hour_file(path: &Path, last_line: &mut usize, options: ScanOptions) -> ScanResult {
let lines: Vec<String> =
BufReader::new(File::open(path).expect("Failed to open hour file"))
.lines()
.collect::<Result<_, _>>()
.unwrap();
let skip = if *last_line == 0 { 0 } else { *last_line - 1 };
let mut new_blocks = Vec::new();
let mut last_height = options.start_height;
let mut block_ranges = Vec::new();
let mut current_range: Option<(u64, u64)> = None;
for (line_idx, line) in lines.iter().enumerate().skip(skip) {
if line_idx < *last_line || line.trim().is_empty() {
continue;
}
match Self::line_to_evm_block(line) {
Ok((parsed_block, height)) => {
if height >= options.start_height {
last_height = last_height.max(height);
if !options.only_load_ranges {
new_blocks.push(parsed_block);
}
*last_line = line_idx;
}
match current_range {
Some((start, end)) if end + 1 == height => {
current_range = Some((start, height))
}
_ => {
if let Some((start, end)) = current_range.take() {
block_ranges.push(start..=end);
}
current_range = Some((height, height));
}
}
}
Err(_) => warn!("Failed to parse line: {}...", line.get(0..50).unwrap_or(line)),
}
}
if let Some((start, end)) = current_range {
block_ranges.push(start..=end);
}
ScanResult {
path: path.to_path_buf(),
next_expected_height: last_height + 1,
new_blocks,
new_block_ranges: block_ranges,
}
}
}
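The range bookkeeping inside scan_hour_file reduces to grouping consecutive heights into inclusive ranges. The same fold in isolation:

use std::ops::RangeInclusive;

// Collapse a stream of heights into contiguous inclusive ranges, the way
// scan_hour_file accumulates new_block_ranges.
fn to_ranges(heights: impl IntoIterator<Item = u64>) -> Vec<RangeInclusive<u64>> {
    let mut ranges = Vec::new();
    let mut current: Option<(u64, u64)> = None;
    for h in heights {
        current = match current {
            Some((start, end)) if end + 1 == h => Some((start, h)),
            other => {
                if let Some((start, end)) = other {
                    ranges.push(start..=end); // close the previous run
                }
                Some((h, h))
            }
        };
    }
    if let Some((start, end)) = current {
        ranges.push(start..=end);
    }
    ranges
}

fn main() {
    assert_eq!(to_ranges([5, 6, 7, 10, 11]), vec![5..=7, 10..=11]);
}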


@ -0,0 +1,214 @@
use super::*;
use crate::{
node::types::{reth_compat, ReadPrecompileCalls},
pseudo_peer::sources::{hl_node::scan::LocalBlockAndReceipts, LocalBlockSource},
};
use alloy_consensus::{BlockBody, Header};
use alloy_primitives::{Address, Bloom, Bytes, B256, B64, U256};
use std::{io::Write, time::Duration};
const DEFAULT_FALLBACK_THRESHOLD_FOR_TEST: Duration = Duration::from_millis(5000);
#[test]
fn test_datetime_from_path() {
let path = Path::new("/home/username/hl/data/evm_block_and_receipts/hourly/20250731/4");
let dt = TimeUtils::datetime_from_path(path).unwrap();
println!("{dt:?}");
}
#[tokio::test]
async fn test_backfill() {
let test_path = Path::new("/root/evm_block_and_receipts");
if !test_path.exists() {
return;
}
let cache = Arc::new(Mutex::new(LocalBlocksCache::new(CACHE_SIZE)));
HlNodeBlockSource::try_backfill_local_blocks(test_path, &cache, 1000000).await.unwrap();
let u_cache = cache.lock().await;
assert_eq!(
u_cache.get_path_for_height(9735058),
Some(test_path.join(HOURLY_SUBDIR).join("20250729").join("22"))
);
}
fn scan_result_from_single_block(block: BlockAndReceipts) -> scan::ScanResult {
use crate::node::types::EvmBlock;
let height = match &block.block {
EvmBlock::Reth115(b) => b.header.header.number,
};
scan::ScanResult {
path: PathBuf::from("/nonexistent-block"),
next_expected_height: height + 1,
new_blocks: vec![block],
new_block_ranges: vec![height..=height],
}
}
fn empty_block(number: u64, timestamp: u64, extra_data: &'static [u8]) -> LocalBlockAndReceipts {
use crate::node::types::EvmBlock;
LocalBlockAndReceipts(
timestamp.to_string(),
BlockAndReceipts {
block: EvmBlock::Reth115(reth_compat::SealedBlock {
header: reth_compat::SealedHeader {
header: Header {
parent_hash: B256::ZERO,
ommers_hash: B256::ZERO,
beneficiary: Address::ZERO,
state_root: B256::ZERO,
transactions_root: B256::ZERO,
receipts_root: B256::ZERO,
logs_bloom: Bloom::ZERO,
difficulty: U256::ZERO,
number,
gas_limit: 0,
gas_used: 0,
timestamp,
extra_data: Bytes::from_static(extra_data),
mix_hash: B256::ZERO,
nonce: B64::ZERO,
base_fee_per_gas: None,
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
requests_hash: None,
},
hash: B256::ZERO,
},
body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None },
}),
receipts: vec![],
system_txs: vec![],
read_precompile_calls: ReadPrecompileCalls(vec![]),
highest_precompile_address: None,
},
)
}
fn setup_temp_dir_and_file() -> eyre::Result<(tempfile::TempDir, std::fs::File)> {
let now = OffsetDateTime::now_utc();
let temp_dir = tempfile::tempdir()?;
let path = temp_dir
.path()
.join(HOURLY_SUBDIR)
.join(TimeUtils::date_from_datetime(now))
.join(format!("{}", now.hour()));
std::fs::create_dir_all(path.parent().unwrap())?;
Ok((temp_dir, std::fs::File::create(path)?))
}
struct BlockSourceHierarchy {
block_source: HlNodeBlockSource,
_temp_dir: tempfile::TempDir,
file1: std::fs::File,
current_block: LocalBlockAndReceipts,
future_block_hl_node: LocalBlockAndReceipts,
future_block_fallback: LocalBlockAndReceipts,
}
async fn setup_block_source_hierarchy() -> eyre::Result<BlockSourceHierarchy> {
// Setup fallback block source
let block_source_fallback = HlNodeBlockSource::new(
BlockSourceBoxed::new(Box::new(LocalBlockSource::new("/nonexistent"))),
HlNodeBlockSourceArgs {
root: { PathBuf::from("/nonexistent") },
fallback_threshold: DEFAULT_FALLBACK_THRESHOLD_FOR_TEST,
},
1000000,
)
.await;
let block_hl_node_0 = empty_block(1000000, 1722633600, b"hl-node");
let block_hl_node_1 = empty_block(1000001, 1722633600, b"hl-node");
let block_fallback_1 = empty_block(1000001, 1722633600, b"fallback");
let (temp_dir1, mut file1) = setup_temp_dir_and_file()?;
writeln!(&mut file1, "{}", serde_json::to_string(&block_hl_node_0)?)?;
let block_source = HlNodeBlockSource::new(
BlockSourceBoxed::new(Box::new(block_source_fallback.clone())),
HlNodeBlockSourceArgs {
root: temp_dir1.path().to_path_buf(),
fallback_threshold: DEFAULT_FALLBACK_THRESHOLD_FOR_TEST,
},
1000000,
)
.await;
block_source_fallback
.local_blocks_cache
.lock()
.await
.load_scan_result(scan_result_from_single_block(block_fallback_1.1.clone()));
Ok(BlockSourceHierarchy {
block_source,
_temp_dir: temp_dir1,
file1,
current_block: block_hl_node_0,
future_block_hl_node: block_hl_node_1,
future_block_fallback: block_fallback_1,
})
}
#[tokio::test]
async fn test_update_last_fetch_no_fallback() -> eyre::Result<()> {
let hierarchy = setup_block_source_hierarchy().await?;
let BlockSourceHierarchy {
block_source, current_block, future_block_hl_node, mut file1, ..
} = hierarchy;
let block = block_source.collect_block(1000000).await.unwrap();
assert_eq!(block, current_block.1);
let block = block_source.collect_block(1000001).await;
assert!(block.is_err());
writeln!(&mut file1, "{}", serde_json::to_string(&future_block_hl_node)?)?;
tokio::time::sleep(Duration::from_millis(100)).await;
let block = block_source.collect_block(1000001).await.unwrap();
assert_eq!(block, future_block_hl_node.1);
Ok(())
}
#[tokio::test]
async fn test_update_last_fetch_fallback() -> eyre::Result<()> {
let hierarchy = setup_block_source_hierarchy().await?;
let BlockSourceHierarchy {
block_source, current_block, future_block_fallback, mut file1, ..
} = hierarchy;
let block = block_source.collect_block(1000000).await.unwrap();
assert_eq!(block, current_block.1);
tokio::time::sleep(DEFAULT_FALLBACK_THRESHOLD_FOR_TEST).await;
writeln!(&mut file1, "{}", serde_json::to_string(&future_block_fallback)?)?;
let block = block_source.collect_block(1000001).await.unwrap();
assert_eq!(block, future_block_fallback.1);
Ok(())
}
#[test]
fn test_hourly_files_sort() -> eyre::Result<()> {
let temp_dir = tempfile::tempdir()?;
// create 20250826/9, 20250826/14
let targets = [("20250826", "9"), ("20250826", "14")];
for (date, hour) in targets {
let hourly_file = temp_dir.path().join(HOURLY_SUBDIR).join(date).join(hour);
let parent = hourly_file.parent().unwrap();
std::fs::create_dir_all(parent)?;
std::fs::File::create(hourly_file)?;
}
let files = FileOperations::all_hourly_files(temp_dir.path()).unwrap();
let file_names: Vec<_> =
files.into_iter().map(|p| p.file_name().unwrap().to_string_lossy().into_owned()).collect();
assert_eq!(file_names, ["9", "14"]);
Ok(())
}


@ -0,0 +1,19 @@
use std::path::Path;
use time::{macros::format_description, Date, OffsetDateTime, Time};
pub struct TimeUtils;
impl TimeUtils {
pub fn datetime_from_path(path: &Path) -> Option<OffsetDateTime> {
let (dt_part, hour_part) =
(path.parent()?.file_name()?.to_str()?, path.file_name()?.to_str()?);
Some(OffsetDateTime::new_utc(
Date::parse(dt_part, &format_description!("[year][month][day]")).ok()?,
Time::from_hms(hour_part.parse().ok()?, 0, 0).ok()?,
))
}
pub fn date_from_datetime(dt: OffsetDateTime) -> String {
dt.format(&format_description!("[year][month][day]")).unwrap()
}
}
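The two helpers are inverses over the date component; a quick standalone check of the round trip with the same time crate calls (the example path follows the hourly/yyyymmdd/hour layout):

use time::{macros::format_description, Date, OffsetDateTime, Time};

fn main() {
    let date = Date::parse("20250731", &format_description!("[year][month][day]")).unwrap();
    let dt = OffsetDateTime::new_utc(date, Time::from_hms(4, 0, 0).unwrap());
    assert_eq!(dt.format(&format_description!("[year][month][day]")).unwrap(), "20250731");
    assert_eq!(dt.hour(), 4);
}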


@ -0,0 +1,79 @@
use super::{utils, BlockSource};
use crate::node::types::BlockAndReceipts;
use eyre::Context;
use futures::{future::BoxFuture, FutureExt};
use reth_metrics::{metrics, metrics::Counter, Metrics};
use std::path::PathBuf;
use tracing::info;
/// Block source that reads blocks from local filesystem (--ingest-dir)
#[derive(Debug, Clone)]
pub struct LocalBlockSource {
dir: PathBuf,
metrics: LocalBlockSourceMetrics,
}
#[derive(Metrics, Clone)]
#[metrics(scope = "block_source.local")]
pub struct LocalBlockSourceMetrics {
/// How many times the local block source has polled for a block
pub polling_attempt: Counter,
/// How many times the local block source has fetched a block from the filesystem
pub fetched: Counter,
}
impl LocalBlockSource {
pub fn new(dir: impl Into<PathBuf>) -> Self {
Self { dir: dir.into(), metrics: LocalBlockSourceMetrics::default() }
}
async fn pick_path_with_highest_number(dir: PathBuf, is_dir: bool) -> Option<(u64, String)> {
let files = std::fs::read_dir(&dir).unwrap().collect::<Vec<_>>();
let files = files
.into_iter()
.filter(|path| path.as_ref().unwrap().path().is_dir() == is_dir)
.map(|entry| entry.unwrap().path().to_string_lossy().to_string())
.collect::<Vec<_>>();
utils::name_with_largest_number(&files, is_dir)
}
}
impl BlockSource for LocalBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
let dir = self.dir.clone();
let metrics = self.metrics.clone();
async move {
let path = dir.join(utils::rmp_path(height));
metrics.polling_attempt.increment(1);
let file = tokio::fs::read(&path)
.await
.wrap_err_with(|| format!("Failed to read block from {path:?}"))?;
let mut decoder = lz4_flex::frame::FrameDecoder::new(&file[..]);
let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
metrics.fetched.increment(1);
Ok(blocks[0].clone())
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
let dir = self.dir.clone();
async move {
let (_, first_level) = Self::pick_path_with_highest_number(dir.clone(), true).await?;
let (_, second_level) =
Self::pick_path_with_highest_number(dir.join(first_level), true).await?;
let (block_number, third_level) =
Self::pick_path_with_highest_number(dir.join(second_level), false).await?;
info!("Latest block number: {} with path {}", block_number, third_level);
Some(block_number)
}
.boxed()
}
fn recommended_chunk_size(&self) -> u64 {
1000
}
}


@ -1,269 +1,40 @@
use crate::node::types::BlockAndReceipts;
use aws_sdk_s3::types::RequestPayer;
use eyre::Context;
use futures::{future::BoxFuture, FutureExt};
use reth_network::cache::LruMap;
use std::{
path::PathBuf,
sync::{Arc, RwLock},
};
use tracing::info;
use auto_impl::auto_impl;
use futures::future::BoxFuture;
use std::{sync::Arc, time::Duration};
// Module declarations
mod cached;
mod hl_node;
pub use hl_node::HlNodeBlockSource;
mod local;
mod s3;
mod utils;
// Public exports
pub use cached::CachedBlockSource;
pub use hl_node::{HlNodeBlockSource, HlNodeBlockSourceArgs};
pub use local::LocalBlockSource;
pub use s3::S3BlockSource;
const DEFAULT_POLLING_INTERVAL: Duration = Duration::from_millis(25);
/// Trait for block sources that can retrieve blocks from various sources
#[auto_impl(&, &mut, Box, Arc)]
pub trait BlockSource: Send + Sync + std::fmt::Debug + Unpin + 'static {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>>;
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>>;
/// Retrieves a block at the specified height
fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>>;
/// Finds the latest block number available from this source
fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>>;
/// Returns the recommended chunk size for batch operations
fn recommended_chunk_size(&self) -> u64;
/// Returns the polling interval
fn polling_interval(&self) -> Duration {
DEFAULT_POLLING_INTERVAL
}
}
/// Type alias for a boxed block source
pub type BlockSourceBoxed = Arc<Box<dyn BlockSource>>;
fn name_with_largest_number(files: &[String], is_dir: bool) -> Option<(u64, String)> {
let mut files = files
.iter()
.filter_map(|file_raw| {
let file = file_raw.strip_suffix("/").unwrap_or(file_raw).split("/").last().unwrap();
let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
})
.collect::<Vec<_>>();
if files.is_empty() {
return None;
}
files.sort_by_key(|(number, _)| *number);
files.last().cloned()
}
#[derive(Debug, Clone)]
pub struct S3BlockSource {
client: aws_sdk_s3::Client,
bucket: String,
}
impl S3BlockSource {
pub fn new(client: aws_sdk_s3::Client, bucket: String) -> Self {
Self { client, bucket }
}
async fn pick_path_with_highest_number(
client: aws_sdk_s3::Client,
bucket: String,
dir: String,
is_dir: bool,
) -> Option<(u64, String)> {
let request = client
.list_objects()
.bucket(&bucket)
.prefix(dir)
.delimiter("/")
.request_payer(RequestPayer::Requester);
let response = request.send().await.ok()?;
let files: Vec<String> = if is_dir {
response
.common_prefixes
.unwrap()
.iter()
.map(|object| object.prefix.as_ref().unwrap().to_string())
.collect()
} else {
response
.contents
.unwrap()
.iter()
.map(|object| object.key.as_ref().unwrap().to_string())
.collect()
};
name_with_largest_number(&files, is_dir)
}
}
impl BlockSource for S3BlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
let client = self.client.clone();
let bucket = self.bucket.clone();
async move {
let path = rmp_path(height);
let request = client
.get_object()
.request_payer(RequestPayer::Requester)
.bucket(&bucket)
.key(path);
let response = request.send().await?;
let bytes = response.body.collect().await?.into_bytes();
let mut decoder = lz4_flex::frame::FrameDecoder::new(&bytes[..]);
let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
Ok(blocks[0].clone())
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
let client = self.client.clone();
let bucket = self.bucket.clone();
async move {
let (_, first_level) = Self::pick_path_with_highest_number(
client.clone(),
bucket.clone(),
"".to_string(),
true,
)
.await?;
let (_, second_level) = Self::pick_path_with_highest_number(
client.clone(),
bucket.clone(),
first_level,
true,
)
.await?;
let (block_number, third_level) = Self::pick_path_with_highest_number(
client.clone(),
bucket.clone(),
second_level,
false,
)
.await?;
info!("Latest block number: {} with path {}", block_number, third_level);
Some(block_number)
}
.boxed()
}
fn recommended_chunk_size(&self) -> u64 {
1000
}
}
impl BlockSource for LocalBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
let dir = self.dir.clone();
async move {
let path = dir.join(rmp_path(height));
let file = tokio::fs::read(&path)
.await
.wrap_err_with(|| format!("Failed to read block from {path:?}"))?;
let mut decoder = lz4_flex::frame::FrameDecoder::new(&file[..]);
let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
Ok(blocks[0].clone())
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
let dir = self.dir.clone();
async move {
let (_, first_level) = Self::pick_path_with_highest_number(dir.clone(), true).await?;
let (_, second_level) =
Self::pick_path_with_highest_number(dir.join(first_level), true).await?;
let (block_number, third_level) =
Self::pick_path_with_highest_number(dir.join(second_level), false).await?;
info!("Latest block number: {} with path {}", block_number, third_level);
Some(block_number)
}
.boxed()
}
fn recommended_chunk_size(&self) -> u64 {
1000
}
}
#[derive(Debug, Clone)]
pub struct LocalBlockSource {
dir: PathBuf,
}
impl LocalBlockSource {
pub fn new(dir: impl Into<PathBuf>) -> Self {
Self { dir: dir.into() }
}
fn name_with_largest_number_static(files: &[String], is_dir: bool) -> Option<(u64, String)> {
let mut files = files
.iter()
.filter_map(|file_raw| {
let file = file_raw.strip_suffix("/").unwrap_or(file_raw);
let file = file.split("/").last().unwrap();
let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
})
.collect::<Vec<_>>();
if files.is_empty() {
return None;
}
files.sort_by_key(|(number, _)| *number);
files.last().map(|(number, file)| (*number, file.to_string()))
}
async fn pick_path_with_highest_number(dir: PathBuf, is_dir: bool) -> Option<(u64, String)> {
let files = std::fs::read_dir(&dir).unwrap().collect::<Vec<_>>();
let files = files
.into_iter()
.filter(|path| path.as_ref().unwrap().path().is_dir() == is_dir)
.map(|entry| entry.unwrap().path().to_string_lossy().to_string())
.collect::<Vec<_>>();
Self::name_with_largest_number_static(&files, is_dir)
}
}
fn rmp_path(height: u64) -> String {
let f = ((height - 1) / 1_000_000) * 1_000_000;
let s = ((height - 1) / 1_000) * 1_000;
let path = format!("{f}/{s}/{height}.rmp.lz4");
path
}
impl BlockSource for BlockSourceBoxed {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
self.as_ref().collect_block(height)
}
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
self.as_ref().find_latest_block_number()
}
fn recommended_chunk_size(&self) -> u64 {
self.as_ref().recommended_chunk_size()
}
}
#[derive(Debug, Clone)]
pub struct CachedBlockSource {
block_source: BlockSourceBoxed,
cache: Arc<RwLock<LruMap<u64, BlockAndReceipts>>>,
}
impl CachedBlockSource {
const CACHE_LIMIT: u32 = 100000;
pub fn new(block_source: BlockSourceBoxed) -> Self {
Self { block_source, cache: Arc::new(RwLock::new(LruMap::new(Self::CACHE_LIMIT))) }
}
}
impl BlockSource for CachedBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
let block_source = self.block_source.clone();
let cache = self.cache.clone();
async move {
if let Some(block) = cache.write().unwrap().get(&height) {
return Ok(block.clone());
}
let block = block_source.collect_block(height).await?;
cache.write().unwrap().insert(height, block.clone());
Ok(block)
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
self.block_source.find_latest_block_number()
}
fn recommended_chunk_size(&self) -> u64 {
self.block_source.recommended_chunk_size()
}
}
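The reworked trait returns BoxFuture<'static, _>, so an implementor must clone whatever state it needs before entering the async block, as every source in this refactor does. A hypothetical minimal implementor against a stand-in trait of the same shape (all names here are illustrative, not part of the crate):

use futures::{future::BoxFuture, FutureExt};
use std::time::Duration;

#[derive(Debug, Clone, PartialEq)]
struct BlockAndReceipts; // stand-in for the crate's type

trait BlockSource: Send + Sync + std::fmt::Debug + Unpin + 'static {
    fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>>;
    fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>>;
    fn recommended_chunk_size(&self) -> u64;
    fn polling_interval(&self) -> Duration {
        Duration::from_millis(25)
    }
}

#[derive(Debug, Clone)]
struct FixedSource {
    latest: u64,
}

impl BlockSource for FixedSource {
    fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
        let latest = self.latest; // copy state out of &self before the 'static future
        async move {
            if height <= latest {
                Ok(BlockAndReceipts)
            } else {
                Err(eyre::eyre!("block {height} not produced yet"))
            }
        }
        .boxed()
    }
    fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
        let latest = self.latest;
        async move { Some(latest) }.boxed()
    }
    fn recommended_chunk_size(&self) -> u64 {
        1000
    }
}

#[tokio::main]
async fn main() {
    let src = FixedSource { latest: 42 };
    assert_eq!(src.find_latest_block_number().await, Some(42));
    assert!(src.collect_block(43).await.is_err());
}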


@ -0,0 +1,115 @@
use super::{utils, BlockSource};
use crate::node::types::BlockAndReceipts;
use aws_sdk_s3::types::RequestPayer;
use futures::{future::BoxFuture, FutureExt};
use reth_metrics::{metrics, metrics::Counter, Metrics};
use std::{sync::Arc, time::Duration};
use tracing::info;
/// Block source that reads blocks from S3 (--s3)
#[derive(Debug, Clone)]
pub struct S3BlockSource {
client: Arc<aws_sdk_s3::Client>,
bucket: String,
polling_interval: Duration,
metrics: S3BlockSourceMetrics,
}
#[derive(Metrics, Clone)]
#[metrics(scope = "block_source.s3")]
pub struct S3BlockSourceMetrics {
/// How many times the S3 block source has polled for a block
pub polling_attempt: Counter,
/// How many times the S3 block source has fetched a block
pub fetched: Counter,
}
impl S3BlockSource {
pub fn new(client: aws_sdk_s3::Client, bucket: String, polling_interval: Duration) -> Self {
Self {
client: client.into(),
bucket,
polling_interval,
metrics: S3BlockSourceMetrics::default(),
}
}
async fn pick_path_with_highest_number(
client: &aws_sdk_s3::Client,
bucket: &str,
dir: &str,
is_dir: bool,
) -> Option<(u64, String)> {
let request = client
.list_objects()
.bucket(bucket)
.prefix(dir)
.delimiter("/")
.request_payer(RequestPayer::Requester);
let response = request.send().await.ok()?;
let files: Vec<String> = if is_dir {
response
.common_prefixes?
.iter()
.map(|object| object.prefix.as_ref().unwrap().to_string())
.collect()
} else {
response
.contents?
.iter()
.map(|object| object.key.as_ref().unwrap().to_string())
.collect()
};
utils::name_with_largest_number(&files, is_dir)
}
}
impl BlockSource for S3BlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
let client = self.client.clone();
let bucket = self.bucket.clone();
let metrics = self.metrics.clone();
async move {
let path = utils::rmp_path(height);
metrics.polling_attempt.increment(1);
let request = client
.get_object()
.request_payer(RequestPayer::Requester)
.bucket(&bucket)
.key(path);
let response = request.send().await?;
metrics.fetched.increment(1);
let bytes = response.body.collect().await?.into_bytes();
let mut decoder = lz4_flex::frame::FrameDecoder::new(&bytes[..]);
let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
Ok(blocks[0].clone())
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
let client = self.client.clone();
let bucket = self.bucket.clone();
async move {
let (_, first_level) =
Self::pick_path_with_highest_number(&client, &bucket, "", true).await?;
let (_, second_level) =
Self::pick_path_with_highest_number(&client, &bucket, &first_level, true).await?;
let (block_number, third_level) =
Self::pick_path_with_highest_number(&client, &bucket, &second_level, false).await?;
info!("Latest block number: {} with path {}", block_number, third_level);
Some(block_number)
}
.boxed()
}
fn recommended_chunk_size(&self) -> u64 {
1000
}
fn polling_interval(&self) -> Duration {
self.polling_interval
}
}
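Construction is the only place the polling interval enters; a sketch of wiring the source up (the import path and bucket name are assumptions, and credentials come from the ambient AWS environment):

use aws_config::BehaviorVersion;
use reth_hl::pseudo_peer::sources::{BlockSource, S3BlockSource};
use std::time::Duration;

#[tokio::main]
async fn main() {
    let config = aws_config::load_defaults(BehaviorVersion::latest()).await;
    let client = aws_sdk_s3::Client::new(&config);
    // A longer interval trades block latency for fewer requester-pays S3 calls.
    let source =
        S3BlockSource::new(client, "hl-mainnet-evm-blocks".to_string(), Duration::from_millis(250));
    assert_eq!(source.polling_interval(), Duration::from_millis(250));
}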


@ -0,0 +1,26 @@
//! Shared utilities for block sources
/// Finds the file/directory with the largest number in its name from a list of files
pub fn name_with_largest_number(files: &[String], is_dir: bool) -> Option<(u64, String)> {
let mut files = files
.iter()
.filter_map(|file_raw| {
let file = file_raw.strip_suffix("/").unwrap_or(file_raw);
let file = file.split("/").last().unwrap();
let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
})
.collect::<Vec<_>>();
if files.is_empty() {
return None;
}
files.sort_by_key(|(number, _)| *number);
files.last().cloned()
}
/// Generates the RMP file path for a given block height
pub fn rmp_path(height: u64) -> String {
let f = ((height - 1) / 1_000_000) * 1_000_000;
let s = ((height - 1) / 1_000) * 1_000;
format!("{f}/{s}/{height}.rmp.lz4")
}
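A worked example of the bucketing arithmetic: heights are grouped into million-wide and thousand-wide directories, and the height - 1 offset means heights are assumed to be 1-based (height 0 would underflow):

fn main() {
    let height: u64 = 1_003_456;
    let f = ((height - 1) / 1_000_000) * 1_000_000; // 1_000_000
    let s = ((height - 1) / 1_000) * 1_000; // 1_003_000
    assert_eq!(format!("{f}/{s}/{height}.rmp.lz4"), "1000000/1003000/1003456.rmp.lz4");
}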


@ -1,30 +0,0 @@
use std::path::Path;
use crate::pseudo_peer::{prelude::*, BlockSourceType};
#[tokio::test]
async fn test_block_source_config_s3() {
let config = BlockSourceConfig::s3("test-bucket".to_string()).await;
assert!(
matches!(config.source_type, BlockSourceType::S3 { bucket } if bucket == "test-bucket")
);
}
#[tokio::test]
async fn test_block_source_config_local() {
let config = BlockSourceConfig::local("/test/path".into());
assert!(
matches!(config.source_type, BlockSourceType::Local { path } if path == Path::new("/test/path"))
);
}
#[test]
fn test_error_types() {
let io_error = std::io::Error::new(std::io::ErrorKind::NotFound, "File not found");
let benchmark_error: PseudoPeerError = io_error.into();
match benchmark_error {
PseudoPeerError::Io(_) => (),
_ => panic!("Expected Io error"),
}
}

src/version.rs

@ -0,0 +1,35 @@
use std::borrow::Cow;
use reth_node_core::version::{try_init_version_metadata, RethCliVersionConsts};
pub fn init_reth_hl_version() {
let cargo_pkg_version = env!("CARGO_PKG_VERSION").to_string();
let short = env!("RETH_HL_SHORT_VERSION").to_string();
let long = format!(
"{}\n{}\n{}\n{}\n{}",
env!("RETH_HL_LONG_VERSION_0"),
env!("RETH_HL_LONG_VERSION_1"),
env!("RETH_HL_LONG_VERSION_2"),
env!("RETH_HL_LONG_VERSION_3"),
env!("RETH_HL_LONG_VERSION_4"),
);
let p2p = env!("RETH_HL_P2P_CLIENT_VERSION").to_string();
let meta = RethCliVersionConsts {
name_client: Cow::Borrowed("reth_hl"),
cargo_pkg_version: Cow::Owned(cargo_pkg_version.clone()),
vergen_git_sha_long: Cow::Owned(env!("VERGEN_GIT_SHA").to_string()),
vergen_git_sha: Cow::Owned(env!("VERGEN_GIT_SHA_SHORT").to_string()),
vergen_build_timestamp: Cow::Owned(env!("VERGEN_BUILD_TIMESTAMP").to_string()),
vergen_cargo_target_triple: Cow::Owned(env!("VERGEN_CARGO_TARGET_TRIPLE").to_string()),
vergen_cargo_features: Cow::Owned(env!("VERGEN_CARGO_FEATURES").to_string()),
short_version: Cow::Owned(short),
long_version: Cow::Owned(long),
build_profile_name: Cow::Owned(env!("RETH_HL_BUILD_PROFILE").to_string()),
p2p_client_version: Cow::Owned(p2p),
extra_data: Cow::Owned(format!("reth_hl/v{}/{}", cargo_pkg_version, std::env::consts::OS)),
};
let _ = try_init_version_metadata(meta);
}