108 Commits

Author SHA1 Message Date
893822e5b0 Merge pull request #98 from hl-archive-node/fix/testnet-system-tx 2025-10-26 03:39:06 -04:00
c2528ce223 fix: Support certain types of system tx 2025-10-26 06:42:14 +00:00
d46e808b8d Merge pull request #94 from hl-archive-node/fix/migrator-typo
fix(migrate): Fix wrong chunk ranges
2025-10-16 10:42:59 -04:00
497353fd2f fix(migrate): Fix wrong chunk ranges 2025-10-16 14:35:04 +00:00
eee6eeb2fc Merge pull request #93 from hl-archive-node/fix/subscriptions
fix: Prevent #89 from overriding --hl-node-compliant subscriptions
2025-10-13 01:27:19 -04:00
611e6867bf fix: Do not override --hl-node-compliant for subscription 2025-10-13 02:57:25 +00:00
6c3ed63c3c fix: Override NewHeads only 2025-10-13 02:57:05 +00:00
51924e9671 Merge pull request #91 from hl-archive-node/fix/debug-cutoff
fix: Fix --debug-cutoff-height semantics
2025-10-11 22:29:45 -04:00
8f15aa311f fix: Fix --debug-cutoff-height semantics
NOTE: This is a debug feature not on by default.

Its original intention was to limit the highest block number, but it was instead enforcing the starting block number for fetching, leading to unintended block progression past the cutoff.
2025-10-12 02:22:55 +00:00
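A minimal sketch of the corrected semantics, with hypothetical names (the real wiring lives in the nanoreth fetch loop):

```rust
// Hypothetical sketch: --debug-cutoff-height caps the highest block to fetch;
// it must not set the starting height for fetching.
fn next_block_to_fetch(next: u64, cutoff_height: Option<u64>) -> Option<u64> {
    match cutoff_height {
        // Past the cutoff: stop instead of progressing further.
        Some(limit) if next > limit => None,
        _ => Some(next),
    }
}

fn main() {
    assert_eq!(next_block_to_fetch(10, Some(12)), Some(10));
    assert_eq!(next_block_to_fetch(13, Some(12)), None); // capped at the cutoff
    assert_eq!(next_block_to_fetch(13, None), Some(13)); // flag off by default
}
```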
bc66716a41 Merge pull request #89 from hl-archive-node/cleanup
fix: Convert header type for eth_subscribe
2025-10-10 23:09:33 -04:00
fc819dbba2 test: Add regression tests 2025-10-11 02:52:09 +00:00
1c5a22a814 fix: Convert header type for eth_subscribe
Due to custom header usage, only the `eth_subscribe` method was returning the new header format raw, while other parts were using RpcConvert to convert headers.

Make `eth_subscribe` newHeads return the `inner` field (original eth header) instead.
2025-10-11 02:49:19 +00:00
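A hedged sketch of the shape of the fix; the `inner` field comes from the commit, everything else here is illustrative:

```rust
use alloy_consensus::Header;

// Illustrative stand-in for the custom header: the real HlHeader carries
// chain-specific extras and wraps the original Ethereum header as `inner`.
struct HlHeader {
    inner: Header,
}

// newHeads subscriptions should serialize the original eth header rather than
// the raw custom type, matching what RpcConvert produces elsewhere.
fn new_heads_payload(header: &HlHeader) -> &Header {
    &header.inner
}

fn main() {
    let header = HlHeader { inner: Header::default() };
    let _payload = new_heads_payload(&header);
}
```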
852e186b1a Merge pull request #88 from hl-archive-node/hotfix
hotfix: Mark migrator experimental
2025-10-09 04:55:53 -04:00
f83326059f chore: clippy 2025-10-09 08:55:40 +00:00
ca8c374116 feat: Mark migrator as experimental 2025-10-09 08:49:29 +00:00
5ba12a4850 perf: adjust chunk size, do not hold tx too long 2025-10-09 08:20:22 +00:00
8a179a6d9e perf: Use smaller chunks 2025-10-09 08:13:53 +00:00
d570cf3e8d fix: Create directory before migration 2025-10-09 08:13:45 +00:00
0e49e65068 Merge pull request #86 from hl-archive-node/breaking/hl-header
feat(breaking): Use custom header format (HlHeader)
2025-10-09 02:51:09 -04:00
13b63ff136 feat: add migrator for mdbx as well 2025-10-09 06:35:56 +00:00
233026871f perf: chunkify block ranges 2025-10-08 13:54:16 +00:00
7e169d409d chore: Change branch to v1.8.2-fork-hl-header 2025-10-08 13:04:11 +00:00
47aaad6ed9 feat: add migrator 2025-10-08 13:03:51 +00:00
9f73b1ede0 refactor: Move BlockBody from transaction to body 2025-10-06 06:43:17 +00:00
bcdf4d933d feat(breaking): Use HlHeader for HlPrimitives 2025-10-06 06:21:08 +00:00
2390ed864a feat(breaking): Use HlHeader for storing header 2025-10-06 06:21:08 +00:00
567d6ce2e4 feat: Introduce HlHeader 2025-10-06 06:21:08 +00:00
8b2c3a4a34 refactor: Move primitives into files 2025-10-06 06:21:08 +00:00
92759f04db Merge pull request #84 from hl-archive-node/fix/no-panic
fix: Fix panic when block receipts are called on non-existing blocks
2025-10-05 19:47:22 -04:00
71bb70bca6 fix: Fix panic when block receipts are called on non-existing blocks 2025-10-05 14:54:55 +00:00
5327ebc97a Merge pull request #82 from hl-archive-node/fix/local-reader
fix(local-ingest-dir): Use more robust resumption for hl-node line reader, fix block number increment for reading files
2025-10-05 07:36:32 -04:00
4d83b687d4 feat: Add metrics for file read triggered
Usually, "Loading block data from ..." shouldn't be shown in logs at all. Add metrics to detect the file read.
2025-10-05 11:28:11 +00:00
12f366573e fix: Do not increase block counter when no block is read
This made the ingest loop infinitely increase the block number.
2025-10-05 11:28:11 +00:00
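A minimal sketch of the fix, with hypothetical names:

```rust
// Advance the ingest cursor only when a block was actually read; otherwise
// retry the same height instead of incrementing forever on empty reads.
fn ingest_step<B>(next_block: &mut u64, read: Option<B>, mut process: impl FnMut(B)) {
    if let Some(block) = read {
        process(block);
        *next_block += 1;
    }
}

fn main() {
    let mut next = 5u64;
    ingest_step(&mut next, None::<()>, |_| {}); // nothing read: cursor stays put
    assert_eq!(next, 5);
    ingest_step(&mut next, Some(()), |_| {}); // block read: cursor advances
    assert_eq!(next, 6);
}
```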
b8bae7cde9 fix: Utilize LruMap better
LruMap was introduced to allow getting the same block twice, so removing the item when getting the block doesn't make sense.
2025-10-05 11:28:11 +00:00
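The point, sketched with the `lru` crate standing in for the LruMap type actually used:

```rust
use lru::LruCache;
use std::num::NonZeroUsize;

fn main() {
    let mut cache: LruCache<u64, &str> = LruCache::new(NonZeroUsize::new(64).unwrap());
    cache.put(1, "block #1");
    // A non-consuming `get` lets the same block be served twice; a removing
    // `pop` on read would defeat the reason the cache was introduced.
    assert!(cache.get(&1).is_some());
    assert!(cache.get(&1).is_some());
}
```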
0fd4b7943f refactor: Use offsets instead of lines, wrap related structs in one 2025-10-05 11:28:04 +00:00
bfd61094ee chore: cargo fmt 2025-10-05 09:58:13 +00:00
3b33b0a526 Merge pull request #81 from hl-archive-node/fix/typo-local
fix: Fix typo in --local (default hl-node dir)
2025-10-05 05:54:35 -04:00
de7b524f0b fix: Fix typo in --local (default hl-node dir) 2025-10-05 04:39:09 -04:00
24f2460337 Merge pull request #80 from hl-archive-node/chore/v1.8.2
chore: Upgrade to reth v1.8.2
2025-10-05 04:38:54 -04:00
b55ddc54ad chore: clippy 2025-10-05 04:04:30 -04:00
aa73fab281 chore: Now cargo fmt sorts imports and trait methods 2025-10-05 03:56:23 -04:00
ae0cb0da6d chore: Move sprites0/reth to hl-archive-node/reth 2025-10-05 03:56:23 -04:00
8605be9864 chore: Upgrade to reth v1.8.2 2025-10-05 03:56:23 -04:00
c93ff90f94 Merge pull request #79 from hl-archive-node/fix/issue-78
fix: Do not filter out logs based on bloom (which is for perf optimization)
2025-10-05 00:43:20 -04:00
ce64e00e2f fix: Do not filter out logs based on bloom (which is for perf optimization)
Resolves #78
2025-10-05 00:33:44 -04:00
8d8da57d3a Merge pull request #77 from hl-archive-node/feat/cutoff-latest
feat: Add debug CLI flag to enforce latest blocks (--debug-cutoff-height)
2025-10-02 10:57:04 -04:00
875304f891 feat: Add debug CLI flag to enforce latest blocks (--debug-cutoff-height)
This is useful when syncing to specific testnet blocks
2025-10-02 14:53:47 +00:00
b37ba15765 Merge pull request #74 from Quertyy/feat/block-precompila-data-rpc-method
feat(rpc): add HlBlockPrecompile rpc API
2025-09-19 02:42:21 -04:00
3080665702 style: pass clippy check 2025-09-19 13:23:49 +07:00
4896e4f0ea refactor: use BlockId as block type 2025-09-19 12:41:14 +07:00
458f506ad2 refactor: use BlockHashOrNumber as block type 2025-09-19 12:33:32 +07:00
1c7136bfab feat(rpc): add HlBlockPrecompile rpc API 2025-09-18 04:57:49 +07:00
491e902904 Merge pull request #69 from hl-archive-node/fix/call-and-estimate
fix: Apply precompiles for eth_call and eth_estimateGas
2025-09-15 02:22:21 -04:00
45648a7a98 fix: Apply precompiles for eth_call and eth_estimateGas 2025-09-15 02:21:45 -04:00
c87c5a055a Merge pull request #68 from hl-archive-node/fix/testnet-token
fix: Add a manual mapping for testnet
2025-09-14 23:31:19 -04:00
c9416a3948 fix: Add a manual mapping for testnet 2025-09-14 23:24:00 -04:00
db10c23c56 Merge pull request #66 from hl-archive-node/feat/nb-release
fix: Fix tag format
2025-09-13 16:48:00 -04:00
fc395123f3 fix: Fix tag format 2025-09-13 16:47:05 -04:00
84ea1af682 Merge pull request #64 from sentioxyz/node-builder
fix docker build args
2025-09-13 16:43:35 -04:00
bd3e0626ed fix docker build args 2025-09-13 15:28:36 +08:00
7d223a464e Merge pull request #63 from hl-archive-node/feat/nb-release
feat: Add nb tag to docker releases
2025-09-11 19:36:43 -04:00
afcc551f67 feat: Add nb tag to docker releases 2025-09-11 19:35:50 -04:00
0dfd7a4c7f Merge pull request #62 from hl-archive-node/doc/testnet
doc: Update testnet instruction, add support channel
2025-09-11 19:33:50 -04:00
8faac526b7 doc: Add support channel 2025-09-11 19:32:55 -04:00
acfabf969c doc: Update testnet block number 2025-09-11 19:31:37 -04:00
fccf877a3a Merge pull request #61 from hl-archive-node/chore/v1.7.0
chore: Upgrade to reth v1.7.0
2025-09-11 19:26:47 -04:00
9e3f0c722e chore: Upgrade to reth v1.7.0 2025-09-11 19:25:48 -04:00
cd5bcc4cb0 chore: Add issue templates from reth 2025-09-11 19:00:09 -04:00
d831a459bb Merge pull request #60 from hl-archive-node/feat/block-metrics
feat: Add block source metrics
2025-09-11 18:56:18 -04:00
66c2ee654c feat: Add block source metrics 2025-09-11 18:50:22 -04:00
701e6a25e6 refactor: Remove duplications 2025-09-11 18:47:58 -04:00
ab11ce513f Merge pull request #57 from Quertyy/chore/reth-hl-version
chore(build): add reth-hl version output
2025-09-09 09:43:12 -04:00
37b852e810 chore(build): add reth-hl version output 2025-09-09 20:19:52 +07:00
51c43d6dbd Create a docker release github action (#54)
* create docker release action
2025-09-08 10:26:20 -04:00
3f08b0a4e6 Merge pull request #55 from hl-archive-node/fix/txenv-on-trace
fix: Fill precompiles when tracing
2025-09-04 20:39:16 -04:00
d7992ab8ff remove: Remove unnecessary trait implementation 2025-09-04 20:38:41 -04:00
b37a30fb37 fix: Fill precompiles in tracing APIs 2025-09-04 20:37:10 -04:00
f6432498d8 refactor: Relax apply_precompiles and expose 2025-09-04 20:37:07 -04:00
772ff250ce Merge pull request #52 from hl-archive-node/fix/avoid-crash-on-eth-failure
fix: Do not crash when collect_block failed
2025-08-29 02:51:10 +09:00
5ee9053286 fix: Do not crash when collect_block failed
Just gracefully return it as an error and log it
2025-08-28 13:47:44 -04:00
29e6972d58 Merge pull request #51 from hl-archive-node/feat/no-eth-proof
fix: Disable eth_getProof by default
2025-08-29 02:07:24 +09:00
e87b9232cc fix: Disable eth_getProof by default
No need to expose a malfunctioning feature by default. Issue #15 affects the
StoragesTrie and AccountsTrie tables, which are used only for state root and
proof generation.
Clearing these tables also does not affect any other part of the reth node.

Meanwhile, add --experimental-eth-get-proof flag to enable eth_getProof
forcefully.
2025-08-28 10:27:32 -04:00
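A sketch of how such a gate might be declared with clap; the flag name comes from the commit message, the surrounding struct is hypothetical:

```rust
use clap::Parser;

#[derive(Parser)]
struct HlNodeArgs {
    /// Force-enable eth_getProof despite issue #15 affecting the
    /// StoragesTrie/AccountsTrie tables (off by default).
    #[arg(long = "experimental-eth-get-proof")]
    experimental_eth_get_proof: bool,
}

fn main() {
    let args = HlNodeArgs::parse();
    // eth_getProof stays disabled unless the operator opts in explicitly.
    println!("eth_getProof enabled: {}", args.experimental_eth_get_proof);
}
```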
b004263f82 Merge pull request #50 from Quertyy/feat/rpc-system-tx-receipts
chore(rpc): add eth_getEvmSystemTxsReceiptsByBlockHash and eth_getEvmSystemTxsReceiptsByBlockNumber rpc methods
2025-08-28 23:26:05 +09:00
74e27b5ee2 refactor(rpc): extract common logic for getting system txs 2025-08-28 16:10:41 +02:00
09fcf0751f chore(rpc): add eth_getSystemTxsReceiptsByBlockNumber and eth_getSystemTxsReceiptsByBlockHash rpc methods 2025-08-28 15:39:37 +02:00
8f2eca4754 Merge pull request #48 from Quertyy/feat/rpc-block-system-tx
chore(rpc): add eth_getEvmSystemTxsByBlockNumber and eth_getEvmSystemTxsByBlockHash rpc methods
2025-08-28 17:45:43 +09:00
707b4fb709 chore(rpc): return types compliance 2025-08-27 10:34:34 +02:00
62dd5a71b5 chore(rpc): change methods name 2025-08-26 22:03:40 +02:00
412c38a8cd chore(rpc): add eth_getSystemTxsByBlockNumber and eth_getSystemTxsByBlockHash rpc methods 2025-08-26 21:24:28 +02:00
796ea518bd Merge pull request #47 from hl-archive-node/fix/issue-46
fix: Sort hl-node files correctly
2025-08-27 02:49:16 +09:00
dd2c925af2 fix: Sort hl-node files correctly 2025-08-26 13:47:34 -04:00
3ffd7bb351 Merge pull request #45 from hl-archive-node/feat/add-cli-params-for-sources
feat: Add --local.fallback-threshold, --s3.polling-interval
2025-08-26 11:29:34 +09:00
52909eea3f feat: Add --local.fallback-threshold, --s3.polling-interval 2025-08-25 22:27:26 -04:00
0f9c2c5897 chore: Code style 2025-08-25 21:12:57 -04:00
ad4a8cd365 remove: Remove unnecessary tests 2025-08-25 21:12:34 -04:00
80506a7a43 fix(hl-node-compliance): Fix transaction index on block response 2025-08-25 10:00:43 -04:00
2af312b628 remove: Remove unused code 2025-08-25 10:00:43 -04:00
1908e9f414 Merge pull request #40 from sentioxyz/node-builder
fix: correctly ingest local blocks
2025-08-24 18:13:41 +09:00
65cdc27b51 fix: line_to_evm_block doesn't hold equivalent semantics after refactor 2025-08-24 16:46:45 +08:00
4f430487d6 refactor: Move RPC addons to addons/ 2025-08-24 01:18:52 -04:00
19f35a6b54 chore: clippy, fmt 2025-08-24 01:15:36 -04:00
d61020e996 refactor: Split files for block sources
By claude code
2025-08-24 01:14:33 -04:00
657df240f4 fix: Avoid unnecessarily exposing pseudo peer 2025-08-23 22:17:03 -04:00
73a34a4bc1 chore: clippy 2025-08-23 22:17:03 -04:00
d8eef6305b remove: Reduce unnecessary LoC 2025-08-23 22:17:03 -04:00
bae68ef8db refactor: Reduce unnecessary LoC
By claude code
2025-08-23 04:21:23 -04:00
f576dddfa6 remove: Remove unused code 2025-08-23 03:10:05 -04:00
894ebcbfa5 Merge pull request #36 from hl-archive-node/fix/support-new-api
fix: Support new reth API
2025-08-23 01:51:36 +09:00
92 changed files with 4572 additions and 2833 deletions

.github/ISSUE_TEMPLATE/bug.yml (new file)

@@ -0,0 +1,127 @@
name: Bug Report
description: Create a bug report
labels: ["C-bug", "S-needs-triage"]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this bug report! Please provide as much detail as possible.
If you believe you have found a vulnerability, please provide details [here](mailto:georgios@paradigm.xyz) instead.
- type: textarea
id: what-happened
attributes:
label: Describe the bug
description: |
A clear and concise description of what the bug is.
If the bug is in a crate you are using (i.e. you are not running the standard `reth` binary) please mention that as well.
validations:
required: true
- type: textarea
id: reproduction-steps
attributes:
label: Steps to reproduce
description: Please provide any steps you think might be relevant to reproduce the bug.
placeholder: |
Steps to reproduce:
1. Start '...'
2. Then '...'
3. Check '...'
4. See error
validations:
required: true
- type: textarea
id: logs
attributes:
label: Node logs
description: |
If applicable, please provide the node logs leading up to the bug.
**Please also provide debug logs.** By default, these can be found in:
- `~/.cache/reth/logs` on Linux
- `~/Library/Caches/reth/logs` on macOS
- `%localAppData%/reth/logs` on Windows
render: text
validations:
required: false
- type: dropdown
id: platform
attributes:
label: Platform(s)
description: What platform(s) did this occur on?
multiple: true
options:
- Linux (x86)
- Linux (ARM)
- Mac (Intel)
- Mac (Apple Silicon)
- Windows (x86)
- Windows (ARM)
- type: dropdown
id: container_type
attributes:
label: Container Type
description: Were you running it in a container?
multiple: true
options:
- Not running in a container
- Docker
- Kubernetes
- LXC/LXD
- Other
validations:
required: true
- type: textarea
id: client-version
attributes:
label: What version/commit are you on?
description: This can be obtained with `reth --version`
validations:
required: true
- type: textarea
id: database-version
attributes:
label: What database version are you on?
description: This can be obtained with `reth db version`
validations:
required: true
- type: textarea
id: network
attributes:
label: Which chain / network are you on?
description: This is the argument you pass to `reth --chain`. If you are using `--dev`, type in 'dev' here. If you are not running with `--chain` or `--dev` then it is mainnet.
validations:
required: true
- type: dropdown
id: node-type
attributes:
label: What type of node are you running?
options:
- Archive (default)
- Full via --full flag
- Pruned with custom reth.toml config
validations:
required: true
- type: textarea
id: prune-config
attributes:
label: What prune config do you use, if any?
description: The `[prune]` section in `reth.toml` file
validations:
required: false
- type: input
attributes:
label: If you've built Reth from source, provide the full command you used
validations:
required: false
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/paradigmxyz/reth/blob/main/CONTRIBUTING.md#code-of-conduct)
options:
- label: I agree to follow the Code of Conduct
required: true

.github/ISSUE_TEMPLATE/config.yml (new file)

@@ -0,0 +1,5 @@
blank_issues_enabled: false
contact_links:
- name: GitHub Discussions
url: https://github.com/paradigmxyz/reth/discussions
about: Please ask and answer questions here to keep the issue tracker clean.

.github/ISSUE_TEMPLATE/docs.yml (new file)

@@ -0,0 +1,19 @@
name: Documentation
description: Suggest a change to our documentation
labels: ["C-docs", "S-needs-triage"]
body:
- type: markdown
attributes:
value: |
If you are unsure if the docs are relevant or needed, please open up a discussion first.
- type: textarea
attributes:
label: Describe the change
description: |
Please describe the documentation you want to change or add, and if it is for end-users or contributors.
validations:
required: true
- type: textarea
attributes:
label: Additional context
description: Add any other context to the feature (like screenshots, resources)

.github/ISSUE_TEMPLATE/feature.yml (new file)

@@ -0,0 +1,21 @@
name: Feature request
description: Suggest a feature
labels: ["C-enhancement", "S-needs-triage"]
body:
- type: markdown
attributes:
value: |
Please ensure that the feature has not already been requested in the issue tracker.
- type: textarea
attributes:
label: Describe the feature
description: |
Please describe the feature and what it is aiming to solve, if relevant.
If the feature is for a crate, please include a proposed API surface.
validations:
required: true
- type: textarea
attributes:
label: Additional context
description: Add any other context to the feature (like screenshots, resources)

.github/workflows/docker.yml (new file)

@@ -0,0 +1,38 @@
# Publishes the Docker image.
name: docker
on:
push:
tags:
- v*
- nb-*
env:
IMAGE_NAME: ${{ github.repository_owner }}/nanoreth
CARGO_TERM_COLOR: always
DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/nanoreth
DOCKER_USERNAME: ${{ github.actor }}
jobs:
build:
name: build and push as latest
runs-on: ubuntu-24.04
permissions:
packages: write
contents: read
steps:
- uses: actions/checkout@v5
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
- name: Log in to Docker
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin
- name: Set up Docker builder
run: |
docker buildx create --use --name builder
- name: Build and push nanoreth image
run: make IMAGE_NAME=$IMAGE_NAME DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME PROFILE=maxperf docker-build-push-latest

Cargo.lock (generated; diff suppressed because it is too large)

Cargo.toml

@@ -1,7 +1,8 @@
[package]
name = "reth_hl"
version = "0.1.0"
edition = "2021"
edition = "2024"
build = "build.rs"
[lib]
name = "reth_hl"
@@ -25,67 +26,73 @@ lto = "fat"
codegen-units = 1
[dependencies]
reth = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-cli = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-cli-commands = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-basic-payload-builder = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-db = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-db-api = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-chainspec = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-cli-util = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-discv4 = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-engine-primitives = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-ethereum-forks = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-ethereum-payload-builder = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-ethereum-primitives = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-eth-wire = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-eth-wire-types = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-evm = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-evm-ethereum = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-node-core = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-revm = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-network = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-network-p2p = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-network-api = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-node-ethereum = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-network-peers = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-payload-primitives = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-primitives = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-primitives-traits = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-provider = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb", features = ["test-utils"] }
reth-rpc = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-rpc-eth-api = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-rpc-engine-api = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-tracing = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-trie-common = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-trie-db = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-codecs = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-transaction-pool = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
reth-stages-types = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
revm = { version = "28.0.1", default-features = false }
reth = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-cli = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-cli-commands = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-basic-payload-builder = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-db = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-db-api = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-chainspec = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-cli-util = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-discv4 = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-engine-primitives = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-ethereum-forks = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-ethereum-payload-builder = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-ethereum-primitives = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-eth-wire = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-eth-wire-types = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-evm = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-evm-ethereum = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-node-core = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-revm = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-network = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-network-p2p = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-network-api = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-node-ethereum = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-network-peers = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-payload-primitives = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-primitives = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-primitives-traits = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-provider = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a", features = ["test-utils"] }
reth-rpc = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-rpc-eth-api = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-rpc-engine-api = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-tracing = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-trie-common = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-trie-db = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-codecs = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-transaction-pool = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-stages-types = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-storage-api = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-errors = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-rpc-convert = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-rpc-eth-types = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-rpc-server-types = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-metrics = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
revm = { version = "29.0.1", default-features = false }
# alloy dependencies
alloy-genesis = { version = "1.0.23", default-features = false }
alloy-consensus = { version = "1.0.23", default-features = false }
alloy-genesis = { version = "1.0.37", default-features = false }
alloy-consensus = { version = "1.0.37", default-features = false }
alloy-chains = { version = "0.2.5", default-features = false }
alloy-eips = { version = "1.0.23", default-features = false }
alloy-evm = { version = "0.18.2", default-features = false }
alloy-eips = { version = "1.0.37", default-features = false }
alloy-evm = { version = "0.21.0", default-features = false }
alloy-json-abi = { version = "1.3.1", default-features = false }
alloy-json-rpc = { version = "1.0.23", default-features = false }
alloy-json-rpc = { version = "1.0.37", default-features = false }
alloy-dyn-abi = "1.3.1"
alloy-network = { version = "1.0.23", default-features = false }
alloy-network = { version = "1.0.37", default-features = false }
alloy-primitives = { version = "1.3.1", default-features = false, features = ["map-foldhash"] }
alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] }
alloy-rpc-types = { version = "1.0.23", features = ["eth"], default-features = false }
alloy-rpc-types-eth = { version = "1.0.23", default-features = false }
alloy-rpc-types-engine = { version = "1.0.23", default-features = false }
alloy-signer = { version = "1.0.23", default-features = false }
alloy-rpc-types = { version = "1.0.37", features = ["eth"], default-features = false }
alloy-rpc-types-eth = { version = "1.0.37", default-features = false }
alloy-rpc-types-engine = { version = "1.0.37", default-features = false }
alloy-signer = { version = "1.0.37", default-features = false }
alloy-sol-macro = "1.3.1"
alloy-sol-types = { version = "1.3.1", default-features = false }
jsonrpsee = "0.25.1"
jsonrpsee-core = "0.25.1"
jsonrpsee-types = "0.25.1"
jsonrpsee = "0.26.0"
jsonrpsee-core = "0.26.0"
jsonrpsee-types = "0.26.0"
# misc dependencies
auto_impl = "1"
@@ -166,3 +173,7 @@ client = [
[dev-dependencies]
tempfile = "3.20.0"
[build-dependencies]
vergen = { version = "9.0.4", features = ["build", "cargo", "emit_and_set"] }
vergen-git2 = "1.0.5"

Makefile

@@ -1,6 +1,8 @@
# Modified from reth Makefile
.DEFAULT_GOAL := help
GIT_SHA ?= $(shell git rev-parse HEAD)
GIT_TAG ?= $(shell git describe --tags --abbrev=0 2>/dev/null)
BIN_DIR = "dist/bin"
# List of features to use when building. Can be overridden via the environment.
@@ -17,6 +19,9 @@ PROFILE ?= release
# Extra flags for Cargo
CARGO_INSTALL_EXTRA_FLAGS ?=
# The docker image name
DOCKER_IMAGE_NAME ?= ghcr.io/hl-archive-node/nanoreth
##@ Help
.PHONY: help
@@ -207,3 +212,49 @@ check-features:
--package reth-primitives-traits \
--package reth-primitives \
--feature-powerset
##@ Docker
# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --driver docker-container --name cross-builder`
.PHONY: docker-build-push
docker-build-push: ## Build and push a cross-arch Docker image tagged with the latest git tag.
$(call docker_build_push,$(GIT_TAG),$(GIT_TAG))
# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --driver docker-container --name cross-builder`
.PHONY: docker-build-push-git-sha
docker-build-push-git-sha: ## Build and push a cross-arch Docker image tagged with the latest git sha.
$(call docker_build_push,$(GIT_SHA),$(GIT_SHA))
# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --driver docker-container --name cross-builder`
.PHONY: docker-build-push-latest
docker-build-push-latest: ## Build and push a cross-arch Docker image tagged with the latest git tag and `latest`.
$(call docker_build_push,$(GIT_TAG),latest)
# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --name cross-builder`
.PHONY: docker-build-push-nightly
docker-build-push-nightly: ## Build and push cross-arch Docker image tagged with the latest git tag with a `-nightly` suffix, and `latest-nightly`.
$(call docker_build_push,nightly,nightly)
# Create a Docker image using the main Dockerfile
define docker_build_push
docker buildx build --file ./Dockerfile . \
--platform linux/amd64 \
--tag $(DOCKER_IMAGE_NAME):$(1) \
--tag $(DOCKER_IMAGE_NAME):$(2) \
--build-arg BUILD_PROFILE="$(PROFILE)" \
--build-arg FEATURES="jemalloc,asm-keccak" \
--provenance=false \
--push
endef

README.md

@@ -3,6 +3,8 @@
HyperEVM archive node implementation based on [reth](https://github.com/paradigmxyz/reth).
NodeBuilder API version is heavily inspired by [reth-bsc](https://github.com/loocapro/reth-bsc).
Got questions? Drop by the [Hyperliquid Discord](https://discord.gg/hyperliquid) #node-operators channel.
## ⚠️ IMPORTANT: System Transactions Appear as Pseudo Transactions
Deposit transactions from [System Addresses](https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/hyperevm/hypercore-less-than-greater-than-hyperevm-transfers#system-addresses) like `0x222..22` / `0x200..xx` to user addresses are intentionally recorded as pseudo transactions.
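As the compliance overrides later in this diff do, these pseudo transactions can be recognized from their receipts: system txs sit at the front of a block and burn no gas, so a zero cumulative gas used marks one. A tiny sketch:

```rust
// System (pseudo) transactions burn no gas, so a receipt whose cumulative gas
// used is zero identifies one (see adjust_log in src/addons below).
fn is_system_receipt(cumulative_gas_used: u64) -> bool {
    cumulative_gas_used == 0
}

fn main() {
    assert!(is_system_receipt(0)); // pseudo/system transaction
    assert!(!is_system_receipt(21_000)); // ordinary user transfer
}
```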
@@ -58,19 +60,19 @@ $ reth-hl node --http --http.addr 0.0.0.0 --http.api eth,ots,net,web3 \
## How to run (testnet)
Testnet is supported since block 21304281.
Testnet is supported since block 30281484.
```sh
# Get testnet genesis at block 21304281
# Get testnet genesis at block 30281484
$ cd ~
$ git clone https://github.com/sprites0/hl-testnet-genesis
$ zstd --rm -d ~/hl-testnet-genesis/*.zst
# Init node
$ make install
$ reth-hl init-state --without-evm --chain testnet --header ~/hl-testnet-genesis/21304281.rlp \
--header-hash 0x5b10856d2b1ad241c9bd6136bcc60ef7e8553560ca53995a590db65f809269b4 \
~/hl-testnet-genesis/21304281.jsonl --total-difficulty 0
$ reth-hl init-state --without-evm --chain testnet --header ~/hl-testnet-genesis/30281484.rlp \
--header-hash 0x147cc3c09e9ddbb11799c826758db284f77099478ab5f528d3a57a6105516c21 \
~/hl-testnet-genesis/30281484.jsonl --total-difficulty 0
# Run node
$ reth-hl node --chain testnet --http --http.addr 0.0.0.0 --http.api eth,ots,net,web3 \

build.rs (new file)

@@ -0,0 +1,91 @@
use std::{env, error::Error};
use vergen::{BuildBuilder, CargoBuilder, Emitter};
use vergen_git2::Git2Builder;
fn main() -> Result<(), Box<dyn Error>> {
let mut emitter = Emitter::default();
let build_builder = BuildBuilder::default().build_timestamp(true).build()?;
emitter.add_instructions(&build_builder)?;
let cargo_builder = CargoBuilder::default().features(true).target_triple(true).build()?;
emitter.add_instructions(&cargo_builder)?;
let git_builder =
Git2Builder::default().describe(false, true, None).dirty(true).sha(false).build()?;
emitter.add_instructions(&git_builder)?;
emitter.emit_and_set()?;
let sha = env::var("VERGEN_GIT_SHA")?;
let sha_short = &sha[0..7];
let is_dirty = env::var("VERGEN_GIT_DIRTY")? == "true";
// > git describe --always --tags
// if not on a tag: v0.2.0-beta.3-82-g1939939b
// if on a tag: v0.2.0-beta.3
let not_on_tag = env::var("VERGEN_GIT_DESCRIBE")?.ends_with(&format!("-g{sha_short}"));
let version_suffix = if is_dirty || not_on_tag { "-dev" } else { "" };
println!("cargo:rustc-env=RETH_HL_VERSION_SUFFIX={version_suffix}");
// Set short SHA
println!("cargo:rustc-env=VERGEN_GIT_SHA_SHORT={}", &sha[..8]);
// Set the build profile
let out_dir = env::var("OUT_DIR").unwrap();
let profile = out_dir.rsplit(std::path::MAIN_SEPARATOR).nth(3).unwrap();
println!("cargo:rustc-env=RETH_HL_BUILD_PROFILE={profile}");
// Set formatted version strings
let pkg_version = env!("CARGO_PKG_VERSION");
// The short version information for reth.
// - The latest version from Cargo.toml
// - The short SHA of the latest commit.
// Example: 0.1.0 (defa64b2)
println!("cargo:rustc-env=RETH_HL_SHORT_VERSION={pkg_version}{version_suffix} ({sha_short})");
// LONG_VERSION
// The long version information for reth.
//
// - The latest version from Cargo.toml + version suffix (if any)
// - The full SHA of the latest commit
// - The build datetime
// - The build features
// - The build profile
//
// Example:
//
// ```text
// Version: 0.1.0
// Commit SHA: defa64b2
// Build Timestamp: 2023-05-19T01:47:19.815651705Z
// Build Features: jemalloc
// Build Profile: maxperf
// ```
println!("cargo:rustc-env=RETH_HL_LONG_VERSION_0=Version: {pkg_version}{version_suffix}");
println!("cargo:rustc-env=RETH_HL_LONG_VERSION_1=Commit SHA: {sha}");
println!(
"cargo:rustc-env=RETH_HL_LONG_VERSION_2=Build Timestamp: {}",
env::var("VERGEN_BUILD_TIMESTAMP")?
);
println!(
"cargo:rustc-env=RETH_HL_LONG_VERSION_3=Build Features: {}",
env::var("VERGEN_CARGO_FEATURES")?
);
println!("cargo:rustc-env=RETH_HL_LONG_VERSION_4=Build Profile: {profile}");
// The version information for reth formatted for P2P (devp2p).
// - The latest version from Cargo.toml
// - The target triple
//
// Example: reth/v0.1.0-alpha.1-428a6dc2f/aarch64-apple-darwin
println!(
"cargo:rustc-env=RETH_HL_P2P_CLIENT_VERSION={}",
format_args!("reth/v{pkg_version}-{sha_short}/{}", env::var("VERGEN_CARGO_TARGET_TRIPLE")?)
);
Ok(())
}

src/addons/call_forwarder.rs

@@ -2,18 +2,18 @@ use alloy_eips::BlockId;
use alloy_json_rpc::RpcObject;
use alloy_primitives::{Bytes, U256};
use alloy_rpc_types_eth::{
state::{EvmOverrides, StateOverride},
BlockOverrides,
state::{EvmOverrides, StateOverride},
};
use jsonrpsee::{
http_client::{HttpClient, HttpClientBuilder},
proc_macros::rpc,
rpc_params,
types::{error::INTERNAL_ERROR_CODE, ErrorObject},
types::{ErrorObject, error::INTERNAL_ERROR_CODE},
};
use jsonrpsee_core::{async_trait, client::ClientT, ClientError, RpcResult};
use jsonrpsee_core::{ClientError, RpcResult, async_trait, client::ClientT};
use reth_rpc::eth::EthApiTypes;
use reth_rpc_eth_api::{helpers::EthCall, RpcTxReq};
use reth_rpc_eth_api::{RpcTxReq, helpers::EthCall};
#[rpc(server, namespace = "eth")]
pub(crate) trait CallForwarderApi<TxReq: RpcObject> {

src/addons/hl_node_compliance.rs

@@ -1,68 +1,251 @@
use alloy_consensus::{transaction::TransactionMeta, TxReceipt};
//! Overrides for RPC methods to post-filter system transactions and logs.
//!
//! System transactions are always at the beginning of the block,
//! so we can use the transaction index to determine if the log is from a system transaction,
//! and if it is, we can exclude it.
//!
//! For non-system transactions, we can just return the log as is, and the client will
//! adjust the transaction index accordingly.
use alloy_consensus::{
BlockHeader, TxReceipt,
transaction::{TransactionMeta, TxHashRef},
};
use alloy_eips::{BlockId, BlockNumberOrTag};
use alloy_json_rpc::RpcObject;
use alloy_primitives::{B256, U256};
use alloy_rpc_types::{
pubsub::{Params, SubscriptionKind},
BlockTransactions, Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind,
TransactionInfo,
pubsub::{Params, SubscriptionKind},
};
use jsonrpsee::{proc_macros::rpc, PendingSubscriptionSink, SubscriptionMessage, SubscriptionSink};
use jsonrpsee_core::{async_trait, RpcResult};
use jsonrpsee_types::ErrorObject;
use reth::{
api::FullNodeComponents, builder::rpc::RpcContext, rpc::result::internal_rpc_err,
tasks::TaskSpawner,
};
use reth_primitives_traits::{BlockBody as _, SignedTransaction};
use jsonrpsee::{PendingSubscriptionSink, proc_macros::rpc};
use jsonrpsee_core::{RpcResult, async_trait};
use jsonrpsee_types::{ErrorObject, error::INTERNAL_ERROR_CODE};
use reth::{api::FullNodeComponents, builder::rpc::RpcContext, tasks::TaskSpawner};
use reth_primitives_traits::SignedTransaction;
use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, ReceiptProvider};
use reth_rpc::{EthFilter, EthPubSub};
use reth_rpc_eth_api::{
helpers::{EthBlocks, EthTransactions, LoadReceipt},
transaction::ConvertReceiptInput,
EthApiServer, EthApiTypes, EthFilterApiServer, EthPubSubApiServer, FullEthApiTypes, RpcBlock,
RpcConvert, RpcHeader, RpcNodeCoreExt, RpcReceipt, RpcTransaction, RpcTxReq,
EthApiTypes, EthFilterApiServer, EthPubSubApiServer, RpcBlock, RpcConvert, RpcReceipt,
RpcTransaction, helpers::EthBlocks, transaction::ConvertReceiptInput,
};
use serde::Serialize;
use std::{borrow::Cow, marker::PhantomData, sync::Arc};
use tokio_stream::{Stream, StreamExt};
use tracing::{info, trace, Instrument};
use reth_rpc_eth_types::EthApiError;
use std::{marker::PhantomData, sync::Arc};
use tokio_stream::StreamExt;
use tracing::{Instrument, trace};
use crate::{
node::primitives::{HlPrimitives, TransactionSigned},
HlBlock,
};
use crate::addons::utils::{EthWrapper, new_headers_stream, pipe_from_stream};
pub trait EthWrapper:
EthApiServer<
RpcTxReq<Self::NetworkTypes>,
RpcTransaction<Self::NetworkTypes>,
RpcBlock<Self::NetworkTypes>,
RpcReceipt<Self::NetworkTypes>,
RpcHeader<Self::NetworkTypes>,
> + FullEthApiTypes<Primitives = HlPrimitives>
+ RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
+ EthBlocks
+ EthTransactions
+ LoadReceipt
+ 'static
{
#[rpc(server, namespace = "eth")]
#[async_trait]
pub trait EthSystemTransactionApi<T: RpcObject, R: RpcObject> {
#[method(name = "getEvmSystemTxsByBlockHash")]
async fn get_evm_system_txs_by_block_hash(&self, hash: B256) -> RpcResult<Option<Vec<T>>>;
#[method(name = "getEvmSystemTxsByBlockNumber")]
async fn get_evm_system_txs_by_block_number(
&self,
block_id: Option<BlockId>,
) -> RpcResult<Option<Vec<T>>>;
#[method(name = "getEvmSystemTxsReceiptsByBlockHash")]
async fn get_evm_system_txs_receipts_by_block_hash(
&self,
hash: B256,
) -> RpcResult<Option<Vec<R>>>;
#[method(name = "getEvmSystemTxsReceiptsByBlockNumber")]
async fn get_evm_system_txs_receipts_by_block_number(
&self,
block_id: Option<BlockId>,
) -> RpcResult<Option<Vec<R>>>;
}
impl<
T: EthApiServer<
RpcTxReq<Self::NetworkTypes>,
RpcTransaction<Self::NetworkTypes>,
RpcBlock<Self::NetworkTypes>,
RpcReceipt<Self::NetworkTypes>,
RpcHeader<Self::NetworkTypes>,
> + FullEthApiTypes<Primitives = HlPrimitives>
+ RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
+ EthBlocks
+ EthTransactions
+ LoadReceipt
+ 'static,
> EthWrapper for T
pub struct HlSystemTransactionExt<Eth: EthWrapper> {
eth_api: Eth,
_marker: PhantomData<Eth>,
}
impl<Eth: EthWrapper> HlSystemTransactionExt<Eth> {
pub fn new(eth_api: Eth) -> Self {
Self { eth_api, _marker: PhantomData }
}
async fn get_system_txs_by_block_id(
&self,
block_id: BlockId,
) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>>
where
jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
if let Some(block) = self.eth_api.recovered_block(block_id).await? {
let block_hash = block.hash();
let block_number = block.number();
let base_fee_per_gas = block.base_fee_per_gas();
let system_txs = block
.transactions_with_sender()
.enumerate()
.filter_map(|(index, (signer, tx))| {
if tx.is_system_transaction() {
let tx_info = TransactionInfo {
hash: Some(*tx.tx_hash()),
block_hash: Some(block_hash),
block_number: Some(block_number),
base_fee: base_fee_per_gas,
index: Some(index as u64),
};
self.eth_api
.tx_resp_builder()
.fill(tx.clone().with_signer(*signer), tx_info)
.ok()
} else {
None
}
})
.collect();
Ok(Some(system_txs))
} else {
Ok(None)
}
}
async fn get_system_txs_receipts_by_block_id(
&self,
block_id: BlockId,
) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>>
where
jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
if let Some((block, receipts)) =
EthBlocks::load_block_and_receipts(&self.eth_api, block_id).await?
{
let block_number = block.number;
let base_fee = block.base_fee_per_gas;
let block_hash = block.hash();
let excess_blob_gas = block.excess_blob_gas;
let timestamp = block.timestamp;
let mut gas_used = 0;
let mut next_log_index = 0;
let mut inputs = Vec::new();
for (idx, (tx, receipt)) in
block.transactions_recovered().zip(receipts.iter()).enumerate()
{
if receipt.cumulative_gas_used() != 0 {
break;
}
let meta = TransactionMeta {
tx_hash: *tx.tx_hash(),
index: idx as u64,
block_hash,
block_number,
base_fee,
excess_blob_gas,
timestamp,
};
let input = ConvertReceiptInput {
receipt: receipt.clone(),
tx,
gas_used: receipt.cumulative_gas_used() - gas_used,
next_log_index,
meta,
};
gas_used = receipt.cumulative_gas_used();
next_log_index += receipt.logs().len();
inputs.push(input);
}
let receipts = self.eth_api.tx_resp_builder().convert_receipts(inputs)?;
Ok(Some(receipts))
} else {
Ok(None)
}
}
}
#[async_trait]
impl<Eth: EthWrapper>
EthSystemTransactionApiServer<RpcTransaction<Eth::NetworkTypes>, RpcReceipt<Eth::NetworkTypes>>
for HlSystemTransactionExt<Eth>
where
jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
/// Returns the system transactions for a given block hash.
/// Semi-compliance with the `eth_getSystemTxsByBlockHash` RPC method introduced by hl-node.
/// https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/hyperevm/json-rpc
///
/// NOTE: Method name differs from hl-node because we retrieve transaction data from EVM
/// (signature recovery for 'from' address, EVM hash calculation) rather than HyperCore.
async fn get_evm_system_txs_by_block_hash(
&self,
hash: B256,
) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?hash, "Serving eth_getEvmSystemTxsByBlockHash");
match self.get_system_txs_by_block_id(BlockId::Hash(hash.into())).await {
Ok(txs) => Ok(txs),
// hl-node returns none if the block is not found
Err(_) => Ok(None),
}
}
/// Returns the system transactions for a given block number, or the latest block if no block
/// number is provided. Semi-compliance with the `eth_getSystemTxsByBlockNumber` RPC method
/// introduced by hl-node. https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/hyperevm/json-rpc
///
/// NOTE: Method name differs from hl-node because we retrieve transaction data from EVM
/// (signature recovery for 'from' address, EVM hash calculation) rather than HyperCore.
async fn get_evm_system_txs_by_block_number(
&self,
id: Option<BlockId>,
) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?id, "Serving eth_getEvmSystemTxsByBlockNumber");
match self.get_system_txs_by_block_id(id.unwrap_or_default()).await? {
Some(txs) => Ok(Some(txs)),
None => {
// hl-node returns an error if the block is not found
Err(ErrorObject::owned(
INTERNAL_ERROR_CODE,
format!("invalid block height: {id:?}"),
Some(()),
))
}
}
}
/// Returns the receipts for the system transactions for a given block hash.
async fn get_evm_system_txs_receipts_by_block_hash(
&self,
hash: B256,
) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?hash, "Serving eth_getEvmSystemTxsReceiptsByBlockHash");
match self.get_system_txs_receipts_by_block_id(BlockId::Hash(hash.into())).await {
Ok(receipts) => Ok(receipts),
// hl-node returns none if the block is not found
Err(_) => Ok(None),
}
}
/// Returns the receipts for the system transactions for a given block number, or the latest
/// block if no block number is provided.
async fn get_evm_system_txs_receipts_by_block_number(
&self,
block_id: Option<BlockId>,
) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?block_id, "Serving eth_getEvmSystemTxsReceiptsByBlockNumber");
match self.get_system_txs_receipts_by_block_id(block_id.unwrap_or_default()).await? {
Some(receipts) => Ok(Some(receipts)),
None => Err(ErrorObject::owned(
INTERNAL_ERROR_CODE,
format!("invalid block height: {block_id:?}"),
Some(()),
)),
}
}
}
pub struct HlNodeFilterHttp<Eth: EthWrapper> {
@@ -80,19 +263,16 @@ impl<Eth: EthWrapper> HlNodeFilterHttp<Eth> {
impl<Eth: EthWrapper> EthFilterApiServer<RpcTransaction<Eth::NetworkTypes>>
for HlNodeFilterHttp<Eth>
{
/// Handler for `eth_newFilter`
async fn new_filter(&self, filter: Filter) -> RpcResult<FilterId> {
trace!(target: "rpc::eth", "Serving eth_newFilter");
self.filter.new_filter(filter).await
}
/// Handler for `eth_newBlockFilter`
async fn new_block_filter(&self) -> RpcResult<FilterId> {
trace!(target: "rpc::eth", "Serving eth_newBlockFilter");
self.filter.new_block_filter().await
}
/// Handler for `eth_newPendingTransactionFilter`
async fn new_pending_transaction_filter(
&self,
kind: Option<PendingTransactionFilterKind>,
@@ -101,7 +281,6 @@ impl<Eth: EthWrapper> EthFilterApiServer<RpcTransaction<Eth::NetworkTypes>>
self.filter.new_pending_transaction_filter(kind).await
}
/// Handler for `eth_getFilterChanges`
async fn filter_changes(
&self,
id: FilterId,
@@ -110,31 +289,20 @@ impl<Eth: EthWrapper> EthFilterApiServer<RpcTransaction<Eth::NetworkTypes>>
self.filter.filter_changes(id).await.map_err(ErrorObject::from)
}
/// Returns an array of all logs matching filter with given id.
///
/// Returns an error if no matching log filter exists.
///
/// Handler for `eth_getFilterLogs`
async fn filter_logs(&self, id: FilterId) -> RpcResult<Vec<Log>> {
trace!(target: "rpc::eth", "Serving eth_getFilterLogs");
self.filter.filter_logs(id).await.map_err(ErrorObject::from)
}
/// Handler for `eth_uninstallFilter`
async fn uninstall_filter(&self, id: FilterId) -> RpcResult<bool> {
trace!(target: "rpc::eth", "Serving eth_uninstallFilter");
self.filter.uninstall_filter(id).await
}
/// Returns logs matching given filter object.
///
/// Handler for `eth_getLogs`
async fn logs(&self, filter: Filter) -> RpcResult<Vec<Log>> {
trace!(target: "rpc::eth", "Serving eth_getLogs");
let logs = EthFilterApiServer::logs(&*self.filter, filter).await?;
let provider = self.provider.clone();
Ok(logs.into_iter().filter_map(|log| adjust_log::<Eth>(log, &provider)).collect())
Ok(logs.into_iter().filter_map(|log| adjust_log::<Eth>(log, &self.provider)).collect())
}
}
@@ -155,10 +323,10 @@ impl<Eth: EthWrapper> HlNodeFilterWs<Eth> {
}
#[async_trait]
impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>>
for HlNodeFilterWs<Eth>
impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>> for HlNodeFilterWs<Eth>
where
jsonrpsee_types::error::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
/// Handler for `eth_subscribe`
async fn subscribe(
&self,
pending: PendingSubscriptionSink,
@@ -166,16 +334,12 @@ impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>>
params: Option<Params>,
) -> jsonrpsee::core::SubscriptionResult {
let sink = pending.accept().await?;
let pubsub = self.pubsub.clone();
let provider = self.provider.clone();
let (pubsub, provider) = (self.pubsub.clone(), self.provider.clone());
self.subscription_task_spawner.spawn(Box::pin(async move {
if kind == SubscriptionKind::Logs {
// if no params are provided, used default filter params
let filter = match params {
Some(Params::Logs(filter)) => *filter,
Some(Params::Bool(_)) => {
return;
}
Some(Params::Logs(f)) => *f,
Some(Params::Bool(_)) => return,
_ => Default::default(),
};
let _ = pipe_from_stream(
@@ -183,100 +347,34 @@ impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>>
pubsub.log_stream(filter).filter_map(|log| adjust_log::<Eth>(log, &provider)),
)
.await;
} else if kind == SubscriptionKind::NewHeads {
let _ = pipe_from_stream(sink, new_headers_stream::<Eth>(&provider)).await;
} else {
let _ = pubsub.handle_accepted(sink, kind, params).await;
};
}
}));
Ok(())
}
}
fn adjust_log<Eth: EthWrapper>(mut log: Log, provider: &Eth::Provider) -> Option<Log> {
let transaction_index = log.transaction_index?;
let log_index = log.log_index?;
let (tx_idx, log_idx) = (log.transaction_index?, log.log_index?);
let receipts = provider.receipts_by_block(log.block_number?.into()).unwrap()?;
// System transactions are always at the beginning of the block,
// so we can use the transaction index to determine if the log is from a system transaction,
// and if it is, we can exclude it.
//
// For non-system transactions, we can just return the log as is, and the client will
// adjust the transaction index accordingly.
let mut system_tx_count = 0u64;
let mut system_tx_logs_count = 0u64;
let (mut sys_tx_count, mut sys_log_count) = (0u64, 0u64);
for receipt in receipts {
let is_system_tx = receipt.cumulative_gas_used() == 0;
if is_system_tx {
system_tx_count += 1;
system_tx_logs_count += receipt.logs().len() as u64;
if receipt.cumulative_gas_used() == 0 {
sys_tx_count += 1;
sys_log_count += receipt.logs().len() as u64;
}
}
if system_tx_count > transaction_index {
if sys_tx_count > tx_idx {
return None;
}
log.transaction_index = Some(transaction_index - system_tx_count);
log.log_index = Some(log_index - system_tx_logs_count);
log.transaction_index = Some(tx_idx - sys_tx_count);
log.log_index = Some(log_idx - sys_log_count);
Some(log)
}
/// Helper to convert a serde error into an [`ErrorObject`]
#[derive(Debug, thiserror::Error)]
#[error("Failed to serialize subscription item: {0}")]
pub struct SubscriptionSerializeError(#[from] serde_json::Error);
impl SubscriptionSerializeError {
const fn new(err: serde_json::Error) -> Self {
Self(err)
}
}
impl From<SubscriptionSerializeError> for ErrorObject<'static> {
fn from(value: SubscriptionSerializeError) -> Self {
internal_rpc_err(value.to_string())
}
}
async fn pipe_from_stream<T, St>(
sink: SubscriptionSink,
mut stream: St,
) -> Result<(), ErrorObject<'static>>
where
St: Stream<Item = T> + Unpin,
T: Serialize,
{
loop {
tokio::select! {
_ = sink.closed() => {
// connection dropped
break Ok(())
},
maybe_item = stream.next() => {
let item = match maybe_item {
Some(item) => item,
None => {
// stream ended
break Ok(())
},
};
let msg = SubscriptionMessage::new(
sink.method_name(),
sink.subscription_id(),
&item
).map_err(SubscriptionSerializeError::new)?;
if sink.send(msg).await.is_err() {
break Ok(());
}
}
}
}
}
pub struct HlNodeBlockFilterHttp<Eth: EthWrapper> {
eth_api: Arc<Eth>,
_marker: PhantomData<Eth>,
@@ -321,10 +419,6 @@ macro_rules! engine_span {
};
}
fn is_system_tx(tx: &TransactionSigned) -> bool {
tx.is_system_transaction()
}
fn adjust_block<Eth: EthWrapper>(
recovered_block: &RpcBlock<Eth::NetworkTypes>,
eth_api: &Eth,
@@ -335,6 +429,11 @@ fn adjust_block<Eth: EthWrapper>(
new_block.transactions = match new_block.transactions {
BlockTransactions::Full(mut transactions) => {
transactions.drain(..system_tx_count);
transactions.iter_mut().for_each(|tx| {
if let Some(idx) = &mut tx.transaction_index {
*idx -= system_tx_count as u64;
}
});
BlockTransactions::Full(transactions)
}
BlockTransactions::Hashes(mut hashes) => {
@@ -381,7 +480,7 @@ async fn adjust_block_receipts<Eth: EthWrapper>(
};
let input = ConvertReceiptInput {
receipt: Cow::Borrowed(receipt),
receipt: receipt.clone(),
tx,
gas_used: receipt.cumulative_gas_used() - gas_used,
next_log_index,
@@ -410,8 +509,8 @@ async fn adjust_transaction_receipt<Eth: EthWrapper>(
) -> Result<Option<RpcReceipt<Eth::NetworkTypes>>, Eth::Error> {
match eth_api.load_transaction_and_receipt(tx_hash).await? {
Some((_, meta, _)) => {
// LoadReceipt::block_transaction_receipt loads the block again, so loading blocks again doesn't hurt performance much
info!("block hash: {:?}", meta.block_hash);
// LoadReceipt::block_transaction_receipt loads the block again, so loading blocks again
// doesn't hurt performance much
let Some((system_tx_count, block_receipts)) =
adjust_block_receipts(meta.block_hash.into(), eth_api).await?
else {
@@ -423,11 +522,12 @@ }
}
}
// This function assumes that `block_id` is already validated by the caller.
fn system_tx_count_for_block<Eth: EthWrapper>(eth_api: &Eth, block_id: BlockId) -> usize {
let provider = eth_api.provider();
let block = provider.block_by_id(block_id).unwrap().unwrap();
let system_tx_count = block.body.transactions().iter().filter(|tx| is_system_tx(tx)).count();
system_tx_count
let header = provider.header_by_id(block_id).unwrap().unwrap();
header.extras.system_tx_count.try_into().unwrap()
}
#[async_trait]
@@ -464,8 +564,9 @@ where
let res =
self.eth_api.block_transaction_count_by_hash(hash).instrument(engine_span!()).await?;
Ok(res.map(|count| {
count
- U256::from(system_tx_count_for_block(&*self.eth_api, BlockId::Hash(hash.into())))
let sys_tx_count =
system_tx_count_for_block(&*self.eth_api, BlockId::Hash(hash.into()));
count - U256::from(sys_tx_count)
}))
}
@@ -500,6 +601,9 @@ where
block_id: BlockId,
) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?block_id, "Serving eth_getBlockReceipts");
if self.eth_api.provider().block_by_id(block_id).map_err(EthApiError::from)?.is_none() {
return Ok(None);
}
let result =
adjust_block_receipts(block_id, &*self.eth_api).instrument(engine_span!()).await?;
Ok(result.map(|(_, receipts)| receipts))
@@ -507,7 +611,7 @@ }
}
pub fn install_hl_node_compliance<Node, EthApi>(
ctx: RpcContext<Node, EthApi>,
ctx: &mut RpcContext<Node, EthApi>,
) -> Result<(), eyre::Error>
where
Node: FullNodeComponents,
@@ -534,5 +638,9 @@ where
ctx.modules.replace_configured(
HlNodeBlockFilterHttp::new(Arc::new(ctx.registry.eth_api().clone())).into_rpc(),
)?;
ctx.modules
.merge_configured(HlSystemTransactionExt::new(ctx.registry.eth_api().clone()).into_rpc())?;
Ok(())
}
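The `adjust_block` hunk above strips the leading system transactions and rebases the remaining `transaction_index` values. A minimal sketch of that logic on a plain stand-in type (only the index field mirrors the real RPC transaction; everything else is hypothetical):

// Stand-in for the RPC transaction; only the index matters here.
struct Tx {
    transaction_index: Option<u64>,
}

/// Drops the leading `system_tx_count` entries and shifts the remaining
/// indices down so they stay consistent with the trimmed list.
fn strip_system_txs(mut txs: Vec<Tx>, system_tx_count: usize) -> Vec<Tx> {
    txs.drain(..system_tx_count);
    for tx in &mut txs {
        if let Some(idx) = &mut tx.transaction_index {
            *idx -= system_tx_count as u64;
        }
    }
    txs
}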

5
src/addons/mod.rs Normal file
View File

@ -0,0 +1,5 @@
pub mod call_forwarder;
pub mod hl_node_compliance;
pub mod tx_forwarder;
pub mod subscribe_fixup;
mod utils;

View File

@ -0,0 +1,54 @@
use crate::addons::utils::{EthWrapper, new_headers_stream, pipe_from_stream};
use alloy_rpc_types::pubsub::{Params, SubscriptionKind};
use async_trait::async_trait;
use jsonrpsee::PendingSubscriptionSink;
use jsonrpsee_types::ErrorObject;
use reth::tasks::TaskSpawner;
use reth_rpc::EthPubSub;
use reth_rpc_convert::RpcTransaction;
use reth_rpc_eth_api::{EthApiTypes, EthPubSubApiServer};
use std::sync::Arc;
pub struct SubscribeFixup<Eth: EthWrapper> {
pubsub: Arc<EthPubSub<Eth>>,
provider: Arc<Eth::Provider>,
subscription_task_spawner: Box<dyn TaskSpawner + 'static>,
}
#[async_trait]
impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>> for SubscribeFixup<Eth>
where
ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
async fn subscribe(
&self,
pending: PendingSubscriptionSink,
kind: SubscriptionKind,
params: Option<Params>,
) -> jsonrpsee::core::SubscriptionResult {
let sink = pending.accept().await?;
let (pubsub, provider) = (self.pubsub.clone(), self.provider.clone());
self.subscription_task_spawner.spawn(Box::pin(async move {
if kind == SubscriptionKind::NewHeads {
let _ = pipe_from_stream(sink, new_headers_stream::<Eth>(&provider)).await;
} else {
let _ = pubsub.handle_accepted(sink, kind, params).await;
}
}));
Ok(())
}
}
impl<Eth: EthWrapper> SubscribeFixup<Eth> {
pub fn new(
pubsub: Arc<EthPubSub<Eth>>,
provider: Arc<Eth::Provider>,
subscription_task_spawner: Box<dyn TaskSpawner + 'static>,
) -> Self
where
Eth: EthWrapper,
ErrorObject<'static>: From<Eth::Error>,
{
Self { pubsub, provider, subscription_task_spawner }
}
}
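The `subscribe` implementation above accepts the pending sink first and then branches inside a spawned task, so only `newHeads` takes the fixed-up path. The same shape with tokio alone; `Kind` and both async fns are illustrative stubs, not reth or jsonrpsee APIs:

#[derive(PartialEq)]
enum Kind {
    NewHeads,
    Other,
}

async fn fixed_new_heads() { /* pipe the converted headers */ }
async fn stock_handler(_kind: Kind) { /* default pubsub path */ }

// Accepting before spawning keeps setup errors on the caller; the
// long-running piping happens off the RPC request path.
fn spawn_subscription(kind: Kind) {
    tokio::spawn(async move {
        if kind == Kind::NewHeads {
            fixed_new_heads().await
        } else {
            stock_handler(kind).await
        }
    });
}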

View File

@ -2,14 +2,14 @@ use std::time::Duration;
use alloy_json_rpc::RpcObject;
use alloy_network::Ethereum;
use alloy_primitives::{Bytes, B256};
use alloy_primitives::{B256, Bytes};
use alloy_rpc_types::TransactionRequest;
use jsonrpsee::{
http_client::{HttpClient, HttpClientBuilder},
proc_macros::rpc,
types::{error::INTERNAL_ERROR_CODE, ErrorObject},
types::{ErrorObject, error::INTERNAL_ERROR_CODE},
};
use jsonrpsee_core::{async_trait, client::ClientT, ClientError, RpcResult};
use jsonrpsee_core::{ClientError, RpcResult, async_trait, client::ClientT};
use reth::rpc::{result::internal_rpc_err, server_types::eth::EthApiError};
use reth_rpc_eth_api::RpcReceipt;
@ -37,7 +37,7 @@ impl EthForwarderExt {
Self { client }
}
fn from_client_error(e: ClientError, internal_error_prefix: &str) -> ErrorObject {
fn from_client_error(e: ClientError, internal_error_prefix: &str) -> ErrorObject<'static> {
match e {
ClientError::Call(e) => e,
_ => ErrorObject::owned(

90
src/addons/utils.rs Normal file
View File

@ -0,0 +1,90 @@
use std::sync::Arc;
use crate::{HlBlock, HlPrimitives};
use alloy_primitives::U256;
use alloy_rpc_types::Header;
use futures::StreamExt;
use jsonrpsee::{SubscriptionMessage, SubscriptionSink};
use jsonrpsee_types::ErrorObject;
use reth_primitives::SealedHeader;
use reth_provider::{BlockReader, CanonStateSubscriptions};
use reth_rpc::{RpcTypes, eth::pubsub::SubscriptionSerializeError};
use reth_rpc_convert::{RpcBlock, RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq};
use reth_rpc_eth_api::{
EthApiServer, FullEthApiTypes, RpcNodeCoreExt,
helpers::{EthBlocks, EthTransactions, LoadReceipt},
};
use serde::Serialize;
use tokio_stream::Stream;
pub trait EthWrapper:
EthApiServer<
RpcTxReq<Self::NetworkTypes>,
RpcTransaction<Self::NetworkTypes>,
RpcBlock<Self::NetworkTypes>,
RpcReceipt<Self::NetworkTypes>,
RpcHeader<Self::NetworkTypes>,
> + FullEthApiTypes<
Primitives = HlPrimitives,
NetworkTypes: RpcTypes<TransactionResponse = alloy_rpc_types_eth::Transaction>,
> + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
+ EthBlocks
+ EthTransactions
+ LoadReceipt
+ 'static
{
}
impl<T> EthWrapper for T where
T: EthApiServer<
RpcTxReq<Self::NetworkTypes>,
RpcTransaction<Self::NetworkTypes>,
RpcBlock<Self::NetworkTypes>,
RpcReceipt<Self::NetworkTypes>,
RpcHeader<Self::NetworkTypes>,
> + FullEthApiTypes<
Primitives = HlPrimitives,
NetworkTypes: RpcTypes<TransactionResponse = alloy_rpc_types_eth::Transaction>,
> + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
+ EthBlocks
+ EthTransactions
+ LoadReceipt
+ 'static
{
}
pub(super) async fn pipe_from_stream<T: Serialize, St: Stream<Item = T> + Unpin>(
sink: SubscriptionSink,
mut stream: St,
) -> Result<(), ErrorObject<'static>> {
loop {
tokio::select! {
_ = sink.closed() => break Ok(()),
maybe_item = stream.next() => {
let Some(item) = maybe_item else { break Ok(()) };
let msg = SubscriptionMessage::new(sink.method_name(), sink.subscription_id(), &item)
.map_err(SubscriptionSerializeError::from)?;
if sink.send(msg).await.is_err() { break Ok(()); }
}
}
}
}
pub(super) fn new_headers_stream<Eth: EthWrapper>(
provider: &Arc<Eth::Provider>,
) -> impl Stream<Item = Header<alloy_consensus::Header>> {
provider.canonical_state_stream().flat_map(|new_chain| {
let headers = new_chain
.committed()
.blocks_iter()
.map(|block| {
Header::from_consensus(
SealedHeader::new(block.header().inner.clone(), block.hash()).into(),
None,
Some(U256::from(block.rlp_length())),
)
})
.collect::<Vec<_>>();
futures::stream::iter(headers)
})
}
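`pipe_from_stream` above races the sink's close signal against the next stream item. The same loop with a tokio mpsc sender standing in for the subscription sink (a sketch under that substitution, not the jsonrpsee types):

use futures::{Stream, StreamExt};
use tokio::sync::mpsc;

// Forward items until the stream ends or the receiver goes away.
async fn pipe<T, St: Stream<Item = T> + Unpin>(tx: mpsc::Sender<T>, mut stream: St) {
    loop {
        tokio::select! {
            // completes once the receiving side is dropped
            _ = tx.closed() => break,
            maybe_item = stream.next() => {
                let Some(item) = maybe_item else { break };
                if tx.send(item).await.is_err() {
                    break;
                }
            }
        }
    }
}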

View File

@ -1,5 +1,5 @@
use alloy_chains::{Chain, NamedChain};
use alloy_primitives::{b256, Address, Bytes, B256, B64, U256};
use alloy_primitives::{Address, B64, B256, Bytes, U256, b256};
use reth_chainspec::{ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition, Hardfork};
use reth_primitives::{Header, SealedHeader};
use std::sync::LazyLock;
@ -7,7 +7,6 @@ use std::sync::LazyLock;
static GENESIS_HASH: B256 =
b256!("d8fcc13b6a195b88b7b2da3722ff6cad767b13a8c1e9ffb1c73aa9d216d895f0");
/// HL hardforks
pub static HL_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| {
ChainHardforks::new(vec![
(EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),

View File

@ -1,9 +1,7 @@
//! Chain specification for HyperEVM.
pub mod hl;
pub mod parser;
use crate::hardforks::{hl::HlHardfork, HlHardforks};
use alloy_consensus::Header;
use crate::{hardforks::HlHardforks, node::primitives::{header::HlHeaderExtras, HlHeader}};
use alloy_eips::eip7840::BlobParams;
use alloy_genesis::Genesis;
use alloy_primitives::{Address, B256, U256};
@ -13,20 +11,19 @@ use reth_chainspec::{
};
use reth_discv4::NodeRecord;
use reth_evm::eth::spec::EthExecutorSpec;
use std::{fmt::Display, sync::Arc};
use std::fmt::Display;
pub const MAINNET_CHAIN_ID: u64 = 999;
pub const TESTNET_CHAIN_ID: u64 = 998;
/// Hl chain spec type.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct HlChainSpec {
/// [`ChainSpec`].
pub inner: ChainSpec,
pub genesis_header: HlHeader,
}
impl EthChainSpec for HlChainSpec {
type Header = Header;
type Header = HlHeader;
fn blob_params_at_timestamp(&self, timestamp: u64) -> Option<BlobParams> {
self.inner.blob_params_at_timestamp(timestamp)
@ -40,10 +37,6 @@ impl EthChainSpec for HlChainSpec {
self.inner.chain()
}
fn base_fee_params_at_block(&self, block_number: u64) -> BaseFeeParams {
self.inner.base_fee_params_at_block(block_number)
}
fn base_fee_params_at_timestamp(&self, timestamp: u64) -> BaseFeeParams {
self.inner.base_fee_params_at_timestamp(timestamp)
}
@ -64,8 +57,8 @@ impl EthChainSpec for HlChainSpec {
Box::new(self.inner.display_hardforks())
}
fn genesis_header(&self) -> &Header {
self.inner.genesis_header()
fn genesis_header(&self) -> &HlHeader {
&self.genesis_header
}
fn genesis(&self) -> &Genesis {
@ -75,10 +68,6 @@ impl EthChainSpec for HlChainSpec {
fn bootnodes(&self) -> Option<Vec<NodeRecord>> {
self.inner.bootnodes()
}
fn is_optimism(&self) -> bool {
false
}
}
impl Hardforks for HlChainSpec {
@ -105,23 +94,13 @@ impl Hardforks for HlChainSpec {
}
}
impl From<ChainSpec> for HlChainSpec {
fn from(value: ChainSpec) -> Self {
Self { inner: value }
}
}
impl EthereumHardforks for HlChainSpec {
fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition {
self.inner.ethereum_fork_activation(fork)
}
}
impl HlHardforks for HlChainSpec {
fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition {
self.fork(fork)
}
}
impl HlHardforks for HlChainSpec {}
impl EthExecutorSpec for HlChainSpec {
fn deposit_contract_address(&self) -> Option<Address> {
@ -129,18 +108,6 @@ impl EthExecutorSpec for HlChainSpec {
}
}
impl From<HlChainSpec> for ChainSpec {
fn from(value: HlChainSpec) -> Self {
value.inner
}
}
impl HlHardforks for Arc<HlChainSpec> {
fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition {
self.as_ref().hl_fork_activation(fork)
}
}
impl HlChainSpec {
pub const MAINNET_RPC_URL: &str = "https://rpc.hyperliquid.xyz/evm";
pub const TESTNET_RPC_URL: &str = "https://rpc.hyperliquid-testnet.xyz/evm";
@ -160,4 +127,10 @@ impl HlChainSpec {
_ => unreachable!("Unreachable since ChainSpecParser won't return other chains"),
}
}
fn new(inner: ChainSpec) -> Self {
let genesis_header =
HlHeader { inner: inner.genesis_header().clone(), extras: HlHeaderExtras::default() };
Self { inner, genesis_header }
}
}
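`HlChainSpec::new` above derives its genesis header by wrapping the inner eth header with default extras. The wrapping pattern in isolation, with local stand-ins rather than the real `HlHeader`/`HlHeaderExtras`:

#[derive(Clone, Default)]
struct Extras {
    system_tx_count: u64,
}

#[derive(Clone)]
struct Wrapped<H> {
    inner: H,
    extras: Extras,
}

impl<H: Clone> Wrapped<H> {
    // The genesis block carries no system transactions, so extras
    // start at their defaults.
    fn from_inner(inner: &H) -> Self {
        Self { inner: inner.clone(), extras: Extras::default() }
    }
}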

View File

@ -1,4 +1,4 @@
use crate::chainspec::{hl::hl_testnet, HlChainSpec};
use crate::chainspec::{HlChainSpec, hl::hl_testnet};
use super::hl::hl_mainnet;
use reth_cli::chainspec::ChainSpecParser;
@ -26,8 +26,8 @@ impl ChainSpecParser for HlChainSpecParser {
/// Currently only mainnet is supported.
pub fn chain_value_parser(s: &str) -> eyre::Result<Arc<HlChainSpec>> {
match s {
"mainnet" => Ok(Arc::new(HlChainSpec { inner: hl_mainnet() })),
"testnet" => Ok(Arc::new(HlChainSpec { inner: hl_testnet() })),
"mainnet" => Ok(Arc::new(HlChainSpec::new(hl_mainnet()))),
"testnet" => Ok(Arc::new(HlChainSpec::new(hl_testnet()))),
_ => Err(eyre::eyre!("Unsupported chain: {}", s)),
}
}

View File

@ -1,4 +1,4 @@
use alloy_primitives::{BlockNumber, B256};
use alloy_primitives::{B256, BlockNumber};
use reth_provider::{BlockNumReader, ProviderError};
use std::cmp::Ordering;

View File

@ -2,8 +2,8 @@ use super::HlEvmInner;
use crate::evm::{spec::HlSpecId, transaction::HlTxTr};
use reth_revm::context::ContextTr;
use revm::{
context::Cfg, context_interface::Block, handler::instructions::EthInstructions,
interpreter::interpreter::EthInterpreter, Context, Database,
Context, Database, context::Cfg, context_interface::Block,
handler::instructions::EthInstructions, interpreter::interpreter::EthInterpreter,
};
/// Trait that allows for hl HlEvm to be built.

View File

@ -1,8 +1,8 @@
use crate::evm::{spec::HlSpecId, transaction::HlTxEnv};
use revm::{
Context, Journal, MainContext,
context::{BlockEnv, CfgEnv, TxEnv},
database_interface::EmptyDB,
Context, Journal, MainContext,
};
/// Type alias for the default context type of the HlEvm.

View File

@ -1,16 +1,16 @@
use super::HlEvmInner;
use crate::evm::{spec::HlSpecId, transaction::HlTxTr};
use revm::{
context::{result::HaltReason, ContextSetters},
context_interface::{
result::{EVMError, ExecutionResult, ResultAndState},
Cfg, ContextTr, Database, JournalTr,
},
handler::{instructions::EthInstructions, PrecompileProvider},
inspector::{InspectCommitEvm, InspectEvm, Inspector, JournalExt},
interpreter::{interpreter::EthInterpreter, InterpreterResult},
state::EvmState,
DatabaseCommit, ExecuteCommitEvm, ExecuteEvm,
context::{ContextSetters, result::HaltReason},
context_interface::{
Cfg, ContextTr, Database, JournalTr,
result::{EVMError, ExecutionResult, ResultAndState},
},
handler::{PrecompileProvider, instructions::EthInstructions},
inspector::{InspectCommitEvm, InspectEvm, Inspector, JournalExt},
interpreter::{InterpreterResult, interpreter::EthInterpreter},
state::EvmState,
};
// Type alias for HL context

View File

@ -1,15 +1,15 @@
use revm::{
Inspector,
bytecode::opcode::BLOCKHASH,
context::{ContextSetters, Evm, FrameStack},
context_interface::ContextTr,
handler::{
EthFrame, EthPrecompiles, EvmTr, FrameInitOrResult, FrameTr, PrecompileProvider,
evm::{ContextDbError, FrameInitResult},
instructions::{EthInstructions, InstructionProvider},
EthFrame, EthPrecompiles, EvmTr, FrameInitOrResult, FrameTr, PrecompileProvider,
},
inspector::{InspectorEvmTr, JournalExt},
interpreter::{interpreter::EthInterpreter, Instruction, InterpreterResult},
Inspector,
interpreter::{Instruction, InterpreterResult, interpreter::EthInterpreter},
};
use crate::chainspec::MAINNET_CHAIN_ID;

View File

@ -7,36 +7,12 @@ use alloy_primitives::keccak256;
use revm::{
context::Host,
interpreter::{
as_u64_saturated, interpreter_types::StackTr, popn_top, InstructionContext,
InterpreterTypes,
_count, InstructionContext, InterpreterTypes, as_u64_saturated, interpreter_types::StackTr,
popn_top,
},
primitives::{BLOCK_HASH_HISTORY, U256},
};
#[doc(hidden)]
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! _count {
(@count) => { 0 };
(@count $head:tt $($tail:tt)*) => { 1 + _count!(@count $($tail)*) };
($($arg:tt)*) => { _count!(@count $($arg)*) };
}
/// Pops n values from the stack and returns the top value. Fails the instruction if n values can't
/// be popped.
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! popn_top {
([ $($x:ident),* ], $top:ident, $interpreter:expr $(,$ret:expr)? ) => {
// Workaround for https://github.com/rust-lang/rust/issues/144329.
if $interpreter.stack.len() < (1 + $crate::_count!($($x)*)) {
$interpreter.halt_underflow();
return $($ret)?;
}
let ([$( $x ),*], $top) = unsafe { $interpreter.stack.popn_top().unwrap_unchecked() };
};
}
/// Implements the BLOCKHASH instruction.
///
/// Gets the hash of one of the 256 most recent complete blocks.

View File

@ -1 +0,0 @@

View File

@ -1,4 +1,3 @@
pub mod api;
mod handler;
pub mod spec;
pub mod transaction;

View File

@ -1,20 +1,15 @@
use revm::primitives::hardfork::SpecId;
use std::str::FromStr;
#[repr(u8)]
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum HlSpecId {
/// Placeholder for the EVM Cancun fork
#[default]
V1, // V1
V1,
}
impl HlSpecId {
pub const fn is_enabled_in(self, other: HlSpecId) -> bool {
other as u8 <= self as u8
}
/// Converts the [`HlSpecId`] into a [`SpecId`].
pub const fn into_eth_spec(self) -> SpecId {
match self {
Self::V1 => SpecId::CANCUN,
@ -23,31 +18,8 @@ impl HlSpecId {
}
impl From<HlSpecId> for SpecId {
/// Converts the [`HlSpecId`] into a [`SpecId`].
fn from(spec: HlSpecId) -> Self {
spec.into_eth_spec()
}
}
/// String identifiers for HL hardforks
pub mod name {
pub const V1: &str = "V1";
}
impl FromStr for HlSpecId {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s {
name::V1 => Self::V1,
_ => return Err(format!("Unknown HL spec: {s}")),
})
}
}
impl From<HlSpecId> for &'static str {
fn from(spec_id: HlSpecId) -> Self {
match spec_id {
HlSpecId::V1 => name::V1,
}
}
}
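`is_enabled_in` above compares `#[repr(u8)]` discriminants, so later variants automatically include earlier ones. With a second, purely hypothetical variant the ordering becomes visible:

#[repr(u8)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum Spec {
    V1,
    V2, // hypothetical later fork, for illustration only
}

const fn is_enabled_in(this: Spec, other: Spec) -> bool {
    // a spec is "enabled in" every spec at or after it
    other as u8 <= this as u8
}

// is_enabled_in(Spec::V2, Spec::V1) == true
// is_enabled_in(Spec::V1, Spec::V2) == false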

View File

@ -7,7 +7,7 @@ use reth_primitives_traits::SignerRecoverable;
use revm::{
context::TxEnv,
context_interface::transaction::Transaction,
primitives::{Address, Bytes, TxKind, B256, U256},
primitives::{Address, B256, Bytes, TxKind, U256},
};
#[auto_impl(&, &mut, Box, Arc)]
@ -124,12 +124,13 @@ impl FromRecoveredTx<TransactionSigned> for HlTxEnv<TxEnv> {
impl FromTxWithEncoded<TransactionSigned> for HlTxEnv<TxEnv> {
fn from_encoded_tx(tx: &TransactionSigned, sender: Address, _encoded: Bytes) -> Self {
use reth_primitives::Transaction;
let base = match tx.clone().into_inner().into_typed_transaction() {
reth_primitives::Transaction::Legacy(tx) => TxEnv::from_recovered_tx(&tx, sender),
reth_primitives::Transaction::Eip2930(tx) => TxEnv::from_recovered_tx(&tx, sender),
reth_primitives::Transaction::Eip1559(tx) => TxEnv::from_recovered_tx(&tx, sender),
reth_primitives::Transaction::Eip4844(tx) => TxEnv::from_recovered_tx(&tx, sender),
reth_primitives::Transaction::Eip7702(tx) => TxEnv::from_recovered_tx(&tx, sender),
Transaction::Legacy(tx) => TxEnv::from_recovered_tx(&tx, sender),
Transaction::Eip2930(tx) => TxEnv::from_recovered_tx(&tx, sender),
Transaction::Eip1559(tx) => TxEnv::from_recovered_tx(&tx, sender),
Transaction::Eip4844(tx) => TxEnv::from_recovered_tx(&tx, sender),
Transaction::Eip7702(tx) => TxEnv::from_recovered_tx(&tx, sender),
};
Self { base }

View File

@ -2,7 +2,7 @@
use alloy_chains::{Chain, NamedChain};
use core::any::Any;
use reth_chainspec::ForkCondition;
use reth_ethereum_forks::{hardfork, ChainHardforks, EthereumHardfork, Hardfork};
use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, Hardfork, hardfork};
hardfork!(
/// The name of a hl hardfork.
@ -13,88 +13,5 @@ hardfork!(
HlHardfork {
/// Initial version
V1,
/// block.number bugfix
V2,
/// gas mismatch bugfix
V3,
}
);
impl HlHardfork {
/// Retrieves the activation block for the specified hardfork on the given chain.
pub fn activation_block<H: Hardfork>(self, fork: H, chain: Chain) -> Option<u64> {
if chain == Chain::from_named(NamedChain::Hyperliquid) {
return Self::hl_mainnet_activation_block(fork);
}
None
}
/// Retrieves the activation timestamp for the specified hardfork on the given chain.
pub fn activation_timestamp<H: Hardfork>(self, fork: H, chain: Chain) -> Option<u64> {
None
}
/// Retrieves the activation block for the specified hardfork on the HyperLiquid mainnet.
pub fn hl_mainnet_activation_block<H: Hardfork>(fork: H) -> Option<u64> {
match_hardfork(
fork,
|fork| match fork {
EthereumHardfork::Frontier |
EthereumHardfork::Homestead |
EthereumHardfork::Tangerine |
EthereumHardfork::SpuriousDragon |
EthereumHardfork::Byzantium |
EthereumHardfork::Constantinople |
EthereumHardfork::Petersburg |
EthereumHardfork::Istanbul |
EthereumHardfork::MuirGlacier |
EthereumHardfork::Berlin |
EthereumHardfork::London |
EthereumHardfork::Shanghai |
EthereumHardfork::Cancun => Some(0),
_ => None,
},
|fork| match fork {
Self::V1 | Self::V2 | Self::V3 => Some(0),
_ => None,
},
)
}
/// Hl mainnet list of hardforks.
pub fn hl_mainnet() -> ChainHardforks {
ChainHardforks::new(vec![
(EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::London.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Shanghai.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Cancun.boxed(), ForkCondition::Block(0)),
(Self::V1.boxed(), ForkCondition::Block(0)),
(Self::V2.boxed(), ForkCondition::Block(0)),
(Self::V3.boxed(), ForkCondition::Block(0)),
])
}
}
/// Match helper method since it's not possible to match on `dyn Hardfork`
fn match_hardfork<H, HF, HHF>(fork: H, hardfork_fn: HF, hl_hardfork_fn: HHF) -> Option<u64>
where
H: Hardfork,
HF: Fn(&EthereumHardfork) -> Option<u64>,
HHF: Fn(&HlHardfork) -> Option<u64>,
{
let fork: &dyn Any = &fork;
if let Some(fork) = fork.downcast_ref::<EthereumHardfork>() {
return hardfork_fn(fork);
}
fork.downcast_ref::<HlHardfork>().and_then(hl_hardfork_fn)
}

View File

@ -1,13 +1,14 @@
//! Hard forks of hl protocol.
//! Hard forks of HyperEVM.
#![allow(unused)]
use hl::HlHardfork;
use reth_chainspec::{EthereumHardforks, ForkCondition};
pub mod hl;
use hl::HlHardfork;
use reth_chainspec::{EthereumHardforks, ForkCondition};
use std::sync::Arc;
/// Extends [`EthereumHardforks`] with hl helper methods.
pub trait HlHardforks: EthereumHardforks {
/// Retrieves [`ForkCondition`] by an [`HlHardfork`]. If `fork` is not present, returns
/// [`ForkCondition::Never`].
fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition;
}
///
/// Currently a placeholder for future use.
pub trait HlHardforks: EthereumHardforks {}
impl<T: HlHardforks> HlHardforks for Arc<T> {}
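The `Arc` blanket impl above is what lets `Arc<HlChainSpec>` satisfy the marker trait without a dedicated impl. Reduced to local types:

use std::sync::Arc;

trait Marker {}

// Any Arc around a Marker is itself a Marker.
impl<T: Marker> Marker for Arc<T> {}

struct Spec;
impl Marker for Spec {}

fn takes_marker<M: Marker>(_: &M) {}

fn demo() {
    takes_marker(&Spec);           // direct impl
    takes_marker(&Arc::new(Spec)); // via the blanket impl
}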

View File

@ -1,11 +1,10 @@
pub mod call_forwarder;
pub mod addons;
pub mod chainspec;
pub mod consensus;
mod evm;
mod hardforks;
pub mod hl_node_compliance;
pub mod node;
pub mod pseudo_peer;
pub mod tx_forwarder;
pub mod version;
pub use node::primitives::{HlBlock, HlBlockBody, HlPrimitives};
pub use node::primitives::{HlBlock, HlBlockBody, HlHeader, HlPrimitives};

View File

@ -1,18 +1,25 @@
use std::sync::Arc;
use clap::Parser;
use reth::builder::{NodeBuilder, NodeHandle, WithLaunchContext};
use reth::{
builder::{NodeBuilder, NodeHandle, WithLaunchContext},
rpc::{api::EthPubSubApiServer, eth::RpcNodeCore},
};
use reth_db::DatabaseEnv;
use reth_hl::{
addons::{
call_forwarder::{self, CallForwarderApiServer},
chainspec::{parser::HlChainSpecParser, HlChainSpec},
hl_node_compliance::install_hl_node_compliance,
node::{
cli::{Cli, HlNodeArgs},
storage::tables::Tables,
HlNode,
},
subscribe_fixup::SubscribeFixup,
tx_forwarder::{self, EthForwarderApiServer},
},
chainspec::{HlChainSpec, parser::HlChainSpecParser},
node::{
HlNode,
cli::{Cli, HlNodeArgs},
rpc::precompile::{HlBlockPrecompileApiServer, HlBlockPrecompileExt},
storage::tables::Tables,
},
};
use tracing::info;
@ -24,20 +31,19 @@ static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
fn main() -> eyre::Result<()> {
reth_cli_util::sigsegv_handler::install();
// Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided.
if std::env::var_os("RUST_BACKTRACE").is_none() {
std::env::set_var("RUST_BACKTRACE", "1");
}
// Initialize custom version metadata before parsing CLI so --version uses reth-hl values
reth_hl::version::init_reth_hl_version();
Cli::<HlChainSpecParser, HlNodeArgs>::parse().run(
|builder: WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, HlChainSpec>>,
ext: HlNodeArgs| async move {
let default_upstream_rpc_url = builder.config().chain.official_rpc_url();
let (node, engine_handle_tx) = HlNode::new(ext.block_source_args.parse().await?);
let (node, engine_handle_tx) =
HlNode::new(ext.block_source_args.parse().await?, ext.debug_cutoff_height);
let NodeHandle { node, node_exit_future: exit_future } = builder
.node(node)
.extend_rpc_modules(move |ctx| {
.extend_rpc_modules(move |mut ctx| {
let upstream_rpc_url =
ext.upstream_rpc_url.unwrap_or_else(|| default_upstream_rpc_url.to_owned());
@ -57,15 +63,35 @@ fn main() -> eyre::Result<()> {
info!("Call/gas estimation will be forwarded to {}", upstream_rpc_url);
}
// This is a temporary workaround for the custom-header issue that
// affects `eth_subscribe` with `newHeads`.
ctx.modules.replace_configured(
SubscribeFixup::new(
Arc::new(ctx.registry.eth_handlers().pubsub.clone()),
Arc::new(ctx.registry.eth_api().provider().clone()),
Box::new(ctx.node().task_executor.clone()),
)
.into_rpc(),
)?;
if ext.hl_node_compliant {
install_hl_node_compliance(ctx)?;
install_hl_node_compliance(&mut ctx)?;
info!("hl-node compliant mode enabled");
}
if !ext.experimental_eth_get_proof {
ctx.modules.remove_method_from_configured("eth_getProof");
info!("eth_getProof is disabled by default");
}
ctx.modules.merge_configured(
HlBlockPrecompileExt::new(ctx.registry.eth_api().clone()).into_rpc(),
)?;
Ok(())
})
.apply(|builder| {
builder.db().create_tables_for::<Tables>().expect("create tables");
.apply(|mut builder| {
builder.db_mut().create_tables_for::<Tables>().expect("create tables");
builder
})
.launch()

View File

@ -1,21 +1,24 @@
use crate::{
chainspec::{parser::HlChainSpecParser, HlChainSpec},
node::{consensus::HlConsensus, evm::config::HlEvmConfig, storage::tables::Tables, HlNode},
chainspec::{HlChainSpec, parser::HlChainSpecParser},
node::{
HlNode, consensus::HlConsensus, evm::config::HlEvmConfig, migrate::Migrator,
storage::tables::Tables,
},
pseudo_peer::BlockSourceArgs,
};
use clap::{Args, Parser};
use reth::{
args::LogArgs,
CliRunner,
args::{DatabaseArgs, DatadirArgs, LogArgs},
builder::{NodeBuilder, WithLaunchContext},
cli::Commands,
prometheus_exporter::install_prometheus_recorder,
version::version_metadata,
CliRunner,
};
use reth_chainspec::EthChainSpec;
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_commands::{common::EnvironmentArgs, launcher::FnLauncher};
use reth_db::{init_db, mdbx::init_db_for, DatabaseEnv};
use reth_db::{DatabaseEnv, init_db, mdbx::init_db_for};
use reth_tracing::FileWorkerGuard;
use std::{
fmt::{self},
@ -35,6 +38,12 @@ pub struct HlNodeArgs {
#[command(flatten)]
pub block_source_args: BlockSourceArgs,
/// Debug cutoff height.
///
/// This option is used to cut off the block import at a specific height.
#[arg(long, env = "DEBUG_CUTOFF_HEIGHT")]
pub debug_cutoff_height: Option<u64>,
/// Upstream RPC URL to forward incoming transactions.
///
/// Defaults to Hyperliquid's RPC URL when not provided (https://rpc.hyperliquid.xyz/evm).
@ -55,6 +64,24 @@ pub struct HlNodeArgs {
/// This is useful when read precompile is needed for gas estimation.
#[arg(long, env = "FORWARD_CALL")]
pub forward_call: bool,
/// Experimental: enables the eth_getProof RPC method.
///
/// Note: Due to the state root difference, trie updates* may not function correctly in all
/// scenarios. For example, incremental root updates are not possible, which can cause
/// eth_getProof to malfunction in some cases.
///
/// This limitation does not impact normal node functionality, except for state root (which is
/// unused) and eth_getProof. The archival state is maintained by block order, not by trie
/// updates. As a precaution, nanoreth disables eth_getProof by default to prevent
/// potential issues.
///
/// Use --experimental-eth-get-proof to forcibly enable eth_getProof, assuming trie updates are
/// working as intended. Enabling this by default will be tracked in #15.
///
/// * Refers to the Merkle trie used for eth_getProof and state root, not actual state values.
#[arg(long, env = "EXPERIMENTAL_ETH_GET_PROOF")]
pub experimental_eth_get_proof: bool,
}
/// The main reth_hl cli interface.
@ -112,11 +139,14 @@ where
// Install the prometheus recorder to be sure to record all metrics
let _ = install_prometheus_recorder();
let components =
|spec: Arc<C::ChainSpec>| (HlEvmConfig::new(spec.clone()), HlConsensus::new(spec));
let components = |spec: Arc<C::ChainSpec>| {
(HlEvmConfig::new(spec.clone()), Arc::new(HlConsensus::new(spec)))
};
match self.command {
Commands::Node(command) => runner.run_command_until_exit(|ctx| {
Self::migrate_db(&command.chain, &command.datadir, &command.db)
.expect("Failed to migrate database");
command.execute(ctx, FnLauncher::new::<C, Ext>(launcher))
}),
Commands::Init(command) => {
@ -133,9 +163,6 @@ where
runner.run_command_until_exit(|ctx| command.execute::<HlNode, _>(ctx, components))
}
Commands::Config(command) => runner.run_until_ctrl_c(command.execute()),
Commands::Recover(command) => {
runner.run_command_until_exit(|ctx| command.execute::<HlNode>(ctx))
}
Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::<HlNode>()),
Commands::Import(command) => {
runner.run_blocking_until_ctrl_c(command.execute::<HlNode, _>(components))
@ -166,4 +193,13 @@ where
init_db_for::<_, Tables>(db_path, env.db.database_args())?;
Ok(())
}
fn migrate_db(
chain: &HlChainSpec,
datadir: &DatadirArgs,
db: &DatabaseArgs,
) -> eyre::Result<()> {
Migrator::<HlNode>::new(chain.clone(), datadir.clone(), *db)?.migrate_db()?;
Ok(())
}
}

View File

@ -1,9 +1,8 @@
use crate::{hardforks::HlHardforks, node::HlNode, HlBlock, HlBlockBody, HlPrimitives};
use alloy_consensus::Header;
use crate::{hardforks::HlHardforks, node::{primitives::HlHeader, HlNode}, HlBlock, HlBlockBody, HlPrimitives};
use reth::{
api::FullNodeTypes,
api::{FullNodeTypes, NodeTypes},
beacon_consensus::EthBeaconConsensus,
builder::{components::ConsensusBuilder, BuilderContext},
builder::{BuilderContext, components::ConsensusBuilder},
consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator},
consensus_common::validation::{
validate_against_parent_4844, validate_against_parent_hash_number,
@ -24,7 +23,7 @@ impl<Node> ConsensusBuilder<Node> for HlConsensusBuilder
where
Node: FullNodeTypes<Types = HlNode>,
{
type Consensus = Arc<dyn FullConsensus<HlPrimitives, Error = ConsensusError>>;
type Consensus = Arc<HlConsensus<<Node::Types as NodeTypes>::ChainSpec>>;
async fn build_consensus(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Consensus> {
Ok(Arc::new(HlConsensus::new(ctx.chain_spec())))
@ -101,14 +100,14 @@ where
impl<ChainSpec> Consensus<HlBlock> for HlConsensus<ChainSpec>
where
ChainSpec: EthChainSpec<Header = Header> + HlHardforks,
ChainSpec: EthChainSpec<Header = HlHeader> + HlHardforks,
{
type Error = ConsensusError;
fn validate_body_against_header(
&self,
body: &HlBlockBody,
header: &SealedHeader,
header: &SealedHeader<HlHeader>,
) -> Result<(), ConsensusError> {
Consensus::<HlBlock>::validate_body_against_header(&self.inner, body, header)
}
@ -148,7 +147,7 @@ mod reth_copy;
impl<ChainSpec> FullConsensus<HlPrimitives> for HlConsensus<ChainSpec>
where
ChainSpec: EthChainSpec<Header = Header> + HlHardforks,
ChainSpec: EthChainSpec<Header = HlHeader> + HlHardforks,
{
fn validate_block_post_execution(
&self,

View File

@ -1,21 +1,21 @@
//! Copy of reth codebase.
use alloy_consensus::{proofs::calculate_receipt_root, BlockHeader, TxReceipt};
use crate::HlBlock;
use alloy_consensus::{BlockHeader, TxReceipt, proofs::calculate_receipt_root};
use alloy_eips::eip7685::Requests;
use alloy_primitives::{Bloom, B256};
use alloy_primitives::{B256, Bloom};
use reth::consensus::ConsensusError;
use reth_chainspec::EthereumHardforks;
use reth_primitives::{gas_spent_by_transactions, GotExpected, RecoveredBlock};
use reth_primitives_traits::{Block, Receipt as ReceiptTrait};
use reth_primitives::{GotExpected, RecoveredBlock, gas_spent_by_transactions};
use reth_primitives_traits::Receipt as ReceiptTrait;
pub fn validate_block_post_execution<B, R, ChainSpec>(
block: &RecoveredBlock<B>,
pub fn validate_block_post_execution<R, ChainSpec>(
block: &RecoveredBlock<HlBlock>,
chain_spec: &ChainSpec,
receipts: &[R],
requests: &Requests,
) -> Result<(), ConsensusError>
where
B: Block,
R: ReceiptTrait,
ChainSpec: EthereumHardforks,
{
@ -42,7 +42,7 @@ where
receipts.iter().filter(|&r| r.cumulative_gas_used() != 0).cloned().collect::<Vec<_>>();
if let Err(error) = verify_receipts(
block.header().receipts_root(),
block.header().logs_bloom(),
block.header().inner.logs_bloom(),
&receipts_for_root,
) {
tracing::debug!(%error, ?receipts, "receipts verification failed");
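The root check above first drops receipts whose cumulative gas is zero, which is how system-transaction receipts stay out of the receipts root. The filter on a plain stand-in type:

// Stand-in receipt; only cumulative gas matters for the filter.
struct R {
    cumulative_gas_used: u64,
}

// System transactions consume no gas, so a zero cumulative value marks
// receipts that must not feed into the receipts root.
fn receipts_for_root(receipts: &[R]) -> Vec<&R> {
    receipts.iter().filter(|r| r.cumulative_gas_used != 0).collect()
}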

View File

@ -1,22 +1,9 @@
use std::sync::Arc;
use crate::{
node::{rpc::engine_api::payload::HlPayloadTypes, HlNode},
HlBlock, HlPrimitives,
};
use crate::{HlBlock, HlPrimitives};
use alloy_eips::eip7685::Requests;
use alloy_primitives::U256;
use reth::{
api::FullNodeTypes,
builder::{components::PayloadServiceBuilder, BuilderContext},
payload::{PayloadBuilderHandle, PayloadServiceCommand},
transaction_pool::TransactionPool,
};
use reth_evm::ConfigureEvm;
use reth_payload_primitives::BuiltPayload;
use reth_primitives::SealedBlock;
use tokio::sync::{broadcast, mpsc};
use tracing::warn;
use std::sync::Arc;
/// Built payload for Hl. This is similar to [`EthBuiltPayload`] but without sidecars, as those are
/// included in [`HlBlock`].
@ -45,73 +32,3 @@ impl BuiltPayload for HlBuiltPayload {
self.requests.clone()
}
}
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
pub struct HlPayloadServiceBuilder;
impl<Node, Pool, Evm> PayloadServiceBuilder<Node, Pool, Evm> for HlPayloadServiceBuilder
where
Node: FullNodeTypes<Types = HlNode>,
Pool: TransactionPool,
Evm: ConfigureEvm,
{
async fn spawn_payload_builder_service(
self,
ctx: &BuilderContext<Node>,
_pool: Pool,
_evm_config: Evm,
) -> eyre::Result<PayloadBuilderHandle<HlPayloadTypes>> {
let (tx, mut rx) = mpsc::unbounded_channel();
ctx.task_executor().spawn_critical("payload builder", async move {
let mut subscriptions = Vec::new();
while let Some(message) = rx.recv().await {
match message {
PayloadServiceCommand::Subscribe(tx) => {
let (events_tx, events_rx) = broadcast::channel(100);
// Retain senders to make sure that channels are not getting closed
subscriptions.push(events_tx);
let _ = tx.send(events_rx);
}
message => warn!(?message, "Noop payload service received a message"),
}
}
});
Ok(PayloadBuilderHandle::new(tx))
}
}
// impl From<EthBuiltPayload> for HlBuiltPayload {
// fn from(value: EthBuiltPayload) -> Self {
// let EthBuiltPayload { id, block, fees, sidecars, requests } = value;
// HlBuiltPayload {
// id,
// block: block.into(),
// fees,
// requests,
// }
// }
// }
// pub struct HlPayloadBuilder<Inner> {
// inner: Inner,
// }
// impl<Inner> PayloadBuilder for HlPayloadBuilder<Inner>
// where
// Inner: PayloadBuilder<BuiltPayload = EthBuiltPayload>,
// {
// type Attributes = Inner::Attributes;
// type BuiltPayload = HlBuiltPayload;
// type Error = Inner::Error;
// fn try_build(
// &self,
// args: BuildArguments<Self::Attributes, Self::BuiltPayload>,
// ) -> Result<BuildOutcome<Self::BuiltPayload>, PayloadBuilderError> {
// let outcome = self.inner.try_build(args)?;
// }
// }

View File

@ -1,8 +1,6 @@
use crate::{
node::evm::config::{HlBlockExecutorFactory, HlEvmConfig},
HlBlock,
node::evm::config::{HlBlockExecutorFactory, HlEvmConfig}, HlBlock, HlHeader
};
use alloy_consensus::Header;
use reth_evm::{
block::BlockExecutionError,
execute::{BlockAssembler, BlockAssemblerInput},
@ -13,7 +11,7 @@ impl BlockAssembler<HlBlockExecutorFactory> for HlEvmConfig {
fn assemble_block(
&self,
input: BlockAssemblerInput<'_, '_, HlBlockExecutorFactory, Header>,
input: BlockAssemblerInput<'_, '_, HlBlockExecutorFactory, HlHeader>,
) -> Result<Self::Block, BlockExecutionError> {
let HlBlock { header, body } = self.block_assembler.assemble_block(input)?;
Ok(HlBlock { header, body })

View File

@ -1,5 +1,6 @@
use super::{executor::HlBlockExecutor, factory::HlEvmFactory};
use crate::{
HlBlock, HlBlockBody, HlHeader, HlPrimitives,
chainspec::HlChainSpec,
evm::{spec::HlSpecId, transaction::HlTxEnv},
hardforks::HlHardforks,
@ -9,31 +10,30 @@ use crate::{
rpc::engine_api::validator::HlExecutionData,
types::HlExtras,
},
HlBlock, HlBlockBody, HlPrimitives,
};
use alloy_consensus::{BlockHeader, Header, Transaction as _, TxReceipt, EMPTY_OMMER_ROOT_HASH};
use alloy_eips::{merge::BEACON_NONCE, Encodable2718};
use alloy_consensus::{BlockHeader, EMPTY_OMMER_ROOT_HASH, Header, Transaction as _, TxReceipt};
use alloy_eips::{Encodable2718, merge::BEACON_NONCE};
use alloy_primitives::{Log, U256};
use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
use reth_evm::{
block::{BlockExecutionError, BlockExecutorFactory, BlockExecutorFor},
eth::{receipt_builder::ReceiptBuilder, EthBlockExecutionCtx},
execute::{BlockAssembler, BlockAssemblerInput},
precompiles::PrecompilesMap,
ConfigureEngineEvm, ConfigureEvm, EvmEnv, EvmEnvFor, EvmFactory, ExecutableTxIterator,
ExecutionCtxFor, FromRecoveredTx, FromTxWithEncoded, IntoTxEnv, NextBlockEnvAttributes,
block::{BlockExecutionError, BlockExecutorFactory, BlockExecutorFor},
eth::{EthBlockExecutionCtx, receipt_builder::ReceiptBuilder},
execute::{BlockAssembler, BlockAssemblerInput},
precompiles::PrecompilesMap,
};
use reth_evm_ethereum::EthBlockAssembler;
use reth_payload_primitives::NewPayloadError;
use reth_primitives::{logs_bloom, BlockTy, HeaderTy, Receipt, SealedBlock, SealedHeader};
use reth_primitives_traits::{proofs, SignerRecoverable, WithEncoded};
use reth_primitives::{BlockTy, HeaderTy, Receipt, SealedBlock, SealedHeader, logs_bloom};
use reth_primitives_traits::{SignerRecoverable, WithEncoded, proofs};
use reth_provider::BlockExecutionResult;
use reth_revm::State;
use revm::{
Inspector,
context::{BlockEnv, CfgEnv, TxEnv},
context_interface::block::BlobExcessGasAndPrice,
primitives::hardfork::SpecId,
Inspector,
};
use std::{borrow::Cow, convert::Infallible, sync::Arc};
@ -54,7 +54,7 @@ where
fn assemble_block(
&self,
input: BlockAssemblerInput<'_, '_, F>,
input: BlockAssemblerInput<'_, '_, F, HlHeader>,
) -> Result<Self::Block, BlockExecutionError> {
// TODO: Copy of EthBlockAssembler::assemble_block
let inner = &self.inner;
@ -71,10 +71,10 @@ where
let timestamp = evm_env.block_env.timestamp.saturating_to();
// Filter out system tx receipts
let transactions_for_root: Vec<TransactionSigned> =
transactions.iter().filter(|t| !is_system_transaction(t)).cloned().collect::<Vec<_>>();
let receipts_for_root: Vec<Receipt> =
receipts.iter().filter(|r| r.cumulative_gas_used() != 0).cloned().collect::<Vec<_>>();
let transactions_for_root: Vec<_> =
transactions.iter().filter(|t| !is_system_transaction(t)).cloned().collect();
let receipts_for_root: Vec<_> =
receipts.iter().filter(|r| r.cumulative_gas_used() != 0).cloned().collect();
let transactions_root = proofs::calculate_transaction_root(&transactions_for_root);
let receipts_root = Receipt::calculate_receipt_root_no_memo(&receipts_for_root);
@ -106,7 +106,10 @@ where
} else {
// for the first post-fork block, both parent.blob_gas_used and
// parent.excess_blob_gas are evaluated as 0
Some(alloy_eips::eip7840::BlobParams::cancun().next_block_excess_blob_gas(0, 0))
Some(
alloy_eips::eip7840::BlobParams::cancun()
.next_block_excess_blob_gas_osaka(0, 0, 0),
)
};
}
@ -133,6 +136,9 @@ where
excess_blob_gas,
requests_hash,
};
let system_tx_count =
transactions.iter().filter(|t| is_system_transaction(t)).count() as u64;
let header = HlHeader::from_ethereum_header(header, receipts, system_tx_count);
Ok(Self::Block {
header,
@ -266,6 +272,8 @@ where
}
}
static EMPTY_OMMERS: [Header; 0] = [];
impl ConfigureEvm for HlEvmConfig
where
Self: Send + Sync + Unpin + Clone + 'static,
@ -284,7 +292,7 @@ where
self
}
fn evm_env(&self, header: &Header) -> EvmEnv<HlSpecId> {
fn evm_env(&self, header: &HlHeader) -> Result<EvmEnv<HlSpecId>, Self::Error> {
let blob_params = self.chain_spec().blob_params_at_timestamp(header.timestamp);
let spec = revm_spec_by_timestamp_and_block_number(
self.chain_spec().clone(),
@ -295,7 +303,6 @@ where
// configure evm env based on parent block
let mut cfg_env =
CfgEnv::new().with_chain_id(self.chain_spec().chain().id()).with_spec(spec);
if let Some(blob_params) = &blob_params {
cfg_env.set_max_blobs_per_tx(blob_params.max_blobs_per_tx);
}
@ -325,12 +332,12 @@ where
blob_excess_gas_and_price,
};
EvmEnv { cfg_env, block_env }
Ok(EvmEnv { cfg_env, block_env })
}
fn next_evm_env(
&self,
parent: &Header,
parent: &HlHeader,
attributes: &Self::NextBlockEnvCtx,
) -> Result<EvmEnv<HlSpecId>, Self::Error> {
// ensure we're not missing any timestamp based hardforks
@ -374,60 +381,57 @@ where
fn context_for_block<'a>(
&self,
block: &'a SealedBlock<BlockTy<Self::Primitives>>,
) -> ExecutionCtxFor<'a, Self> {
) -> Result<ExecutionCtxFor<'a, Self>, Self::Error> {
let block_body = block.body();
let extras = HlExtras {
read_precompile_calls: block_body.read_precompile_calls.clone(),
highest_precompile_address: block_body.highest_precompile_address,
};
HlBlockExecutionCtx {
Ok(HlBlockExecutionCtx {
ctx: EthBlockExecutionCtx {
parent_hash: block.header().parent_hash,
parent_beacon_block_root: block.header().parent_beacon_block_root,
ommers: &block.body().ommers,
ommers: &EMPTY_OMMERS,
withdrawals: block.body().withdrawals.as_ref().map(Cow::Borrowed),
},
extras,
}
extras: HlExtras {
read_precompile_calls: block_body.read_precompile_calls.clone(),
highest_precompile_address: block_body.highest_precompile_address,
},
})
}
fn context_for_next_block(
&self,
parent: &SealedHeader<HeaderTy<Self::Primitives>>,
attributes: Self::NextBlockEnvCtx,
) -> ExecutionCtxFor<'_, Self> {
HlBlockExecutionCtx {
) -> Result<ExecutionCtxFor<'_, Self>, Self::Error> {
Ok(HlBlockExecutionCtx {
ctx: EthBlockExecutionCtx {
parent_hash: parent.hash(),
parent_beacon_block_root: attributes.parent_beacon_block_root,
ommers: &[],
withdrawals: attributes.withdrawals.map(Cow::Owned),
},
// TODO: hacky, double check if this is correct
extras: HlExtras::default(),
}
extras: HlExtras::default(), // TODO: hacky, double check if this is correct
})
}
}
impl ConfigureEngineEvm<HlExecutionData> for HlEvmConfig {
fn evm_env_for_payload(&self, payload: &HlExecutionData) -> EvmEnvFor<Self> {
self.evm_env(&payload.0.header)
self.evm_env(&payload.0.header).unwrap()
}
fn context_for_payload<'a>(&self, payload: &'a HlExecutionData) -> ExecutionCtxFor<'a, Self> {
let block = &payload.0;
let extras = HlExtras {
read_precompile_calls: block.body.read_precompile_calls.clone(),
highest_precompile_address: block.body.highest_precompile_address,
};
HlBlockExecutionCtx {
ctx: EthBlockExecutionCtx {
parent_hash: block.header.parent_hash,
parent_beacon_block_root: block.header.parent_beacon_block_root,
ommers: &block.body.ommers,
ommers: &EMPTY_OMMERS,
withdrawals: block.body.withdrawals.as_ref().map(Cow::Borrowed),
},
extras,
extras: HlExtras {
read_precompile_calls: block.body.read_precompile_calls.clone(),
highest_precompile_address: block.body.highest_precompile_address,
},
}
}

View File

@ -4,33 +4,30 @@ use crate::{
hardforks::HlHardforks,
node::{
primitives::TransactionSigned,
types::{ReadPrecompileInput, ReadPrecompileResult},
types::{HlExtras, ReadPrecompileInput, ReadPrecompileResult},
},
};
use alloy_consensus::{Transaction, TxReceipt};
use alloy_eips::{eip7685::Requests, Encodable2718};
use alloy_eips::{Encodable2718, eip7685::Requests};
use alloy_evm::{block::ExecutableTx, eth::receipt_builder::ReceiptBuilderCtx};
use alloy_primitives::{address, hex, Address, Bytes, U160, U256};
use alloy_primitives::{Address, Bytes, U160, U256, address, hex};
use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
use reth_evm::{
block::{BlockValidationError, CommitChanges},
Database, Evm, FromRecoveredTx, FromTxWithEncoded, IntoTxEnv, OnStateHook,
block::BlockValidationError,
eth::receipt_builder::ReceiptBuilder,
execute::{BlockExecutionError, BlockExecutor},
precompiles::{DynPrecompile, PrecompileInput, PrecompilesMap},
Database, Evm, FromRecoveredTx, FromTxWithEncoded, IntoTxEnv, OnStateHook,
};
use reth_provider::BlockExecutionResult;
use reth_revm::State;
use revm::{
context::{
result::{ExecutionResult, ResultAndState},
TxEnv,
},
DatabaseCommit,
context::{TxEnv, result::ResultAndState},
interpreter::instructions::utility::IntoU256,
precompile::{PrecompileError, PrecompileOutput, PrecompileResult},
primitives::HashMap,
state::Bytecode,
DatabaseCommit,
};
pub fn is_system_transaction(tx: &TransactionSigned) -> bool {
@ -102,7 +99,7 @@ where
{
/// Creates a new HlBlockExecutor.
pub fn new(mut evm: EVM, ctx: HlBlockExecutionCtx<'a>, spec: Spec, receipt_builder: R) -> Self {
apply_precompiles(&mut evm, &ctx);
apply_precompiles(&mut evm, &ctx.extras);
Self { spec, evm, gas_used: 0, receipts: vec![], receipt_builder, ctx }
}
@ -110,7 +107,9 @@ where
const COREWRITER_ENABLED_BLOCK_NUMBER: u64 = 7578300;
const COREWRITER_CONTRACT_ADDRESS: Address =
address!("0x3333333333333333333333333333333333333333");
const COREWRITER_CODE: &[u8] = &hex!("608060405234801561000f575f5ffd5b5060043610610029575f3560e01c806317938e131461002d575b5f5ffd5b61004760048036038101906100429190610123565b610049565b005b5f5f90505b61019081101561006557808060010191505061004e565b503373ffffffffffffffffffffffffffffffffffffffff167f8c7f585fb295f7eb1e6aeb8fba61b23a4fe60beda405f0045073b185c74412e383836040516100ae9291906101c8565b60405180910390a25050565b5f5ffd5b5f5ffd5b5f5ffd5b5f5ffd5b5f5ffd5b5f5f83601f8401126100e3576100e26100c2565b5b8235905067ffffffffffffffff811115610100576100ff6100c6565b5b60208301915083600182028301111561011c5761011b6100ca565b5b9250929050565b5f5f60208385031215610139576101386100ba565b5b5f83013567ffffffffffffffff811115610156576101556100be565b5b610162858286016100ce565b92509250509250929050565b5f82825260208201905092915050565b828183375f83830152505050565b5f601f19601f8301169050919050565b5f6101a7838561016e565b93506101b483858461017e565b6101bd8361018c565b840190509392505050565b5f6020820190508181035f8301526101e181848661019c565b9050939250505056fea2646970667358221220f01517e1fbaff8af4bd72cb063cccecbacbb00b07354eea7dd52265d355474fb64736f6c634300081c0033");
const COREWRITER_CODE: &[u8] = &hex!(
"608060405234801561000f575f5ffd5b5060043610610029575f3560e01c806317938e131461002d575b5f5ffd5b61004760048036038101906100429190610123565b610049565b005b5f5f90505b61019081101561006557808060010191505061004e565b503373ffffffffffffffffffffffffffffffffffffffff167f8c7f585fb295f7eb1e6aeb8fba61b23a4fe60beda405f0045073b185c74412e383836040516100ae9291906101c8565b60405180910390a25050565b5f5ffd5b5f5ffd5b5f5ffd5b5f5ffd5b5f5ffd5b5f5f83601f8401126100e3576100e26100c2565b5b8235905067ffffffffffffffff811115610100576100ff6100c6565b5b60208301915083600182028301111561011c5761011b6100ca565b5b9250929050565b5f5f60208385031215610139576101386100ba565b5b5f83013567ffffffffffffffff811115610156576101556100be565b5b610162858286016100ce565b92509250509250929050565b5f82825260208201905092915050565b828183375f83830152505050565b5f601f19601f8301169050919050565b5f6101a7838561016e565b93506101b483858461017e565b6101bd8361018c565b840190509392505050565b5f6020820190508181035f8301526101e181848661019c565b9050939250505056fea2646970667358221220f01517e1fbaff8af4bd72cb063cccecbacbb00b07354eea7dd52265d355474fb64736f6c634300081c0033"
);
if self.evm.block().number != U256::from(COREWRITER_ENABLED_BLOCK_NUMBER) {
return Ok(());
@ -155,17 +154,16 @@ where
type Evm = E;
fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> {
apply_precompiles(&mut self.evm, &self.ctx);
apply_precompiles(&mut self.evm, &self.ctx.extras);
self.deploy_corewriter_contract()?;
Ok(())
}
fn execute_transaction_with_commit_condition(
fn execute_transaction_without_commit(
&mut self,
tx: impl ExecutableTx<Self>,
f: impl FnOnce(&ExecutionResult<<Self::Evm as Evm>::HaltReason>) -> CommitChanges,
) -> Result<Option<u64>, BlockExecutionError> {
) -> Result<ResultAndState<<Self::Evm as Evm>::HaltReason>, BlockExecutionError> {
// The sum of the transaction's gas limit, Tg, and the gas utilized in this block prior,
// must be no greater than the block's gasLimit.
let block_available_gas = self.evm.block().gas_limit - self.gas_used;
@ -178,16 +176,20 @@ where
.into());
}
// Execute transaction.
let ResultAndState { result, mut state } = self
.evm
.transact(&tx)
.map_err(|err| BlockExecutionError::evm(err, tx.tx().trie_hash()))?;
if !f(&result).should_commit() {
return Ok(None);
// Execute transaction and return the result
self.evm.transact(&tx).map_err(|err| {
let hash = tx.tx().trie_hash();
BlockExecutionError::evm(err, hash)
})
}
fn commit_transaction(
&mut self,
output: ResultAndState<<Self::Evm as Evm>::HaltReason>,
tx: impl ExecutableTx<Self>,
) -> Result<u64, BlockExecutionError> {
let ResultAndState { result, mut state } = output;
let gas_used = result.gas_used();
// append gas used
@ -215,7 +217,7 @@ where
// Commit the state changes.
self.evm.db_mut().commit(state);
Ok(Some(gas_used))
Ok(gas_used)
}
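// Aside: the availability check at the top of
// `execute_transaction_without_commit`, reduced to bare integers
// (illustrative helper, not part of the executor):
fn fits_block(block_gas_limit: u64, gas_used_so_far: u64, tx_gas_limit: u64) -> bool {
    // a tx may only start if its gas limit fits the block's remaining gas
    tx_gas_limit <= block_gas_limit - gas_used_so_far
}
// e.g. fits_block(30_000_000, 29_950_000, 100_000) == false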
fn finish(self) -> Result<(Self::Evm, BlockExecutionResult<R::Receipt>), BlockExecutionError> {
@ -240,10 +242,9 @@ where
}
}
fn apply_precompiles<'a, DB, EVM>(evm: &mut EVM, ctx: &HlBlockExecutionCtx<'a>)
pub fn apply_precompiles<EVM>(evm: &mut EVM, extras: &HlExtras)
where
EVM: Evm<DB = &'a mut State<DB>, Precompiles = PrecompilesMap>,
DB: Database + 'a,
EVM: Evm<Precompiles = PrecompilesMap>,
{
let block_number = evm.block().number;
let precompiles_mut = evm.precompiles_mut();
@ -255,9 +256,7 @@ where
precompiles_mut.apply_precompile(&address, |_| None);
}
}
for (address, precompile) in
ctx.extras.read_precompile_calls.clone().unwrap_or_default().0.iter()
{
for (address, precompile) in extras.read_precompile_calls.clone().unwrap_or_default().0.iter() {
let precompile = precompile.clone();
precompiles_mut.apply_precompile(address, |_| {
let precompiles_map: HashMap<ReadPrecompileInput, ReadPrecompileResult> =
@ -271,7 +270,7 @@ where
// NOTE: This is adapted from hyperliquid-dex/hyper-evm-sync#5
const WARM_PRECOMPILES_BLOCK_NUMBER: u64 = 8_197_684;
if block_number >= U256::from(WARM_PRECOMPILES_BLOCK_NUMBER) {
fill_all_precompiles(ctx, precompiles_mut);
fill_all_precompiles(extras, precompiles_mut);
}
}
@ -279,9 +278,9 @@ fn address_to_u64(address: Address) -> u64 {
address.into_u256().try_into().unwrap()
}
fn fill_all_precompiles<'a>(ctx: &HlBlockExecutionCtx<'a>, precompiles_mut: &mut PrecompilesMap) {
fn fill_all_precompiles(extras: &HlExtras, precompiles_mut: &mut PrecompilesMap) {
let lowest_address = 0x800;
let highest_address = ctx.extras.highest_precompile_address.map_or(0x80D, address_to_u64);
let highest_address = extras.highest_precompile_address.map_or(0x80D, address_to_u64);
for address in lowest_address..=highest_address {
let address = Address::from(U160::from(address));
precompiles_mut.apply_precompile(&address, |f| {

View File

@ -7,16 +7,16 @@ use crate::evm::{
spec::HlSpecId,
transaction::HlTxEnv,
};
use reth_evm::{precompiles::PrecompilesMap, Database, EvmEnv, EvmFactory};
use reth_evm::{Database, EvmEnv, EvmFactory, precompiles::PrecompilesMap};
use reth_revm::Context;
use revm::{
Inspector,
context::{
result::{EVMError, HaltReason},
TxEnv,
result::{EVMError, HaltReason},
},
inspector::NoOpInspector,
precompile::{PrecompileSpecId, Precompiles},
Inspector,
};
/// Factory producing [`HlEvm`].

View File

@ -1,6 +1,6 @@
use crate::{
evm::{
api::{ctx::HlContext, HlEvmInner},
api::{HlEvmInner, ctx::HlContext},
spec::HlSpecId,
transaction::HlTxEnv,
},
@ -10,18 +10,18 @@ use alloy_primitives::{Address, Bytes};
use config::HlEvmConfig;
use reth::{
api::FullNodeTypes,
builder::{components::ExecutorBuilder, BuilderContext},
builder::{BuilderContext, components::ExecutorBuilder},
};
use reth_evm::{Database, Evm, EvmEnv};
use revm::{
context::{
result::{EVMError, ExecutionResult, HaltReason, Output, ResultAndState, SuccessReason},
BlockEnv, TxEnv,
},
handler::{instructions::EthInstructions, EthPrecompiles, PrecompileProvider},
interpreter::{interpreter::EthInterpreter, InterpreterResult},
state::EvmState,
Context, ExecuteEvm, InspectEvm, Inspector,
context::{
BlockEnv, TxEnv,
result::{EVMError, ExecutionResult, HaltReason, Output, ResultAndState, SuccessReason},
},
handler::{EthPrecompiles, PrecompileProvider, instructions::EthInstructions},
interpreter::{InterpreterResult, interpreter::EthInterpreter},
state::EvmState,
};
use std::ops::{Deref, DerefMut};
@ -32,6 +32,8 @@ mod factory;
mod patch;
pub mod receipt_builder;
pub use executor::apply_precompiles;
/// HL EVM implementation.
///
/// This is a wrapper type around the `revm` evm with optional [`Inspector`] (tracing)
@ -96,11 +98,7 @@ where
&mut self,
tx: Self::Tx,
) -> Result<ResultAndState<Self::HaltReason>, Self::Error> {
if self.inspect {
self.inner.inspect_tx(tx)
} else {
self.inner.transact(tx)
}
if self.inspect { self.inner.inspect_tx(tx) } else { self.inner.transact(tx) }
}
fn transact_system_call(
@ -165,7 +163,6 @@ where
type EVM = HlEvmConfig;
async fn build_evm(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::EVM> {
let evm_config = HlEvmConfig::hl(ctx.chain_spec());
Ok(evm_config)
Ok(HlEvmConfig::hl(ctx.chain_spec()))
}
}

View File

@ -1,4 +1,4 @@
use alloy_primitives::{address, Address};
use alloy_primitives::{Address, address};
use reth_evm::block::BlockExecutionError;
use revm::{primitives::HashMap, state::Account};

View File

@ -1,5 +1,6 @@
use crate::node::primitives::TransactionSigned;
use alloy_evm::eth::receipt_builder::{ReceiptBuilder, ReceiptBuilderCtx};
use reth_codecs::alloy::transaction::Envelope;
use reth_evm::Evm;
use reth_primitives::Receipt;

429
src/node/migrate.rs Normal file
View File

@ -0,0 +1,429 @@
use alloy_consensus::Header;
use alloy_primitives::{B256, BlockHash, Bytes, U256, b256, hex::ToHexExt};
use reth::{
api::NodeTypesWithDBAdapter,
args::{DatabaseArgs, DatadirArgs},
dirs::{ChainPath, DataDirPath},
};
use reth_chainspec::EthChainSpec;
use reth_db::{
DatabaseEnv,
mdbx::{RO, tx::Tx},
models::CompactU256,
static_file::iter_static_files,
table::Decompress,
tables,
};
use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW},
transaction::{DbTx, DbTxMut},
};
use reth_errors::ProviderResult;
use reth_ethereum_primitives::EthereumReceipt;
use reth_provider::{
DatabaseProvider, ProviderFactory, ReceiptProvider, StaticFileProviderFactory,
StaticFileSegment, StaticFileWriter,
providers::{NodeTypesForProvider, StaticFileProvider},
static_file::SegmentRangeInclusive,
};
use std::{fs::File, io::Write, path::PathBuf, sync::Arc};
use tracing::{info, warn};
use crate::{HlHeader, HlPrimitives, chainspec::HlChainSpec};
pub(crate) trait HlNodeType:
NodeTypesForProvider<ChainSpec = HlChainSpec, Primitives = HlPrimitives>
{
}
impl<N: NodeTypesForProvider<ChainSpec = HlChainSpec, Primitives = HlPrimitives>> HlNodeType for N {}
pub(super) struct Migrator<N: HlNodeType> {
data_dir: ChainPath<DataDirPath>,
provider_factory: ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>,
}
impl<N: HlNodeType> Migrator<N> {
const MIGRATION_PATH_SUFFIX: &'static str = "migration-tmp";
pub fn new(
chain_spec: HlChainSpec,
datadir: DatadirArgs,
database_args: DatabaseArgs,
) -> eyre::Result<Self> {
let data_dir = datadir.clone().resolve_datadir(chain_spec.chain());
let provider_factory = Self::provider_factory(chain_spec, datadir, database_args)?;
Ok(Self { data_dir, provider_factory })
}
pub fn sf_provider(&self) -> StaticFileProvider<HlPrimitives> {
self.provider_factory.static_file_provider()
}
pub fn migrate_db(&self) -> eyre::Result<()> {
let is_empty = Self::highest_block_number(&self.sf_provider()).is_none();
if is_empty {
return Ok(());
}
self.migrate_db_inner()
}
fn highest_block_number(sf_provider: &StaticFileProvider<HlPrimitives>) -> Option<u64> {
sf_provider.get_highest_static_file_block(StaticFileSegment::Headers)
}
fn migrate_db_inner(&self) -> eyre::Result<()> {
let migrated_mdbx = MigratorMdbx::<N>(self).migrate_mdbx()?;
let migrated_static_files = MigrateStaticFiles::<N>(self).migrate_static_files()?;
if migrated_mdbx || migrated_static_files {
info!("Database migrated successfully");
}
Ok(())
}
fn conversion_tmp_dir(&self) -> PathBuf {
self.data_dir.data_dir().join(Self::MIGRATION_PATH_SUFFIX)
}
fn provider_factory(
chain_spec: HlChainSpec,
datadir: DatadirArgs,
database_args: DatabaseArgs,
) -> eyre::Result<ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>> {
let data_dir = datadir.clone().resolve_datadir(chain_spec.chain());
let db_env = reth_db::init_db(data_dir.db(), database_args.database_args())?;
let static_file_provider = StaticFileProvider::read_only(data_dir.static_files(), false)?;
let db = Arc::new(db_env);
Ok(ProviderFactory::new(db, Arc::new(chain_spec), static_file_provider))
}
}
struct MigratorMdbx<'a, N: HlNodeType>(&'a Migrator<N>);
impl<'a, N: HlNodeType> MigratorMdbx<'a, N> {
fn migrate_mdbx(&self) -> eyre::Result<bool> {
// If any header is in the old format we need to migrate, so we check the first and the last one
let db_env = self.0.provider_factory.provider()?;
let mut cursor = db_env.tx_ref().cursor_read::<tables::Headers<Bytes>>()?;
let migration_needed = {
let first_is_old = match cursor.first()? {
Some((number, header)) => using_old_header(number, &header),
None => false,
};
let last_is_old = match cursor.last()? {
Some((number, header)) => using_old_header(number, &header),
None => false,
};
first_is_old || last_is_old
};
if !migration_needed {
return Ok(false);
}
check_if_migration_enabled()?;
self.migrate_mdbx_inner()?;
Ok(true)
}
fn migrate_mdbx_inner(&self) -> eyre::Result<()> {
// There shouldn't be many headers in mdbx, but we go through a file for safety
info!("Old database detected, migrating mdbx...");
let conversion_tmp = self.0.conversion_tmp_dir();
let tmp_path = conversion_tmp.join("headers.rmp");
if conversion_tmp.exists() {
std::fs::remove_dir_all(&conversion_tmp)?;
}
std::fs::create_dir_all(&conversion_tmp)?;
let count = self.export_old_headers(&tmp_path)?;
self.import_new_headers(tmp_path, count)?;
Ok(())
}
fn export_old_headers(&self, tmp_path: &PathBuf) -> Result<i32, eyre::Error> {
let db_env = self.0.provider_factory.provider()?;
let mut cursor_read = db_env.tx_ref().cursor_read::<tables::Headers<Bytes>>()?;
let mut tmp_writer = File::create(tmp_path)?;
let mut count = 0;
let old_headers = cursor_read.walk(None)?.filter_map(|row| {
let (block_number, header) = row.ok()?;
if !using_old_header(block_number, &header) {
None
} else {
Some((block_number, Header::decompress(&header).ok()?))
}
});
for (block_number, header) in old_headers {
let receipt =
db_env.receipts_by_block(block_number.into())?.expect("Receipt not found");
let new_header = to_hl_header(receipt, header);
tmp_writer.write_all(&rmp_serde::to_vec(&(block_number, new_header))?)?;
count += 1;
}
Ok(count)
}
fn import_new_headers(&self, tmp_path: PathBuf, count: i32) -> Result<(), eyre::Error> {
let mut tmp_reader = File::open(tmp_path)?;
let db_env = self.0.provider_factory.provider_rw()?;
let mut cursor_write = db_env.tx_ref().cursor_write::<tables::Headers<Bytes>>()?;
for _ in 0..count {
let (number, header) = rmp_serde::from_read::<_, (u64, HlHeader)>(&mut tmp_reader)?;
cursor_write.upsert(number, &rmp_serde::to_vec(&header)?.into())?;
}
db_env.commit()?;
Ok(())
}
}
fn check_if_migration_enabled() -> Result<(), eyre::Error> {
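// Hedged example of opting in (the check below only tests that the variable is
// set, not its value; the binary name is elided):
//
//     EXPERIMENTAL_MIGRATE_DB=1 <node-binary> node ...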
if std::env::var("EXPERIMENTAL_MIGRATE_DB").is_err() {
let err_msg = concat!(
"Detected an old database format but experimental database migration is currently disabled. ",
"To enable migration, set EXPERIMENTAL_MIGRATE_DB=1, or alternatively, resync your node (safest option)."
);
warn!("{}", err_msg);
return Err(eyre::eyre!("{}", err_msg));
}
Ok(())
}
struct MigrateStaticFiles<'a, N: HlNodeType>(&'a Migrator<N>);
impl<'a, N: HlNodeType> MigrateStaticFiles<'a, N> {
fn iterate_files_for_segment(
&self,
block_range: SegmentRangeInclusive,
dir: &PathBuf,
) -> eyre::Result<Vec<(PathBuf, String)>> {
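// Prefix matching deliberately picks up the data file plus its companion
// offset/config files, which share the same stem (assumption based on reth's
// static-file naming).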
let prefix = StaticFileSegment::Headers.filename(&block_range);
let entries = std::fs::read_dir(dir)?
.map(|res| res.map(|e| e.path()))
.collect::<Result<Vec<_>, _>>()?;
Ok(entries
.into_iter()
.filter_map(|path| {
let file_name = path.file_name().and_then(|f| f.to_str())?;
if file_name.starts_with(&prefix) {
Some((path.clone(), file_name.to_string()))
} else {
None
}
})
.collect())
}
fn create_placeholder(&self, block_range: SegmentRangeInclusive) -> eyre::Result<()> {
// Note the direction: unlike `move_static_files_for_segment`, this links files
// from the live static-files dir into the tmp dir.
let src = self.0.data_dir.static_files();
let dst = self.0.conversion_tmp_dir();
for (src_path, file_name) in self.iterate_files_for_segment(block_range, &src)? {
let dst_path = dst.join(file_name);
if dst_path.exists() {
std::fs::remove_file(&dst_path)?;
}
std::os::unix::fs::symlink(src_path, dst_path)?;
}
Ok(())
}
fn move_static_files_for_segment(
&self,
block_range: SegmentRangeInclusive,
) -> eyre::Result<()> {
let src = self.0.conversion_tmp_dir();
let dst = self.0.data_dir.static_files();
for (src_path, file_name) in self.iterate_files_for_segment(block_range, &src)? {
let dst_path = dst.join(file_name);
std::fs::remove_file(&dst_path)?;
std::fs::rename(&src_path, &dst_path)?;
}
// The tmp StaticFileProvider still needs every segment file to exist, so we leave
// a symlink in the tmp dir pointing at the migrated file.
self.create_placeholder(block_range)
}
fn migrate_static_files(&self) -> eyre::Result<bool> {
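// Per-segment flow:
// 1. probe the first and last block of the segment for the old format
// 2. already-new segments only get a placeholder symlink in the tmp dir
// 3. old segments are rewritten into the tmp dir, swapped into the live dir,
//    then symlinked back into tmp for later iterations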
let conversion_tmp = self.0.conversion_tmp_dir();
let old_path = self.0.data_dir.static_files();
if conversion_tmp.exists() {
std::fs::remove_dir_all(&conversion_tmp)?;
}
std::fs::create_dir_all(&conversion_tmp)?;
let mut all_static_files = iter_static_files(&old_path)?;
let all_static_files =
all_static_files.remove(&StaticFileSegment::Headers).unwrap_or_default();
let mut first = true;
for (block_range, _tx_ranges) in all_static_files {
let migration_needed = self.using_old_header(block_range.start())?
|| self.using_old_header(block_range.end())?;
if !migration_needed {
// Create a placeholder symlink
self.create_placeholder(block_range)?;
continue;
}
if first {
check_if_migration_enabled()?;
info!("Old database detected, migrating static files...");
first = false;
}
let sf_provider = self.0.sf_provider();
let sf_tmp_provider = StaticFileProvider::<HlPrimitives>::read_write(&conversion_tmp)?;
let provider = self.0.provider_factory.provider()?;
let block_range_for_filename = sf_provider.find_fixed_range(block_range.start());
migrate_single_static_file(&sf_tmp_provider, &sf_provider, &provider, block_range)?;
self.move_static_files_for_segment(block_range_for_filename)?;
}
Ok(!first)
}
fn using_old_header(&self, number: u64) -> eyre::Result<bool> {
let sf_provider = self.0.sf_provider();
let content = old_headers_range(&sf_provider, number..=number)?;
let &[row] = &content.as_slice() else {
warn!("No header found for block {}", number);
return Ok(false);
};
Ok(using_old_header(number, &row[0]))
}
}
// `decompress` just panics when the bytes are not a valid header, so we fall back
// to byte-level heuristics to detect the old format.
fn is_old_header(header: &[u8]) -> bool {
const SHA3_UNCLE_OFFSET: usize = 0x24;
const SHA3_UNCLE_HASH: B256 =
b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347");
const GENESIS_PREFIX: [u8; 4] = [0x01, 0x20, 0x00, 0xf8];
let Some(sha3_uncle_hash) = header.get(SHA3_UNCLE_OFFSET..SHA3_UNCLE_OFFSET + 32) else {
return false;
};
if sha3_uncle_hash == SHA3_UNCLE_HASH {
return true;
}
// The genesis block may not match the uncle-hash heuristic, so check its known byte prefix instead
if header.starts_with(&GENESIS_PREFIX) {
return true;
}
false
}
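// A minimal sanity sketch (hypothetical test, not from the source) for the
// heuristic above:
#[cfg(test)]
mod old_header_heuristic_sketch {
use super::*;
#[test]
fn short_buffer_is_not_old() {
// Too short to contain the uncle-hash window at offset 0x24.
assert!(!is_old_header(&[0u8; 8]));
}
#[test]
fn uncle_hash_at_offset_marks_old_format() {
let uncle_hash =
b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347");
let mut buf = vec![0u8; 0x24 + 32];
buf[0x24..0x24 + 32].copy_from_slice(uncle_hash.as_slice());
assert!(is_old_header(&buf));
}
}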
fn is_new_header(header: &[u8]) -> bool {
rmp_serde::from_slice::<HlHeader>(header).is_ok()
}
fn migrate_single_static_file<N: HlNodeType>(
sf_out: &StaticFileProvider<HlPrimitives>,
sf_in: &StaticFileProvider<HlPrimitives>,
provider: &DatabaseProvider<Tx<RO>, NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>,
block_range: SegmentRangeInclusive,
) -> Result<(), eyre::Error> {
info!("Migrating block range {}...", block_range);
// Process the block range in chunks of 50,000 blocks
const CHUNK_SIZE: u64 = 50000;
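// e.g. a segment 0..=124_999 yields chunks 0..=49_999, 50_000..=99_999 and
// 100_000..=124_999; the final chunk is clamped to the range end.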
for chunk in (block_range.start()..=block_range.end()).step_by(CHUNK_SIZE as usize) {
let end = std::cmp::min(chunk + CHUNK_SIZE - 1, block_range.end());
let block_range = chunk..=end;
let headers = old_headers_range(sf_in, block_range.clone())?;
let receipts = provider.receipts_by_block_range(block_range.clone())?;
assert_eq!(headers.len(), receipts.len());
let mut writer = sf_out.get_writer(*block_range.start(), StaticFileSegment::Headers)?;
let new_headers = std::iter::zip(headers, receipts)
.map(|(header, receipts)| {
let eth_header = Header::decompress(&header[0]).unwrap();
let hl_header = to_hl_header(receipts, eth_header);
let difficulty: U256 = CompactU256::decompress(&header[1]).unwrap().into();
let hash = BlockHash::decompress(&header[2]).unwrap();
(hl_header, difficulty, hash)
})
.collect::<Vec<_>>();
for header in new_headers {
writer.append_header(&header.0, header.1, &header.2)?;
}
writer.commit()?;
info!("Migrated block range {:?}...", block_range);
}
Ok(())
}
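// System transactions are detected via receipts with cumulative_gas_used == 0;
// regular transactions always advance the cumulative counter.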
fn to_hl_header(receipts: Vec<EthereumReceipt>, eth_header: Header) -> HlHeader {
let system_tx_count = receipts.iter().filter(|r| r.cumulative_gas_used == 0).count();
HlHeader::from_ethereum_header(eth_header, &receipts, system_tx_count as u64)
}
fn old_headers_range(
provider: &StaticFileProvider<HlPrimitives>,
block_range: impl std::ops::RangeBounds<u64>,
) -> ProviderResult<Vec<Vec<Vec<u8>>>> {
Ok(provider
.fetch_range_with_predicate(
StaticFileSegment::Headers,
to_range(block_range),
|cursor, number| {
cursor.get(number.into(), 0b111).map(|rows| {
rows.map(|columns| columns.into_iter().map(|column| column.to_vec()).collect())
})
},
|_| true,
)?
.into_iter()
.collect())
}
// Copied from reth
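// e.g. (illustrative): to_range(5..=9) == 5..10, to_range(3..) == 3..u64::MAX,
// to_range(..) == 0..u64::MAX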
fn to_range<R: std::ops::RangeBounds<u64>>(bounds: R) -> std::ops::Range<u64> {
let start = match bounds.start_bound() {
std::ops::Bound::Included(&v) => v,
std::ops::Bound::Excluded(&v) => v + 1,
std::ops::Bound::Unbounded => 0,
};
let end = match bounds.end_bound() {
std::ops::Bound::Included(&v) => v + 1,
std::ops::Bound::Excluded(&v) => v,
std::ops::Bound::Unbounded => u64::MAX,
};
start..end
}
fn using_old_header(number: u64, header: &[u8]) -> bool {
let deserialized_old = is_old_header(header);
let deserialized_new = is_new_header(header);
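// Exactly one decoder must accept the row; anything ambiguous aborts the
// migration rather than risking silent corruption.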
assert!(
deserialized_old ^ deserialized_new,
"Header is not valid: {} {}\ndeserialized_old: {}\ndeserialized_new: {}",
number,
header.encode_hex(),
deserialized_old,
deserialized_new
);
deserialized_old && !deserialized_new
}


@ -4,32 +4,36 @@ use crate::{
pool::HlPoolBuilder,
primitives::{HlBlock, HlPrimitives},
rpc::{
HlEthApiBuilder,
engine_api::{
builder::HlEngineApiBuilder, payload::HlPayloadTypes,
validator::HlPayloadValidatorBuilder,
},
HlEthApiBuilder,
},
storage::HlStorage,
},
pseudo_peer::BlockSourceConfig,
};
use consensus::HlConsensusBuilder;
use engine::HlPayloadServiceBuilder;
use evm::HlExecutorBuilder;
use network::HlNetworkBuilder;
use reth::{
api::{FullNodeTypes, NodeTypes},
builder::{components::ComponentsBuilder, rpc::RpcAddOns, Node, NodeAdapter},
builder::{
Node, NodeAdapter,
components::{ComponentsBuilder, NoopPayloadServiceBuilder},
rpc::RpcAddOns,
},
};
use reth_engine_primitives::ConsensusEngineHandle;
use std::{marker::PhantomData, sync::Arc};
use tokio::sync::{oneshot, Mutex};
use tokio::sync::{Mutex, oneshot};
pub mod cli;
pub mod consensus;
pub mod engine;
pub mod evm;
pub mod migrate;
pub mod network;
pub mod primitives;
pub mod rpc;
@ -46,14 +50,23 @@ pub type HlNodeAddOns<N> =
pub struct HlNode {
engine_handle_rx: Arc<Mutex<Option<oneshot::Receiver<ConsensusEngineHandle<HlPayloadTypes>>>>>,
block_source_config: BlockSourceConfig,
debug_cutoff_height: Option<u64>,
}
impl HlNode {
pub fn new(
block_source_config: BlockSourceConfig,
debug_cutoff_height: Option<u64>,
) -> (Self, oneshot::Sender<ConsensusEngineHandle<HlPayloadTypes>>) {
let (tx, rx) = oneshot::channel();
(Self { engine_handle_rx: Arc::new(Mutex::new(Some(rx))), block_source_config }, tx)
(
Self {
engine_handle_rx: Arc::new(Mutex::new(Some(rx))),
block_source_config,
debug_cutoff_height,
},
tx,
)
}
}
@ -65,7 +78,7 @@ impl HlNode {
) -> ComponentsBuilder<
Node,
HlPoolBuilder,
HlPayloadServiceBuilder,
NoopPayloadServiceBuilder,
HlNetworkBuilder,
HlExecutorBuilder,
HlConsensusBuilder,
@ -77,10 +90,11 @@ impl HlNode {
.node_types::<Node>()
.pool(HlPoolBuilder)
.executor(HlExecutorBuilder::default())
.payload(HlPayloadServiceBuilder::default())
.payload(NoopPayloadServiceBuilder::default())
.network(HlNetworkBuilder {
engine_handle_rx: self.engine_handle_rx.clone(),
block_source_config: self.block_source_config.clone(),
debug_cutoff_height: self.debug_cutoff_height,
})
.consensus(HlConsensusBuilder::default())
}
@ -100,7 +114,7 @@ where
type ComponentsBuilder = ComponentsBuilder<
N,
HlPoolBuilder,
HlPayloadServiceBuilder,
NoopPayloadServiceBuilder,
HlNetworkBuilder,
HlExecutorBuilder,
HlConsensusBuilder,


@ -8,7 +8,7 @@ use reth_primitives::NodePrimitives;
use service::{BlockMsg, ImportEvent, Outcome};
use std::{
fmt,
task::{ready, Context, Poll},
task::{Context, Poll, ready},
};
use crate::node::network::HlNewBlock;


@ -1,17 +1,17 @@
use super::handle::ImportHandle;
use crate::{
HlBlock, HlBlockBody,
consensus::HlConsensus,
node::{
network::HlNewBlock,
rpc::engine_api::payload::HlPayloadTypes,
types::{BlockAndReceipts, EvmBlock},
},
HlBlock, HlBlockBody,
};
use alloy_consensus::{BlockBody, Header};
use alloy_primitives::U128;
use alloy_rpc_types::engine::{ForkchoiceState, PayloadStatusEnum};
use futures::{future::Either, stream::FuturesUnordered, StreamExt};
use futures::{StreamExt, future::Either, stream::FuturesUnordered};
use reth_engine_primitives::{ConsensusEngineHandle, EngineTypes};
use reth_eth_wire::NewBlock;
use reth_network::{
@ -89,7 +89,6 @@ where
/// Process a new payload and return the outcome
fn new_payload(&self, block: BlockMsg, peer_id: PeerId) -> ImportFut {
let engine = self.engine.clone();
Box::pin(async move {
let sealed_block = block.block.0.block.clone().seal();
let payload = HlPayloadTypes::block_to_payload(sealed_block);
@ -107,7 +106,7 @@ where
.into(),
_ => None,
},
Err(err) => None,
Err(_) => None,
}
})
}
@ -117,15 +116,10 @@ where
let engine = self.engine.clone();
let consensus = self.consensus.clone();
let sealed_block = block.block.0.block.clone().seal();
let hash = sealed_block.hash();
let number = sealed_block.number();
let (hash, number) = (sealed_block.hash(), sealed_block.number());
Box::pin(async move {
let (head_block_hash, current_hash) = match consensus.canonical_head(hash, number) {
Ok(hash) => hash,
Err(_) => return None,
};
let (head_block_hash, _) = consensus.canonical_head(hash, number).ok()?;
let state = ForkchoiceState {
head_block_hash,
safe_block_hash: head_block_hash,
@ -146,18 +140,15 @@ where
.into(),
_ => None,
},
Err(err) => None,
Err(_) => None,
}
})
}
/// Add a new block import task to the pending imports
fn on_new_block(&mut self, block: BlockMsg, peer_id: PeerId) {
let payload_fut = self.new_payload(block.clone(), peer_id);
self.pending_imports.push(payload_fut);
let fcu_fut = self.update_fork_choice(block, peer_id);
self.pending_imports.push(fcu_fut);
self.pending_imports.push(self.new_payload(block.clone(), peer_id));
self.pending_imports.push(self.update_fork_choice(block, peer_id));
}
}
@ -176,37 +167,19 @@ where
}
// Process completed imports and send events to network
while let Poll::Ready(Some(outcome)) = this.pending_imports.poll_next_unpin(cx) {
if let Some(outcome) = outcome {
while let Poll::Ready(Some(Some(outcome))) = this.pending_imports.poll_next_unpin(cx) {
if let Err(e) = this.to_network.send(BlockImportEvent::Outcome(outcome)) {
return Poll::Ready(Err(Box::new(e)));
}
}
}
Poll::Pending
}
}
pub(crate) fn collect_block(height: u64) -> Option<BlockAndReceipts> {
let ingest_dir = "/home/user/personal/evm-blocks";
let f = ((height - 1) / 1_000_000) * 1_000_000;
let s = ((height - 1) / 1_000) * 1_000;
let path = format!("{ingest_dir}/{f}/{s}/{height}.rmp.lz4");
if std::path::Path::new(&path).exists() {
let file = std::fs::File::open(path).unwrap();
let file = std::io::BufReader::new(file);
let mut decoder = lz4_flex::frame::FrameDecoder::new(file);
let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder).unwrap();
Some(blocks[0].clone())
} else {
None
}
}
#[cfg(test)]
mod tests {
use crate::chainspec::hl::hl_mainnet;
use crate::{chainspec::hl::hl_mainnet, HlHeader};
use super::*;
use alloy_primitives::{B256, U128};
@ -277,15 +250,12 @@ mod tests {
fn chain_info(&self) -> Result<ChainInfo, ProviderError> {
unimplemented!()
}
fn best_block_number(&self) -> Result<u64, ProviderError> {
Ok(0)
}
fn last_block_number(&self) -> Result<u64, ProviderError> {
Ok(0)
}
fn block_number(&self, _hash: B256) -> Result<Option<u64>, ProviderError> {
Ok(None)
}
@ -295,7 +265,6 @@ mod tests {
fn block_hash(&self, _number: u64) -> Result<Option<B256>, ProviderError> {
Ok(Some(B256::ZERO))
}
fn canonical_hashes_range(
&self,
_start: u64,
@ -315,14 +284,12 @@ mod tests {
fn both_valid() -> Self {
Self { new_payload: PayloadStatusEnum::Valid, fcu: PayloadStatusEnum::Valid }
}
fn invalid_new_payload() -> Self {
Self {
new_payload: PayloadStatusEnum::Invalid { validation_error: "test error".into() },
fcu: PayloadStatusEnum::Valid,
}
}
fn invalid_fcu() -> Self {
Self {
new_payload: PayloadStatusEnum::Valid,
@ -342,19 +309,15 @@ mod tests {
let consensus = Arc::new(HlConsensus { provider: MockProvider });
let (to_engine, from_engine) = mpsc::unbounded_channel();
let engine_handle = ConsensusEngineHandle::new(to_engine);
handle_engine_msg(from_engine, responses).await;
let (to_import, from_network) = mpsc::unbounded_channel();
let (to_network, import_outcome) = mpsc::unbounded_channel();
let handle = ImportHandle::new(to_import, import_outcome);
let service = ImportService::new(consensus, engine_handle, from_network, to_network);
tokio::spawn(Box::pin(async move {
service.await.unwrap();
}));
Self { handle }
}
@ -392,7 +355,7 @@ mod tests {
/// Creates a test block message
fn create_test_block() -> NewBlockMessage<HlNewBlock> {
let block = HlBlock {
header: Header::default(),
header: HlHeader::default(),
body: HlBlockBody {
inner: BlockBody {
transactions: Vec::new(),


@ -1,21 +1,20 @@
#![allow(clippy::owned_cow)]
use crate::{
HlBlock,
consensus::HlConsensus,
node::{
network::block_import::{handle::ImportHandle, service::ImportService, HlBlockImport},
HlNode,
network::block_import::{HlBlockImport, handle::ImportHandle, service::ImportService},
primitives::HlPrimitives,
rpc::engine_api::payload::HlPayloadTypes,
types::ReadPrecompileCalls,
HlNode,
},
pseudo_peer::{start_pseudo_peer, BlockSourceConfig},
HlBlock,
pseudo_peer::{BlockSourceConfig, start_pseudo_peer},
};
use alloy_rlp::{Decodable, Encodable};
// use handshake::HlHandshake;
use reth::{
api::{FullNodeTypes, TxTy},
builder::{components::NetworkBuilder, BuilderContext},
builder::{BuilderContext, components::NetworkBuilder},
transaction_pool::{PoolTransaction, TransactionPool},
};
use reth_discv4::NodeRecord;
@ -27,7 +26,7 @@ use reth_network_api::PeersInfo;
use reth_provider::StageCheckpointReader;
use reth_stages_types::StageId;
use std::sync::Arc;
use tokio::sync::{mpsc, oneshot, Mutex};
use tokio::sync::{Mutex, mpsc, oneshot};
use tracing::info;
pub mod block_import;
@ -39,10 +38,10 @@ pub struct HlNewBlock(pub NewBlock<HlBlock>);
mod rlp {
use super::*;
use crate::{
HlBlockBody, HlHeader,
node::primitives::{BlockBody, TransactionSigned},
HlBlockBody,
};
use alloy_consensus::{BlobTransactionSidecar, Header};
use alloy_consensus::BlobTransactionSidecar;
use alloy_primitives::{Address, U128};
use alloy_rlp::{RlpDecodable, RlpEncodable};
use alloy_rpc_types::Withdrawals;
@ -51,9 +50,9 @@ mod rlp {
#[derive(RlpEncodable, RlpDecodable)]
#[rlp(trailing)]
struct BlockHelper<'a> {
header: Cow<'a, Header>,
header: Cow<'a, HlHeader>,
transactions: Cow<'a, Vec<TransactionSigned>>,
ommers: Cow<'a, Vec<Header>>,
ommers: Cow<'a, Vec<HlHeader>>,
withdrawals: Option<Cow<'a, Withdrawals>>,
}
@ -69,32 +68,22 @@ mod rlp {
impl<'a> From<&'a HlNewBlock> for HlNewBlockHelper<'a> {
fn from(value: &'a HlNewBlock) -> Self {
let HlNewBlock(NewBlock {
block:
HlBlock {
header,
body:
HlBlockBody {
inner: BlockBody { transactions, ommers, withdrawals },
sidecars,
read_precompile_calls,
highest_precompile_address,
},
},
td,
}) = value;
let b = &value.0.block;
Self {
block: BlockHelper {
header: Cow::Borrowed(header),
transactions: Cow::Borrowed(transactions),
ommers: Cow::Borrowed(ommers),
withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
header: Cow::Borrowed(&b.header),
transactions: Cow::Borrowed(&b.body.inner.transactions),
ommers: Cow::Borrowed(&b.body.inner.ommers),
withdrawals: b.body.inner.withdrawals.as_ref().map(Cow::Borrowed),
},
td: *td,
sidecars: sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
td: value.0.td,
sidecars: b.body.sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: b.body.read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: b
.body
.highest_precompile_address
.as_ref()
.map(Cow::Borrowed),
}
}
}
@ -111,30 +100,24 @@ mod rlp {
impl Decodable for HlNewBlock {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let HlNewBlockHelper {
block: BlockHelper { header, transactions, ommers, withdrawals },
td,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = HlNewBlockHelper::decode(buf)?;
let h = HlNewBlockHelper::decode(buf)?;
Ok(HlNewBlock(NewBlock {
block: HlBlock {
header: header.into_owned(),
header: h.block.header.into_owned(),
body: HlBlockBody {
inner: BlockBody {
transactions: transactions.into_owned(),
ommers: ommers.into_owned(),
withdrawals: withdrawals.map(|w| w.into_owned()),
transactions: h.block.transactions.into_owned(),
ommers: h.block.ommers.into_owned(),
withdrawals: h.block.withdrawals.map(|w| w.into_owned()),
},
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address
sidecars: h.sidecars.map(|s| s.into_owned()),
read_precompile_calls: h.read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: h
.highest_precompile_address
.map(|s| s.into_owned()),
},
},
td,
td: h.td,
}))
}
}
@ -159,6 +142,8 @@ pub struct HlNetworkBuilder {
Arc<Mutex<Option<oneshot::Receiver<ConsensusEngineHandle<HlPayloadTypes>>>>>,
pub(crate) block_source_config: BlockSourceConfig,
pub(crate) debug_cutoff_height: Option<u64>,
}
impl HlNetworkBuilder {
@ -172,41 +157,32 @@ impl HlNetworkBuilder {
where
Node: FullNodeTypes<Types = HlNode>,
{
let Self { engine_handle_rx, .. } = self;
let network_builder = ctx.network_config_builder()?;
let (to_import, from_network) = mpsc::unbounded_channel();
let (to_network, import_outcome) = mpsc::unbounded_channel();
let handle = ImportHandle::new(to_import, import_outcome);
let consensus = Arc::new(HlConsensus { provider: ctx.provider().clone() });
ctx.task_executor().spawn_critical("block import", async move {
let handle = engine_handle_rx
let handle = self
.engine_handle_rx
.lock()
.await
.take()
.expect("node should only be launched once")
.await
.unwrap();
ImportService::new(consensus, handle, from_network, to_network).await.unwrap();
});
let network_builder = network_builder
Ok(ctx.build_network_config(
ctx.network_config_builder()?
.disable_dns_discovery()
.disable_nat()
.boot_nodes(boot_nodes())
.set_head(ctx.head())
.with_pow()
.block_import(Box::new(HlBlockImport::new(handle)));
// .discovery(discv4)
// .eth_rlpx_handshake(Arc::new(HlHandshake::default()));
let network_config = ctx.build_network_config(network_builder);
Ok(network_config)
.block_import(Box::new(HlBlockImport::new(handle))),
))
}
}
@ -229,11 +205,10 @@ where
pool: Pool,
) -> eyre::Result<Self::Network> {
let block_source_config = self.block_source_config.clone();
let network_config = self.network_config(ctx)?;
let network = NetworkManager::builder(network_config).await?;
let handle = ctx.start_network(network, pool);
let debug_cutoff_height = self.debug_cutoff_height;
let handle =
ctx.start_network(NetworkManager::builder(self.network_config(ctx)?).await?, pool);
let local_node_record = handle.local_node_record();
let chain_spec = ctx.chain_spec();
info!(target: "reth::cli", enode=%local_node_record, "P2P networking initialized");
let next_block_number = ctx
@ -243,10 +218,16 @@ where
.block_number +
1;
let chain_spec = ctx.chain_spec();
ctx.task_executor().spawn_critical("pseudo peer", async move {
let block_source =
block_source_config.create_cached_block_source((&*chain_spec).clone(), next_block_number).await;
start_pseudo_peer(chain_spec, local_node_record.to_string(), block_source)
start_pseudo_peer(
chain_spec.clone(),
local_node_record.to_string(),
block_source_config
.create_cached_block_source((*chain_spec).clone(), next_block_number)
.await,
debug_cutoff_height,
)
.await
.unwrap();
});


@ -6,12 +6,12 @@
//! The Ethereum transaction pool only supports TransactionSigned (EthereumTxEnvelope<TxEip4844>),
//! hence this placeholder for the transaction pool.
use crate::node::{primitives::TransactionSigned, HlNode};
use crate::node::{HlNode, primitives::TransactionSigned};
use alloy_consensus::{
error::ValueError, EthereumTxEnvelope, Transaction as TransactionTrait, TxEip4844,
EthereumTxEnvelope, Transaction as TransactionTrait, TxEip4844, error::ValueError,
};
use alloy_eips::{eip7702::SignedAuthorization, Typed2718};
use alloy_primitives::{Address, Bytes, ChainId, TxHash, TxKind, B256, U256};
use alloy_eips::{Typed2718, eip7702::SignedAuthorization};
use alloy_primitives::{Address, B256, Bytes, ChainId, TxHash, TxKind, U256};
use alloy_rpc_types::AccessList;
use reth::{
api::FullNodeTypes, builder::components::PoolBuilder, transaction_pool::PoolTransaction,
@ -19,7 +19,7 @@ use reth::{
use reth_ethereum_primitives::PooledTransactionVariant;
use reth_primitives::Recovered;
use reth_primitives_traits::InMemorySize;
use reth_transaction_pool::{noop::NoopTransactionPool, EthPoolTransaction};
use reth_transaction_pool::{EthPoolTransaction, noop::NoopTransactionPool};
use std::sync::Arc;
pub struct HlPoolBuilder;


@ -0,0 +1,49 @@
use super::{HlBlockBody, HlHeader, rlp};
use alloy_rlp::Encodable;
use reth_primitives_traits::{Block, InMemorySize};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
/// Block for HL
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct HlBlock {
pub header: HlHeader,
pub body: HlBlockBody,
}
impl InMemorySize for HlBlock {
fn size(&self) -> usize {
self.header.size() + self.body.size()
}
}
impl Block for HlBlock {
type Header = HlHeader;
type Body = HlBlockBody;
fn new(header: Self::Header, body: Self::Body) -> Self {
Self { header, body }
}
fn header(&self) -> &Self::Header {
&self.header
}
fn body(&self) -> &Self::Body {
&self.body
}
fn split(self) -> (Self::Header, Self::Body) {
(self.header, self.body)
}
fn rlp_length(header: &Self::Header, body: &Self::Body) -> usize {
rlp::BlockHelper {
header: Cow::Borrowed(header),
transactions: Cow::Borrowed(&body.inner.transactions),
ommers: Cow::Borrowed(&body.inner.ommers),
withdrawals: body.inner.withdrawals.as_ref().map(Cow::Borrowed),
sidecars: body.sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: body.read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: body.highest_precompile_address.as_ref().map(Cow::Borrowed),
}
.length()
}
}


@ -0,0 +1,77 @@
use alloy_consensus::BlobTransactionSidecar;
use alloy_primitives::Address;
use reth_primitives_traits::{BlockBody as BlockBodyTrait, InMemorySize};
use serde::{Deserialize, Serialize};
use crate::node::types::{ReadPrecompileCall, ReadPrecompileCalls};
use crate::{HlHeader, node::primitives::TransactionSigned};
/// Block body for HL. It is equivalent to the Ethereum [`BlockBody`] but additionally stores
/// blob-transaction sidecars and the HL read-precompile metadata.
#[derive(
Debug,
Clone,
Default,
PartialEq,
Eq,
Serialize,
Deserialize,
derive_more::Deref,
derive_more::DerefMut,
)]
pub struct HlBlockBody {
#[serde(flatten)]
#[deref]
#[deref_mut]
pub inner: BlockBody,
pub sidecars: Option<Vec<BlobTransactionSidecar>>,
pub read_precompile_calls: Option<ReadPrecompileCalls>,
pub highest_precompile_address: Option<Address>,
}
pub type BlockBody = alloy_consensus::BlockBody<TransactionSigned, HlHeader>;
impl InMemorySize for HlBlockBody {
fn size(&self) -> usize {
self.inner.size()
+ self
.sidecars
.as_ref()
.map_or(0, |s| s.capacity() * core::mem::size_of::<BlobTransactionSidecar>())
+ self
.read_precompile_calls
.as_ref()
.map_or(0, |s| s.0.capacity() * core::mem::size_of::<ReadPrecompileCall>())
}
}
impl BlockBodyTrait for HlBlockBody {
type Transaction = TransactionSigned;
type OmmerHeader = super::HlHeader;
fn transactions(&self) -> &[Self::Transaction] {
BlockBodyTrait::transactions(&self.inner)
}
fn into_ethereum_body(self) -> BlockBody {
self.inner
}
fn into_transactions(self) -> Vec<Self::Transaction> {
self.inner.into_transactions()
}
fn withdrawals(&self) -> Option<&alloy_rpc_types::Withdrawals> {
self.inner.withdrawals()
}
fn ommers(&self) -> Option<&[Self::OmmerHeader]> {
self.inner.ommers()
}
fn calculate_tx_root(&self) -> alloy_primitives::B256 {
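// System transactions are filtered out of the root computation (assumption:
// the canonical tx root covers user transactions only).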
alloy_consensus::proofs::calculate_transaction_root(
&self
.transactions()
.iter()
.filter(|tx| !tx.is_system_transaction())
.collect::<Vec<_>>(),
)
}
}


@ -0,0 +1,241 @@
use alloy_consensus::Header;
use alloy_primitives::{Address, B64, B256, BlockNumber, Bloom, Bytes, Sealable, U256};
use alloy_rlp::{RlpDecodable, RlpEncodable};
use reth_cli_commands::common::CliHeader;
use reth_codecs::Compact;
use reth_ethereum_primitives::EthereumReceipt;
use reth_primitives::{SealedHeader, logs_bloom};
use reth_primitives_traits::{BlockHeader, InMemorySize, serde_bincode_compat::RlpBincode};
use reth_rpc_convert::transaction::FromConsensusHeader;
use serde::{Deserialize, Serialize};
/// The header type of this node.
///
/// This type extends the regular Ethereum header with HL-specific extra fields.
#[derive(
Clone,
Debug,
PartialEq,
Eq,
Hash,
derive_more::AsRef,
derive_more::Deref,
Default,
RlpEncodable,
RlpDecodable,
Serialize,
Deserialize,
)]
#[serde(rename_all = "camelCase")]
pub struct HlHeader {
/// The regular eth header
#[as_ref]
#[deref]
pub inner: Header,
/// The extended header fields that are not part of the block hash
pub extras: HlHeaderExtras,
}
#[derive(
Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, RlpEncodable, RlpDecodable, Hash,
)]
pub struct HlHeaderExtras {
pub logs_bloom_with_system_txs: Bloom,
pub system_tx_count: u64,
}
impl HlHeader {
pub(crate) fn from_ethereum_header(header: Header, receipts: &[EthereumReceipt], system_tx_count: u64) -> HlHeader {
let logs_bloom = logs_bloom(receipts.iter().flat_map(|r| &r.logs));
HlHeader {
inner: header,
extras: HlHeaderExtras { logs_bloom_with_system_txs: logs_bloom, system_tx_count },
}
}
}
impl From<Header> for HlHeader {
fn from(_value: Header) -> Self {
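// A bare eth header cannot supply the extras; this impl exists only to satisfy
// trait bounds and must never actually be called.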
unreachable!()
}
}
impl AsRef<Self> for HlHeader {
fn as_ref(&self) -> &Self {
self
}
}
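// Sealing hashes only the inner eth header, so the extras never affect the block hash.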
impl Sealable for HlHeader {
fn hash_slow(&self) -> B256 {
self.inner.hash_slow()
}
}
impl alloy_consensus::BlockHeader for HlHeader {
fn parent_hash(&self) -> B256 {
self.inner.parent_hash()
}
fn ommers_hash(&self) -> B256 {
self.inner.ommers_hash()
}
fn beneficiary(&self) -> Address {
self.inner.beneficiary()
}
fn state_root(&self) -> B256 {
self.inner.state_root()
}
fn transactions_root(&self) -> B256 {
self.inner.transactions_root()
}
fn receipts_root(&self) -> B256 {
self.inner.receipts_root()
}
fn withdrawals_root(&self) -> Option<B256> {
self.inner.withdrawals_root()
}
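// Unlike the other accessors, this reports the bloom from `extras`, which also
// covers logs emitted by system transactions.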
fn logs_bloom(&self) -> Bloom {
self.extras.logs_bloom_with_system_txs
}
fn difficulty(&self) -> U256 {
self.inner.difficulty()
}
fn number(&self) -> BlockNumber {
self.inner.number()
}
fn gas_limit(&self) -> u64 {
self.inner.gas_limit()
}
fn gas_used(&self) -> u64 {
self.inner.gas_used()
}
fn timestamp(&self) -> u64 {
self.inner.timestamp()
}
fn mix_hash(&self) -> Option<B256> {
self.inner.mix_hash()
}
fn nonce(&self) -> Option<B64> {
self.inner.nonce()
}
fn base_fee_per_gas(&self) -> Option<u64> {
self.inner.base_fee_per_gas()
}
fn blob_gas_used(&self) -> Option<u64> {
self.inner.blob_gas_used()
}
fn excess_blob_gas(&self) -> Option<u64> {
self.inner.excess_blob_gas()
}
fn parent_beacon_block_root(&self) -> Option<B256> {
self.inner.parent_beacon_block_root()
}
fn requests_hash(&self) -> Option<B256> {
self.inner.requests_hash()
}
fn extra_data(&self) -> &Bytes {
self.inner.extra_data()
}
fn is_empty(&self) -> bool {
self.extras.system_tx_count == 0 && self.inner.is_empty()
}
}
impl InMemorySize for HlHeader {
fn size(&self) -> usize {
self.inner.size() + self.extras.size()
}
}
impl InMemorySize for HlHeaderExtras {
fn size(&self) -> usize {
self.logs_bloom_with_system_txs.data().len() + self.system_tx_count.size()
}
}
impl reth_codecs::Compact for HlHeader {
fn to_compact<B>(&self, buf: &mut B) -> usize
where
B: alloy_rlp::bytes::BufMut + AsMut<[u8]>,
{
// Because `Header` ends with `extra_data`, which is `Bytes`, we can't use `to_compact` for the
// extras: the `Compact` trait requires a `Bytes` field to be the last field of the struct, and
// `Bytes::from_compact` simply reads all trailing data as that field.
//
// Hence we use another form of serialization, since the extra fields are not Compact-compatible:
// the whole header is rmp-serialized and stored as a single `Bytes` field.
let result: Bytes = rmp_serde::to_vec(&self).unwrap().into();
result.to_compact(buf)
}
fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
let (bytes, remaining) = Bytes::from_compact(buf, len);
let header: HlHeader = rmp_serde::from_slice(&bytes).unwrap();
(header, remaining)
}
}
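// A hedged round-trip sketch (hypothetical test) for the rmp-in-Compact scheme above:
#[cfg(test)]
mod compact_roundtrip_sketch {
use super::*;
#[test]
fn default_header_roundtrips_through_compact() {
let header = HlHeader::default();
let mut buf = Vec::new();
let len = header.to_compact(&mut buf);
let (decoded, rest) = HlHeader::from_compact(&buf, len);
assert_eq!(decoded, header);
assert!(rest.is_empty());
}
}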
impl reth_db_api::table::Compress for HlHeader {
type Compressed = Vec<u8>;
fn compress_to_buf<B: alloy_primitives::bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
let _ = Compact::to_compact(self, buf);
}
}
impl reth_db_api::table::Decompress for HlHeader {
fn decompress(value: &[u8]) -> Result<Self, reth_db_api::DatabaseError> {
let (obj, _) = Compact::from_compact(value, value.len());
Ok(obj)
}
}
impl BlockHeader for HlHeader {}
impl RlpBincode for HlHeader {}
impl CliHeader for HlHeader {
fn set_number(&mut self, number: u64) {
self.inner.set_number(number);
}
}
impl From<HlHeader> for Header {
fn from(value: HlHeader) -> Self {
value.inner
}
}
pub fn to_ethereum_ommers(ommers: &[HlHeader]) -> Vec<Header> {
ommers.iter().map(|ommer| ommer.clone().into()).collect()
}
impl FromConsensusHeader<HlHeader> for alloy_rpc_types::Header {
fn from_consensus_header(header: SealedHeader<HlHeader>, block_size: usize) -> Self {
FromConsensusHeader::<Header>::from_consensus_header(
SealedHeader::<Header>::new(header.inner.clone(), header.hash()),
block_size,
)
}
}


@ -1,17 +1,18 @@
#![allow(clippy::owned_cow)]
use alloy_consensus::{BlobTransactionSidecar, Header};
use alloy_primitives::Address;
use alloy_rlp::{Encodable, RlpDecodable, RlpEncodable};
use reth_ethereum_primitives::Receipt;
use reth_primitives::NodePrimitives;
use reth_primitives_traits::{Block, BlockBody as BlockBodyTrait, InMemorySize};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use crate::node::types::{ReadPrecompileCall, ReadPrecompileCalls};
pub mod transaction;
pub use transaction::TransactionSigned;
pub mod tx_wrapper;
pub use tx_wrapper::{BlockBody, TransactionSigned};
pub mod block;
pub use block::HlBlock;
pub mod body;
pub use body::{BlockBody, HlBlockBody};
pub mod header;
pub use header::HlHeader;
pub mod rlp;
pub mod serde_bincode_compat;
/// Primitive types for HyperEVM.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
@ -20,332 +21,8 @@ pub struct HlPrimitives;
impl NodePrimitives for HlPrimitives {
type Block = HlBlock;
type BlockHeader = Header;
type BlockHeader = HlHeader;
type BlockBody = HlBlockBody;
type SignedTx = TransactionSigned;
type Receipt = Receipt;
}
/// Block body for HL. It is equivalent to Ethereum [`BlockBody`] but additionally stores sidecars
/// for blob transactions.
#[derive(
Debug,
Clone,
Default,
PartialEq,
Eq,
Serialize,
Deserialize,
derive_more::Deref,
derive_more::DerefMut,
)]
pub struct HlBlockBody {
#[serde(flatten)]
#[deref]
#[deref_mut]
pub inner: BlockBody,
pub sidecars: Option<Vec<BlobTransactionSidecar>>,
pub read_precompile_calls: Option<ReadPrecompileCalls>,
pub highest_precompile_address: Option<Address>,
}
impl InMemorySize for HlBlockBody {
fn size(&self) -> usize {
self.inner.size() +
self.sidecars
.as_ref()
.map_or(0, |s| s.capacity() * core::mem::size_of::<BlobTransactionSidecar>()) +
self.read_precompile_calls
.as_ref()
.map_or(0, |s| s.0.capacity() * core::mem::size_of::<ReadPrecompileCall>())
}
}
impl BlockBodyTrait for HlBlockBody {
type Transaction = TransactionSigned;
type OmmerHeader = Header;
fn transactions(&self) -> &[Self::Transaction] {
BlockBodyTrait::transactions(&self.inner)
}
fn into_ethereum_body(self) -> BlockBody {
self.inner
}
fn into_transactions(self) -> Vec<Self::Transaction> {
self.inner.into_transactions()
}
fn withdrawals(&self) -> Option<&alloy_rpc_types::Withdrawals> {
self.inner.withdrawals()
}
fn ommers(&self) -> Option<&[Self::OmmerHeader]> {
self.inner.ommers()
}
fn calculate_tx_root(&self) -> alloy_primitives::B256 {
alloy_consensus::proofs::calculate_transaction_root(
&self
.transactions()
.iter()
.filter(|tx| !tx.is_system_transaction())
.collect::<Vec<_>>(),
)
}
}
/// Block for HL
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct HlBlock {
pub header: Header,
pub body: HlBlockBody,
}
impl InMemorySize for HlBlock {
fn size(&self) -> usize {
self.header.size() + self.body.size()
}
}
impl Block for HlBlock {
type Header = Header;
type Body = HlBlockBody;
fn new(header: Self::Header, body: Self::Body) -> Self {
Self { header, body }
}
fn header(&self) -> &Self::Header {
&self.header
}
fn body(&self) -> &Self::Body {
&self.body
}
fn split(self) -> (Self::Header, Self::Body) {
(self.header, self.body)
}
fn rlp_length(header: &Self::Header, body: &Self::Body) -> usize {
rlp::BlockHelper {
header: Cow::Borrowed(header),
transactions: Cow::Borrowed(&body.inner.transactions),
ommers: Cow::Borrowed(&body.inner.ommers),
withdrawals: body.inner.withdrawals.as_ref().map(Cow::Borrowed),
sidecars: body.sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: body.read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: body.highest_precompile_address.as_ref().map(Cow::Borrowed),
}
.length()
}
}
mod rlp {
use super::*;
use alloy_eips::eip4895::Withdrawals;
use alloy_rlp::Decodable;
#[derive(RlpEncodable, RlpDecodable)]
#[rlp(trailing)]
struct BlockBodyHelper<'a> {
transactions: Cow<'a, Vec<TransactionSigned>>,
ommers: Cow<'a, Vec<Header>>,
withdrawals: Option<Cow<'a, Withdrawals>>,
sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
highest_precompile_address: Option<Cow<'a, Address>>,
}
#[derive(RlpEncodable, RlpDecodable)]
#[rlp(trailing)]
pub(crate) struct BlockHelper<'a> {
pub(crate) header: Cow<'a, Header>,
pub(crate) transactions: Cow<'a, Vec<TransactionSigned>>,
pub(crate) ommers: Cow<'a, Vec<Header>>,
pub(crate) withdrawals: Option<Cow<'a, Withdrawals>>,
pub(crate) sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
pub(crate) read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
pub(crate) highest_precompile_address: Option<Cow<'a, Address>>,
}
impl<'a> From<&'a HlBlockBody> for BlockBodyHelper<'a> {
fn from(value: &'a HlBlockBody) -> Self {
let HlBlockBody {
inner: BlockBody { transactions, ommers, withdrawals },
sidecars,
read_precompile_calls,
highest_precompile_address,
} = value;
Self {
transactions: Cow::Borrowed(transactions),
ommers: Cow::Borrowed(ommers),
withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
sidecars: sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
}
}
}
impl<'a> From<&'a HlBlock> for BlockHelper<'a> {
fn from(value: &'a HlBlock) -> Self {
let HlBlock {
header,
body:
HlBlockBody {
inner: BlockBody { transactions, ommers, withdrawals },
sidecars,
read_precompile_calls,
highest_precompile_address,
},
} = value;
Self {
header: Cow::Borrowed(header),
transactions: Cow::Borrowed(transactions),
ommers: Cow::Borrowed(ommers),
withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
sidecars: sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
}
}
}
impl Encodable for HlBlockBody {
fn encode(&self, out: &mut dyn bytes::BufMut) {
BlockBodyHelper::from(self).encode(out);
}
fn length(&self) -> usize {
BlockBodyHelper::from(self).length()
}
}
impl Decodable for HlBlockBody {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let BlockBodyHelper {
transactions,
ommers,
withdrawals,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = BlockBodyHelper::decode(buf)?;
Ok(Self {
inner: BlockBody {
transactions: transactions.into_owned(),
ommers: ommers.into_owned(),
withdrawals: withdrawals.map(|w| w.into_owned()),
},
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
})
}
}
impl Encodable for HlBlock {
fn encode(&self, out: &mut dyn bytes::BufMut) {
BlockHelper::from(self).encode(out);
}
fn length(&self) -> usize {
BlockHelper::from(self).length()
}
}
impl Decodable for HlBlock {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let BlockHelper {
header,
transactions,
ommers,
withdrawals,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = BlockHelper::decode(buf)?;
Ok(Self {
header: header.into_owned(),
body: HlBlockBody {
inner: BlockBody {
transactions: transactions.into_owned(),
ommers: ommers.into_owned(),
withdrawals: withdrawals.map(|w| w.into_owned()),
},
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
},
})
}
}
}
pub mod serde_bincode_compat {
use super::*;
use reth_primitives_traits::serde_bincode_compat::{BincodeReprFor, SerdeBincodeCompat};
#[derive(Debug, Serialize, Deserialize)]
pub struct HlBlockBodyBincode<'a> {
inner: BincodeReprFor<'a, BlockBody>,
sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
highest_precompile_address: Option<Cow<'a, Address>>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct HlBlockBincode<'a> {
header: BincodeReprFor<'a, Header>,
body: BincodeReprFor<'a, HlBlockBody>,
}
impl SerdeBincodeCompat for HlBlockBody {
type BincodeRepr<'a> = HlBlockBodyBincode<'a>;
fn as_repr(&self) -> Self::BincodeRepr<'_> {
HlBlockBodyBincode {
inner: self.inner.as_repr(),
sidecars: self.sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: self.read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: self
.highest_precompile_address
.as_ref()
.map(Cow::Borrowed),
}
}
fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
let HlBlockBodyBincode {
inner,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = repr;
Self {
inner: BlockBody::from_repr(inner),
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
}
}
}
impl SerdeBincodeCompat for HlBlock {
type BincodeRepr<'a> = HlBlockBincode<'a>;
fn as_repr(&self) -> Self::BincodeRepr<'_> {
HlBlockBincode { header: self.header.as_repr(), body: self.body.as_repr() }
}
fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
let HlBlockBincode { header, body } = repr;
Self { header: Header::from_repr(header), body: HlBlockBody::from_repr(body) }
}
}
}

src/node/primitives/rlp.rs

@ -0,0 +1,142 @@
#![allow(clippy::owned_cow)]
use super::{HlBlock, HlBlockBody, TransactionSigned};
use crate::{node::types::ReadPrecompileCalls, HlHeader};
use alloy_consensus::{BlobTransactionSidecar, BlockBody};
use alloy_eips::eip4895::Withdrawals;
use alloy_primitives::Address;
use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable};
use std::borrow::Cow;
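// The helpers below borrow every field via `Cow`, so encoding works off references
// without cloning while decoding produces owned values from the same struct definitions.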
#[derive(RlpEncodable, RlpDecodable)]
#[rlp(trailing)]
struct BlockBodyHelper<'a> {
transactions: Cow<'a, Vec<TransactionSigned>>,
ommers: Cow<'a, Vec<HlHeader>>,
withdrawals: Option<Cow<'a, Withdrawals>>,
sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
highest_precompile_address: Option<Cow<'a, Address>>,
}
#[derive(RlpEncodable, RlpDecodable)]
#[rlp(trailing)]
pub(crate) struct BlockHelper<'a> {
pub(crate) header: Cow<'a, HlHeader>,
pub(crate) transactions: Cow<'a, Vec<TransactionSigned>>,
pub(crate) ommers: Cow<'a, Vec<HlHeader>>,
pub(crate) withdrawals: Option<Cow<'a, Withdrawals>>,
pub(crate) sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
pub(crate) read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
pub(crate) highest_precompile_address: Option<Cow<'a, Address>>,
}
impl<'a> From<&'a HlBlockBody> for BlockBodyHelper<'a> {
fn from(value: &'a HlBlockBody) -> Self {
let HlBlockBody {
inner: BlockBody { transactions, ommers, withdrawals },
sidecars,
read_precompile_calls,
highest_precompile_address,
} = value;
Self {
transactions: Cow::Borrowed(transactions),
ommers: Cow::Borrowed(ommers),
withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
sidecars: sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
}
}
}
impl<'a> From<&'a HlBlock> for BlockHelper<'a> {
fn from(value: &'a HlBlock) -> Self {
let HlBlock {
header,
body:
HlBlockBody {
inner: BlockBody { transactions, ommers, withdrawals },
sidecars,
read_precompile_calls,
highest_precompile_address,
},
} = value;
Self {
header: Cow::Borrowed(header),
transactions: Cow::Borrowed(transactions),
ommers: Cow::Borrowed(ommers),
withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
sidecars: sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
}
}
}
impl Encodable for HlBlockBody {
fn encode(&self, out: &mut dyn bytes::BufMut) {
BlockBodyHelper::from(self).encode(out);
}
fn length(&self) -> usize {
BlockBodyHelper::from(self).length()
}
}
impl Decodable for HlBlockBody {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let BlockBodyHelper {
transactions,
ommers,
withdrawals,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = BlockBodyHelper::decode(buf)?;
Ok(Self {
inner: BlockBody {
transactions: transactions.into_owned(),
ommers: ommers.into_owned(),
withdrawals: withdrawals.map(|w| w.into_owned()),
},
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
})
}
}
impl Encodable for HlBlock {
fn encode(&self, out: &mut dyn bytes::BufMut) {
BlockHelper::from(self).encode(out);
}
fn length(&self) -> usize {
BlockHelper::from(self).length()
}
}
impl Decodable for HlBlock {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let BlockHelper {
header,
transactions,
ommers,
withdrawals,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = BlockHelper::decode(buf)?;
Ok(Self {
header: header.into_owned(),
body: HlBlockBody {
inner: BlockBody {
transactions: transactions.into_owned(),
ommers: ommers.into_owned(),
withdrawals: withdrawals.map(|w| w.into_owned()),
},
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
},
})
}
}


@ -0,0 +1,64 @@
#![allow(clippy::owned_cow)]
use alloy_consensus::BlobTransactionSidecar;
use alloy_primitives::Address;
use reth_primitives_traits::serde_bincode_compat::{BincodeReprFor, SerdeBincodeCompat};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use super::{HlBlock, HlBlockBody};
use crate::{node::{primitives::BlockBody, types::ReadPrecompileCalls}, HlHeader};
#[derive(Debug, Serialize, Deserialize)]
pub struct HlBlockBodyBincode<'a> {
inner: BincodeReprFor<'a, BlockBody>,
sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
highest_precompile_address: Option<Cow<'a, Address>>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct HlBlockBincode<'a> {
header: BincodeReprFor<'a, HlHeader>,
body: BincodeReprFor<'a, HlBlockBody>,
}
impl SerdeBincodeCompat for HlBlockBody {
type BincodeRepr<'a> = HlBlockBodyBincode<'a>;
fn as_repr(&self) -> Self::BincodeRepr<'_> {
HlBlockBodyBincode {
inner: self.inner.as_repr(),
sidecars: self.sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: self.read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: self.highest_precompile_address.as_ref().map(Cow::Borrowed),
}
}
fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
let HlBlockBodyBincode {
inner,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = repr;
Self {
inner: BlockBody::from_repr(inner),
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
}
}
}
impl SerdeBincodeCompat for HlBlock {
type BincodeRepr<'a> = HlBlockBincode<'a>;
fn as_repr(&self) -> Self::BincodeRepr<'_> {
HlBlockBincode { header: self.header.as_repr(), body: self.body.as_repr() }
}
fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
let HlBlockBincode { header, body } = repr;
Self { header: HlHeader::from_repr(header), body: HlBlockBody::from_repr(body) }
}
}


@ -1,33 +1,35 @@
//! HlNodePrimitives::TransactionSigned; it's the same as the Ethereum transaction type,
//! except that it supports a pseudo signer for system transactions.
use std::convert::Infallible;
use crate::evm::transaction::HlTxEnv;
use alloy_consensus::{
crypto::RecoveryError, error::ValueError, EthereumTxEnvelope, EthereumTypedTransaction,
SignableTransaction, Signed, Transaction as TransactionTrait, TransactionEnvelope, TxEip1559,
TxEip2930, TxEip4844, TxEip4844WithSidecar, TxEip7702, TxLegacy, TxType, TypedTransaction,
TxEip2930, TxEip4844, TxEip7702, TxLegacy, TxType, TypedTransaction, crypto::RecoveryError,
error::ValueError, transaction::TxHashRef,
};
use alloy_eips::{eip7594::BlobTransactionSidecarVariant, Encodable2718};
use alloy_eips::Encodable2718;
use alloy_network::TxSigner;
use alloy_primitives::{address, Address, TxHash, U256};
use alloy_primitives::{Address, TxHash, U256, address};
use alloy_rpc_types::{Transaction, TransactionInfo, TransactionRequest};
use alloy_signer::Signature;
use reth_codecs::alloy::transaction::FromTxCompact;
use reth_codecs::alloy::transaction::{Envelope, FromTxCompact};
use reth_db::{
table::{Compress, Decompress},
DatabaseError,
table::{Compress, Decompress},
};
use reth_ethereum_primitives::PooledTransactionVariant;
use reth_evm::FromRecoveredTx;
use reth_primitives::Recovered;
use reth_primitives_traits::{
serde_bincode_compat::SerdeBincodeCompat, InMemorySize, SignedTransaction, SignerRecoverable,
InMemorySize, SignedTransaction, SignerRecoverable, serde_bincode_compat::SerdeBincodeCompat,
};
use reth_rpc_eth_api::{
transaction::{FromConsensusTx, TryIntoTxEnv},
EthTxEnvError, SignTxRequestError, SignableTxRequest, TryIntoSimTx,
transaction::{FromConsensusTx, TryIntoTxEnv},
};
use revm::context::{BlockEnv, CfgEnv, TxEnv};
use crate::evm::transaction::HlTxEnv;
type InnerType = alloy_consensus::EthereumTxEnvelope<TxEip4844>;
#[derive(Debug, Clone, TransactionEnvelope)]
@ -46,6 +48,12 @@ fn s_to_address(s: U256) -> Address {
Address::from_slice(&buf)
}
impl TxHashRef for TransactionSigned {
fn tx_hash(&self) -> &TxHash {
self.inner().tx_hash()
}
}
impl SignerRecoverable for TransactionSigned {
fn recover_signer(&self) -> Result<Address, RecoveryError> {
if self.is_system_transaction() {
@ -69,11 +77,7 @@ impl SignerRecoverable for TransactionSigned {
}
}
impl SignedTransaction for TransactionSigned {
fn tx_hash(&self) -> &TxHash {
self.inner().tx_hash()
}
}
impl SignedTransaction for TransactionSigned {}
// ------------------------------------------------------------
// NOTE: All lines below are just wrappers for the inner type.
@ -114,11 +118,6 @@ impl reth_codecs::Compact for TransactionSigned {
}
}
pub fn convert_recovered(value: Recovered<TransactionSigned>) -> Recovered<InnerType> {
let (tx, signer) = value.into_parts();
Recovered::new_unchecked(tx.into_inner(), signer)
}
impl FromRecoveredTx<TransactionSigned> for TxEnv {
fn from_recovered_tx(tx: &TransactionSigned, sender: Address) -> Self {
TxEnv::from_recovered_tx(&tx.inner(), sender)
@ -162,16 +161,8 @@ impl TransactionSigned {
}
}
pub fn signature(&self) -> &Signature {
self.inner().signature()
}
pub const fn tx_type(&self) -> TxType {
self.inner().tx_type()
}
pub fn is_system_transaction(&self) -> bool {
self.gas_price().is_some() && self.gas_price().unwrap() == 0
matches!(self.gas_price(), Some(0))
}
}
@ -190,40 +181,16 @@ impl SerdeBincodeCompat for TransactionSigned {
}
}
pub type BlockBody = alloy_consensus::BlockBody<TransactionSigned>;
impl From<TransactionSigned> for EthereumTxEnvelope<TxEip4844> {
fn from(value: TransactionSigned) -> Self {
value.into_inner()
}
}
impl TryFrom<TransactionSigned> for EthereumTxEnvelope<TxEip4844WithSidecar> {
type Error = <InnerType as TryInto<EthereumTxEnvelope<TxEip4844WithSidecar>>>::Error;
impl TryFrom<TransactionSigned> for PooledTransactionVariant {
type Error = <InnerType as TryInto<PooledTransactionVariant>>::Error;
fn try_from(value: TransactionSigned) -> Result<Self, Self::Error> {
value.into_inner().try_into()
}
}
impl TryFrom<TransactionSigned>
for EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>
{
type Error = <InnerType as TryInto<
EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>,
>>::Error;
fn try_from(value: TransactionSigned) -> Result<Self, Self::Error> {
value.into_inner().try_into()
}
}
impl From<EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>>
for TransactionSigned
{
fn from(
value: EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>,
) -> Self {
impl From<PooledTransactionVariant> for TransactionSigned {
fn from(value: PooledTransactionVariant) -> Self {
Self::Default(value.into())
}
}
@ -231,10 +198,6 @@ impl From<EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>
impl Compress for TransactionSigned {
type Compressed = Vec<u8>;
fn compress(self) -> Self::Compressed {
self.into_inner().compress()
}
fn compress_to_buf<B: bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
self.inner().compress_to_buf(buf);
}
@ -246,22 +209,6 @@ impl Decompress for TransactionSigned {
}
}
pub fn convert_to_eth_block_body(value: BlockBody) -> alloy_consensus::BlockBody<InnerType> {
alloy_consensus::BlockBody {
transactions: value.transactions.into_iter().map(|tx| tx.into_inner()).collect(),
ommers: value.ommers,
withdrawals: value.withdrawals,
}
}
pub fn convert_to_hl_block_body(value: alloy_consensus::BlockBody<InnerType>) -> BlockBody {
BlockBody {
transactions: value.transactions.into_iter().map(TransactionSigned::Default).collect(),
ommers: value.ommers,
withdrawals: value.withdrawals,
}
}
impl TryIntoSimTx<TransactionSigned> for TransactionRequest {
fn try_into_sim_tx(self) -> Result<TransactionSigned, ValueError<Self>> {
let tx = self
@ -289,9 +236,17 @@ impl TryIntoTxEnv<HlTxEnv<TxEnv>> for TransactionRequest {
impl FromConsensusTx<TransactionSigned> for Transaction {
type TxInfo = TransactionInfo;
type Err = Infallible;
fn from_consensus_tx(tx: TransactionSigned, signer: Address, tx_info: Self::TxInfo) -> Self {
Self::from_transaction(Recovered::new_unchecked(tx.into_inner().into(), signer), tx_info)
fn from_consensus_tx(
tx: TransactionSigned,
signer: Address,
tx_info: Self::TxInfo,
) -> Result<Self, Self::Err> {
Ok(Self::from_transaction(
Recovered::new_unchecked(tx.into_inner().into(), signer),
tx_info,
))
}
}
@ -300,26 +255,7 @@ impl SignableTxRequest<TransactionSigned> for TransactionRequest {
self,
signer: impl TxSigner<Signature> + Send,
) -> Result<TransactionSigned, SignTxRequestError> {
let mut tx =
self.build_typed_tx().map_err(|_| SignTxRequestError::InvalidTransactionRequest)?;
let signature = signer.sign_transaction(&mut tx).await?;
let signed = match tx {
EthereumTypedTransaction::Legacy(tx) => {
EthereumTxEnvelope::Legacy(tx.into_signed(signature))
}
EthereumTypedTransaction::Eip2930(tx) => {
EthereumTxEnvelope::Eip2930(tx.into_signed(signature))
}
EthereumTypedTransaction::Eip1559(tx) => {
EthereumTxEnvelope::Eip1559(tx.into_signed(signature))
}
EthereumTypedTransaction::Eip4844(tx) => {
EthereumTxEnvelope::Eip4844(TxEip4844::from(tx).into_signed(signature))
}
EthereumTypedTransaction::Eip7702(tx) => {
EthereumTxEnvelope::Eip7702(tx.into_signed(signature))
}
};
let signed = SignableTxRequest::<InnerType>::try_build_and_sign(self, signer).await?;
Ok(TransactionSigned::Default(signed))
}
}


@ -1,17 +1,17 @@
use crate::node::rpc::HlEthApi;
use crate::node::rpc::{HlEthApi, HlRpcNodeCore};
use reth::rpc::server_types::eth::{
builder::config::PendingBlockKind, error::FromEvmError, EthApiError, PendingBlock,
EthApiError, PendingBlock, builder::config::PendingBlockKind, error::FromEvmError,
};
use reth_rpc_eth_api::{
RpcConvert,
helpers::{
pending_block::PendingEnvBuilder, EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt,
EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, pending_block::PendingEnvBuilder,
},
RpcConvert, RpcNodeCore,
};
impl<N, Rpc> EthBlocks for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
@ -19,7 +19,7 @@ where
impl<N, Rpc> LoadBlock for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
@ -27,9 +27,9 @@ where
impl<N, Rpc> LoadPendingBlock for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
#[inline]
fn pending_block(&self) -> &tokio::sync::Mutex<Option<PendingBlock<N::Primitives>>> {
@ -49,8 +49,7 @@ where
impl<N, Rpc> LoadReceipt for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
}


@ -1,32 +1,45 @@
use super::HlEthApi;
use core::fmt;
use super::{HlEthApi, HlRpcNodeCore};
use crate::{HlBlock, node::evm::apply_precompiles};
use alloy_consensus::transaction::TxHashRef;
use alloy_evm::Evm;
use alloy_primitives::B256;
use reth::rpc::server_types::eth::EthApiError;
use reth_evm::TxEnvFor;
use reth_evm::{ConfigureEvm, Database, EvmEnvFor, HaltReasonFor, InspectorFor, SpecFor, TxEnvFor};
use reth_primitives::{NodePrimitives, Recovered};
use reth_provider::{ProviderError, ProviderTx};
use reth_rpc_eth_api::{
helpers::{estimate::EstimateCall, Call, EthCall},
FromEvmError, RpcConvert, RpcNodeCore,
helpers::{Call, EthCall},
};
use revm::{DatabaseCommit, context::result::ResultAndState};
impl<N> HlRpcNodeCore for N where N: RpcNodeCore<Primitives: NodePrimitives<Block = HlBlock>> {}
impl<N, Rpc> EthCall for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, TxEnv = TxEnvFor<N::Evm>>,
{
}
impl<N, Rpc> EstimateCall for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, TxEnv = TxEnvFor<N::Evm>>,
Rpc: RpcConvert<
Primitives = N::Primitives,
Error = EthApiError,
TxEnv = TxEnvFor<N::Evm>,
Spec = SpecFor<N::Evm>,
>,
{
}
impl<N, Rpc> Call for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, TxEnv = TxEnvFor<N::Evm>>,
Rpc: RpcConvert<
Primitives = N::Primitives,
Error = EthApiError,
TxEnv = TxEnvFor<N::Evm>,
Spec = SpecFor<N::Evm>,
>,
{
#[inline]
fn call_gas_limit(&self) -> u64 {
@ -37,4 +50,75 @@ where
fn max_simulate_blocks(&self) -> u64 {
self.inner.eth_api.max_simulate_blocks()
}
fn transact<DB>(
&self,
db: DB,
evm_env: EvmEnvFor<Self::Evm>,
tx_env: TxEnvFor<Self::Evm>,
) -> Result<ResultAndState<HaltReasonFor<Self::Evm>>, Self::Error>
where
DB: Database<Error = ProviderError> + fmt::Debug,
{
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.to::<u64>().into())?;
let mut evm = self.evm_config().evm_with_env(db, evm_env);
apply_precompiles(&mut evm, &hl_extras);
let res = evm.transact(tx_env).map_err(Self::Error::from_evm_err)?;
Ok(res)
}
fn transact_with_inspector<DB, I>(
&self,
db: DB,
evm_env: EvmEnvFor<Self::Evm>,
tx_env: TxEnvFor<Self::Evm>,
inspector: I,
) -> Result<ResultAndState<HaltReasonFor<Self::Evm>>, Self::Error>
where
DB: Database<Error = ProviderError> + fmt::Debug,
I: InspectorFor<Self::Evm, DB>,
{
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.to::<u64>().into())?;
let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, inspector);
apply_precompiles(&mut evm, &hl_extras);
let res = evm.transact(tx_env).map_err(Self::Error::from_evm_err)?;
Ok(res)
}
fn replay_transactions_until<'a, DB, I>(
&self,
db: &mut DB,
evm_env: EvmEnvFor<Self::Evm>,
transactions: I,
target_tx_hash: B256,
) -> Result<usize, Self::Error>
where
DB: Database<Error = ProviderError> + DatabaseCommit + core::fmt::Debug,
I: IntoIterator<Item = Recovered<&'a ProviderTx<Self::Provider>>>,
{
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.to::<u64>().into())?;
let mut evm = self.evm_config().evm_with_env(db, evm_env);
apply_precompiles(&mut evm, &hl_extras);
let mut index = 0;
for tx in transactions {
if *tx.tx_hash() == target_tx_hash {
// reached the target transaction
break;
}
let tx_env = self.evm_config().tx_env(tx);
evm.transact_commit(tx_env).map_err(Self::Error::from_evm_err)?;
index += 1;
}
Ok(index)
}
}

View File

@ -9,7 +9,7 @@ use alloy_primitives::B256;
use alloy_rpc_types_engine::PayloadError;
use reth::{
api::{FullNodeComponents, NodeTypes},
builder::{rpc::PayloadValidatorBuilder, AddOnsContext},
builder::{AddOnsContext, rpc::PayloadValidatorBuilder},
};
use reth_engine_primitives::{ExecutionPayload, PayloadValidator};
use reth_payload_primitives::NewPayloadError;
@ -36,7 +36,7 @@ where
}
}
/// Validator for Optimism engine API.
/// Validator for HyperEVM engine API.
#[derive(Debug, Clone)]
pub struct HlPayloadValidator {
inner: HlExecutionPayloadValidator<HlChainSpec>,
@ -123,7 +123,7 @@ where
return Err(PayloadError::BlockHash {
execution: sealed_block.hash(),
consensus: expected_hash,
})?;
});
}
Ok(sealed_block)

src/node/rpc/estimate.rs Normal file
View File

@ -0,0 +1,214 @@
use super::{HlEthApi, HlRpcNodeCore, apply_precompiles};
use alloy_evm::overrides::{StateOverrideError, apply_state_overrides};
use alloy_network::TransactionBuilder;
use alloy_primitives::{TxKind, U256};
use alloy_rpc_types_eth::state::StateOverride;
use reth_chainspec::MIN_TRANSACTION_GAS;
use reth_errors::ProviderError;
use reth_evm::{ConfigureEvm, Evm, EvmEnvFor, SpecFor, TransactionEnv, TxEnvFor};
use reth_revm::{database::StateProviderDatabase, db::CacheDB};
use reth_rpc_convert::{RpcConvert, RpcTxReq};
use reth_rpc_eth_api::{
AsEthApiError, IntoEthApiError, RpcNodeCore,
helpers::{
Call,
estimate::{EstimateCall, update_estimated_gas_range},
},
};
use reth_rpc_eth_types::{
EthApiError, RevertError, RpcInvalidTransactionError,
error::{FromEvmError, api::FromEvmHalt},
};
use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO};
use reth_storage_api::StateProvider;
use revm::context_interface::{Transaction, result::ExecutionResult};
use tracing::trace;
impl<N, Rpc> EstimateCall for HlEthApi<N, Rpc>
where
Self: Call,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm> + From<StateOverrideError<ProviderError>>,
Rpc: RpcConvert<
Primitives = N::Primitives,
Error = EthApiError,
TxEnv = TxEnvFor<N::Evm>,
Spec = SpecFor<N::Evm>,
>,
{
// Modified version that adds `apply_precompiles`; comments are stripped out.
fn estimate_gas_with<S>(
&self,
mut evm_env: EvmEnvFor<Self::Evm>,
mut request: RpcTxReq<<Self::RpcConvert as RpcConvert>::Network>,
state: S,
state_override: Option<StateOverride>,
) -> Result<U256, Self::Error>
where
S: StateProvider,
{
evm_env.cfg_env.disable_eip3607 = true;
evm_env.cfg_env.disable_base_fee = true;
request.as_mut().take_nonce();
let tx_request_gas_limit = request.as_ref().gas_limit();
let tx_request_gas_price = request.as_ref().gas_price();
let max_gas_limit = evm_env
.cfg_env
.tx_gas_limit_cap
.map_or(evm_env.block_env.gas_limit, |cap| cap.min(evm_env.block_env.gas_limit));
let mut highest_gas_limit = tx_request_gas_limit
.map(|mut tx_gas_limit| {
if max_gas_limit < tx_gas_limit {
tx_gas_limit = max_gas_limit;
}
tx_gas_limit
})
.unwrap_or(max_gas_limit);
let mut db = CacheDB::new(StateProviderDatabase::new(state));
if let Some(state_override) = state_override {
apply_state_overrides(state_override, &mut db).map_err(
|err: StateOverrideError<ProviderError>| {
let eth_api_error: EthApiError = EthApiError::from(err);
Self::Error::from(eth_api_error)
},
)?;
}
let mut tx_env = self.create_txn_env(&evm_env, request, &mut db)?;
let mut is_basic_transfer = false;
if tx_env.input().is_empty() &&
let TxKind::Call(to) = tx_env.kind() &&
let Ok(code) = db.db.account_code(&to)
{
is_basic_transfer = code.map(|code| code.is_empty()).unwrap_or(true);
}
if tx_env.gas_price() > 0 {
highest_gas_limit =
highest_gas_limit.min(self.caller_gas_allowance(&mut db, &evm_env, &tx_env)?);
}
tx_env.set_gas_limit(tx_env.gas_limit().min(highest_gas_limit));
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.to::<u64>().into())?;
let mut evm = self.evm_config().evm_with_env(&mut db, evm_env);
apply_precompiles(&mut evm, &hl_extras);
if is_basic_transfer {
let mut min_tx_env = tx_env.clone();
min_tx_env.set_gas_limit(MIN_TRANSACTION_GAS);
if let Ok(res) = evm.transact(min_tx_env).map_err(Self::Error::from_evm_err) &&
res.result.is_success()
{
return Ok(U256::from(MIN_TRANSACTION_GAS));
}
}
trace!(target: "rpc::eth::estimate", ?tx_env, gas_limit = tx_env.gas_limit(), is_basic_transfer, "Starting gas estimation");
let mut res = match evm.transact(tx_env.clone()).map_err(Self::Error::from_evm_err) {
Err(err)
if err.is_gas_too_high() &&
(tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) =>
{
return Self::map_out_of_gas_err(&mut evm, tx_env, max_gas_limit);
}
Err(err) if err.is_gas_too_low() => {
return Err(RpcInvalidTransactionError::GasRequiredExceedsAllowance {
gas_limit: tx_env.gas_limit(),
}
.into_eth_err());
}
ethres => ethres?,
};
let gas_refund = match res.result {
ExecutionResult::Success { gas_refunded, .. } => gas_refunded,
ExecutionResult::Halt { reason, .. } => {
return Err(Self::Error::from_evm_halt(reason, tx_env.gas_limit()));
}
ExecutionResult::Revert { output, .. } => {
return if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() {
Self::map_out_of_gas_err(&mut evm, tx_env, max_gas_limit)
} else {
Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err())
};
}
};
highest_gas_limit = tx_env.gas_limit();
let mut gas_used = res.result.gas_used();
let mut lowest_gas_limit = gas_used.saturating_sub(1);
let optimistic_gas_limit = (gas_used + gas_refund + CALL_STIPEND_GAS) * 64 / 63;
if optimistic_gas_limit < highest_gas_limit {
let mut optimistic_tx_env = tx_env.clone();
optimistic_tx_env.set_gas_limit(optimistic_gas_limit);
res = evm.transact(optimistic_tx_env).map_err(Self::Error::from_evm_err)?;
gas_used = res.result.gas_used();
update_estimated_gas_range(
res.result,
optimistic_gas_limit,
&mut highest_gas_limit,
&mut lowest_gas_limit,
)?;
};
let mut mid_gas_limit = std::cmp::min(
gas_used * 3,
((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64,
);
trace!(target: "rpc::eth::estimate", ?highest_gas_limit, ?lowest_gas_limit, ?mid_gas_limit, "Starting binary search for gas");
while lowest_gas_limit + 1 < highest_gas_limit {
if (highest_gas_limit - lowest_gas_limit) as f64 / (highest_gas_limit as f64) <
ESTIMATE_GAS_ERROR_RATIO
{
break;
};
let mut mid_tx_env = tx_env.clone();
mid_tx_env.set_gas_limit(mid_gas_limit);
match evm.transact(mid_tx_env).map_err(Self::Error::from_evm_err) {
Err(err) if err.is_gas_too_high() => {
highest_gas_limit = mid_gas_limit;
}
Err(err) if err.is_gas_too_low() => {
lowest_gas_limit = mid_gas_limit;
}
ethres => {
res = ethres?;
update_estimated_gas_range(
res.result,
mid_gas_limit,
&mut highest_gas_limit,
&mut lowest_gas_limit,
)?;
}
}
mid_gas_limit = ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64;
}
Ok(U256::from(highest_gas_limit))
}
}
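The loop above narrows [lowest_gas_limit, highest_gas_limit] by bisection until the relative gap drops below ESTIMATE_GAS_ERROR_RATIO. A dependency-free sketch of that narrowing, with succeeds_with standing in for an EVM execution (the names and the 1.5% ratio are illustrative, not reth APIs):

fn narrow_gas_limit(mut lowest: u64, mut highest: u64, succeeds_with: impl Fn(u64) -> bool) -> u64 {
    const ERROR_RATIO: f64 = 0.015; // stand-in for ESTIMATE_GAS_ERROR_RATIO
    while lowest + 1 < highest {
        if (highest - lowest) as f64 / (highest as f64) < ERROR_RATIO {
            break; // bounds are close enough; stop early like the code above
        }
        let mid = ((highest as u128 + lowest as u128) / 2) as u64;
        if succeeds_with(mid) {
            highest = mid; // execution fit into `mid`; try a tighter limit
        } else {
            lowest = mid; // ran out of gas; need more
        }
    }
    highest
}

fn main() {
    // A call that needs exactly 53_000 gas converges from a wide bracket.
    let estimate = narrow_gas_limit(21_000, 30_000_000, |limit| limit >= 53_000);
    assert!(estimate >= 53_000);
}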

View File

@ -1,45 +1,58 @@
use crate::{
HlBlock, HlPrimitives,
chainspec::HlChainSpec,
node::{evm::apply_precompiles, types::HlExtras},
};
use alloy_eips::BlockId;
use alloy_evm::Evm;
use alloy_network::Ethereum;
use alloy_primitives::U256;
use reth::{
api::{FullNodeTypes, HeaderTy, NodeTypes, PrimitivesTy},
builder::{
rpc::{EthApiBuilder, EthApiCtx},
FullNodeComponents,
rpc::{EthApiBuilder, EthApiCtx},
},
rpc::{
eth::{core::EthApiInner, DevSigner, FullEthApiServer},
eth::{DevSigner, FullEthApiServer, core::EthApiInner},
server_types::eth::{
receipt::EthReceiptConverter, EthApiError, EthStateCache, FeeHistoryCache,
GasPriceOracle,
EthApiError, EthStateCache, FeeHistoryCache, GasPriceOracle,
receipt::EthReceiptConverter,
},
},
tasks::{
pool::{BlockingTaskGuard, BlockingTaskPool},
TaskSpawner,
pool::{BlockingTaskGuard, BlockingTaskPool},
},
};
use reth_evm::ConfigureEvm;
use reth_provider::{ChainSpecProvider, ProviderHeader, ProviderTx};
use reth_evm::{ConfigureEvm, Database, EvmEnvFor, HaltReasonFor, InspectorFor, TxEnvFor};
use reth_primitives::NodePrimitives;
use reth_provider::{
BlockReaderIdExt, ChainSpecProvider, ProviderError, ProviderHeader, ProviderTx,
};
use reth_rpc::RpcTypes;
use reth_rpc_eth_api::{
helpers::{
pending_block::BuildPendingEnv, spec::SignersForApi, AddDevSigners, EthApiSpec, EthFees,
EthState, LoadFee, LoadState, SpawnBlocking, Trace,
},
EthApiTypes, FromEvmError, RpcConvert, RpcConverter, RpcNodeCore, RpcNodeCoreExt,
SignableTxRequest,
helpers::{
AddDevSigners, EthApiSpec, EthFees, EthState, LoadFee, LoadPendingBlock, LoadState,
SpawnBlocking, Trace, pending_block::BuildPendingEnv, spec::SignersForApi,
},
};
use revm::context::result::ResultAndState;
use std::{fmt, marker::PhantomData, sync::Arc};
use crate::chainspec::HlChainSpec;
mod block;
mod call;
pub mod engine_api;
mod estimate;
pub mod precompile;
mod transaction;
pub trait HlRpcNodeCore: RpcNodeCore<Primitives: NodePrimitives<Block = HlBlock>> {}
/// Container type `HlEthApi`
pub(crate) struct HlEthApiInner<N: RpcNodeCore, Rpc: RpcConvert> {
pub(crate) struct HlEthApiInner<N: HlRpcNodeCore, Rpc: RpcConvert> {
/// Gateway to node's core components.
pub(crate) eth_api: EthApiInner<N, Rpc>,
}
@ -47,15 +60,20 @@ pub(crate) struct HlEthApiInner<N: RpcNodeCore, Rpc: RpcConvert> {
type HlRpcConvert<N, NetworkT> =
RpcConverter<NetworkT, <N as FullNodeComponents>::Evm, EthReceiptConverter<HlChainSpec>>;
#[derive(Clone)]
pub struct HlEthApi<N: RpcNodeCore, Rpc: RpcConvert> {
pub struct HlEthApi<N: HlRpcNodeCore, Rpc: RpcConvert> {
/// Gateway to node's core components.
pub(crate) inner: Arc<HlEthApiInner<N, Rpc>>,
}
impl<N: HlRpcNodeCore, Rpc: RpcConvert> Clone for HlEthApi<N, Rpc> {
fn clone(&self) -> Self {
Self { inner: self.inner.clone() }
}
}
impl<N, Rpc> fmt::Debug for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@ -65,8 +83,8 @@ where
impl<N, Rpc> EthApiTypes for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
type Error = EthApiError;
type NetworkTypes = Rpc::Network;
@ -79,7 +97,7 @@ where
impl<N, Rpc> RpcNodeCore for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
type Primitives = N::Primitives;
@ -111,7 +129,7 @@ where
impl<N, Rpc> RpcNodeCoreExt for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
#[inline]
@ -122,7 +140,7 @@ where
impl<N, Rpc> EthApiSpec for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
type Transaction = ProviderTx<Self::Provider>;
@ -141,8 +159,8 @@ where
impl<N, Rpc> SpawnBlocking for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
#[inline]
fn io_task_spawner(&self) -> impl TaskSpawner {
@ -162,7 +180,7 @@ where
impl<N, Rpc> LoadFee for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
@ -179,15 +197,17 @@ where
impl<N, Rpc> LoadState for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
Self: LoadPendingBlock,
{
}
impl<N, Rpc> EthState for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
Self: LoadPendingBlock,
{
#[inline]
fn max_proof_window(&self) -> u64 {
@ -197,7 +217,7 @@ where
impl<N, Rpc> EthFees for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
@ -205,15 +225,50 @@ where
impl<N, Rpc> Trace for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn inspect<DB, I>(
&self,
db: DB,
evm_env: EvmEnvFor<Self::Evm>,
tx_env: TxEnvFor<Self::Evm>,
inspector: I,
) -> Result<ResultAndState<HaltReasonFor<Self::Evm>>, Self::Error>
where
DB: Database<Error = ProviderError>,
I: InspectorFor<Self::Evm, DB>,
{
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.to::<u64>().into())?;
let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, inspector);
apply_precompiles(&mut evm, &hl_extras);
evm.transact(tx_env).map_err(Self::Error::from_evm_err)
}
}
impl<N, Rpc> HlEthApi<N, Rpc>
where
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn get_hl_extras(&self, block: BlockId) -> Result<HlExtras, ProviderError> {
Ok(self
.provider()
.block_by_id(block)?
.map(|block| HlExtras {
read_precompile_calls: block.body.read_precompile_calls.clone(),
highest_precompile_address: block.body.highest_precompile_address,
})
.unwrap_or_default())
}
}
impl<N, Rpc> AddDevSigners for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<
Network: RpcTypes<TransactionRequest: SignableTxRequest<ProviderTx<N::Provider>>>,
>,
@ -239,7 +294,7 @@ impl<NetworkT> Default for HlEthApiBuilder<NetworkT> {
impl<N, NetworkT> EthApiBuilder<N> for HlEthApiBuilder<NetworkT>
where
N: FullNodeComponents<Types: NodeTypes<ChainSpec = HlChainSpec>>
N: FullNodeComponents<Types: NodeTypes<ChainSpec = HlChainSpec, Primitives = HlPrimitives>>
+ RpcNodeCore<
Primitives = PrimitivesTy<N::Types>,
Evm: ConfigureEvm<NextBlockEnvCtx: BuildPendingEnv<HeaderTy<N::Types>>>,

View File

@ -0,0 +1,44 @@
use alloy_eips::BlockId;
use jsonrpsee::proc_macros::rpc;
use jsonrpsee_core::{RpcResult, async_trait};
use reth_rpc_convert::RpcConvert;
use reth_rpc_eth_types::EthApiError;
use tracing::trace;
use crate::node::{
rpc::{HlEthApi, HlRpcNodeCore},
types::HlExtras,
};
/// A custom RPC trait for fetching block precompile data.
#[rpc(server, namespace = "eth")]
#[async_trait]
pub trait HlBlockPrecompileApi {
/// Fetches precompile data for a given block.
#[method(name = "blockPrecompileData")]
async fn block_precompile_data(&self, block: BlockId) -> RpcResult<HlExtras>;
}
pub struct HlBlockPrecompileExt<N: HlRpcNodeCore, Rpc: RpcConvert> {
eth_api: HlEthApi<N, Rpc>,
}
impl<N: HlRpcNodeCore, Rpc: RpcConvert> HlBlockPrecompileExt<N, Rpc> {
/// Creates a new instance of the [`HlBlockPrecompileExt`].
pub fn new(eth_api: HlEthApi<N, Rpc>) -> Self {
Self { eth_api }
}
}
#[async_trait]
impl<N, Rpc> HlBlockPrecompileApiServer for HlBlockPrecompileExt<N, Rpc>
where
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
async fn block_precompile_data(&self, block: BlockId) -> RpcResult<HlExtras> {
trace!(target: "rpc::eth", ?block, "Serving eth_blockPrecompileData");
let hl_extras = self.eth_api.get_hl_extras(block).map_err(EthApiError::from)?;
Ok(hl_extras)
}
}
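A minimal client-side sketch of calling the new endpoint, reusing the jsonrpsee client pattern this codebase already uses for eth_getBlockByHash; the URL is a placeholder and the response is left untyped here:

use jsonrpsee::http_client::HttpClientBuilder;
use jsonrpsee_core::client::ClientT;

async fn fetch_block_precompile_data() -> eyre::Result<()> {
    // Placeholder endpoint; any node exposing the namespace above works.
    let client = HttpClientBuilder::default().build("http://localhost:8545")?;
    // The server deserializes the parameter as a BlockId, e.g. "latest" or a number.
    let extras: serde_json::Value =
        client.request("eth_blockPrecompileData", ("latest",)).await?;
    println!("{extras:?}");
    Ok(())
}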

View File

@ -1,21 +1,23 @@
use crate::node::rpc::HlEthApi;
use alloy_primitives::{Bytes, B256};
use std::time::Duration;
use crate::node::rpc::{HlEthApi, HlRpcNodeCore};
use alloy_primitives::{B256, Bytes};
use reth::rpc::server_types::eth::EthApiError;
use reth_rpc_eth_api::{
helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction},
RpcConvert, RpcNodeCore,
RpcConvert,
helpers::{EthTransactions, LoadTransaction, spec::SignersForRpc},
};
impl<N, Rpc> LoadTransaction for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
}
impl<N, Rpc> EthTransactions for HlEthApi<N, Rpc>
where
N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn signers(&self) -> &SignersForRpc<Self::Provider, Self::NetworkTypes> {
@ -25,4 +27,8 @@ where
async fn send_raw_transaction(&self, _tx: Bytes) -> Result<B256, Self::Error> {
unreachable!()
}
fn send_raw_transaction_sync_timeout(&self) -> Duration {
self.inner.eth_api.send_raw_transaction_sync_timeout()
}
}

View File

@ -5,6 +5,8 @@ use std::collections::BTreeMap;
use crate::chainspec::{MAINNET_CHAIN_ID, TESTNET_CHAIN_ID};
mod patch;
#[derive(Debug, Clone, Serialize, Deserialize)]
struct EvmContract {
address: Address,
@ -58,5 +60,10 @@ pub(crate) fn erc20_contract_to_spot_token(chain_id: u64) -> Result<BTreeMap<Add
map.insert(evm_contract.address, SpotId { index: token.index });
}
}
if chain_id == TESTNET_CHAIN_ID {
patch::patch_testnet_spot_meta(&mut map);
}
Ok(map)
}

View File

@ -0,0 +1,8 @@
use crate::node::spot_meta::SpotId;
use alloy_primitives::{Address, address};
use std::collections::BTreeMap;
/// Testnet-specific fix for #67
pub(super) fn patch_testnet_spot_meta(map: &mut BTreeMap<Address, SpotId>) {
map.insert(address!("0xd9cbec81df392a88aeff575e962d149d57f4d6bc"), SpotId { index: 0 });
}

View File

@ -1,29 +1,27 @@
use crate::{
node::{
primitives::tx_wrapper::{convert_to_eth_block_body, convert_to_hl_block_body},
types::HlExtras,
},
HlBlock, HlBlockBody, HlPrimitives,
HlBlock, HlBlockBody, HlHeader, HlPrimitives,
node::{primitives::TransactionSigned, types::HlExtras},
};
use alloy_consensus::BlockHeader;
use alloy_primitives::Bytes;
use reth_chainspec::EthereumHardforks;
use reth_db::{
DbTxUnwindExt,
cursor::{DbCursorRO, DbCursorRW},
transaction::{DbTx, DbTxMut},
DbTxUnwindExt,
};
use reth_primitives_traits::Block;
use reth_provider::{
providers::{ChainStorage, NodeTypesForProvider},
BlockBodyReader, BlockBodyWriter, ChainSpecProvider, ChainStorageReader, ChainStorageWriter,
DBProvider, DatabaseProvider, EthStorage, ProviderResult, ReadBodyInput, StorageLocation,
providers::{ChainStorage, NodeTypesForProvider},
};
pub mod tables;
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct HlStorage(EthStorage);
pub struct HlStorage(EthStorage<TransactionSigned, HlHeader>);
impl HlStorage {
fn write_precompile_calls<Provider>(
@ -89,30 +87,17 @@ where
let mut read_precompile_calls = Vec::with_capacity(bodies.len());
for (block_number, body) in bodies {
match body {
let (inner_opt, extras) = match body {
Some(HlBlockBody {
inner,
sidecars: _,
read_precompile_calls: rpc,
read_precompile_calls,
highest_precompile_address,
}) => {
eth_bodies.push((block_number, Some(convert_to_eth_block_body(inner))));
read_precompile_calls.push((
block_number,
HlExtras { read_precompile_calls: rpc, highest_precompile_address },
));
}
None => {
eth_bodies.push((block_number, None));
read_precompile_calls.push((
block_number,
HlExtras {
read_precompile_calls: Default::default(),
highest_precompile_address: None,
},
));
}
}
}) => (Some(inner), HlExtras { read_precompile_calls, highest_precompile_address }),
None => Default::default(),
};
eth_bodies.push((block_number, inner_opt));
read_precompile_calls.push((block_number, extras));
}
self.0.write_block_bodies(provider, eth_bodies, write_to)?;
@ -146,22 +131,16 @@ where
inputs: Vec<ReadBodyInput<'_, Self::Block>>,
) -> ProviderResult<Vec<HlBlockBody>> {
let read_precompile_calls = self.read_precompile_calls(provider, &inputs)?;
let eth_bodies = self.0.read_block_bodies(
provider,
inputs
.into_iter()
.map(|(header, transactions)| {
(header, transactions.into_iter().map(|tx| tx.into_inner()).collect())
})
.collect(),
)?;
let inputs: Vec<(&<Self::Block as Block>::Header, _)> = inputs;
let eth_bodies = self.0.read_block_bodies(provider, inputs)?;
let eth_bodies: Vec<alloy_consensus::BlockBody<_, HlHeader>> = eth_bodies;
// NOTE: sidecars are not used in HyperEVM yet.
Ok(eth_bodies
.into_iter()
.zip(read_precompile_calls)
.map(|(inner, extra)| HlBlockBody {
inner: convert_to_hl_block_body(inner),
inner,
sidecars: None,
read_precompile_calls: extra.read_precompile_calls,
highest_precompile_address: extra.highest_precompile_address,

View File

@ -1,5 +1,5 @@
use alloy_primitives::{BlockNumber, Bytes};
use reth_db::{table::TableInfo, tables, TableSet, TableType, TableViewer};
use reth_db::{TableSet, TableType, TableViewer, table::TableInfo, tables};
use std::fmt;
tables! {

View File

@ -2,16 +2,19 @@
//!
//! Changes:
//! - ReadPrecompileCalls supports RLP encoding / decoding
use alloy_primitives::{Address, Bytes, Log, B256};
use alloy_consensus::TxType;
use alloy_primitives::{Address, B256, Bytes, Log};
use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable};
use bytes::BufMut;
use reth_ethereum_primitives::EthereumReceipt;
use reth_primitives_traits::InMemorySize;
use serde::{Deserialize, Serialize};
use crate::HlBlock;
pub type ReadPrecompileCall = (Address, Vec<(ReadPrecompileInput, ReadPrecompileResult)>);
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Default)]
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Default, Hash)]
pub struct ReadPrecompileCalls(pub Vec<ReadPrecompileCall>);
pub(crate) mod reth_compat;
@ -22,6 +25,13 @@ pub struct HlExtras {
pub highest_precompile_address: Option<Address>,
}
impl InMemorySize for HlExtras {
fn size(&self) -> usize {
self.read_precompile_calls.as_ref().map_or(0, |s| s.0.len()) +
self.highest_precompile_address.as_ref().map_or(0, |_| 20)
}
}
impl Encodable for ReadPrecompileCalls {
fn encode(&self, out: &mut dyn BufMut) {
let buf: Bytes = rmp_serde::to_vec(&self.0).unwrap().into();
@ -56,6 +66,7 @@ impl BlockAndReceipts {
self.read_precompile_calls.clone(),
self.highest_precompile_address,
self.system_txs.clone(),
self.receipts.clone(),
chain_id,
)
}
@ -84,6 +95,23 @@ pub struct LegacyReceipt {
logs: Vec<Log>,
}
impl From<LegacyReceipt> for EthereumReceipt {
fn from(r: LegacyReceipt) -> Self {
EthereumReceipt {
tx_type: match r.tx_type {
LegacyTxType::Legacy => TxType::Legacy,
LegacyTxType::Eip2930 => TxType::Eip2930,
LegacyTxType::Eip1559 => TxType::Eip1559,
LegacyTxType::Eip4844 => TxType::Eip4844,
LegacyTxType::Eip7702 => TxType::Eip7702,
},
success: r.success,
cumulative_gas_used: r.cumulative_gas_used,
logs: r.logs,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
enum LegacyTxType {
Legacy = 0,
@ -99,6 +127,19 @@ pub struct SystemTx {
pub receipt: Option<LegacyReceipt>,
}
impl SystemTx {
pub fn gas_limit(&self) -> u64 {
use reth_compat::Transaction;
match &self.tx {
Transaction::Legacy(tx) => tx.gas_limit,
Transaction::Eip2930(tx) => tx.gas_limit,
Transaction::Eip1559(tx) => tx.gas_limit,
Transaction::Eip4844(tx) => tx.gas_limit,
Transaction::Eip7702(tx) => tx.gas_limit,
}
}
}
#[derive(
Debug,
Clone,
@ -117,7 +158,7 @@ pub struct ReadPrecompileInput {
pub gas_limit: u64,
}
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Hash)]
pub enum ReadPrecompileResult {
Ok { gas_used: u64, bytes: Bytes },
OutOfGas,

View File

@ -10,12 +10,12 @@ use std::{
use tracing::info;
use crate::{
HlBlock, HlBlockBody, HlHeader,
node::{
primitives::TransactionSigned as TxSigned,
spot_meta::{erc20_contract_to_spot_token, SpotId},
types::{ReadPrecompileCalls, SystemTx},
spot_meta::{SpotId, erc20_contract_to_spot_token},
types::{LegacyReceipt, ReadPrecompileCalls, SystemTx},
},
HlBlock, HlBlockBody,
};
/// A raw transaction.
@ -114,22 +114,39 @@ impl SealedBlock {
read_precompile_calls: ReadPrecompileCalls,
highest_precompile_address: Option<Address>,
system_txs: Vec<super::SystemTx>,
receipts: Vec<LegacyReceipt>,
chain_id: u64,
) -> HlBlock {
// NOTE: Filter out system transactions that may be rejected by the EVM (tracked by #97, testnet only).
let system_txs: Vec<_> = system_txs.into_iter().filter(|tx| tx.gas_limit() != 0).collect();
let mut merged_txs = vec![];
merged_txs.extend(system_txs.iter().map(|tx| system_tx_to_reth_transaction(tx, chain_id)));
merged_txs.extend(self.body.transactions.iter().map(|tx| tx.to_reth_transaction()));
let mut merged_receipts = vec![];
merged_receipts.extend(system_txs.iter().map(|tx| tx.receipt.clone().unwrap().into()));
merged_receipts.extend(receipts.into_iter().map(From::from));
let block_body = HlBlockBody {
inner: reth_primitives::BlockBody {
transactions: merged_txs,
withdrawals: self.body.withdrawals.clone(),
ommers: self.body.ommers.clone(),
ommers: vec![],
},
sidecars: None,
read_precompile_calls: Some(read_precompile_calls),
highest_precompile_address,
};
HlBlock { header: self.header.header.clone(), body: block_body }
let system_tx_count = system_txs.len() as u64;
HlBlock {
header: HlHeader::from_ethereum_header(
self.header.header.clone(),
&merged_receipts,
system_tx_count,
),
body: block_body,
}
}
}

View File

@ -1,3 +1,7 @@
use std::time::Duration;
use crate::pseudo_peer::HlNodeBlockSourceArgs;
use super::config::BlockSourceConfig;
use clap::{Args, Parser};
use reth_node_core::args::LogArgs;
@ -13,7 +17,7 @@ pub struct BlockSourceArgs {
block_source: Option<String>,
#[arg(long, alias = "local-ingest-dir")]
block_source_from_node: Option<String>,
local_ingest_dir: Option<String>,
/// Shorthand for --block-source=s3://hl-mainnet-evm-blocks
#[arg(long, default_value_t = false)]
@ -22,6 +26,19 @@ pub struct BlockSourceArgs {
/// Shorthand for --block-source-from-node=~/hl/data/evm_blocks_and_receipts
#[arg(long)]
local: bool,
/// Interval for polling new blocks in S3 in milliseconds.
#[arg(id = "s3.polling-interval", long = "s3.polling-interval", default_value = "25")]
s3_polling_interval: u64,
/// Maximum allowed delay for the hl-node block source in milliseconds.
/// If this threshold is exceeded, the client falls back to other sources.
#[arg(
id = "local.fallback-threshold",
long = "local.fallback-threshold",
default_value = "5000"
)]
local_fallback_threshold: u64,
}
impl BlockSourceArgs {
@ -33,7 +50,10 @@ impl BlockSourceArgs {
async fn create_base_config(&self) -> eyre::Result<BlockSourceConfig> {
if self.s3 {
return Ok(BlockSourceConfig::s3_default().await);
return Ok(BlockSourceConfig::s3_default(Duration::from_millis(
self.s3_polling_interval,
))
.await);
}
if self.local {
@ -47,18 +67,25 @@ impl BlockSourceArgs {
};
if let Some(bucket) = value.strip_prefix("s3://") {
Ok(BlockSourceConfig::s3(bucket.to_string()).await)
Ok(BlockSourceConfig::s3(
bucket.to_string(),
Duration::from_millis(self.s3_polling_interval),
)
.await)
} else {
Ok(BlockSourceConfig::local(value.into()))
}
}
fn apply_node_source_config(&self, config: BlockSourceConfig) -> BlockSourceConfig {
let Some(block_source_from_node) = self.block_source_from_node.as_ref() else {
let Some(local_ingest_dir) = self.local_ingest_dir.as_ref() else {
return config;
};
config.with_block_source_from_node(block_source_from_node.to_string())
config.with_block_source_from_node(HlNodeBlockSourceArgs {
root: local_ingest_dir.into(),
fallback_threshold: Duration::from_millis(self.local_fallback_threshold),
})
}
}

View File

@ -1,31 +1,38 @@
use crate::chainspec::HlChainSpec;
use super::sources::{
BlockSourceBoxed, CachedBlockSource, HlNodeBlockSource, LocalBlockSource, S3BlockSource,
BlockSourceBoxed, CachedBlockSource, HlNodeBlockSource, HlNodeBlockSourceArgs,
LocalBlockSource, S3BlockSource,
};
use aws_config::BehaviorVersion;
use std::{env::home_dir, path::PathBuf, sync::Arc};
use std::{env::home_dir, path::PathBuf, sync::Arc, time::Duration};
#[derive(Debug, Clone)]
pub struct BlockSourceConfig {
pub source_type: BlockSourceType,
pub block_source_from_node: Option<String>,
pub block_source_from_node: Option<HlNodeBlockSourceArgs>,
}
#[derive(Debug, Clone)]
pub enum BlockSourceType {
S3Default,
S3 { bucket: String },
S3Default { polling_interval: Duration },
S3 { bucket: String, polling_interval: Duration },
Local { path: PathBuf },
}
impl BlockSourceConfig {
pub async fn s3_default() -> Self {
Self { source_type: BlockSourceType::S3Default, block_source_from_node: None }
pub async fn s3_default(polling_interval: Duration) -> Self {
Self {
source_type: BlockSourceType::S3Default { polling_interval },
block_source_from_node: None,
}
}
pub async fn s3(bucket: String) -> Self {
Self { source_type: BlockSourceType::S3 { bucket }, block_source_from_node: None }
pub async fn s3(bucket: String, polling_interval: Duration) -> Self {
Self {
source_type: BlockSourceType::S3 { bucket, polling_interval },
block_source_from_node: None,
}
}
pub fn local(path: PathBuf) -> Self {
@ -39,21 +46,28 @@ impl BlockSourceConfig {
.expect("home dir not found")
.join("hl")
.join("data")
.join("evm_blocks_and_receipts"),
.join("evm_block_and_receipts"),
},
block_source_from_node: None,
}
}
pub fn with_block_source_from_node(mut self, block_source_from_node: String) -> Self {
pub fn with_block_source_from_node(
mut self,
block_source_from_node: HlNodeBlockSourceArgs,
) -> Self {
self.block_source_from_node = Some(block_source_from_node);
self
}
pub async fn create_block_source(&self, chain_spec: HlChainSpec) -> BlockSourceBoxed {
match &self.source_type {
BlockSourceType::S3Default => s3_block_source(chain_spec.official_s3_bucket()).await,
BlockSourceType::S3 { bucket } => s3_block_source(bucket).await,
BlockSourceType::S3Default { polling_interval } => {
s3_block_source(chain_spec.official_s3_bucket(), *polling_interval).await
}
BlockSourceType::S3 { bucket, polling_interval } => {
s3_block_source(bucket, *polling_interval).await
}
BlockSourceType::Local { path } => {
Arc::new(Box::new(LocalBlockSource::new(path.clone())))
}
@ -72,7 +86,7 @@ impl BlockSourceConfig {
Arc::new(Box::new(
HlNodeBlockSource::new(
fallback_block_source,
PathBuf::from(block_source_from_node.clone()),
block_source_from_node.clone(),
next_block_number,
)
.await,
@ -91,9 +105,9 @@ impl BlockSourceConfig {
}
}
async fn s3_block_source(bucket: impl AsRef<str>) -> BlockSourceBoxed {
async fn s3_block_source(bucket: impl AsRef<str>, polling_interval: Duration) -> BlockSourceBoxed {
let client = aws_sdk_s3::Client::new(
&aws_config::defaults(BehaviorVersion::latest()).region("ap-northeast-1").load().await,
);
Arc::new(Box::new(S3BlockSource::new(client, bucket.as_ref().to_string())))
Arc::new(Box::new(S3BlockSource::new(client, bucket.as_ref().to_string(), polling_interval)))
}
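Putting the new knobs together, a minimal sketch (crate-local types assumed in scope; values are placeholders) of building a config with a custom polling interval and an hl-node ingest fallback:

use std::time::Duration;

async fn example_block_source(chain_spec: HlChainSpec) -> BlockSourceBoxed {
    let config = BlockSourceConfig::s3(
        "hl-mainnet-evm-blocks".to_string(),
        Duration::from_millis(25), // --s3.polling-interval
    )
    .await
    .with_block_source_from_node(HlNodeBlockSourceArgs {
        root: "/home/user/hl/data/evm_block_and_receipts".into(),
        fallback_threshold: Duration::from_millis(5000), // --local.fallback-threshold
    });
    config.create_block_source(chain_spec).await
}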

View File

@ -1 +0,0 @@
pub const MAX_CONCURRENCY: usize = 100;

View File

@ -1,36 +0,0 @@
use thiserror::Error;
#[derive(Error, Debug)]
pub enum PseudoPeerError {
#[error("Block source error: {0}")]
BlockSource(String),
#[error("Network error: {0}")]
Network(#[from] reth_network::error::NetworkError),
#[error("Configuration error: {0}")]
Config(String),
#[error("AWS S3 error: {0}")]
S3(#[from] aws_sdk_s3::Error),
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("Serialization error: {0}")]
Serialization(#[from] rmp_serde::encode::Error),
#[error("Deserialization error: {0}")]
Deserialization(#[from] rmp_serde::decode::Error),
#[error("Compression error: {0}")]
Compression(String),
}
impl From<eyre::Error> for PseudoPeerError {
fn from(err: eyre::Error) -> Self {
PseudoPeerError::Config(err.to_string())
}
}
pub type Result<T> = std::result::Result<T, PseudoPeerError>;

View File

@ -5,33 +5,25 @@
pub mod cli;
pub mod config;
pub mod consts;
pub mod error;
pub mod network;
pub mod service;
pub mod sources;
pub mod utils;
use std::sync::Arc;
use tokio::sync::mpsc;
use tracing::{error, info};
pub use cli::*;
pub use config::*;
pub use error::*;
pub use network::*;
pub use service::*;
pub use sources::*;
#[cfg(test)]
mod tests;
use tokio::sync::mpsc;
use tracing::info;
/// Re-export commonly used types
pub mod prelude {
pub use super::{
config::BlockSourceConfig,
error::{PseudoPeerError, Result},
service::{BlockPoller, PseudoPeer},
sources::{BlockSource, CachedBlockSource, LocalBlockSource, S3BlockSource},
};
@ -45,6 +37,7 @@ pub async fn start_pseudo_peer(
chain_spec: Arc<HlChainSpec>,
destination_peer: String,
block_source: BlockSourceBoxed,
debug_cutoff_height: Option<u64>,
) -> eyre::Result<()> {
let blockhash_cache = new_blockhash_cache();
@ -54,6 +47,7 @@ pub async fn start_pseudo_peer(
destination_peer,
block_source.clone(),
blockhash_cache.clone(),
debug_cutoff_height,
)
.await?;
@ -86,9 +80,12 @@ pub async fn start_pseudo_peer(
_ = transaction_rx.recv() => {}
Some(eth_req) = eth_rx.recv() => {
service.process_eth_request(eth_req).await?;
if let Err(e) = service.process_eth_request(eth_req).await {
error!("Error processing eth request: {e:?}");
} else {
info!("Processed eth request");
}
}
}
}
}

View File

@ -1,12 +1,16 @@
use super::service::{BlockHashCache, BlockPoller};
use crate::{chainspec::HlChainSpec, node::network::HlNetworkPrimitives, HlPrimitives};
use crate::{HlPrimitives, chainspec::HlChainSpec, node::network::HlNetworkPrimitives};
use reth_network::{
config::{rng_secret_key, SecretKey},
NetworkConfig, NetworkManager, PeersConfig,
config::{SecretKey, rng_secret_key},
};
use reth_network_peers::TrustedPeer;
use reth_provider::test_utils::NoopProvider;
use std::{str::FromStr, sync::Arc};
use std::{
net::{Ipv4Addr, SocketAddr},
str::FromStr,
sync::Arc,
};
use tokio::sync::mpsc;
pub struct NetworkBuilder {
@ -16,6 +20,7 @@ pub struct NetworkBuilder {
discovery_port: u16,
listener_port: u16,
chain_spec: HlChainSpec,
debug_cutoff_height: Option<u64>,
}
impl Default for NetworkBuilder {
@ -27,34 +32,24 @@ impl Default for NetworkBuilder {
discovery_port: 0,
listener_port: 0,
chain_spec: HlChainSpec::default(),
debug_cutoff_height: None,
}
}
}
impl NetworkBuilder {
pub fn with_secret(mut self, secret: SecretKey) -> Self {
self.secret = secret;
self
}
pub fn with_peer_config(mut self, peer_config: PeersConfig) -> Self {
self.peer_config = peer_config;
self
}
pub fn with_boot_nodes(mut self, boot_nodes: Vec<TrustedPeer>) -> Self {
self.boot_nodes = boot_nodes;
self
}
pub fn with_ports(mut self, discovery_port: u16, listener_port: u16) -> Self {
self.discovery_port = discovery_port;
self.listener_port = listener_port;
pub fn with_chain_spec(mut self, chain_spec: HlChainSpec) -> Self {
self.chain_spec = chain_spec;
self
}
pub fn with_chain_spec(mut self, chain_spec: HlChainSpec) -> Self {
self.chain_spec = chain_spec;
pub fn with_debug_cutoff_height(mut self, debug_cutoff_height: Option<u64>) -> Self {
self.debug_cutoff_height = debug_cutoff_height;
self
}
@ -66,12 +61,16 @@ impl NetworkBuilder {
let builder = NetworkConfig::<(), HlNetworkPrimitives>::builder(self.secret)
.boot_nodes(self.boot_nodes)
.peer_config(self.peer_config)
.discovery_port(self.discovery_port)
.listener_port(self.listener_port);
.discovery_addr(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), self.discovery_port))
.listener_addr(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), self.listener_port));
let chain_id = self.chain_spec.inner.chain().id();
let (block_poller, start_tx) =
BlockPoller::new_suspended(chain_id, block_source, blockhash_cache);
let (block_poller, start_tx) = BlockPoller::new_suspended(
chain_id,
block_source,
blockhash_cache,
self.debug_cutoff_height,
);
let config = builder.block_import(Box::new(block_poller)).build(Arc::new(NoopProvider::<
HlChainSpec,
HlPrimitives,
@ -89,10 +88,12 @@ pub async fn create_network_manager<BS>(
destination_peer: String,
block_source: Arc<Box<dyn super::sources::BlockSource>>,
blockhash_cache: BlockHashCache,
debug_cutoff_height: Option<u64>,
) -> eyre::Result<(NetworkManager<HlNetworkPrimitives>, mpsc::Sender<()>)> {
NetworkBuilder::default()
.with_boot_nodes(vec![TrustedPeer::from_str(&destination_peer).unwrap()])
.with_chain_spec(chain_spec)
.with_debug_cutoff_height(debug_cutoff_height)
.build::<BS>(block_source, blockhash_cache)
.await
}

View File

@ -26,7 +26,6 @@ use std::{
pin::Pin,
sync::{Arc, Mutex},
task::{Context, Poll},
time::Duration,
};
use tokio::{sync::mpsc, task::JoinHandle};
use tracing::{debug, info};
@ -49,18 +48,16 @@ pub struct BlockPoller {
}
impl BlockPoller {
const POLL_INTERVAL: Duration = Duration::from_millis(25);
pub fn new_suspended<BS: BlockSource>(
chain_id: u64,
block_source: BS,
blockhash_cache: BlockHashCache,
debug_cutoff_height: Option<u64>,
) -> (Self, mpsc::Sender<()>) {
let block_source = Arc::new(block_source);
let (start_tx, start_rx) = mpsc::channel(1);
let (block_tx, block_rx) = mpsc::channel(100);
let block_tx_clone = block_tx.clone();
let task = tokio::spawn(Self::task(start_rx, block_source, block_tx_clone));
let task = tokio::spawn(Self::task(start_rx, block_source, block_tx, debug_cutoff_height));
(Self { chain_id, block_rx, task, blockhash_cache: blockhash_cache.clone() }, start_tx)
}
@ -72,25 +69,33 @@ impl BlockPoller {
async fn task<BS: BlockSource>(
mut start_rx: mpsc::Receiver<()>,
block_source: Arc<BS>,
block_tx_clone: mpsc::Sender<(u64, BlockAndReceipts)>,
block_tx: mpsc::Sender<(u64, BlockAndReceipts)>,
debug_cutoff_height: Option<u64>,
) -> eyre::Result<()> {
start_rx.recv().await.ok_or(eyre::eyre!("Failed to receive start signal"))?;
info!("Starting block poller");
let latest_block_number = block_source
let polling_interval = block_source.polling_interval();
let mut next_block_number = block_source
.find_latest_block_number()
.await
.ok_or(eyre::eyre!("Failed to find latest block number"))?;
let mut next_block_number = latest_block_number;
loop {
let Ok(block) = block_source.collect_block(next_block_number).await else {
tokio::time::sleep(Self::POLL_INTERVAL).await;
continue;
};
block_tx_clone.send((next_block_number, block)).await?;
if let Some(debug_cutoff_height) = debug_cutoff_height
&& next_block_number > debug_cutoff_height
{
next_block_number = debug_cutoff_height;
}
match block_source.collect_block(next_block_number).await {
Ok(block) => {
block_tx.send((next_block_number, block)).await?;
next_block_number += 1;
}
Err(_) => tokio::time::sleep(polling_interval).await,
}
}
}
}
@ -111,8 +116,7 @@ impl BlockImport<HlNewBlock> for BlockPoller {
},
}))
}
Poll::Ready(None) => Poll::Pending,
Poll::Pending => Poll::Pending,
Poll::Ready(None) | Poll::Pending => Poll::Pending,
}
}
@ -155,14 +159,14 @@ impl<BS: BlockSource> PseudoPeer<BS> {
async fn collect_blocks(
&self,
block_numbers: impl IntoIterator<Item = u64>,
) -> Vec<BlockAndReceipts> {
) -> eyre::Result<Vec<BlockAndReceipts>> {
let block_numbers = block_numbers.into_iter().collect::<Vec<_>>();
let blocks = futures::stream::iter(block_numbers)
.map(async |number| self.collect_block(number).await.unwrap())
let res = futures::stream::iter(block_numbers)
.map(async |number| self.collect_block(number).await)
.buffered(self.block_source.recommended_chunk_size() as usize)
.collect::<Vec<_>>()
.await;
blocks
res.into_iter().collect()
}
pub async fn process_eth_request(
@ -179,7 +183,6 @@ impl<BS: BlockSource> PseudoPeer<BS> {
debug!(
"GetBlockHeaders request: {start_block:?}, {limit:?}, {skip:?}, {direction:?}"
);
let number = match start_block {
HashOrNumber::Hash(hash) => self.hash_to_block_number(hash).await,
HashOrNumber::Number(number) => number,
@ -190,7 +193,7 @@ impl<BS: BlockSource> PseudoPeer<BS> {
HeadersDirection::Falling => {
self.collect_blocks((number + 1 - limit..number + 1).rev()).await
}
}
}?
.into_par_iter()
.map(|block| block.to_reth_block(chain_id).header.clone())
.collect::<Vec<_>>();
@ -208,19 +211,15 @@ impl<BS: BlockSource> PseudoPeer<BS> {
let block_bodies = self
.collect_blocks(numbers)
.await
.await?
.into_iter()
.map(|block| block.to_reth_block(chain_id).body)
.collect::<Vec<_>>();
let _ = response.send(Ok(BlockBodies(block_bodies)));
}
IncomingEthRequest::GetNodeData { .. } => {
debug!("GetNodeData request: {eth_req:?}");
}
eth_req => {
debug!("New eth protocol request: {eth_req:?}");
}
IncomingEthRequest::GetNodeData { .. } => debug!("GetNodeData request: {eth_req:?}"),
eth_req => debug!("New eth protocol request: {eth_req:?}"),
}
Ok(())
}
@ -251,7 +250,6 @@ impl<BS: BlockSource> PseudoPeer<BS> {
// This is tricky because raw EVM files (BlockSource) do not have a hash-to-number mapping,
// so we can either enumerate all blocks to build one, or fall back to an
// official RPC. The latter is much easier but has a 300/day rate limit.
use jsonrpsee::http_client::HttpClientBuilder;
use jsonrpsee_core::client::ClientT;
@ -259,7 +257,6 @@ impl<BS: BlockSource> PseudoPeer<BS> {
let client =
HttpClientBuilder::default().build(self.chain_spec.official_rpc_url()).unwrap();
let target_block: Block = client.request("eth_getBlockByHash", (hash, false)).await?;
debug!("From official RPC: {:?} for {hash:?}", target_block.header.number);
self.cache_blocks([(hash, target_block.header.number)]);
Ok(target_block.header.number)
@ -272,10 +269,11 @@ impl<BS: BlockSource> PseudoPeer<BS> {
if self.if_hit_then_warm_around.lock().unwrap().contains(&block_number) {
self.warm_cache_around_blocks(block_number, self.warm_cache_size).await;
}
return Some(block_number);
}
Some(block_number)
} else {
None
}
}
/// Backfill the cache with blocks to find the target hash
async fn backfill_cache_for_hash(
@ -319,10 +317,11 @@ impl<BS: BlockSource> PseudoPeer<BS> {
async fn warm_cache_around_blocks(&mut self, block_number: u64, chunk_size: u64) {
let start = std::cmp::max(block_number.saturating_sub(chunk_size), 1);
let end = std::cmp::min(block_number + chunk_size, self.known_latest_block_number);
self.if_hit_then_warm_around.lock().unwrap().insert(start);
self.if_hit_then_warm_around.lock().unwrap().insert(end);
{
let mut guard = self.if_hit_then_warm_around.lock().unwrap();
guard.insert(start);
guard.insert(end);
}
const IMPOSSIBLE_HASH: B256 = B256::ZERO;
let _ = self.try_block_range_for_hash(start, end, IMPOSSIBLE_HASH).await;
}
@ -348,15 +347,12 @@ impl<BS: BlockSource> PseudoPeer<BS> {
}
debug!("Backfilling from {start_number} to {end_number}");
// Collect blocks and cache them
let blocks = self.collect_blocks(uncached_block_numbers).await;
let blocks = self.collect_blocks(uncached_block_numbers).await?;
let block_map: HashMap<B256, u64> =
blocks.into_iter().map(|block| (block.hash(), block.number())).collect();
let maybe_block_number = block_map.get(&target_hash).copied();
self.cache_blocks(block_map);
Ok(maybe_block_number)
}

View File

@ -0,0 +1,48 @@
use super::{BlockSource, BlockSourceBoxed};
use crate::node::types::BlockAndReceipts;
use futures::{FutureExt, future::BoxFuture};
use reth_network::cache::LruMap;
use std::sync::{Arc, RwLock};
/// Block source wrapper that caches blocks in memory
#[derive(Debug, Clone)]
pub struct CachedBlockSource {
block_source: BlockSourceBoxed,
cache: Arc<RwLock<LruMap<u64, BlockAndReceipts>>>,
}
impl CachedBlockSource {
const CACHE_LIMIT: u32 = 100000;
pub fn new(block_source: BlockSourceBoxed) -> Self {
Self { block_source, cache: Arc::new(RwLock::new(LruMap::new(Self::CACHE_LIMIT))) }
}
}
impl BlockSource for CachedBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
let block_source = self.block_source.clone();
let cache = self.cache.clone();
async move {
if let Some(block) = cache.write().unwrap().get(&height) {
return Ok(block.clone());
}
let block = block_source.collect_block(height).await?;
cache.write().unwrap().insert(height, block.clone());
Ok(block)
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
self.block_source.find_latest_block_number()
}
fn recommended_chunk_size(&self) -> u64 {
self.block_source.recommended_chunk_size()
}
fn polling_interval(&self) -> std::time::Duration {
self.block_source.polling_interval()
}
}
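Usage is a one-line wrap; a sketch assuming an existing boxed source:

use std::sync::Arc;

fn wrap_with_cache(inner: BlockSourceBoxed) -> BlockSourceBoxed {
    // Repeated requests for the same height now hit the in-memory LRU map
    // instead of going back to S3 or disk.
    Arc::new(Box::new(CachedBlockSource::new(inner)))
}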

View File

@ -1,635 +0,0 @@
use super::{BlockSource, BlockSourceBoxed};
use crate::node::types::{BlockAndReceipts, EvmBlock};
use futures::future::BoxFuture;
use rangemap::RangeInclusiveMap;
use reth_network::cache::LruMap;
use serde::{Deserialize, Serialize};
use std::{
fs::File,
io::{BufRead, BufReader, Read, Seek, SeekFrom},
ops::RangeInclusive,
path::{Path, PathBuf},
sync::Arc,
};
use time::{macros::format_description, Date, Duration, OffsetDateTime, Time};
use tokio::sync::Mutex;
use tracing::{info, warn};
const TAIL_INTERVAL: std::time::Duration = std::time::Duration::from_millis(25);
const HOURLY_SUBDIR: &str = "hourly";
#[derive(Debug)]
pub struct LocalBlocksCache {
cache: LruMap<u64, BlockAndReceipts>,
// Lightweight range map to track the ranges of blocks in the local ingest directory
ranges: RangeInclusiveMap<u64, PathBuf>,
}
impl LocalBlocksCache {
// 3660 blocks per hour
const CACHE_SIZE: u32 = 8000;
fn new() -> Self {
Self { cache: LruMap::new(Self::CACHE_SIZE), ranges: RangeInclusiveMap::new() }
}
fn load_scan_result(&mut self, scan_result: ScanResult) {
for blk in scan_result.new_blocks {
let EvmBlock::Reth115(b) = &blk.block;
self.cache.insert(b.header.header.number, blk);
}
for range in scan_result.new_block_ranges {
self.ranges.insert(range, scan_result.path.clone());
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct LocalBlockAndReceipts(String, BlockAndReceipts);
struct ScanResult {
path: PathBuf,
next_expected_height: u64,
new_blocks: Vec<BlockAndReceipts>,
new_block_ranges: Vec<RangeInclusive<u64>>,
}
struct ScanOptions {
start_height: u64,
only_load_ranges: bool,
}
fn line_to_evm_block(line: &str) -> serde_json::Result<(BlockAndReceipts, u64)> {
let LocalBlockAndReceipts(_block_timestamp, parsed_block): LocalBlockAndReceipts =
serde_json::from_str(line)?;
let height = match &parsed_block.block {
EvmBlock::Reth115(b) => b.header.header.number,
};
Ok((parsed_block, height))
}
fn scan_hour_file(path: &Path, last_line: &mut usize, options: ScanOptions) -> ScanResult {
let file = File::open(path).expect("Failed to open hour file path");
let reader = BufReader::new(file);
let ScanOptions { start_height, only_load_ranges } = options;
let mut new_blocks = Vec::new();
let mut last_height = start_height;
let lines: Vec<String> = reader.lines().collect::<Result<_, _>>().unwrap();
let skip = if *last_line == 0 { 0 } else { *last_line - 1 };
let mut block_ranges = Vec::new();
let mut current_range: Option<(u64, u64)> = None;
for (line_idx, line) in lines.iter().enumerate().skip(skip) {
if line_idx < *last_line || line.trim().is_empty() {
continue;
}
match line_to_evm_block(line) {
Ok((parsed_block, height)) => {
if height >= start_height {
last_height = last_height.max(height);
if !only_load_ranges {
new_blocks.push(parsed_block);
}
*last_line = line_idx;
}
match current_range {
Some((start, end)) if end + 1 == height => {
current_range = Some((start, height));
}
_ => {
if let Some((start, end)) = current_range.take() {
block_ranges.push(start..=end);
}
current_range = Some((height, height));
}
}
}
Err(_) => {
warn!("Failed to parse line: {}...", line.get(0..50).unwrap_or(line));
continue;
}
}
}
if let Some((start, end)) = current_range {
block_ranges.push(start..=end);
}
ScanResult {
path: path.to_path_buf(),
next_expected_height: last_height + 1,
new_blocks,
new_block_ranges: block_ranges,
}
}
fn date_from_datetime(dt: OffsetDateTime) -> String {
dt.format(&format_description!("[year][month][day]")).unwrap()
}
/// Block source that monitors the local ingest directory for the HL node.
#[derive(Debug, Clone)]
pub struct HlNodeBlockSource {
pub fallback: BlockSourceBoxed,
pub local_ingest_dir: PathBuf,
pub local_blocks_cache: Arc<Mutex<LocalBlocksCache>>, // height → block
// for rate limiting requests to fallback
pub last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>,
}
impl BlockSource for HlNodeBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
Box::pin(async move {
let now = OffsetDateTime::now_utc();
if let Some(block) = self.try_collect_local_block(height).await {
self.update_last_fetch(height, now).await;
return Ok(block);
}
if let Some((last_height, last_poll_time)) = *self.last_local_fetch.lock().await {
let more_recent = last_height < height;
let too_soon = now - last_poll_time < Self::MAX_ALLOWED_THRESHOLD_BEFORE_FALLBACK;
if more_recent && too_soon {
return Err(eyre::eyre!(
"Not found locally; limiting polling rate before fallback so that hl-node has chance to catch up"
));
}
}
let block = self.fallback.collect_block(height).await?;
self.update_last_fetch(height, now).await;
Ok(block)
})
}
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
Box::pin(async move {
let Some(dir) = Self::find_latest_hourly_file(&self.local_ingest_dir) else {
warn!(
"No EVM blocks from hl-node found at {:?}; fallback to s3/ingest-dir",
self.local_ingest_dir
);
return self.fallback.find_latest_block_number().await;
};
let mut file = File::open(&dir).expect("Failed to open hour file path");
if let Some((_, height)) = read_last_complete_line(&mut file) {
info!("Latest block number: {} with path {}", height, dir.display());
Some(height)
} else {
warn!(
"Failed to parse the hl-node hourly file at {:?}; fallback to s3/ingest-dir",
file
);
self.fallback.find_latest_block_number().await
}
})
}
fn recommended_chunk_size(&self) -> u64 {
self.fallback.recommended_chunk_size()
}
}
fn read_last_complete_line<R: Read + Seek>(read: &mut R) -> Option<(BlockAndReceipts, u64)> {
const CHUNK_SIZE: u64 = 50000;
let mut buf = Vec::with_capacity(CHUNK_SIZE as usize);
let mut pos = read.seek(SeekFrom::End(0)).unwrap();
let mut last_line = Vec::new();
while pos > 0 {
let read_size = std::cmp::min(pos, CHUNK_SIZE);
buf.resize(read_size as usize, 0);
read.seek(SeekFrom::Start(pos - read_size)).unwrap();
read.read_exact(&mut buf).unwrap();
last_line = [buf.clone(), last_line].concat();
if last_line.ends_with(b"\n") {
last_line.pop();
}
if let Some(idx) = last_line.iter().rposition(|&b| b == b'\n') {
let candidate = &last_line[idx + 1..];
if let Ok((evm_block, height)) = line_to_evm_block(str::from_utf8(candidate).unwrap()) {
return Some((evm_block, height));
}
// Incomplete line; truncate and continue
last_line.truncate(idx);
}
if pos < read_size {
break;
}
pos -= read_size;
}
line_to_evm_block(&String::from_utf8(last_line).unwrap()).ok()
}
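The backward, chunked scan above can be exercised in isolation; a dependency-free sketch over any Read + Seek that returns the last newline-delimited line, without the parse-and-retry step (last_line is an illustrative name):

use std::io::{Cursor, Read, Seek, SeekFrom};

fn last_line<R: Read + Seek>(read: &mut R) -> Option<String> {
    const CHUNK: u64 = 4096;
    let mut pos = read.seek(SeekFrom::End(0)).ok()?;
    let mut tail: Vec<u8> = Vec::new();
    while pos > 0 {
        let n = pos.min(CHUNK);
        let mut buf = vec![0u8; n as usize];
        read.seek(SeekFrom::Start(pos - n)).ok()?;
        read.read_exact(&mut buf).ok()?;
        // Prepend the chunk we just read to the bytes gathered so far.
        buf.extend_from_slice(&tail);
        tail = buf;
        if tail.ends_with(b"\n") {
            tail.pop(); // ignore the file's trailing newline
        }
        if let Some(i) = tail.iter().rposition(|&b| b == b'\n') {
            return String::from_utf8(tail[i + 1..].to_vec()).ok();
        }
        pos -= n;
    }
    String::from_utf8(tail).ok()
}

fn main() {
    let mut data = Cursor::new(b"first\nsecond\nthird\n".to_vec());
    assert_eq!(last_line(&mut data).as_deref(), Some("third"));
}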
impl HlNodeBlockSource {
/// [HlNodeBlockSource] picks the faster of the local ingest directory and s3/ingest-dir.
/// But if we fell back to s3/ingest-dir immediately, we could issue unnecessary
/// requests to S3 that would just return 404.
///
/// To avoid that, we wait for a short threshold period before falling back.
/// This threshold is several times longer than the expected block time, reducing redundant
/// fallback attempts.
pub(crate) const MAX_ALLOWED_THRESHOLD_BEFORE_FALLBACK: Duration = Duration::milliseconds(5000);
async fn update_last_fetch(&self, height: u64, now: OffsetDateTime) {
let mut last_fetch = self.last_local_fetch.lock().await;
if let Some((last_height, _)) = *last_fetch {
if last_height >= height {
return;
}
}
*last_fetch = Some((height, now));
}
async fn try_collect_local_block(&self, height: u64) -> Option<BlockAndReceipts> {
let mut u_cache = self.local_blocks_cache.lock().await;
if let Some(block) = u_cache.cache.remove(&height) {
return Some(block);
}
let path = u_cache.ranges.get(&height).cloned()?;
info!("Loading block data from {:?}", path);
u_cache.load_scan_result(scan_hour_file(
&path,
&mut 0,
ScanOptions { start_height: 0, only_load_ranges: false },
));
u_cache.cache.get(&height).cloned()
}
fn datetime_from_path(path: &Path) -> Option<OffsetDateTime> {
let dt_part = path.parent()?.file_name()?.to_str()?;
let hour_part = path.file_name()?.to_str()?;
let hour: u8 = hour_part.parse().ok()?;
Some(OffsetDateTime::new_utc(
Date::parse(dt_part, &format_description!("[year][month][day]")).ok()?,
Time::from_hms(hour, 0, 0).ok()?,
))
}
fn all_hourly_files(root: &Path) -> Option<Vec<PathBuf>> {
let dir = root.join(HOURLY_SUBDIR);
let mut files = Vec::new();
for entry in std::fs::read_dir(dir).ok()? {
let file = entry.ok()?.path();
let subfiles: Vec<_> = std::fs::read_dir(&file)
.ok()?
.filter_map(|f| f.ok().map(|f| f.path()))
.filter(|p| Self::datetime_from_path(p).is_some())
.collect();
files.extend(subfiles);
}
files.sort();
Some(files)
}
fn find_latest_hourly_file(root: &Path) -> Option<PathBuf> {
Self::all_hourly_files(root)?.last().cloned()
}
async fn try_backfill_local_blocks(
root: &Path,
cache: &Arc<Mutex<LocalBlocksCache>>,
cutoff_height: u64,
) -> eyre::Result<()> {
let mut u_cache = cache.lock().await;
for subfile in Self::all_hourly_files(root).unwrap_or_default() {
let mut file = File::open(&subfile).expect("Failed to open hour file path");
if let Some((_, height)) = read_last_complete_line(&mut file) {
if height < cutoff_height {
continue;
}
} else {
warn!("Failed to parse last line of file, fallback to slow path: {:?}", subfile);
}
let mut scan_result = scan_hour_file(
&subfile,
&mut 0,
ScanOptions { start_height: cutoff_height, only_load_ranges: true },
);
// Only store the block ranges for now; actual block data will be loaded lazily later to
// optimize memory usage
scan_result.new_blocks.clear();
u_cache.load_scan_result(scan_result);
}
if u_cache.ranges.is_empty() {
warn!("No ranges found in {:?}", root);
} else {
let (min, _) = u_cache.ranges.first_range_value().unwrap();
let (max, _) = u_cache.ranges.last_range_value().unwrap();
info!(
"Populated {} ranges (min: {}, max: {})",
u_cache.ranges.len(),
min.start(),
max.end()
);
}
Ok(())
}
async fn start_local_ingest_loop(&self, current_head: u64) {
let root = self.local_ingest_dir.to_owned();
let cache = self.local_blocks_cache.clone();
tokio::spawn(async move {
let mut next_height = current_head;
// Wait for the first hourly file to be created
let mut dt = loop {
if let Some(latest_file) = Self::find_latest_hourly_file(&root) {
break Self::datetime_from_path(&latest_file).unwrap();
}
tokio::time::sleep(TAIL_INTERVAL).await;
};
let mut hour = dt.hour();
let mut day_str = date_from_datetime(dt);
let mut last_line = 0;
info!("Starting local ingest loop from height: {:?}", current_head);
loop {
let hour_file = root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"));
if hour_file.exists() {
let scan_result = scan_hour_file(
&hour_file,
&mut last_line,
ScanOptions { start_height: next_height, only_load_ranges: false },
);
next_height = scan_result.next_expected_height;
let mut u_cache = cache.lock().await;
u_cache.load_scan_result(scan_result);
}
let now = OffsetDateTime::now_utc();
if dt + Duration::HOUR < now {
dt += Duration::HOUR;
hour = dt.hour();
day_str = date_from_datetime(dt);
last_line = 0;
info!(
"Moving to a new file. {:?}",
root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"))
);
continue;
}
tokio::time::sleep(TAIL_INTERVAL).await;
}
});
}
pub(crate) async fn run(&self, next_block_number: u64) -> eyre::Result<()> {
let _ = Self::try_backfill_local_blocks(
&self.local_ingest_dir,
&self.local_blocks_cache,
next_block_number,
)
.await;
self.start_local_ingest_loop(next_block_number).await;
Ok(())
}
pub async fn new(
fallback: BlockSourceBoxed,
local_ingest_dir: PathBuf,
next_block_number: u64,
) -> Self {
let block_source = HlNodeBlockSource {
fallback,
local_ingest_dir,
local_blocks_cache: Arc::new(Mutex::new(LocalBlocksCache::new())),
last_local_fetch: Arc::new(Mutex::new(None)),
};
block_source.run(next_block_number).await.unwrap();
block_source
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
node::types::{reth_compat, ReadPrecompileCalls},
pseudo_peer::sources::LocalBlockSource,
};
use alloy_consensus::{BlockBody, Header};
use alloy_primitives::{Address, Bloom, Bytes, B256, B64, U256};
use std::{io::Write, time::Duration};
#[test]
fn test_datetime_from_path() {
let path = Path::new("/home/username/hl/data/evm_block_and_receipts/hourly/20250731/4");
let dt = HlNodeBlockSource::datetime_from_path(path).unwrap();
println!("{dt:?}");
}
#[tokio::test]
async fn test_backfill() {
let test_path = Path::new("/root/evm_block_and_receipts");
if !test_path.exists() {
return;
}
let cache = Arc::new(Mutex::new(LocalBlocksCache::new()));
HlNodeBlockSource::try_backfill_local_blocks(test_path, &cache, 1000000).await.unwrap();
let u_cache = cache.lock().await;
println!("{:?}", u_cache.ranges);
assert_eq!(
u_cache.ranges.get(&9735058),
Some(&test_path.join(HOURLY_SUBDIR).join("20250729").join("22"))
);
}
fn scan_result_from_single_block(block: BlockAndReceipts) -> ScanResult {
let height = match &block.block {
EvmBlock::Reth115(b) => b.header.header.number,
};
ScanResult {
path: PathBuf::from("/nonexistent-block"),
next_expected_height: height + 1,
new_blocks: vec![block],
new_block_ranges: vec![height..=height],
}
}
fn empty_block(
number: u64,
timestamp: u64,
extra_data: &'static [u8],
) -> LocalBlockAndReceipts {
let extra_data = Bytes::from_static(extra_data);
let res = BlockAndReceipts {
block: EvmBlock::Reth115(reth_compat::SealedBlock {
header: reth_compat::SealedHeader {
header: Header {
parent_hash: B256::ZERO,
ommers_hash: B256::ZERO,
beneficiary: Address::ZERO,
state_root: B256::ZERO,
transactions_root: B256::ZERO,
receipts_root: B256::ZERO,
logs_bloom: Bloom::ZERO,
difficulty: U256::ZERO,
number,
gas_limit: 0,
gas_used: 0,
timestamp,
extra_data,
mix_hash: B256::ZERO,
nonce: B64::ZERO,
base_fee_per_gas: None,
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
requests_hash: None,
},
hash: B256::ZERO,
},
body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None },
}),
receipts: vec![],
system_txs: vec![],
read_precompile_calls: ReadPrecompileCalls(vec![]),
highest_precompile_address: None,
};
LocalBlockAndReceipts(timestamp.to_string(), res)
}
fn setup_temp_dir_and_file() -> eyre::Result<(tempfile::TempDir, File)> {
let now = OffsetDateTime::now_utc();
let day_str = date_from_datetime(now);
let hour = now.hour();
let temp_dir = tempfile::tempdir()?;
let path = temp_dir.path().join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"));
std::fs::create_dir_all(path.parent().unwrap())?;
Ok((temp_dir, File::create(path)?))
}
struct BlockSourceHierarchy {
block_source: HlNodeBlockSource,
_temp_dir: tempfile::TempDir,
file1: File,
current_block: LocalBlockAndReceipts,
future_block_hl_node: LocalBlockAndReceipts,
future_block_fallback: LocalBlockAndReceipts,
}
async fn setup_block_source_hierarchy() -> eyre::Result<BlockSourceHierarchy> {
// Setup fallback block source
let block_source_fallback = HlNodeBlockSource::new(
BlockSourceBoxed::new(Box::new(LocalBlockSource::new("/nonexistent"))),
PathBuf::from("/nonexistent"),
1000000,
)
.await;
let block_hl_node_0 = empty_block(1000000, 1722633600, b"hl-node");
let block_hl_node_1 = empty_block(1000001, 1722633600, b"hl-node");
let block_fallback_1 = empty_block(1000001, 1722633600, b"fallback");
let (temp_dir1, mut file1) = setup_temp_dir_and_file()?;
writeln!(&mut file1, "{}", serde_json::to_string(&block_hl_node_0)?)?;
let block_source = HlNodeBlockSource::new(
BlockSourceBoxed::new(Box::new(block_source_fallback.clone())),
temp_dir1.path().to_path_buf(),
1000000,
)
.await;
block_source_fallback
.local_blocks_cache
.lock()
.await
.load_scan_result(scan_result_from_single_block(block_fallback_1.1.clone()));
Ok(BlockSourceHierarchy {
block_source,
_temp_dir: temp_dir1,
file1,
current_block: block_hl_node_0,
future_block_hl_node: block_hl_node_1,
future_block_fallback: block_fallback_1,
})
}
#[tokio::test]
async fn test_update_last_fetch_no_fallback() -> eyre::Result<()> {
let hierarchy = setup_block_source_hierarchy().await?;
let BlockSourceHierarchy {
block_source,
current_block,
future_block_hl_node,
mut file1,
..
} = hierarchy;
let block = block_source.collect_block(1000000).await.unwrap();
assert_eq!(block, current_block.1);
let block = block_source.collect_block(1000001).await;
assert!(block.is_err());
writeln!(&mut file1, "{}", serde_json::to_string(&future_block_hl_node)?)?;
tokio::time::sleep(Duration::from_millis(100)).await;
let block = block_source.collect_block(1000001).await.unwrap();
assert_eq!(block, future_block_hl_node.1);
Ok(())
}
#[tokio::test]
async fn test_update_last_fetch_fallback() -> eyre::Result<()> {
let hierarchy = setup_block_source_hierarchy().await?;
let BlockSourceHierarchy {
block_source,
current_block,
future_block_fallback,
mut file1,
..
} = hierarchy;
let block = block_source.collect_block(1000000).await.unwrap();
assert_eq!(block, current_block.1);
tokio::time::sleep(HlNodeBlockSource::MAX_ALLOWED_THRESHOLD_BEFORE_FALLBACK.unsigned_abs())
.await;
writeln!(&mut file1, "{}", serde_json::to_string(&future_block_fallback)?)?;
let block = block_source.collect_block(1000001).await.unwrap();
assert_eq!(block, future_block_fallback.1);
Ok(())
}
}

View File

@ -0,0 +1,51 @@
use super::scan::ScanResult;
use crate::node::types::{BlockAndReceipts, EvmBlock};
use rangemap::RangeInclusiveMap;
use reth_network::cache::LruMap;
use std::path::{Path, PathBuf};
use tracing::{info, warn};
#[derive(Debug)]
pub struct LocalBlocksCache {
cache: LruMap<u64, BlockAndReceipts>,
ranges: RangeInclusiveMap<u64, PathBuf>,
}
impl LocalBlocksCache {
pub fn new(cache_size: u32) -> Self {
Self { cache: LruMap::new(cache_size), ranges: RangeInclusiveMap::new() }
}
pub fn load_scan_result(&mut self, scan_result: ScanResult) {
for blk in scan_result.new_blocks {
let EvmBlock::Reth115(b) = &blk.block;
self.cache.insert(b.header.header.number, blk);
}
for range in scan_result.new_block_ranges {
self.ranges.insert(range, scan_result.path.clone());
}
}
pub fn get_block(&mut self, height: u64) -> Option<BlockAndReceipts> {
self.cache.get(&height).cloned()
}
pub fn get_path_for_height(&self, height: u64) -> Option<PathBuf> {
self.ranges.get(&height).cloned()
}
pub fn log_range_summary(&self, root: &Path) {
if self.ranges.is_empty() {
warn!("No ranges found in {:?}", root);
} else {
let (min, max) =
(self.ranges.first_range_value().unwrap(), self.ranges.last_range_value().unwrap());
info!(
"Populated {} ranges (min: {}, max: {})",
self.ranges.len(),
min.0.start(),
max.0.end()
);
}
}
}
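
A minimal usage sketch (not part of the PR), assuming the ScanResult shape defined in scan.rs below; it mirrors the backfill-then-lazy-load flow in mod.rs.

fn cache_flow_sketch(mut scan_result: ScanResult) {
let mut cache = LocalBlocksCache::new(8000);
scan_result.new_blocks.clear(); // backfill keeps ranges only, dropping block bodies
cache.load_scan_result(scan_result);
if cache.get_block(9_735_058).is_none() {
// Miss: resolve the owning hour file, rescan it, then retry the LRU.
let _path_to_rescan = cache.get_path_for_height(9_735_058);
}
}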

View File

@ -0,0 +1,67 @@
use super::{HOURLY_SUBDIR, scan::Scanner, time_utils::TimeUtils};
use crate::node::types::BlockAndReceipts;
use std::{
fs::File,
io::{Read, Seek, SeekFrom},
path::{Path, PathBuf},
};
pub struct FileOperations;
impl FileOperations {
pub fn all_hourly_files(root: &Path) -> Option<Vec<PathBuf>> {
let mut files = Vec::new();
for entry in std::fs::read_dir(root.join(HOURLY_SUBDIR)).ok()? {
let dir = entry.ok()?.path();
if let Ok(subentries) = std::fs::read_dir(&dir) {
files.extend(
subentries
.filter_map(|f| f.ok().map(|f| f.path()))
.filter_map(|p| TimeUtils::datetime_from_path(&p).map(|dt| (dt, p))),
);
}
}
files.sort();
Some(files.into_iter().map(|(_, p)| p).collect())
}
pub fn find_latest_hourly_file(root: &Path) -> Option<PathBuf> {
Self::all_hourly_files(root)?.into_iter().last()
}
pub fn read_last_block_from_file(path: &Path) -> Option<(BlockAndReceipts, u64)> {
let mut file = File::open(path).ok()?;
Self::read_last_complete_line(&mut file)
}
fn read_last_complete_line<R: Read + Seek>(read: &mut R) -> Option<(BlockAndReceipts, u64)> {
const CHUNK_SIZE: u64 = 50000;
let mut buf = Vec::with_capacity(CHUNK_SIZE as usize);
let mut pos = read.seek(SeekFrom::End(0)).ok()?;
let mut last_line = Vec::new();
while pos > 0 {
let read_size = pos.min(CHUNK_SIZE);
buf.resize(read_size as usize, 0);
read.seek(SeekFrom::Start(pos - read_size)).ok()?;
read.read_exact(&mut buf).ok()?;
last_line = [buf.clone(), last_line].concat();
if last_line.ends_with(b"\n") {
last_line.pop();
}
if let Some(idx) = last_line.iter().rposition(|&b| b == b'\n') {
let candidate = &last_line[idx + 1..];
if let Some(result) =
str::from_utf8(candidate).ok().and_then(|s| Scanner::line_to_evm_block(s).ok())
{
return Some(result);
}
last_line.truncate(idx);
}
if pos < read_size {
break;
}
pos -= read_size;
}
str::from_utf8(&last_line).ok().and_then(|s| Scanner::line_to_evm_block(s).ok())
}
}
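
A simplified, self-contained sketch of the backward scan above, with a generic parse closure standing in for Scanner::line_to_evm_block and a smaller chunk size; truncation only ever cuts at a newline, so a line spanning a chunk boundary is retried once its start is loaded.

use std::io::{Read, Seek, SeekFrom};
fn last_parseable_line<R: Read + Seek>(
read: &mut R,
parse: impl Fn(&str) -> Option<u64>,
) -> Option<u64> {
const CHUNK: u64 = 4096;
let mut pos = read.seek(SeekFrom::End(0)).ok()?;
let mut tail: Vec<u8> = Vec::new();
while pos > 0 {
let take = pos.min(CHUNK);
let mut buf = vec![0u8; take as usize];
read.seek(SeekFrom::Start(pos - take)).ok()?;
read.read_exact(&mut buf).ok()?;
tail.splice(0..0, buf); // prepend the newly read chunk
// Try each complete line visible so far, newest first.
while let Some(idx) = tail.iter().rposition(|&b| b == b'\n') {
let candidate = std::str::from_utf8(&tail[idx + 1..]).ok();
if let Some(v) = candidate.and_then(|s| parse(s.trim_end())) {
return Some(v);
}
tail.truncate(idx);
}
pos -= take;
}
// No newline left: the remaining bytes are the (possibly only) first line.
std::str::from_utf8(&tail).ok().and_then(|s| parse(s.trim_end()))
}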

View File

@ -0,0 +1,270 @@
mod cache;
mod file_ops;
mod scan;
#[cfg(test)]
mod tests;
mod time_utils;
use self::{
cache::LocalBlocksCache,
file_ops::FileOperations,
scan::{LineStream, ScanOptions, Scanner},
time_utils::TimeUtils,
};
use super::{BlockSource, BlockSourceBoxed};
use crate::node::types::BlockAndReceipts;
use futures::future::BoxFuture;
use reth_metrics::{Metrics, metrics, metrics::Counter};
use std::{
path::{Path, PathBuf},
sync::Arc,
time::Duration,
};
use time::OffsetDateTime;
use tokio::sync::Mutex;
use tracing::{info, warn};
const HOURLY_SUBDIR: &str = "hourly";
const CACHE_SIZE: u32 = 8000; // 3660 blocks per hour
const ONE_HOUR: Duration = Duration::from_secs(60 * 60);
const TAIL_INTERVAL: Duration = Duration::from_millis(25);
#[derive(Debug, Clone)]
pub struct HlNodeBlockSourceArgs {
pub root: PathBuf,
pub fallback_threshold: Duration,
}
/// Block source that tails the local ingest directory written by the hl-node.
#[derive(Debug, Clone)]
pub struct HlNodeBlockSource {
pub fallback: BlockSourceBoxed,
pub local_blocks_cache: Arc<Mutex<LocalBlocksCache>>,
pub last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>,
pub args: HlNodeBlockSourceArgs,
pub metrics: HlNodeBlockSourceMetrics,
}
#[derive(Metrics, Clone)]
#[metrics(scope = "block_source.hl_node")]
pub struct HlNodeBlockSourceMetrics {
/// How many times a block was served from local hl-node data
pub fetched_from_hl_node: Counter,
/// How many times a block was fetched from the fallback source
pub fetched_from_fallback: Counter,
/// How many times `try_collect_local_block` had to read an hour file before the ingest loop cached it
pub file_read_triggered: Counter,
}
impl BlockSource for HlNodeBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
let fallback = self.fallback.clone();
let args = self.args.clone();
let local_blocks_cache = self.local_blocks_cache.clone();
let last_local_fetch = self.last_local_fetch.clone();
let metrics = self.metrics.clone();
Box::pin(async move {
let now = OffsetDateTime::now_utc();
if let Some(block) =
Self::try_collect_local_block(&metrics, local_blocks_cache, height).await
{
Self::update_last_fetch(last_local_fetch, height, now).await;
metrics.fetched_from_hl_node.increment(1);
return Ok(block);
}
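// Local miss: rate-limit the fallback path below so hl-node gets a chance to catch up.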
if let Some((last_height, last_poll_time)) = *last_local_fetch.lock().await {
let more_recent = last_height < height;
let too_soon = now - last_poll_time < args.fallback_threshold;
if more_recent && too_soon {
return Err(eyre::eyre!(
"Not found locally; limiting polling rate before fallback so that hl-node has chance to catch up"
));
}
}
let block = fallback.collect_block(height).await?;
metrics.fetched_from_fallback.increment(1);
Self::update_last_fetch(last_local_fetch, height, now).await;
Ok(block)
})
}
fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
let fallback = self.fallback.clone();
let args = self.args.clone();
Box::pin(async move {
let Some(dir) = FileOperations::find_latest_hourly_file(&args.root) else {
warn!(
"No EVM blocks from hl-node found at {:?}; fallback to s3/ingest-dir",
args.root
);
return fallback.find_latest_block_number().await;
};
match FileOperations::read_last_block_from_file(&dir) {
Some((_, height)) => {
info!("Latest block number: {} with path {}", height, dir.display());
Some(height)
}
None => {
warn!(
"Failed to parse the hl-node hourly file at {:?}; fallback to s3/ingest-dir",
dir
);
fallback.find_latest_block_number().await
}
}
})
}
fn recommended_chunk_size(&self) -> u64 {
self.fallback.recommended_chunk_size()
}
}
struct CurrentFile {
path: PathBuf,
line_stream: Option<LineStream>,
}
impl CurrentFile {
pub fn from_datetime(dt: OffsetDateTime, root: &Path) -> Self {
let (hour, day_str) = (dt.hour(), TimeUtils::date_from_datetime(dt));
let path = root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"));
Self { path, line_stream: None }
}
pub fn open(&mut self) -> eyre::Result<()> {
if self.line_stream.is_some() {
return Ok(());
}
self.line_stream = Some(LineStream::from_path(&self.path)?);
Ok(())
}
}
impl HlNodeBlockSource {
async fn update_last_fetch(
last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>,
height: u64,
now: OffsetDateTime,
) {
let mut last_fetch = last_local_fetch.lock().await;
if last_fetch.is_none_or(|(h, _)| h < height) {
*last_fetch = Some((height, now));
}
}
async fn try_collect_local_block(
metrics: &HlNodeBlockSourceMetrics,
local_blocks_cache: Arc<Mutex<LocalBlocksCache>>,
height: u64,
) -> Option<BlockAndReceipts> {
let mut u_cache = local_blocks_cache.lock().await;
if let Some(block) = u_cache.get_block(height) {
return Some(block);
}
let path = u_cache.get_path_for_height(height)?;
info!("Loading block data from {:?}", path);
metrics.file_read_triggered.increment(1);
let mut line_stream = LineStream::from_path(&path).ok()?;
let scan_result = Scanner::scan_hour_file(
&mut line_stream,
ScanOptions { start_height: 0, only_load_ranges: false },
);
u_cache.load_scan_result(scan_result);
u_cache.get_block(height)
}
async fn try_backfill_local_blocks(
root: &Path,
cache: &Arc<Mutex<LocalBlocksCache>>,
cutoff_height: u64,
) -> eyre::Result<()> {
let mut u_cache = cache.lock().await;
for subfile in FileOperations::all_hourly_files(root).unwrap_or_default() {
if let Some((_, height)) = FileOperations::read_last_block_from_file(&subfile) {
if height < cutoff_height {
continue;
}
} else {
warn!("Failed to parse last line of file: {:?}", subfile);
}
let mut line_stream =
LineStream::from_path(&subfile).expect("Failed to open line stream");
let mut scan_result = Scanner::scan_hour_file(
&mut line_stream,
ScanOptions { start_height: cutoff_height, only_load_ranges: true },
);
scan_result.new_blocks.clear(); // Only store ranges, load data lazily
u_cache.load_scan_result(scan_result);
}
u_cache.log_range_summary(root);
Ok(())
}
async fn start_local_ingest_loop(&self, current_head: u64) {
let root = self.args.root.to_owned();
let cache = self.local_blocks_cache.clone();
tokio::spawn(async move {
let mut next_height = current_head;
let mut dt = loop {
if let Some(f) = FileOperations::find_latest_hourly_file(&root) {
break TimeUtils::datetime_from_path(&f).unwrap();
}
tokio::time::sleep(TAIL_INTERVAL).await;
};
let mut current_file = CurrentFile::from_datetime(dt, &root);
info!("Starting local ingest loop from height: {}", current_head);
loop {
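// The current hour file may not exist yet; ignore open errors and retry next tick.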
let _ = current_file.open();
if let Some(line_stream) = &mut current_file.line_stream {
let scan_result = Scanner::scan_hour_file(
line_stream,
ScanOptions { start_height: next_height, only_load_ranges: false },
);
next_height = scan_result.next_expected_height;
cache.lock().await.load_scan_result(scan_result);
}
let now = OffsetDateTime::now_utc();
if dt + ONE_HOUR < now {
dt += ONE_HOUR;
current_file = CurrentFile::from_datetime(dt, &root);
info!("Moving to new file: {:?}", current_file.path);
continue;
}
tokio::time::sleep(TAIL_INTERVAL).await;
}
});
}
pub(crate) async fn run(&self, next_block_number: u64) -> eyre::Result<()> {
let _ = Self::try_backfill_local_blocks(
&self.args.root,
&self.local_blocks_cache,
next_block_number,
)
.await;
self.start_local_ingest_loop(next_block_number).await;
Ok(())
}
pub async fn new(
fallback: BlockSourceBoxed,
args: HlNodeBlockSourceArgs,
next_block_number: u64,
) -> Self {
let block_source = Self {
fallback,
args,
local_blocks_cache: Arc::new(Mutex::new(LocalBlocksCache::new(CACHE_SIZE))),
last_local_fetch: Arc::new(Mutex::new(None)),
metrics: HlNodeBlockSourceMetrics::default(),
};
block_source.run(next_block_number).await.unwrap();
block_source
}
}
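
A wiring sketch, not part of the PR: it mirrors how the tests below construct the source. LocalBlockSource comes from the sibling local.rs module; both paths and the threshold are illustrative assumptions.

async fn build_hl_node_source() -> HlNodeBlockSource {
// Fallback chain: --ingest-dir snapshots sit behind the hl-node hourly files.
let fallback = BlockSourceBoxed::new(Box::new(super::LocalBlockSource::new("/data/ingest-dir")));
HlNodeBlockSource::new(
fallback,
HlNodeBlockSourceArgs {
root: "/home/user/hl/data/evm_block_and_receipts".into(), // hypothetical path
fallback_threshold: Duration::from_secs(5),
},
1_000_000, // next block number to start ingesting from
)
.await
}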

View File

@ -0,0 +1,132 @@
use crate::node::types::{BlockAndReceipts, EvmBlock};
use serde::{Deserialize, Serialize};
use std::{
fs::File,
io::{BufRead, BufReader, Seek, SeekFrom},
ops::RangeInclusive,
path::{Path, PathBuf},
};
use tracing::warn;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LocalBlockAndReceipts(pub String, pub BlockAndReceipts);
pub struct ScanResult {
pub path: PathBuf,
pub next_expected_height: u64,
pub new_blocks: Vec<BlockAndReceipts>,
pub new_block_ranges: Vec<RangeInclusive<u64>>,
}
pub struct ScanOptions {
pub start_height: u64,
pub only_load_ranges: bool,
}
pub struct Scanner;
/// Stream for sequentially reading lines from a file.
///
/// This struct allows sequential iteration over lines via the [Self::next] method.
/// It is resilient to cases where the line producer process is interrupted while writing:
/// - If a line is incomplete but still ends with a line ending, it is skipped: later, the fallback
/// block source will be used to retrieve the missing block.
/// - If a line does not end with a newline (i.e., the write was incomplete), the method returns
/// `None` to break out of the loop and avoid reading partial data.
/// - If a temporary I/O error occurs, the stream exits the loop without rewinding the cursor, which
/// will result in skipping ahead to the next unread bytes.
pub struct LineStream {
path: PathBuf,
reader: BufReader<File>,
}
impl LineStream {
pub fn from_path(path: &Path) -> std::io::Result<Self> {
let reader = BufReader::with_capacity(1024 * 1024, File::open(path)?);
Ok(Self { path: path.to_path_buf(), reader })
}
pub fn next(&mut self) -> Option<String> {
let mut line_buffer = vec![];
let Ok(size) = self.reader.read_until(b'\n', &mut line_buffer) else {
// Temporary I/O error; give up for now and let the caller retry on the next tick
return None;
};
// The cursor now sits just past the end of the line
// On UTF-8 error, skip the line by returning an empty string
let Ok(mut line) = String::from_utf8(line_buffer) else {
return Some(String::new());
};
// If the line is not complete yet, return None so the caller can break out of its loop
if line.ends_with('\n') {
if line.ends_with('\r') {
line.pop();
}
line.pop();
return Some(line);
}
// info!("Line is not completed yet: {}", line);
if size != 0 {
self.reader.seek(SeekFrom::Current(-(size as i64))).unwrap();
}
None
}
}
impl Scanner {
pub fn line_to_evm_block(line: &str) -> serde_json::Result<(BlockAndReceipts, u64)> {
let LocalBlockAndReceipts(_, parsed_block): LocalBlockAndReceipts =
serde_json::from_str(line)?;
let height = match &parsed_block.block {
EvmBlock::Reth115(b) => b.header.header.number,
};
Ok((parsed_block, height))
}
pub fn scan_hour_file(line_stream: &mut LineStream, options: ScanOptions) -> ScanResult {
let mut new_blocks = Vec::new();
let mut last_height = options.start_height;
let mut block_ranges = Vec::new();
let mut current_range: Option<(u64, u64)> = None;
while let Some(line) = line_stream.next() {
match Self::line_to_evm_block(&line) {
Ok((parsed_block, height)) => {
if height >= options.start_height {
last_height = last_height.max(height);
if !options.only_load_ranges {
new_blocks.push(parsed_block);
}
}
match current_range {
Some((start, end)) if end + 1 == height => {
current_range = Some((start, height))
}
_ => {
if let Some((start, end)) = current_range.take() {
block_ranges.push(start..=end);
}
current_range = Some((height, height));
}
}
}
Err(_) => warn!("Failed to parse line: {}...", line.get(0..50).unwrap_or(&line)),
}
}
if let Some((start, end)) = current_range {
block_ranges.push(start..=end);
}
ScanResult {
path: line_stream.path.clone(),
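// One past the last tracked height if any line parsed; otherwise stay at start_height.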
next_expected_height: last_height + current_range.is_some() as u64,
new_blocks,
new_block_ranges: block_ranges,
}
}
}
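
A sketch of the tailing contract described above, assuming the tempfile dev-dependency that the tests module also uses: a write without a trailing newline is not consumed, and the rewound cursor picks the line up once it is completed.

#[cfg(test)]
mod line_stream_sketch {
use super::LineStream;
use std::io::Write;
#[test]
fn partial_writes_are_not_consumed() -> std::io::Result<()> {
let dir = tempfile::tempdir()?;
let path = dir.path().join("hour");
let mut file = std::fs::File::create(&path)?;
write!(file, "first line\nsecond li")?;
let mut stream = LineStream::from_path(&path)?;
assert_eq!(stream.next().as_deref(), Some("first line"));
assert_eq!(stream.next(), None); // incomplete line: cursor rewound
write!(file, "ne\n")?;
assert_eq!(stream.next().as_deref(), Some("second line"));
Ok(())
}
}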

View File

@ -0,0 +1,214 @@
use super::*;
use crate::{
node::types::{ReadPrecompileCalls, reth_compat},
pseudo_peer::sources::{LocalBlockSource, hl_node::scan::LocalBlockAndReceipts},
};
use alloy_consensus::{BlockBody, Header};
use alloy_primitives::{Address, B64, B256, Bloom, Bytes, U256};
use std::{io::Write, time::Duration};
const DEFAULT_FALLBACK_THRESHOLD_FOR_TEST: Duration = Duration::from_millis(5000);
#[test]
fn test_datetime_from_path() {
let path = Path::new("/home/username/hl/data/evm_block_and_receipts/hourly/20250731/4");
let dt = TimeUtils::datetime_from_path(path).unwrap();
println!("{dt:?}");
}
#[tokio::test]
async fn test_backfill() {
let test_path = Path::new("/root/evm_block_and_receipts");
if !test_path.exists() {
return;
}
let cache = Arc::new(Mutex::new(LocalBlocksCache::new(CACHE_SIZE)));
HlNodeBlockSource::try_backfill_local_blocks(test_path, &cache, 1000000).await.unwrap();
let u_cache = cache.lock().await;
assert_eq!(
u_cache.get_path_for_height(9735058),
Some(test_path.join(HOURLY_SUBDIR).join("20250729").join("22"))
);
}
fn scan_result_from_single_block(block: BlockAndReceipts) -> scan::ScanResult {
use crate::node::types::EvmBlock;
let height = match &block.block {
EvmBlock::Reth115(b) => b.header.header.number,
};
scan::ScanResult {
path: PathBuf::from("/nonexistent-block"),
next_expected_height: height + 1,
new_blocks: vec![block],
new_block_ranges: vec![height..=height],
}
}
fn empty_block(number: u64, timestamp: u64, extra_data: &'static [u8]) -> LocalBlockAndReceipts {
use crate::node::types::EvmBlock;
LocalBlockAndReceipts(
timestamp.to_string(),
BlockAndReceipts {
block: EvmBlock::Reth115(reth_compat::SealedBlock {
header: reth_compat::SealedHeader {
header: Header {
parent_hash: B256::ZERO,
ommers_hash: B256::ZERO,
beneficiary: Address::ZERO,
state_root: B256::ZERO,
transactions_root: B256::ZERO,
receipts_root: B256::ZERO,
logs_bloom: Bloom::ZERO,
difficulty: U256::ZERO,
number,
gas_limit: 0,
gas_used: 0,
timestamp,
extra_data: Bytes::from_static(extra_data),
mix_hash: B256::ZERO,
nonce: B64::ZERO,
base_fee_per_gas: None,
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
requests_hash: None,
},
hash: B256::ZERO,
},
body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None },
}),
receipts: vec![],
system_txs: vec![],
read_precompile_calls: ReadPrecompileCalls(vec![]),
highest_precompile_address: None,
},
)
}
fn setup_temp_dir_and_file() -> eyre::Result<(tempfile::TempDir, std::fs::File)> {
let now = OffsetDateTime::now_utc();
let temp_dir = tempfile::tempdir()?;
let path = temp_dir
.path()
.join(HOURLY_SUBDIR)
.join(TimeUtils::date_from_datetime(now))
.join(format!("{}", now.hour()));
std::fs::create_dir_all(path.parent().unwrap())?;
Ok((temp_dir, std::fs::File::create(path)?))
}
struct BlockSourceHierarchy {
block_source: HlNodeBlockSource,
_temp_dir: tempfile::TempDir,
file1: std::fs::File,
current_block: LocalBlockAndReceipts,
future_block_hl_node: LocalBlockAndReceipts,
future_block_fallback: LocalBlockAndReceipts,
}
async fn setup_block_source_hierarchy() -> eyre::Result<BlockSourceHierarchy> {
// Setup fallback block source
let block_source_fallback = HlNodeBlockSource::new(
BlockSourceBoxed::new(Box::new(LocalBlockSource::new("/nonexistent"))),
HlNodeBlockSourceArgs {
root: PathBuf::from("/nonexistent"),
fallback_threshold: DEFAULT_FALLBACK_THRESHOLD_FOR_TEST,
},
1000000,
)
.await;
let block_hl_node_0 = empty_block(1000000, 1722633600, b"hl-node");
let block_hl_node_1 = empty_block(1000001, 1722633600, b"hl-node");
let block_fallback_1 = empty_block(1000001, 1722633600, b"fallback");
let (temp_dir1, mut file1) = setup_temp_dir_and_file()?;
writeln!(&mut file1, "{}", serde_json::to_string(&block_hl_node_0)?)?;
let block_source = HlNodeBlockSource::new(
BlockSourceBoxed::new(Box::new(block_source_fallback.clone())),
HlNodeBlockSourceArgs {
root: temp_dir1.path().to_path_buf(),
fallback_threshold: DEFAULT_FALLBACK_THRESHOLD_FOR_TEST,
},
1000000,
)
.await;
block_source_fallback
.local_blocks_cache
.lock()
.await
.load_scan_result(scan_result_from_single_block(block_fallback_1.1.clone()));
Ok(BlockSourceHierarchy {
block_source,
_temp_dir: temp_dir1,
file1,
current_block: block_hl_node_0,
future_block_hl_node: block_hl_node_1,
future_block_fallback: block_fallback_1,
})
}
#[tokio::test]
async fn test_update_last_fetch_no_fallback() -> eyre::Result<()> {
let hierarchy = setup_block_source_hierarchy().await?;
let BlockSourceHierarchy {
block_source, current_block, future_block_hl_node, mut file1, ..
} = hierarchy;
let block = block_source.collect_block(1000000).await.unwrap();
assert_eq!(block, current_block.1);
let block = block_source.collect_block(1000001).await;
assert!(block.is_err());
writeln!(&mut file1, "{}", serde_json::to_string(&future_block_hl_node)?)?;
tokio::time::sleep(Duration::from_millis(100)).await;
let block = block_source.collect_block(1000001).await.unwrap();
assert_eq!(block, future_block_hl_node.1);
Ok(())
}
#[tokio::test]
async fn test_update_last_fetch_fallback() -> eyre::Result<()> {
let hierarchy = setup_block_source_hierarchy().await?;
let BlockSourceHierarchy {
block_source, current_block, future_block_fallback, mut file1, ..
} = hierarchy;
let block = block_source.collect_block(1000000).await.unwrap();
assert_eq!(block, current_block.1);
tokio::time::sleep(DEFAULT_FALLBACK_THRESHOLD_FOR_TEST).await;
writeln!(&mut file1, "{}", serde_json::to_string(&future_block_fallback)?)?;
let block = block_source.collect_block(1000001).await.unwrap();
assert_eq!(block, future_block_fallback.1);
Ok(())
}
#[test]
fn test_hourly_files_sort() -> eyre::Result<()> {
let temp_dir = tempfile::tempdir()?;
// create 20250826/9, 20250826/14
let targets = [("20250826", "9"), ("20250826", "14")];
for (date, hour) in targets {
let hourly_file = temp_dir.path().join(HOURLY_SUBDIR).join(date).join(hour);
let parent = hourly_file.parent().unwrap();
std::fs::create_dir_all(parent)?;
std::fs::File::create(hourly_file)?;
}
let files = FileOperations::all_hourly_files(temp_dir.path()).unwrap();
let file_names: Vec<_> =
files.into_iter().map(|p| p.file_name().unwrap().to_string_lossy().into_owned()).collect();
assert_eq!(file_names, ["9", "14"]);
Ok(())
}

View File

@ -0,0 +1,19 @@
use std::path::Path;
use time::{Date, OffsetDateTime, Time, macros::format_description};
pub struct TimeUtils;
impl TimeUtils {
pub fn datetime_from_path(path: &Path) -> Option<OffsetDateTime> {
let (dt_part, hour_part) =
(path.parent()?.file_name()?.to_str()?, path.file_name()?.to_str()?);
Some(OffsetDateTime::new_utc(
Date::parse(dt_part, &format_description!("[year][month][day]")).ok()?,
Time::from_hms(hour_part.parse().ok()?, 0, 0).ok()?,
))
}
pub fn date_from_datetime(dt: OffsetDateTime) -> String {
dt.format(&format_description!("[year][month][day]")).unwrap()
}
}
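
A worked example of the path mapping, assuming the layout used throughout: .../hourly/20250731/4 decodes to 2025-07-31 04:00 UTC, and the date formats back to "20250731".

#[cfg(test)]
mod time_utils_sketch {
use super::TimeUtils;
use std::path::Path;
#[test]
fn path_round_trip() {
let dt = TimeUtils::datetime_from_path(Path::new("/data/hourly/20250731/4")).unwrap();
assert_eq!((dt.year(), dt.month() as u8, dt.day(), dt.hour()), (2025, 7, 31, 4));
assert_eq!(TimeUtils::date_from_datetime(dt), "20250731");
}
}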

View File

@ -0,0 +1,79 @@
use super::{BlockSource, utils};
use crate::node::types::BlockAndReceipts;
use eyre::Context;
use futures::{FutureExt, future::BoxFuture};
use reth_metrics::{Metrics, metrics, metrics::Counter};
use std::path::PathBuf;
use tracing::info;
/// Block source that reads blocks from local filesystem (--ingest-dir)
#[derive(Debug, Clone)]
pub struct LocalBlockSource {
dir: PathBuf,
metrics: LocalBlockSourceMetrics,
}
#[derive(Metrics, Clone)]
#[metrics(scope = "block_source.local")]
pub struct LocalBlockSourceMetrics {
/// How many times the local block source has polled for a block
pub polling_attempt: Counter,
/// How many times a block was fetched from the local filesystem
pub fetched: Counter,
}
impl LocalBlockSource {
pub fn new(dir: impl Into<PathBuf>) -> Self {
Self { dir: dir.into(), metrics: LocalBlockSourceMetrics::default() }
}
async fn pick_path_with_highest_number(dir: PathBuf, is_dir: bool) -> Option<(u64, String)> {
// Skip unreadable entries instead of panicking on a missing or racing directory.
let files = std::fs::read_dir(&dir)
.ok()?
.filter_map(|entry| entry.ok())
.filter(|entry| entry.path().is_dir() == is_dir)
.map(|entry| entry.path().to_string_lossy().to_string())
.collect::<Vec<_>>();
utils::name_with_largest_number(&files, is_dir)
}
}
impl BlockSource for LocalBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
let dir = self.dir.clone();
let metrics = self.metrics.clone();
async move {
let path = dir.join(utils::rmp_path(height));
metrics.polling_attempt.increment(1);
let file = tokio::fs::read(&path)
.await
.wrap_err_with(|| format!("Failed to read block from {path:?}"))?;
let mut decoder = lz4_flex::frame::FrameDecoder::new(&file[..]);
let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
metrics.fetched.increment(1);
Ok(blocks[0].clone())
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
let dir = self.dir.clone();
async move {
let (_, first_level) = Self::pick_path_with_highest_number(dir.clone(), true).await?;
let (_, second_level) =
Self::pick_path_with_highest_number(dir.join(first_level), true).await?;
let (block_number, third_level) =
Self::pick_path_with_highest_number(dir.join(second_level), false).await?;
info!("Latest block number: {} with path {}", block_number, third_level);
Some(block_number)
}
.boxed()
}
fn recommended_chunk_size(&self) -> u64 {
1000
}
}

View File

@ -1,269 +1,40 @@
use crate::node::types::BlockAndReceipts;
use aws_sdk_s3::types::RequestPayer;
use eyre::Context;
use futures::{future::BoxFuture, FutureExt};
use reth_network::cache::LruMap;
use std::{
path::PathBuf,
sync::{Arc, RwLock},
};
use tracing::info;
use auto_impl::auto_impl;
use futures::future::BoxFuture;
use std::{sync::Arc, time::Duration};
// Module declarations
mod cached;
mod hl_node;
pub use hl_node::HlNodeBlockSource;
mod local;
mod s3;
mod utils;
// Public exports
pub use cached::CachedBlockSource;
pub use hl_node::{HlNodeBlockSource, HlNodeBlockSourceArgs};
pub use local::LocalBlockSource;
pub use s3::S3BlockSource;
const DEFAULT_POLLING_INTERVAL: Duration = Duration::from_millis(25);
/// Trait for block sources that can retrieve blocks from various sources
#[auto_impl(&, &mut, Box, Arc)]
pub trait BlockSource: Send + Sync + std::fmt::Debug + Unpin + 'static {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>>;
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>>;
/// Retrieves a block at the specified height
fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>>;
/// Finds the latest block number available from this source
fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>>;
/// Returns the recommended chunk size for batch operations
fn recommended_chunk_size(&self) -> u64;
/// Returns the polling interval
fn polling_interval(&self) -> Duration {
DEFAULT_POLLING_INTERVAL
}
}
/// Type alias for a boxed block source
pub type BlockSourceBoxed = Arc<Box<dyn BlockSource>>;
fn name_with_largest_number(files: &[String], is_dir: bool) -> Option<(u64, String)> {
let mut files = files
.iter()
.filter_map(|file_raw| {
let file = file_raw.strip_suffix("/").unwrap_or(file_raw).split("/").last().unwrap();
let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
})
.collect::<Vec<_>>();
if files.is_empty() {
return None;
}
files.sort_by_key(|(number, _)| *number);
files.last().cloned()
}
#[derive(Debug, Clone)]
pub struct S3BlockSource {
client: aws_sdk_s3::Client,
bucket: String,
}
impl S3BlockSource {
pub fn new(client: aws_sdk_s3::Client, bucket: String) -> Self {
Self { client, bucket }
}
async fn pick_path_with_highest_number(
client: aws_sdk_s3::Client,
bucket: String,
dir: String,
is_dir: bool,
) -> Option<(u64, String)> {
let request = client
.list_objects()
.bucket(&bucket)
.prefix(dir)
.delimiter("/")
.request_payer(RequestPayer::Requester);
let response = request.send().await.ok()?;
let files: Vec<String> = if is_dir {
response
.common_prefixes
.unwrap()
.iter()
.map(|object| object.prefix.as_ref().unwrap().to_string())
.collect()
} else {
response
.contents
.unwrap()
.iter()
.map(|object| object.key.as_ref().unwrap().to_string())
.collect()
};
name_with_largest_number(&files, is_dir)
}
}
impl BlockSource for S3BlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
let client = self.client.clone();
let bucket = self.bucket.clone();
async move {
let path = rmp_path(height);
let request = client
.get_object()
.request_payer(RequestPayer::Requester)
.bucket(&bucket)
.key(path);
let response = request.send().await?;
let bytes = response.body.collect().await?.into_bytes();
let mut decoder = lz4_flex::frame::FrameDecoder::new(&bytes[..]);
let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
Ok(blocks[0].clone())
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
let client = self.client.clone();
let bucket = self.bucket.clone();
async move {
let (_, first_level) = Self::pick_path_with_highest_number(
client.clone(),
bucket.clone(),
"".to_string(),
true,
)
.await?;
let (_, second_level) = Self::pick_path_with_highest_number(
client.clone(),
bucket.clone(),
first_level,
true,
)
.await?;
let (block_number, third_level) = Self::pick_path_with_highest_number(
client.clone(),
bucket.clone(),
second_level,
false,
)
.await?;
info!("Latest block number: {} with path {}", block_number, third_level);
Some(block_number)
}
.boxed()
}
fn recommended_chunk_size(&self) -> u64 {
1000
}
}
impl BlockSource for LocalBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
let dir = self.dir.clone();
async move {
let path = dir.join(rmp_path(height));
let file = tokio::fs::read(&path)
.await
.wrap_err_with(|| format!("Failed to read block from {path:?}"))?;
let mut decoder = lz4_flex::frame::FrameDecoder::new(&file[..]);
let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
Ok(blocks[0].clone())
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
let dir = self.dir.clone();
async move {
let (_, first_level) = Self::pick_path_with_highest_number(dir.clone(), true).await?;
let (_, second_level) =
Self::pick_path_with_highest_number(dir.join(first_level), true).await?;
let (block_number, third_level) =
Self::pick_path_with_highest_number(dir.join(second_level), false).await?;
info!("Latest block number: {} with path {}", block_number, third_level);
Some(block_number)
}
.boxed()
}
fn recommended_chunk_size(&self) -> u64 {
1000
}
}
#[derive(Debug, Clone)]
pub struct LocalBlockSource {
dir: PathBuf,
}
impl LocalBlockSource {
pub fn new(dir: impl Into<PathBuf>) -> Self {
Self { dir: dir.into() }
}
fn name_with_largest_number_static(files: &[String], is_dir: bool) -> Option<(u64, String)> {
let mut files = files
.iter()
.filter_map(|file_raw| {
let file = file_raw.strip_suffix("/").unwrap_or(file_raw);
let file = file.split("/").last().unwrap();
let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
})
.collect::<Vec<_>>();
if files.is_empty() {
return None;
}
files.sort_by_key(|(number, _)| *number);
files.last().map(|(number, file)| (*number, file.to_string()))
}
async fn pick_path_with_highest_number(dir: PathBuf, is_dir: bool) -> Option<(u64, String)> {
let files = std::fs::read_dir(&dir).unwrap().collect::<Vec<_>>();
let files = files
.into_iter()
.filter(|path| path.as_ref().unwrap().path().is_dir() == is_dir)
.map(|entry| entry.unwrap().path().to_string_lossy().to_string())
.collect::<Vec<_>>();
Self::name_with_largest_number_static(&files, is_dir)
}
}
fn rmp_path(height: u64) -> String {
let f = ((height - 1) / 1_000_000) * 1_000_000;
let s = ((height - 1) / 1_000) * 1_000;
let path = format!("{f}/{s}/{height}.rmp.lz4");
path
}
impl BlockSource for BlockSourceBoxed {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
self.as_ref().collect_block(height)
}
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
self.as_ref().find_latest_block_number()
}
fn recommended_chunk_size(&self) -> u64 {
self.as_ref().recommended_chunk_size()
}
}
#[derive(Debug, Clone)]
pub struct CachedBlockSource {
block_source: BlockSourceBoxed,
cache: Arc<RwLock<LruMap<u64, BlockAndReceipts>>>,
}
impl CachedBlockSource {
const CACHE_LIMIT: u32 = 100000;
pub fn new(block_source: BlockSourceBoxed) -> Self {
Self { block_source, cache: Arc::new(RwLock::new(LruMap::new(Self::CACHE_LIMIT))) }
}
}
impl BlockSource for CachedBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
let block_source = self.block_source.clone();
let cache = self.cache.clone();
async move {
if let Some(block) = cache.write().unwrap().get(&height) {
return Ok(block.clone());
}
let block = block_source.collect_block(height).await?;
cache.write().unwrap().insert(height, block.clone());
Ok(block)
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
self.block_source.find_latest_block_number()
}
fn recommended_chunk_size(&self) -> u64 {
self.block_source.recommended_chunk_size()
}
}

View File

@ -0,0 +1,115 @@
use super::{BlockSource, utils};
use crate::node::types::BlockAndReceipts;
use aws_sdk_s3::types::RequestPayer;
use futures::{FutureExt, future::BoxFuture};
use reth_metrics::{Metrics, metrics, metrics::Counter};
use std::{sync::Arc, time::Duration};
use tracing::info;
/// Block source that reads blocks from S3 (--s3)
#[derive(Debug, Clone)]
pub struct S3BlockSource {
client: Arc<aws_sdk_s3::Client>,
bucket: String,
polling_interval: Duration,
metrics: S3BlockSourceMetrics,
}
#[derive(Metrics, Clone)]
#[metrics(scope = "block_source.s3")]
pub struct S3BlockSourceMetrics {
/// How many times the S3 block source has polled for a block
pub polling_attempt: Counter,
/// How many times a block was successfully fetched from S3
pub fetched: Counter,
}
impl S3BlockSource {
pub fn new(client: aws_sdk_s3::Client, bucket: String, polling_interval: Duration) -> Self {
Self {
client: client.into(),
bucket,
polling_interval,
metrics: S3BlockSourceMetrics::default(),
}
}
async fn pick_path_with_highest_number(
client: &aws_sdk_s3::Client,
bucket: &str,
dir: &str,
is_dir: bool,
) -> Option<(u64, String)> {
let request = client
.list_objects()
.bucket(bucket)
.prefix(dir)
.delimiter("/")
.request_payer(RequestPayer::Requester);
let response = request.send().await.ok()?;
let files: Vec<String> = if is_dir {
response
.common_prefixes?
.iter()
.map(|object| object.prefix.as_ref().unwrap().to_string())
.collect()
} else {
response
.contents?
.iter()
.map(|object| object.key.as_ref().unwrap().to_string())
.collect()
};
utils::name_with_largest_number(&files, is_dir)
}
}
impl BlockSource for S3BlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
let client = self.client.clone();
let bucket = self.bucket.clone();
let metrics = self.metrics.clone();
async move {
let path = utils::rmp_path(height);
metrics.polling_attempt.increment(1);
let request = client
.get_object()
.request_payer(RequestPayer::Requester)
.bucket(&bucket)
.key(path);
let response = request.send().await?;
metrics.fetched.increment(1);
let bytes = response.body.collect().await?.into_bytes();
let mut decoder = lz4_flex::frame::FrameDecoder::new(&bytes[..]);
let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
Ok(blocks[0].clone())
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
let client = self.client.clone();
let bucket = self.bucket.clone();
async move {
let (_, first_level) =
Self::pick_path_with_highest_number(&client, &bucket, "", true).await?;
let (_, second_level) =
Self::pick_path_with_highest_number(&client, &bucket, &first_level, true).await?;
let (block_number, third_level) =
Self::pick_path_with_highest_number(&client, &bucket, &second_level, false).await?;
info!("Latest block number: {} with path {}", block_number, third_level);
Some(block_number)
}
.boxed()
}
fn recommended_chunk_size(&self) -> u64 {
1000
}
fn polling_interval(&self) -> Duration {
self.polling_interval
}
}
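
A construction sketch, with assumptions flagged: aws-config is in the dependency tree, credentials come from the environment, and the bucket name and interval are illustrative.

async fn build_s3_source() -> S3BlockSource {
let config = aws_config::load_from_env().await; // assumes the aws-config crate
let client = aws_sdk_s3::Client::new(&config);
S3BlockSource::new(client, "hl-mainnet-evm-blocks".to_string(), Duration::from_millis(25))
}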

View File

@ -0,0 +1,26 @@
//! Shared utilities for block sources
/// Finds the file/directory with the largest number in its name from a list of files
pub fn name_with_largest_number(files: &[String], is_dir: bool) -> Option<(u64, String)> {
let mut files = files
.iter()
.filter_map(|file_raw| {
let file = file_raw.strip_suffix("/").unwrap_or(file_raw);
let file = file.split("/").last().unwrap();
let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
})
.collect::<Vec<_>>();
if files.is_empty() {
return None;
}
files.sort_by_key(|(number, _)| *number);
files.last().cloned()
}
/// Generates the RMP file path for a given block height
pub fn rmp_path(height: u64) -> String {
let f = ((height - 1) / 1_000_000) * 1_000_000;
let s = ((height - 1) / 1_000) * 1_000;
format!("{f}/{s}/{height}.rmp.lz4")
}
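
A worked example of the bucket arithmetic: heights are grouped into million- and thousand-wide directories, offset by one so that height 1 lands in bucket 0.

#[cfg(test)]
mod rmp_path_sketch {
use super::rmp_path;
#[test]
fn bucket_arithmetic() {
assert_eq!(rmp_path(1), "0/0/1.rmp.lz4");
assert_eq!(rmp_path(1_000), "0/0/1000.rmp.lz4");
assert_eq!(rmp_path(1_001), "0/1000/1001.rmp.lz4");
assert_eq!(rmp_path(1_000_001), "1000000/1000000/1000001.rmp.lz4");
}
}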

View File

@ -1,30 +0,0 @@
use std::path::Path;
use crate::pseudo_peer::{prelude::*, BlockSourceType};
#[tokio::test]
async fn test_block_source_config_s3() {
let config = BlockSourceConfig::s3("test-bucket".to_string()).await;
assert!(
matches!(config.source_type, BlockSourceType::S3 { bucket } if bucket == "test-bucket")
);
}
#[tokio::test]
async fn test_block_source_config_local() {
let config = BlockSourceConfig::local("/test/path".into());
assert!(
matches!(config.source_type, BlockSourceType::Local { path } if path == Path::new("/test/path"))
);
}
#[test]
fn test_error_types() {
let io_error = std::io::Error::new(std::io::ErrorKind::NotFound, "File not found");
let benchmark_error: PseudoPeerError = io_error.into();
match benchmark_error {
PseudoPeerError::Io(_) => (),
_ => panic!("Expected Io error"),
}
}

35
src/version.rs Normal file
View File

@ -0,0 +1,35 @@
use std::borrow::Cow;
use reth_node_core::version::{RethCliVersionConsts, try_init_version_metadata};
pub fn init_reth_hl_version() {
let cargo_pkg_version = env!("CARGO_PKG_VERSION").to_string();
let short = env!("RETH_HL_SHORT_VERSION").to_string();
let long = format!(
"{}\n{}\n{}\n{}\n{}",
env!("RETH_HL_LONG_VERSION_0"),
env!("RETH_HL_LONG_VERSION_1"),
env!("RETH_HL_LONG_VERSION_2"),
env!("RETH_HL_LONG_VERSION_3"),
env!("RETH_HL_LONG_VERSION_4"),
);
let p2p = env!("RETH_HL_P2P_CLIENT_VERSION").to_string();
let meta = RethCliVersionConsts {
name_client: Cow::Borrowed("reth_hl"),
cargo_pkg_version: Cow::Owned(cargo_pkg_version.clone()),
vergen_git_sha_long: Cow::Owned(env!("VERGEN_GIT_SHA").to_string()),
vergen_git_sha: Cow::Owned(env!("VERGEN_GIT_SHA_SHORT").to_string()),
vergen_build_timestamp: Cow::Owned(env!("VERGEN_BUILD_TIMESTAMP").to_string()),
vergen_cargo_target_triple: Cow::Owned(env!("VERGEN_CARGO_TARGET_TRIPLE").to_string()),
vergen_cargo_features: Cow::Owned(env!("VERGEN_CARGO_FEATURES").to_string()),
short_version: Cow::Owned(short),
long_version: Cow::Owned(long),
build_profile_name: Cow::Owned(env!("RETH_HL_BUILD_PROFILE").to_string()),
p2p_client_version: Cow::Owned(p2p),
extra_data: Cow::Owned(format!("reth_hl/v{}/{}", cargo_pkg_version, std::env::consts::OS)),
};
let _ = try_init_version_metadata(meta);
}

49
tests/run_tests.sh Normal file
View File

@ -0,0 +1,49 @@
#!/bin/bash
set -e
export ETH_RPC_URL="${ETH_RPC_URL:-wss://hl-archive-node.xyz}"
success() {
echo "Success: $1"
}
fail() {
echo "Failed: $1"
exit 1
}
ensure_cmd() {
command -v "$1" > /dev/null 2>&1 || fail "$1 is required"
}
ensure_cmd jq
ensure_cmd cast
ensure_cmd wscat
if [[ ! "$ETH_RPC_URL" =~ ^wss?:// ]]; then
fail "ETH_RPC_URL must be a websocket url"
fi
TITLE="Issue #78 - eth_getLogs should return system transactions"
cast logs \
--rpc-url "$ETH_RPC_URL" \
--from-block 15312567 \
--to-block 15312570 \
--address 0x9fdbda0a5e284c32744d2f17ee5c74b284993463 \
0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef \
| grep -q "0x00000000000000000000000020000000000000000000000000000000000000c5" \
&& success "$TITLE" || fail "$TITLE"
TITLE="Issue #78 - eth_getBlockByNumber should return the same logsBloom as official RPC"
OFFICIAL_RPC="https://rpc.hyperliquid.xyz/evm"
A=$(cast block 1394092 --rpc-url "$ETH_RPC_URL" -f logsBloom | md5sum)
B=$(cast block 1394092 --rpc-url "$OFFICIAL_RPC" -f logsBloom | md5sum)
echo "node $A"
echo "rpc  $B"
[[ "$A" == "$B" ]] && success "$TITLE" || fail "$TITLE"
TITLE="eth_subscribe newHeads via wscat"
CMD='{"jsonrpc":"2.0","id":1,"method":"eth_subscribe","params":["newHeads"]}'
wscat -w 2 -c "$ETH_RPC_URL" -x "$CMD" | tail -1 | jq -r .params.result.nonce | grep 0x \
&& success "$TITLE" || fail "$TITLE"