116 Commits

Author SHA1 Message Date
0e49e65068 Merge pull request #86 from hl-archive-node/breaking/hl-header
feat(breaking): Use custom header format (HlHeader)
2025-10-09 02:51:09 -04:00
13b63ff136 feat: add migrator for mdbx as well 2025-10-09 06:35:56 +00:00
233026871f perf: chunkify block ranges 2025-10-08 13:54:16 +00:00
7e169d409d chore: Change branch to v1.8.2-fork-hl-header 2025-10-08 13:04:11 +00:00
47aaad6ed9 feat: add migrator 2025-10-08 13:03:51 +00:00
9f73b1ede0 refactor: Move BlockBody from transaction to body 2025-10-06 06:43:17 +00:00
bcdf4d933d feat(breaking): Use HlHeader for HlPrimitives 2025-10-06 06:21:08 +00:00
2390ed864a feat(breaking): Use HlHeader for storing header 2025-10-06 06:21:08 +00:00
567d6ce2e4 feat: Introduce HlHeader 2025-10-06 06:21:08 +00:00
8b2c3a4a34 refactor: Move primitives into files 2025-10-06 06:21:08 +00:00
92759f04db Merge pull request #84 from hl-archive-node/fix/no-panic
fix: Fix panic when block receipts are called on non-existing blocks
2025-10-05 19:47:22 -04:00
71bb70bca6 fix: Fix panic when block receipts are called on non-existing blocks 2025-10-05 14:54:55 +00:00
5327ebc97a Merge pull request #82 from hl-archive-node/fix/local-reader
fix(local-ingest-dir): Use more robust resumption for hl-node line reader, fix block number increment for reading files
2025-10-05 07:36:32 -04:00
4d83b687d4 feat: Add metrics for file read triggered
Usually, "Loading block data from ..." shouldn't be shown in logs at all. Add metrics to detect the file read.
2025-10-05 11:28:11 +00:00
12f366573e fix: Do not increase block counter when no block is read
This made the ingest loop increase the block number indefinitely.
2025-10-05 11:28:11 +00:00
b8bae7cde9 fix: Utilize LruMap better
LruMap was introduced to allow getting the same block twice, so removing the item when getting the block doesn't make sense.
2025-10-05 11:28:11 +00:00
0fd4b7943f refactor: Use offsets instead of lines, wrap related structs in one 2025-10-05 11:28:04 +00:00
bfd61094ee chore: cargo fmt 2025-10-05 09:58:13 +00:00
3b33b0a526 Merge pull request #81 from hl-archive-node/fix/typo-local
fix: Fix typo in --local (default hl-node dir)
2025-10-05 05:54:35 -04:00
de7b524f0b fix: Fix typo in --local (default hl-node dir) 2025-10-05 04:39:09 -04:00
24f2460337 Merge pull request #80 from hl-archive-node/chore/v1.8.2
chore: Upgrade to reth v1.8.2
2025-10-05 04:38:54 -04:00
b55ddc54ad chore: clippy 2025-10-05 04:04:30 -04:00
aa73fab281 chore: Now cargo fmt sorts imports and trait methods 2025-10-05 03:56:23 -04:00
ae0cb0da6d chore: Move sprites0/reth to hl-archive-node/reth 2025-10-05 03:56:23 -04:00
8605be9864 chore: Upgrade to reth v1.8.2 2025-10-05 03:56:23 -04:00
c93ff90f94 Merge pull request #79 from hl-archive-node/fix/issue-78
fix: Do not filter out logs based on bloom (which is for perf optimization)
2025-10-05 00:43:20 -04:00
ce64e00e2f fix: Do not filter out logs based on bloom (which is for perf optimization)
Resolves #78
2025-10-05 00:33:44 -04:00
8d8da57d3a Merge pull request #77 from hl-archive-node/feat/cutoff-latest
feat: Add debug CLI flag to enforce latest blocks (--debug-cutoff-height)
2025-10-02 10:57:04 -04:00
875304f891 feat: Add debug CLI flag to enforce latest blocks (--debug-cutoff-height)
This is useful when syncing to specific testnet blocks
2025-10-02 14:53:47 +00:00
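A hedged usage sketch: only the flag name is taken from this commit; the subcommand and the cutoff height are illustrative placeholders.

```sh
# Cap syncing at a fixed height (value is a placeholder)
reth-hl node --chain testnet --debug-cutoff-height 21304281
```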
b37ba15765 Merge pull request #74 from Quertyy/feat/block-precompila-data-rpc-method
feat(rpc): add HlBlockPrecompile rpc API
2025-09-19 02:42:21 -04:00
3080665702 style: pass clippy check 2025-09-19 13:23:49 +07:00
4896e4f0ea refactor: use BlockId as block type 2025-09-19 12:41:14 +07:00
458f506ad2 refactor: use BlockHashOrNumber as block type 2025-09-19 12:33:32 +07:00
1c7136bfab feat(rpc): add HlBlockPrecompile rpc API 2025-09-18 04:57:49 +07:00
491e902904 Merge pull request #69 from hl-archive-node/fix/call-and-estimate
fix: Apply precompiles for eth_call and eth_estimateGas
2025-09-15 02:22:21 -04:00
45648a7a98 fix: Apply precompiles for eth_call and eth_estimateGas 2025-09-15 02:21:45 -04:00
c87c5a055a Merge pull request #68 from hl-archive-node/fix/testnet-token
fix: Add a manual mapping for testnet
2025-09-14 23:31:19 -04:00
c9416a3948 fix: Add a manual mapping for testnet 2025-09-14 23:24:00 -04:00
db10c23c56 Merge pull request #66 from hl-archive-node/feat/nb-release
fix: Fix tag format
2025-09-13 16:48:00 -04:00
fc395123f3 fix: Fix tag format 2025-09-13 16:47:05 -04:00
84ea1af682 Merge pull request #64 from sentioxyz/node-builder
fix docker build args
2025-09-13 16:43:35 -04:00
bd3e0626ed fix docker build args 2025-09-13 15:28:36 +08:00
7d223a464e Merge pull request #63 from hl-archive-node/feat/nb-release
feat: Add nb tag to docker releases
2025-09-11 19:36:43 -04:00
afcc551f67 feat: Add nb tag to docker releases 2025-09-11 19:35:50 -04:00
0dfd7a4c7f Merge pull request #62 from hl-archive-node/doc/testnet
doc: Update testnet instruction, add support channel
2025-09-11 19:33:50 -04:00
8faac526b7 doc: Add support channel 2025-09-11 19:32:55 -04:00
acfabf969c doc: Update testnet block number 2025-09-11 19:31:37 -04:00
fccf877a3a Merge pull request #61 from hl-archive-node/chore/v1.7.0
chore: Upgrade to reth v1.7.0
2025-09-11 19:26:47 -04:00
9e3f0c722e chore: Upgrade to reth v1.7.0 2025-09-11 19:25:48 -04:00
cd5bcc4cb0 chore: Add issue templates from reth 2025-09-11 19:00:09 -04:00
d831a459bb Merge pull request #60 from hl-archive-node/feat/block-metrics
feat: Add block source metrics
2025-09-11 18:56:18 -04:00
66c2ee654c feat: Add block source metrics 2025-09-11 18:50:22 -04:00
701e6a25e6 refactor: Remove duplications 2025-09-11 18:47:58 -04:00
ab11ce513f Merge pull request #57 from Quertyy/chore/reth-hl-version
chore(build): add reth-hl version output
2025-09-09 09:43:12 -04:00
37b852e810 chore(build): add reth-hl version output 2025-09-09 20:19:52 +07:00
51c43d6dbd Create a docker release github action (#54)
* create docker release action

2025-09-08 10:26:20 -04:00
3f08b0a4e6 Merge pull request #55 from hl-archive-node/fix/txenv-on-trace
fix: Fill precompiles when tracing
2025-09-04 20:39:16 -04:00
d7992ab8ff remove: Remove unnecessary trait implementation 2025-09-04 20:38:41 -04:00
b37a30fb37 fix: Fill precompiles in tracing APIs 2025-09-04 20:37:10 -04:00
f6432498d8 refactor: Relax apply_precompiles and expose 2025-09-04 20:37:07 -04:00
772ff250ce Merge pull request #52 from hl-archive-node/fix/avoid-crash-on-eth-failure
fix: Do not crash when collect_block failed
2025-08-29 02:51:10 +09:00
5ee9053286 fix: Do not crash when collect_block failed
Just gracefully return it as an error and log it
2025-08-28 13:47:44 -04:00
29e6972d58 Merge pull request #51 from hl-archive-node/feat/no-eth-proof
fix: Disable eth_getProof by default
2025-08-29 02:07:24 +09:00
e87b9232cc fix: Disable eth_getProof by default
No need to expose a malfunctioning feature by default. Issue #15 affects
the StoragesTrie and AccountsTrie tables, which are used only for state root
and proof generation.
Also, clearing these tables does not affect any other part of the reth node.

Meanwhile, add the --experimental-eth-get-proof flag to forcefully enable
eth_getProof.
2025-08-28 10:27:32 -04:00
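A minimal opt-in sketch, assuming the usual `reth-hl node` invocation; only the flag itself comes from this commit.

```sh
# eth_getProof is now off by default; force-enable the experimental path
reth-hl node --http --experimental-eth-get-proof
```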
b004263f82 Merge pull request #50 from Quertyy/feat/rpc-system-tx-receipts
chore(rpc): add eth_getEvmSystemTxsReceiptsByBlockHash and eth_getEvmSystemTxsReceiptsByBlockNumber rpc methods
2025-08-28 23:26:05 +09:00
74e27b5ee2 refactor(rpc): extract common logic for getting system txs 2025-08-28 16:10:41 +02:00
09fcf0751f chore(rpc): add eth_getSystemTxsReceiptsByBlockNumber and eth_getSystemTxsReceiptsByBlockHash rpc methods 2025-08-28 15:39:37 +02:00
8f2eca4754 Merge pull request #48 from Quertyy/feat/rpc-block-system-tx
chore(rpc): add eth_getEvmSystemTxsByBlockNumber and eth_getEvmSystemTxsByBlockHash rpc methods
2025-08-28 17:45:43 +09:00
707b4fb709 chore(rpc): return types compliance 2025-08-27 10:34:34 +02:00
62dd5a71b5 chore(rpc): change methods name 2025-08-26 22:03:40 +02:00
412c38a8cd chore(rpc): add eth_getSystemTxsByBlockNumber and eth_getSystemTxsByBlockHash rpc methods 2025-08-26 21:24:28 +02:00
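A hedged JSON-RPC sketch against the final method names (see the overrides module diff below), assuming a node listening on localhost:8545:

```sh
curl -s -X POST -H 'Content-Type: application/json' \
  --data '{"jsonrpc":"2.0","id":1,"method":"eth_getEvmSystemTxsByBlockNumber","params":["latest"]}' \
  http://localhost:8545
```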
796ea518bd Merge pull request #47 from hl-archive-node/fix/issue-46
fix: Sort hl-node files correctly
2025-08-27 02:49:16 +09:00
dd2c925af2 fix: Sort hl-node files correctly 2025-08-26 13:47:34 -04:00
3ffd7bb351 Merge pull request #45 from hl-archive-node/feat/add-cli-params-for-sources
feat: Add --local.fallback-threshold, --s3.polling-interval
2025-08-26 11:29:34 +09:00
52909eea3f feat: Add --local.fallback-threshold, --s3.polling-interval 2025-08-25 22:27:26 -04:00
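A hedged usage sketch: the flag names come from this commit, but the values and their units are assumptions, not documented defaults.

```sh
# Placeholder values; consult `reth-hl node --help` for the actual units
reth-hl node --local.fallback-threshold 5 --s3.polling-interval 250
```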
0f9c2c5897 chore: Code style 2025-08-25 21:12:57 -04:00
ad4a8cd365 remove: Remove unnecessary tests 2025-08-25 21:12:34 -04:00
80506a7a43 fix(hl-node-compliance): Fix transaction index on block response 2025-08-25 10:00:43 -04:00
2af312b628 remove: Remove unused code 2025-08-25 10:00:43 -04:00
1908e9f414 Merge pull request #40 from sentioxyz/node-builder
fix: correctly ingest local blocks
2025-08-24 18:13:41 +09:00
65cdc27b51 fix: line_to_evm_block didn't keep equivalent semantics after refactor 2025-08-24 16:46:45 +08:00
4f430487d6 refactor: Move RPC addons to addons/ 2025-08-24 01:18:52 -04:00
19f35a6b54 chore: clippy, fmt 2025-08-24 01:15:36 -04:00
d61020e996 refactor: Split files for block sources
By claude code
2025-08-24 01:14:33 -04:00
657df240f4 fix: Avoid unnecessarily exposing pseudo peer 2025-08-23 22:17:03 -04:00
73a34a4bc1 chore: clippy 2025-08-23 22:17:03 -04:00
d8eef6305b remove: Reduce unnecessary LoC 2025-08-23 22:17:03 -04:00
bae68ef8db refactor: Reduce unnecessary LoC
By claude code
2025-08-23 04:21:23 -04:00
f576dddfa6 remove: Remove unused code 2025-08-23 03:10:05 -04:00
894ebcbfa5 Merge pull request #36 from hl-archive-node/fix/support-new-api
fix: Support new reth API
2025-08-23 01:51:36 +09:00
b91fa639f7 chore: Patch reth to allow zero state root on unwind 2025-08-22 12:51:30 -04:00
cf4e76db20 fix: Setup correct context for context_for_payload as well 2025-08-22 12:51:02 -04:00
5af7182919 Merge pull request #35 from hl-archive-node/feat/testnet-node-builder
feat: Support testnet (node-builder)
2025-08-22 23:42:52 +09:00
b6d5031865 feat: Support testnet sync
- Add testnet S3 bucket
- Use testnet RPC properly
- Use testnet chainspec on pseudo peer
2025-08-22 10:40:36 -04:00
7daf203bc2 fix: Initialize DB with HL-specific tables on init-state 2025-08-22 10:40:14 -04:00
20610ccc82 chore: Patch reth to allow zero state root on init-state 2025-08-22 10:39:25 -04:00
6543fac314 feat: Add testnet chainspec 2025-08-22 10:38:52 -04:00
26c1973503 Merge pull request #34 from hl-archive-node/fix/hl-node-compliance-receipts
fix: Fix block and transaction receipts' gas and transaction index
2025-08-22 23:10:22 +09:00
095ad0f65d fix: Fix block and transaction receipts' gas and transaction index 2025-08-22 10:09:35 -04:00
67cc8b8360 fix: Fix typo 2025-08-22 05:46:16 -04:00
ff67ae87c8 Merge pull request #33 from hl-archive-node/refactor/1.6.0-dev
chore: Upgrade to reth v1.6.0-dev
2025-08-22 18:40:25 +09:00
8cebe6db10 Merge pull request #31 from sentioxyz/node-builder
chore: add dockerfile for nodebuilder node
2025-08-22 10:51:27 +09:00
78b9028ded . 2025-08-22 07:57:47 +08:00
7f0f7c94a6 chore: Make constants consistent 2025-08-21 06:52:12 -04:00
2712cbb413 chore: Simplify traits 2025-08-21 06:52:01 -04:00
4be1aa83de Port to reth 1.6.0-dev 2025-08-21 05:58:37 -04:00
dd455d3a41 add docker file 2025-08-21 14:42:18 +08:00
239ee5f8e8 remove: Remove logs that degrade performance
This happens when syncing from genesis while local-ingest-dir exists. Find a better way of logging them; until then, disable them.
2025-08-08 02:06:45 -04:00
0c5a40884b Merge pull request #28 from hl-archive-node/feat/backfill-from-local-node
feat: Add backfill support from local node
2025-08-04 02:51:20 -04:00
bd9a0020e6 refactor: Code style, lint 2025-08-04 02:50:39 -04:00
a8df1fdaeb fix: Reduce fallback requests before the next block is even mined
Previously it was doing 0.5s + (0.25s x N); now it's 5s + (0.25s x N), assuming being 5 blocks behind is bad enough to request fallback. Also, the fallback usually "exists", so it updates the last poll time accordingly and won't poll much after fallback is triggered.
2025-08-02 23:57:51 -04:00
c27e5e5a64 fix/perf: Fix last line scanner, wait 0.5s before fallback if it's more recent, add tests 2025-08-02 17:10:30 -04:00
c0b3acf181 perf: Reduce log 2025-08-01 17:37:29 +00:00
77158aa164 perf: Do not allocate much when backfilling ranges 2025-08-01 17:30:33 +00:00
2d6b5e5cd2 chore: Improve log 2025-08-01 17:28:31 +00:00
ff2e55b5a2 perf: Do not use cutoff when backfilling 2025-08-01 17:20:09 +00:00
91 changed files with 5834 additions and 4212 deletions

.github/ISSUE_TEMPLATE/bug.yml
@@ -0,0 +1,127 @@
name: Bug Report
description: Create a bug report
labels: ["C-bug", "S-needs-triage"]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this bug report! Please provide as much detail as possible.
If you believe you have found a vulnerability, please provide details [here](mailto:georgios@paradigm.xyz) instead.
- type: textarea
id: what-happened
attributes:
label: Describe the bug
description: |
A clear and concise description of what the bug is.
If the bug is in a crate you are using (i.e. you are not running the standard `reth` binary) please mention that as well.
validations:
required: true
- type: textarea
id: reproduction-steps
attributes:
label: Steps to reproduce
description: Please provide any steps you think might be relevant to reproduce the bug.
placeholder: |
Steps to reproduce:
1. Start '...'
2. Then '...'
3. Check '...'
4. See error
validations:
required: true
- type: textarea
id: logs
attributes:
label: Node logs
description: |
If applicable, please provide the node logs leading up to the bug.
**Please also provide debug logs.** By default, these can be found in:
- `~/.cache/reth/logs` on Linux
- `~/Library/Caches/reth/logs` on macOS
- `%localAppData%/reth/logs` on Windows
render: text
validations:
required: false
- type: dropdown
id: platform
attributes:
label: Platform(s)
description: What platform(s) did this occur on?
multiple: true
options:
- Linux (x86)
- Linux (ARM)
- Mac (Intel)
- Mac (Apple Silicon)
- Windows (x86)
- Windows (ARM)
- type: dropdown
id: container_type
attributes:
label: Container Type
description: Were you running it in a container?
multiple: true
options:
- Not running in a container
- Docker
- Kubernetes
- LXC/LXD
- Other
validations:
required: true
- type: textarea
id: client-version
attributes:
label: What version/commit are you on?
description: This can be obtained with `reth --version`
validations:
required: true
- type: textarea
id: database-version
attributes:
label: What database version are you on?
description: This can be obtained with `reth db version`
validations:
required: true
- type: textarea
id: network
attributes:
label: Which chain / network are you on?
description: This is the argument you pass to `reth --chain`. If you are using `--dev`, type in 'dev' here. If you are not running with `--chain` or `--dev` then it is mainnet.
validations:
required: true
- type: dropdown
id: node-type
attributes:
label: What type of node are you running?
options:
- Archive (default)
- Full via --full flag
- Pruned with custom reth.toml config
validations:
required: true
- type: textarea
id: prune-config
attributes:
label: What prune config do you use, if any?
description: The `[prune]` section in `reth.toml` file
validations:
required: false
- type: input
attributes:
label: If you've built Reth from source, provide the full command you used
validations:
required: false
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/paradigmxyz/reth/blob/main/CONTRIBUTING.md#code-of-conduct)
options:
- label: I agree to follow the Code of Conduct
required: true

.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
blank_issues_enabled: false
contact_links:
- name: GitHub Discussions
url: https://github.com/paradigmxyz/reth/discussions
about: Please ask and answer questions here to keep the issue tracker clean.

.github/ISSUE_TEMPLATE/docs.yml
@@ -0,0 +1,19 @@
name: Documentation
description: Suggest a change to our documentation
labels: ["C-docs", "S-needs-triage"]
body:
- type: markdown
attributes:
value: |
If you are unsure if the docs are relevant or needed, please open up a discussion first.
- type: textarea
attributes:
label: Describe the change
description: |
Please describe the documentation you want to change or add, and if it is for end-users or contributors.
validations:
required: true
- type: textarea
attributes:
label: Additional context
description: Add any other context to the feature (like screenshots, resources)

.github/ISSUE_TEMPLATE/feature.yml
@@ -0,0 +1,21 @@
name: Feature request
description: Suggest a feature
labels: ["C-enhancement", "S-needs-triage"]
body:
- type: markdown
attributes:
value: |
Please ensure that the feature has not already been requested in the issue tracker.
- type: textarea
attributes:
label: Describe the feature
description: |
Please describe the feature and what it is aiming to solve, if relevant.
If the feature is for a crate, please include a proposed API surface.
validations:
required: true
- type: textarea
attributes:
label: Additional context
description: Add any other context to the feature (like screenshots, resources)

.github/workflows/docker.yml
@@ -0,0 +1,38 @@
# Publishes the Docker image.
name: docker
on:
push:
tags:
- v*
- nb-*
env:
IMAGE_NAME: ${{ github.repository_owner }}/nanoreth
CARGO_TERM_COLOR: always
DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/nanoreth
DOCKER_USERNAME: ${{ github.actor }}
jobs:
build:
name: build and push as latest
runs-on: ubuntu-24.04
permissions:
packages: write
contents: read
steps:
- uses: actions/checkout@v5
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
- name: Log in to Docker
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin
- name: Set up Docker builder
run: |
docker buildx create --use --name builder
- name: Build and push nanoreth image
run: make IMAGE_NAME=$IMAGE_NAME DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME PROFILE=maxperf docker-build-push-latest

Cargo.lock (generated): file diff suppressed because it is too large.

Cargo.toml
@@ -1,7 +1,8 @@
[package]
name = "reth_hl"
version = "0.1.0"
edition = "2021"
edition = "2024"
build = "build.rs"
[lib]
name = "reth_hl"
@@ -25,86 +26,92 @@ lto = "fat"
codegen-units = 1
[dependencies]
reth = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-cli = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-cli-commands = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-basic-payload-builder = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-db = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-db-api = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-chainspec = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-cli-util = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-discv4 = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-engine-primitives = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-ethereum-forks = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-ethereum-payload-builder = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-ethereum-primitives = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-eth-wire = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-eth-wire-types = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-evm = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-evm-ethereum = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-node-core = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-revm = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-network = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-network-p2p = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-network-api = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-node-ethereum = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-network-peers = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-payload-primitives = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-primitives = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-primitives-traits = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-provider = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc", features = ["test-utils"] }
reth-rpc = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-rpc-eth-api = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-rpc-engine-api = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-tracing = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-trie-common = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-trie-db = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-codecs = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-transaction-pool = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
reth-stages-types = { git = "https://github.com/sprites0/reth", rev = "fc754e5983f055365325dc9a04632d5ba2c4a8bc" }
revm = { version = "26.0.1" }
reth = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-cli = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-cli-commands = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-basic-payload-builder = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-db = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-db-api = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-chainspec = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-cli-util = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-discv4 = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-engine-primitives = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-ethereum-forks = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-ethereum-payload-builder = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-ethereum-primitives = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-eth-wire = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-eth-wire-types = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-evm = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-evm-ethereum = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-node-core = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-revm = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-network = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-network-p2p = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-network-api = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-node-ethereum = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-network-peers = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-payload-primitives = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-primitives = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-primitives-traits = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-provider = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a", features = ["test-utils"] }
reth-rpc = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-rpc-eth-api = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-rpc-engine-api = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-tracing = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-trie-common = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-trie-db = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-codecs = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-transaction-pool = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-stages-types = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-storage-api = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-errors = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-rpc-convert = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-rpc-eth-types = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-rpc-server-types = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
reth-metrics = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
revm = { version = "29.0.1", default-features = false }
# alloy dependencies
alloy-genesis = "1.0.13"
alloy-consensus = { version = "1.0.13", features = ["serde"] }
alloy-chains = "0.2.0"
alloy-eips = "1.0.13"
alloy-evm = "0.12"
alloy-json-abi = { version = "1.0.0", default-features = false }
alloy-json-rpc = { version = "1.0.13", default-features = false }
alloy-dyn-abi = "1.2.0"
alloy-network = "1.0.13"
alloy-primitives = { version = "1.2.0", default-features = false, features = ["map-foldhash"] }
alloy-genesis = { version = "1.0.37", default-features = false }
alloy-consensus = { version = "1.0.37", default-features = false }
alloy-chains = { version = "0.2.5", default-features = false }
alloy-eips = { version = "1.0.37", default-features = false }
alloy-evm = { version = "0.21.0", default-features = false }
alloy-json-abi = { version = "1.3.1", default-features = false }
alloy-json-rpc = { version = "1.0.37", default-features = false }
alloy-dyn-abi = "1.3.1"
alloy-network = { version = "1.0.37", default-features = false }
alloy-primitives = { version = "1.3.1", default-features = false, features = ["map-foldhash"] }
alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] }
alloy-rpc-types = { version = "1.0.13", features = ["engine"] }
alloy-rpc-types-eth = "1.0.13"
alloy-rpc-types-engine = "1.0.13"
alloy-signer = "1.0.13"
alloy-sol-macro = "1.2.0"
alloy-sol-types = { version = "1.2.0", default-features = false }
alloy-rpc-types = { version = "1.0.37", features = ["eth"], default-features = false }
alloy-rpc-types-eth = { version = "1.0.37", default-features = false }
alloy-rpc-types-engine = { version = "1.0.37", default-features = false }
alloy-signer = { version = "1.0.37", default-features = false }
alloy-sol-macro = "1.3.1"
alloy-sol-types = { version = "1.3.1", default-features = false }
jsonrpsee = "0.25.1"
jsonrpsee-core = { version = "0.25.1" }
jsonrpsee-types = "0.25.1"
jsonrpsee = "0.26.0"
jsonrpsee-core = "0.26.0"
jsonrpsee-types = "0.26.0"
# misc dependencies
auto_impl = "1"
async-trait = "0.1"
bytes = "1.5"
async-trait = "0.1.68"
bytes = { version = "1.5", default-features = false }
clap = { version = "4", features = ["derive"] }
cfg-if = { version = "1.0", default-features = false }
derive_more = { version = "2", default-features = false, features = ["full"] }
eyre = "0.6"
futures = "0.3"
lazy_static = "1.4.0"
once_cell = { version = "1.19", default-features = false, features = ["alloc"] }
once_cell = { version = "1.19", default-features = false, features = ["critical-section"] }
parking_lot = "0.12"
serde = { version = "1.0", features = ["derive"], default-features = false }
serde_json = "1.0"
serde_json = { version = "1.0", default-features = false, features = ["alloc"] }
thiserror = { version = "2.0.0", default-features = false }
tokio = { version = "1.44.2", features = ["full"] }
tokio-stream = "0.1"
tracing = "0.1"
tokio-stream = "0.1.11"
tracing = { version = "0.1.0", default-features = false }
rmp-serde = "1.3"
lz4_flex = "0.11"
ureq = "3.0.12"
@@ -164,5 +171,9 @@ client = [
"reth-rpc-eth-api/client",
]
[profile.test]
inherits = "release"
[dev-dependencies]
tempfile = "3.20.0"
[build-dependencies]
vergen = { version = "9.0.4", features = ["build", "cargo", "emit_and_set"] }
vergen-git2 = "1.0.5"

Dockerfile
@@ -0,0 +1,56 @@
# syntax=docker.io/docker/dockerfile:1.7-labs
FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef
WORKDIR /app
LABEL org.opencontainers.image.source=https://github.com/hl-archive-node/nanoreth
LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0"
# Install system dependencies
RUN apt-get update && apt-get -y upgrade && apt-get install -y libclang-dev pkg-config
# Builds a cargo-chef plan
FROM chef AS planner
COPY --exclude=.git --exclude=dist . .
RUN cargo chef prepare --recipe-path recipe.json
FROM chef AS builder
COPY --from=planner /app/recipe.json recipe.json
# Build profile, release by default
ARG BUILD_PROFILE=release
ENV BUILD_PROFILE=$BUILD_PROFILE
# Extra Cargo flags
ARG RUSTFLAGS=""
ENV RUSTFLAGS="$RUSTFLAGS"
# Extra Cargo features
ARG FEATURES=""
ENV FEATURES=$FEATURES
# Builds dependencies
RUN cargo chef cook --profile $BUILD_PROFILE --features "$FEATURES" --recipe-path recipe.json
# Build application
COPY --exclude=dist . .
RUN cargo build --profile $BUILD_PROFILE --features "$FEATURES" --locked --bin reth-hl
# ARG is not resolved in COPY so we have to hack around it by copying the
# binary to a temporary location
RUN cp /app/target/$BUILD_PROFILE/reth-hl /app/reth-hl
# Use Ubuntu as the release image
FROM ubuntu AS runtime
WORKDIR /app
# Install root certificates for aws sdk to work
RUN apt-get update && apt-get install -y ca-certificates && update-ca-certificates
# Copy reth over from the build stage
COPY --from=builder /app/reth-hl /usr/local/bin
# Copy licenses
COPY LICENSE-* ./
EXPOSE 9001 8545 8546
ENTRYPOINT ["/usr/local/bin/reth-hl"]
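A minimal local build sketch using the ARGs this Dockerfile declares (BUILD_PROFILE, FEATURES); the tag is arbitrary, and the FEATURES value mirrors the Makefile's docker target. The labs syntax on the first line means BuildKit is required.

```sh
docker build -t nanoreth:local \
  --build-arg BUILD_PROFILE=maxperf \
  --build-arg FEATURES="jemalloc,asm-keccak" .
```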

Makefile
@@ -1,6 +1,8 @@
# Modified from reth Makefile
.DEFAULT_GOAL := help
GIT_SHA ?= $(shell git rev-parse HEAD)
GIT_TAG ?= $(shell git describe --tags --abbrev=0 2>/dev/null)
BIN_DIR = "dist/bin"
# List of features to use when building. Can be overridden via the environment.
@@ -17,6 +19,9 @@ PROFILE ?= release
# Extra flags for Cargo
CARGO_INSTALL_EXTRA_FLAGS ?=
# The docker image name
DOCKER_IMAGE_NAME ?= ghcr.io/hl-archive-node/nanoreth
##@ Help
.PHONY: help
@@ -207,3 +212,49 @@ check-features:
--package reth-primitives-traits \
--package reth-primitives \
--feature-powerset
##@ Docker
# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --driver docker-container --name cross-builder`
.PHONY: docker-build-push
docker-build-push: ## Build and push a cross-arch Docker image tagged with the latest git tag.
$(call docker_build_push,$(GIT_TAG),$(GIT_TAG))
# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --driver docker-container --name cross-builder`
.PHONY: docker-build-push-git-sha
docker-build-push-git-sha: ## Build and push a cross-arch Docker image tagged with the latest git sha.
$(call docker_build_push,$(GIT_SHA),$(GIT_SHA))
# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --driver docker-container --name cross-builder`
.PHONY: docker-build-push-latest
docker-build-push-latest: ## Build and push a cross-arch Docker image tagged with the latest git tag and `latest`.
$(call docker_build_push,$(GIT_TAG),latest)
# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --name cross-builder`
.PHONY: docker-build-push-nightly
docker-build-push-nightly: ## Build and push cross-arch Docker image tagged with the latest git tag with a `-nightly` suffix, and `latest-nightly`.
$(call docker_build_push,nightly,nightly)
# Create a Docker image using the main Dockerfile
define docker_build_push
docker buildx build --file ./Dockerfile . \
--platform linux/amd64 \
--tag $(DOCKER_IMAGE_NAME):$(1) \
--tag $(DOCKER_IMAGE_NAME):$(2) \
--build-arg BUILD_PROFILE="$(PROFILE)" \
--build-arg FEATURES="jemalloc,asm-keccak" \
--provenance=false \
--push
endef
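Tying the variables together, a sketch of running these targets locally; this mirrors the docker.yml workflow step above, and the buildx setup lines are the ones quoted in the Makefile comments.

```sh
# One-time cross-arch builder setup
docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64
docker buildx create --use --driver docker-container --name cross-builder
# Build and push the latest git tag and `latest`
make DOCKER_IMAGE_NAME=ghcr.io/hl-archive-node/nanoreth PROFILE=maxperf docker-build-push-latest
```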

README.md
@@ -3,6 +3,8 @@
HyperEVM archive node implementation based on [reth](https://github.com/paradigmxyz/reth).
NodeBuilder API version is heavily inspired by [reth-bsc](https://github.com/loocapro/reth-bsc).
Got questions? Drop by the [Hyperliquid Discord](https://discord.gg/hyperliquid) #node-operators channel.
## ⚠️ IMPORTANT: System Transactions Appear as Pseudo Transactions
Deposit transactions from [System Addresses](https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/hyperevm/hypercore-less-than-greater-than-hyperevm-transfers#system-addresses) like `0x222..22` / `0x200..xx` to user addresses are intentionally recorded as pseudo transactions.
@@ -58,19 +60,19 @@ $ reth-hl node --http --http.addr 0.0.0.0 --http.api eth,ots,net,web3 \
## How to run (testnet)
Testnet is supported since block 21304281.
Testnet is supported since block 30281484.
```sh
# Get testnet genesis at block 21304281
# Get testnet genesis at block 30281484
$ cd ~
$ git clone https://github.com/sprites0/hl-testnet-genesis
$ zstd --rm -d ~/hl-testnet-genesis/*.zst
# Init node
$ make install
$ reth-hl init-state --without-evm --chain testnet --header ~/hl-testnet-genesis/21304281.rlp \
--header-hash 0x5b10856d2b1ad241c9bd6136bcc60ef7e8553560ca53995a590db65f809269b4 \
~/hl-testnet-genesis/21304281.jsonl --total-difficulty 0
$ reth-hl init-state --without-evm --chain testnet --header ~/hl-testnet-genesis/30281484.rlp \
--header-hash 0x147cc3c09e9ddbb11799c826758db284f77099478ab5f528d3a57a6105516c21 \
~/hl-testnet-genesis/30281484.jsonl --total-difficulty 0
# Run node
$ reth-hl node --chain testnet --http --http.addr 0.0.0.0 --http.api eth,ots,net,web3 \

build.rs
@@ -0,0 +1,91 @@
use std::{env, error::Error};
use vergen::{BuildBuilder, CargoBuilder, Emitter};
use vergen_git2::Git2Builder;
fn main() -> Result<(), Box<dyn Error>> {
let mut emitter = Emitter::default();
let build_builder = BuildBuilder::default().build_timestamp(true).build()?;
emitter.add_instructions(&build_builder)?;
let cargo_builder = CargoBuilder::default().features(true).target_triple(true).build()?;
emitter.add_instructions(&cargo_builder)?;
let git_builder =
Git2Builder::default().describe(false, true, None).dirty(true).sha(false).build()?;
emitter.add_instructions(&git_builder)?;
emitter.emit_and_set()?;
let sha = env::var("VERGEN_GIT_SHA")?;
let sha_short = &sha[0..7];
let is_dirty = env::var("VERGEN_GIT_DIRTY")? == "true";
// > git describe --always --tags
// if not on a tag: v0.2.0-beta.3-82-g1939939b
// if on a tag: v0.2.0-beta.3
let not_on_tag = env::var("VERGEN_GIT_DESCRIBE")?.ends_with(&format!("-g{sha_short}"));
let version_suffix = if is_dirty || not_on_tag { "-dev" } else { "" };
println!("cargo:rustc-env=RETH_HL_VERSION_SUFFIX={version_suffix}");
// Set short SHA
println!("cargo:rustc-env=VERGEN_GIT_SHA_SHORT={}", &sha[..8]);
// Set the build profile
let out_dir = env::var("OUT_DIR").unwrap();
let profile = out_dir.rsplit(std::path::MAIN_SEPARATOR).nth(3).unwrap();
println!("cargo:rustc-env=RETH_HL_BUILD_PROFILE={profile}");
// Set formatted version strings
let pkg_version = env!("CARGO_PKG_VERSION");
// The short version information for reth.
// - The latest version from Cargo.toml
// - The short SHA of the latest commit.
// Example: 0.1.0 (defa64b2)
println!("cargo:rustc-env=RETH_HL_SHORT_VERSION={pkg_version}{version_suffix} ({sha_short})");
// LONG_VERSION
// The long version information for reth.
//
// - The latest version from Cargo.toml + version suffix (if any)
// - The full SHA of the latest commit
// - The build datetime
// - The build features
// - The build profile
//
// Example:
//
// ```text
// Version: 0.1.0
// Commit SHA: defa64b2
// Build Timestamp: 2023-05-19T01:47:19.815651705Z
// Build Features: jemalloc
// Build Profile: maxperf
// ```
println!("cargo:rustc-env=RETH_HL_LONG_VERSION_0=Version: {pkg_version}{version_suffix}");
println!("cargo:rustc-env=RETH_HL_LONG_VERSION_1=Commit SHA: {sha}");
println!(
"cargo:rustc-env=RETH_HL_LONG_VERSION_2=Build Timestamp: {}",
env::var("VERGEN_BUILD_TIMESTAMP")?
);
println!(
"cargo:rustc-env=RETH_HL_LONG_VERSION_3=Build Features: {}",
env::var("VERGEN_CARGO_FEATURES")?
);
println!("cargo:rustc-env=RETH_HL_LONG_VERSION_4=Build Profile: {profile}");
// The version information for reth formatted for P2P (devp2p).
// - The latest version from Cargo.toml
// - The target triple
//
// Example: reth/v0.1.0-alpha.1-428a6dc2f/aarch64-apple-darwin
println!(
"cargo:rustc-env=RETH_HL_P2P_CLIENT_VERSION={}",
format_args!("reth/v{pkg_version}-{sha_short}/{}", env::var("VERGEN_CARGO_TARGET_TRIPLE")?)
);
Ok(())
}
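The RETH_HL_* variables emitted above feed the CLI's version strings; assuming they are surfaced unchanged through `--version`, the output should resemble the example embedded in the comments (values illustrative).

```sh
$ reth-hl --version
Version: 0.1.0
Commit SHA: defa64b2
Build Timestamp: 2023-05-19T01:47:19.815651705Z
Build Features: jemalloc
Build Profile: maxperf
```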

@@ -1,27 +1,28 @@
use alloy_eips::BlockId;
use alloy_json_rpc::RpcObject;
use alloy_primitives::{Bytes, U256};
use alloy_rpc_types_eth::{
state::{EvmOverrides, StateOverride},
transaction::TransactionRequest,
BlockOverrides,
state::{EvmOverrides, StateOverride},
};
use jsonrpsee::{
http_client::{HttpClient, HttpClientBuilder},
proc_macros::rpc,
rpc_params,
types::{error::INTERNAL_ERROR_CODE, ErrorObject},
types::{ErrorObject, error::INTERNAL_ERROR_CODE},
};
use jsonrpsee_core::{async_trait, client::ClientT, ClientError, RpcResult};
use reth_rpc_eth_api::helpers::EthCall;
use jsonrpsee_core::{ClientError, RpcResult, async_trait, client::ClientT};
use reth_rpc::eth::EthApiTypes;
use reth_rpc_eth_api::{RpcTxReq, helpers::EthCall};
#[rpc(server, namespace = "eth")]
pub(crate) trait CallForwarderApi {
pub(crate) trait CallForwarderApi<TxReq: RpcObject> {
/// Executes a new message call immediately without creating a transaction on the block chain.
#[method(name = "call")]
async fn call(
&self,
request: TransactionRequest,
block_number: Option<BlockId>,
request: TxReq,
block_id: Option<BlockId>,
state_overrides: Option<StateOverride>,
block_overrides: Option<Box<BlockOverrides>>,
) -> RpcResult<Bytes>;
@@ -31,8 +32,8 @@ pub(crate) trait CallForwarderApi {
#[method(name = "estimateGas")]
async fn estimate_gas(
&self,
request: TransactionRequest,
block_number: Option<BlockId>,
request: TxReq,
block_id: Option<BlockId>,
state_override: Option<StateOverride>,
) -> RpcResult<U256>;
}
@@ -52,23 +53,24 @@ impl<EthApi> CallForwarderExt<EthApi> {
}
#[async_trait]
impl<EthApi> CallForwarderApiServer for CallForwarderExt<EthApi>
impl<EthApi> CallForwarderApiServer<RpcTxReq<<EthApi as EthApiTypes>::NetworkTypes>>
for CallForwarderExt<EthApi>
where
EthApi: EthCall + Send + Sync + 'static,
{
async fn call(
&self,
request: TransactionRequest,
block_number: Option<BlockId>,
request: RpcTxReq<<EthApi as EthApiTypes>::NetworkTypes>,
block_id: Option<BlockId>,
state_overrides: Option<StateOverride>,
block_overrides: Option<Box<BlockOverrides>>,
) -> RpcResult<Bytes> {
let is_latest = block_number.as_ref().map(|b| b.is_latest()).unwrap_or(true);
let is_latest = block_id.as_ref().map(|b| b.is_latest()).unwrap_or(true);
let result = if is_latest {
self.upstream_client
.request(
"eth_call",
rpc_params![request, block_number, state_overrides, block_overrides],
rpc_params![request, block_id, state_overrides, block_overrides],
)
.await
.map_err(|e| match e {
@@ -83,7 +85,7 @@ where
EthCall::call(
&self.eth_api,
request,
block_number,
block_id,
EvmOverrides::new(state_overrides, block_overrides),
)
.await
@@ -97,14 +99,14 @@ where
async fn estimate_gas(
&self,
request: TransactionRequest,
block_number: Option<BlockId>,
request: RpcTxReq<<EthApi as EthApiTypes>::NetworkTypes>,
block_id: Option<BlockId>,
state_override: Option<StateOverride>,
) -> RpcResult<U256> {
let is_latest = block_number.as_ref().map(|b| b.is_latest()).unwrap_or(true);
let is_latest = block_id.as_ref().map(|b| b.is_latest()).unwrap_or(true);
let result = if is_latest {
self.upstream_client
.request("eth_estimateGas", rpc_params![request, block_number, state_override])
.request("eth_estimateGas", rpc_params![request, block_id, state_override])
.await
.map_err(|e| match e {
ClientError::Call(e) => e,
@@ -118,7 +120,7 @@ where
EthCall::estimate_gas_at(
&self.eth_api,
request,
block_number.unwrap_or_default(),
block_id.unwrap_or_default(),
state_override,
)
.await

@@ -0,0 +1,700 @@
//! Overrides for RPC methods to post-filter system transactions and logs.
//!
//! System transactions are always at the beginning of the block,
//! so we can use the transaction index to determine if the log is from a system transaction,
//! and if it is, we can exclude it.
//!
//! For non-system transactions, we can just return the log as is, and the client will
//! adjust the transaction index accordingly.
use alloy_consensus::{
BlockHeader, TxReceipt,
transaction::{TransactionMeta, TxHashRef},
};
use alloy_eips::{BlockId, BlockNumberOrTag};
use alloy_json_rpc::RpcObject;
use alloy_primitives::{B256, U256};
use alloy_rpc_types::{
BlockTransactions, Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind,
TransactionInfo,
pubsub::{Params, SubscriptionKind},
};
use jsonrpsee::{PendingSubscriptionSink, SubscriptionMessage, SubscriptionSink, proc_macros::rpc};
use jsonrpsee_core::{RpcResult, async_trait};
use jsonrpsee_types::{ErrorObject, error::INTERNAL_ERROR_CODE};
use reth::{api::FullNodeComponents, builder::rpc::RpcContext, tasks::TaskSpawner};
use reth_primitives_traits::SignedTransaction;
use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, ReceiptProvider};
use reth_rpc::{EthFilter, EthPubSub, RpcTypes, eth::pubsub::SubscriptionSerializeError};
use reth_rpc_eth_api::{
EthApiServer, EthApiTypes, EthFilterApiServer, EthPubSubApiServer, FullEthApiTypes, RpcBlock,
RpcConvert, RpcHeader, RpcNodeCoreExt, RpcReceipt, RpcTransaction, RpcTxReq,
helpers::{EthBlocks, EthTransactions, LoadReceipt},
transaction::ConvertReceiptInput,
};
use reth_rpc_eth_types::EthApiError;
use serde::Serialize;
use std::{marker::PhantomData, sync::Arc};
use tokio_stream::{Stream, StreamExt};
use tracing::{Instrument, trace};
use crate::{HlBlock, node::primitives::HlPrimitives};
pub trait EthWrapper:
EthApiServer<
RpcTxReq<Self::NetworkTypes>,
RpcTransaction<Self::NetworkTypes>,
RpcBlock<Self::NetworkTypes>,
RpcReceipt<Self::NetworkTypes>,
RpcHeader<Self::NetworkTypes>,
> + FullEthApiTypes<
Primitives = HlPrimitives,
NetworkTypes: RpcTypes<TransactionResponse = alloy_rpc_types_eth::Transaction>,
> + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
+ EthBlocks
+ EthTransactions
+ LoadReceipt
+ 'static
{
}
impl<T> EthWrapper for T where
T: EthApiServer<
RpcTxReq<Self::NetworkTypes>,
RpcTransaction<Self::NetworkTypes>,
RpcBlock<Self::NetworkTypes>,
RpcReceipt<Self::NetworkTypes>,
RpcHeader<Self::NetworkTypes>,
> + FullEthApiTypes<
Primitives = HlPrimitives,
NetworkTypes: RpcTypes<TransactionResponse = alloy_rpc_types_eth::Transaction>,
> + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
+ EthBlocks
+ EthTransactions
+ LoadReceipt
+ 'static
{
}
#[rpc(server, namespace = "eth")]
#[async_trait]
pub trait EthSystemTransactionApi<T: RpcObject, R: RpcObject> {
#[method(name = "getEvmSystemTxsByBlockHash")]
async fn get_evm_system_txs_by_block_hash(&self, hash: B256) -> RpcResult<Option<Vec<T>>>;
#[method(name = "getEvmSystemTxsByBlockNumber")]
async fn get_evm_system_txs_by_block_number(
&self,
block_id: Option<BlockId>,
) -> RpcResult<Option<Vec<T>>>;
#[method(name = "getEvmSystemTxsReceiptsByBlockHash")]
async fn get_evm_system_txs_receipts_by_block_hash(
&self,
hash: B256,
) -> RpcResult<Option<Vec<R>>>;
#[method(name = "getEvmSystemTxsReceiptsByBlockNumber")]
async fn get_evm_system_txs_receipts_by_block_number(
&self,
block_id: Option<BlockId>,
) -> RpcResult<Option<Vec<R>>>;
}
pub struct HlSystemTransactionExt<Eth: EthWrapper> {
eth_api: Eth,
_marker: PhantomData<Eth>,
}
impl<Eth: EthWrapper> HlSystemTransactionExt<Eth> {
pub fn new(eth_api: Eth) -> Self {
Self { eth_api, _marker: PhantomData }
}
async fn get_system_txs_by_block_id(
&self,
block_id: BlockId,
) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>>
where
jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
if let Some(block) = self.eth_api.recovered_block(block_id).await? {
let block_hash = block.hash();
let block_number = block.number();
let base_fee_per_gas = block.base_fee_per_gas();
let system_txs = block
.transactions_with_sender()
.enumerate()
.filter_map(|(index, (signer, tx))| {
if tx.is_system_transaction() {
let tx_info = TransactionInfo {
hash: Some(*tx.tx_hash()),
block_hash: Some(block_hash),
block_number: Some(block_number),
base_fee: base_fee_per_gas,
index: Some(index as u64),
};
self.eth_api
.tx_resp_builder()
.fill(tx.clone().with_signer(*signer), tx_info)
.ok()
} else {
None
}
})
.collect();
Ok(Some(system_txs))
} else {
Ok(None)
}
}
async fn get_system_txs_receipts_by_block_id(
&self,
block_id: BlockId,
) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>>
where
jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
if let Some((block, receipts)) =
EthBlocks::load_block_and_receipts(&self.eth_api, block_id).await?
{
let block_number = block.number;
let base_fee = block.base_fee_per_gas;
let block_hash = block.hash();
let excess_blob_gas = block.excess_blob_gas;
let timestamp = block.timestamp;
let mut gas_used = 0;
let mut next_log_index = 0;
let mut inputs = Vec::new();
for (idx, (tx, receipt)) in
block.transactions_recovered().zip(receipts.iter()).enumerate()
{
if receipt.cumulative_gas_used() != 0 {
break;
}
let meta = TransactionMeta {
tx_hash: *tx.tx_hash(),
index: idx as u64,
block_hash,
block_number,
base_fee,
excess_blob_gas,
timestamp,
};
let input = ConvertReceiptInput {
receipt: receipt.clone(),
tx,
gas_used: receipt.cumulative_gas_used() - gas_used,
next_log_index,
meta,
};
gas_used = receipt.cumulative_gas_used();
next_log_index += receipt.logs().len();
inputs.push(input);
}
let receipts = self.eth_api.tx_resp_builder().convert_receipts(inputs)?;
Ok(Some(receipts))
} else {
Ok(None)
}
}
}
#[async_trait]
impl<Eth: EthWrapper>
EthSystemTransactionApiServer<RpcTransaction<Eth::NetworkTypes>, RpcReceipt<Eth::NetworkTypes>>
for HlSystemTransactionExt<Eth>
where
jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
/// Returns the system transactions for a given block hash.
/// Semi-compliance with the `eth_getSystemTxsByBlockHash` RPC method introduced by hl-node.
/// https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/hyperevm/json-rpc
///
/// NOTE: Method name differs from hl-node because we retrieve transaction data from EVM
/// (signature recovery for 'from' address, EVM hash calculation) rather than HyperCore.
async fn get_evm_system_txs_by_block_hash(
&self,
hash: B256,
) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?hash, "Serving eth_getEvmSystemTxsByBlockHash");
match self.get_system_txs_by_block_id(BlockId::Hash(hash.into())).await {
Ok(txs) => Ok(txs),
// hl-node returns none if the block is not found
Err(_) => Ok(None),
}
}
/// Returns the system transactions for a given block number, or the latest block if no block
/// number is provided. Semi-compliance with the `eth_getSystemTxsByBlockNumber` RPC method
/// introduced by hl-node. https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/hyperevm/json-rpc
///
/// NOTE: Method name differs from hl-node because we retrieve transaction data from EVM
/// (signature recovery for 'from' address, EVM hash calculation) rather than HyperCore.
async fn get_evm_system_txs_by_block_number(
&self,
id: Option<BlockId>,
) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?id, "Serving eth_getEvmSystemTxsByBlockNumber");
match self.get_system_txs_by_block_id(id.unwrap_or_default()).await? {
Some(txs) => Ok(Some(txs)),
None => {
// hl-node returns an error if the block is not found
Err(ErrorObject::owned(
INTERNAL_ERROR_CODE,
format!("invalid block height: {id:?}"),
Some(()),
))
}
}
}
/// Returns the receipts for the system transactions for a given block hash.
async fn get_evm_system_txs_receipts_by_block_hash(
&self,
hash: B256,
) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?hash, "Serving eth_getEvmSystemTxsReceiptsByBlockHash");
match self.get_system_txs_receipts_by_block_id(BlockId::Hash(hash.into())).await {
Ok(receipts) => Ok(receipts),
// hl-node returns none if the block is not found
Err(_) => Ok(None),
}
}
/// Returns the receipts for the system transactions for a given block number, or the latest
/// block if no block
async fn get_evm_system_txs_receipts_by_block_number(
&self,
block_id: Option<BlockId>,
) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?block_id, "Serving eth_getEvmSystemTxsReceiptsByBlockNumber");
match self.get_system_txs_receipts_by_block_id(block_id.unwrap_or_default()).await? {
Some(receipts) => Ok(Some(receipts)),
None => Err(ErrorObject::owned(
INTERNAL_ERROR_CODE,
format!("invalid block height: {block_id:?}"),
Some(()),
)),
}
}
}
pub struct HlNodeFilterHttp<Eth: EthWrapper> {
filter: Arc<EthFilter<Eth>>,
provider: Arc<Eth::Provider>,
}
impl<Eth: EthWrapper> HlNodeFilterHttp<Eth> {
pub fn new(filter: Arc<EthFilter<Eth>>, provider: Arc<Eth::Provider>) -> Self {
Self { filter, provider }
}
}
#[async_trait]
impl<Eth: EthWrapper> EthFilterApiServer<RpcTransaction<Eth::NetworkTypes>>
for HlNodeFilterHttp<Eth>
{
async fn new_filter(&self, filter: Filter) -> RpcResult<FilterId> {
trace!(target: "rpc::eth", "Serving eth_newFilter");
self.filter.new_filter(filter).await
}
async fn new_block_filter(&self) -> RpcResult<FilterId> {
trace!(target: "rpc::eth", "Serving eth_newBlockFilter");
self.filter.new_block_filter().await
}
async fn new_pending_transaction_filter(
&self,
kind: Option<PendingTransactionFilterKind>,
) -> RpcResult<FilterId> {
trace!(target: "rpc::eth", "Serving eth_newPendingTransactionFilter");
self.filter.new_pending_transaction_filter(kind).await
}
async fn filter_changes(
&self,
id: FilterId,
) -> RpcResult<FilterChanges<RpcTransaction<Eth::NetworkTypes>>> {
trace!(target: "rpc::eth", "Serving eth_getFilterChanges");
self.filter.filter_changes(id).await.map_err(ErrorObject::from)
}
async fn filter_logs(&self, id: FilterId) -> RpcResult<Vec<Log>> {
trace!(target: "rpc::eth", "Serving eth_getFilterLogs");
self.filter.filter_logs(id).await.map_err(ErrorObject::from)
}
async fn uninstall_filter(&self, id: FilterId) -> RpcResult<bool> {
trace!(target: "rpc::eth", "Serving eth_uninstallFilter");
self.filter.uninstall_filter(id).await
}
async fn logs(&self, filter: Filter) -> RpcResult<Vec<Log>> {
trace!(target: "rpc::eth", "Serving eth_getLogs");
let logs = EthFilterApiServer::logs(&*self.filter, filter).await?;
Ok(logs.into_iter().filter_map(|log| adjust_log::<Eth>(log, &self.provider)).collect())
}
}
pub struct HlNodeFilterWs<Eth: EthWrapper> {
pubsub: Arc<EthPubSub<Eth>>,
provider: Arc<Eth::Provider>,
subscription_task_spawner: Box<dyn TaskSpawner + 'static>,
}
impl<Eth: EthWrapper> HlNodeFilterWs<Eth> {
pub fn new(
pubsub: Arc<EthPubSub<Eth>>,
provider: Arc<Eth::Provider>,
subscription_task_spawner: Box<dyn TaskSpawner + 'static>,
) -> Self {
Self { pubsub, provider, subscription_task_spawner }
}
}
#[async_trait]
impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>> for HlNodeFilterWs<Eth>
where
jsonrpsee_types::error::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
async fn subscribe(
&self,
pending: PendingSubscriptionSink,
kind: SubscriptionKind,
params: Option<Params>,
) -> jsonrpsee::core::SubscriptionResult {
let sink = pending.accept().await?;
let (pubsub, provider) = (self.pubsub.clone(), self.provider.clone());
self.subscription_task_spawner.spawn(Box::pin(async move {
if kind == SubscriptionKind::Logs {
let filter = match params {
Some(Params::Logs(f)) => *f,
Some(Params::Bool(_)) => return,
_ => Default::default(),
};
let _ = pipe_from_stream(
sink,
pubsub.log_stream(filter).filter_map(|log| adjust_log::<Eth>(log, &provider)),
)
.await;
} else {
let _ = pubsub.handle_accepted(sink, kind, params).await;
}
}));
Ok(())
}
}
fn adjust_log<Eth: EthWrapper>(mut log: Log, provider: &Eth::Provider) -> Option<Log> {
let (tx_idx, log_idx) = (log.transaction_index?, log.log_index?);
let receipts = provider.receipts_by_block(log.block_number?.into()).unwrap()?;
let (mut sys_tx_count, mut sys_log_count) = (0u64, 0u64);
for receipt in receipts {
if receipt.cumulative_gas_used() == 0 {
sys_tx_count += 1;
sys_log_count += receipt.logs().len() as u64;
}
}
if sys_tx_count > tx_idx {
return None;
}
log.transaction_index = Some(tx_idx - sys_tx_count);
log.log_index = Some(log_idx - sys_log_count);
Some(log)
}
async fn pipe_from_stream<T: Serialize, St: Stream<Item = T> + Unpin>(
sink: SubscriptionSink,
mut stream: St,
) -> Result<(), ErrorObject<'static>> {
loop {
tokio::select! {
_ = sink.closed() => break Ok(()),
maybe_item = stream.next() => {
let Some(item) = maybe_item else { break Ok(()) };
let msg = SubscriptionMessage::new(sink.method_name(), sink.subscription_id(), &item)
.map_err(SubscriptionSerializeError::from)?;
if sink.send(msg).await.is_err() { break Ok(()); }
}
}
}
}
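// The loop above is the usual select pattern: stop when the subscriber goes
// away or the stream ends, and treat a failed send as a disconnect. A sketch of
// the same shape, substituting a tokio mpsc sender for the RPC sink
// (hypothetical helper, not part of this crate; reuses the file's
// tokio_stream::{Stream, StreamExt} imports):
async fn pipe_from_stream_sketch(
    tx: tokio::sync::mpsc::Sender<u32>,
    mut stream: impl Stream<Item = u32> + Unpin,
) {
    loop {
        tokio::select! {
            _ = tx.closed() => break, // receiver dropped
            maybe_item = stream.next() => {
                let Some(item) = maybe_item else { break }; // stream exhausted
                if tx.send(item).await.is_err() { break; }
            }
        }
    }
}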
pub struct HlNodeBlockFilterHttp<Eth: EthWrapper> {
eth_api: Arc<Eth>,
_marker: PhantomData<Eth>,
}
impl<Eth: EthWrapper> HlNodeBlockFilterHttp<Eth> {
pub fn new(eth_api: Arc<Eth>) -> Self {
Self { eth_api, _marker: PhantomData }
}
}
#[rpc(server, namespace = "eth")]
pub trait EthBlockApi<B: RpcObject, R: RpcObject> {
/// Returns information about a block by hash.
#[method(name = "getBlockByHash")]
async fn block_by_hash(&self, hash: B256, full: bool) -> RpcResult<Option<B>>;
/// Returns information about a block by number.
#[method(name = "getBlockByNumber")]
async fn block_by_number(&self, number: BlockNumberOrTag, full: bool) -> RpcResult<Option<B>>;
/// Returns all transaction receipts for a given block.
#[method(name = "getBlockReceipts")]
async fn block_receipts(&self, block_id: BlockId) -> RpcResult<Option<Vec<R>>>;
#[method(name = "getBlockTransactionCountByHash")]
async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult<Option<U256>>;
#[method(name = "getBlockTransactionCountByNumber")]
async fn block_transaction_count_by_number(
&self,
number: BlockNumberOrTag,
) -> RpcResult<Option<U256>>;
#[method(name = "getTransactionReceipt")]
async fn transaction_receipt(&self, hash: B256) -> RpcResult<Option<R>>;
}
macro_rules! engine_span {
() => {
tracing::trace_span!(target: "rpc", "engine")
};
}
fn adjust_block<Eth: EthWrapper>(
recovered_block: &RpcBlock<Eth::NetworkTypes>,
eth_api: &Eth,
) -> RpcBlock<Eth::NetworkTypes> {
let system_tx_count = system_tx_count_for_block(eth_api, recovered_block.number().into());
let mut new_block = recovered_block.clone();
new_block.transactions = match new_block.transactions {
BlockTransactions::Full(mut transactions) => {
transactions.drain(..system_tx_count);
transactions.iter_mut().for_each(|tx| {
if let Some(idx) = &mut tx.transaction_index {
*idx -= system_tx_count as u64;
}
});
BlockTransactions::Full(transactions)
}
BlockTransactions::Hashes(mut hashes) => {
hashes.drain(..system_tx_count);
BlockTransactions::Hashes(hashes)
}
BlockTransactions::Uncle => BlockTransactions::Uncle,
};
new_block
}
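// The drain-and-reindex step in isolation, over hypothetical indices
// (5 txs, the first 2 of which are system txs):
#[test]
fn drain_and_reindex_sketch() {
    let system_tx_count = 2usize;
    let mut tx_indices: Vec<u64> = vec![0, 1, 2, 3, 4];
    tx_indices.drain(..system_tx_count); // drop the leading system txs
    for idx in &mut tx_indices {
        *idx -= system_tx_count as u64; // shift the survivors down
    }
    assert_eq!(tx_indices, vec![0, 1, 2]);
}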
async fn adjust_block_receipts<Eth: EthWrapper>(
block_id: BlockId,
eth_api: &Eth,
) -> Result<Option<(usize, Vec<RpcReceipt<Eth::NetworkTypes>>)>, Eth::Error> {
// Modified from EthBlocks::block_receipt. See `NOTE` comment below.
let system_tx_count = system_tx_count_for_block(eth_api, block_id);
if let Some((block, receipts)) = EthBlocks::load_block_and_receipts(eth_api, block_id).await? {
let block_number = block.number;
let base_fee = block.base_fee_per_gas;
let block_hash = block.hash();
let excess_blob_gas = block.excess_blob_gas;
let timestamp = block.timestamp;
let mut gas_used = 0;
let mut next_log_index = 0;
let inputs = block
.transactions_recovered()
.zip(receipts.iter())
.enumerate()
.filter_map(|(idx, (tx, receipt))| {
if receipt.cumulative_gas_used() == 0 {
// NOTE: modified to exclude system tx
return None;
}
let meta = TransactionMeta {
tx_hash: *tx.tx_hash(),
index: (idx - system_tx_count) as u64,
block_hash,
block_number,
base_fee,
excess_blob_gas,
timestamp,
};
let input = ConvertReceiptInput {
receipt: receipt.clone(),
tx,
gas_used: receipt.cumulative_gas_used() - gas_used,
next_log_index,
meta,
};
gas_used = receipt.cumulative_gas_used();
next_log_index += receipt.logs().len();
Some(input)
})
.collect::<Vec<_>>();
return eth_api
.tx_resp_builder()
.convert_receipts(inputs)
.map(|receipts| Some((system_tx_count, receipts)));
}
Ok(None)
}
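// The `gas_used` handed to the receipt converter above is the delta between
// consecutive cumulative values; the subtraction in isolation, with
// hypothetical gas figures:
#[test]
fn per_tx_gas_from_cumulative_sketch() {
    let cumulative = [21_000u64, 60_000, 95_000];
    let mut prev = 0u64;
    let per_tx: Vec<u64> = cumulative
        .iter()
        .map(|&c| {
            let gas = c - prev; // gas consumed by this tx alone
            prev = c;
            gas
        })
        .collect();
    assert_eq!(per_tx, vec![21_000, 39_000, 35_000]);
}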
async fn adjust_transaction_receipt<Eth: EthWrapper>(
tx_hash: B256,
eth_api: &Eth,
) -> Result<Option<RpcReceipt<Eth::NetworkTypes>>, Eth::Error> {
match eth_api.load_transaction_and_receipt(tx_hash).await? {
Some((_, meta, _)) => {
// LoadReceipt::block_transaction_receipt reloads the block anyway, so loading
// it again here costs little extra
let Some((system_tx_count, block_receipts)) =
adjust_block_receipts(meta.block_hash.into(), eth_api).await?
else {
unreachable!();
};
Ok(Some(block_receipts.into_iter().nth(meta.index as usize - system_tx_count).unwrap()))
}
None => Ok(None),
}
}
// This function assumes that `block_id` is already validated by the caller.
fn system_tx_count_for_block<Eth: EthWrapper>(eth_api: &Eth, block_id: BlockId) -> usize {
let provider = eth_api.provider();
let header = provider.header_by_id(block_id).unwrap().unwrap();
header.extras.system_tx_count.try_into().unwrap()
}
#[async_trait]
impl<Eth: EthWrapper> EthBlockApiServer<RpcBlock<Eth::NetworkTypes>, RpcReceipt<Eth::NetworkTypes>>
for HlNodeBlockFilterHttp<Eth>
where
Eth: EthApiTypes + 'static,
ErrorObject<'static>: From<Eth::Error>,
{
/// Handler for: `eth_getBlockByHash`
async fn block_by_hash(
&self,
hash: B256,
full: bool,
) -> RpcResult<Option<RpcBlock<Eth::NetworkTypes>>> {
let res = self.eth_api.block_by_hash(hash, full).instrument(engine_span!()).await?;
Ok(res.map(|block| adjust_block(&block, &*self.eth_api)))
}
/// Handler for: `eth_getBlockByNumber`
async fn block_by_number(
&self,
number: BlockNumberOrTag,
full: bool,
) -> RpcResult<Option<RpcBlock<Eth::NetworkTypes>>> {
trace!(target: "rpc::eth", ?number, ?full, "Serving eth_getBlockByNumber");
let res = self.eth_api.block_by_number(number, full).instrument(engine_span!()).await?;
Ok(res.map(|block| adjust_block(&block, &*self.eth_api)))
}
/// Handler for: `eth_getBlockTransactionCountByHash`
async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult<Option<U256>> {
trace!(target: "rpc::eth", ?hash, "Serving eth_getBlockTransactionCountByHash");
let res =
self.eth_api.block_transaction_count_by_hash(hash).instrument(engine_span!()).await?;
Ok(res.map(|count| {
let sys_tx_count =
system_tx_count_for_block(&*self.eth_api, BlockId::Hash(hash.into()));
count - U256::from(sys_tx_count)
}))
}
/// Handler for: `eth_getBlockTransactionCountByNumber`
async fn block_transaction_count_by_number(
&self,
number: BlockNumberOrTag,
) -> RpcResult<Option<U256>> {
trace!(target: "rpc::eth", ?number, "Serving eth_getBlockTransactionCountByNumber");
let res = self
.eth_api
.block_transaction_count_by_number(number)
.instrument(engine_span!())
.await?;
Ok(res.map(|count| {
count - U256::from(system_tx_count_for_block(&*self.eth_api, number.into()))
}))
}
async fn transaction_receipt(
&self,
hash: B256,
) -> RpcResult<Option<RpcReceipt<Eth::NetworkTypes>>> {
trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionReceipt");
let eth_api = &*self.eth_api;
Ok(adjust_transaction_receipt(hash, eth_api).instrument(engine_span!()).await?)
}
/// Handler for: `eth_getBlockReceipts`
async fn block_receipts(
&self,
block_id: BlockId,
) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?block_id, "Serving eth_getBlockReceipts");
if self.eth_api.provider().block_by_id(block_id).map_err(EthApiError::from)?.is_none() {
return Ok(None);
}
let result =
adjust_block_receipts(block_id, &*self.eth_api).instrument(engine_span!()).await?;
Ok(result.map(|(_, receipts)| receipts))
}
}
pub fn install_hl_node_compliance<Node, EthApi>(
ctx: &mut RpcContext<Node, EthApi>,
) -> Result<(), eyre::Error>
where
Node: FullNodeComponents,
Node::Provider: BlockIdReader + BlockReader<Block = crate::HlBlock>,
EthApi: EthWrapper,
ErrorObject<'static>: From<EthApi::Error>,
{
ctx.modules.replace_configured(
HlNodeFilterHttp::new(
Arc::new(ctx.registry.eth_handlers().filter.clone()),
Arc::new(ctx.registry.eth_api().provider().clone()),
)
.into_rpc(),
)?;
ctx.modules.replace_configured(
HlNodeFilterWs::new(
Arc::new(ctx.registry.eth_handlers().pubsub.clone()),
Arc::new(ctx.registry.eth_api().provider().clone()),
Box::new(ctx.node().task_executor().clone()),
)
.into_rpc(),
)?;
ctx.modules.replace_configured(
HlNodeBlockFilterHttp::new(Arc::new(ctx.registry.eth_api().clone())).into_rpc(),
)?;
ctx.modules
.merge_configured(HlSystemTransactionExt::new(ctx.registry.eth_api().clone()).into_rpc())?;
Ok(())
}

src/addons/mod.rs Normal file
View File

@ -0,0 +1,3 @@
pub mod call_forwarder;
pub mod hl_node_compliance;
pub mod tx_forwarder;

View File

@ -2,14 +2,14 @@ use std::time::Duration;
use alloy_json_rpc::RpcObject;
use alloy_network::Ethereum;
use alloy_primitives::{Bytes, B256};
use alloy_primitives::{B256, Bytes};
use alloy_rpc_types::TransactionRequest;
use jsonrpsee::{
http_client::{HttpClient, HttpClientBuilder},
proc_macros::rpc,
types::{error::INTERNAL_ERROR_CODE, ErrorObject},
types::{ErrorObject, error::INTERNAL_ERROR_CODE},
};
use jsonrpsee_core::{async_trait, client::ClientT, ClientError, RpcResult};
use jsonrpsee_core::{ClientError, RpcResult, async_trait, client::ClientT};
use reth::rpc::{result::internal_rpc_err, server_types::eth::EthApiError};
use reth_rpc_eth_api::RpcReceipt;
@ -37,7 +37,7 @@ impl EthForwarderExt {
Self { client }
}
fn from_client_error(e: ClientError, internal_error_prefix: &str) -> ErrorObject {
fn from_client_error(e: ClientError, internal_error_prefix: &str) -> ErrorObject<'static> {
match e {
ClientError::Call(e) => e,
_ => ErrorObject::owned(

View File

@ -1,5 +1,5 @@
use alloy_chains::{Chain, NamedChain};
use alloy_primitives::{b256, Address, Bytes, B256, B64, U256};
use alloy_primitives::{Address, B64, B256, Bytes, U256, b256};
use reth_chainspec::{ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition, Hardfork};
use reth_primitives::{Header, SealedHeader};
use std::sync::LazyLock;
@ -7,7 +7,6 @@ use std::sync::LazyLock;
static GENESIS_HASH: B256 =
b256!("d8fcc13b6a195b88b7b2da3722ff6cad767b13a8c1e9ffb1c73aa9d216d895f0");
/// Dev hardforks
pub static HL_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| {
ChainHardforks::new(vec![
(EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
@ -34,12 +33,10 @@ pub static HL_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| {
])
});
/// The Hyperliquid Mainnet spec
pub fn hl_mainnet() -> ChainSpec {
pub fn hl_chainspec(chain: Chain, genesis: &'static str) -> ChainSpec {
ChainSpec {
chain: Chain::from_named(NamedChain::Hyperliquid),
genesis: serde_json::from_str(include_str!("genesis.json"))
.expect("Can't deserialize Hyperliquid Mainnet genesis json"),
chain,
genesis: serde_json::from_str(genesis).expect("Can't deserialize Hyperliquid genesis json"),
genesis_header: empty_genesis_header(),
paris_block_and_final_difficulty: Some((0, U256::from(0))),
hardforks: HL_HARDFORKS.clone(),
@ -48,6 +45,18 @@ pub fn hl_mainnet() -> ChainSpec {
}
}
/// The Hyperliquid Mainnet spec
pub fn hl_mainnet() -> ChainSpec {
hl_chainspec(Chain::from_named(NamedChain::Hyperliquid), include_str!("genesis.json"))
}
/// The Hyperliquid Testnet spec
pub fn hl_testnet() -> ChainSpec {
// Note: Testnet sync starts from a snapshotted state [1] instead of the genesis block,
// so the `alloc` field is unused and reusing the mainnet genesis file is fine.
hl_chainspec(Chain::from_id_unchecked(998), include_str!("genesis.json"))
}
/// Empty genesis header for Hyperliquid Mainnet.
///
/// The exact value is not known per se, but the parent hash of block 1 is known to be

View File

@ -1,9 +1,7 @@
//! Chain specification for HyperEVM.
pub mod hl;
pub mod parser;
use crate::hardforks::{hl::HlHardfork, HlHardforks};
use alloy_consensus::Header;
use crate::{hardforks::HlHardforks, node::primitives::{header::HlHeaderExtras, HlHeader}};
use alloy_eips::eip7840::BlobParams;
use alloy_genesis::Genesis;
use alloy_primitives::{Address, B256, U256};
@ -13,17 +11,19 @@ use reth_chainspec::{
};
use reth_discv4::NodeRecord;
use reth_evm::eth::spec::EthExecutorSpec;
use std::{fmt::Display, sync::Arc};
use std::fmt::Display;
pub const MAINNET_CHAIN_ID: u64 = 999;
pub const TESTNET_CHAIN_ID: u64 = 998;
/// Hl chain spec type.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct HlChainSpec {
/// [`ChainSpec`].
pub inner: ChainSpec,
pub genesis_header: HlHeader,
}
impl EthChainSpec for HlChainSpec {
type Header = Header;
type Header = HlHeader;
fn blob_params_at_timestamp(&self, timestamp: u64) -> Option<BlobParams> {
self.inner.blob_params_at_timestamp(timestamp)
@ -37,10 +37,6 @@ impl EthChainSpec for HlChainSpec {
self.inner.chain()
}
fn base_fee_params_at_block(&self, block_number: u64) -> BaseFeeParams {
self.inner.base_fee_params_at_block(block_number)
}
fn base_fee_params_at_timestamp(&self, timestamp: u64) -> BaseFeeParams {
self.inner.base_fee_params_at_timestamp(timestamp)
}
@ -61,8 +57,8 @@ impl EthChainSpec for HlChainSpec {
Box::new(self.inner.display_hardforks())
}
fn genesis_header(&self) -> &Header {
self.inner.genesis_header()
fn genesis_header(&self) -> &HlHeader {
&self.genesis_header
}
fn genesis(&self) -> &Genesis {
@ -72,10 +68,6 @@ impl EthChainSpec for HlChainSpec {
fn bootnodes(&self) -> Option<Vec<NodeRecord>> {
self.inner.bootnodes()
}
fn is_optimism(&self) -> bool {
false
}
}
impl Hardforks for HlChainSpec {
@ -102,23 +94,13 @@ impl Hardforks for HlChainSpec {
}
}
impl From<ChainSpec> for HlChainSpec {
fn from(value: ChainSpec) -> Self {
Self { inner: value }
}
}
impl EthereumHardforks for HlChainSpec {
fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition {
self.inner.ethereum_fork_activation(fork)
}
}
impl HlHardforks for HlChainSpec {
fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition {
self.fork(fork)
}
}
impl HlHardforks for HlChainSpec {}
impl EthExecutorSpec for HlChainSpec {
fn deposit_contract_address(&self) -> Option<Address> {
@ -126,27 +108,29 @@ impl EthExecutorSpec for HlChainSpec {
}
}
impl From<HlChainSpec> for ChainSpec {
fn from(value: HlChainSpec) -> Self {
value.inner
}
}
impl HlHardforks for Arc<HlChainSpec> {
fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition {
self.as_ref().hl_fork_activation(fork)
}
}
impl HlChainSpec {
pub const MAINNET_RPC_URL: &str = "https://rpc.hyperliquid.xyz/evm";
pub const TESTNET_RPC_URL: &str = "https://rpc.hyperliquid-testnet.xyz/evm";
pub fn official_rpc_url(&self) -> &'static str {
match self.inner.chain().id() {
999 => Self::MAINNET_RPC_URL,
998 => Self::TESTNET_RPC_URL,
MAINNET_CHAIN_ID => Self::MAINNET_RPC_URL,
TESTNET_CHAIN_ID => Self::TESTNET_RPC_URL,
_ => unreachable!("Unreachable since ChainSpecParser won't return other chains"),
}
}
pub fn official_s3_bucket(self) -> &'static str {
match self.inner.chain().id() {
MAINNET_CHAIN_ID => "hl-mainnet-evm-blocks",
TESTNET_CHAIN_ID => "hl-testnet-evm-blocks",
_ => unreachable!("Unreachable since ChainSpecParser won't return other chains"),
}
}
fn new(inner: ChainSpec) -> Self {
let genesis_header =
HlHeader { inner: inner.genesis_header().clone(), extras: HlHeaderExtras::default() };
Self { inner, genesis_header }
}
}
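// A quick sketch exercising the helpers above (module-internal, since `new` is
// private; assumes `hl::hl_mainnet` is in scope). The URL and bucket values
// are the constants shown:
#[test]
fn mainnet_endpoints_sketch() {
    let mainnet = HlChainSpec::new(hl_mainnet());
    assert_eq!(mainnet.official_rpc_url(), HlChainSpec::MAINNET_RPC_URL);
    assert_eq!(mainnet.clone().official_s3_bucket(), "hl-mainnet-evm-blocks");
}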

View File

@ -1,11 +1,11 @@
use crate::chainspec::HlChainSpec;
use crate::chainspec::{HlChainSpec, hl::hl_testnet};
use super::hl::hl_mainnet;
use reth_cli::chainspec::ChainSpecParser;
use std::sync::Arc;
/// Chains supported by HyperEVM. First value should be used as the default.
pub const SUPPORTED_CHAINS: &[&str] = &["mainnet"];
pub const SUPPORTED_CHAINS: &[&str] = &["mainnet", "testnet"];
/// Hyperliquid chain specification parser.
#[derive(Debug, Clone, Default)]
@ -26,7 +26,8 @@ impl ChainSpecParser for HlChainSpecParser {
/// Currently mainnet and testnet are supported.
pub fn chain_value_parser(s: &str) -> eyre::Result<Arc<HlChainSpec>> {
match s {
"mainnet" => Ok(Arc::new(HlChainSpec { inner: hl_mainnet() })),
"mainnet" => Ok(Arc::new(HlChainSpec::new(hl_mainnet()))),
"testnet" => Ok(Arc::new(HlChainSpec::new(hl_testnet()))),
_ => Err(eyre::eyre!("Unsupported chain: {}", s)),
}
}
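// With both variants wired in, the parser round-trips every supported name and
// rejects anything else; a small sketch against the items above:
#[test]
fn parser_roundtrip_sketch() -> eyre::Result<()> {
    for name in SUPPORTED_CHAINS {
        let spec = chain_value_parser(name)?;
        println!("{name}: chain id {}", spec.inner.chain.id());
    }
    assert!(chain_value_parser("sepolia").is_err());
    Ok(())
}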

View File

@ -1,4 +1,4 @@
use alloy_primitives::{BlockNumber, B256};
use alloy_primitives::{B256, BlockNumber};
use reth_provider::{BlockNumReader, ProviderError};
use std::cmp::Ordering;

View File

@ -2,8 +2,8 @@ use super::HlEvmInner;
use crate::evm::{spec::HlSpecId, transaction::HlTxTr};
use reth_revm::context::ContextTr;
use revm::{
context::Cfg, context_interface::Block, handler::instructions::EthInstructions,
interpreter::interpreter::EthInterpreter, Context, Database,
Context, Database, context::Cfg, context_interface::Block,
handler::instructions::EthInstructions, interpreter::interpreter::EthInterpreter,
};
/// Trait that allows for hl HlEvm to be built.

View File

@ -1,8 +1,8 @@
use crate::evm::{spec::HlSpecId, transaction::HlTxEnv};
use revm::{
Context, Journal, MainContext,
context::{BlockEnv, CfgEnv, TxEnv},
database_interface::EmptyDB,
Context, Journal, MainContext,
};
/// Type alias for the default context type of the HlEvm.

View File

@ -1,16 +1,16 @@
use super::HlEvmInner;
use crate::evm::{spec::HlSpecId, transaction::HlTxTr};
use revm::{
context::{result::HaltReason, ContextSetters},
context_interface::{
result::{EVMError, ExecutionResult, ResultAndState},
Cfg, ContextTr, Database, JournalTr,
},
handler::{instructions::EthInstructions, PrecompileProvider},
inspector::{InspectCommitEvm, InspectEvm, Inspector, JournalExt},
interpreter::{interpreter::EthInterpreter, InterpreterResult},
state::EvmState,
DatabaseCommit, ExecuteCommitEvm, ExecuteEvm,
context::{ContextSetters, result::HaltReason},
context_interface::{
Cfg, ContextTr, Database, JournalTr,
result::{EVMError, ExecutionResult, ResultAndState},
},
handler::{PrecompileProvider, instructions::EthInstructions},
inspector::{InspectCommitEvm, InspectEvm, Inspector, JournalExt},
interpreter::{InterpreterResult, interpreter::EthInterpreter},
state::EvmState,
};
// Type alias for HL context

View File

@ -1,19 +1,23 @@
use revm::{
Inspector,
bytecode::opcode::BLOCKHASH,
context::{ContextSetters, Evm, FrameStack},
context_interface::ContextTr,
handler::{
EthFrame, EthPrecompiles, EvmTr, FrameInitOrResult, FrameTr, PrecompileProvider,
evm::{ContextDbError, FrameInitResult},
instructions::{EthInstructions, InstructionProvider},
EthFrame, EthPrecompiles, EvmTr, FrameInitOrResult, FrameTr, PrecompileProvider,
},
inspector::{InspectorEvmTr, JournalExt},
interpreter::{interpreter::EthInterpreter, InterpreterResult},
Inspector,
interpreter::{Instruction, InterpreterResult, interpreter::EthInterpreter},
};
use crate::chainspec::MAINNET_CHAIN_ID;
pub mod builder;
pub mod ctx;
mod exec;
mod patch;
pub struct HlEvmInner<
CTX: ContextTr,
@ -26,10 +30,22 @@ impl<CTX: ContextTr, INSP>
HlEvmInner<CTX, INSP, EthInstructions<EthInterpreter, CTX>, EthPrecompiles>
{
pub fn new(ctx: CTX, inspector: INSP) -> Self {
let mut instruction = EthInstructions::new_mainnet();
const NON_PLACEHOLDER_BLOCK_HASH_HEIGHT: u64 = 243_538;
if ctx.chain_id() == MAINNET_CHAIN_ID &&
ctx.block_number() < NON_PLACEHOLDER_BLOCK_HASH_HEIGHT
{
instruction.insert_instruction(
BLOCKHASH,
Instruction::new(patch::blockhash_returning_placeholder, 20),
);
}
Self(Evm {
ctx,
inspector,
instruction: EthInstructions::new_mainnet(),
instruction,
precompiles: EthPrecompiles::default(),
frame_stack: FrameStack::new(),
})
@ -125,23 +141,3 @@ where
self.0.frame_return_result(result)
}
}
// #[cfg(test)]
// mod test {
// use super::{builder::HlBuilder, ctx::DefaultHl};
// use revm::{
// inspector::{InspectEvm, NoOpInspector},
// Context, ExecuteEvm,
// };
// #[test]
// fn default_run_bsc() {
// let ctx = Context::bsc();
// let mut evm = ctx.build_bsc_with_inspector(NoOpInspector {});
// // execute
// let _ = evm.replay();
// // inspect
// let _ = evm.inspect_replay();
// }
// }

src/evm/api/patch.rs Normal file
View File

@ -0,0 +1,49 @@
//! Modified version of `blockhash` instruction before block `243538`.
//!
//! This is a mainnet-specific fix for the `blockhash` instruction,
//! copied and modified from revm-interpreter-25.0.1/src/instructions/host.rs.
use alloy_primitives::keccak256;
use revm::{
context::Host,
interpreter::{
_count, InstructionContext, InterpreterTypes, as_u64_saturated, interpreter_types::StackTr,
popn_top,
},
primitives::{BLOCK_HASH_HISTORY, U256},
};
/// Implements the BLOCKHASH instruction.
///
/// Gets the hash of one of the 256 most recent complete blocks.
pub fn blockhash_returning_placeholder<WIRE: InterpreterTypes, H: Host + ?Sized>(
context: InstructionContext<'_, H, WIRE>,
) {
//gas!(context.interpreter, gas::BLOCKHASH);
popn_top!([], number, context.interpreter);
let requested_number = *number;
let block_number = context.host.block_number();
let Some(diff) = block_number.checked_sub(requested_number) else {
*number = U256::ZERO;
return;
};
let diff = as_u64_saturated!(diff);
// BLOCKHASH should push zero if the requested number equals the current block number.
if diff == 0 {
*number = U256::ZERO;
return;
}
*number = if diff <= BLOCK_HASH_HISTORY {
// NOTE: This is an HL-specific modification that returns the placeholder hash
// before a specific block.
let hash = keccak256(as_u64_saturated!(requested_number).to_string().as_bytes());
U256::from_be_bytes(hash.0)
} else {
U256::ZERO
}
}
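// Concretely, the placeholder is keccak256 over the decimal string of the
// requested height, read back as a big-endian word; a standalone sketch
// (helper name made up for illustration, height hypothetical):
fn placeholder_blockhash(number: u64) -> U256 {
    // e.g. keccak256(b"100000") for height 100_000
    U256::from_be_bytes(keccak256(number.to_string().as_bytes()).0)
}

#[test]
fn placeholder_blockhash_sketch() {
    assert_ne!(placeholder_blockhash(100_000), U256::ZERO);
}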

View File

@ -1 +0,0 @@

View File

@ -1,4 +1,3 @@
pub mod api;
mod handler;
pub mod spec;
pub mod transaction;

View File

@ -1,22 +1,15 @@
use revm::primitives::hardfork::SpecId;
use std::str::FromStr;
#[repr(u8)]
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[allow(non_camel_case_types)]
#[allow(clippy::upper_case_acronyms)]
pub enum HlSpecId {
/// Placeholder for evm cancun fork
#[default]
V1, // V1
V1,
}
impl HlSpecId {
pub const fn is_enabled_in(self, other: HlSpecId) -> bool {
other as u8 <= self as u8
}
/// Converts the [`HlSpecId`] into a [`SpecId`].
pub const fn into_eth_spec(self) -> SpecId {
match self {
Self::V1 => SpecId::CANCUN,
@ -25,31 +18,8 @@ impl HlSpecId {
}
impl From<HlSpecId> for SpecId {
/// Converts the [`HlSpecId`] into a [`SpecId`].
fn from(spec: HlSpecId) -> Self {
spec.into_eth_spec()
}
}
/// String identifiers for HL hardforks
pub mod name {
pub const V1: &str = "V1";
}
impl FromStr for HlSpecId {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s {
name::V1 => Self::V1,
_ => return Err(format!("Unknown HL spec: {s}")),
})
}
}
impl From<HlSpecId> for &'static str {
fn from(spec_id: HlSpecId) -> Self {
match spec_id {
HlSpecId::V1 => name::V1,
}
}
}

View File

@ -7,13 +7,13 @@ use reth_primitives_traits::SignerRecoverable;
use revm::{
context::TxEnv,
context_interface::transaction::Transaction,
primitives::{Address, Bytes, TxKind, B256, U256},
primitives::{Address, B256, Bytes, TxKind, U256},
};
#[auto_impl(&, &mut, Box, Arc)]
pub trait HlTxTr: Transaction {}
#[derive(Clone, Debug, PartialEq, Eq)]
#[derive(Clone, Debug, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct HlTxEnv<T: Transaction> {
pub base: T,
@ -25,12 +25,6 @@ impl<T: Transaction> HlTxEnv<T> {
}
}
impl Default for HlTxEnv<TxEnv> {
fn default() -> Self {
Self { base: TxEnv::default() }
}
}
impl<T: Transaction> Transaction for HlTxEnv<T> {
type AccessListItem<'a>
= T::AccessListItem<'a>
@ -130,12 +124,13 @@ impl FromRecoveredTx<TransactionSigned> for HlTxEnv<TxEnv> {
impl FromTxWithEncoded<TransactionSigned> for HlTxEnv<TxEnv> {
fn from_encoded_tx(tx: &TransactionSigned, sender: Address, _encoded: Bytes) -> Self {
use reth_primitives::Transaction;
let base = match tx.clone().into_inner().into_typed_transaction() {
reth_primitives::Transaction::Legacy(tx) => TxEnv::from_recovered_tx(&tx, sender),
reth_primitives::Transaction::Eip2930(tx) => TxEnv::from_recovered_tx(&tx, sender),
reth_primitives::Transaction::Eip1559(tx) => TxEnv::from_recovered_tx(&tx, sender),
reth_primitives::Transaction::Eip4844(tx) => TxEnv::from_recovered_tx(&tx, sender),
reth_primitives::Transaction::Eip7702(tx) => TxEnv::from_recovered_tx(&tx, sender),
Transaction::Legacy(tx) => TxEnv::from_recovered_tx(&tx, sender),
Transaction::Eip2930(tx) => TxEnv::from_recovered_tx(&tx, sender),
Transaction::Eip1559(tx) => TxEnv::from_recovered_tx(&tx, sender),
Transaction::Eip4844(tx) => TxEnv::from_recovered_tx(&tx, sender),
Transaction::Eip7702(tx) => TxEnv::from_recovered_tx(&tx, sender),
};
Self { base }

View File

@ -2,7 +2,7 @@
use alloy_chains::{Chain, NamedChain};
use core::any::Any;
use reth_chainspec::ForkCondition;
use reth_ethereum_forks::{hardfork, ChainHardforks, EthereumHardfork, Hardfork};
use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, Hardfork, hardfork};
hardfork!(
/// The name of a hl hardfork.
@ -13,88 +13,5 @@ hardfork!(
HlHardfork {
/// Initial version
V1,
/// block.number bugfix
V2,
/// gas mismatch bugfix
V3,
}
);
impl HlHardfork {
/// Retrieves the activation block for the specified hardfork on the given chain.
pub fn activation_block<H: Hardfork>(self, fork: H, chain: Chain) -> Option<u64> {
if chain == Chain::from_named(NamedChain::Hyperliquid) {
return Self::hl_mainnet_activation_block(fork);
}
None
}
/// Retrieves the activation timestamp for the specified hardfork on the given chain.
pub fn activation_timestamp<H: Hardfork>(self, fork: H, chain: Chain) -> Option<u64> {
None
}
/// Retrieves the activation block for the specified hardfork on the HyperLiquid mainnet.
pub fn hl_mainnet_activation_block<H: Hardfork>(fork: H) -> Option<u64> {
match_hardfork(
fork,
|fork| match fork {
EthereumHardfork::Frontier |
EthereumHardfork::Homestead |
EthereumHardfork::Tangerine |
EthereumHardfork::SpuriousDragon |
EthereumHardfork::Byzantium |
EthereumHardfork::Constantinople |
EthereumHardfork::Petersburg |
EthereumHardfork::Istanbul |
EthereumHardfork::MuirGlacier |
EthereumHardfork::Berlin |
EthereumHardfork::London |
EthereumHardfork::Shanghai |
EthereumHardfork::Cancun => Some(0),
_ => None,
},
|fork| match fork {
Self::V1 | Self::V2 | Self::V3 => Some(0),
_ => None,
},
)
}
/// Hl mainnet list of hardforks.
pub fn hl_mainnet() -> ChainHardforks {
ChainHardforks::new(vec![
(EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::London.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Shanghai.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Cancun.boxed(), ForkCondition::Block(0)),
(Self::V1.boxed(), ForkCondition::Block(0)),
(Self::V2.boxed(), ForkCondition::Block(0)),
(Self::V3.boxed(), ForkCondition::Block(0)),
])
}
}
/// Match helper method since it's not possible to match on `dyn Hardfork`
fn match_hardfork<H, HF, HHF>(fork: H, hardfork_fn: HF, hl_hardfork_fn: HHF) -> Option<u64>
where
H: Hardfork,
HF: Fn(&EthereumHardfork) -> Option<u64>,
HHF: Fn(&HlHardfork) -> Option<u64>,
{
let fork: &dyn Any = &fork;
if let Some(fork) = fork.downcast_ref::<EthereumHardfork>() {
return hardfork_fn(fork);
}
fork.downcast_ref::<HlHardfork>().and_then(hl_hardfork_fn)
}

View File

@ -1,13 +1,14 @@
//! Hard forks of hl protocol.
//! Hard forks of HyperEVM.
#![allow(unused)]
use hl::HlHardfork;
use reth_chainspec::{EthereumHardforks, ForkCondition};
pub mod hl;
use hl::HlHardfork;
use reth_chainspec::{EthereumHardforks, ForkCondition};
use std::sync::Arc;
/// Extends [`EthereumHardforks`] with hl helper methods.
pub trait HlHardforks: EthereumHardforks {
/// Retrieves [`ForkCondition`] by an [`HlHardfork`]. If `fork` is not present, returns
/// [`ForkCondition::Never`].
fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition;
}
///
/// Currently a placeholder for future use.
pub trait HlHardforks: EthereumHardforks {}
impl<T: HlHardforks> HlHardforks for Arc<T> {}

View File

@ -1,310 +0,0 @@
/// We need to override the following methods:
/// Filter:
/// - eth_getLogs
/// - eth_subscribe
///
/// Block (handled by HlEthApi already):
/// - eth_getBlockByNumber/eth_getBlockByHash
/// - eth_getBlockReceipts
use crate::HlBlock;
use alloy_consensus::TxReceipt;
use alloy_rpc_types::{
pubsub::{Params, SubscriptionKind},
Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind,
};
use jsonrpsee::{PendingSubscriptionSink, SubscriptionMessage, SubscriptionSink};
use jsonrpsee_core::{async_trait, RpcResult};
use jsonrpsee_types::ErrorObject;
use reth::{
api::FullNodeComponents, builder::rpc::RpcContext, rpc::result::internal_rpc_err,
tasks::TaskSpawner,
};
use reth_network::NetworkInfo;
use reth_primitives::NodePrimitives;
use reth_provider::{BlockIdReader, BlockReader, ReceiptProvider, TransactionsProvider};
use reth_rpc::{EthFilter, EthPubSub};
use reth_rpc_eth_api::{
EthApiServer, EthFilterApiServer, EthPubSubApiServer, FullEthApiTypes, RpcBlock, RpcHeader,
RpcNodeCore, RpcNodeCoreExt, RpcReceipt, RpcTransaction, RpcTxReq,
};
use serde::Serialize;
use std::sync::Arc;
use tokio_stream::{Stream, StreamExt};
use tracing::trace;
pub trait EthWrapper:
EthApiServer<
RpcTxReq<Self::NetworkTypes>,
RpcTransaction<Self::NetworkTypes>,
RpcBlock<Self::NetworkTypes>,
RpcReceipt<Self::NetworkTypes>,
RpcHeader<Self::NetworkTypes>,
> + FullEthApiTypes
+ RpcNodeCoreExt<
Provider: BlockIdReader + BlockReader<Block = HlBlock>,
Primitives: NodePrimitives<
SignedTx = <<Self as RpcNodeCore>::Provider as TransactionsProvider>::Transaction,
>,
Network: NetworkInfo,
> + 'static
{
}
impl <
T:
EthApiServer<
RpcTxReq<Self::NetworkTypes>,
RpcTransaction<Self::NetworkTypes>,
RpcBlock<Self::NetworkTypes>,
RpcReceipt<Self::NetworkTypes>,
RpcHeader<Self::NetworkTypes>,
> + FullEthApiTypes
+ RpcNodeCoreExt<
Provider: BlockIdReader + BlockReader<Block = HlBlock>,
Primitives: NodePrimitives<
SignedTx = <<Self as RpcNodeCore>::Provider as TransactionsProvider>::Transaction,
>,
Network: NetworkInfo,
> + 'static
> EthWrapper for T {
}
pub struct HlNodeFilterHttp<Eth: EthWrapper> {
filter: Arc<EthFilter<Eth>>,
provider: Arc<Eth::Provider>,
}
impl<Eth: EthWrapper> HlNodeFilterHttp<Eth> {
pub fn new(filter: Arc<EthFilter<Eth>>, provider: Arc<Eth::Provider>) -> Self {
Self { filter, provider }
}
}
#[async_trait]
impl<Eth: EthWrapper> EthFilterApiServer<RpcTransaction<Eth::NetworkTypes>>
for HlNodeFilterHttp<Eth>
{
/// Handler for `eth_newFilter`
async fn new_filter(&self, filter: Filter) -> RpcResult<FilterId> {
trace!(target: "rpc::eth", "Serving eth_newFilter");
self.filter.new_filter(filter).await
}
/// Handler for `eth_newBlockFilter`
async fn new_block_filter(&self) -> RpcResult<FilterId> {
trace!(target: "rpc::eth", "Serving eth_newBlockFilter");
self.filter.new_block_filter().await
}
/// Handler for `eth_newPendingTransactionFilter`
async fn new_pending_transaction_filter(
&self,
kind: Option<PendingTransactionFilterKind>,
) -> RpcResult<FilterId> {
trace!(target: "rpc::eth", "Serving eth_newPendingTransactionFilter");
self.filter.new_pending_transaction_filter(kind).await
}
/// Handler for `eth_getFilterChanges`
async fn filter_changes(
&self,
id: FilterId,
) -> RpcResult<FilterChanges<RpcTransaction<Eth::NetworkTypes>>> {
trace!(target: "rpc::eth", "Serving eth_getFilterChanges");
self.filter.filter_changes(id).await.map_err(ErrorObject::from)
}
/// Returns an array of all logs matching filter with given id.
///
/// Returns an error if no matching log filter exists.
///
/// Handler for `eth_getFilterLogs`
async fn filter_logs(&self, id: FilterId) -> RpcResult<Vec<Log>> {
trace!(target: "rpc::eth", "Serving eth_getFilterLogs");
self.filter.filter_logs(id).await.map_err(ErrorObject::from)
}
/// Handler for `eth_uninstallFilter`
async fn uninstall_filter(&self, id: FilterId) -> RpcResult<bool> {
trace!(target: "rpc::eth", "Serving eth_uninstallFilter");
self.filter.uninstall_filter(id).await
}
/// Returns logs matching given filter object.
///
/// Handler for `eth_getLogs`
async fn logs(&self, filter: Filter) -> RpcResult<Vec<Log>> {
trace!(target: "rpc::eth", "Serving eth_getLogs");
let logs = EthFilterApiServer::logs(&*self.filter, filter).await?;
let provider = self.provider.clone();
Ok(logs.into_iter().filter_map(|log| exclude_system_tx::<Eth>(log, &provider)).collect())
}
}
pub struct HlNodeFilterWs<Eth: EthWrapper> {
pubsub: Arc<EthPubSub<Eth>>,
provider: Arc<Eth::Provider>,
subscription_task_spawner: Box<dyn TaskSpawner + 'static>,
}
impl<Eth: EthWrapper> HlNodeFilterWs<Eth> {
pub fn new(
pubsub: Arc<EthPubSub<Eth>>,
provider: Arc<Eth::Provider>,
subscription_task_spawner: Box<dyn TaskSpawner + 'static>,
) -> Self {
Self { pubsub, provider, subscription_task_spawner }
}
}
#[async_trait]
impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>>
for HlNodeFilterWs<Eth>
{
/// Handler for `eth_subscribe`
async fn subscribe(
&self,
pending: PendingSubscriptionSink,
kind: SubscriptionKind,
params: Option<Params>,
) -> jsonrpsee::core::SubscriptionResult {
let sink = pending.accept().await?;
let pubsub = self.pubsub.clone();
let provider = self.provider.clone();
self.subscription_task_spawner.spawn(Box::pin(async move {
if kind == SubscriptionKind::Logs {
// if no params are provided, use default filter params
let filter = match params {
Some(Params::Logs(filter)) => *filter,
Some(Params::Bool(_)) => {
return;
}
_ => Default::default(),
};
let _ = pipe_from_stream(
sink,
pubsub
.log_stream(filter)
.filter_map(|log| exclude_system_tx::<Eth>(log, &provider)),
)
.await;
} else {
let _ = pubsub.handle_accepted(sink, kind, params).await;
};
}));
Ok(())
}
}
fn exclude_system_tx<Eth: EthWrapper>(mut log: Log, provider: &Eth::Provider) -> Option<Log> {
let transaction_index = log.transaction_index?;
let log_index = log.log_index?;
let receipts = provider.receipts_by_block(log.block_number?.into()).unwrap()?;
// System transactions are always at the beginning of the block,
// so we can use the transaction index to determine if the log is from a system transaction,
// and if it is, we can exclude it.
//
// For non-system transactions, we can just return the log as is, and the client will
// adjust the transaction index accordingly.
let mut system_tx_count = 0u64;
let mut system_tx_logs_count = 0u64;
for receipt in receipts {
let is_system_tx = receipt.cumulative_gas_used() == 0;
if is_system_tx {
system_tx_count += 1;
system_tx_logs_count += receipt.logs().len() as u64;
}
}
if system_tx_count > transaction_index {
return None;
}
log.transaction_index = Some(transaction_index - system_tx_count);
log.log_index = Some(log_index - system_tx_logs_count);
Some(log)
}
/// Helper to convert a serde error into an [`ErrorObject`]
#[derive(Debug, thiserror::Error)]
#[error("Failed to serialize subscription item: {0}")]
pub struct SubscriptionSerializeError(#[from] serde_json::Error);
impl SubscriptionSerializeError {
const fn new(err: serde_json::Error) -> Self {
Self(err)
}
}
impl From<SubscriptionSerializeError> for ErrorObject<'static> {
fn from(value: SubscriptionSerializeError) -> Self {
internal_rpc_err(value.to_string())
}
}
async fn pipe_from_stream<T, St>(
sink: SubscriptionSink,
mut stream: St,
) -> Result<(), ErrorObject<'static>>
where
St: Stream<Item = T> + Unpin,
T: Serialize,
{
loop {
tokio::select! {
_ = sink.closed() => {
// connection dropped
break Ok(())
},
maybe_item = stream.next() => {
let item = match maybe_item {
Some(item) => item,
None => {
// stream ended
break Ok(())
},
};
let msg = SubscriptionMessage::new(
sink.method_name(),
sink.subscription_id(),
&item
).map_err(SubscriptionSerializeError::new)?;
if sink.send(msg).await.is_err() {
break Ok(());
}
}
}
}
}
pub fn install_hl_node_compliance<Node, EthApi>(
ctx: RpcContext<Node, EthApi>,
) -> Result<(), eyre::Error>
where
Node: FullNodeComponents,
Node::Provider: BlockIdReader + BlockReader<Block = crate::HlBlock>,
EthApi: EthWrapper,
{
ctx.modules.replace_configured(
HlNodeFilterHttp::new(
Arc::new(ctx.registry.eth_handlers().filter.clone()),
Arc::new(ctx.registry.eth_api().provider().clone()),
)
.into_rpc(),
)?;
ctx.modules.replace_configured(
HlNodeFilterWs::new(
Arc::new(ctx.registry.eth_handlers().pubsub.clone()),
Arc::new(ctx.registry.eth_api().provider().clone()),
Box::new(ctx.node().task_executor().clone()),
)
.into_rpc(),
)?;
Ok(())
}

View File

@ -1,11 +1,10 @@
pub mod call_forwarder;
pub mod addons;
pub mod chainspec;
pub mod consensus;
mod evm;
mod hardforks;
pub mod hl_node_compliance;
pub mod node;
pub mod pseudo_peer;
pub mod tx_forwarder;
pub mod version;
pub use node::primitives::{HlBlock, HlBlockBody, HlPrimitives};
pub use node::primitives::{HlBlock, HlBlockBody, HlHeader, HlPrimitives};

View File

@ -1,15 +1,21 @@
use std::sync::Arc;
use clap::Parser;
use reth::builder::NodeHandle;
use reth::builder::{NodeBuilder, NodeHandle, WithLaunchContext};
use reth_db::DatabaseEnv;
use reth_hl::{
addons::{
call_forwarder::{self, CallForwarderApiServer},
chainspec::parser::HlChainSpecParser,
hl_node_compliance::install_hl_node_compliance,
node::{
cli::{Cli, HlNodeArgs},
storage::tables::Tables,
HlNode,
},
tx_forwarder::{self, EthForwarderApiServer},
},
chainspec::{HlChainSpec, parser::HlChainSpecParser},
node::{
HlNode,
cli::{Cli, HlNodeArgs},
rpc::precompile::{HlBlockPrecompileApiServer, HlBlockPrecompileExt},
storage::tables::Tables,
},
};
use tracing::info;
@ -21,20 +27,19 @@ static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
fn main() -> eyre::Result<()> {
reth_cli_util::sigsegv_handler::install();
// Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided.
if std::env::var_os("RUST_BACKTRACE").is_none() {
std::env::set_var("RUST_BACKTRACE", "1");
}
// Initialize custom version metadata before parsing CLI so --version uses reth-hl values
reth_hl::version::init_reth_hl_version();
Cli::<HlChainSpecParser, HlNodeArgs>::parse().run(|builder, ext| async move {
Cli::<HlChainSpecParser, HlNodeArgs>::parse().run(
|builder: WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, HlChainSpec>>,
ext: HlNodeArgs| async move {
let default_upstream_rpc_url = builder.config().chain.official_rpc_url();
builder.builder.database.create_tables_for::<Tables>()?;
let (node, engine_handle_tx) =
HlNode::new(ext.block_source_args.parse().await?, ext.hl_node_compliant);
HlNode::new(ext.block_source_args.parse().await?, ext.debug_cutoff_height);
let NodeHandle { node, node_exit_future: exit_future } = builder
.node(node)
.extend_rpc_modules(move |ctx| {
.extend_rpc_modules(move |mut ctx| {
let upstream_rpc_url =
ext.upstream_rpc_url.unwrap_or_else(|| default_upstream_rpc_url.to_owned());
@ -55,18 +60,32 @@ fn main() -> eyre::Result<()> {
}
if ext.hl_node_compliant {
install_hl_node_compliance(ctx)?;
install_hl_node_compliance(&mut ctx)?;
info!("hl-node compliant mode enabled");
}
if !ext.experimental_eth_get_proof {
ctx.modules.remove_method_from_configured("eth_getProof");
info!("eth_getProof is disabled by default");
}
ctx.modules.merge_configured(
HlBlockPrecompileExt::new(ctx.registry.eth_api().clone()).into_rpc(),
)?;
Ok(())
})
.apply(|mut builder| {
builder.db_mut().create_tables_for::<Tables>().expect("create tables");
builder
})
.launch()
.await?;
engine_handle_tx.send(node.beacon_engine_handle.clone()).unwrap();
exit_future.await
})?;
},
)?;
Ok(())
}

View File

@ -1,37 +1,49 @@
use crate::{
chainspec::{parser::HlChainSpecParser, HlChainSpec},
chainspec::{HlChainSpec, parser::HlChainSpecParser},
node::{
consensus::HlConsensus, evm::config::HlEvmConfig, network::HlNetworkPrimitives, HlNode,
HlNode, consensus::HlConsensus, evm::config::HlEvmConfig, migrate::Migrator,
storage::tables::Tables,
},
pseudo_peer::BlockSourceArgs,
};
use clap::{Args, Parser};
use reth::{
args::LogArgs,
CliRunner,
args::{DatabaseArgs, DatadirArgs, LogArgs},
builder::{NodeBuilder, WithLaunchContext},
cli::Commands,
prometheus_exporter::install_prometheus_recorder,
version::{LONG_VERSION, SHORT_VERSION},
CliRunner,
version::version_metadata,
};
use reth_chainspec::EthChainSpec;
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_commands::launcher::FnLauncher;
use reth_db::DatabaseEnv;
use reth_cli_commands::{common::EnvironmentArgs, launcher::FnLauncher};
use reth_db::{DatabaseEnv, init_db, mdbx::init_db_for};
use reth_tracing::FileWorkerGuard;
use std::{
fmt::{self},
future::Future,
sync::Arc,
};
use tracing::info;
macro_rules! not_applicable {
($command:ident) => {
todo!("{} is not applicable for HL", stringify!($command))
};
}
#[derive(Debug, Clone, Args)]
#[non_exhaustive]
pub struct HlNodeArgs {
#[command(flatten)]
pub block_source_args: BlockSourceArgs,
/// Debug cutoff height.
///
/// This option is used to cut off the block import at a specific height.
#[arg(long, env = "DEBUG_CUTOFF_HEIGHT")]
pub debug_cutoff_height: Option<u64>,
/// Upstream RPC URL to forward incoming transactions.
///
/// Defaults to Hyperliquid's RPC URL (https://rpc.hyperliquid.xyz/evm) when not provided.
@ -52,13 +64,31 @@ pub struct HlNodeArgs {
/// This is useful when read precompile is needed for gas estimation.
#[arg(long, env = "FORWARD_CALL")]
pub forward_call: bool,
/// Experimental: enables the eth_getProof RPC method.
///
/// Note: Due to the state root difference, trie updates* may not function correctly in all
/// scenarios. For example, incremental root updates are not possible, which can cause
/// eth_getProof to malfunction in some cases.
///
/// This limitation does not impact normal node functionality, except for state root (which is
/// unused) and eth_getProof. The archival state is maintained by block order, not by trie
/// updates. As a precaution, nanoreth disables eth_getProof by default to prevent
/// potential issues.
///
/// Use --experimental-eth-get-proof to forcibly enable eth_getProof, assuming trie updates are
/// working as intended. Enabling this by default will be tracked in #15.
///
/// * Refers to the Merkle trie used for eth_getProof and state root, not actual state values.
#[arg(long, env = "EXPERIMENTAL_ETH_GET_PROOF")]
pub experimental_eth_get_proof: bool,
}
/// The main reth_hl cli interface.
///
/// This is the entrypoint to the executable.
#[derive(Debug, Parser)]
#[command(author, version = SHORT_VERSION, long_version = LONG_VERSION, about = "Reth", long_about = None)]
#[command(author, version = version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)]
pub struct Cli<Spec: ChainSpecParser = HlChainSpecParser, Ext: clap::Args + fmt::Debug = HlNodeArgs>
{
/// The command to run
@ -78,20 +108,25 @@ where
///
/// This accepts a closure that is used to launch the node via the
/// [`NodeCommand`](reth_cli_commands::node::NodeCommand).
pub fn run<L, Fut>(self, launcher: L) -> eyre::Result<()>
where
L: FnOnce(WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>, Ext) -> Fut,
Fut: Future<Output = eyre::Result<()>>,
{
pub fn run(
self,
launcher: impl AsyncFnOnce(
WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>,
Ext,
) -> eyre::Result<()>,
) -> eyre::Result<()> {
self.with_runner(CliRunner::try_default_runtime()?, launcher)
}
/// Execute the configured cli command with the provided [`CliRunner`].
pub fn with_runner<L, Fut>(mut self, runner: CliRunner, launcher: L) -> eyre::Result<()>
where
L: FnOnce(WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>, Ext) -> Fut,
Fut: Future<Output = eyre::Result<()>>,
{
pub fn with_runner(
mut self,
runner: CliRunner,
launcher: impl AsyncFnOnce(
WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>,
Ext,
) -> eyre::Result<()>,
) -> eyre::Result<()> {
// Add network name if available to the logs dir
if let Some(chain_spec) = self.command.chain_spec() {
self.logs.log_file_directory =
@ -104,44 +139,41 @@ where
// Install the prometheus recorder to be sure to record all metrics
let _ = install_prometheus_recorder();
let components =
|spec: Arc<C::ChainSpec>| (HlEvmConfig::new(spec.clone()), HlConsensus::new(spec));
let components = |spec: Arc<C::ChainSpec>| {
(HlEvmConfig::new(spec.clone()), Arc::new(HlConsensus::new(spec)))
};
match self.command {
Commands::Node(command) => runner.run_command_until_exit(|ctx| {
Self::migrate_db(&command.chain, &command.datadir, &command.db)
.expect("Failed to migrate database");
command.execute(ctx, FnLauncher::new::<C, Ext>(launcher))
}),
Commands::Init(command) => {
runner.run_blocking_until_ctrl_c(command.execute::<HlNode>())
}
Commands::InitState(command) => {
// Need to invoke `init_db_for` to create `BlockReadPrecompileCalls` table
Self::init_db(&command.env)?;
runner.run_blocking_until_ctrl_c(command.execute::<HlNode>())
}
Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()),
Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::<HlNode>()),
Commands::Stage(command) => runner.run_command_until_exit(|ctx| {
command.execute::<HlNode, _, _, HlNetworkPrimitives>(ctx, components)
}),
Commands::P2P(command) => {
runner.run_until_ctrl_c(command.execute::<HlNetworkPrimitives>())
Commands::Stage(command) => {
runner.run_command_until_exit(|ctx| command.execute::<HlNode, _>(ctx, components))
}
Commands::Config(command) => runner.run_until_ctrl_c(command.execute()),
Commands::Recover(command) => {
runner.run_command_until_exit(|ctx| command.execute::<HlNode>(ctx))
}
Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::<HlNode>()),
Commands::Import(command) => {
runner.run_blocking_until_ctrl_c(command.execute::<HlNode, _, _>(components))
runner.run_blocking_until_ctrl_c(command.execute::<HlNode, _>(components))
}
Commands::Debug(_command) => todo!(),
Commands::P2P(_command) => not_applicable!(P2P),
Commands::ImportEra(_command) => not_applicable!(ImportEra),
Commands::Download(_command) => not_applicable!(Download),
Commands::ExportEra(_) => not_applicable!(ExportEra),
Commands::ReExecute(_) => not_applicable!(ReExecute),
#[cfg(feature = "dev")]
Commands::TestVectors(_command) => todo!(),
Commands::ImportEra(_command) => {
todo!()
}
Commands::Download(_command) => {
todo!()
}
Commands::TestVectors(_command) => not_applicable!(TestVectors),
}
}
@ -153,4 +185,21 @@ where
let guard = self.logs.init_tracing()?;
Ok(guard)
}
fn init_db(env: &EnvironmentArgs<C>) -> eyre::Result<()> {
let data_dir = env.datadir.clone().resolve_datadir(env.chain.chain());
let db_path = data_dir.db();
init_db(db_path.clone(), env.db.database_args())?;
init_db_for::<_, Tables>(db_path, env.db.database_args())?;
Ok(())
}
fn migrate_db(
chain: &HlChainSpec,
datadir: &DatadirArgs,
db: &DatabaseArgs,
) -> eyre::Result<()> {
Migrator::<HlNode>::new(chain.clone(), datadir.clone(), *db)?.migrate_db()?;
Ok(())
}
}

View File

@ -1,8 +1,8 @@
use crate::{hardforks::HlHardforks, node::HlNode, HlBlock, HlBlockBody, HlPrimitives};
use crate::{hardforks::HlHardforks, node::{primitives::HlHeader, HlNode}, HlBlock, HlBlockBody, HlPrimitives};
use reth::{
api::FullNodeTypes,
api::{FullNodeTypes, NodeTypes},
beacon_consensus::EthBeaconConsensus,
builder::{components::ConsensusBuilder, BuilderContext},
builder::{BuilderContext, components::ConsensusBuilder},
consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator},
consensus_common::validation::{
validate_against_parent_4844, validate_against_parent_hash_number,
@ -23,7 +23,7 @@ impl<Node> ConsensusBuilder<Node> for HlConsensusBuilder
where
Node: FullNodeTypes<Types = HlNode>,
{
type Consensus = Arc<dyn FullConsensus<HlPrimitives, Error = ConsensusError>>;
type Consensus = Arc<HlConsensus<<Node::Types as NodeTypes>::ChainSpec>>;
async fn build_consensus(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Consensus> {
Ok(Arc::new(HlConsensus::new(ctx.chain_spec())))
@ -39,7 +39,10 @@ pub struct HlConsensus<ChainSpec> {
chain_spec: Arc<ChainSpec>,
}
impl<ChainSpec: EthChainSpec + HlHardforks> HlConsensus<ChainSpec> {
impl<ChainSpec> HlConsensus<ChainSpec>
where
ChainSpec: EthChainSpec + HlHardforks,
{
/// Create a new instance of [`HlConsensus`]
pub fn new(chain_spec: Arc<ChainSpec>) -> Self {
Self { inner: EthBeaconConsensus::new(chain_spec.clone()), chain_spec }
@ -62,15 +65,19 @@ pub fn validate_against_parent_timestamp<H: BlockHeader>(
Ok(())
}
impl<ChainSpec: EthChainSpec + HlHardforks> HeaderValidator for HlConsensus<ChainSpec> {
fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> {
impl<H, ChainSpec> HeaderValidator<H> for HlConsensus<ChainSpec>
where
H: BlockHeader,
ChainSpec: EthChainSpec<Header = H> + HlHardforks,
{
fn validate_header(&self, header: &SealedHeader<H>) -> Result<(), ConsensusError> {
self.inner.validate_header(header)
}
fn validate_header_against_parent(
&self,
header: &SealedHeader,
parent: &SealedHeader,
header: &SealedHeader<H>,
parent: &SealedHeader<H>,
) -> Result<(), ConsensusError> {
validate_against_parent_hash_number(header.header(), parent)?;
@ -83,7 +90,7 @@ impl<ChainSpec: EthChainSpec + HlHardforks> HeaderValidator for HlConsensus<Chai
// )?;
// ensure that the blob gas fields for this block
if let Some(blob_params) = self.chain_spec.blob_params_at_timestamp(header.timestamp) {
if let Some(blob_params) = self.chain_spec.blob_params_at_timestamp(header.timestamp()) {
validate_against_parent_4844(header.header(), parent.header(), blob_params)?;
}
@ -91,13 +98,16 @@ impl<ChainSpec: EthChainSpec + HlHardforks> HeaderValidator for HlConsensus<Chai
}
}
impl<ChainSpec: EthChainSpec + HlHardforks> Consensus<HlBlock> for HlConsensus<ChainSpec> {
impl<ChainSpec> Consensus<HlBlock> for HlConsensus<ChainSpec>
where
ChainSpec: EthChainSpec<Header = HlHeader> + HlHardforks,
{
type Error = ConsensusError;
fn validate_body_against_header(
&self,
body: &HlBlockBody,
header: &SealedHeader,
header: &SealedHeader<HlHeader>,
) -> Result<(), ConsensusError> {
Consensus::<HlBlock>::validate_body_against_header(&self.inner, body, header)
}
@ -135,8 +145,9 @@ impl<ChainSpec: EthChainSpec + HlHardforks> Consensus<HlBlock> for HlConsensus<C
mod reth_copy;
impl<ChainSpec: EthChainSpec<Header = alloy_consensus::Header> + HlHardforks>
FullConsensus<HlPrimitives> for HlConsensus<ChainSpec>
impl<ChainSpec> FullConsensus<HlPrimitives> for HlConsensus<ChainSpec>
where
ChainSpec: EthChainSpec<Header = HlHeader> + HlHardforks,
{
fn validate_block_post_execution(
&self,

View File

@ -1,21 +1,21 @@
//! Copy of reth codebase.
use alloy_consensus::{proofs::calculate_receipt_root, BlockHeader, TxReceipt};
use crate::HlBlock;
use alloy_consensus::{BlockHeader, TxReceipt, proofs::calculate_receipt_root};
use alloy_eips::eip7685::Requests;
use alloy_primitives::{Bloom, B256};
use alloy_primitives::{B256, Bloom};
use reth::consensus::ConsensusError;
use reth_chainspec::EthereumHardforks;
use reth_primitives::{gas_spent_by_transactions, GotExpected, RecoveredBlock};
use reth_primitives_traits::{Block, Receipt as ReceiptTrait};
use reth_primitives::{GotExpected, RecoveredBlock, gas_spent_by_transactions};
use reth_primitives_traits::Receipt as ReceiptTrait;
pub fn validate_block_post_execution<B, R, ChainSpec>(
block: &RecoveredBlock<B>,
pub fn validate_block_post_execution<R, ChainSpec>(
block: &RecoveredBlock<HlBlock>,
chain_spec: &ChainSpec,
receipts: &[R],
requests: &Requests,
) -> Result<(), ConsensusError>
where
B: Block,
R: ReceiptTrait,
ChainSpec: EthereumHardforks,
{
@ -42,7 +42,7 @@ where
receipts.iter().filter(|&r| r.cumulative_gas_used() != 0).cloned().collect::<Vec<_>>();
if let Err(error) = verify_receipts(
block.header().receipts_root(),
block.header().logs_bloom(),
block.header().inner.logs_bloom(),
&receipts_for_root,
) {
tracing::debug!(%error, ?receipts, "receipts verification failed");

View File

@ -1,22 +1,9 @@
use std::sync::Arc;
use crate::{
node::{rpc::engine_api::payload::HlPayloadTypes, HlNode},
HlBlock, HlPrimitives,
};
use crate::{HlBlock, HlPrimitives};
use alloy_eips::eip7685::Requests;
use alloy_primitives::U256;
use reth::{
api::FullNodeTypes,
builder::{components::PayloadServiceBuilder, BuilderContext},
payload::{PayloadBuilderHandle, PayloadServiceCommand},
transaction_pool::TransactionPool,
};
use reth_evm::ConfigureEvm;
use reth_payload_primitives::BuiltPayload;
use reth_primitives::SealedBlock;
use tokio::sync::{broadcast, mpsc};
use tracing::warn;
use std::sync::Arc;
/// Built payload for HL. This is similar to [`EthBuiltPayload`] but without sidecars, as those
/// are included in [`HlBlock`].
@ -45,73 +32,3 @@ impl BuiltPayload for HlBuiltPayload {
self.requests.clone()
}
}
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
pub struct HlPayloadServiceBuilder;
impl<Node, Pool, Evm> PayloadServiceBuilder<Node, Pool, Evm> for HlPayloadServiceBuilder
where
Node: FullNodeTypes<Types = HlNode>,
Pool: TransactionPool,
Evm: ConfigureEvm,
{
async fn spawn_payload_builder_service(
self,
ctx: &BuilderContext<Node>,
_pool: Pool,
_evm_config: Evm,
) -> eyre::Result<PayloadBuilderHandle<HlPayloadTypes>> {
let (tx, mut rx) = mpsc::unbounded_channel();
ctx.task_executor().spawn_critical("payload builder", async move {
let mut subscriptions = Vec::new();
while let Some(message) = rx.recv().await {
match message {
PayloadServiceCommand::Subscribe(tx) => {
let (events_tx, events_rx) = broadcast::channel(100);
// Retain senders to make sure that channels are not getting closed
subscriptions.push(events_tx);
let _ = tx.send(events_rx);
}
message => warn!(?message, "Noop payload service received a message"),
}
}
});
Ok(PayloadBuilderHandle::new(tx))
}
}
// impl From<EthBuiltPayload> for HlBuiltPayload {
// fn from(value: EthBuiltPayload) -> Self {
// let EthBuiltPayload { id, block, fees, sidecars, requests } = value;
// HlBuiltPayload {
// id,
// block: block.into(),
// fees,
// requests,
// }
// }
// }
// pub struct HlPayloadBuilder<Inner> {
// inner: Inner,
// }
// impl<Inner> PayloadBuilder for HlPayloadBuilder<Inner>
// where
// Inner: PayloadBuilder<BuiltPayload = EthBuiltPayload>,
// {
// type Attributes = Inner::Attributes;
// type BuiltPayload = HlBuiltPayload;
// type Error = Inner::Error;
// fn try_build(
// &self,
// args: BuildArguments<Self::Attributes, Self::BuiltPayload>,
// ) -> Result<BuildOutcome<Self::BuiltPayload>, PayloadBuilderError> {
// let outcome = self.inner.try_build(args)?;
// }
// }

View File

@ -1,8 +1,6 @@
use crate::{
node::evm::config::{HlBlockExecutorFactory, HlEvmConfig},
HlBlock,
node::evm::config::{HlBlockExecutorFactory, HlEvmConfig}, HlBlock, HlHeader
};
use alloy_consensus::Header;
use reth_evm::{
block::BlockExecutionError,
execute::{BlockAssembler, BlockAssemblerInput},
@ -13,7 +11,7 @@ impl BlockAssembler<HlBlockExecutorFactory> for HlEvmConfig {
fn assemble_block(
&self,
input: BlockAssemblerInput<'_, '_, HlBlockExecutorFactory, Header>,
input: BlockAssemblerInput<'_, '_, HlBlockExecutorFactory, HlHeader>,
) -> Result<Self::Block, BlockExecutionError> {
let HlBlock { header, body } = self.block_assembler.assemble_block(input)?;
Ok(HlBlock { header, body })

View File

@ -1,37 +1,39 @@
use super::{executor::HlBlockExecutor, factory::HlEvmFactory};
use crate::{
HlBlock, HlBlockBody, HlHeader, HlPrimitives,
chainspec::HlChainSpec,
evm::{spec::HlSpecId, transaction::HlTxEnv},
hardforks::HlHardforks,
node::{
evm::{executor::is_system_transaction, receipt_builder::RethReceiptBuilder},
primitives::{BlockBody, TransactionSigned},
rpc::engine_api::validator::HlExecutionData,
types::HlExtras,
},
HlBlock, HlBlockBody, HlPrimitives,
};
use alloy_consensus::{BlockHeader, Header, Transaction as _, TxReceipt, EMPTY_OMMER_ROOT_HASH};
use alloy_eips::merge::BEACON_NONCE;
use alloy_consensus::{BlockHeader, EMPTY_OMMER_ROOT_HASH, Header, Transaction as _, TxReceipt};
use alloy_eips::{Encodable2718, merge::BEACON_NONCE};
use alloy_primitives::{Log, U256};
use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
use reth_evm::{
ConfigureEngineEvm, ConfigureEvm, EvmEnv, EvmEnvFor, EvmFactory, ExecutableTxIterator,
ExecutionCtxFor, FromRecoveredTx, FromTxWithEncoded, IntoTxEnv, NextBlockEnvAttributes,
block::{BlockExecutionError, BlockExecutorFactory, BlockExecutorFor},
eth::{receipt_builder::ReceiptBuilder, EthBlockExecutionCtx},
eth::{EthBlockExecutionCtx, receipt_builder::ReceiptBuilder},
execute::{BlockAssembler, BlockAssemblerInput},
precompiles::PrecompilesMap,
ConfigureEvm, EvmEnv, EvmFactory, ExecutionCtxFor, FromRecoveredTx, FromTxWithEncoded,
IntoTxEnv, NextBlockEnvAttributes,
};
use reth_evm_ethereum::EthBlockAssembler;
use reth_primitives::{logs_bloom, BlockTy, HeaderTy, Receipt, SealedBlock, SealedHeader};
use reth_primitives_traits::proofs;
use reth_payload_primitives::NewPayloadError;
use reth_primitives::{BlockTy, HeaderTy, Receipt, SealedBlock, SealedHeader, logs_bloom};
use reth_primitives_traits::{SignerRecoverable, WithEncoded, proofs};
use reth_provider::BlockExecutionResult;
use reth_revm::State;
use revm::{
Inspector,
context::{BlockEnv, CfgEnv, TxEnv},
context_interface::block::BlobExcessGasAndPrice,
primitives::hardfork::SpecId,
Inspector,
};
use std::{borrow::Cow, convert::Infallible, sync::Arc};
@ -52,7 +54,7 @@ where
fn assemble_block(
&self,
input: BlockAssemblerInput<'_, '_, F>,
input: BlockAssemblerInput<'_, '_, F, HlHeader>,
) -> Result<Self::Block, BlockExecutionError> {
// TODO: Copy of EthBlockAssembler::assemble_block
let inner = &self.inner;
@ -69,10 +71,10 @@ where
let timestamp = evm_env.block_env.timestamp.saturating_to();
// Filter out system tx receipts
let transactions_for_root: Vec<TransactionSigned> =
transactions.iter().filter(|t| !is_system_transaction(t)).cloned().collect::<Vec<_>>();
let receipts_for_root: Vec<Receipt> =
receipts.iter().filter(|r| r.cumulative_gas_used() != 0).cloned().collect::<Vec<_>>();
let transactions_for_root: Vec<_> =
transactions.iter().filter(|t| !is_system_transaction(t)).cloned().collect();
let receipts_for_root: Vec<_> =
receipts.iter().filter(|r| r.cumulative_gas_used() != 0).cloned().collect();
let transactions_root = proofs::calculate_transaction_root(&transactions_for_root);
let receipts_root = Receipt::calculate_receipt_root_no_memo(&receipts_for_root);
@ -104,7 +106,10 @@ where
} else {
// for the first post-fork block, both parent.blob_gas_used and
// parent.excess_blob_gas are evaluated as 0
Some(alloy_eips::eip7840::BlobParams::cancun().next_block_excess_blob_gas(0, 0))
Some(
alloy_eips::eip7840::BlobParams::cancun()
.next_block_excess_blob_gas_osaka(0, 0, 0),
)
};
}
@ -131,6 +136,9 @@ where
excess_blob_gas,
requests_hash,
};
let system_tx_count =
transactions.iter().filter(|t| is_system_transaction(t)).count() as u64;
let header = HlHeader::from_ethereum_header(header, receipts, system_tx_count);
Ok(Self::Block {
header,
@ -264,6 +272,8 @@ where
}
}
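// HL blocks never carry ommers, so a static empty slice lets execution contexts
// borrow a `&'static [Header]` without allocating.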
static EMPTY_OMMERS: [Header; 0] = [];
impl ConfigureEvm for HlEvmConfig
where
Self: Send + Sync + Unpin + Clone + 'static,
@ -282,7 +292,7 @@ where
self
}
fn evm_env(&self, header: &Header) -> EvmEnv<HlSpecId> {
fn evm_env(&self, header: &HlHeader) -> Result<EvmEnv<HlSpecId>, Self::Error> {
let blob_params = self.chain_spec().blob_params_at_timestamp(header.timestamp);
let spec = revm_spec_by_timestamp_and_block_number(
self.chain_spec().clone(),
@ -293,7 +303,6 @@ where
// configure evm env based on parent block
let mut cfg_env =
CfgEnv::new().with_chain_id(self.chain_spec().chain().id()).with_spec(spec);
if let Some(blob_params) = &blob_params {
cfg_env.set_max_blobs_per_tx(blob_params.max_blobs_per_tx);
}
@ -323,12 +332,12 @@ where
blob_excess_gas_and_price,
};
EvmEnv { cfg_env, block_env }
Ok(EvmEnv { cfg_env, block_env })
}
fn next_evm_env(
&self,
parent: &Header,
parent: &HlHeader,
attributes: &Self::NextBlockEnvCtx,
) -> Result<EvmEnv<HlSpecId>, Self::Error> {
// ensure we're not missing any timestamp based hardforks
@ -372,38 +381,68 @@ where
fn context_for_block<'a>(
&self,
block: &'a SealedBlock<BlockTy<Self::Primitives>>,
) -> ExecutionCtxFor<'a, Self> {
) -> Result<ExecutionCtxFor<'a, Self>, Self::Error> {
let block_body = block.body();
let extras = HlExtras {
read_precompile_calls: block_body.read_precompile_calls.clone(),
highest_precompile_address: block_body.highest_precompile_address,
};
HlBlockExecutionCtx {
Ok(HlBlockExecutionCtx {
ctx: EthBlockExecutionCtx {
parent_hash: block.header().parent_hash,
parent_beacon_block_root: block.header().parent_beacon_block_root,
ommers: &block.body().ommers,
ommers: &EMPTY_OMMERS,
withdrawals: block.body().withdrawals.as_ref().map(Cow::Borrowed),
},
extras,
}
extras: HlExtras {
read_precompile_calls: block_body.read_precompile_calls.clone(),
highest_precompile_address: block_body.highest_precompile_address,
},
})
}
fn context_for_next_block(
&self,
parent: &SealedHeader<HeaderTy<Self::Primitives>>,
attributes: Self::NextBlockEnvCtx,
) -> ExecutionCtxFor<'_, Self> {
HlBlockExecutionCtx {
) -> Result<ExecutionCtxFor<'_, Self>, Self::Error> {
Ok(HlBlockExecutionCtx {
ctx: EthBlockExecutionCtx {
parent_hash: parent.hash(),
parent_beacon_block_root: attributes.parent_beacon_block_root,
ommers: &[],
withdrawals: attributes.withdrawals.map(Cow::Owned),
},
// TODO: hacky, double check if this is correct
extras: HlExtras::default(),
extras: HlExtras::default(), // TODO: hacky, double check if this is correct
})
}
}
impl ConfigureEngineEvm<HlExecutionData> for HlEvmConfig {
fn evm_env_for_payload(&self, payload: &HlExecutionData) -> EvmEnvFor<Self> {
self.evm_env(&payload.0.header).unwrap()
}
fn context_for_payload<'a>(&self, payload: &'a HlExecutionData) -> ExecutionCtxFor<'a, Self> {
let block = &payload.0;
HlBlockExecutionCtx {
ctx: EthBlockExecutionCtx {
parent_hash: block.header.parent_hash,
parent_beacon_block_root: block.header.parent_beacon_block_root,
ommers: &EMPTY_OMMERS,
withdrawals: block.body.withdrawals.as_ref().map(Cow::Borrowed),
},
extras: HlExtras {
read_precompile_calls: block.body.read_precompile_calls.clone(),
highest_precompile_address: block.body.highest_precompile_address,
},
}
}
fn tx_iterator_for_payload(
&self,
payload: &HlExecutionData,
) -> impl ExecutableTxIterator<Self> {
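// Recover each signer and pair the transaction with its EIP-2718 encoding,
// which is the form the executor's transaction iterator expects.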
payload.0.body.transactions.clone().into_iter().map(move |tx| {
let recovered = tx.try_into_recovered().map_err(NewPayloadError::other)?;
Ok::<_, NewPayloadError>(WithEncoded::new(recovered.encoded_2718().into(), recovered))
})
}
}

View File

@ -4,33 +4,30 @@ use crate::{
hardforks::HlHardforks,
node::{
primitives::TransactionSigned,
types::{ReadPrecompileInput, ReadPrecompileResult},
types::{HlExtras, ReadPrecompileInput, ReadPrecompileResult},
},
};
use alloy_consensus::{Transaction, TxReceipt};
use alloy_eips::{eip7685::Requests, Encodable2718};
use alloy_eips::{Encodable2718, eip7685::Requests};
use alloy_evm::{block::ExecutableTx, eth::receipt_builder::ReceiptBuilderCtx};
use alloy_primitives::{address, hex, Address, Bytes, U160, U256};
use alloy_primitives::{Address, Bytes, U160, U256, address, hex};
use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
use reth_evm::{
block::{BlockValidationError, CommitChanges},
Database, Evm, FromRecoveredTx, FromTxWithEncoded, IntoTxEnv, OnStateHook,
block::BlockValidationError,
eth::receipt_builder::ReceiptBuilder,
execute::{BlockExecutionError, BlockExecutor},
precompiles::{DynPrecompile, PrecompileInput, PrecompilesMap},
Database, Evm, FromRecoveredTx, FromTxWithEncoded, IntoTxEnv, OnStateHook,
};
use reth_provider::BlockExecutionResult;
use reth_revm::State;
use revm::{
context::{
result::{ExecutionResult, ResultAndState},
TxEnv,
},
DatabaseCommit,
context::{TxEnv, result::ResultAndState},
interpreter::instructions::utility::IntoU256,
precompile::{PrecompileError, PrecompileOutput, PrecompileResult},
primitives::HashMap,
state::Bytecode,
DatabaseCommit,
};
pub fn is_system_transaction(tx: &TransactionSigned) -> bool {
@ -72,7 +69,7 @@ fn run_precompile(
match *get {
ReadPrecompileResult::Ok { gas_used, ref bytes } => {
Ok(PrecompileOutput { gas_used, bytes: bytes.clone() })
Ok(PrecompileOutput { gas_used, bytes: bytes.clone(), reverted: false })
}
ReadPrecompileResult::OutOfGas => {
// Use all the gas passed to this precompile
@ -102,7 +99,7 @@ where
{
/// Creates a new HlBlockExecutor.
pub fn new(mut evm: EVM, ctx: HlBlockExecutionCtx<'a>, spec: Spec, receipt_builder: R) -> Self {
apply_precompiles(&mut evm, &ctx);
apply_precompiles(&mut evm, &ctx.extras);
Self { spec, evm, gas_used: 0, receipts: vec![], receipt_builder, ctx }
}
@ -110,7 +107,9 @@ where
const COREWRITER_ENABLED_BLOCK_NUMBER: u64 = 7578300;
const COREWRITER_CONTRACT_ADDRESS: Address =
address!("0x3333333333333333333333333333333333333333");
const COREWRITER_CODE: &[u8] = &hex!("608060405234801561000f575f5ffd5b5060043610610029575f3560e01c806317938e131461002d575b5f5ffd5b61004760048036038101906100429190610123565b610049565b005b5f5f90505b61019081101561006557808060010191505061004e565b503373ffffffffffffffffffffffffffffffffffffffff167f8c7f585fb295f7eb1e6aeb8fba61b23a4fe60beda405f0045073b185c74412e383836040516100ae9291906101c8565b60405180910390a25050565b5f5ffd5b5f5ffd5b5f5ffd5b5f5ffd5b5f5ffd5b5f5f83601f8401126100e3576100e26100c2565b5b8235905067ffffffffffffffff811115610100576100ff6100c6565b5b60208301915083600182028301111561011c5761011b6100ca565b5b9250929050565b5f5f60208385031215610139576101386100ba565b5b5f83013567ffffffffffffffff811115610156576101556100be565b5b610162858286016100ce565b92509250509250929050565b5f82825260208201905092915050565b828183375f83830152505050565b5f601f19601f8301169050919050565b5f6101a7838561016e565b93506101b483858461017e565b6101bd8361018c565b840190509392505050565b5f6020820190508181035f8301526101e181848661019c565b9050939250505056fea2646970667358221220f01517e1fbaff8af4bd72cb063cccecbacbb00b07354eea7dd52265d355474fb64736f6c634300081c0033");
const COREWRITER_CODE: &[u8] = &hex!(
"608060405234801561000f575f5ffd5b5060043610610029575f3560e01c806317938e131461002d575b5f5ffd5b61004760048036038101906100429190610123565b610049565b005b5f5f90505b61019081101561006557808060010191505061004e565b503373ffffffffffffffffffffffffffffffffffffffff167f8c7f585fb295f7eb1e6aeb8fba61b23a4fe60beda405f0045073b185c74412e383836040516100ae9291906101c8565b60405180910390a25050565b5f5ffd5b5f5ffd5b5f5ffd5b5f5ffd5b5f5ffd5b5f5f83601f8401126100e3576100e26100c2565b5b8235905067ffffffffffffffff811115610100576100ff6100c6565b5b60208301915083600182028301111561011c5761011b6100ca565b5b9250929050565b5f5f60208385031215610139576101386100ba565b5b5f83013567ffffffffffffffff811115610156576101556100be565b5b610162858286016100ce565b92509250509250929050565b5f82825260208201905092915050565b828183375f83830152505050565b5f601f19601f8301169050919050565b5f6101a7838561016e565b93506101b483858461017e565b6101bd8361018c565b840190509392505050565b5f6020820190508181035f8301526101e181848661019c565b9050939250505056fea2646970667358221220f01517e1fbaff8af4bd72cb063cccecbacbb00b07354eea7dd52265d355474fb64736f6c634300081c0033"
);
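// Deploy only at the activation block; on every other block this is a no-op.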
if self.evm.block().number != U256::from(COREWRITER_ENABLED_BLOCK_NUMBER) {
return Ok(());
@ -155,17 +154,16 @@ where
type Evm = E;
fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> {
apply_precompiles(&mut self.evm, &self.ctx);
apply_precompiles(&mut self.evm, &self.ctx.extras);
self.deploy_corewriter_contract()?;
Ok(())
}
fn execute_transaction_with_commit_condition(
fn execute_transaction_without_commit(
&mut self,
tx: impl ExecutableTx<Self>,
f: impl FnOnce(&ExecutionResult<<Self::Evm as Evm>::HaltReason>) -> CommitChanges,
) -> Result<Option<u64>, BlockExecutionError> {
) -> Result<ResultAndState<<Self::Evm as Evm>::HaltReason>, BlockExecutionError> {
// The sum of the transaction's gas limit, Tg, and the gas utilized in this block prior,
// must be no greater than the block's gasLimit.
let block_available_gas = self.evm.block().gas_limit - self.gas_used;
@ -178,16 +176,20 @@ where
.into());
}
// Execute transaction.
let ResultAndState { result, mut state } = self
.evm
.transact(tx)
.map_err(|err| BlockExecutionError::evm(err, tx.tx().trie_hash()))?;
if !f(&result).should_commit() {
return Ok(None);
// Execute transaction and return the result
self.evm.transact(&tx).map_err(|err| {
let hash = tx.tx().trie_hash();
BlockExecutionError::evm(err, hash)
})
}
fn commit_transaction(
&mut self,
output: ResultAndState<<Self::Evm as Evm>::HaltReason>,
tx: impl ExecutableTx<Self>,
) -> Result<u64, BlockExecutionError> {
let ResultAndState { result, mut state } = output;
let gas_used = result.gas_used();
// append gas used
@ -215,7 +217,7 @@ where
// Commit the state changes.
self.evm.db_mut().commit(state);
Ok(Some(gas_used))
Ok(gas_used)
}
fn finish(self) -> Result<(Self::Evm, BlockExecutionResult<R::Receipt>), BlockExecutionError> {
@ -240,10 +242,9 @@ where
}
}
fn apply_precompiles<'a, DB, EVM>(evm: &mut EVM, ctx: &HlBlockExecutionCtx<'a>)
pub fn apply_precompiles<EVM>(evm: &mut EVM, extras: &HlExtras)
where
EVM: Evm<DB = &'a mut State<DB>, Precompiles = PrecompilesMap>,
DB: Database + 'a,
EVM: Evm<Precompiles = PrecompilesMap>,
{
let block_number = evm.block().number;
let precompiles_mut = evm.precompiles_mut();
@ -255,9 +256,7 @@ where
precompiles_mut.apply_precompile(&address, |_| None);
}
}
for (address, precompile) in
ctx.extras.read_precompile_calls.clone().unwrap_or_default().0.iter()
{
for (address, precompile) in extras.read_precompile_calls.clone().unwrap_or_default().0.iter() {
let precompile = precompile.clone();
precompiles_mut.apply_precompile(address, |_| {
let precompiles_map: HashMap<ReadPrecompileInput, ReadPrecompileResult> =
@ -271,7 +270,7 @@ where
// NOTE: This is adapted from hyperliquid-dex/hyper-evm-sync#5
const WARM_PRECOMPILES_BLOCK_NUMBER: u64 = 8_197_684;
if block_number >= U256::from(WARM_PRECOMPILES_BLOCK_NUMBER) {
fill_all_precompiles(ctx, precompiles_mut);
fill_all_precompiles(extras, precompiles_mut);
}
}
@ -279,9 +278,9 @@ fn address_to_u64(address: Address) -> u64 {
address.into_u256().try_into().unwrap()
}
fn fill_all_precompiles<'a>(ctx: &HlBlockExecutionCtx<'a>, precompiles_mut: &mut PrecompilesMap) {
fn fill_all_precompiles(extras: &HlExtras, precompiles_mut: &mut PrecompilesMap) {
let lowest_address = 0x800;
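// 0x800 is the first read precompile; 0x80D is the default upper bound when the
// block does not advertise a higher address.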
let highest_address = ctx.extras.highest_precompile_address.map_or(0x80D, address_to_u64);
let highest_address = extras.highest_precompile_address.map_or(0x80D, address_to_u64);
for address in lowest_address..=highest_address {
let address = Address::from(U160::from(address));
precompiles_mut.apply_precompile(&address, |f| {

View File

@ -7,16 +7,16 @@ use crate::evm::{
spec::HlSpecId,
transaction::HlTxEnv,
};
use reth_evm::{precompiles::PrecompilesMap, Database, EvmEnv, EvmFactory};
use reth_evm::{Database, EvmEnv, EvmFactory, precompiles::PrecompilesMap};
use reth_revm::Context;
use revm::{
Inspector,
context::{
result::{EVMError, HaltReason},
TxEnv,
result::{EVMError, HaltReason},
},
inspector::NoOpInspector,
precompile::{PrecompileSpecId, Precompiles},
Inspector,
};
/// Factory producing [`HlEvm`].

View File

@ -1,6 +1,6 @@
use crate::{
evm::{
api::{ctx::HlContext, HlEvmInner},
api::{HlEvmInner, ctx::HlContext},
spec::HlSpecId,
transaction::HlTxEnv,
},
@ -10,18 +10,18 @@ use alloy_primitives::{Address, Bytes};
use config::HlEvmConfig;
use reth::{
api::FullNodeTypes,
builder::{components::ExecutorBuilder, BuilderContext},
builder::{BuilderContext, components::ExecutorBuilder},
};
use reth_evm::{Evm, EvmEnv};
use reth_evm::{Database, Evm, EvmEnv};
use revm::{
Context, ExecuteEvm, InspectEvm, Inspector,
context::{
result::{EVMError, ExecutionResult, HaltReason, Output, ResultAndState, SuccessReason},
BlockEnv, TxEnv,
result::{EVMError, ExecutionResult, HaltReason, Output, ResultAndState, SuccessReason},
},
handler::{instructions::EthInstructions, EthPrecompiles, PrecompileProvider},
interpreter::{interpreter::EthInterpreter, InterpreterResult},
handler::{EthPrecompiles, PrecompileProvider, instructions::EthInstructions},
interpreter::{InterpreterResult, interpreter::EthInterpreter},
state::EvmState,
Context, Database, ExecuteEvm, InspectEvm, Inspector,
};
use std::ops::{Deref, DerefMut};
@ -32,6 +32,8 @@ mod factory;
mod patch;
pub mod receipt_builder;
pub use executor::apply_precompiles;
/// HL EVM implementation.
///
/// This is a wrapper type around the `revm` evm with optional [`Inspector`] (tracing)
@ -75,7 +77,6 @@ where
DB: Database,
I: Inspector<HlContext<DB>>,
P: PrecompileProvider<HlContext<DB>, Output = InterpreterResult>,
<DB as revm::Database>::Error: std::marker::Send + std::marker::Sync + 'static,
{
type DB = DB;
type Tx = HlTxEnv<TxEnv>;
@ -97,11 +98,7 @@ where
&mut self,
tx: Self::Tx,
) -> Result<ResultAndState<Self::HaltReason>, Self::Error> {
if self.inspect {
self.inner.inspect_tx(tx)
} else {
self.inner.transact(tx)
}
if self.inspect { self.inner.inspect_tx(tx) } else { self.inner.transact(tx) }
}
fn transact_system_call(
@ -127,10 +124,6 @@ where
))
}
fn db_mut(&mut self) -> &mut Self::DB {
&mut self.journaled_state.database
}
fn finish(self) -> (Self::DB, EvmEnv<Self::Spec>) {
let Context { block: block_env, cfg: cfg_env, journaled_state, .. } = self.inner.0.ctx;
@ -141,20 +134,20 @@ where
self.inspect = enabled;
}
fn precompiles_mut(&mut self) -> &mut Self::Precompiles {
&mut self.inner.0.precompiles
fn components(&self) -> (&Self::DB, &Self::Inspector, &Self::Precompiles) {
(
&self.inner.0.ctx.journaled_state.database,
&self.inner.0.inspector,
&self.inner.0.precompiles,
)
}
fn inspector_mut(&mut self) -> &mut Self::Inspector {
&mut self.inner.0.inspector
}
fn precompiles(&self) -> &Self::Precompiles {
&self.inner.0.precompiles
}
fn inspector(&self) -> &Self::Inspector {
&self.inner.0.inspector
fn components_mut(&mut self) -> (&mut Self::DB, &mut Self::Inspector, &mut Self::Precompiles) {
(
&mut self.inner.0.ctx.journaled_state.database,
&mut self.inner.0.inspector,
&mut self.inner.0.precompiles,
)
}
}
@ -170,7 +163,6 @@ where
type EVM = HlEvmConfig;
async fn build_evm(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::EVM> {
let evm_config = HlEvmConfig::hl(ctx.chain_spec());
Ok(evm_config)
Ok(HlEvmConfig::hl(ctx.chain_spec()))
}
}

View File

@ -1,4 +1,4 @@
use alloy_primitives::{address, Address};
use alloy_primitives::{Address, address};
use reth_evm::block::BlockExecutionError;
use revm::{primitives::HashMap, state::Account};

View File

@ -1,5 +1,6 @@
use crate::node::primitives::TransactionSigned;
use alloy_evm::eth::receipt_builder::{ReceiptBuilder, ReceiptBuilderCtx};
use reth_codecs::alloy::transaction::Envelope;
use reth_evm::Evm;
use reth_primitives::Receipt;

405
src/node/migrate.rs Normal file
View File

@ -0,0 +1,405 @@
use alloy_consensus::Header;
use alloy_primitives::{b256, hex::ToHexExt, BlockHash, Bytes, B256, U256};
use reth::{
api::NodeTypesWithDBAdapter,
args::{DatabaseArgs, DatadirArgs},
dirs::{ChainPath, DataDirPath},
};
use reth_chainspec::EthChainSpec;
use reth_db::{
mdbx::{tx::Tx, RO},
models::CompactU256,
static_file::iter_static_files,
table::Decompress,
tables, DatabaseEnv,
};
use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW},
transaction::{DbTx, DbTxMut},
};
use reth_errors::ProviderResult;
use reth_ethereum_primitives::EthereumReceipt;
use reth_provider::{
providers::{NodeTypesForProvider, StaticFileProvider},
static_file::SegmentRangeInclusive,
DatabaseProvider, ProviderFactory, ReceiptProvider, StaticFileProviderFactory,
StaticFileSegment, StaticFileWriter,
};
use std::{fs::File, io::Write, path::PathBuf, sync::Arc};
use tracing::{info, warn};
use crate::{chainspec::HlChainSpec, HlHeader, HlPrimitives};
pub(crate) trait HlNodeType:
NodeTypesForProvider<ChainSpec = HlChainSpec, Primitives = HlPrimitives>
{
}
impl<N: NodeTypesForProvider<ChainSpec = HlChainSpec, Primitives = HlPrimitives>> HlNodeType for N {}
pub(super) struct Migrator<N: HlNodeType> {
data_dir: ChainPath<DataDirPath>,
provider_factory: ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>,
}
impl<N: HlNodeType> Migrator<N> {
const MIGRATION_PATH_SUFFIX: &'static str = "migration-tmp";
pub fn new(
chain_spec: HlChainSpec,
datadir: DatadirArgs,
database_args: DatabaseArgs,
) -> eyre::Result<Self> {
let data_dir = datadir.clone().resolve_datadir(chain_spec.chain());
let provider_factory = Self::provider_factory(chain_spec, datadir, database_args)?;
Ok(Self { data_dir, provider_factory })
}
pub fn sf_provider(&self) -> StaticFileProvider<HlPrimitives> {
self.provider_factory.static_file_provider()
}
pub fn migrate_db(&self) -> eyre::Result<()> {
let is_empty = Self::highest_block_number(&self.sf_provider()).is_none();
if is_empty {
return Ok(());
}
self.migrate_db_inner()
}
fn highest_block_number(sf_provider: &StaticFileProvider<HlPrimitives>) -> Option<u64> {
sf_provider.get_highest_static_file_block(StaticFileSegment::Headers)
}
fn migrate_db_inner(&self) -> eyre::Result<()> {
let migrated_mdbx = MigratorMdbx::<N>(self).migrate_mdbx()?;
let migrated_static_files = MigrateStaticFiles::<N>(self).migrate_static_files()?;
if migrated_mdbx || migrated_static_files {
info!("Database migrated successfully");
}
Ok(())
}
fn conversion_tmp_dir(&self) -> PathBuf {
self.data_dir.data_dir().join(Self::MIGRATION_PATH_SUFFIX)
}
fn provider_factory(
chain_spec: HlChainSpec,
datadir: DatadirArgs,
database_args: DatabaseArgs,
) -> eyre::Result<ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>> {
let data_dir = datadir.clone().resolve_datadir(chain_spec.chain());
let db_env = reth_db::init_db(data_dir.db(), database_args.database_args())?;
let static_file_provider = StaticFileProvider::read_only(data_dir.static_files(), false)?;
let db = Arc::new(db_env);
Ok(ProviderFactory::new(db, Arc::new(chain_spec), static_file_provider))
}
}
struct MigratorMdbx<'a, N: HlNodeType>(&'a Migrator<N>);
impl<'a, N: HlNodeType> MigratorMdbx<'a, N> {
fn migrate_mdbx(&self) -> eyre::Result<bool> {
// If any header is in the old format we need to migrate, so we check the first and the last one
let db_env = self.0.provider_factory.provider()?;
let mut cursor = db_env.tx_ref().cursor_read::<tables::Headers<Bytes>>()?;
let migration_needed = {
let first_is_old = match cursor.first()? {
Some((number, header)) => using_old_header(number, &header),
None => false,
};
let last_is_old = match cursor.last()? {
Some((number, header)) => using_old_header(number, &header),
None => false,
};
first_is_old || last_is_old
};
if !migration_needed {
return Ok(false);
}
self.migrate_mdbx_inner()?;
Ok(true)
}
fn migrate_mdbx_inner(&self) -> eyre::Result<()> {
// There shouldn't be many headers in mdbx, but we go through a file for safety
info!("Old database detected, migrating mdbx...");
let tmp_path = self.0.conversion_tmp_dir().join("headers.rmp");
let count = self.export_old_headers(&tmp_path)?;
self.import_new_headers(tmp_path, count)?;
Ok(())
}
fn export_old_headers(&self, tmp_path: &PathBuf) -> Result<i32, eyre::Error> {
let db_env = self.0.provider_factory.provider()?;
let mut cursor_read = db_env.tx_ref().cursor_read::<tables::Headers<Bytes>>()?;
let mut tmp_writer = File::create(tmp_path)?;
let mut count = 0;
let old_headers = cursor_read.walk(None)?.filter_map(|row| {
let (block_number, header) = row.ok()?;
if !using_old_header(block_number, &header) {
None
} else {
Some((block_number, Header::decompress(&header).ok()?))
}
});
for (block_number, header) in old_headers {
let receipt =
db_env.receipts_by_block(block_number.into())?.expect("Receipt not found");
let new_header = to_hl_header(receipt, header);
tmp_writer.write_all(&rmp_serde::to_vec(&(block_number, new_header))?)?;
count += 1;
}
Ok(count)
}
fn import_new_headers(&self, tmp_path: PathBuf, count: i32) -> Result<(), eyre::Error> {
let mut tmp_reader = File::open(tmp_path)?;
let db_env = self.0.provider_factory.provider_rw()?;
let mut cursor_write = db_env.tx_ref().cursor_write::<tables::Headers<Bytes>>()?;
for _ in 0..count {
let (number, header) = rmp_serde::from_read::<_, (u64, HlHeader)>(&mut tmp_reader)?;
cursor_write.upsert(number, &rmp_serde::to_vec(&header)?.into())?;
}
db_env.commit()?;
Ok(())
}
}
struct MigrateStaticFiles<'a, N: HlNodeType>(&'a Migrator<N>);
impl<'a, N: HlNodeType> MigrateStaticFiles<'a, N> {
fn iterate_files_for_segment(
&self,
block_range: SegmentRangeInclusive,
dir: &PathBuf,
) -> eyre::Result<Vec<(PathBuf, String)>> {
let prefix = StaticFileSegment::Headers.filename(&block_range);
let entries = std::fs::read_dir(dir)?
.map(|res| res.map(|e| e.path()))
.collect::<Result<Vec<_>, _>>()?;
Ok(entries
.into_iter()
.filter_map(|path| {
let file_name = path.file_name().and_then(|f| f.to_str())?;
if file_name.starts_with(&prefix) {
Some((path.clone(), file_name.to_string()))
} else {
None
}
})
.collect())
}
fn create_placeholder(&self, block_range: SegmentRangeInclusive) -> eyre::Result<()> {
// The direction is the opposite of `move_static_files_for_segment`: link from the live static_files dir into the tmp dir
let src = self.0.data_dir.static_files();
let dst = self.0.conversion_tmp_dir();
for (src_path, file_name) in self.iterate_files_for_segment(block_range, &src)? {
let dst_path = dst.join(file_name);
if dst_path.exists() {
std::fs::remove_file(&dst_path)?;
}
std::os::unix::fs::symlink(src_path, dst_path)?;
}
Ok(())
}
fn move_static_files_for_segment(
&self,
block_range: SegmentRangeInclusive,
) -> eyre::Result<()> {
let src = self.0.conversion_tmp_dir();
let dst = self.0.data_dir.static_files();
for (src_path, file_name) in self.iterate_files_for_segment(block_range, &src)? {
let dst_path = dst.join(file_name);
std::fs::remove_file(&dst_path)?;
std::fs::rename(&src_path, &dst_path)?;
}
// StaticFileProvider still needs the file to exist, so we leave a symlink in its place
self.create_placeholder(block_range)
}
fn migrate_static_files(&self) -> eyre::Result<bool> {
let conversion_tmp = self.0.conversion_tmp_dir();
let old_path = self.0.data_dir.static_files();
if conversion_tmp.exists() {
std::fs::remove_dir_all(&conversion_tmp)?;
}
std::fs::create_dir_all(&conversion_tmp)?;
let mut all_static_files = iter_static_files(&old_path)?;
let all_static_files =
all_static_files.remove(&StaticFileSegment::Headers).unwrap_or_default();
let provider = self.0.provider_factory.provider()?;
let mut first = true;
for (block_range, _tx_ranges) in all_static_files {
let migration_needed = self.using_old_header(block_range.start())? ||
self.using_old_header(block_range.end())?;
if !migration_needed {
// Create a placeholder symlink
self.create_placeholder(block_range)?;
continue;
}
if first {
info!("Old database detected, migrating static files...");
first = false;
}
let sf_provider = self.0.sf_provider();
let sf_tmp_provider = StaticFileProvider::<HlPrimitives>::read_write(&conversion_tmp)?;
let block_range_for_filename = sf_provider.find_fixed_range(block_range.start());
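// File names are keyed by the provider's fixed range, which can be wider than the
// range actually being migrated.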
migrate_single_static_file(&sf_tmp_provider, &sf_provider, &provider, block_range)?;
self.move_static_files_for_segment(block_range_for_filename)?;
}
Ok(!first)
}
fn using_old_header(&self, number: u64) -> eyre::Result<bool> {
let sf_provider = self.0.sf_provider();
let content = old_headers_range(&sf_provider, number..=number)?;
let &[row] = &content.as_slice() else {
warn!("No header found for block {}", number);
return Ok(false);
};
Ok(using_old_header(number, &row[0]))
}
}
// The problem is that `decompress` simply panics when the header is not valid,
// so we need heuristics to tell the old and new formats apart.
fn is_old_header(header: &[u8]) -> bool {
const SHA3_UNCLE_OFFSET: usize = 0x24;
const SHA3_UNCLE_HASH: B256 =
b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347");
const GENESIS_PREFIX: [u8; 4] = [0x01, 0x20, 0x00, 0xf8];
let Some(sha3_uncle_hash) = header.get(SHA3_UNCLE_OFFSET..SHA3_UNCLE_OFFSET + 32) else {
return false;
};
if sha3_uncle_hash == SHA3_UNCLE_HASH {
return true;
}
// the old genesis header is encoded differently, so fall back to a known prefix
if header.starts_with(&GENESIS_PREFIX) {
return true;
}
false
}
fn is_new_header(header: &[u8]) -> bool {
rmp_serde::from_slice::<HlHeader>(header).is_ok()
}
fn migrate_single_static_file<N: HlNodeType>(
sf_out: &StaticFileProvider<HlPrimitives>,
sf_in: &StaticFileProvider<HlPrimitives>,
provider: &DatabaseProvider<Tx<RO>, NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>,
block_range: SegmentRangeInclusive,
) -> Result<(), eyre::Error> {
info!("Migrating block range {}...", block_range);
// Split the block range into chunks of 100,000 blocks
const CHUNK_SIZE: u64 = 100000;
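// Chunking bounds memory usage: each iteration loads one slice of headers and
// receipts, converts it, and appends to the writer before moving on.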
for chunk in (0..=block_range.end()).step_by(CHUNK_SIZE as usize) {
let end = std::cmp::min(chunk + CHUNK_SIZE - 1, block_range.end());
let block_range = chunk..=end;
let headers = old_headers_range(sf_in, block_range.clone())?;
let receipts = provider.receipts_by_block_range(block_range.clone())?;
assert_eq!(headers.len(), receipts.len());
let mut writer = sf_out.get_writer(*block_range.start(), StaticFileSegment::Headers)?;
let new_headers = std::iter::zip(headers, receipts)
.map(|(header, receipts)| {
let eth_header = Header::decompress(&header[0]).unwrap();
let hl_header = to_hl_header(receipts, eth_header);
let difficulty: U256 = CompactU256::decompress(&header[1]).unwrap().into();
let hash = BlockHash::decompress(&header[2]).unwrap();
(hl_header, difficulty, hash)
})
.collect::<Vec<_>>();
for header in new_headers {
writer.append_header(&header.0, header.1, &header.2)?;
}
writer.commit().unwrap();
info!("Migrated block range {:?}...", block_range);
}
Ok(())
}
fn to_hl_header(receipts: Vec<EthereumReceipt>, eth_header: Header) -> HlHeader {
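// System transactions use no gas, so a zero cumulative_gas_used receipt identifies them.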
let system_tx_count = receipts.iter().filter(|r| r.cumulative_gas_used == 0).count();
HlHeader::from_ethereum_header(eth_header, &receipts, system_tx_count as u64)
}
fn old_headers_range(
provider: &StaticFileProvider<HlPrimitives>,
block_range: impl std::ops::RangeBounds<u64>,
) -> ProviderResult<Vec<Vec<Vec<u8>>>> {
Ok(provider
.fetch_range_with_predicate(
StaticFileSegment::Headers,
to_range(block_range),
|cursor, number| {
cursor.get(number.into(), 0b111).map(|rows| {
rows.map(|columns| columns.into_iter().map(|column| column.to_vec()).collect())
})
},
|_| true,
)?
.into_iter()
.collect())
}
// Copied from reth
fn to_range<R: std::ops::RangeBounds<u64>>(bounds: R) -> std::ops::Range<u64> {
let start = match bounds.start_bound() {
std::ops::Bound::Included(&v) => v,
std::ops::Bound::Excluded(&v) => v + 1,
std::ops::Bound::Unbounded => 0,
};
let end = match bounds.end_bound() {
std::ops::Bound::Included(&v) => v + 1,
std::ops::Bound::Excluded(&v) => v,
std::ops::Bound::Unbounded => u64::MAX,
};
start..end
}
fn using_old_header(number: u64, header: &[u8]) -> bool {
let deserialized_old = is_old_header(header);
let deserialized_new = is_new_header(header);
assert!(
deserialized_old ^ deserialized_new,
"Header is not valid: {} {}\ndeserialized_old: {}\ndeserialized_new: {}",
number,
header.encode_hex(),
deserialized_old,
deserialized_new
);
deserialized_old && !deserialized_new
}

View File

@ -2,38 +2,38 @@ use crate::{
chainspec::HlChainSpec,
node::{
pool::HlPoolBuilder,
primitives::{BlockBody, HlBlock, HlBlockBody, HlPrimitives, TransactionSigned},
primitives::{HlBlock, HlPrimitives},
rpc::{
HlEthApiBuilder,
engine_api::{
builder::HlEngineApiBuilder, payload::HlPayloadTypes,
validator::HlEngineValidatorBuilder,
validator::HlPayloadValidatorBuilder,
},
HlEthApiBuilder,
},
storage::HlStorage,
},
pseudo_peer::BlockSourceConfig,
};
use consensus::HlConsensusBuilder;
use engine::HlPayloadServiceBuilder;
use evm::HlExecutorBuilder;
use network::HlNetworkBuilder;
use reth::{
api::{FullNodeComponents, FullNodeTypes, NodeTypes},
api::{FullNodeTypes, NodeTypes},
builder::{
components::ComponentsBuilder, rpc::RpcAddOns, DebugNode, Node, NodeAdapter,
NodeComponentsBuilder,
Node, NodeAdapter,
components::{ComponentsBuilder, NoopPayloadServiceBuilder},
rpc::RpcAddOns,
},
};
use reth_engine_primitives::BeaconConsensusEngineHandle;
use reth_trie_db::MerklePatriciaTrie;
use std::sync::Arc;
use tokio::sync::{oneshot, Mutex};
use reth_engine_primitives::ConsensusEngineHandle;
use std::{marker::PhantomData, sync::Arc};
use tokio::sync::{Mutex, oneshot};
pub mod cli;
pub mod consensus;
pub mod engine;
pub mod evm;
pub mod migrate;
pub mod network;
pub mod primitives;
pub mod rpc;
@ -43,28 +43,27 @@ pub mod types;
/// Hl addons configuring RPC types
pub type HlNodeAddOns<N> =
RpcAddOns<N, HlEthApiBuilder, HlEngineValidatorBuilder, HlEngineApiBuilder>;
RpcAddOns<N, HlEthApiBuilder, HlPayloadValidatorBuilder, HlEngineApiBuilder>;
/// Type configuration for a regular Hl node.
#[derive(Debug, Clone)]
pub struct HlNode {
engine_handle_rx:
Arc<Mutex<Option<oneshot::Receiver<BeaconConsensusEngineHandle<HlPayloadTypes>>>>>,
engine_handle_rx: Arc<Mutex<Option<oneshot::Receiver<ConsensusEngineHandle<HlPayloadTypes>>>>>,
block_source_config: BlockSourceConfig,
hl_node_compliant: bool,
debug_cutoff_height: Option<u64>,
}
impl HlNode {
pub fn new(
block_source_config: BlockSourceConfig,
hl_node_compliant: bool,
) -> (Self, oneshot::Sender<BeaconConsensusEngineHandle<HlPayloadTypes>>) {
debug_cutoff_height: Option<u64>,
) -> (Self, oneshot::Sender<ConsensusEngineHandle<HlPayloadTypes>>) {
let (tx, rx) = oneshot::channel();
(
Self {
engine_handle_rx: Arc::new(Mutex::new(Some(rx))),
block_source_config,
hl_node_compliant,
debug_cutoff_height,
},
tx,
)
@ -79,7 +78,7 @@ impl HlNode {
) -> ComponentsBuilder<
Node,
HlPoolBuilder,
HlPayloadServiceBuilder,
NoopPayloadServiceBuilder,
HlNetworkBuilder,
HlExecutorBuilder,
HlConsensusBuilder,
@ -91,10 +90,11 @@ impl HlNode {
.node_types::<Node>()
.pool(HlPoolBuilder)
.executor(HlExecutorBuilder::default())
.payload(HlPayloadServiceBuilder::default())
.payload(NoopPayloadServiceBuilder::default())
.network(HlNetworkBuilder {
engine_handle_rx: self.engine_handle_rx.clone(),
block_source_config: self.block_source_config.clone(),
debug_cutoff_height: self.debug_cutoff_height,
})
.consensus(HlConsensusBuilder::default())
}
@ -103,7 +103,6 @@ impl HlNode {
impl NodeTypes for HlNode {
type Primitives = HlPrimitives;
type ChainSpec = HlChainSpec;
type StateCommitment = MerklePatriciaTrie;
type Storage = HlStorage;
type Payload = HlPayloadTypes;
}
@ -115,15 +114,13 @@ where
type ComponentsBuilder = ComponentsBuilder<
N,
HlPoolBuilder,
HlPayloadServiceBuilder,
NoopPayloadServiceBuilder,
HlNetworkBuilder,
HlExecutorBuilder,
HlConsensusBuilder,
>;
type AddOns = HlNodeAddOns<
NodeAdapter<N, <Self::ComponentsBuilder as NodeComponentsBuilder<N>>::Components>,
>;
type AddOns = HlNodeAddOns<NodeAdapter<N>>;
fn components_builder(&self) -> Self::ComponentsBuilder {
Self::components(self)
@ -131,37 +128,11 @@ where
fn add_ons(&self) -> Self::AddOns {
HlNodeAddOns::new(
HlEthApiBuilder { hl_node_compliant: self.hl_node_compliant },
HlEthApiBuilder { _nt: PhantomData },
Default::default(),
Default::default(),
Default::default(),
Default::default(),
)
}
}
impl<N> DebugNode<N> for HlNode
where
N: FullNodeComponents<Types = Self>,
{
type RpcBlock = alloy_rpc_types::Block;
fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> HlBlock {
let alloy_rpc_types::Block { header, transactions, withdrawals, .. } = rpc_block;
HlBlock {
header: header.inner,
body: HlBlockBody {
inner: BlockBody {
transactions: transactions
.into_transactions()
.map(|tx| TransactionSigned::Default(tx.inner.into_inner().into()))
.collect(),
ommers: Default::default(),
withdrawals,
},
sidecars: None,
read_precompile_calls: None,
highest_precompile_address: None,
},
}
}
}

View File

@ -8,7 +8,7 @@ use reth_primitives::NodePrimitives;
use service::{BlockMsg, ImportEvent, Outcome};
use std::{
fmt,
task::{ready, Context, Poll},
task::{Context, Poll, ready},
};
use crate::node::network::HlNewBlock;

View File

@ -1,18 +1,18 @@
use super::handle::ImportHandle;
use crate::{
HlBlock, HlBlockBody,
consensus::HlConsensus,
node::{
network::HlNewBlock,
rpc::engine_api::payload::HlPayloadTypes,
types::{BlockAndReceipts, EvmBlock},
},
HlBlock, HlBlockBody,
};
use alloy_consensus::{BlockBody, Header};
use alloy_primitives::U128;
use alloy_rpc_types::engine::{ForkchoiceState, PayloadStatusEnum};
use futures::{future::Either, stream::FuturesUnordered, StreamExt};
use reth_engine_primitives::{BeaconConsensusEngineHandle, EngineTypes};
use futures::{StreamExt, future::Either, stream::FuturesUnordered};
use reth_engine_primitives::{ConsensusEngineHandle, EngineTypes};
use reth_eth_wire::NewBlock;
use reth_network::{
import::{BlockImportError, BlockImportEvent, BlockImportOutcome, BlockValidation},
@ -55,7 +55,7 @@ where
Provider: BlockNumReader + Clone,
{
/// The handle to communicate with the engine service
engine: BeaconConsensusEngineHandle<HlPayloadTypes>,
engine: ConsensusEngineHandle<HlPayloadTypes>,
/// The consensus implementation
consensus: Arc<HlConsensus<Provider>>,
/// Receive the new block from the network
@ -73,7 +73,7 @@ where
/// Create a new block import service
pub fn new(
consensus: Arc<HlConsensus<Provider>>,
engine: BeaconConsensusEngineHandle<HlPayloadTypes>,
engine: ConsensusEngineHandle<HlPayloadTypes>,
from_network: UnboundedReceiver<IncomingBlock>,
to_network: UnboundedSender<ImportEvent>,
) -> Self {
@ -89,7 +89,6 @@ where
/// Process a new payload and return the outcome
fn new_payload(&self, block: BlockMsg, peer_id: PeerId) -> ImportFut {
let engine = self.engine.clone();
Box::pin(async move {
let sealed_block = block.block.0.block.clone().seal();
let payload = HlPayloadTypes::block_to_payload(sealed_block);
@ -107,7 +106,7 @@ where
.into(),
_ => None,
},
Err(err) => None,
Err(_) => None,
}
})
}
@ -117,15 +116,10 @@ where
let engine = self.engine.clone();
let consensus = self.consensus.clone();
let sealed_block = block.block.0.block.clone().seal();
let hash = sealed_block.hash();
let number = sealed_block.number();
let (hash, number) = (sealed_block.hash(), sealed_block.number());
Box::pin(async move {
let (head_block_hash, current_hash) = match consensus.canonical_head(hash, number) {
Ok(hash) => hash,
Err(_) => return None,
};
let (head_block_hash, _) = consensus.canonical_head(hash, number).ok()?;
let state = ForkchoiceState {
head_block_hash,
safe_block_hash: head_block_hash,
@ -146,18 +140,15 @@ where
.into(),
_ => None,
},
Err(err) => None,
Err(_) => None,
}
})
}
/// Add a new block import task to the pending imports
fn on_new_block(&mut self, block: BlockMsg, peer_id: PeerId) {
let payload_fut = self.new_payload(block.clone(), peer_id);
self.pending_imports.push(payload_fut);
let fcu_fut = self.update_fork_choice(block, peer_id);
self.pending_imports.push(fcu_fut);
self.pending_imports.push(self.new_payload(block.clone(), peer_id));
self.pending_imports.push(self.update_fork_choice(block, peer_id));
}
}
@ -176,37 +167,19 @@ where
}
// Process completed imports and send events to network
while let Poll::Ready(Some(outcome)) = this.pending_imports.poll_next_unpin(cx) {
if let Some(outcome) = outcome {
while let Poll::Ready(Some(Some(outcome))) = this.pending_imports.poll_next_unpin(cx) {
if let Err(e) = this.to_network.send(BlockImportEvent::Outcome(outcome)) {
return Poll::Ready(Err(Box::new(e)));
}
}
}
Poll::Pending
}
}
pub(crate) fn collect_block(height: u64) -> Option<BlockAndReceipts> {
let ingest_dir = "/home/user/personal/evm-blocks";
let f = ((height - 1) / 1_000_000) * 1_000_000;
let s = ((height - 1) / 1_000) * 1_000;
let path = format!("{ingest_dir}/{f}/{s}/{height}.rmp.lz4");
if std::path::Path::new(&path).exists() {
let file = std::fs::File::open(path).unwrap();
let file = std::io::BufReader::new(file);
let mut decoder = lz4_flex::frame::FrameDecoder::new(file);
let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder).unwrap();
Some(blocks[0].clone())
} else {
None
}
}
#[cfg(test)]
mod tests {
use crate::chainspec::hl::hl_mainnet;
use crate::{chainspec::hl::hl_mainnet, HlHeader};
use super::*;
use alloy_primitives::{B256, U128};
@ -277,15 +250,12 @@ mod tests {
fn chain_info(&self) -> Result<ChainInfo, ProviderError> {
unimplemented!()
}
fn best_block_number(&self) -> Result<u64, ProviderError> {
Ok(0)
}
fn last_block_number(&self) -> Result<u64, ProviderError> {
Ok(0)
}
fn block_number(&self, _hash: B256) -> Result<Option<u64>, ProviderError> {
Ok(None)
}
@ -295,7 +265,6 @@ mod tests {
fn block_hash(&self, _number: u64) -> Result<Option<B256>, ProviderError> {
Ok(Some(B256::ZERO))
}
fn canonical_hashes_range(
&self,
_start: u64,
@ -315,14 +284,12 @@ mod tests {
fn both_valid() -> Self {
Self { new_payload: PayloadStatusEnum::Valid, fcu: PayloadStatusEnum::Valid }
}
fn invalid_new_payload() -> Self {
Self {
new_payload: PayloadStatusEnum::Invalid { validation_error: "test error".into() },
fcu: PayloadStatusEnum::Valid,
}
}
fn invalid_fcu() -> Self {
Self {
new_payload: PayloadStatusEnum::Valid,
@ -341,20 +308,16 @@ mod tests {
async fn new(responses: EngineResponses) -> Self {
let consensus = Arc::new(HlConsensus { provider: MockProvider });
let (to_engine, from_engine) = mpsc::unbounded_channel();
let engine_handle = BeaconConsensusEngineHandle::new(to_engine);
let engine_handle = ConsensusEngineHandle::new(to_engine);
handle_engine_msg(from_engine, responses).await;
let (to_import, from_network) = mpsc::unbounded_channel();
let (to_network, import_outcome) = mpsc::unbounded_channel();
let handle = ImportHandle::new(to_import, import_outcome);
let service = ImportService::new(consensus, engine_handle, from_network, to_network);
tokio::spawn(Box::pin(async move {
service.await.unwrap();
}));
Self { handle }
}
@ -392,7 +355,7 @@ mod tests {
/// Creates a test block message
fn create_test_block() -> NewBlockMessage<HlNewBlock> {
let block = HlBlock {
header: Header::default(),
header: HlHeader::default(),
body: HlBlockBody {
inner: BlockBody {
transactions: Vec::new(),

View File

@ -1,25 +1,24 @@
#![allow(clippy::owned_cow)]
use crate::{
HlBlock,
consensus::HlConsensus,
node::{
network::block_import::{handle::ImportHandle, service::ImportService, HlBlockImport},
HlNode,
network::block_import::{HlBlockImport, handle::ImportHandle, service::ImportService},
primitives::HlPrimitives,
rpc::engine_api::payload::HlPayloadTypes,
types::ReadPrecompileCalls,
HlNode,
},
pseudo_peer::{start_pseudo_peer, BlockSourceConfig},
HlBlock,
pseudo_peer::{BlockSourceConfig, start_pseudo_peer},
};
use alloy_rlp::{Decodable, Encodable};
// use handshake::HlHandshake;
use reth::{
api::{FullNodeTypes, TxTy},
builder::{components::NetworkBuilder, BuilderContext},
builder::{BuilderContext, components::NetworkBuilder},
transaction_pool::{PoolTransaction, TransactionPool},
};
use reth_discv4::NodeRecord;
use reth_engine_primitives::BeaconConsensusEngineHandle;
use reth_engine_primitives::ConsensusEngineHandle;
use reth_eth_wire::{BasicNetworkPrimitives, NewBlock, NewBlockPayload};
use reth_ethereum_primitives::PooledTransactionVariant;
use reth_network::{NetworkConfig, NetworkHandle, NetworkManager};
@ -27,12 +26,11 @@ use reth_network_api::PeersInfo;
use reth_provider::StageCheckpointReader;
use reth_stages_types::StageId;
use std::sync::Arc;
use tokio::sync::{mpsc, oneshot, Mutex};
use tokio::sync::{Mutex, mpsc, oneshot};
use tracing::info;
pub mod block_import;
// pub mod handshake;
// pub(crate) mod upgrade_status;
/// HL `NewBlock` message value.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct HlNewBlock(pub NewBlock<HlBlock>);
@ -40,10 +38,10 @@ pub struct HlNewBlock(pub NewBlock<HlBlock>);
mod rlp {
use super::*;
use crate::{
HlBlockBody, HlHeader,
node::primitives::{BlockBody, TransactionSigned},
HlBlockBody,
};
use alloy_consensus::{BlobTransactionSidecar, Header};
use alloy_consensus::BlobTransactionSidecar;
use alloy_primitives::{Address, U128};
use alloy_rlp::{RlpDecodable, RlpEncodable};
use alloy_rpc_types::Withdrawals;
@ -52,9 +50,9 @@ mod rlp {
#[derive(RlpEncodable, RlpDecodable)]
#[rlp(trailing)]
struct BlockHelper<'a> {
header: Cow<'a, Header>,
header: Cow<'a, HlHeader>,
transactions: Cow<'a, Vec<TransactionSigned>>,
ommers: Cow<'a, Vec<Header>>,
ommers: Cow<'a, Vec<HlHeader>>,
withdrawals: Option<Cow<'a, Withdrawals>>,
}
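// `#[rlp(trailing)]` allows the optional tail fields to be omitted from the
// encoding entirely when they are absent.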
@ -70,32 +68,22 @@ mod rlp {
impl<'a> From<&'a HlNewBlock> for HlNewBlockHelper<'a> {
fn from(value: &'a HlNewBlock) -> Self {
let HlNewBlock(NewBlock {
block:
HlBlock {
header,
body:
HlBlockBody {
inner: BlockBody { transactions, ommers, withdrawals },
sidecars,
read_precompile_calls,
highest_precompile_address,
},
},
td,
}) = value;
let b = &value.0.block;
Self {
block: BlockHelper {
header: Cow::Borrowed(header),
transactions: Cow::Borrowed(transactions),
ommers: Cow::Borrowed(ommers),
withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
header: Cow::Borrowed(&b.header),
transactions: Cow::Borrowed(&b.body.inner.transactions),
ommers: Cow::Borrowed(&b.body.inner.ommers),
withdrawals: b.body.inner.withdrawals.as_ref().map(Cow::Borrowed),
},
td: *td,
sidecars: sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
td: value.0.td,
sidecars: b.body.sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: b.body.read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: b
.body
.highest_precompile_address
.as_ref()
.map(Cow::Borrowed),
}
}
}
@ -112,30 +100,24 @@ mod rlp {
impl Decodable for HlNewBlock {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let HlNewBlockHelper {
block: BlockHelper { header, transactions, ommers, withdrawals },
td,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = HlNewBlockHelper::decode(buf)?;
let h = HlNewBlockHelper::decode(buf)?;
Ok(HlNewBlock(NewBlock {
block: HlBlock {
header: header.into_owned(),
header: h.block.header.into_owned(),
body: HlBlockBody {
inner: BlockBody {
transactions: transactions.into_owned(),
ommers: ommers.into_owned(),
withdrawals: withdrawals.map(|w| w.into_owned()),
transactions: h.block.transactions.into_owned(),
ommers: h.block.ommers.into_owned(),
withdrawals: h.block.withdrawals.map(|w| w.into_owned()),
},
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address
sidecars: h.sidecars.map(|s| s.into_owned()),
read_precompile_calls: h.read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: h
.highest_precompile_address
.map(|s| s.into_owned()),
},
},
td,
td: h.td,
}))
}
}
@ -157,9 +139,11 @@ pub type HlNetworkPrimitives =
#[derive(Debug)]
pub struct HlNetworkBuilder {
pub(crate) engine_handle_rx:
Arc<Mutex<Option<oneshot::Receiver<BeaconConsensusEngineHandle<HlPayloadTypes>>>>>,
Arc<Mutex<Option<oneshot::Receiver<ConsensusEngineHandle<HlPayloadTypes>>>>>,
pub(crate) block_source_config: BlockSourceConfig,
pub(crate) debug_cutoff_height: Option<u64>,
}
impl HlNetworkBuilder {
@ -173,41 +157,32 @@ impl HlNetworkBuilder {
where
Node: FullNodeTypes<Types = HlNode>,
{
let Self { engine_handle_rx, .. } = self;
let network_builder = ctx.network_config_builder()?;
let (to_import, from_network) = mpsc::unbounded_channel();
let (to_network, import_outcome) = mpsc::unbounded_channel();
let handle = ImportHandle::new(to_import, import_outcome);
let consensus = Arc::new(HlConsensus { provider: ctx.provider().clone() });
ctx.task_executor().spawn_critical("block import", async move {
let handle = engine_handle_rx
let handle = self
.engine_handle_rx
.lock()
.await
.take()
.expect("node should only be launched once")
.await
.unwrap();
ImportService::new(consensus, handle, from_network, to_network).await.unwrap();
});
let network_builder = network_builder
Ok(ctx.build_network_config(
ctx.network_config_builder()?
.disable_dns_discovery()
.disable_nat()
.boot_nodes(boot_nodes())
.set_head(ctx.head())
.with_pow()
.block_import(Box::new(HlBlockImport::new(handle)));
// .discovery(discv4)
// .eth_rlpx_handshake(Arc::new(HlHandshake::default()));
let network_config = ctx.build_network_config(network_builder);
Ok(network_config)
.block_import(Box::new(HlBlockImport::new(handle))),
))
}
}
@ -230,21 +205,29 @@ where
pool: Pool,
) -> eyre::Result<Self::Network> {
let block_source_config = self.block_source_config.clone();
let network_config = self.network_config(ctx)?;
let network = NetworkManager::builder(network_config).await?;
let handle = ctx.start_network(network, pool);
let debug_cutoff_height = self.debug_cutoff_height;
let handle =
ctx.start_network(NetworkManager::builder(self.network_config(ctx)?).await?, pool);
let local_node_record = handle.local_node_record();
let chain_spec = ctx.chain_spec();
info!(target: "reth::cli", enode=%local_node_record, "P2P networking initialized");
let next_block_number =
ctx.provider().get_stage_checkpoint(StageId::Finish)?.unwrap_or_default().block_number
+ 1;
let next_block_number = ctx
.provider()
.get_stage_checkpoint(StageId::Finish)?
.unwrap_or_default()
.block_number +
1;
let chain_spec = ctx.chain_spec();
ctx.task_executor().spawn_critical("pseudo peer", async move {
let block_source =
block_source_config.create_cached_block_source(next_block_number).await;
start_pseudo_peer(chain_spec, local_node_record.to_string(), block_source)
start_pseudo_peer(
chain_spec.clone(),
local_node_record.to_string(),
block_source_config
.create_cached_block_source((*chain_spec).clone(), next_block_number)
.await,
debug_cutoff_height,
)
.await
.unwrap();
});

View File

@ -6,47 +6,34 @@
//! The Ethereum transaction pool only supports TransactionSigned (EthereumTxEnvelope<TxEip4844>),
//! hence this placeholder transaction pool.
use crate::node::{primitives::TransactionSigned, HlNode};
use crate::node::{HlNode, primitives::TransactionSigned};
use alloy_consensus::{
error::ValueError, EthereumTxEnvelope, Transaction as TransactionTrait, TxEip4844,
EthereumTxEnvelope, Transaction as TransactionTrait, TxEip4844, error::ValueError,
};
use alloy_eips::{
eip4844::BlobAndProofV2, eip7594::BlobTransactionSidecarVariant, eip7702::SignedAuthorization,
Typed2718,
};
use alloy_primitives::{Address, Bytes, ChainId, TxHash, TxKind, B256, U256};
use alloy_eips::{Typed2718, eip7702::SignedAuthorization};
use alloy_primitives::{Address, B256, Bytes, ChainId, TxHash, TxKind, U256};
use alloy_rpc_types::AccessList;
use alloy_rpc_types_engine::BlobAndProofV1;
use reth::{
api::FullNodeTypes,
builder::components::PoolBuilder,
transaction_pool::{PoolResult, PoolSize, PoolTransaction, TransactionOrigin, TransactionPool},
api::FullNodeTypes, builder::components::PoolBuilder, transaction_pool::PoolTransaction,
};
use reth_eth_wire::HandleMempoolData;
use reth_ethereum_primitives::PooledTransactionVariant;
use reth_primitives::Recovered;
use reth_primitives_traits::InMemorySize;
use reth_transaction_pool::{
error::InvalidPoolTransactionError, AllPoolTransactions, AllTransactionsEvents,
BestTransactions, BestTransactionsAttributes, BlobStoreError, BlockInfo, EthPoolTransaction,
GetPooledTransactionLimit, NewBlobSidecar, NewTransactionEvent, PropagatedTransactions,
TransactionEvents, TransactionListenerKind, ValidPoolTransaction,
};
use std::{collections::HashSet, sync::Arc};
use tokio::sync::mpsc::{self, Receiver};
use reth_transaction_pool::{EthPoolTransaction, noop::NoopTransactionPool};
use std::sync::Arc;
pub struct HlPoolBuilder;
impl<Node> PoolBuilder<Node> for HlPoolBuilder
where
Node: FullNodeTypes<Types = HlNode>,
{
type Pool = HlTransactionPool;
type Pool = NoopTransactionPool<HlPooledTransaction>;
async fn build_pool(
self,
_ctx: &reth::builder::BuilderContext<Node>,
) -> eyre::Result<Self::Pool> {
Ok(HlTransactionPool)
Ok(NoopTransactionPool::new())
}
}
@ -124,16 +111,6 @@ impl PoolTransaction for HlPooledTransaction {
type Consensus = TransactionSigned;
type Pooled = PooledTransactionVariant;
fn try_from_consensus(
_tx: Recovered<Self::Consensus>,
) -> Result<Self, Self::TryFromConsensusError> {
unreachable!()
}
fn clone_into_consensus(&self) -> Recovered<Self::Consensus> {
unreachable!()
}
fn into_consensus(self) -> Recovered<Self::Consensus> {
unreachable!()
}
@ -161,13 +138,6 @@ impl PoolTransaction for HlPooledTransaction {
fn encoded_length(&self) -> usize {
0
}
fn ensure_max_init_code_size(
&self,
_max_init_code_size: usize,
) -> Result<(), InvalidPoolTransactionError> {
Ok(())
}
}
impl EthPoolTransaction for HlPooledTransaction {
@ -197,243 +167,3 @@ impl EthPoolTransaction for HlPooledTransaction {
Ok(())
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct HlTransactionPool;
impl TransactionPool for HlTransactionPool {
type Transaction = HlPooledTransaction;
fn pool_size(&self) -> PoolSize {
PoolSize::default()
}
fn block_info(&self) -> BlockInfo {
BlockInfo::default()
}
async fn add_transaction_and_subscribe(
&self,
_origin: TransactionOrigin,
_transaction: Self::Transaction,
) -> PoolResult<TransactionEvents> {
unreachable!()
}
async fn add_transaction(
&self,
_origin: TransactionOrigin,
_transaction: Self::Transaction,
) -> PoolResult<TxHash> {
Ok(TxHash::default())
}
async fn add_transactions(
&self,
_origin: TransactionOrigin,
_transactions: Vec<Self::Transaction>,
) -> Vec<PoolResult<TxHash>> {
vec![]
}
fn transaction_event_listener(&self, _tx_hash: TxHash) -> Option<TransactionEvents> {
None
}
fn all_transactions_event_listener(&self) -> AllTransactionsEvents<Self::Transaction> {
unreachable!()
}
fn pending_transactions_listener_for(
&self,
_kind: TransactionListenerKind,
) -> Receiver<TxHash> {
mpsc::channel(1).1
}
fn blob_transaction_sidecars_listener(&self) -> Receiver<NewBlobSidecar> {
mpsc::channel(1).1
}
fn new_transactions_listener_for(
&self,
_kind: TransactionListenerKind,
) -> Receiver<NewTransactionEvent<Self::Transaction>> {
mpsc::channel(1).1
}
fn pooled_transaction_hashes(&self) -> Vec<TxHash> {
vec![]
}
fn pooled_transaction_hashes_max(&self, _max: usize) -> Vec<TxHash> {
vec![]
}
fn pooled_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn pooled_transactions_max(
&self,
_max: usize,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn get_pooled_transaction_elements(
&self,
_tx_hashes: Vec<TxHash>,
_limit: GetPooledTransactionLimit,
) -> Vec<<Self::Transaction as PoolTransaction>::Pooled> {
vec![]
}
fn get_pooled_transaction_element(
&self,
_tx_hash: TxHash,
) -> Option<Recovered<<Self::Transaction as PoolTransaction>::Pooled>> {
None
}
fn best_transactions(
&self,
) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>> {
Box::new(std::iter::empty())
}
fn best_transactions_with_attributes(
&self,
_best_transactions_attributes: BestTransactionsAttributes,
) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>> {
Box::new(std::iter::empty())
}
fn pending_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn pending_transactions_max(
&self,
_max: usize,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn queued_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn all_transactions(&self) -> AllPoolTransactions<Self::Transaction> {
AllPoolTransactions::default()
}
fn remove_transactions(
&self,
_hashes: Vec<TxHash>,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn remove_transactions_and_descendants(
&self,
_hashes: Vec<TxHash>,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn remove_transactions_by_sender(
&self,
_sender: Address,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn retain_unknown<A>(&self, _announcement: &mut A)
where
A: HandleMempoolData,
{
// do nothing
}
fn get(&self, _tx_hash: &TxHash) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
None
}
fn get_all(&self, _txs: Vec<TxHash>) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn on_propagated(&self, _txs: PropagatedTransactions) {
// do nothing
}
fn get_transactions_by_sender(
&self,
_sender: Address,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn get_pending_transactions_with_predicate(
&self,
_predicate: impl FnMut(&ValidPoolTransaction<Self::Transaction>) -> bool,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn get_pending_transactions_by_sender(
&self,
_sender: Address,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn get_queued_transactions_by_sender(
&self,
_sender: Address,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
unreachable!()
}
fn get_highest_transaction_by_sender(
&self,
_sender: Address,
) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
None
}
fn get_highest_consecutive_transaction_by_sender(
&self,
_sender: Address,
_on_chain_nonce: u64,
) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
None
}
fn get_transaction_by_sender_and_nonce(
&self,
_sender: Address,
_nonce: u64,
) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
None
}
fn get_transactions_by_origin(
&self,
_origin: TransactionOrigin,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
unreachable!()
}
fn get_pending_transactions_by_origin(
&self,
_origin: TransactionOrigin,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
unreachable!()
}
fn unique_senders(&self) -> HashSet<Address> {
unreachable!()
}
fn get_blob(
&self,
_tx_hash: TxHash,
) -> Result<Option<Arc<BlobTransactionSidecarVariant>>, BlobStoreError> {
unreachable!()
}
fn get_all_blobs(
&self,
_tx_hashes: Vec<TxHash>,
) -> Result<Vec<(TxHash, Arc<BlobTransactionSidecarVariant>)>, BlobStoreError> {
unreachable!()
}
fn get_all_blobs_exact(
&self,
_tx_hashes: Vec<TxHash>,
) -> Result<Vec<Arc<BlobTransactionSidecarVariant>>, BlobStoreError> {
unreachable!()
}
fn get_blobs_for_versioned_hashes_v1(
&self,
_versioned_hashes: &[B256],
) -> Result<Vec<Option<BlobAndProofV1>>, BlobStoreError> {
unreachable!()
}
fn get_blobs_for_versioned_hashes_v2(
&self,
_versioned_hashes: &[B256],
) -> Result<Option<Vec<BlobAndProofV2>>, BlobStoreError> {
unreachable!()
}
}

View File

@ -0,0 +1,49 @@
use super::{HlBlockBody, HlHeader, rlp};
use alloy_rlp::Encodable;
use reth_primitives_traits::{Block, InMemorySize};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
/// Block for HL
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct HlBlock {
pub header: HlHeader,
pub body: HlBlockBody,
}
impl InMemorySize for HlBlock {
fn size(&self) -> usize {
self.header.size() + self.body.size()
}
}
impl Block for HlBlock {
type Header = HlHeader;
type Body = HlBlockBody;
fn new(header: Self::Header, body: Self::Body) -> Self {
Self { header, body }
}
fn header(&self) -> &Self::Header {
&self.header
}
fn body(&self) -> &Self::Body {
&self.body
}
fn split(self) -> (Self::Header, Self::Body) {
(self.header, self.body)
}
fn rlp_length(header: &Self::Header, body: &Self::Body) -> usize {
rlp::BlockHelper {
header: Cow::Borrowed(header),
transactions: Cow::Borrowed(&body.inner.transactions),
ommers: Cow::Borrowed(&body.inner.ommers),
withdrawals: body.inner.withdrawals.as_ref().map(Cow::Borrowed),
sidecars: body.sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: body.read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: body.highest_precompile_address.as_ref().map(Cow::Borrowed),
}
.length()
}
}
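// A quick consistency sketch (illustrative only; not part of the original file):
// `rlp_length` should agree with the byte length produced by the `Encodable`
// impl in rlp.rs.
fn assert_rlp_length_consistent(block: &HlBlock) {
let mut buf = Vec::new();
block.encode(&mut buf);
assert_eq!(buf.len(), <HlBlock as Block>::rlp_length(&block.header, &block.body));
}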

View File

@ -0,0 +1,77 @@
use alloy_consensus::BlobTransactionSidecar;
use alloy_primitives::Address;
use reth_primitives_traits::{BlockBody as BlockBodyTrait, InMemorySize};
use serde::{Deserialize, Serialize};
use crate::node::types::{ReadPrecompileCall, ReadPrecompileCalls};
use crate::{HlHeader, node::primitives::TransactionSigned};
/// Block body for HL. It is equivalent to the Ethereum [`BlockBody`] but additionally stores
/// sidecars for blob transactions, the read precompile calls, and the highest precompile address.
#[derive(
Debug,
Clone,
Default,
PartialEq,
Eq,
Serialize,
Deserialize,
derive_more::Deref,
derive_more::DerefMut,
)]
pub struct HlBlockBody {
#[serde(flatten)]
#[deref]
#[deref_mut]
pub inner: BlockBody,
pub sidecars: Option<Vec<BlobTransactionSidecar>>,
pub read_precompile_calls: Option<ReadPrecompileCalls>,
pub highest_precompile_address: Option<Address>,
}
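/// The inner Ethereum-style body, using HL's transaction type and [`HlHeader`] ommers.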
pub type BlockBody = alloy_consensus::BlockBody<TransactionSigned, HlHeader>;
impl InMemorySize for HlBlockBody {
fn size(&self) -> usize {
self.inner.size()
+ self
.sidecars
.as_ref()
.map_or(0, |s| s.capacity() * core::mem::size_of::<BlobTransactionSidecar>())
+ self
.read_precompile_calls
.as_ref()
.map_or(0, |s| s.0.capacity() * core::mem::size_of::<ReadPrecompileCall>())
}
}
impl BlockBodyTrait for HlBlockBody {
type Transaction = TransactionSigned;
type OmmerHeader = super::HlHeader;
fn transactions(&self) -> &[Self::Transaction] {
BlockBodyTrait::transactions(&self.inner)
}
fn into_ethereum_body(self) -> BlockBody {
self.inner
}
fn into_transactions(self) -> Vec<Self::Transaction> {
self.inner.into_transactions()
}
fn withdrawals(&self) -> Option<&alloy_rpc_types::Withdrawals> {
self.inner.withdrawals()
}
fn ommers(&self) -> Option<&[Self::OmmerHeader]> {
self.inner.ommers()
}
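// System transactions are excluded, so the root covers user transactions only.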
fn calculate_tx_root(&self) -> alloy_primitives::B256 {
alloy_consensus::proofs::calculate_transaction_root(
&self
.transactions()
.iter()
.filter(|tx| !tx.is_system_transaction())
.collect::<Vec<_>>(),
)
}
}

View File

@ -0,0 +1,241 @@
use alloy_consensus::Header;
use alloy_primitives::{Address, B64, B256, BlockNumber, Bloom, Bytes, Sealable, U256};
use alloy_rlp::{RlpDecodable, RlpEncodable};
use reth_cli_commands::common::CliHeader;
use reth_codecs::Compact;
use reth_ethereum_primitives::EthereumReceipt;
use reth_primitives::{SealedHeader, logs_bloom};
use reth_primitives_traits::{BlockHeader, InMemorySize, serde_bincode_compat::RlpBincode};
use reth_rpc_convert::transaction::FromConsensusHeader;
use serde::{Deserialize, Serialize};
/// The header type of this node.
///
/// This type extends the regular Ethereum header with HL-specific extra fields.
#[derive(
Clone,
Debug,
PartialEq,
Eq,
Hash,
derive_more::AsRef,
derive_more::Deref,
Default,
RlpEncodable,
RlpDecodable,
Serialize,
Deserialize,
)]
#[serde(rename_all = "camelCase")]
pub struct HlHeader {
/// The regular eth header
#[as_ref]
#[deref]
pub inner: Header,
/// The extended header fields that are not part of the block hash
pub extras: HlHeaderExtras,
}
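/// Extra HL header fields, excluded from the block hash (which is computed over the inner
/// Ethereum header only).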
#[derive(
Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, RlpEncodable, RlpDecodable, Hash,
)]
pub struct HlHeaderExtras {
pub logs_bloom_with_system_txs: Bloom,
pub system_tx_count: u64,
}
impl HlHeader {
pub(crate) fn from_ethereum_header(header: Header, receipts: &[EthereumReceipt], system_tx_count: u64) -> HlHeader {
let logs_bloom = logs_bloom(receipts.iter().flat_map(|r| &r.logs));
HlHeader {
inner: header,
extras: HlHeaderExtras { logs_bloom_with_system_txs: logs_bloom, system_tx_count },
}
}
}
impl From<Header> for HlHeader {
fn from(_value: Header) -> Self {
unreachable!()
}
}
impl AsRef<Self> for HlHeader {
fn as_ref(&self) -> &Self {
self
}
}
impl Sealable for HlHeader {
fn hash_slow(&self) -> B256 {
self.inner.hash_slow()
}
}
impl alloy_consensus::BlockHeader for HlHeader {
fn parent_hash(&self) -> B256 {
self.inner.parent_hash()
}
fn ommers_hash(&self) -> B256 {
self.inner.ommers_hash()
}
fn beneficiary(&self) -> Address {
self.inner.beneficiary()
}
fn state_root(&self) -> B256 {
self.inner.state_root()
}
fn transactions_root(&self) -> B256 {
self.inner.transactions_root()
}
fn receipts_root(&self) -> B256 {
self.inner.receipts_root()
}
fn withdrawals_root(&self) -> Option<B256> {
self.inner.withdrawals_root()
}
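// Report the bloom that includes system-transaction logs, kept in the extras.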
fn logs_bloom(&self) -> Bloom {
self.extras.logs_bloom_with_system_txs
}
fn difficulty(&self) -> U256 {
self.inner.difficulty()
}
fn number(&self) -> BlockNumber {
self.inner.number()
}
fn gas_limit(&self) -> u64 {
self.inner.gas_limit()
}
fn gas_used(&self) -> u64 {
self.inner.gas_used()
}
fn timestamp(&self) -> u64 {
self.inner.timestamp()
}
fn mix_hash(&self) -> Option<B256> {
self.inner.mix_hash()
}
fn nonce(&self) -> Option<B64> {
self.inner.nonce()
}
fn base_fee_per_gas(&self) -> Option<u64> {
self.inner.base_fee_per_gas()
}
fn blob_gas_used(&self) -> Option<u64> {
self.inner.blob_gas_used()
}
fn excess_blob_gas(&self) -> Option<u64> {
self.inner.excess_blob_gas()
}
fn parent_beacon_block_root(&self) -> Option<B256> {
self.inner.parent_beacon_block_root()
}
fn requests_hash(&self) -> Option<B256> {
self.inner.requests_hash()
}
fn extra_data(&self) -> &Bytes {
self.inner.extra_data()
}
fn is_empty(&self) -> bool {
self.extras.system_tx_count == 0 && self.inner.is_empty()
}
}
impl InMemorySize for HlHeader {
fn size(&self) -> usize {
self.inner.size() + self.extras.size()
}
}
impl InMemorySize for HlHeaderExtras {
fn size(&self) -> usize {
self.logs_bloom_with_system_txs.data().len() + self.system_tx_count.size()
}
}
impl reth_codecs::Compact for HlHeader {
fn to_compact<B>(&self, buf: &mut B) -> usize
where
B: alloy_rlp::bytes::BufMut + AsMut<[u8]>,
{
// Because Header ends with extra_data, which is `Bytes`, we can't use to_compact for the
// extras: the Compact trait requires a Bytes field to be placed at the end of the struct,
// and Bytes::from_compact simply reads all trailing data as that field.
//
// Hence we need another form of serialization, since the extra header fields are not
// Compact-compatible. We rmp-serialize the whole header and treat it as a single `Bytes`
// field.
let result: Bytes = rmp_serde::to_vec(&self).unwrap().into();
result.to_compact(buf)
}
fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
let (bytes, remaining) = Bytes::from_compact(buf, len);
let header: HlHeader = rmp_serde::from_slice(&bytes).unwrap();
(header, remaining)
}
}
impl reth_db_api::table::Compress for HlHeader {
type Compressed = Vec<u8>;
fn compress_to_buf<B: alloy_primitives::bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
let _ = Compact::to_compact(self, buf);
}
}
impl reth_db_api::table::Decompress for HlHeader {
fn decompress(value: &[u8]) -> Result<Self, reth_db_api::DatabaseError> {
let (obj, _) = Compact::from_compact(value, value.len());
Ok(obj)
}
}
impl BlockHeader for HlHeader {}
impl RlpBincode for HlHeader {}
impl CliHeader for HlHeader {
fn set_number(&mut self, number: u64) {
self.inner.set_number(number);
}
}
impl From<HlHeader> for Header {
fn from(value: HlHeader) -> Self {
value.inner
}
}
pub fn to_ethereum_ommers(ommers: &[HlHeader]) -> Vec<Header> {
ommers.iter().map(|ommer| ommer.clone().into()).collect()
}
impl FromConsensusHeader<HlHeader> for alloy_rpc_types::Header {
fn from_consensus_header(header: SealedHeader<HlHeader>, block_size: usize) -> Self {
FromConsensusHeader::<Header>::from_consensus_header(
SealedHeader::<Header>::new(header.inner.clone(), header.hash()),
block_size,
)
}
}
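// A minimal round-trip sketch for the rmp-backed Compact encoding above
// (illustrative only; not part of the original file). `Compact` is already
// imported at the top of header.rs.
fn compact_round_trip(header: &HlHeader) {
let mut buf = Vec::new();
let len = header.to_compact(&mut buf);
let (decoded, rest) = HlHeader::from_compact(&buf, len);
assert_eq!(&decoded, header);
assert!(rest.is_empty());
}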

View File

@ -1,17 +1,18 @@
#![allow(clippy::owned_cow)]
use alloy_consensus::{BlobTransactionSidecar, Header};
use alloy_primitives::Address;
use alloy_rlp::{Encodable, RlpDecodable, RlpEncodable};
use reth_ethereum_primitives::Receipt;
use reth_primitives::NodePrimitives;
use reth_primitives_traits::{Block, BlockBody as BlockBodyTrait, InMemorySize};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use crate::node::types::{ReadPrecompileCall, ReadPrecompileCalls};
pub mod transaction;
pub use transaction::TransactionSigned;
pub mod tx_wrapper;
pub use tx_wrapper::{BlockBody, TransactionSigned};
pub mod block;
pub use block::HlBlock;
pub mod body;
pub use body::{BlockBody, HlBlockBody};
pub mod header;
pub use header::HlHeader;
pub mod rlp;
pub mod serde_bincode_compat;
/// Primitive types for HyperEVM.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
@ -20,332 +21,8 @@ pub struct HlPrimitives;
impl NodePrimitives for HlPrimitives {
type Block = HlBlock;
type BlockHeader = Header;
type BlockHeader = HlHeader;
type BlockBody = HlBlockBody;
type SignedTx = TransactionSigned;
type Receipt = Receipt;
}
/// Block body for HL. It is equivalent to Ethereum [`BlockBody`] but additionally stores sidecars
/// for blob transactions.
#[derive(
Debug,
Clone,
Default,
PartialEq,
Eq,
Serialize,
Deserialize,
derive_more::Deref,
derive_more::DerefMut,
)]
pub struct HlBlockBody {
#[serde(flatten)]
#[deref]
#[deref_mut]
pub inner: BlockBody,
pub sidecars: Option<Vec<BlobTransactionSidecar>>,
pub read_precompile_calls: Option<ReadPrecompileCalls>,
pub highest_precompile_address: Option<Address>,
}
impl InMemorySize for HlBlockBody {
fn size(&self) -> usize {
self.inner.size() +
self.sidecars
.as_ref()
.map_or(0, |s| s.capacity() * core::mem::size_of::<BlobTransactionSidecar>()) +
self.read_precompile_calls
.as_ref()
.map_or(0, |s| s.0.capacity() * core::mem::size_of::<ReadPrecompileCall>())
}
}
impl BlockBodyTrait for HlBlockBody {
type Transaction = TransactionSigned;
type OmmerHeader = Header;
fn transactions(&self) -> &[Self::Transaction] {
BlockBodyTrait::transactions(&self.inner)
}
fn into_ethereum_body(self) -> BlockBody {
self.inner
}
fn into_transactions(self) -> Vec<Self::Transaction> {
self.inner.into_transactions()
}
fn withdrawals(&self) -> Option<&alloy_rpc_types::Withdrawals> {
self.inner.withdrawals()
}
fn ommers(&self) -> Option<&[Self::OmmerHeader]> {
self.inner.ommers()
}
fn calculate_tx_root(&self) -> alloy_primitives::B256 {
alloy_consensus::proofs::calculate_transaction_root(
&self
.transactions()
.iter()
.filter(|tx| !tx.is_system_transaction())
.collect::<Vec<_>>(),
)
}
}
/// Block for HL
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct HlBlock {
pub header: Header,
pub body: HlBlockBody,
}
impl InMemorySize for HlBlock {
fn size(&self) -> usize {
self.header.size() + self.body.size()
}
}
impl Block for HlBlock {
type Header = Header;
type Body = HlBlockBody;
fn new(header: Self::Header, body: Self::Body) -> Self {
Self { header, body }
}
fn header(&self) -> &Self::Header {
&self.header
}
fn body(&self) -> &Self::Body {
&self.body
}
fn split(self) -> (Self::Header, Self::Body) {
(self.header, self.body)
}
fn rlp_length(header: &Self::Header, body: &Self::Body) -> usize {
rlp::BlockHelper {
header: Cow::Borrowed(header),
transactions: Cow::Borrowed(&body.inner.transactions),
ommers: Cow::Borrowed(&body.inner.ommers),
withdrawals: body.inner.withdrawals.as_ref().map(Cow::Borrowed),
sidecars: body.sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: body.read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: body.highest_precompile_address.as_ref().map(Cow::Borrowed),
}
.length()
}
}
mod rlp {
use super::*;
use alloy_eips::eip4895::Withdrawals;
use alloy_rlp::Decodable;
#[derive(RlpEncodable, RlpDecodable)]
#[rlp(trailing)]
struct BlockBodyHelper<'a> {
transactions: Cow<'a, Vec<TransactionSigned>>,
ommers: Cow<'a, Vec<Header>>,
withdrawals: Option<Cow<'a, Withdrawals>>,
sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
highest_precompile_address: Option<Cow<'a, Address>>,
}
#[derive(RlpEncodable, RlpDecodable)]
#[rlp(trailing)]
pub(crate) struct BlockHelper<'a> {
pub(crate) header: Cow<'a, Header>,
pub(crate) transactions: Cow<'a, Vec<TransactionSigned>>,
pub(crate) ommers: Cow<'a, Vec<Header>>,
pub(crate) withdrawals: Option<Cow<'a, Withdrawals>>,
pub(crate) sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
pub(crate) read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
pub(crate) highest_precompile_address: Option<Cow<'a, Address>>,
}
impl<'a> From<&'a HlBlockBody> for BlockBodyHelper<'a> {
fn from(value: &'a HlBlockBody) -> Self {
let HlBlockBody {
inner: BlockBody { transactions, ommers, withdrawals },
sidecars,
read_precompile_calls,
highest_precompile_address,
} = value;
Self {
transactions: Cow::Borrowed(transactions),
ommers: Cow::Borrowed(ommers),
withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
sidecars: sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
}
}
}
impl<'a> From<&'a HlBlock> for BlockHelper<'a> {
fn from(value: &'a HlBlock) -> Self {
let HlBlock {
header,
body:
HlBlockBody {
inner: BlockBody { transactions, ommers, withdrawals },
sidecars,
read_precompile_calls,
highest_precompile_address,
},
} = value;
Self {
header: Cow::Borrowed(header),
transactions: Cow::Borrowed(transactions),
ommers: Cow::Borrowed(ommers),
withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
sidecars: sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
}
}
}
impl Encodable for HlBlockBody {
fn encode(&self, out: &mut dyn bytes::BufMut) {
BlockBodyHelper::from(self).encode(out);
}
fn length(&self) -> usize {
BlockBodyHelper::from(self).length()
}
}
impl Decodable for HlBlockBody {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let BlockBodyHelper {
transactions,
ommers,
withdrawals,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = BlockBodyHelper::decode(buf)?;
Ok(Self {
inner: BlockBody {
transactions: transactions.into_owned(),
ommers: ommers.into_owned(),
withdrawals: withdrawals.map(|w| w.into_owned()),
},
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
})
}
}
impl Encodable for HlBlock {
fn encode(&self, out: &mut dyn bytes::BufMut) {
BlockHelper::from(self).encode(out);
}
fn length(&self) -> usize {
BlockHelper::from(self).length()
}
}
impl Decodable for HlBlock {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let BlockHelper {
header,
transactions,
ommers,
withdrawals,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = BlockHelper::decode(buf)?;
Ok(Self {
header: header.into_owned(),
body: HlBlockBody {
inner: BlockBody {
transactions: transactions.into_owned(),
ommers: ommers.into_owned(),
withdrawals: withdrawals.map(|w| w.into_owned()),
},
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
},
})
}
}
}
pub mod serde_bincode_compat {
use super::*;
use reth_primitives_traits::serde_bincode_compat::{BincodeReprFor, SerdeBincodeCompat};
#[derive(Debug, Serialize, Deserialize)]
pub struct HlBlockBodyBincode<'a> {
inner: BincodeReprFor<'a, BlockBody>,
sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
highest_precompile_address: Option<Cow<'a, Address>>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct HlBlockBincode<'a> {
header: BincodeReprFor<'a, Header>,
body: BincodeReprFor<'a, HlBlockBody>,
}
impl SerdeBincodeCompat for HlBlockBody {
type BincodeRepr<'a> = HlBlockBodyBincode<'a>;
fn as_repr(&self) -> Self::BincodeRepr<'_> {
HlBlockBodyBincode {
inner: self.inner.as_repr(),
sidecars: self.sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: self.read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: self
.highest_precompile_address
.as_ref()
.map(Cow::Borrowed),
}
}
fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
let HlBlockBodyBincode {
inner,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = repr;
Self {
inner: BlockBody::from_repr(inner),
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
}
}
}
impl SerdeBincodeCompat for HlBlock {
type BincodeRepr<'a> = HlBlockBincode<'a>;
fn as_repr(&self) -> Self::BincodeRepr<'_> {
HlBlockBincode { header: self.header.as_repr(), body: self.body.as_repr() }
}
fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
let HlBlockBincode { header, body } = repr;
Self { header: Header::from_repr(header), body: HlBlockBody::from_repr(body) }
}
}
}

142
src/node/primitives/rlp.rs Normal file
View File

@ -0,0 +1,142 @@
#![allow(clippy::owned_cow)]
use super::{HlBlock, HlBlockBody, TransactionSigned};
use crate::{node::types::ReadPrecompileCalls, HlHeader};
use alloy_consensus::{BlobTransactionSidecar, BlockBody};
use alloy_eips::eip4895::Withdrawals;
use alloy_primitives::Address;
use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable};
use std::borrow::Cow;
#[derive(RlpEncodable, RlpDecodable)]
#[rlp(trailing)]
struct BlockBodyHelper<'a> {
transactions: Cow<'a, Vec<TransactionSigned>>,
ommers: Cow<'a, Vec<HlHeader>>,
withdrawals: Option<Cow<'a, Withdrawals>>,
sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
highest_precompile_address: Option<Cow<'a, Address>>,
}
#[derive(RlpEncodable, RlpDecodable)]
#[rlp(trailing)]
pub(crate) struct BlockHelper<'a> {
pub(crate) header: Cow<'a, HlHeader>,
pub(crate) transactions: Cow<'a, Vec<TransactionSigned>>,
pub(crate) ommers: Cow<'a, Vec<HlHeader>>,
pub(crate) withdrawals: Option<Cow<'a, Withdrawals>>,
pub(crate) sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
pub(crate) read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
pub(crate) highest_precompile_address: Option<Cow<'a, Address>>,
}
impl<'a> From<&'a HlBlockBody> for BlockBodyHelper<'a> {
fn from(value: &'a HlBlockBody) -> Self {
let HlBlockBody {
inner: BlockBody { transactions, ommers, withdrawals },
sidecars,
read_precompile_calls,
highest_precompile_address,
} = value;
Self {
transactions: Cow::Borrowed(transactions),
ommers: Cow::Borrowed(ommers),
withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
sidecars: sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
}
}
}
impl<'a> From<&'a HlBlock> for BlockHelper<'a> {
fn from(value: &'a HlBlock) -> Self {
let HlBlock {
header,
body:
HlBlockBody {
inner: BlockBody { transactions, ommers, withdrawals },
sidecars,
read_precompile_calls,
highest_precompile_address,
},
} = value;
Self {
header: Cow::Borrowed(header),
transactions: Cow::Borrowed(transactions),
ommers: Cow::Borrowed(ommers),
withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
sidecars: sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
}
}
}
impl Encodable for HlBlockBody {
fn encode(&self, out: &mut dyn bytes::BufMut) {
BlockBodyHelper::from(self).encode(out);
}
fn length(&self) -> usize {
BlockBodyHelper::from(self).length()
}
}
impl Decodable for HlBlockBody {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let BlockBodyHelper {
transactions,
ommers,
withdrawals,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = BlockBodyHelper::decode(buf)?;
Ok(Self {
inner: BlockBody {
transactions: transactions.into_owned(),
ommers: ommers.into_owned(),
withdrawals: withdrawals.map(|w| w.into_owned()),
},
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
})
}
}
impl Encodable for HlBlock {
fn encode(&self, out: &mut dyn bytes::BufMut) {
BlockHelper::from(self).encode(out);
}
fn length(&self) -> usize {
BlockHelper::from(self).length()
}
}
impl Decodable for HlBlock {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let BlockHelper {
header,
transactions,
ommers,
withdrawals,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = BlockHelper::decode(buf)?;
Ok(Self {
header: header.into_owned(),
body: HlBlockBody {
inner: BlockBody {
transactions: transactions.into_owned(),
ommers: ommers.into_owned(),
withdrawals: withdrawals.map(|w| w.into_owned()),
},
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
},
})
}
}

View File

@ -0,0 +1,64 @@
#![allow(clippy::owned_cow)]
use alloy_consensus::BlobTransactionSidecar;
use alloy_primitives::Address;
use reth_primitives_traits::serde_bincode_compat::{BincodeReprFor, SerdeBincodeCompat};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use super::{HlBlock, HlBlockBody};
use crate::{node::{primitives::BlockBody, types::ReadPrecompileCalls}, HlHeader};
#[derive(Debug, Serialize, Deserialize)]
pub struct HlBlockBodyBincode<'a> {
inner: BincodeReprFor<'a, BlockBody>,
sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
highest_precompile_address: Option<Cow<'a, Address>>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct HlBlockBincode<'a> {
header: BincodeReprFor<'a, HlHeader>,
body: BincodeReprFor<'a, HlBlockBody>,
}
impl SerdeBincodeCompat for HlBlockBody {
type BincodeRepr<'a> = HlBlockBodyBincode<'a>;
fn as_repr(&self) -> Self::BincodeRepr<'_> {
HlBlockBodyBincode {
inner: self.inner.as_repr(),
sidecars: self.sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: self.read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: self.highest_precompile_address.as_ref().map(Cow::Borrowed),
}
}
fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
let HlBlockBodyBincode {
inner,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = repr;
Self {
inner: BlockBody::from_repr(inner),
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
}
}
}
impl SerdeBincodeCompat for HlBlock {
type BincodeRepr<'a> = HlBlockBincode<'a>;
fn as_repr(&self) -> Self::BincodeRepr<'_> {
HlBlockBincode { header: self.header.as_repr(), body: self.body.as_repr() }
}
fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
let HlBlockBincode { header, body } = repr;
Self { header: HlHeader::from_repr(header), body: HlBlockBody::from_repr(body) }
}
}

View File

@ -1,32 +1,35 @@
//! HlNodePrimitives::TransactionSigned; it is the same as the Ethereum transaction type,
//! except that it supports a pseudo signer for system transactions.
use std::convert::Infallible;
use crate::evm::transaction::HlTxEnv;
use alloy_consensus::{
crypto::RecoveryError, error::ValueError, EthereumTxEnvelope, SignableTransaction, Signed,
Transaction as TransactionTrait, TransactionEnvelope, TxEip1559, TxEip2930, TxEip4844,
TxEip4844WithSidecar, TxEip7702, TxLegacy, TxType, TypedTransaction,
SignableTransaction, Signed, Transaction as TransactionTrait, TransactionEnvelope, TxEip1559,
TxEip2930, TxEip4844, TxEip7702, TxLegacy, TxType, TypedTransaction, crypto::RecoveryError,
error::ValueError, transaction::TxHashRef,
};
use alloy_eips::{eip7594::BlobTransactionSidecarVariant, Encodable2718};
use alloy_primitives::{address, Address, TxHash, U256};
use alloy_eips::Encodable2718;
use alloy_network::TxSigner;
use alloy_primitives::{Address, TxHash, U256, address};
use alloy_rpc_types::{Transaction, TransactionInfo, TransactionRequest};
use alloy_signer::Signature;
use reth_codecs::alloy::transaction::FromTxCompact;
use reth_codecs::alloy::transaction::{Envelope, FromTxCompact};
use reth_db::{
table::{Compress, Decompress},
DatabaseError,
table::{Compress, Decompress},
};
use reth_ethereum_primitives::PooledTransactionVariant;
use reth_evm::FromRecoveredTx;
use reth_primitives::Recovered;
use reth_primitives_traits::{
serde_bincode_compat::SerdeBincodeCompat, InMemorySize, SignedTransaction, SignerRecoverable,
InMemorySize, SignedTransaction, SignerRecoverable, serde_bincode_compat::SerdeBincodeCompat,
};
use reth_rpc_eth_api::{
EthTxEnvError, SignTxRequestError, SignableTxRequest, TryIntoSimTx,
transaction::{FromConsensusTx, TryIntoTxEnv},
EthTxEnvError, TryIntoSimTx,
};
use revm::context::{BlockEnv, CfgEnv, TxEnv};
use crate::evm::transaction::HlTxEnv;
type InnerType = alloy_consensus::EthereumTxEnvelope<TxEip4844>;
#[derive(Debug, Clone, TransactionEnvelope)]
@ -45,6 +48,12 @@ fn s_to_address(s: U256) -> Address {
Address::from_slice(&buf)
}
impl TxHashRef for TransactionSigned {
fn tx_hash(&self) -> &TxHash {
self.inner().tx_hash()
}
}
impl SignerRecoverable for TransactionSigned {
fn recover_signer(&self) -> Result<Address, RecoveryError> {
if self.is_system_transaction() {
@ -59,24 +68,17 @@ impl SignerRecoverable for TransactionSigned {
}
self.inner().recover_signer_unchecked()
}
}
impl SignedTransaction for TransactionSigned {
fn tx_hash(&self) -> &TxHash {
self.inner().tx_hash()
}
fn recover_signer_unchecked_with_buf(
&self,
buf: &mut Vec<u8>,
) -> Result<Address, RecoveryError> {
fn recover_unchecked_with_buf(&self, buf: &mut Vec<u8>) -> Result<Address, RecoveryError> {
if self.is_system_transaction() {
return Ok(s_to_address(self.signature().s()));
}
self.inner().recover_signer_unchecked_with_buf(buf)
self.inner().recover_unchecked_with_buf(buf)
}
}
impl SignedTransaction for TransactionSigned {}
// ------------------------------------------------------------
// NOTE: All lines below are just wrappers for the inner type.
// ------------------------------------------------------------
@ -116,11 +118,6 @@ impl reth_codecs::Compact for TransactionSigned {
}
}
pub fn convert_recovered(value: Recovered<TransactionSigned>) -> Recovered<InnerType> {
let (tx, signer) = value.into_parts();
Recovered::new_unchecked(tx.into_inner(), signer)
}
impl FromRecoveredTx<TransactionSigned> for TxEnv {
fn from_recovered_tx(tx: &TransactionSigned, sender: Address) -> Self {
TxEnv::from_recovered_tx(&tx.inner(), sender)
@ -164,16 +161,8 @@ impl TransactionSigned {
}
}
pub fn signature(&self) -> &Signature {
self.inner().signature()
}
pub const fn tx_type(&self) -> TxType {
self.inner().tx_type()
}
pub fn is_system_transaction(&self) -> bool {
self.gas_price().is_some() && self.gas_price().unwrap() == 0
matches!(self.gas_price(), Some(0))
}
}
@ -192,40 +181,16 @@ impl SerdeBincodeCompat for TransactionSigned {
}
}
pub type BlockBody = alloy_consensus::BlockBody<TransactionSigned>;
impl From<TransactionSigned> for EthereumTxEnvelope<TxEip4844> {
fn from(value: TransactionSigned) -> Self {
value.into_inner()
}
}
impl TryFrom<TransactionSigned> for EthereumTxEnvelope<TxEip4844WithSidecar> {
type Error = <InnerType as TryInto<EthereumTxEnvelope<TxEip4844WithSidecar>>>::Error;
impl TryFrom<TransactionSigned> for PooledTransactionVariant {
type Error = <InnerType as TryInto<PooledTransactionVariant>>::Error;
fn try_from(value: TransactionSigned) -> Result<Self, Self::Error> {
value.into_inner().try_into()
}
}
impl TryFrom<TransactionSigned>
for EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>
{
type Error = <InnerType as TryInto<
EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>,
>>::Error;
fn try_from(value: TransactionSigned) -> Result<Self, Self::Error> {
value.into_inner().try_into()
}
}
impl From<EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>>
for TransactionSigned
{
fn from(
value: EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>,
) -> Self {
impl From<PooledTransactionVariant> for TransactionSigned {
fn from(value: PooledTransactionVariant) -> Self {
Self::Default(value.into())
}
}
@ -233,10 +198,6 @@ impl From<EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>
impl Compress for TransactionSigned {
type Compressed = Vec<u8>;
fn compress(self) -> Self::Compressed {
self.into_inner().compress()
}
fn compress_to_buf<B: bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
self.inner().compress_to_buf(buf);
}
@ -248,22 +209,6 @@ impl Decompress for TransactionSigned {
}
}
pub fn convert_to_eth_block_body(value: BlockBody) -> alloy_consensus::BlockBody<InnerType> {
alloy_consensus::BlockBody {
transactions: value.transactions.into_iter().map(|tx| tx.into_inner()).collect(),
ommers: value.ommers,
withdrawals: value.withdrawals,
}
}
pub fn convert_to_hl_block_body(value: alloy_consensus::BlockBody<InnerType>) -> BlockBody {
BlockBody {
transactions: value.transactions.into_iter().map(TransactionSigned::Default).collect(),
ommers: value.ommers,
withdrawals: value.withdrawals,
}
}
impl TryIntoSimTx<TransactionSigned> for TransactionRequest {
fn try_into_sim_tx(self) -> Result<TransactionSigned, ValueError<Self>> {
let tx = self
@ -291,8 +236,26 @@ impl TryIntoTxEnv<HlTxEnv<TxEnv>> for TransactionRequest {
impl FromConsensusTx<TransactionSigned> for Transaction {
type TxInfo = TransactionInfo;
type Err = Infallible;
fn from_consensus_tx(tx: TransactionSigned, signer: Address, tx_info: Self::TxInfo) -> Self {
Self::from_transaction(Recovered::new_unchecked(tx.into_inner().into(), signer), tx_info)
fn from_consensus_tx(
tx: TransactionSigned,
signer: Address,
tx_info: Self::TxInfo,
) -> Result<Self, Self::Err> {
Ok(Self::from_transaction(
Recovered::new_unchecked(tx.into_inner().into(), signer),
tx_info,
))
}
}
impl SignableTxRequest<TransactionSigned> for TransactionRequest {
async fn try_build_and_sign(
self,
signer: impl TxSigner<Signature> + Send,
) -> Result<TransactionSigned, SignTxRequestError> {
let signed = SignableTxRequest::<InnerType>::try_build_and_sign(self, signer).await?;
Ok(TransactionSigned::Default(signed))
}
}

View File

@ -1,263 +1,55 @@
use std::{future::Future, sync::Arc};
use crate::{
chainspec::HlChainSpec,
node::{
primitives::TransactionSigned,
rpc::{HlEthApi, HlNodeCore},
},
HlBlock,
};
use alloy_consensus::{BlockHeader, ReceiptEnvelope, TxType};
use alloy_primitives::B256;
use reth::{
api::NodeTypes,
builder::FullNodeComponents,
primitives::{Receipt, SealedHeader, TransactionMeta},
providers::{BlockReaderIdExt, ProviderHeader, ReceiptProvider, TransactionsProvider},
rpc::{
eth::EthApiTypes,
server_types::eth::{
error::FromEvmError, receipt::build_receipt, EthApiError, PendingBlock,
},
types::{BlockId, TransactionReceipt},
},
transaction_pool::{PoolTransaction, TransactionPool},
};
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_evm::{ConfigureEvm, NextBlockEnvAttributes};
use reth_primitives::{NodePrimitives, SealedBlock};
use reth_primitives_traits::{BlockBody as _, RecoveredBlock, SignedTransaction as _};
use reth_provider::{
BlockIdReader, BlockReader, ChainSpecProvider, HeaderProvider, ProviderBlock, ProviderReceipt,
ProviderTx, StateProviderFactory,
use crate::node::rpc::{HlEthApi, HlRpcNodeCore};
use reth::rpc::server_types::eth::{
EthApiError, PendingBlock, builder::config::PendingBlockKind, error::FromEvmError,
};
use reth_rpc_eth_api::{
helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking},
types::RpcTypes,
FromEthApiError, RpcConvert, RpcNodeCore, RpcNodeCoreExt, RpcReceipt,
RpcConvert,
helpers::{
EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, pending_block::PendingEnvBuilder,
},
};
fn is_system_tx(tx: &TransactionSigned) -> bool {
tx.is_system_transaction()
}
impl<N> EthBlocks for HlEthApi<N>
impl<N, Rpc> EthBlocks for HlEthApi<N, Rpc>
where
Self: LoadBlock<
Error = EthApiError,
NetworkTypes: RpcTypes<Receipt = TransactionReceipt>,
Provider: BlockReader<Transaction = TransactionSigned, Receipt = Receipt>,
>,
N: HlNodeCore<Provider: ChainSpecProvider<ChainSpec = HlChainSpec> + HeaderProvider>,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
async fn block_receipts(
&self,
block_id: BlockId,
) -> Result<Option<Vec<RpcReceipt<Self::NetworkTypes>>>, Self::Error>
where
Self: LoadReceipt,
{
if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? {
let block_number = block.number();
let base_fee = block.base_fee_per_gas();
let block_hash = block.hash();
let excess_blob_gas = block.excess_blob_gas();
let timestamp = block.timestamp();
let blob_params = self.provider().chain_spec().blob_params_at_timestamp(timestamp);
return block
.body()
.transactions()
.iter()
.zip(receipts.iter())
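// System transactions are dropped here; the remaining transactions are re-indexed from zero.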
.filter(|(tx, _)| !is_system_tx(tx))
.enumerate()
.map(|(idx, (tx, receipt))| {
let meta = TransactionMeta {
tx_hash: *tx.tx_hash(),
index: idx as u64,
block_hash,
block_number,
base_fee,
excess_blob_gas,
timestamp,
};
build_receipt(tx, meta, receipt, &receipts, blob_params, |receipt_with_bloom| {
match receipt.tx_type {
TxType::Legacy => ReceiptEnvelope::Legacy(receipt_with_bloom),
TxType::Eip2930 => ReceiptEnvelope::Eip2930(receipt_with_bloom),
TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt_with_bloom),
TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt_with_bloom),
TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt_with_bloom),
}
})
})
.collect::<Result<Vec<_>, Self::Error>>()
.map(Some);
}
Ok(None)
}
}
impl<N> LoadBlock for HlEthApi<N>
impl<N, Rpc> LoadBlock for HlEthApi<N, Rpc>
where
Self: LoadPendingBlock
+ SpawnBlocking
+ RpcNodeCoreExt<
Pool: TransactionPool<
Transaction: PoolTransaction<Consensus = ProviderTx<Self::Provider>>,
>,
> + RpcNodeCore<Provider: BlockReader<Block = crate::HlBlock>>,
N: HlNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn recovered_block(
&self,
block_id: BlockId,
) -> impl Future<
Output = Result<
Option<Arc<RecoveredBlock<<Self::Provider as BlockReader>::Block>>>,
Self::Error,
>,
> + Send {
let hl_node_compliant = self.hl_node_compliant;
async move {
// Copy of LoadBlock::recovered_block, but with --hl-node-compliant support
if block_id.is_pending() {
return Ok(None);
}
let block_hash = match self
.provider()
.block_hash_for_id(block_id)
.map_err(Self::Error::from_eth_err)?
{
Some(block_hash) => block_hash,
None => return Ok(None),
};
let recovered_block = self
.cache()
.get_recovered_block(block_hash)
.await
.map_err(Self::Error::from_eth_err)?;
if let Some(recovered_block) = recovered_block {
let recovered_block = if hl_node_compliant {
filter_if_hl_node_compliant(&recovered_block)
} else {
(*recovered_block).clone()
};
return Ok(Some(std::sync::Arc::new(recovered_block)));
}
Ok(None)
}
}
}
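/// Drops the leading run of system transactions (and their senders) from a recovered block; used
/// for responses served with `--hl-node-compliant`.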
fn filter_if_hl_node_compliant(
recovered_block: &RecoveredBlock<HlBlock>,
) -> RecoveredBlock<HlBlock> {
let sealed_block = recovered_block.sealed_block();
let transactions = sealed_block.body().transactions();
let to_skip = transactions
.iter()
.position(|tx| !tx.is_system_transaction())
.unwrap_or(transactions.len());
let mut new_block: HlBlock = sealed_block.clone_block();
new_block.body.transactions.drain(..to_skip);
let new_sealed_block = SealedBlock::new_unchecked(new_block, sealed_block.hash());
let new_senders = recovered_block.senders()[to_skip..].to_vec();
RecoveredBlock::new_sealed(new_sealed_block, new_senders)
}
impl<N> LoadPendingBlock for HlEthApi<N>
impl<N, Rpc> LoadPendingBlock for HlEthApi<N, Rpc>
where
Self: SpawnBlocking
+ EthApiTypes<
NetworkTypes: RpcTypes<
Header = alloy_rpc_types_eth::Header<ProviderHeader<Self::Provider>>,
>,
Error: FromEvmError<Self::Evm>,
RpcConvert: RpcConvert<Network = Self::NetworkTypes>,
>,
N: RpcNodeCore<
Provider: BlockReaderIdExt
+ ChainSpecProvider<ChainSpec: EthChainSpec + EthereumHardforks>
+ StateProviderFactory,
Pool: TransactionPool<Transaction: PoolTransaction<Consensus = ProviderTx<N::Provider>>>,
Evm: ConfigureEvm<
Primitives = <Self as RpcNodeCore>::Primitives,
NextBlockEnvCtx: From<NextBlockEnvAttributes>,
>,
Primitives: NodePrimitives<
BlockHeader = ProviderHeader<Self::Provider>,
SignedTx = ProviderTx<Self::Provider>,
Receipt = ProviderReceipt<Self::Provider>,
Block = ProviderBlock<Self::Provider>,
>,
>,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
#[inline]
fn pending_block(
&self,
) -> &tokio::sync::Mutex<
Option<PendingBlock<ProviderBlock<Self::Provider>, ProviderReceipt<Self::Provider>>>,
> {
fn pending_block(&self) -> &tokio::sync::Mutex<Option<PendingBlock<N::Primitives>>> {
self.inner.eth_api.pending_block()
}
fn next_env_attributes(
&self,
parent: &SealedHeader<ProviderHeader<Self::Provider>>,
) -> Result<<Self::Evm as reth_evm::ConfigureEvm>::NextBlockEnvCtx, Self::Error> {
Ok(NextBlockEnvAttributes {
timestamp: parent.timestamp().saturating_add(12),
suggested_fee_recipient: parent.beneficiary(),
prev_randao: B256::random(),
gas_limit: parent.gas_limit(),
parent_beacon_block_root: parent.parent_beacon_block_root(),
withdrawals: None,
#[inline]
fn pending_env_builder(&self) -> &dyn PendingEnvBuilder<Self::Evm> {
self.inner.eth_api.pending_env_builder()
}
.into())
#[inline]
fn pending_block_kind(&self) -> PendingBlockKind {
self.inner.eth_api.pending_block_kind()
}
}
impl<N> LoadReceipt for HlEthApi<N>
impl<N, Rpc> LoadReceipt for HlEthApi<N, Rpc>
where
Self: Send + Sync,
N: FullNodeComponents<Types: NodeTypes<ChainSpec = HlChainSpec>>,
Self::Provider:
TransactionsProvider<Transaction = TransactionSigned> + ReceiptProvider<Receipt = Receipt>,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
async fn build_transaction_receipt(
&self,
tx: TransactionSigned,
meta: TransactionMeta,
receipt: Receipt,
) -> Result<RpcReceipt<Self::NetworkTypes>, Self::Error> {
let hash = meta.block_hash;
// get all receipts for the block
let all_receipts = self
.cache()
.get_receipts(hash)
.await
.map_err(Self::Error::from_eth_err)?
.ok_or(EthApiError::HeaderNotFound(hash.into()))?;
let blob_params = self.provider().chain_spec().blob_params_at_timestamp(meta.timestamp);
build_receipt(&tx, meta, &receipt, &all_receipts, blob_params, |receipt_with_bloom| {
match receipt.tx_type {
TxType::Legacy => ReceiptEnvelope::Legacy(receipt_with_bloom),
TxType::Eip2930 => ReceiptEnvelope::Eip2930(receipt_with_bloom),
TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt_with_bloom),
TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt_with_bloom),
TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt_with_bloom),
}
})
}
}

View File

@ -1,51 +1,45 @@
use super::{HlEthApi, HlNodeCore};
use crate::evm::transaction::HlTxEnv;
use alloy_rpc_types::TransactionRequest;
use core::fmt;
use super::{HlEthApi, HlRpcNodeCore};
use crate::{HlBlock, node::evm::apply_precompiles};
use alloy_consensus::transaction::TxHashRef;
use alloy_evm::Evm;
use alloy_primitives::B256;
use reth::rpc::server_types::eth::EthApiError;
use reth_evm::{block::BlockExecutorFactory, ConfigureEvm, EvmFactory, TxEnvFor};
use reth_primitives::NodePrimitives;
use reth_provider::{ProviderError, ProviderHeader, ProviderTx};
use reth_evm::{ConfigureEvm, Database, EvmEnvFor, HaltReasonFor, InspectorFor, SpecFor, TxEnvFor};
use reth_primitives::{NodePrimitives, Recovered};
use reth_provider::{ProviderError, ProviderTx};
use reth_rpc_eth_api::{
helpers::{estimate::EstimateCall, Call, EthCall, LoadBlock, LoadState, SpawnBlocking},
FromEvmError, FullEthApiTypes, RpcConvert, RpcTypes,
FromEvmError, RpcConvert, RpcNodeCore,
helpers::{Call, EthCall},
};
use revm::context::TxEnv;
use revm::{DatabaseCommit, context::result::ResultAndState};
impl<N> EthCall for HlEthApi<N>
impl<N> HlRpcNodeCore for N where N: RpcNodeCore<Primitives: NodePrimitives<Block = HlBlock>> {}
impl<N, Rpc> EthCall for HlEthApi<N, Rpc>
where
Self: EstimateCall + LoadBlock + FullEthApiTypes,
N: HlNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<
Primitives = N::Primitives,
Error = EthApiError,
TxEnv = TxEnvFor<N::Evm>,
Spec = SpecFor<N::Evm>,
>,
{
}
impl<N> EstimateCall for HlEthApi<N>
impl<N, Rpc> Call for HlEthApi<N, Rpc>
where
Self: Call,
Self::Error: From<EthApiError>,
N: HlNodeCore,
{
}
impl<N> Call for HlEthApi<N>
where
Self: LoadState<
Evm: ConfigureEvm<
Primitives: NodePrimitives<
BlockHeader = ProviderHeader<Self::Provider>,
SignedTx = ProviderTx<Self::Provider>,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<
Primitives = N::Primitives,
Error = EthApiError,
TxEnv = TxEnvFor<N::Evm>,
Spec = SpecFor<N::Evm>,
>,
BlockExecutorFactory: BlockExecutorFactory<
EvmFactory: EvmFactory<Tx = HlTxEnv<TxEnv>>,
>,
>,
RpcConvert: RpcConvert<TxEnv = TxEnvFor<Self::Evm>, Network = Self::NetworkTypes>,
NetworkTypes: RpcTypes<TransactionRequest: From<TransactionRequest>>,
Error: FromEvmError<Self::Evm>
+ From<<Self::RpcConvert as RpcConvert>::Error>
+ From<ProviderError>,
> + SpawnBlocking,
Self::Error: From<EthApiError>,
N: HlNodeCore,
{
#[inline]
fn call_gas_limit(&self) -> u64 {
@ -56,4 +50,75 @@ where
fn max_simulate_blocks(&self) -> u64 {
self.inner.eth_api.max_simulate_blocks()
}
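// The overrides below mirror the default `Call` implementations, but install HL's read
// precompiles (fetched per block via `get_hl_extras`) on the EVM before executing.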
fn transact<DB>(
&self,
db: DB,
evm_env: EvmEnvFor<Self::Evm>,
tx_env: TxEnvFor<Self::Evm>,
) -> Result<ResultAndState<HaltReasonFor<Self::Evm>>, Self::Error>
where
DB: Database<Error = ProviderError> + fmt::Debug,
{
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.to::<u64>().into())?;
let mut evm = self.evm_config().evm_with_env(db, evm_env);
apply_precompiles(&mut evm, &hl_extras);
let res = evm.transact(tx_env).map_err(Self::Error::from_evm_err)?;
Ok(res)
}
fn transact_with_inspector<DB, I>(
&self,
db: DB,
evm_env: EvmEnvFor<Self::Evm>,
tx_env: TxEnvFor<Self::Evm>,
inspector: I,
) -> Result<ResultAndState<HaltReasonFor<Self::Evm>>, Self::Error>
where
DB: Database<Error = ProviderError> + fmt::Debug,
I: InspectorFor<Self::Evm, DB>,
{
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.to::<u64>().into())?;
let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, inspector);
apply_precompiles(&mut evm, &hl_extras);
let res = evm.transact(tx_env).map_err(Self::Error::from_evm_err)?;
Ok(res)
}
fn replay_transactions_until<'a, DB, I>(
&self,
db: &mut DB,
evm_env: EvmEnvFor<Self::Evm>,
transactions: I,
target_tx_hash: B256,
) -> Result<usize, Self::Error>
where
DB: Database<Error = ProviderError> + DatabaseCommit + core::fmt::Debug,
I: IntoIterator<Item = Recovered<&'a ProviderTx<Self::Provider>>>,
{
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.to::<u64>().into())?;
let mut evm = self.evm_config().evm_with_env(db, evm_env);
apply_precompiles(&mut evm, &hl_extras);
let mut index = 0;
for tx in transactions {
if *tx.tx_hash() == target_tx_hash {
// reached the target transaction
break;
}
let tx_env = self.evm_config().tx_env(tx);
evm.transact_commit(tx_env).map_err(Self::Error::from_evm_err)?;
index += 1;
}
Ok(index)
}
}

View File

@ -6,20 +6,15 @@ use crate::{
use alloy_consensus::BlockHeader;
use alloy_eips::eip4895::Withdrawal;
use alloy_primitives::B256;
use alloy_rpc_types_engine::{PayloadAttributes, PayloadError};
use alloy_rpc_types_engine::PayloadError;
use reth::{
api::{FullNodeComponents, NodeTypes},
builder::{rpc::EngineValidatorBuilder, AddOnsContext},
consensus::ConsensusError,
};
use reth_engine_primitives::{EngineValidator, ExecutionPayload, PayloadValidator};
use reth_payload_primitives::{
EngineApiMessageVersion, EngineObjectValidationError, NewPayloadError, PayloadOrAttributes,
PayloadTypes,
builder::{AddOnsContext, rpc::PayloadValidatorBuilder},
};
use reth_engine_primitives::{ExecutionPayload, PayloadValidator};
use reth_payload_primitives::NewPayloadError;
use reth_primitives::{RecoveredBlock, SealedBlock};
use reth_primitives_traits::Block as _;
use reth_trie_common::HashedPostState;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
@ -27,27 +22,27 @@ use super::payload::HlPayloadTypes;
#[derive(Debug, Default, Clone)]
#[non_exhaustive]
pub struct HlEngineValidatorBuilder;
pub struct HlPayloadValidatorBuilder;
impl<Node, Types> EngineValidatorBuilder<Node> for HlEngineValidatorBuilder
impl<Node, Types> PayloadValidatorBuilder<Node> for HlPayloadValidatorBuilder
where
Types: NodeTypes<ChainSpec = HlChainSpec, Payload = HlPayloadTypes, Primitives = HlPrimitives>,
Node: FullNodeComponents<Types = Types>,
{
type Validator = HlEngineValidator;
type Validator = HlPayloadValidator;
async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result<Self::Validator> {
Ok(HlEngineValidator::new(Arc::new(ctx.config.chain.clone().as_ref().clone())))
Ok(HlPayloadValidator::new(Arc::new(ctx.config.chain.clone().as_ref().clone())))
}
}
/// Validator for Optimism engine API.
/// Validator for HyperEVM engine API.
#[derive(Debug, Clone)]
pub struct HlEngineValidator {
pub struct HlPayloadValidator {
inner: HlExecutionPayloadValidator<HlChainSpec>,
}
impl HlEngineValidator {
impl HlPayloadValidator {
/// Instantiates a new validator.
pub fn new(chain_spec: Arc<HlChainSpec>) -> Self {
Self { inner: HlExecutionPayloadValidator { inner: chain_spec } }
@ -87,47 +82,17 @@ impl ExecutionPayload for HlExecutionData {
}
}
impl PayloadValidator for HlEngineValidator {
impl PayloadValidator<HlPayloadTypes> for HlPayloadValidator {
type Block = HlBlock;
type ExecutionData = HlExecutionData;
fn ensure_well_formed_payload(
&self,
payload: Self::ExecutionData,
payload: HlExecutionData,
) -> Result<RecoveredBlock<Self::Block>, NewPayloadError> {
let sealed_block =
self.inner.ensure_well_formed_payload(payload).map_err(NewPayloadError::other)?;
sealed_block.try_recover().map_err(|e| NewPayloadError::Other(e.into()))
}
fn validate_block_post_execution_with_hashed_state(
&self,
_state_updates: &HashedPostState,
_block: &RecoveredBlock<Self::Block>,
) -> Result<(), ConsensusError> {
Ok(())
}
}
impl<Types> EngineValidator<Types> for HlEngineValidator
where
Types: PayloadTypes<PayloadAttributes = PayloadAttributes, ExecutionData = HlExecutionData>,
{
fn validate_version_specific_fields(
&self,
_version: EngineApiMessageVersion,
_payload_or_attrs: PayloadOrAttributes<'_, Self::ExecutionData, PayloadAttributes>,
) -> Result<(), EngineObjectValidationError> {
Ok(())
}
fn ensure_well_formed_attributes(
&self,
_version: EngineApiMessageVersion,
_attributes: &PayloadAttributes,
) -> Result<(), EngineObjectValidationError> {
Ok(())
}
}
/// Execution payload validator.
@ -158,7 +123,7 @@ where
return Err(PayloadError::BlockHash {
execution: sealed_block.hash(),
consensus: expected_hash,
})?;
});
}
Ok(sealed_block)

214
src/node/rpc/estimate.rs Normal file
View File

@ -0,0 +1,214 @@
use super::{HlEthApi, HlRpcNodeCore, apply_precompiles};
use alloy_evm::overrides::{StateOverrideError, apply_state_overrides};
use alloy_network::TransactionBuilder;
use alloy_primitives::{TxKind, U256};
use alloy_rpc_types_eth::state::StateOverride;
use reth_chainspec::MIN_TRANSACTION_GAS;
use reth_errors::ProviderError;
use reth_evm::{ConfigureEvm, Evm, EvmEnvFor, SpecFor, TransactionEnv, TxEnvFor};
use reth_revm::{database::StateProviderDatabase, db::CacheDB};
use reth_rpc_convert::{RpcConvert, RpcTxReq};
use reth_rpc_eth_api::{
AsEthApiError, IntoEthApiError, RpcNodeCore,
helpers::{
Call,
estimate::{EstimateCall, update_estimated_gas_range},
},
};
use reth_rpc_eth_types::{
EthApiError, RevertError, RpcInvalidTransactionError,
error::{FromEvmError, api::FromEvmHalt},
};
use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO};
use reth_storage_api::StateProvider;
use revm::context_interface::{Transaction, result::ExecutionResult};
use tracing::trace;
impl<N, Rpc> EstimateCall for HlEthApi<N, Rpc>
where
Self: Call,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm> + From<StateOverrideError<ProviderError>>,
Rpc: RpcConvert<
Primitives = N::Primitives,
Error = EthApiError,
TxEnv = TxEnvFor<N::Evm>,
Spec = SpecFor<N::Evm>,
>,
{
// Modified version that adds `apply_precompiles`; comments are stripped out.
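// Shape of the search: one execution at the highest allowed limit as a sanity check, an
// optimistic retry derived from the actual gas used, then a binary search that stops once the
// range is within ESTIMATE_GAS_ERROR_RATIO.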
fn estimate_gas_with<S>(
&self,
mut evm_env: EvmEnvFor<Self::Evm>,
mut request: RpcTxReq<<Self::RpcConvert as RpcConvert>::Network>,
state: S,
state_override: Option<StateOverride>,
) -> Result<U256, Self::Error>
where
S: StateProvider,
{
evm_env.cfg_env.disable_eip3607 = true;
evm_env.cfg_env.disable_base_fee = true;
request.as_mut().take_nonce();
let tx_request_gas_limit = request.as_ref().gas_limit();
let tx_request_gas_price = request.as_ref().gas_price();
let max_gas_limit = evm_env
.cfg_env
.tx_gas_limit_cap
.map_or(evm_env.block_env.gas_limit, |cap| cap.min(evm_env.block_env.gas_limit));
let mut highest_gas_limit = tx_request_gas_limit
.map(|mut tx_gas_limit| {
if max_gas_limit < tx_gas_limit {
tx_gas_limit = max_gas_limit;
}
tx_gas_limit
})
.unwrap_or(max_gas_limit);
let mut db = CacheDB::new(StateProviderDatabase::new(state));
if let Some(state_override) = state_override {
apply_state_overrides(state_override, &mut db).map_err(
|err: StateOverrideError<ProviderError>| {
let eth_api_error: EthApiError = EthApiError::from(err);
Self::Error::from(eth_api_error)
},
)?;
}
let mut tx_env = self.create_txn_env(&evm_env, request, &mut db)?;
let mut is_basic_transfer = false;
if tx_env.input().is_empty() &&
let TxKind::Call(to) = tx_env.kind() &&
let Ok(code) = db.db.account_code(&to)
{
is_basic_transfer = code.map(|code| code.is_empty()).unwrap_or(true);
}
if tx_env.gas_price() > 0 {
highest_gas_limit =
highest_gas_limit.min(self.caller_gas_allowance(&mut db, &evm_env, &tx_env)?);
}
tx_env.set_gas_limit(tx_env.gas_limit().min(highest_gas_limit));
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.to::<u64>().into())?;
let mut evm = self.evm_config().evm_with_env(&mut db, evm_env);
apply_precompiles(&mut evm, &hl_extras);
if is_basic_transfer {
let mut min_tx_env = tx_env.clone();
min_tx_env.set_gas_limit(MIN_TRANSACTION_GAS);
if let Ok(res) = evm.transact(min_tx_env).map_err(Self::Error::from_evm_err) &&
res.result.is_success()
{
return Ok(U256::from(MIN_TRANSACTION_GAS));
}
}
trace!(target: "rpc::eth::estimate", ?tx_env, gas_limit = tx_env.gas_limit(), is_basic_transfer, "Starting gas estimation");
let mut res = match evm.transact(tx_env.clone()).map_err(Self::Error::from_evm_err) {
Err(err)
if err.is_gas_too_high() &&
(tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) =>
{
return Self::map_out_of_gas_err(&mut evm, tx_env, max_gas_limit);
}
Err(err) if err.is_gas_too_low() => {
return Err(RpcInvalidTransactionError::GasRequiredExceedsAllowance {
gas_limit: tx_env.gas_limit(),
}
.into_eth_err());
}
ethres => ethres?,
};
let gas_refund = match res.result {
ExecutionResult::Success { gas_refunded, .. } => gas_refunded,
ExecutionResult::Halt { reason, .. } => {
return Err(Self::Error::from_evm_halt(reason, tx_env.gas_limit()));
}
ExecutionResult::Revert { output, .. } => {
return if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() {
Self::map_out_of_gas_err(&mut evm, tx_env, max_gas_limit)
} else {
Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err())
};
}
};
highest_gas_limit = tx_env.gas_limit();
let mut gas_used = res.result.gas_used();
let mut lowest_gas_limit = gas_used.saturating_sub(1);
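// EIP-150 style padding: a call forwards at most 63/64 of the available gas, so scale the
// observed usage (plus refund and stipend) by 64/63.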
let optimistic_gas_limit = (gas_used + gas_refund + CALL_STIPEND_GAS) * 64 / 63;
if optimistic_gas_limit < highest_gas_limit {
let mut optimistic_tx_env = tx_env.clone();
optimistic_tx_env.set_gas_limit(optimistic_gas_limit);
res = evm.transact(optimistic_tx_env).map_err(Self::Error::from_evm_err)?;
gas_used = res.result.gas_used();
update_estimated_gas_range(
res.result,
optimistic_gas_limit,
&mut highest_gas_limit,
&mut lowest_gas_limit,
)?;
};
let mut mid_gas_limit = std::cmp::min(
gas_used * 3,
((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64,
);
trace!(target: "rpc::eth::estimate", ?highest_gas_limit, ?lowest_gas_limit, ?mid_gas_limit, "Starting binary search for gas");
while lowest_gas_limit + 1 < highest_gas_limit {
if (highest_gas_limit - lowest_gas_limit) as f64 / (highest_gas_limit as f64) <
ESTIMATE_GAS_ERROR_RATIO
{
break;
};
let mut mid_tx_env = tx_env.clone();
mid_tx_env.set_gas_limit(mid_gas_limit);
match evm.transact(mid_tx_env).map_err(Self::Error::from_evm_err) {
Err(err) if err.is_gas_too_high() => {
highest_gas_limit = mid_gas_limit;
}
Err(err) if err.is_gas_too_low() => {
lowest_gas_limit = mid_gas_limit;
}
ethres => {
res = ethres?;
update_estimated_gas_range(
res.result,
mid_gas_limit,
&mut highest_gas_limit,
&mut lowest_gas_limit,
)?;
}
}
mid_gas_limit = ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64;
}
Ok(U256::from(highest_gas_limit))
}
}

View File

@ -1,104 +1,110 @@
use crate::{
HlBlock, HlPrimitives,
chainspec::HlChainSpec,
node::{evm::apply_precompiles, types::HlExtras},
};
use alloy_eips::BlockId;
use alloy_evm::Evm;
use alloy_network::Ethereum;
use alloy_primitives::U256;
use reth::{
api::{FullNodeTypes, HeaderTy, NodeTypes, PrimitivesTy},
builder::{
rpc::{EthApiBuilder, EthApiCtx},
FullNodeComponents,
rpc::{EthApiBuilder, EthApiCtx},
},
chainspec::EthChainSpec,
primitives::EthereumHardforks,
providers::ChainSpecProvider,
rpc::{
eth::{core::EthApiInner, DevSigner, FullEthApiServer},
server_types::eth::{EthApiError, EthStateCache, FeeHistoryCache, GasPriceOracle},
eth::{DevSigner, FullEthApiServer, core::EthApiInner},
server_types::eth::{
EthApiError, EthStateCache, FeeHistoryCache, GasPriceOracle,
receipt::EthReceiptConverter,
},
},
tasks::{
pool::{BlockingTaskGuard, BlockingTaskPool},
TaskSpawner,
pool::{BlockingTaskGuard, BlockingTaskPool},
},
transaction_pool::TransactionPool,
};
use reth_evm::ConfigureEvm;
use reth_network::NetworkInfo;
use reth_evm::{ConfigureEvm, Database, EvmEnvFor, HaltReasonFor, InspectorFor, TxEnvFor};
use reth_primitives::NodePrimitives;
use reth_provider::{
BlockNumReader, BlockReader, BlockReaderIdExt, ProviderBlock, ProviderHeader, ProviderReceipt,
ProviderTx, StageCheckpointReader, StateProviderFactory,
BlockReaderIdExt, ChainSpecProvider, ProviderError, ProviderHeader, ProviderTx,
};
use reth_rpc::RpcTypes;
use reth_rpc_eth_api::{
EthApiTypes, FromEvmError, RpcConvert, RpcConverter, RpcNodeCore, RpcNodeCoreExt,
SignableTxRequest,
helpers::{
AddDevSigners, EthApiSpec, EthFees, EthSigner, EthState, LoadBlock, LoadFee, LoadState,
SpawnBlocking, Trace,
AddDevSigners, EthApiSpec, EthFees, EthState, LoadFee, LoadPendingBlock, LoadState,
SpawnBlocking, Trace, pending_block::BuildPendingEnv, spec::SignersForApi,
},
EthApiTypes, FromEvmError, RpcConverter, RpcNodeCore, RpcNodeCoreExt,
};
use std::{fmt, sync::Arc};
use revm::context::result::ResultAndState;
use std::{fmt, marker::PhantomData, sync::Arc};
mod block;
mod call;
pub mod engine_api;
mod estimate;
pub mod precompile;
mod transaction;
/// A helper trait with requirements for [`RpcNodeCore`] to be used in [`HlEthApi`].
pub trait HlNodeCore: RpcNodeCore<Provider: BlockReader> {}
impl<T> HlNodeCore for T where T: RpcNodeCore<Provider: BlockReader> {}
/// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API.
pub type EthApiNodeBackend<N> = EthApiInner<
<N as RpcNodeCore>::Provider,
<N as RpcNodeCore>::Pool,
<N as RpcNodeCore>::Network,
<N as RpcNodeCore>::Evm,
>;
pub trait HlRpcNodeCore: RpcNodeCore<Primitives: NodePrimitives<Block = HlBlock>> {}
/// Container type `HlEthApi`
#[allow(missing_debug_implementations)]
pub(crate) struct HlEthApiInner<N: HlNodeCore> {
pub(crate) struct HlEthApiInner<N: HlRpcNodeCore, Rpc: RpcConvert> {
/// Gateway to node's core components.
pub(crate) eth_api: EthApiNodeBackend<N>,
pub(crate) eth_api: EthApiInner<N, Rpc>,
}
#[derive(Clone)]
pub struct HlEthApi<N: HlNodeCore> {
type HlRpcConvert<N, NetworkT> =
RpcConverter<NetworkT, <N as FullNodeComponents>::Evm, EthReceiptConverter<HlChainSpec>>;
pub struct HlEthApi<N: HlRpcNodeCore, Rpc: RpcConvert> {
/// Gateway to node's core components.
pub(crate) inner: Arc<HlEthApiInner<N>>,
/// Converter for RPC types.
tx_resp_builder: RpcConverter<Ethereum, N::Evm, EthApiError, ()>,
/// Whether the node is in HL node compliant mode.
pub(crate) hl_node_compliant: bool,
pub(crate) inner: Arc<HlEthApiInner<N, Rpc>>,
}
impl<N: HlNodeCore> fmt::Debug for HlEthApi<N> {
impl<N: HlRpcNodeCore, Rpc: RpcConvert> Clone for HlEthApi<N, Rpc> {
fn clone(&self) -> Self {
Self { inner: self.inner.clone() }
}
}
impl<N, Rpc> fmt::Debug for HlEthApi<N, Rpc>
where
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("HlEthApi").finish_non_exhaustive()
}
}
impl<N> EthApiTypes for HlEthApi<N>
impl<N, Rpc> EthApiTypes for HlEthApi<N, Rpc>
where
Self: Send + Sync,
N: HlNodeCore,
N::Evm: std::fmt::Debug,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
type Error = EthApiError;
type NetworkTypes = Ethereum;
type RpcConvert = RpcConverter<Ethereum, N::Evm, EthApiError, ()>;
type NetworkTypes = Rpc::Network;
type RpcConvert = Rpc;
fn tx_resp_builder(&self) -> &Self::RpcConvert {
&self.tx_resp_builder
self.inner.eth_api.tx_resp_builder()
}
}
impl<N> RpcNodeCore for HlEthApi<N>
impl<N, Rpc> RpcNodeCore for HlEthApi<N, Rpc>
where
N: HlNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
type Primitives = N::Primitives;
type Provider = N::Provider;
type Pool = N::Pool;
type Evm = <N as RpcNodeCore>::Evm;
type Network = <N as RpcNodeCore>::Network;
type PayloadBuilder = ();
type Evm = N::Evm;
type Network = N::Network;
#[inline]
fn pool(&self) -> &Self::Pool {
@ -115,37 +121,30 @@ where
self.inner.eth_api.network()
}
#[inline]
fn payload_builder(&self) -> &Self::PayloadBuilder {
&()
}
#[inline]
fn provider(&self) -> &Self::Provider {
self.inner.eth_api.provider()
}
}
impl<N> RpcNodeCoreExt for HlEthApi<N>
impl<N, Rpc> RpcNodeCoreExt for HlEthApi<N, Rpc>
where
N: HlNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
#[inline]
fn cache(&self) -> &EthStateCache<ProviderBlock<N::Provider>, ProviderReceipt<N::Provider>> {
fn cache(&self) -> &EthStateCache<N::Primitives> {
self.inner.eth_api.cache()
}
}
impl<N> EthApiSpec for HlEthApi<N>
impl<N, Rpc> EthApiSpec for HlEthApi<N, Rpc>
where
N: HlNodeCore<
Provider: ChainSpecProvider<ChainSpec: EthereumHardforks>
+ BlockNumReader
+ StageCheckpointReader,
Network: NetworkInfo,
>,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
type Transaction = ProviderTx<Self::Provider>;
type Rpc = Rpc::Network;
#[inline]
fn starting_block(&self) -> U256 {
@ -153,16 +152,15 @@ where
}
#[inline]
fn signers(&self) -> &parking_lot::RwLock<Vec<Box<dyn EthSigner<ProviderTx<Self::Provider>>>>> {
fn signers(&self) -> &SignersForApi<Self> {
self.inner.eth_api.signers()
}
}
impl<N> SpawnBlocking for HlEthApi<N>
impl<N, Rpc> SpawnBlocking for HlEthApi<N, Rpc>
where
Self: Send + Sync + Clone + 'static,
N: HlNodeCore,
N::Evm: std::fmt::Debug,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
#[inline]
fn io_task_spawner(&self) -> impl TaskSpawner {
@ -180,14 +178,11 @@ where
}
}
impl<N> LoadFee for HlEthApi<N>
impl<N, Rpc> LoadFee for HlEthApi<N, Rpc>
where
Self: LoadBlock<Provider = N::Provider>,
N: HlNodeCore<
Provider: BlockReaderIdExt
+ ChainSpecProvider<ChainSpec: EthChainSpec + EthereumHardforks>
+ StateProviderFactory,
>,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
#[inline]
fn gas_oracle(&self) -> &GasPriceOracle<Self::Provider> {
@ -195,25 +190,24 @@ where
}
#[inline]
fn fee_history_cache(&self) -> &FeeHistoryCache {
fn fee_history_cache(&self) -> &FeeHistoryCache<ProviderHeader<N::Provider>> {
self.inner.eth_api.fee_history_cache()
}
}
impl<N> LoadState for HlEthApi<N>
impl<N, Rpc> LoadState for HlEthApi<N, Rpc>
where
N: HlNodeCore<
Provider: StateProviderFactory + ChainSpecProvider<ChainSpec: EthereumHardforks>,
Pool: TransactionPool,
>,
N::Evm: std::fmt::Debug,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
Self: LoadPendingBlock,
{
}
impl<N> EthState for HlEthApi<N>
impl<N, Rpc> EthState for HlEthApi<N, Rpc>
where
Self: LoadState + SpawnBlocking,
N: HlNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
Self: LoadPendingBlock,
{
#[inline]
fn max_proof_window(&self) -> u64 {
@ -221,36 +215,63 @@ where
}
}
impl<N> EthFees for HlEthApi<N>
impl<N, Rpc> EthFees for HlEthApi<N, Rpc>
where
Self: LoadFee<
Provider: ChainSpecProvider<
ChainSpec: EthChainSpec<Header = ProviderHeader<Self::Provider>>,
>,
>,
N: HlNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
}
impl<N> Trace for HlEthApi<N>
impl<N, Rpc> Trace for HlEthApi<N, Rpc>
where
Self: RpcNodeCore<Provider: BlockReader>
+ LoadState<
Evm: ConfigureEvm<
Primitives: NodePrimitives<
BlockHeader = ProviderHeader<Self::Provider>,
SignedTx = ProviderTx<Self::Provider>,
>,
>,
Error: FromEvmError<Self::Evm>,
>,
N: HlNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn inspect<DB, I>(
&self,
db: DB,
evm_env: EvmEnvFor<Self::Evm>,
tx_env: TxEnvFor<Self::Evm>,
inspector: I,
) -> Result<ResultAndState<HaltReasonFor<Self::Evm>>, Self::Error>
where
DB: Database<Error = ProviderError>,
I: InspectorFor<Self::Evm, DB>,
{
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.to::<u64>().into())?;
let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, inspector);
apply_precompiles(&mut evm, &hl_extras);
evm.transact(tx_env).map_err(Self::Error::from_evm_err)
}
}
impl<N> AddDevSigners for HlEthApi<N>
impl<N, Rpc> HlEthApi<N, Rpc>
where
N: HlNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn get_hl_extras(&self, block: BlockId) -> Result<HlExtras, ProviderError> {
Ok(self
.provider()
.block_by_id(block)?
.map(|block| HlExtras {
read_precompile_calls: block.body.read_precompile_calls.clone(),
highest_precompile_address: block.body.highest_precompile_address,
})
.unwrap_or_default())
}
}
impl<N, Rpc> AddDevSigners for HlEthApi<N, Rpc>
where
N: HlRpcNodeCore,
Rpc: RpcConvert<
Network: RpcTypes<TransactionRequest: SignableTxRequest<ProviderTx<N::Provider>>>,
>,
{
fn with_dev_accounts(&self) {
*self.inner.eth_api.signers().write() = DevSigner::random_signers(20)
@ -258,40 +279,41 @@ where
}
/// Builds [`HlEthApi`] for HL.
#[derive(Debug, Default)]
#[derive(Debug)]
#[non_exhaustive]
pub struct HlEthApiBuilder {
/// Whether the node is in HL node compliant mode.
pub(crate) hl_node_compliant: bool,
pub struct HlEthApiBuilder<NetworkT = Ethereum> {
/// Marker for network types.
pub(crate) _nt: PhantomData<NetworkT>,
}
impl<N> EthApiBuilder<N> for HlEthApiBuilder
where
N: FullNodeComponents,
HlEthApi<N>: FullEthApiServer<Provider = N::Provider, Pool = N::Pool>,
{
type EthApi = HlEthApi<N>;
async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result<Self::EthApi> {
let eth_api = reth::rpc::eth::EthApiBuilder::new(
ctx.components.provider().clone(),
ctx.components.pool().clone(),
ctx.components.network().clone(),
ctx.components.evm_config().clone(),
)
.eth_cache(ctx.cache)
.task_spawner(ctx.components.task_executor().clone())
.gas_cap(ctx.config.rpc_gas_cap.into())
.max_simulate_blocks(ctx.config.rpc_max_simulate_blocks)
.eth_proof_window(ctx.config.eth_proof_window)
.fee_history_cache_config(ctx.config.fee_history_cache)
.proof_permits(ctx.config.proof_permits)
.build_inner();
Ok(HlEthApi {
inner: Arc::new(HlEthApiInner { eth_api }),
tx_resp_builder: Default::default(),
hl_node_compliant: self.hl_node_compliant,
})
impl<NetworkT> Default for HlEthApiBuilder<NetworkT> {
fn default() -> Self {
Self { _nt: PhantomData }
}
}
impl<N, NetworkT> EthApiBuilder<N> for HlEthApiBuilder<NetworkT>
where
N: FullNodeComponents<Types: NodeTypes<ChainSpec = HlChainSpec, Primitives = HlPrimitives>>
+ RpcNodeCore<
Primitives = PrimitivesTy<N::Types>,
Evm: ConfigureEvm<NextBlockEnvCtx: BuildPendingEnv<HeaderTy<N::Types>>>,
>,
NetworkT: RpcTypes,
HlRpcConvert<N, NetworkT>: RpcConvert<Network = NetworkT, Primitives = PrimitivesTy<N::Types>>,
HlEthApi<N, HlRpcConvert<N, NetworkT>>: FullEthApiServer<
Provider = <N as FullNodeTypes>::Provider,
Pool = <N as FullNodeComponents>::Pool,
> + AddDevSigners,
{
type EthApi = HlEthApi<N, HlRpcConvert<N, NetworkT>>;
async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result<Self::EthApi> {
let provider = FullNodeComponents::provider(ctx.components);
let rpc_converter =
RpcConverter::new(EthReceiptConverter::<HlChainSpec>::new(provider.chain_spec()));
let eth_api = ctx.eth_api_builder().with_rpc_converter(rpc_converter).build_inner();
Ok(HlEthApi { inner: Arc::new(HlEthApiInner { eth_api }) })
}
}

View File

@ -0,0 +1,44 @@
use alloy_eips::BlockId;
use jsonrpsee::proc_macros::rpc;
use jsonrpsee_core::{RpcResult, async_trait};
use reth_rpc_convert::RpcConvert;
use reth_rpc_eth_types::EthApiError;
use tracing::trace;
use crate::node::{
rpc::{HlEthApi, HlRpcNodeCore},
types::HlExtras,
};
/// A custom RPC trait for fetching block precompile data.
#[rpc(server, namespace = "eth")]
#[async_trait]
pub trait HlBlockPrecompileApi {
/// Fetches precompile data for a given block.
#[method(name = "blockPrecompileData")]
async fn block_precompile_data(&self, block: BlockId) -> RpcResult<HlExtras>;
}
pub struct HlBlockPrecompileExt<N: HlRpcNodeCore, Rpc: RpcConvert> {
eth_api: HlEthApi<N, Rpc>,
}
impl<N: HlRpcNodeCore, Rpc: RpcConvert> HlBlockPrecompileExt<N, Rpc> {
/// Creates a new instance of the [`HlBlockPrecompileExt`].
pub fn new(eth_api: HlEthApi<N, Rpc>) -> Self {
Self { eth_api }
}
}
#[async_trait]
impl<N, Rpc> HlBlockPrecompileApiServer for HlBlockPrecompileExt<N, Rpc>
where
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
async fn block_precompile_data(&self, block: BlockId) -> RpcResult<HlExtras> {
trace!(target: "rpc::eth", ?block, "Serving eth_blockPrecompileData");
let hl_extras = self.eth_api.get_hl_extras(block).map_err(EthApiError::from)?;
Ok(hl_extras)
}
}
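For reference, a hedged client-side sketch of calling the new method. The endpoint URL is a placeholder, and the `HlExtras` import path follows this crate's layout:

```rust
use jsonrpsee::{http_client::HttpClientBuilder, rpc_params};
use jsonrpsee_core::client::ClientT;

use crate::node::types::HlExtras;

async fn fetch_precompile_data() -> eyre::Result<()> {
    // Placeholder endpoint; any node exposing the `eth` namespace above works.
    let client = HttpClientBuilder::default().build("http://localhost:8545")?;
    let extras: HlExtras = client.request("eth_blockPrecompileData", rpc_params!["latest"]).await?;
    println!("read precompile call sets: {}", extras.read_precompile_calls.map_or(0, |c| c.0.len()));
    Ok(())
}
```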

View File

@ -1,51 +1,34 @@
use super::HlNodeCore;
use crate::node::rpc::HlEthApi;
use alloy_primitives::{Bytes, B256};
use reth::{
rpc::server_types::eth::utils::recover_raw_transaction,
transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool},
};
use reth_provider::{BlockReader, BlockReaderIdExt, ProviderTx, TransactionsProvider};
use std::time::Duration;
use crate::node::rpc::{HlEthApi, HlRpcNodeCore};
use alloy_primitives::{B256, Bytes};
use reth::rpc::server_types::eth::EthApiError;
use reth_rpc_eth_api::{
helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking},
FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt,
RpcConvert,
helpers::{EthTransactions, LoadTransaction, spec::SignersForRpc},
};
impl<N> LoadTransaction for HlEthApi<N>
impl<N, Rpc> LoadTransaction for HlEthApi<N, Rpc>
where
Self: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt,
N: HlNodeCore<Provider: TransactionsProvider, Pool: TransactionPool>,
Self::Pool: TransactionPool,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
}
impl<N> EthTransactions for HlEthApi<N>
impl<N, Rpc> EthTransactions for HlEthApi<N, Rpc>
where
Self: LoadTransaction<Provider: BlockReaderIdExt>,
N: HlNodeCore<Provider: BlockReader<Transaction = ProviderTx<Self::Provider>>>,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn signers(&self) -> &parking_lot::RwLock<Vec<Box<dyn EthSigner<ProviderTx<Self::Provider>>>>> {
fn signers(&self) -> &SignersForRpc<Self::Provider, Self::NetworkTypes> {
self.inner.eth_api.signers()
}
/// Decodes and recovers the transaction and submits it to the pool.
///
/// Returns the hash of the transaction.
async fn send_raw_transaction(&self, tx: Bytes) -> Result<B256, Self::Error> {
let recovered = recover_raw_transaction(&tx)?;
async fn send_raw_transaction(&self, _tx: Bytes) -> Result<B256, Self::Error> {
unreachable!()
}
// broadcast raw transaction to subscribers if there is any.
self.inner.eth_api.broadcast_raw_transaction(tx);
let pool_transaction = <Self::Pool as TransactionPool>::Transaction::from_pooled(recovered);
// submit the transaction to the pool with a `Local` origin
let hash = self
.pool()
.add_transaction(TransactionOrigin::Local, pool_transaction)
.await
.map_err(Self::Error::from_eth_err)?;
Ok(hash)
fn send_raw_transaction_sync_timeout(&self) -> Duration {
self.inner.eth_api.send_raw_transaction_sync_timeout()
}
}

View File

@ -3,8 +3,9 @@ use eyre::{Error, Result};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
pub(crate) const MAINNET_CHAIN_ID: u64 = 999;
pub(crate) const TESTNET_CHAIN_ID: u64 = 998;
use crate::chainspec::{MAINNET_CHAIN_ID, TESTNET_CHAIN_ID};
mod patch;
#[derive(Debug, Clone, Serialize, Deserialize)]
struct EvmContract {
@ -59,5 +60,10 @@ pub(crate) fn erc20_contract_to_spot_token(chain_id: u64) -> Result<BTreeMap<Add
map.insert(evm_contract.address, SpotId { index: token.index });
}
}
if chain_id == TESTNET_CHAIN_ID {
patch::patch_testnet_spot_meta(&mut map);
}
Ok(map)
}
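On testnet the map is patched after loading, so a lookup for the patched ERC-20 address resolves even though it is missing from the upstream spot metadata. A hedged usage sketch (the assertion mirrors the patch shown in the next file):

```rust
use alloy_primitives::address;

fn spot_index_sketch() -> eyre::Result<()> {
    let map = erc20_contract_to_spot_token(TESTNET_CHAIN_ID)?;
    // This address is the one inserted by `patch_testnet_spot_meta`.
    let spot = map.get(&address!("0xd9cbec81df392a88aeff575e962d149d57f4d6bc"));
    assert_eq!(spot.map(|s| s.index), Some(0));
    Ok(())
}
```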

View File

@ -0,0 +1,8 @@
use crate::node::spot_meta::SpotId;
use alloy_primitives::{Address, address};
use std::collections::BTreeMap;
/// Testnet-specific fix for #67
pub(super) fn patch_testnet_spot_meta(map: &mut BTreeMap<Address, SpotId>) {
map.insert(address!("0xd9cbec81df392a88aeff575e962d149d57f4d6bc"), SpotId { index: 0 });
}

View File

@ -1,29 +1,27 @@
use crate::{
node::{
primitives::tx_wrapper::{convert_to_eth_block_body, convert_to_hl_block_body},
types::HlExtras,
},
HlBlock, HlBlockBody, HlPrimitives,
HlBlock, HlBlockBody, HlHeader, HlPrimitives,
node::{primitives::TransactionSigned, types::HlExtras},
};
use alloy_consensus::BlockHeader;
use alloy_primitives::Bytes;
use reth_chainspec::EthereumHardforks;
use reth_db::{
DbTxUnwindExt,
cursor::{DbCursorRO, DbCursorRW},
transaction::{DbTx, DbTxMut},
DbTxUnwindExt,
};
use reth_primitives_traits::Block;
use reth_provider::{
providers::{ChainStorage, NodeTypesForProvider},
BlockBodyReader, BlockBodyWriter, ChainSpecProvider, ChainStorageReader, ChainStorageWriter,
DBProvider, DatabaseProvider, EthStorage, ProviderResult, ReadBodyInput, StorageLocation,
providers::{ChainStorage, NodeTypesForProvider},
};
pub mod tables;
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct HlStorage(EthStorage);
pub struct HlStorage(EthStorage<TransactionSigned, HlHeader>);
impl HlStorage {
fn write_precompile_calls<Provider>(
@ -89,30 +87,17 @@ where
let mut read_precompile_calls = Vec::with_capacity(bodies.len());
for (block_number, body) in bodies {
match body {
let (inner_opt, extras) = match body {
Some(HlBlockBody {
inner,
sidecars: _,
read_precompile_calls: rpc,
read_precompile_calls,
highest_precompile_address,
}) => {
eth_bodies.push((block_number, Some(convert_to_eth_block_body(inner))));
read_precompile_calls.push((
block_number,
HlExtras { read_precompile_calls: rpc, highest_precompile_address },
));
}
None => {
eth_bodies.push((block_number, None));
read_precompile_calls.push((
block_number,
HlExtras {
read_precompile_calls: Default::default(),
highest_precompile_address: None,
},
));
}
}
}) => (Some(inner), HlExtras { read_precompile_calls, highest_precompile_address }),
None => Default::default(),
};
eth_bodies.push((block_number, inner_opt));
read_precompile_calls.push((block_number, extras));
}
self.0.write_block_bodies(provider, eth_bodies, write_to)?;
@ -146,22 +131,16 @@ where
inputs: Vec<ReadBodyInput<'_, Self::Block>>,
) -> ProviderResult<Vec<HlBlockBody>> {
let read_precompile_calls = self.read_precompile_calls(provider, &inputs)?;
let eth_bodies = self.0.read_block_bodies(
provider,
inputs
.into_iter()
.map(|(header, transactions)| {
(header, transactions.into_iter().map(|tx| tx.into_inner()).collect())
})
.collect(),
)?;
let inputs: Vec<(&<Self::Block as Block>::Header, _)> = inputs;
let eth_bodies = self.0.read_block_bodies(provider, inputs)?;
let eth_bodies: Vec<alloy_consensus::BlockBody<_, HlHeader>> = eth_bodies;
// NOTE: sidecars are not used in HyperEVM yet.
Ok(eth_bodies
.into_iter()
.zip(read_precompile_calls)
.map(|(inner, extra)| HlBlockBody {
inner: convert_to_hl_block_body(inner),
inner,
sidecars: None,
read_precompile_calls: extra.read_precompile_calls,
highest_precompile_address: extra.highest_precompile_address,

View File

@ -1,5 +1,5 @@
use alloy_primitives::{BlockNumber, Bytes};
use reth_db::{table::TableInfo, tables, TableSet, TableType, TableViewer};
use reth_db::{TableSet, TableType, TableViewer, table::TableInfo, tables};
use std::fmt;
tables! {

View File

@ -2,19 +2,22 @@
//!
//! Changes:
//! - ReadPrecompileCalls supports RLP encoding / decoding
use alloy_primitives::{Address, Bytes, Log, B256};
use alloy_consensus::TxType;
use alloy_primitives::{Address, B256, Bytes, Log};
use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable};
use bytes::BufMut;
use reth_ethereum_primitives::EthereumReceipt;
use reth_primitives_traits::InMemorySize;
use serde::{Deserialize, Serialize};
use crate::{node::spot_meta::MAINNET_CHAIN_ID, HlBlock};
use crate::HlBlock;
pub type ReadPrecompileCall = (Address, Vec<(ReadPrecompileInput, ReadPrecompileResult)>);
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Default)]
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Default, Hash)]
pub struct ReadPrecompileCalls(pub Vec<ReadPrecompileCall>);
mod reth_compat;
pub(crate) mod reth_compat;
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct HlExtras {
@ -22,6 +25,13 @@ pub struct HlExtras {
pub highest_precompile_address: Option<Address>,
}
impl InMemorySize for HlExtras {
fn size(&self) -> usize {
self.read_precompile_calls.as_ref().map_or(0, |s| s.0.len()) +
self.highest_precompile_address.as_ref().map_or(0, |_| 20)
}
}
impl Encodable for ReadPrecompileCalls {
fn encode(&self, out: &mut dyn BufMut) {
let buf: Bytes = rmp_serde::to_vec(&self.0).unwrap().into();
@ -38,7 +48,7 @@ impl Decodable for ReadPrecompileCalls {
}
}
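Since the RLP payload is just a msgpack byte string, a round-trip sanity check (a sketch, not a test from the source tree) looks like:

```rust
use alloy_rlp::{Decodable, Encodable};

#[test]
fn read_precompile_calls_rlp_round_trip() {
    // Hypothetical test: encode wraps the msgpack bytes in an RLP byte string,
    // decode unwraps and deserializes them back.
    let calls = ReadPrecompileCalls(vec![]);
    let mut buf = Vec::new();
    calls.encode(&mut buf);
    let decoded = ReadPrecompileCalls::decode(&mut buf.as_slice()).unwrap();
    assert_eq!(calls, decoded);
}
```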
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct BlockAndReceipts {
pub block: EvmBlock,
pub receipts: Vec<LegacyReceipt>,
@ -50,13 +60,14 @@ pub struct BlockAndReceipts {
}
impl BlockAndReceipts {
pub fn to_reth_block(self) -> HlBlock {
pub fn to_reth_block(self, chain_id: u64) -> HlBlock {
let EvmBlock::Reth115(block) = self.block;
block.to_reth_block(
self.read_precompile_calls.clone(),
self.highest_precompile_address,
self.system_txs.clone(),
MAINNET_CHAIN_ID,
self.receipts.clone(),
chain_id,
)
}
@ -71,12 +82,12 @@ impl BlockAndReceipts {
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub enum EvmBlock {
Reth115(reth_compat::SealedBlock),
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct LegacyReceipt {
tx_type: LegacyTxType,
success: bool,
@ -84,7 +95,24 @@ pub struct LegacyReceipt {
logs: Vec<Log>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
impl From<LegacyReceipt> for EthereumReceipt {
fn from(r: LegacyReceipt) -> Self {
EthereumReceipt {
tx_type: match r.tx_type {
LegacyTxType::Legacy => TxType::Legacy,
LegacyTxType::Eip2930 => TxType::Eip2930,
LegacyTxType::Eip1559 => TxType::Eip1559,
LegacyTxType::Eip4844 => TxType::Eip4844,
LegacyTxType::Eip7702 => TxType::Eip7702,
},
success: r.success,
cumulative_gas_used: r.cumulative_gas_used,
logs: r.logs,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
enum LegacyTxType {
Legacy = 0,
Eip2930 = 1,
@ -93,7 +121,7 @@ enum LegacyTxType {
Eip7702 = 4,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct SystemTx {
pub tx: reth_compat::Transaction,
pub receipt: Option<LegacyReceipt>,
@ -117,7 +145,7 @@ pub struct ReadPrecompileInput {
pub gas_limit: u64,
}
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Hash)]
pub enum ReadPrecompileResult {
Ok { gas_used: u64, bytes: Bytes },
OutOfGas,

View File

@ -10,12 +10,12 @@ use std::{
use tracing::info;
use crate::{
HlBlock, HlBlockBody, HlHeader,
node::{
primitives::TransactionSigned as TxSigned,
spot_meta::{erc20_contract_to_spot_token, SpotId},
types::{ReadPrecompileCalls, SystemTx},
spot_meta::{SpotId, erc20_contract_to_spot_token},
types::{LegacyReceipt, ReadPrecompileCalls, SystemTx},
},
HlBlock, HlBlockBody,
};
/// A raw transaction.
@ -114,22 +114,36 @@ impl SealedBlock {
read_precompile_calls: ReadPrecompileCalls,
highest_precompile_address: Option<Address>,
system_txs: Vec<super::SystemTx>,
receipts: Vec<LegacyReceipt>,
chain_id: u64,
) -> HlBlock {
let mut merged_txs = vec![];
merged_txs.extend(system_txs.iter().map(|tx| system_tx_to_reth_transaction(tx, chain_id)));
merged_txs.extend(self.body.transactions.iter().map(|tx| tx.to_reth_transaction()));
let mut merged_receipts = vec![];
merged_receipts.extend(system_txs.iter().map(|tx| tx.receipt.clone().unwrap().into()));
merged_receipts.extend(receipts.into_iter().map(From::from));
let block_body = HlBlockBody {
inner: reth_primitives::BlockBody {
transactions: merged_txs,
withdrawals: self.body.withdrawals.clone(),
ommers: self.body.ommers.clone(),
ommers: vec![],
},
sidecars: None,
read_precompile_calls: Some(read_precompile_calls),
highest_precompile_address,
};
HlBlock { header: self.header.header.clone(), body: block_body }
let system_tx_count = system_txs.len() as u64;
HlBlock {
header: HlHeader::from_ethereum_header(
self.header.header.clone(),
&merged_receipts,
system_tx_count,
),
body: block_body,
}
}
}

View File

@ -1,3 +1,7 @@
use std::time::Duration;
use crate::pseudo_peer::HlNodeBlockSourceArgs;
use super::config::BlockSourceConfig;
use clap::{Args, Parser};
use reth_node_core::args::LogArgs;
@ -13,7 +17,7 @@ pub struct BlockSourceArgs {
block_source: Option<String>,
#[arg(long, alias = "local-ingest-dir")]
block_source_from_node: Option<String>,
local_ingest_dir: Option<String>,
/// Shorthand for --block-source=s3://hl-mainnet-evm-blocks
#[arg(long, default_value_t = false)]
@ -22,6 +26,19 @@ pub struct BlockSourceArgs {
/// Shorthand for --local-ingest-dir=~/hl/data/evm_block_and_receipts
#[arg(long)]
local: bool,
/// Interval for polling new blocks from S3, in milliseconds.
#[arg(id = "s3.polling-interval", long = "s3.polling-interval", default_value = "25")]
s3_polling_interval: u64,
/// Maximum allowed delay for the hl-node block source in milliseconds.
/// If this threshold is exceeded, the client falls back to other sources.
#[arg(
id = "local.fallback-threshold",
long = "local.fallback-threshold",
default_value = "5000"
)]
local_fallback_threshold: u64,
}
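A hypothetical sketch of how the two new flags parse, assuming a top-level command that flattens `BlockSourceArgs` (the `Cli` wrapper is illustrative, not part of the source):

```rust
use clap::Parser;

/// Hypothetical top-level command; only `BlockSourceArgs` above is real.
#[derive(Parser)]
struct Cli {
    #[command(flatten)]
    source: BlockSourceArgs,
}

fn parse_sketch() {
    // Poll S3 every 50 ms; allow hl-node to lag up to 10 s before falling back.
    let _cli = Cli::parse_from([
        "node",
        "--s3.polling-interval", "50",
        "--local.fallback-threshold", "10000",
    ]);
}
```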
impl BlockSourceArgs {
@ -33,7 +50,10 @@ impl BlockSourceArgs {
async fn create_base_config(&self) -> eyre::Result<BlockSourceConfig> {
if self.s3 {
return Ok(BlockSourceConfig::s3_default().await);
return Ok(BlockSourceConfig::s3_default(Duration::from_millis(
self.s3_polling_interval,
))
.await);
}
if self.local {
@ -47,18 +67,25 @@ impl BlockSourceArgs {
};
if let Some(bucket) = value.strip_prefix("s3://") {
Ok(BlockSourceConfig::s3(bucket.to_string()).await)
Ok(BlockSourceConfig::s3(
bucket.to_string(),
Duration::from_millis(self.s3_polling_interval),
)
.await)
} else {
Ok(BlockSourceConfig::local(value.into()))
}
}
fn apply_node_source_config(&self, config: BlockSourceConfig) -> BlockSourceConfig {
let Some(block_source_from_node) = self.block_source_from_node.as_ref() else {
let Some(local_ingest_dir) = self.local_ingest_dir.as_ref() else {
return config;
};
config.with_block_source_from_node(block_source_from_node.to_string())
config.with_block_source_from_node(HlNodeBlockSourceArgs {
root: local_ingest_dir.into(),
fallback_threshold: Duration::from_millis(self.local_fallback_threshold),
})
}
}

View File

@ -1,34 +1,38 @@
use super::{
consts::DEFAULT_S3_BUCKET,
sources::{
BlockSourceBoxed, CachedBlockSource, HlNodeBlockSource, LocalBlockSource, S3BlockSource,
},
use crate::chainspec::HlChainSpec;
use super::sources::{
BlockSourceBoxed, CachedBlockSource, HlNodeBlockSource, HlNodeBlockSourceArgs,
LocalBlockSource, S3BlockSource,
};
use aws_config::BehaviorVersion;
use std::{env::home_dir, path::PathBuf, sync::Arc};
use std::{env::home_dir, path::PathBuf, sync::Arc, time::Duration};
#[derive(Debug, Clone)]
pub struct BlockSourceConfig {
pub source_type: BlockSourceType,
pub block_source_from_node: Option<String>,
pub block_source_from_node: Option<HlNodeBlockSourceArgs>,
}
#[derive(Debug, Clone)]
pub enum BlockSourceType {
S3 { bucket: String },
S3Default { polling_interval: Duration },
S3 { bucket: String, polling_interval: Duration },
Local { path: PathBuf },
}
impl BlockSourceConfig {
pub async fn s3_default() -> Self {
pub async fn s3_default(polling_interval: Duration) -> Self {
Self {
source_type: BlockSourceType::S3 { bucket: DEFAULT_S3_BUCKET.to_string() },
source_type: BlockSourceType::S3Default { polling_interval },
block_source_from_node: None,
}
}
pub async fn s3(bucket: String) -> Self {
Self { source_type: BlockSourceType::S3 { bucket }, block_source_from_node: None }
pub async fn s3(bucket: String, polling_interval: Duration) -> Self {
Self {
source_type: BlockSourceType::S3 { bucket, polling_interval },
block_source_from_node: None,
}
}
pub fn local(path: PathBuf) -> Self {
@ -42,27 +46,27 @@ impl BlockSourceConfig {
.expect("home dir not found")
.join("hl")
.join("data")
.join("evm_blocks_and_receipts"),
.join("evm_block_and_receipts"),
},
block_source_from_node: None,
}
}
pub fn with_block_source_from_node(mut self, block_source_from_node: String) -> Self {
pub fn with_block_source_from_node(
mut self,
block_source_from_node: HlNodeBlockSourceArgs,
) -> Self {
self.block_source_from_node = Some(block_source_from_node);
self
}
pub async fn create_block_source(&self) -> BlockSourceBoxed {
pub async fn create_block_source(&self, chain_spec: HlChainSpec) -> BlockSourceBoxed {
match &self.source_type {
BlockSourceType::S3 { bucket } => {
let client = aws_sdk_s3::Client::new(
&aws_config::defaults(BehaviorVersion::latest())
.region("ap-northeast-1")
.load()
.await,
);
Arc::new(Box::new(S3BlockSource::new(client, bucket.clone())))
BlockSourceType::S3Default { polling_interval } => {
s3_block_source(chain_spec.official_s3_bucket(), *polling_interval).await
}
BlockSourceType::S3 { bucket, polling_interval } => {
s3_block_source(bucket, *polling_interval).await
}
BlockSourceType::Local { path } => {
Arc::new(Box::new(LocalBlockSource::new(path.clone())))
@ -82,16 +86,28 @@ impl BlockSourceConfig {
Arc::new(Box::new(
HlNodeBlockSource::new(
fallback_block_source,
PathBuf::from(block_source_from_node.clone()),
block_source_from_node.clone(),
next_block_number,
)
.await,
))
}
pub async fn create_cached_block_source(&self, next_block_number: u64) -> BlockSourceBoxed {
let block_source = self.create_block_source().await;
let block_source = self.create_block_source_from_node(next_block_number, block_source).await;
pub async fn create_cached_block_source(
&self,
chain_spec: HlChainSpec,
next_block_number: u64,
) -> BlockSourceBoxed {
let block_source = self.create_block_source(chain_spec).await;
let block_source =
self.create_block_source_from_node(next_block_number, block_source).await;
Arc::new(Box::new(CachedBlockSource::new(block_source)))
}
}
async fn s3_block_source(bucket: impl AsRef<str>, polling_interval: Duration) -> BlockSourceBoxed {
let client = aws_sdk_s3::Client::new(
&aws_config::defaults(BehaviorVersion::latest()).region("ap-northeast-1").load().await,
);
Arc::new(Box::new(S3BlockSource::new(client, bucket.as_ref().to_string(), polling_interval)))
}
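Putting the pieces together, a sketch of building a source chain (custom bucket, hl-node overlay, LRU cache); the bucket name, paths, and durations are illustrative:

```rust
use std::time::Duration;

async fn build_source(chain_spec: HlChainSpec, next_block_number: u64) -> BlockSourceBoxed {
    let config = BlockSourceConfig::s3("my-bucket".to_string(), Duration::from_millis(50))
        .await
        .with_block_source_from_node(HlNodeBlockSourceArgs {
            root: "/home/user/hl/data/evm_block_and_receipts".into(),
            fallback_threshold: Duration::from_millis(5_000),
        });
    config.create_cached_block_source(chain_spec, next_block_number).await
}
```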

View File

@ -1,2 +0,0 @@
pub const MAX_CONCURRENCY: usize = 100;
pub const DEFAULT_S3_BUCKET: &str = "hl-mainnet-evm-blocks";

View File

@ -1,36 +0,0 @@
use thiserror::Error;
#[derive(Error, Debug)]
pub enum PseudoPeerError {
#[error("Block source error: {0}")]
BlockSource(String),
#[error("Network error: {0}")]
Network(#[from] reth_network::error::NetworkError),
#[error("Configuration error: {0}")]
Config(String),
#[error("AWS S3 error: {0}")]
S3(#[from] aws_sdk_s3::Error),
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("Serialization error: {0}")]
Serialization(#[from] rmp_serde::encode::Error),
#[error("Deserialization error: {0}")]
Deserialization(#[from] rmp_serde::decode::Error),
#[error("Compression error: {0}")]
Compression(String),
}
impl From<eyre::Error> for PseudoPeerError {
fn from(err: eyre::Error) -> Self {
PseudoPeerError::Config(err.to_string())
}
}
pub type Result<T> = std::result::Result<T, PseudoPeerError>;

View File

@ -5,33 +5,25 @@
pub mod cli;
pub mod config;
pub mod consts;
pub mod error;
pub mod network;
pub mod service;
pub mod sources;
pub mod utils;
use std::sync::Arc;
use tokio::sync::mpsc;
use tracing::{error, info};
pub use cli::*;
pub use config::*;
pub use error::*;
pub use network::*;
pub use service::*;
pub use sources::*;
#[cfg(test)]
mod tests;
use tokio::sync::mpsc;
use tracing::info;
/// Re-export commonly used types
pub mod prelude {
pub use super::{
config::BlockSourceConfig,
error::{PseudoPeerError, Result},
service::{BlockPoller, PseudoPeer},
sources::{BlockSource, CachedBlockSource, LocalBlockSource, S3BlockSource},
};
@ -45,14 +37,17 @@ pub async fn start_pseudo_peer(
chain_spec: Arc<HlChainSpec>,
destination_peer: String,
block_source: BlockSourceBoxed,
debug_cutoff_height: Option<u64>,
) -> eyre::Result<()> {
let blockhash_cache = new_blockhash_cache();
// Create network manager
let (mut network, start_tx) = create_network_manager::<BlockSourceBoxed>(
(*chain_spec).clone(),
destination_peer,
block_source.clone(),
blockhash_cache.clone(),
debug_cutoff_height,
)
.await?;
@ -85,9 +80,12 @@ pub async fn start_pseudo_peer(
_ = transaction_rx.recv() => {}
Some(eth_req) = eth_rx.recv() => {
service.process_eth_request(eth_req).await?;
if let Err(e) = service.process_eth_request(eth_req).await {
error!("Error processing eth request: {e:?}");
} else {
info!("Processed eth request");
}
}
}
}
}

View File

@ -1,16 +1,16 @@
use super::service::{BlockHashCache, BlockPoller};
use crate::{
chainspec::{parser::chain_value_parser, HlChainSpec},
node::network::HlNetworkPrimitives,
HlPrimitives,
};
use crate::{HlPrimitives, chainspec::HlChainSpec, node::network::HlNetworkPrimitives};
use reth_network::{
config::{rng_secret_key, SecretKey},
NetworkConfig, NetworkManager, PeersConfig,
config::{SecretKey, rng_secret_key},
};
use reth_network_peers::TrustedPeer;
use reth_provider::test_utils::NoopProvider;
use std::{str::FromStr, sync::Arc};
use std::{
net::{Ipv4Addr, SocketAddr},
str::FromStr,
sync::Arc,
};
use tokio::sync::mpsc;
pub struct NetworkBuilder {
@ -19,6 +19,8 @@ pub struct NetworkBuilder {
boot_nodes: Vec<TrustedPeer>,
discovery_port: u16,
listener_port: u16,
chain_spec: HlChainSpec,
debug_cutoff_height: Option<u64>,
}
impl Default for NetworkBuilder {
@ -29,29 +31,25 @@ impl Default for NetworkBuilder {
boot_nodes: vec![],
discovery_port: 0,
listener_port: 0,
chain_spec: HlChainSpec::default(),
debug_cutoff_height: None,
}
}
}
impl NetworkBuilder {
pub fn with_secret(mut self, secret: SecretKey) -> Self {
self.secret = secret;
self
}
pub fn with_peer_config(mut self, peer_config: PeersConfig) -> Self {
self.peer_config = peer_config;
self
}
pub fn with_boot_nodes(mut self, boot_nodes: Vec<TrustedPeer>) -> Self {
self.boot_nodes = boot_nodes;
self
}
pub fn with_ports(mut self, discovery_port: u16, listener_port: u16) -> Self {
self.discovery_port = discovery_port;
self.listener_port = listener_port;
pub fn with_chain_spec(mut self, chain_spec: HlChainSpec) -> Self {
self.chain_spec = chain_spec;
self
}
pub fn with_debug_cutoff_height(mut self, debug_cutoff_height: Option<u64>) -> Self {
self.debug_cutoff_height = debug_cutoff_height;
self
}
@ -63,15 +61,21 @@ impl NetworkBuilder {
let builder = NetworkConfig::<(), HlNetworkPrimitives>::builder(self.secret)
.boot_nodes(self.boot_nodes)
.peer_config(self.peer_config)
.discovery_port(self.discovery_port)
.listener_port(self.listener_port);
.discovery_addr(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), self.discovery_port))
.listener_addr(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), self.listener_port));
let chain_id = self.chain_spec.inner.chain().id();
let (block_poller, start_tx) = BlockPoller::new_suspended(block_source, blockhash_cache);
let (block_poller, start_tx) = BlockPoller::new_suspended(
chain_id,
block_source,
blockhash_cache,
self.debug_cutoff_height,
);
let config = builder.block_import(Box::new(block_poller)).build(Arc::new(NoopProvider::<
HlChainSpec,
HlPrimitives,
>::new(
chain_value_parser("mainnet").unwrap(),
self.chain_spec.into(),
)));
let network = NetworkManager::new(config).await.map_err(|e| eyre::eyre!(e))?;
@ -80,12 +84,16 @@ impl NetworkBuilder {
}
pub async fn create_network_manager<BS>(
chain_spec: HlChainSpec,
destination_peer: String,
block_source: Arc<Box<dyn super::sources::BlockSource>>,
blockhash_cache: BlockHashCache,
debug_cutoff_height: Option<u64>,
) -> eyre::Result<(NetworkManager<HlNetworkPrimitives>, mpsc::Sender<()>)> {
NetworkBuilder::default()
.with_boot_nodes(vec![TrustedPeer::from_str(&destination_peer).unwrap()])
.with_chain_spec(chain_spec)
.with_debug_cutoff_height(debug_cutoff_height)
.build::<BS>(block_source, blockhash_cache)
.await
}

View File

@ -26,7 +26,6 @@ use std::{
pin::Pin,
sync::{Arc, Mutex},
task::{Context, Poll},
time::Duration,
};
use tokio::{sync::mpsc, task::JoinHandle};
use tracing::{debug, info};
@ -42,24 +41,24 @@ pub fn new_blockhash_cache() -> BlockHashCache {
/// A block poller that polls blocks from `BlockSource` and sends them to `block_tx`
#[derive(Debug)]
pub struct BlockPoller {
chain_id: u64,
block_rx: mpsc::Receiver<(u64, BlockAndReceipts)>,
task: JoinHandle<eyre::Result<()>>,
blockhash_cache: BlockHashCache,
}
impl BlockPoller {
const POLL_INTERVAL: Duration = Duration::from_millis(25);
pub fn new_suspended<BS: BlockSource>(
chain_id: u64,
block_source: BS,
blockhash_cache: BlockHashCache,
debug_cutoff_height: Option<u64>,
) -> (Self, mpsc::Sender<()>) {
let block_source = Arc::new(block_source);
let (start_tx, start_rx) = mpsc::channel(1);
let (block_tx, block_rx) = mpsc::channel(100);
let block_tx_clone = block_tx.clone();
let task = tokio::spawn(Self::task(start_rx, block_source, block_tx_clone));
(Self { block_rx, task, blockhash_cache: blockhash_cache.clone() }, start_tx)
let task = tokio::spawn(Self::task(start_rx, block_source, block_tx, debug_cutoff_height));
(Self { chain_id, block_rx, task, blockhash_cache: blockhash_cache.clone() }, start_tx)
}
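The poller is constructed suspended and only starts polling once a signal is sent on the returned channel. A hedged wiring sketch (the function and argument names are assumptions, the API is the one above):

```rust
async fn start_poller_sketch(
    chain_id: u64,
    source: BlockSourceBoxed,
    cache: BlockHashCache,
) -> eyre::Result<()> {
    // Construct suspended; no polling happens until the start signal below.
    let (_poller, start_tx) = BlockPoller::new_suspended(chain_id, source, cache, None);
    // Install `_poller` as the network's block import, then release it:
    start_tx.send(()).await?;
    Ok(())
}
```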
#[allow(unused)]
@ -70,25 +69,33 @@ impl BlockPoller {
async fn task<BS: BlockSource>(
mut start_rx: mpsc::Receiver<()>,
block_source: Arc<BS>,
block_tx_clone: mpsc::Sender<(u64, BlockAndReceipts)>,
block_tx: mpsc::Sender<(u64, BlockAndReceipts)>,
debug_cutoff_height: Option<u64>,
) -> eyre::Result<()> {
start_rx.recv().await.ok_or(eyre::eyre!("Failed to receive start signal"))?;
info!("Starting block poller");
let latest_block_number = block_source
let polling_interval = block_source.polling_interval();
let mut next_block_number = block_source
.find_latest_block_number()
.await
.ok_or(eyre::eyre!("Failed to find latest block number"))?;
let mut next_block_number = latest_block_number;
if let Some(debug_cutoff_height) = debug_cutoff_height &&
next_block_number > debug_cutoff_height
{
next_block_number = debug_cutoff_height;
}
loop {
let Ok(block) = block_source.collect_block(next_block_number).await else {
tokio::time::sleep(Self::POLL_INTERVAL).await;
continue;
};
block_tx_clone.send((next_block_number, block)).await?;
match block_source.collect_block(next_block_number).await {
Ok(block) => {
block_tx.send((next_block_number, block)).await?;
next_block_number += 1;
}
Err(_) => tokio::time::sleep(polling_interval).await,
}
}
}
}
@ -98,7 +105,7 @@ impl BlockImport<HlNewBlock> for BlockPoller {
match Pin::new(&mut self.block_rx).poll_recv(_cx) {
Poll::Ready(Some((number, block))) => {
debug!("Polled block: {}", number);
let reth_block = block.to_reth_block();
let reth_block = block.to_reth_block(self.chain_id);
let hash = reth_block.header.hash_slow();
self.blockhash_cache.write().insert(hash, number);
let td = U128::from(reth_block.header.difficulty);
@ -109,8 +116,7 @@ impl BlockImport<HlNewBlock> for BlockPoller {
},
}))
}
Poll::Ready(None) => Poll::Pending,
Poll::Pending => Poll::Pending,
Poll::Ready(None) | Poll::Pending => Poll::Pending,
}
}
@ -153,20 +159,21 @@ impl<BS: BlockSource> PseudoPeer<BS> {
async fn collect_blocks(
&self,
block_numbers: impl IntoIterator<Item = u64>,
) -> Vec<BlockAndReceipts> {
) -> eyre::Result<Vec<BlockAndReceipts>> {
let block_numbers = block_numbers.into_iter().collect::<Vec<_>>();
let blocks = futures::stream::iter(block_numbers)
.map(async |number| self.collect_block(number).await.unwrap())
let res = futures::stream::iter(block_numbers)
.map(async |number| self.collect_block(number).await)
.buffered(self.block_source.recommended_chunk_size() as usize)
.collect::<Vec<_>>()
.await;
blocks
res.into_iter().collect()
}
pub async fn process_eth_request(
&mut self,
eth_req: IncomingEthRequest<HlNetworkPrimitives>,
) -> eyre::Result<()> {
let chain_id = self.chain_spec.inner.chain().id();
match eth_req {
IncomingEthRequest::GetBlockHeaders {
peer_id: _,
@ -176,7 +183,6 @@ impl<BS: BlockSource> PseudoPeer<BS> {
debug!(
"GetBlockHeaders request: {start_block:?}, {limit:?}, {skip:?}, {direction:?}"
);
let number = match start_block {
HashOrNumber::Hash(hash) => self.hash_to_block_number(hash).await,
HashOrNumber::Number(number) => number,
@ -187,9 +193,9 @@ impl<BS: BlockSource> PseudoPeer<BS> {
HeadersDirection::Falling => {
self.collect_blocks((number + 1 - limit..number + 1).rev()).await
}
}
}?
.into_par_iter()
.map(|block| block.to_reth_block().header.clone())
.map(|block| block.to_reth_block(chain_id).header.clone())
.collect::<Vec<_>>();
let _ = response.send(Ok(BlockHeaders(block_headers)));
@ -205,19 +211,15 @@ impl<BS: BlockSource> PseudoPeer<BS> {
let block_bodies = self
.collect_blocks(numbers)
.await
.await?
.into_iter()
.map(|block| block.to_reth_block().body)
.map(|block| block.to_reth_block(chain_id).body)
.collect::<Vec<_>>();
let _ = response.send(Ok(BlockBodies(block_bodies)));
}
IncomingEthRequest::GetNodeData { .. } => {
debug!("GetNodeData request: {eth_req:?}");
}
eth_req => {
debug!("New eth protocol request: {eth_req:?}");
}
IncomingEthRequest::GetNodeData { .. } => debug!("GetNodeData request: {eth_req:?}"),
eth_req => debug!("New eth protocol request: {eth_req:?}"),
}
Ok(())
}
@ -248,7 +250,6 @@ impl<BS: BlockSource> PseudoPeer<BS> {
// This is tricky because raw EVM files (BlockSource) do not have a hash-to-number mapping,
// so we can either enumerate all blocks to build that mapping, or fall back to an
// official RPC. The latter is much easier but has a 300/day rate limit.
use jsonrpsee::http_client::HttpClientBuilder;
use jsonrpsee_core::client::ClientT;
@ -256,7 +257,6 @@ impl<BS: BlockSource> PseudoPeer<BS> {
let client =
HttpClientBuilder::default().build(self.chain_spec.official_rpc_url()).unwrap();
let target_block: Block = client.request("eth_getBlockByHash", (hash, false)).await?;
debug!("From official RPC: {:?} for {hash:?}", target_block.header.number);
self.cache_blocks([(hash, target_block.header.number)]);
Ok(target_block.header.number)
@ -269,10 +269,11 @@ impl<BS: BlockSource> PseudoPeer<BS> {
if self.if_hit_then_warm_around.lock().unwrap().contains(&block_number) {
self.warm_cache_around_blocks(block_number, self.warm_cache_size).await;
}
return Some(block_number);
}
Some(block_number)
} else {
None
}
}
/// Backfill the cache with blocks to find the target hash
async fn backfill_cache_for_hash(
@ -316,10 +317,11 @@ impl<BS: BlockSource> PseudoPeer<BS> {
async fn warm_cache_around_blocks(&mut self, block_number: u64, chunk_size: u64) {
let start = std::cmp::max(block_number.saturating_sub(chunk_size), 1);
let end = std::cmp::min(block_number + chunk_size, self.known_latest_block_number);
self.if_hit_then_warm_around.lock().unwrap().insert(start);
self.if_hit_then_warm_around.lock().unwrap().insert(end);
{
let mut guard = self.if_hit_then_warm_around.lock().unwrap();
guard.insert(start);
guard.insert(end);
}
const IMPOSSIBLE_HASH: B256 = B256::ZERO;
let _ = self.try_block_range_for_hash(start, end, IMPOSSIBLE_HASH).await;
}
@ -345,15 +347,12 @@ impl<BS: BlockSource> PseudoPeer<BS> {
}
debug!("Backfilling from {start_number} to {end_number}");
// Collect blocks and cache them
let blocks = self.collect_blocks(uncached_block_numbers).await;
let blocks = self.collect_blocks(uncached_block_numbers).await?;
let block_map: HashMap<B256, u64> =
blocks.into_iter().map(|block| (block.hash(), block.number())).collect();
let maybe_block_number = block_map.get(&target_hash).copied();
self.cache_blocks(block_map);
Ok(maybe_block_number)
}

View File

@ -0,0 +1,48 @@
use super::{BlockSource, BlockSourceBoxed};
use crate::node::types::BlockAndReceipts;
use futures::{FutureExt, future::BoxFuture};
use reth_network::cache::LruMap;
use std::sync::{Arc, RwLock};
/// Block source wrapper that caches blocks in memory
#[derive(Debug, Clone)]
pub struct CachedBlockSource {
block_source: BlockSourceBoxed,
cache: Arc<RwLock<LruMap<u64, BlockAndReceipts>>>,
}
impl CachedBlockSource {
const CACHE_LIMIT: u32 = 100000;
pub fn new(block_source: BlockSourceBoxed) -> Self {
Self { block_source, cache: Arc::new(RwLock::new(LruMap::new(Self::CACHE_LIMIT))) }
}
}
impl BlockSource for CachedBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
let block_source = self.block_source.clone();
let cache = self.cache.clone();
async move {
if let Some(block) = cache.write().unwrap().get(&height) {
return Ok(block.clone());
}
let block = block_source.collect_block(height).await?;
cache.write().unwrap().insert(height, block.clone());
Ok(block)
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
self.block_source.find_latest_block_number()
}
fn recommended_chunk_size(&self) -> u64 {
self.block_source.recommended_chunk_size()
}
fn polling_interval(&self) -> std::time::Duration {
self.block_source.polling_interval()
}
}
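A short usage sketch: repeated requests for the same height hit the LRU map instead of the wrapped source. The inner source is assumed to be in scope:

```rust
async fn cached_usage_sketch(inner: BlockSourceBoxed) -> eyre::Result<()> {
    let cached = CachedBlockSource::new(inner);
    let first = cached.collect_block(42).await?; // fetched from the inner source
    let again = cached.collect_block(42).await?; // served from the cache
    assert_eq!(first, again); // BlockAndReceipts now derives PartialEq
    Ok(())
}
```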

View File

@ -1,393 +0,0 @@
use std::{
fs::File,
io::{BufRead, BufReader, Read, Seek, SeekFrom},
ops::RangeInclusive,
path::{Path, PathBuf},
sync::Arc,
};
use futures::future::BoxFuture;
use reth_network::cache::LruMap;
use rangemap::RangeInclusiveMap;
use serde::Deserialize;
use time::{macros::format_description, Date, Duration, OffsetDateTime, Time};
use tokio::sync::Mutex;
use tracing::{info, warn};
use crate::node::types::{BlockAndReceipts, EvmBlock};
use super::{BlockSource, BlockSourceBoxed};
const TAIL_INTERVAL: std::time::Duration = std::time::Duration::from_millis(25);
const HOURLY_SUBDIR: &str = "hourly";
#[derive(Debug)]
pub struct LocalBlocksCache {
cache: LruMap<u64, BlockAndReceipts>,
// Lightweight range map to track the ranges of blocks in the local ingest directory
ranges: RangeInclusiveMap<u64, PathBuf>,
}
impl LocalBlocksCache {
// 3660 blocks per hour
const CACHE_SIZE: u32 = 8000;
fn new() -> Self {
Self {
cache: LruMap::new(Self::CACHE_SIZE),
ranges: RangeInclusiveMap::new(),
}
}
fn load_scan_result(&mut self, scan_result: ScanResult) {
for blk in scan_result.new_blocks {
let EvmBlock::Reth115(b) = &blk.block;
self.cache.insert(b.header.header.number, blk);
}
for range in scan_result.new_block_ranges {
self.ranges.insert(range, scan_result.path.clone());
}
}
}
/// Block source that monitors the local ingest directory for the HL node.
#[derive(Debug, Clone)]
pub struct HlNodeBlockSource {
pub fallback: BlockSourceBoxed,
pub local_ingest_dir: PathBuf,
pub local_blocks_cache: Arc<Mutex<LocalBlocksCache>>, // height → block
}
#[derive(Deserialize)]
struct LocalBlockAndReceipts(String, BlockAndReceipts);
struct ScanResult {
path: PathBuf,
next_expected_height: u64,
new_blocks: Vec<BlockAndReceipts>,
new_block_ranges: Vec<RangeInclusive<u64>>,
}
fn line_to_evm_block(line: &str) -> serde_json::Result<(BlockAndReceipts, u64)> {
let LocalBlockAndReceipts(_block_timestamp, parsed_block): LocalBlockAndReceipts =
serde_json::from_str(line)?;
let height = match &parsed_block.block {
EvmBlock::Reth115(b) => b.header.header.number,
};
Ok((parsed_block, height))
}
fn scan_hour_file(path: &Path, last_line: &mut usize, start_height: u64) -> ScanResult {
let file = File::open(path).expect("Failed to open hour file path");
let reader = BufReader::new(file);
let mut new_blocks = Vec::new();
let mut last_height = start_height;
let lines: Vec<String> = reader.lines().collect::<Result<_, _>>().unwrap();
let skip = if *last_line == 0 { 0 } else { *last_line - 1 };
let mut block_ranges: Vec<RangeInclusive<u64>> = Vec::new();
let mut current_range: Option<(u64, u64)> = None;
for (line_idx, line) in lines.iter().enumerate().skip(skip) {
if line_idx < *last_line || line.trim().is_empty() {
continue;
}
match line_to_evm_block(line) {
Ok((parsed_block, height)) => {
if height >= start_height {
last_height = last_height.max(height);
new_blocks.push(parsed_block);
*last_line = line_idx;
}
if matches!(current_range, Some((_, end)) if end + 1 == height) {
current_range = Some((current_range.unwrap().0, height));
} else {
if let Some((start, end)) = current_range.take() {
block_ranges.push(start..=end);
}
current_range = Some((height, height));
}
}
Err(_) => {
warn!("Failed to parse line: {}...", line.get(0..50).unwrap_or(line));
continue;
}
}
}
if let Some((start, end)) = current_range.take() {
block_ranges.push(start..=end);
}
ScanResult {
path: path.to_path_buf(),
next_expected_height: last_height + 1,
new_blocks,
new_block_ranges: block_ranges,
}
}
fn date_from_datetime(dt: OffsetDateTime) -> String {
dt.format(&format_description!("[year][month][day]")).unwrap()
}
impl BlockSource for HlNodeBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
Box::pin(async move {
if let Some(block) = self.try_collect_local_block(height).await {
info!("Returning locally synced block for @ Height [{height}]");
Ok(block)
} else {
self.fallback.collect_block(height).await
}
})
}
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
Box::pin(async move {
let Some(dir) = Self::find_latest_hourly_file(&self.local_ingest_dir) else {
warn!(
"No EVM blocks from hl-node found at {:?}; fallback to s3/ingest-dir",
self.local_ingest_dir
);
return self.fallback.find_latest_block_number().await;
};
let mut file = File::open(&dir).expect("Failed to open hour file path");
let last_line = read_last_complete_line(&mut file);
let Ok((_, height)) = line_to_evm_block(&last_line) else {
warn!(
"Failed to parse the hl-node hourly file at {:?}; fallback to s3/ingest-dir",
file
);
return self.fallback.find_latest_block_number().await;
};
info!("Latest block number: {} with path {}", height, dir.display());
Some(height)
})
}
fn recommended_chunk_size(&self) -> u64 {
self.fallback.recommended_chunk_size()
}
}
fn read_last_complete_line<R: Read + Seek>(read: &mut R) -> String {
const CHUNK_SIZE: u64 = 4096;
let mut buf = Vec::with_capacity(CHUNK_SIZE as usize);
let mut pos = read.seek(SeekFrom::End(0)).unwrap();
let mut last_line: Vec<u8> = Vec::new();
while pos > 0 {
let read_size = std::cmp::min(pos, CHUNK_SIZE);
buf.resize(read_size as usize, 0);
read.seek(SeekFrom::Start(pos - read_size)).unwrap();
read.read_exact(&mut buf).unwrap();
last_line = [buf.clone(), last_line].concat();
if last_line.ends_with(b"\n") {
last_line.pop();
}
if let Some(idx) = last_line.iter().rposition(|&b| b == b'\n') {
let candidate = &last_line[idx + 1..];
if line_to_evm_block(&String::from_utf8(candidate.to_vec()).unwrap()).is_ok() {
return String::from_utf8(candidate.to_vec()).unwrap();
}
last_line.truncate(idx);
}
if pos < read_size {
break;
}
pos -= read_size;
}
String::from_utf8(last_line).unwrap()
}
impl HlNodeBlockSource {
async fn try_collect_local_block(&self, height: u64) -> Option<BlockAndReceipts> {
let mut u_cache = self.local_blocks_cache.lock().await;
if let Some(block) = u_cache.cache.remove(&height) {
return Some(block);
}
let Some(path) = u_cache.ranges.get(&height).cloned() else {
return None;
};
info!("Loading block data from {:?}", path);
u_cache.load_scan_result(scan_hour_file(&path, &mut 0, height));
u_cache.cache.get(&height).cloned()
}
fn datetime_from_path(path: &Path) -> Option<OffsetDateTime> {
let dt_part = path.parent()?.file_name()?.to_str()?;
let hour_part = path.file_name()?.to_str()?;
let hour: u8 = hour_part.parse().ok()?;
Some(OffsetDateTime::new_utc(
Date::parse(&format!("{dt_part}"), &format_description!("[year][month][day]")).ok()?,
Time::from_hms(hour, 0, 0).ok()?,
))
}
fn all_hourly_files(root: &Path) -> Option<Vec<PathBuf>> {
let dir = root.join(HOURLY_SUBDIR);
let mut files = Vec::new();
for entry in std::fs::read_dir(dir).ok()? {
let file = entry.ok()?.path();
let subfiles: Vec<_> = std::fs::read_dir(&file)
.ok()?
.filter_map(|f| f.ok().map(|f| f.path()))
.filter(|p| Self::datetime_from_path(p).is_some())
.collect();
files.extend(subfiles);
}
files.sort();
Some(files)
}
fn find_latest_hourly_file(root: &Path) -> Option<PathBuf> {
Self::all_hourly_files(root)?.last().cloned()
}
async fn try_backfill_local_blocks(
root: &Path,
cache: &Arc<Mutex<LocalBlocksCache>>,
cutoff_height: u64,
) -> eyre::Result<()> {
let mut u_cache = cache.lock().await;
for subfile in Self::all_hourly_files(root).unwrap_or_default() {
let mut file = File::open(&subfile).expect("Failed to open hour file path");
let last_line = read_last_complete_line(&mut file);
if let Ok((_, height)) = line_to_evm_block(&last_line) {
if height < cutoff_height {
continue;
}
} else {
warn!("Failed to parse last line of file, fallback to slow path: {:?}", subfile);
}
let mut scan_result = scan_hour_file(&subfile, &mut 0, cutoff_height);
// Only store the block ranges for now; actual block data will be loaded lazily later to optimize memory usage
scan_result.new_blocks.clear();
u_cache.load_scan_result(scan_result);
}
info!("Backfilled {} blocks", u_cache.cache.len());
Ok(())
}
async fn start_local_ingest_loop(&self, current_head: u64) {
let root = self.local_ingest_dir.to_owned();
let cache = self.local_blocks_cache.clone();
tokio::spawn(async move {
let mut next_height = current_head;
// Wait for the first hourly file to be created
let mut dt = loop {
if let Some(latest_file) = Self::find_latest_hourly_file(&root) {
break Self::datetime_from_path(&latest_file).unwrap();
}
tokio::time::sleep(TAIL_INTERVAL).await;
};
let mut hour = dt.hour();
let mut day_str = date_from_datetime(dt);
let mut last_line = 0;
info!("Starting local ingest loop from height: {:?}", current_head);
loop {
let hour_file = root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"));
if hour_file.exists() {
let scan_result = scan_hour_file(&hour_file, &mut last_line, next_height);
next_height = scan_result.next_expected_height;
let mut u_cache = cache.lock().await;
u_cache.load_scan_result(scan_result);
}
let now = OffsetDateTime::now_utc();
if dt + Duration::HOUR < now {
dt += Duration::HOUR;
hour = dt.hour();
day_str = date_from_datetime(dt);
last_line = 0;
info!(
"Moving to a new file. {:?}",
root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"))
);
continue;
}
tokio::time::sleep(TAIL_INTERVAL).await;
}
});
}
pub(crate) async fn run(&self, next_block_number: u64) -> eyre::Result<()> {
let _ = Self::try_backfill_local_blocks(
&self.local_ingest_dir,
&self.local_blocks_cache,
next_block_number,
)
.await;
self.start_local_ingest_loop(next_block_number).await;
Ok(())
}
pub async fn new(
fallback: BlockSourceBoxed,
local_ingest_dir: PathBuf,
next_block_number: u64,
) -> Self {
let block_source = HlNodeBlockSource {
fallback,
local_ingest_dir,
local_blocks_cache: Arc::new(Mutex::new(LocalBlocksCache::new())),
};
block_source.run(next_block_number).await.unwrap();
block_source
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_datetime_from_path() {
let path = Path::new("/home/username/hl/data/evm_block_and_receipts/hourly/20250731/4");
let dt = HlNodeBlockSource::datetime_from_path(path).unwrap();
println!("{:?}", dt);
}
#[tokio::test]
async fn test_backfill() {
let test_path = Path::new("/root/evm_block_and_receipts");
if !test_path.exists() {
return;
}
let cache = Arc::new(Mutex::new(LocalBlocksCache::new()));
HlNodeBlockSource::try_backfill_local_blocks(&test_path, &cache, 1000000).await.unwrap();
let u_cache = cache.lock().await;
println!("{:?}", u_cache.ranges);
assert_eq!(
u_cache.ranges.get(&9735058),
Some(&test_path.join(HOURLY_SUBDIR).join("20250729").join("22"))
);
}
}

View File

@ -0,0 +1,51 @@
use super::scan::ScanResult;
use crate::node::types::{BlockAndReceipts, EvmBlock};
use rangemap::RangeInclusiveMap;
use reth_network::cache::LruMap;
use std::path::{Path, PathBuf};
use tracing::{info, warn};
#[derive(Debug)]
pub struct LocalBlocksCache {
cache: LruMap<u64, BlockAndReceipts>,
ranges: RangeInclusiveMap<u64, PathBuf>,
}
impl LocalBlocksCache {
pub fn new(cache_size: u32) -> Self {
Self { cache: LruMap::new(cache_size), ranges: RangeInclusiveMap::new() }
}
pub fn load_scan_result(&mut self, scan_result: ScanResult) {
for blk in scan_result.new_blocks {
let EvmBlock::Reth115(b) = &blk.block;
self.cache.insert(b.header.header.number, blk);
}
for range in scan_result.new_block_ranges {
self.ranges.insert(range, scan_result.path.clone());
}
}
pub fn get_block(&mut self, height: u64) -> Option<BlockAndReceipts> {
self.cache.get(&height).cloned()
}
pub fn get_path_for_height(&self, height: u64) -> Option<PathBuf> {
self.ranges.get(&height).cloned()
}
pub fn log_range_summary(&self, root: &Path) {
if self.ranges.is_empty() {
warn!("No ranges found in {:?}", root);
} else {
let (min, max) =
(self.ranges.first_range_value().unwrap(), self.ranges.last_range_value().unwrap());
info!(
"Populated {} ranges (min: {}, max: {})",
self.ranges.len(),
min.0.start(),
max.0.end()
);
}
}
}

View File

@ -0,0 +1,67 @@
use super::{HOURLY_SUBDIR, scan::Scanner, time_utils::TimeUtils};
use crate::node::types::BlockAndReceipts;
use std::{
fs::File,
io::{Read, Seek, SeekFrom},
path::{Path, PathBuf},
};
pub struct FileOperations;
impl FileOperations {
pub fn all_hourly_files(root: &Path) -> Option<Vec<PathBuf>> {
let mut files = Vec::new();
for entry in std::fs::read_dir(root.join(HOURLY_SUBDIR)).ok()? {
let dir = entry.ok()?.path();
if let Ok(subentries) = std::fs::read_dir(&dir) {
files.extend(
subentries
.filter_map(|f| f.ok().map(|f| f.path()))
.filter_map(|p| TimeUtils::datetime_from_path(&p).map(|dt| (dt, p))),
);
}
}
files.sort();
Some(files.into_iter().map(|(_, p)| p).collect())
}
pub fn find_latest_hourly_file(root: &Path) -> Option<PathBuf> {
Self::all_hourly_files(root)?.into_iter().last()
}
pub fn read_last_block_from_file(path: &Path) -> Option<(BlockAndReceipts, u64)> {
let mut file = File::open(path).ok()?;
Self::read_last_complete_line(&mut file)
}
fn read_last_complete_line<R: Read + Seek>(read: &mut R) -> Option<(BlockAndReceipts, u64)> {
const CHUNK_SIZE: u64 = 50000;
let mut buf = Vec::with_capacity(CHUNK_SIZE as usize);
let mut pos = read.seek(SeekFrom::End(0)).unwrap();
let mut last_line = Vec::new();
while pos > 0 {
let read_size = pos.min(CHUNK_SIZE);
buf.resize(read_size as usize, 0);
read.seek(SeekFrom::Start(pos - read_size)).unwrap();
read.read_exact(&mut buf).unwrap();
last_line = [buf.clone(), last_line].concat();
if last_line.ends_with(b"\n") {
last_line.pop();
}
if let Some(idx) = last_line.iter().rposition(|&b| b == b'\n') {
let candidate = &last_line[idx + 1..];
if let Ok(result) = Scanner::line_to_evm_block(str::from_utf8(candidate).unwrap()) {
return Some(result);
}
last_line.truncate(idx);
}
if pos < read_size {
break;
}
pos -= read_size;
}
Scanner::line_to_evm_block(&String::from_utf8(last_line).unwrap()).ok()
}
}
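A hedged lookup sketch using the helpers above; the directory layout (`hourly/<yyyymmdd>/<hour>`) matches the scanner's expectations and the root path is illustrative:

```rust
use std::path::Path;

fn latest_local_height_sketch() -> Option<u64> {
    // Illustrative hl-node data root; adjust to the actual ingest directory.
    let root = Path::new("/home/user/hl/data/evm_block_and_receipts");
    let file = FileOperations::find_latest_hourly_file(root)?;
    let (_block, height) = FileOperations::read_last_block_from_file(&file)?;
    Some(height)
}
```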

View File

@ -0,0 +1,270 @@
mod cache;
mod file_ops;
mod scan;
#[cfg(test)]
mod tests;
mod time_utils;
use self::{
cache::LocalBlocksCache,
file_ops::FileOperations,
scan::{LineStream, ScanOptions, Scanner},
time_utils::TimeUtils,
};
use super::{BlockSource, BlockSourceBoxed};
use crate::node::types::BlockAndReceipts;
use futures::future::BoxFuture;
use reth_metrics::{Metrics, metrics, metrics::Counter};
use std::{
path::{Path, PathBuf},
sync::Arc,
time::Duration,
};
use time::OffsetDateTime;
use tokio::sync::Mutex;
use tracing::{info, warn};
const HOURLY_SUBDIR: &str = "hourly";
const CACHE_SIZE: u32 = 8000; // ~3600 blocks per hour
const ONE_HOUR: Duration = Duration::from_secs(60 * 60);
const TAIL_INTERVAL: Duration = Duration::from_millis(25);
#[derive(Debug, Clone)]
pub struct HlNodeBlockSourceArgs {
pub root: PathBuf,
pub fallback_threshold: Duration,
}
/// Block source that monitors the local hl-node block output directory (--local).
#[derive(Debug, Clone)]
pub struct HlNodeBlockSource {
pub fallback: BlockSourceBoxed,
pub local_blocks_cache: Arc<Mutex<LocalBlocksCache>>,
pub last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>,
pub args: HlNodeBlockSourceArgs,
pub metrics: HlNodeBlockSourceMetrics,
}
#[derive(Metrics, Clone)]
#[metrics(scope = "block_source.hl_node")]
pub struct HlNodeBlockSourceMetrics {
    /// How many times a block was served from local hl-node data
    pub fetched_from_hl_node: Counter,
    /// How many times a block was fetched from the fallback source
    pub fetched_from_fallback: Counter,
    /// How many times `try_collect_local_block` had to read an hourly file itself
    /// because it ran ahead of the ingest loop
    pub file_read_triggered: Counter,
}
impl BlockSource for HlNodeBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
let fallback = self.fallback.clone();
let args = self.args.clone();
let local_blocks_cache = self.local_blocks_cache.clone();
let last_local_fetch = self.last_local_fetch.clone();
let metrics = self.metrics.clone();
Box::pin(async move {
let now = OffsetDateTime::now_utc();
if let Some(block) =
Self::try_collect_local_block(&metrics, local_blocks_cache, height).await
{
Self::update_last_fetch(last_local_fetch, height, now).await;
metrics.fetched_from_hl_node.increment(1);
return Ok(block);
}
if let Some((last_height, last_poll_time)) = *last_local_fetch.lock().await {
let more_recent = last_height < height;
let too_soon = now - last_poll_time < args.fallback_threshold;
if more_recent && too_soon {
                    return Err(eyre::eyre!(
                        "Not found locally; limiting polling rate before fallback so that the hl-node has a chance to catch up"
                    ));
}
}
let block = fallback.collect_block(height).await?;
metrics.fetched_from_fallback.increment(1);
Self::update_last_fetch(last_local_fetch, height, now).await;
Ok(block)
})
}
fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
let fallback = self.fallback.clone();
let args = self.args.clone();
Box::pin(async move {
let Some(dir) = FileOperations::find_latest_hourly_file(&args.root) else {
            warn!(
                "No EVM blocks from hl-node found at {:?}; falling back to s3/ingest-dir",
                args.root
            );
return fallback.find_latest_block_number().await;
};
match FileOperations::read_last_block_from_file(&dir) {
Some((_, height)) => {
info!("Latest block number: {} with path {}", height, dir.display());
Some(height)
}
None => {
                    warn!(
                        "Failed to parse the hl-node hourly file at {:?}; falling back to s3/ingest-dir",
                        dir
                    );
fallback.find_latest_block_number().await
}
}
})
}
fn recommended_chunk_size(&self) -> u64 {
self.fallback.recommended_chunk_size()
}
}
struct CurrentFile {
path: PathBuf,
line_stream: Option<LineStream>,
}
impl CurrentFile {
pub fn from_datetime(dt: OffsetDateTime, root: &Path) -> Self {
let (hour, day_str) = (dt.hour(), TimeUtils::date_from_datetime(dt));
let path = root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{}", hour));
Self { path, line_stream: None }
}
pub fn open(&mut self) -> eyre::Result<()> {
if self.line_stream.is_some() {
return Ok(());
}
self.line_stream = Some(LineStream::from_path(&self.path)?);
Ok(())
}
}
impl HlNodeBlockSource {
async fn update_last_fetch(
last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>,
height: u64,
now: OffsetDateTime,
) {
let mut last_fetch = last_local_fetch.lock().await;
if last_fetch.is_none_or(|(h, _)| h < height) {
*last_fetch = Some((height, now));
}
}
async fn try_collect_local_block(
metrics: &HlNodeBlockSourceMetrics,
local_blocks_cache: Arc<Mutex<LocalBlocksCache>>,
height: u64,
) -> Option<BlockAndReceipts> {
let mut u_cache = local_blocks_cache.lock().await;
if let Some(block) = u_cache.get_block(height) {
return Some(block);
}
let path = u_cache.get_path_for_height(height)?;
info!("Loading block data from {:?}", path);
metrics.file_read_triggered.increment(1);
let mut line_stream = LineStream::from_path(&path).ok()?;
let scan_result = Scanner::scan_hour_file(
&mut line_stream,
ScanOptions { start_height: 0, only_load_ranges: false },
);
u_cache.load_scan_result(scan_result);
u_cache.get_block(height)
}
async fn try_backfill_local_blocks(
root: &Path,
cache: &Arc<Mutex<LocalBlocksCache>>,
cutoff_height: u64,
) -> eyre::Result<()> {
let mut u_cache = cache.lock().await;
for subfile in FileOperations::all_hourly_files(root).unwrap_or_default() {
if let Some((_, height)) = FileOperations::read_last_block_from_file(&subfile) {
if height < cutoff_height {
continue;
}
} else {
warn!("Failed to parse last line of file: {:?}", subfile);
}
let mut line_stream =
LineStream::from_path(&subfile).expect("Failed to open line stream");
let mut scan_result = Scanner::scan_hour_file(
&mut line_stream,
ScanOptions { start_height: cutoff_height, only_load_ranges: true },
);
scan_result.new_blocks.clear(); // Only store ranges, load data lazily
u_cache.load_scan_result(scan_result);
}
u_cache.log_range_summary(root);
Ok(())
}
async fn start_local_ingest_loop(&self, current_head: u64) {
let root = self.args.root.to_owned();
let cache = self.local_blocks_cache.clone();
tokio::spawn(async move {
let mut next_height = current_head;
let mut dt = loop {
if let Some(f) = FileOperations::find_latest_hourly_file(&root) {
break TimeUtils::datetime_from_path(&f).unwrap();
}
tokio::time::sleep(TAIL_INTERVAL).await;
};
let mut current_file = CurrentFile::from_datetime(dt, &root);
info!("Starting local ingest loop from height: {}", current_head);
loop {
let _ = current_file.open();
if let Some(line_stream) = &mut current_file.line_stream {
let scan_result = Scanner::scan_hour_file(
line_stream,
ScanOptions { start_height: next_height, only_load_ranges: false },
);
next_height = scan_result.next_expected_height;
cache.lock().await.load_scan_result(scan_result);
}
let now = OffsetDateTime::now_utc();
if dt + ONE_HOUR < now {
dt += ONE_HOUR;
current_file = CurrentFile::from_datetime(dt, &root);
info!("Moving to new file: {:?}", current_file.path);
continue;
}
tokio::time::sleep(TAIL_INTERVAL).await;
}
});
}
pub(crate) async fn run(&self, next_block_number: u64) -> eyre::Result<()> {
let _ = Self::try_backfill_local_blocks(
&self.args.root,
&self.local_blocks_cache,
next_block_number,
)
.await;
self.start_local_ingest_loop(next_block_number).await;
Ok(())
}
pub async fn new(
fallback: BlockSourceBoxed,
args: HlNodeBlockSourceArgs,
next_block_number: u64,
) -> Self {
let block_source = Self {
fallback,
args,
local_blocks_cache: Arc::new(Mutex::new(LocalBlocksCache::new(CACHE_SIZE))),
last_local_fetch: Arc::new(Mutex::new(None)),
metrics: HlNodeBlockSourceMetrics::default(),
};
block_source.run(next_block_number).await.unwrap();
block_source
}
}
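
Putting the pieces together, a hedged wiring sketch (not from the repo): the hl-node source wraps a fallback, so anything missing from the local hourly files degrades to --ingest-dir or --s3. The paths and heights below are placeholders.

use std::{path::PathBuf, sync::Arc, time::Duration};

async fn build_source() -> HlNodeBlockSource {
    // Hypothetical fallback rooted at an --ingest-dir style directory.
    let fallback: BlockSourceBoxed =
        Arc::new(Box::new(LocalBlockSource::new("/var/data/evm-blocks")));
    HlNodeBlockSource::new(
        fallback,
        HlNodeBlockSourceArgs {
            // Hypothetical hl-node data directory holding hourly/<date>/<hour> files.
            root: PathBuf::from("/home/hluser/hl/data/evm_block_and_receipts"),
            // How long collect_block holds off the fallback after the last local hit.
            fallback_threshold: Duration::from_secs(5),
        },
        9_735_058, // next block number to sync
    )
    .await
}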

src/pseudo_peer/sources/hl_node/scan.rs

@ -0,0 +1,132 @@
use crate::node::types::{BlockAndReceipts, EvmBlock};
use serde::{Deserialize, Serialize};
use std::{
fs::File,
io::{BufRead, BufReader, Seek, SeekFrom},
ops::RangeInclusive,
path::{Path, PathBuf},
};
use tracing::warn;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LocalBlockAndReceipts(pub String, pub BlockAndReceipts);
pub struct ScanResult {
pub path: PathBuf,
pub next_expected_height: u64,
pub new_blocks: Vec<BlockAndReceipts>,
pub new_block_ranges: Vec<RangeInclusive<u64>>,
}
pub struct ScanOptions {
pub start_height: u64,
pub only_load_ranges: bool,
}
pub struct Scanner;
/// Stream for sequentially reading lines from a file.
///
/// This struct allows sequential iteration over lines via the [Self::next] method.
/// It is resilient to cases where the line producer process is interrupted while writing:
/// - If a line is malformed but still ends with a line ending, it is skipped; the fallback
/// block source is later used to retrieve the missing block.
/// - If a line does not end with a newline (i.e., the write was incomplete), the method returns
/// `None` to break out of the loop and avoid reading partial data.
/// - If a temporary I/O error occurs, the stream exits the loop without rewinding the cursor,
/// which results in skipping ahead to the next unread bytes on a later call.
pub struct LineStream {
path: PathBuf,
reader: BufReader<File>,
}
impl LineStream {
pub fn from_path(path: &Path) -> std::io::Result<Self> {
let reader = BufReader::with_capacity(1024 * 1024, File::open(path)?);
Ok(Self { path: path.to_path_buf(), reader })
}
pub fn next(&mut self) -> Option<String> {
let mut line_buffer = vec![];
let Ok(size) = self.reader.read_until(b'\n', &mut line_buffer) else {
            // Temporary I/O error; bail out and let the caller retry later
return None;
};
// Now cursor is right after the end of the line
// On UTF-8 error, skip the line
let Ok(mut line) = String::from_utf8(line_buffer) else {
return Some(String::new());
};
        // A complete line ends with '\n'; strip it (and a preceding '\r' for CRLF)
        if line.ends_with('\n') {
            line.pop();
            if line.ends_with('\r') {
                line.pop();
            }
            return Some(line);
        }
        // The line is incomplete: rewind to its start and return None so the caller
        // can break out of its loop and retry once the writer finishes the line.
        if size != 0 {
            self.reader.seek(SeekFrom::Current(-(size as i64))).unwrap();
        }
        None
}
}
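
The rewind in next is what makes tailing a half-written file safe: an unterminated line yields None and restores the cursor, so the same stream picks the line up once the writer finishes it. A small sketch of that behavior, assuming the tempfile crate already used by the tests:

use std::io::Write;

fn main() -> eyre::Result<()> {
    let dir = tempfile::tempdir()?;
    let path = dir.path().join("0");
    std::fs::write(&path, "complete\npart")?;
    let mut stream = LineStream::from_path(&path)?;
    assert_eq!(stream.next().as_deref(), Some("complete"));
    assert_eq!(stream.next(), None); // "part" has no newline yet
    let mut writer = std::fs::OpenOptions::new().append(true).open(&path)?;
    writeln!(writer, "ial")?; // the producer finishes the line
    assert_eq!(stream.next().as_deref(), Some("partial"));
    Ok(())
}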
impl Scanner {
pub fn line_to_evm_block(line: &str) -> serde_json::Result<(BlockAndReceipts, u64)> {
let LocalBlockAndReceipts(_, parsed_block): LocalBlockAndReceipts =
serde_json::from_str(line)?;
let height = match &parsed_block.block {
EvmBlock::Reth115(b) => b.header.header.number,
};
Ok((parsed_block, height))
}
pub fn scan_hour_file(line_stream: &mut LineStream, options: ScanOptions) -> ScanResult {
let mut new_blocks = Vec::new();
let mut last_height = options.start_height;
let mut block_ranges = Vec::new();
let mut current_range: Option<(u64, u64)> = None;
while let Some(line) = line_stream.next() {
match Self::line_to_evm_block(&line) {
Ok((parsed_block, height)) => {
if height >= options.start_height {
last_height = last_height.max(height);
if !options.only_load_ranges {
new_blocks.push(parsed_block);
}
}
match current_range {
Some((start, end)) if end + 1 == height => {
current_range = Some((start, height))
}
_ => {
if let Some((start, end)) = current_range.take() {
block_ranges.push(start..=end);
}
current_range = Some((height, height));
}
}
}
Err(_) => warn!("Failed to parse line: {}...", line.get(0..50).unwrap_or(&line)),
}
}
if let Some((start, end)) = current_range {
block_ranges.push(start..=end);
}
ScanResult {
path: line_stream.path.clone(),
next_expected_height: last_height + current_range.is_some() as u64,
new_blocks,
new_block_ranges: block_ranges,
}
}
}
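
scan_hour_file folds consecutive heights into inclusive ranges and opens a new range at every gap, which is what lets the cache index a whole hour with a handful of entries. The folding logic in isolation, with plain numbers standing in for parsed blocks:

use std::ops::RangeInclusive;

fn fold_ranges(heights: impl IntoIterator<Item = u64>) -> Vec<RangeInclusive<u64>> {
    let mut ranges = Vec::new();
    let mut current: Option<(u64, u64)> = None;
    for h in heights {
        match current {
            // Extend the open range while heights stay contiguous.
            Some((start, end)) if end + 1 == h => current = Some((start, h)),
            _ => {
                if let Some((start, end)) = current.take() {
                    ranges.push(start..=end);
                }
                current = Some((h, h));
            }
        }
    }
    if let Some((start, end)) = current {
        ranges.push(start..=end);
    }
    ranges
}

fn main() {
    // The gap between 102 and 200 splits the file into two ranges.
    assert_eq!(fold_ranges([100, 101, 102, 200, 201]), vec![100..=102, 200..=201]);
}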

src/pseudo_peer/sources/hl_node/tests.rs

@ -0,0 +1,214 @@
use super::*;
use crate::{
node::types::{ReadPrecompileCalls, reth_compat},
pseudo_peer::sources::{LocalBlockSource, hl_node::scan::LocalBlockAndReceipts},
};
use alloy_consensus::{BlockBody, Header};
use alloy_primitives::{Address, B64, B256, Bloom, Bytes, U256};
use std::{io::Write, time::Duration};
const DEFAULT_FALLBACK_THRESHOLD_FOR_TEST: Duration = Duration::from_millis(5000);
#[test]
fn test_datetime_from_path() {
let path = Path::new("/home/username/hl/data/evm_block_and_receipts/hourly/20250731/4");
let dt = TimeUtils::datetime_from_path(path).unwrap();
println!("{dt:?}");
}
#[tokio::test]
async fn test_backfill() {
let test_path = Path::new("/root/evm_block_and_receipts");
if !test_path.exists() {
return;
}
let cache = Arc::new(Mutex::new(LocalBlocksCache::new(CACHE_SIZE)));
HlNodeBlockSource::try_backfill_local_blocks(test_path, &cache, 1000000).await.unwrap();
let u_cache = cache.lock().await;
assert_eq!(
u_cache.get_path_for_height(9735058),
Some(test_path.join(HOURLY_SUBDIR).join("20250729").join("22"))
);
}
fn scan_result_from_single_block(block: BlockAndReceipts) -> scan::ScanResult {
use crate::node::types::EvmBlock;
let height = match &block.block {
EvmBlock::Reth115(b) => b.header.header.number,
};
scan::ScanResult {
path: PathBuf::from("/nonexistent-block"),
next_expected_height: height + 1,
new_blocks: vec![block],
new_block_ranges: vec![height..=height],
}
}
fn empty_block(number: u64, timestamp: u64, extra_data: &'static [u8]) -> LocalBlockAndReceipts {
use crate::node::types::EvmBlock;
LocalBlockAndReceipts(
timestamp.to_string(),
BlockAndReceipts {
block: EvmBlock::Reth115(reth_compat::SealedBlock {
header: reth_compat::SealedHeader {
header: Header {
parent_hash: B256::ZERO,
ommers_hash: B256::ZERO,
beneficiary: Address::ZERO,
state_root: B256::ZERO,
transactions_root: B256::ZERO,
receipts_root: B256::ZERO,
logs_bloom: Bloom::ZERO,
difficulty: U256::ZERO,
number,
gas_limit: 0,
gas_used: 0,
timestamp,
extra_data: Bytes::from_static(extra_data),
mix_hash: B256::ZERO,
nonce: B64::ZERO,
base_fee_per_gas: None,
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
requests_hash: None,
},
hash: B256::ZERO,
},
body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None },
}),
receipts: vec![],
system_txs: vec![],
read_precompile_calls: ReadPrecompileCalls(vec![]),
highest_precompile_address: None,
},
)
}
fn setup_temp_dir_and_file() -> eyre::Result<(tempfile::TempDir, std::fs::File)> {
let now = OffsetDateTime::now_utc();
let temp_dir = tempfile::tempdir()?;
let path = temp_dir
.path()
.join(HOURLY_SUBDIR)
.join(TimeUtils::date_from_datetime(now))
.join(format!("{}", now.hour()));
std::fs::create_dir_all(path.parent().unwrap())?;
Ok((temp_dir, std::fs::File::create(path)?))
}
struct BlockSourceHierarchy {
block_source: HlNodeBlockSource,
_temp_dir: tempfile::TempDir,
file1: std::fs::File,
current_block: LocalBlockAndReceipts,
future_block_hl_node: LocalBlockAndReceipts,
future_block_fallback: LocalBlockAndReceipts,
}
async fn setup_block_source_hierarchy() -> eyre::Result<BlockSourceHierarchy> {
// Setup fallback block source
let block_source_fallback = HlNodeBlockSource::new(
BlockSourceBoxed::new(Box::new(LocalBlockSource::new("/nonexistent"))),
HlNodeBlockSourceArgs {
            root: PathBuf::from("/nonexistent"),
fallback_threshold: DEFAULT_FALLBACK_THRESHOLD_FOR_TEST,
},
1000000,
)
.await;
let block_hl_node_0 = empty_block(1000000, 1722633600, b"hl-node");
let block_hl_node_1 = empty_block(1000001, 1722633600, b"hl-node");
let block_fallback_1 = empty_block(1000001, 1722633600, b"fallback");
let (temp_dir1, mut file1) = setup_temp_dir_and_file()?;
writeln!(&mut file1, "{}", serde_json::to_string(&block_hl_node_0)?)?;
let block_source = HlNodeBlockSource::new(
BlockSourceBoxed::new(Box::new(block_source_fallback.clone())),
HlNodeBlockSourceArgs {
root: temp_dir1.path().to_path_buf(),
fallback_threshold: DEFAULT_FALLBACK_THRESHOLD_FOR_TEST,
},
1000000,
)
.await;
block_source_fallback
.local_blocks_cache
.lock()
.await
.load_scan_result(scan_result_from_single_block(block_fallback_1.1.clone()));
Ok(BlockSourceHierarchy {
block_source,
_temp_dir: temp_dir1,
file1,
current_block: block_hl_node_0,
future_block_hl_node: block_hl_node_1,
future_block_fallback: block_fallback_1,
})
}
#[tokio::test]
async fn test_update_last_fetch_no_fallback() -> eyre::Result<()> {
let hierarchy = setup_block_source_hierarchy().await?;
let BlockSourceHierarchy {
block_source, current_block, future_block_hl_node, mut file1, ..
} = hierarchy;
let block = block_source.collect_block(1000000).await.unwrap();
assert_eq!(block, current_block.1);
let block = block_source.collect_block(1000001).await;
assert!(block.is_err());
writeln!(&mut file1, "{}", serde_json::to_string(&future_block_hl_node)?)?;
tokio::time::sleep(Duration::from_millis(100)).await;
let block = block_source.collect_block(1000001).await.unwrap();
assert_eq!(block, future_block_hl_node.1);
Ok(())
}
#[tokio::test]
async fn test_update_last_fetch_fallback() -> eyre::Result<()> {
let hierarchy = setup_block_source_hierarchy().await?;
let BlockSourceHierarchy {
block_source, current_block, future_block_fallback, mut file1, ..
} = hierarchy;
let block = block_source.collect_block(1000000).await.unwrap();
assert_eq!(block, current_block.1);
tokio::time::sleep(DEFAULT_FALLBACK_THRESHOLD_FOR_TEST).await;
writeln!(&mut file1, "{}", serde_json::to_string(&future_block_fallback)?)?;
let block = block_source.collect_block(1000001).await.unwrap();
assert_eq!(block, future_block_fallback.1);
Ok(())
}
#[test]
fn test_hourly_files_sort() -> eyre::Result<()> {
let temp_dir = tempfile::tempdir()?;
// create 20250826/9, 20250826/14
let targets = [("20250826", "9"), ("20250826", "14")];
for (date, hour) in targets {
let hourly_file = temp_dir.path().join(HOURLY_SUBDIR).join(date).join(hour);
let parent = hourly_file.parent().unwrap();
std::fs::create_dir_all(parent)?;
std::fs::File::create(hourly_file)?;
}
let files = FileOperations::all_hourly_files(temp_dir.path()).unwrap();
let file_names: Vec<_> =
files.into_iter().map(|p| p.file_name().unwrap().to_string_lossy().into_owned()).collect();
assert_eq!(file_names, ["9", "14"]);
Ok(())
}

src/pseudo_peer/sources/hl_node/time_utils.rs

@ -0,0 +1,19 @@
use std::path::Path;
use time::{Date, OffsetDateTime, Time, macros::format_description};
pub struct TimeUtils;
impl TimeUtils {
pub fn datetime_from_path(path: &Path) -> Option<OffsetDateTime> {
let (dt_part, hour_part) =
(path.parent()?.file_name()?.to_str()?, path.file_name()?.to_str()?);
Some(OffsetDateTime::new_utc(
Date::parse(dt_part, &format_description!("[year][month][day]")).ok()?,
Time::from_hms(hour_part.parse().ok()?, 0, 0).ok()?,
))
}
pub fn date_from_datetime(dt: OffsetDateTime) -> String {
dt.format(&format_description!("[year][month][day]")).unwrap()
}
}
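
The hourly layout encodes the UTC date and hour directly in the path, e.g. .../hourly/20250731/4 maps to 2025-07-31 04:00:00 UTC. An illustrative round trip through the helpers above:

use std::path::Path;

fn main() {
    let path = Path::new("/data/evm_block_and_receipts/hourly/20250731/4");
    let dt = TimeUtils::datetime_from_path(path).unwrap();
    assert_eq!((dt.year(), dt.month() as u8, dt.day(), dt.hour()), (2025, 7, 31, 4));
    assert_eq!(TimeUtils::date_from_datetime(dt), "20250731");
}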

src/pseudo_peer/sources/local.rs

@ -0,0 +1,79 @@
use super::{BlockSource, utils};
use crate::node::types::BlockAndReceipts;
use eyre::Context;
use futures::{FutureExt, future::BoxFuture};
use reth_metrics::{Metrics, metrics, metrics::Counter};
use std::path::PathBuf;
use tracing::info;
/// Block source that reads blocks from local filesystem (--ingest-dir)
#[derive(Debug, Clone)]
pub struct LocalBlockSource {
dir: PathBuf,
metrics: LocalBlockSourceMetrics,
}
#[derive(Metrics, Clone)]
#[metrics(scope = "block_source.local")]
pub struct LocalBlockSourceMetrics {
    /// How many times the local block source has polled for a block
    pub polling_attempt: Counter,
    /// How many times the local block source has fetched a block from the local filesystem
    pub fetched: Counter,
}
impl LocalBlockSource {
pub fn new(dir: impl Into<PathBuf>) -> Self {
Self { dir: dir.into(), metrics: LocalBlockSourceMetrics::default() }
}
async fn pick_path_with_highest_number(dir: PathBuf, is_dir: bool) -> Option<(u64, String)> {
let files = std::fs::read_dir(&dir).unwrap().collect::<Vec<_>>();
let files = files
.into_iter()
.filter(|path| path.as_ref().unwrap().path().is_dir() == is_dir)
.map(|entry| entry.unwrap().path().to_string_lossy().to_string())
.collect::<Vec<_>>();
utils::name_with_largest_number(&files, is_dir)
}
}
impl BlockSource for LocalBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
let dir = self.dir.clone();
let metrics = self.metrics.clone();
async move {
let path = dir.join(utils::rmp_path(height));
metrics.polling_attempt.increment(1);
let file = tokio::fs::read(&path)
.await
.wrap_err_with(|| format!("Failed to read block from {path:?}"))?;
let mut decoder = lz4_flex::frame::FrameDecoder::new(&file[..]);
let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
metrics.fetched.increment(1);
Ok(blocks[0].clone())
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
let dir = self.dir.clone();
async move {
let (_, first_level) = Self::pick_path_with_highest_number(dir.clone(), true).await?;
let (_, second_level) =
Self::pick_path_with_highest_number(dir.join(first_level), true).await?;
let (block_number, third_level) =
Self::pick_path_with_highest_number(dir.join(second_level), false).await?;
info!("Latest block number: {} with path {}", block_number, third_level);
Some(block_number)
}
.boxed()
}
fn recommended_chunk_size(&self) -> u64 {
1000
}
}
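
find_latest_block_number mirrors the layout produced by utils::rmp_path: it descends the highest-numbered million bucket, then the highest thousand bucket, then picks the highest .rmp.lz4 file. A hypothetical on-disk shape it expects:

// ingest-dir/
//   0/                      <- million bucket (directory level 1)
//   1000000/                <- highest million bucket wins
//     1234000/              <- thousand bucket (directory level 2), highest wins
//       1234566.rmp.lz4
//       1234567.rmp.lz4     <- highest file: latest block = 1234567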

src/pseudo_peer/sources/mod.rs

@ -1,269 +1,40 @@
use crate::node::types::BlockAndReceipts;
use aws_sdk_s3::types::RequestPayer;
use eyre::Context;
use futures::{future::BoxFuture, FutureExt};
use reth_network::cache::LruMap;
use std::{
path::PathBuf,
sync::{Arc, RwLock},
};
use tracing::info;
use auto_impl::auto_impl;
use futures::future::BoxFuture;
use std::{sync::Arc, time::Duration};
// Module declarations
mod cached;
mod hl_node;
pub use hl_node::HlNodeBlockSource;
mod local;
mod s3;
mod utils;
// Public exports
pub use cached::CachedBlockSource;
pub use hl_node::{HlNodeBlockSource, HlNodeBlockSourceArgs};
pub use local::LocalBlockSource;
pub use s3::S3BlockSource;
const DEFAULT_POLLING_INTERVAL: Duration = Duration::from_millis(25);
/// Trait for block sources that can retrieve blocks from various sources
#[auto_impl(&, &mut, Box, Arc)]
pub trait BlockSource: Send + Sync + std::fmt::Debug + Unpin + 'static {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>>;
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>>;
/// Retrieves a block at the specified height
fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>>;
/// Finds the latest block number available from this source
fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>>;
/// Returns the recommended chunk size for batch operations
fn recommended_chunk_size(&self) -> u64;
/// Returns the polling interval
fn polling_interval(&self) -> Duration {
DEFAULT_POLLING_INTERVAL
}
}
/// Type alias for a boxed block source
pub type BlockSourceBoxed = Arc<Box<dyn BlockSource>>;
fn name_with_largest_number(files: &[String], is_dir: bool) -> Option<(u64, String)> {
let mut files = files
.iter()
.filter_map(|file_raw| {
let file = file_raw.strip_suffix("/").unwrap_or(file_raw).split("/").last().unwrap();
let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
})
.collect::<Vec<_>>();
if files.is_empty() {
return None;
}
files.sort_by_key(|(number, _)| *number);
files.last().cloned()
}
#[derive(Debug, Clone)]
pub struct S3BlockSource {
client: aws_sdk_s3::Client,
bucket: String,
}
impl S3BlockSource {
pub fn new(client: aws_sdk_s3::Client, bucket: String) -> Self {
Self { client, bucket }
}
async fn pick_path_with_highest_number(
client: aws_sdk_s3::Client,
bucket: String,
dir: String,
is_dir: bool,
) -> Option<(u64, String)> {
let request = client
.list_objects()
.bucket(&bucket)
.prefix(dir)
.delimiter("/")
.request_payer(RequestPayer::Requester);
let response = request.send().await.ok()?;
let files: Vec<String> = if is_dir {
response
.common_prefixes
.unwrap()
.iter()
.map(|object| object.prefix.as_ref().unwrap().to_string())
.collect()
} else {
response
.contents
.unwrap()
.iter()
.map(|object| object.key.as_ref().unwrap().to_string())
.collect()
};
name_with_largest_number(&files, is_dir)
}
}
impl BlockSource for S3BlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
let client = self.client.clone();
let bucket = self.bucket.clone();
async move {
let path = rmp_path(height);
let request = client
.get_object()
.request_payer(RequestPayer::Requester)
.bucket(&bucket)
.key(path);
let response = request.send().await?;
let bytes = response.body.collect().await?.into_bytes();
let mut decoder = lz4_flex::frame::FrameDecoder::new(&bytes[..]);
let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
Ok(blocks[0].clone())
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
let client = self.client.clone();
let bucket = self.bucket.clone();
async move {
let (_, first_level) = Self::pick_path_with_highest_number(
client.clone(),
bucket.clone(),
"".to_string(),
true,
)
.await?;
let (_, second_level) = Self::pick_path_with_highest_number(
client.clone(),
bucket.clone(),
first_level,
true,
)
.await?;
let (block_number, third_level) = Self::pick_path_with_highest_number(
client.clone(),
bucket.clone(),
second_level,
false,
)
.await?;
info!("Latest block number: {} with path {}", block_number, third_level);
Some(block_number)
}
.boxed()
}
fn recommended_chunk_size(&self) -> u64 {
1000
}
}
impl BlockSource for LocalBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
let dir = self.dir.clone();
async move {
let path = dir.join(rmp_path(height));
let file = tokio::fs::read(&path)
.await
.wrap_err_with(|| format!("Failed to read block from {path:?}"))?;
let mut decoder = lz4_flex::frame::FrameDecoder::new(&file[..]);
let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
Ok(blocks[0].clone())
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
let dir = self.dir.clone();
async move {
let (_, first_level) = Self::pick_path_with_highest_number(dir.clone(), true).await?;
let (_, second_level) =
Self::pick_path_with_highest_number(dir.join(first_level), true).await?;
let (block_number, third_level) =
Self::pick_path_with_highest_number(dir.join(second_level), false).await?;
info!("Latest block number: {} with path {}", block_number, third_level);
Some(block_number)
}
.boxed()
}
fn recommended_chunk_size(&self) -> u64 {
1000
}
}
#[derive(Debug, Clone)]
pub struct LocalBlockSource {
dir: PathBuf,
}
impl LocalBlockSource {
pub fn new(dir: impl Into<PathBuf>) -> Self {
Self { dir: dir.into() }
}
fn name_with_largest_number_static(files: &[String], is_dir: bool) -> Option<(u64, String)> {
let mut files = files
.iter()
.filter_map(|file_raw| {
let file = file_raw.strip_suffix("/").unwrap_or(file_raw);
let file = file.split("/").last().unwrap();
let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
})
.collect::<Vec<_>>();
if files.is_empty() {
return None;
}
files.sort_by_key(|(number, _)| *number);
files.last().map(|(number, file)| (*number, file.to_string()))
}
async fn pick_path_with_highest_number(dir: PathBuf, is_dir: bool) -> Option<(u64, String)> {
let files = std::fs::read_dir(&dir).unwrap().collect::<Vec<_>>();
let files = files
.into_iter()
.filter(|path| path.as_ref().unwrap().path().is_dir() == is_dir)
.map(|entry| entry.unwrap().path().to_string_lossy().to_string())
.collect::<Vec<_>>();
Self::name_with_largest_number_static(&files, is_dir)
}
}
fn rmp_path(height: u64) -> String {
let f = ((height - 1) / 1_000_000) * 1_000_000;
let s = ((height - 1) / 1_000) * 1_000;
let path = format!("{f}/{s}/{height}.rmp.lz4");
path
}
impl BlockSource for BlockSourceBoxed {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
self.as_ref().collect_block(height)
}
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
self.as_ref().find_latest_block_number()
}
fn recommended_chunk_size(&self) -> u64 {
self.as_ref().recommended_chunk_size()
}
}
#[derive(Debug, Clone)]
pub struct CachedBlockSource {
block_source: BlockSourceBoxed,
cache: Arc<RwLock<LruMap<u64, BlockAndReceipts>>>,
}
impl CachedBlockSource {
const CACHE_LIMIT: u32 = 100000;
pub fn new(block_source: BlockSourceBoxed) -> Self {
Self { block_source, cache: Arc::new(RwLock::new(LruMap::new(Self::CACHE_LIMIT))) }
}
}
impl BlockSource for CachedBlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
let block_source = self.block_source.clone();
let cache = self.cache.clone();
async move {
if let Some(block) = cache.write().unwrap().get(&height) {
return Ok(block.clone());
}
let block = block_source.collect_block(height).await?;
cache.write().unwrap().insert(height, block.clone());
Ok(block)
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
self.block_source.find_latest_block_number()
}
fn recommended_chunk_size(&self) -> u64 {
self.block_source.recommended_chunk_size()
}
}
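
Because the trait hands out 'static boxed futures, an implementor clones whatever state the future needs before moving it in, as every source in this module does. A hedged sketch of a minimal implementor (not in the repo) showing the shape:

#[derive(Debug, Clone)]
struct NullBlockSource;

impl BlockSource for NullBlockSource {
    fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
        // No state to clone here; a real source would clone clients/paths first.
        Box::pin(async move { Err(eyre::eyre!("block {height} unavailable")) })
    }
    fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
        Box::pin(async { None })
    }
    fn recommended_chunk_size(&self) -> u64 {
        1
    }
}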

src/pseudo_peer/sources/s3.rs

@ -0,0 +1,115 @@
use super::{BlockSource, utils};
use crate::node::types::BlockAndReceipts;
use aws_sdk_s3::types::RequestPayer;
use futures::{FutureExt, future::BoxFuture};
use reth_metrics::{Metrics, metrics, metrics::Counter};
use std::{sync::Arc, time::Duration};
use tracing::info;
/// Block source that reads blocks from S3 (--s3)
#[derive(Debug, Clone)]
pub struct S3BlockSource {
client: Arc<aws_sdk_s3::Client>,
bucket: String,
polling_interval: Duration,
metrics: S3BlockSourceMetrics,
}
#[derive(Metrics, Clone)]
#[metrics(scope = "block_source.s3")]
pub struct S3BlockSourceMetrics {
    /// How many times the S3 block source has polled for a block
    pub polling_attempt: Counter,
    /// How many times the S3 block source has fetched a block
    pub fetched: Counter,
}
impl S3BlockSource {
pub fn new(client: aws_sdk_s3::Client, bucket: String, polling_interval: Duration) -> Self {
Self {
client: client.into(),
bucket,
polling_interval,
metrics: S3BlockSourceMetrics::default(),
}
}
async fn pick_path_with_highest_number(
client: &aws_sdk_s3::Client,
bucket: &str,
dir: &str,
is_dir: bool,
) -> Option<(u64, String)> {
let request = client
.list_objects()
.bucket(bucket)
.prefix(dir)
.delimiter("/")
.request_payer(RequestPayer::Requester);
let response = request.send().await.ok()?;
let files: Vec<String> = if is_dir {
response
.common_prefixes?
.iter()
.map(|object| object.prefix.as_ref().unwrap().to_string())
.collect()
} else {
response
.contents?
.iter()
.map(|object| object.key.as_ref().unwrap().to_string())
.collect()
};
utils::name_with_largest_number(&files, is_dir)
}
}
impl BlockSource for S3BlockSource {
fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
let client = self.client.clone();
let bucket = self.bucket.clone();
let metrics = self.metrics.clone();
async move {
let path = utils::rmp_path(height);
metrics.polling_attempt.increment(1);
let request = client
.get_object()
.request_payer(RequestPayer::Requester)
.bucket(&bucket)
.key(path);
let response = request.send().await?;
metrics.fetched.increment(1);
let bytes = response.body.collect().await?.into_bytes();
let mut decoder = lz4_flex::frame::FrameDecoder::new(&bytes[..]);
let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
Ok(blocks[0].clone())
}
.boxed()
}
fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
let client = self.client.clone();
let bucket = self.bucket.clone();
async move {
let (_, first_level) =
Self::pick_path_with_highest_number(&client, &bucket, "", true).await?;
let (_, second_level) =
Self::pick_path_with_highest_number(&client, &bucket, &first_level, true).await?;
let (block_number, third_level) =
Self::pick_path_with_highest_number(&client, &bucket, &second_level, false).await?;
info!("Latest block number: {} with path {}", block_number, third_level);
Some(block_number)
}
.boxed()
}
fn recommended_chunk_size(&self) -> u64 {
1000
}
fn polling_interval(&self) -> Duration {
self.polling_interval
}
}
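
A hypothetical wiring of the S3 source; the bucket name is a placeholder and the client setup follows the stock aws-config pattern. Note that every request sets RequestPayer::Requester, so the caller's AWS account pays for the transfer:

use std::time::Duration;

async fn build_s3_source() -> S3BlockSource {
    let config = aws_config::load_defaults(aws_config::BehaviorVersion::latest()).await;
    let client = aws_sdk_s3::Client::new(&config);
    S3BlockSource::new(client, "example-evm-blocks".to_string(), Duration::from_millis(25))
}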

src/pseudo_peer/sources/utils.rs

@ -0,0 +1,26 @@
//! Shared utilities for block sources
/// Finds the file/directory with the largest number in its name from a list of files
pub fn name_with_largest_number(files: &[String], is_dir: bool) -> Option<(u64, String)> {
let mut files = files
.iter()
.filter_map(|file_raw| {
let file = file_raw.strip_suffix("/").unwrap_or(file_raw);
let file = file.split("/").last().unwrap();
let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
})
.collect::<Vec<_>>();
if files.is_empty() {
return None;
}
files.sort_by_key(|(number, _)| *number);
files.last().cloned()
}
/// Generates the RMP file path for a given block height
pub fn rmp_path(height: u64) -> String {
let f = ((height - 1) / 1_000_000) * 1_000_000;
let s = ((height - 1) / 1_000) * 1_000;
format!("{f}/{s}/{height}.rmp.lz4")
}
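
The bucketing is keyed off height - 1, so exact multiples of a bucket size stay in the lower bucket (and heights are assumed to start at 1; rmp_path(0) would underflow). A few illustrative values:

fn main() {
    assert_eq!(rmp_path(1), "0/0/1.rmp.lz4");
    assert_eq!(rmp_path(1_000_000), "0/999000/1000000.rmp.lz4"); // stays in the 0 bucket
    assert_eq!(rmp_path(1_000_001), "1000000/1000000/1000001.rmp.lz4");
    assert_eq!(rmp_path(1_234_567), "1000000/1234000/1234567.rmp.lz4");
}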


@ -1,30 +0,0 @@
use std::path::Path;
use crate::pseudo_peer::{prelude::*, BlockSourceType};
#[tokio::test]
async fn test_block_source_config_s3() {
let config = BlockSourceConfig::s3("test-bucket".to_string()).await;
assert!(
matches!(config.source_type, BlockSourceType::S3 { bucket } if bucket == "test-bucket")
);
}
#[tokio::test]
async fn test_block_source_config_local() {
let config = BlockSourceConfig::local("/test/path".into());
assert!(
matches!(config.source_type, BlockSourceType::Local { path } if path == Path::new("/test/path"))
);
}
#[test]
fn test_error_types() {
let io_error = std::io::Error::new(std::io::ErrorKind::NotFound, "File not found");
let benchmark_error: PseudoPeerError = io_error.into();
match benchmark_error {
PseudoPeerError::Io(_) => (),
_ => panic!("Expected Io error"),
}
}

src/version.rs

@ -0,0 +1,35 @@
use std::borrow::Cow;
use reth_node_core::version::{RethCliVersionConsts, try_init_version_metadata};
pub fn init_reth_hl_version() {
let cargo_pkg_version = env!("CARGO_PKG_VERSION").to_string();
let short = env!("RETH_HL_SHORT_VERSION").to_string();
let long = format!(
"{}\n{}\n{}\n{}\n{}",
env!("RETH_HL_LONG_VERSION_0"),
env!("RETH_HL_LONG_VERSION_1"),
env!("RETH_HL_LONG_VERSION_2"),
env!("RETH_HL_LONG_VERSION_3"),
env!("RETH_HL_LONG_VERSION_4"),
);
let p2p = env!("RETH_HL_P2P_CLIENT_VERSION").to_string();
let meta = RethCliVersionConsts {
name_client: Cow::Borrowed("reth_hl"),
cargo_pkg_version: Cow::Owned(cargo_pkg_version.clone()),
vergen_git_sha_long: Cow::Owned(env!("VERGEN_GIT_SHA").to_string()),
vergen_git_sha: Cow::Owned(env!("VERGEN_GIT_SHA_SHORT").to_string()),
vergen_build_timestamp: Cow::Owned(env!("VERGEN_BUILD_TIMESTAMP").to_string()),
vergen_cargo_target_triple: Cow::Owned(env!("VERGEN_CARGO_TARGET_TRIPLE").to_string()),
vergen_cargo_features: Cow::Owned(env!("VERGEN_CARGO_FEATURES").to_string()),
short_version: Cow::Owned(short),
long_version: Cow::Owned(long),
build_profile_name: Cow::Owned(env!("RETH_HL_BUILD_PROFILE").to_string()),
p2p_client_version: Cow::Owned(p2p),
extra_data: Cow::Owned(format!("reth_hl/v{}/{}", cargo_pkg_version, std::env::consts::OS)),
};
let _ = try_init_version_metadata(meta);
}