96 Commits

Author SHA1 Message Date
f109130f88 feat(draft): Spot meta 2025-11-05 04:07:46 +00:00
010d056aad Merge pull request #102 from hl-archive-node/fix/testnet-txs-tracking
fix: Fix testnet transaction types
2025-11-04 12:24:04 -05:00
821c63494e fix: Fix testnet transaction types 2025-11-04 17:23:31 +00:00
f915aba568 Merge pull request #100 from hl-archive-node/feat/deprecate-migrator
feat: Place migrator behind `CHECK_DB_MIGRATION` env
2025-11-01 06:23:01 -04:00
1fe03bfc41 feat: Place migrator behind CHECK_DB_MIGRATION env 2025-11-01 09:36:30 +00:00
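With this change, the startup DB migration check no longer runs unconditionally. A minimal sketch of opting back in (the variable name comes from the commit; treating any non-empty value as "on" is an assumption):

```sh
# Assumption: the migration check only runs when CHECK_DB_MIGRATION is set
CHECK_DB_MIGRATION=1 reth-hl node --http --http.addr 0.0.0.0 --http.api eth,ots,net,web3
```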
893822e5b0 Merge pull request #98 from hl-archive-node/fix/testnet-system-tx 2025-10-26 03:39:06 -04:00
c2528ce223 fix: Support certain types of system tx 2025-10-26 06:42:14 +00:00
d46e808b8d Merge pull request #94 from hl-archive-node/fix/migrator-typo
fix(migrate): Fix wrong chunk ranges
2025-10-16 10:42:59 -04:00
497353fd2f fix(migrate): Fix wrong chunk ranges 2025-10-16 14:35:04 +00:00
eee6eeb2fc Merge pull request #93 from hl-archive-node/fix/subscriptions
fix: Prevent #89 from overriding --hl-node-compliant subscriptions
2025-10-13 01:27:19 -04:00
611e6867bf fix: Do not override --hl-node-compliant for subscription 2025-10-13 02:57:25 +00:00
6c3ed63c3c fix: Override NewHeads only 2025-10-13 02:57:05 +00:00
51924e9671 Merge pull request #91 from hl-archive-node/fix/debug-cutoff
fix: Fix --debug-cutoff-height semantics
2025-10-11 22:29:45 -04:00
8f15aa311f fix: Fix --debug-cutoff-height semantics
NOTE: This is a debug feature, off by default.

The original intent was to cap the highest block number, but it instead enforced the starting block number for fetching, causing the node to progress past the intended cutoff.
2025-10-12 02:22:55 +00:00
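A hedged sketch of the corrected semantics (the block number is illustrative):

```sh
# Cap fetching at a given height instead of starting from it (debug feature, off by default)
reth-hl node --chain testnet --debug-cutoff-height 30281484
```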
bc66716a41 Merge pull request #89 from hl-archive-node/cleanup
fix: Convert header type for eth_subscribe
2025-10-10 23:09:33 -04:00
fc819dbba2 test: Add regression tests 2025-10-11 02:52:09 +00:00
1c5a22a814 fix: Convert header type for eth_subscribe
Due to custom header usage, only the `eth_subscribe` method was returning the new header format in raw form, while other parts were using RpcConvert to convert headers.

Make `eth_subscribe` newHeads return the `inner` field (the original eth header) instead.
2025-10-11 02:49:19 +00:00
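One way to observe the fix, assuming a local WebSocket endpoint on the default port; after this commit the newHeads payload is the standard eth header rather than the raw custom one:

```sh
wscat -c ws://localhost:8546
> {"jsonrpc":"2.0","id":1,"method":"eth_subscribe","params":["newHeads"]}
```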
852e186b1a Merge pull request #88 from hl-archive-node/hotfix
hotfix: Mark migrator experimental
2025-10-09 04:55:53 -04:00
f83326059f chore: clippy 2025-10-09 08:55:40 +00:00
ca8c374116 feat: Mark migrator as experimental 2025-10-09 08:49:29 +00:00
5ba12a4850 perf: adjust chunk size, do not hold tx too long 2025-10-09 08:20:22 +00:00
8a179a6d9e perf: Use smaller chunks 2025-10-09 08:13:53 +00:00
d570cf3e8d fix: Create directory before migration 2025-10-09 08:13:45 +00:00
0e49e65068 Merge pull request #86 from hl-archive-node/breaking/hl-header
feat(breaking): Use custom header format (HlHeader)
2025-10-09 02:51:09 -04:00
13b63ff136 feat: add migrator for mdbx as well 2025-10-09 06:35:56 +00:00
233026871f perf: chunkify block ranges 2025-10-08 13:54:16 +00:00
7e169d409d chore: Change branch to v1.8.2-fork-hl-header 2025-10-08 13:04:11 +00:00
47aaad6ed9 feat: add migrator 2025-10-08 13:03:51 +00:00
9f73b1ede0 refactor: Move BlockBody from transaction to body 2025-10-06 06:43:17 +00:00
bcdf4d933d feat(breaking): Use HlHeader for HlPrimitives 2025-10-06 06:21:08 +00:00
2390ed864a feat(breaking): Use HlHeader for storing header 2025-10-06 06:21:08 +00:00
567d6ce2e4 feat: Introduce HlHeader 2025-10-06 06:21:08 +00:00
8b2c3a4a34 refactor: Move primitives into files 2025-10-06 06:21:08 +00:00
92759f04db Merge pull request #84 from hl-archive-node/fix/no-panic
fix: Fix panic when block receipts are called on non-existing blocks
2025-10-05 19:47:22 -04:00
71bb70bca6 fix: Fix panic when block receipts are called on non-existing blocks 2025-10-05 14:54:55 +00:00
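A hedged way to exercise the previously panicking path (port and block number are assumptions); after the fix the call should return null rather than crash the node:

```sh
curl -s -X POST http://localhost:8545 -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"eth_getBlockReceipts","params":["0xffffffff"]}'
```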
5327ebc97a Merge pull request #82 from hl-archive-node/fix/local-reader
fix(local-ingest-dir): Use more robust resumption for hl-node line reader, fix block number increment for reading files
2025-10-05 07:36:32 -04:00
4d83b687d4 feat: Add metrics for file read triggered
Usually, "Loading block data from ..." shouldn't be shown in logs at all. Add metrics to detect the file read.
2025-10-05 11:28:11 +00:00
12f366573e fix: Do not increase block counter when no block is read
This made the ingest loop increase the block number indefinitely.
2025-10-05 11:28:11 +00:00
b8bae7cde9 fix: Utilize LruMap better 2025-10-05 11:28:11 +00:00
LruMap was introduced to allow getting the same block twice, so removing the item when getting the block doesn't make sense.
2025-10-05 11:28:11 +00:00
0fd4b7943f refactor: Use offsets instead of lines, wrap related structs in one 2025-10-05 11:28:04 +00:00
bfd61094ee chore: cargo fmt 2025-10-05 09:58:13 +00:00
3b33b0a526 Merge pull request #81 from hl-archive-node/fix/typo-local
fix: Fix typo in --local (default hl-node dir)
2025-10-05 05:54:35 -04:00
de7b524f0b fix: Fix typo in --local (default hl-node dir) 2025-10-05 04:39:09 -04:00
24f2460337 Merge pull request #80 from hl-archive-node/chore/v1.8.2
chore: Upgrade to reth v1.8.2
2025-10-05 04:38:54 -04:00
b55ddc54ad chore: clippy 2025-10-05 04:04:30 -04:00
aa73fab281 chore: Now cargo fmt sorts imports and trait methods 2025-10-05 03:56:23 -04:00
ae0cb0da6d chore: Move sprites0/reth to hl-archive-node/reth 2025-10-05 03:56:23 -04:00
8605be9864 chore: Upgrade to reth v1.8.2 2025-10-05 03:56:23 -04:00
c93ff90f94 Merge pull request #79 from hl-archive-node/fix/issue-78
fix: Do not filter out logs based on bloom (which is for perf optimization)
2025-10-05 00:43:20 -04:00
ce64e00e2f fix: Do not filter out logs based on bloom (which is for perf optimization)
Resolves #78
2025-10-05 00:33:44 -04:00
8d8da57d3a Merge pull request #77 from hl-archive-node/feat/cutoff-latest
feat: Add debug CLI flag to enforce latest blocks (--debug-cutoff-height)
2025-10-02 10:57:04 -04:00
875304f891 feat: Add debug CLI flag to enforce latest blocks (--debug-cutoff-height)
This is useful when syncing to specific testnet blocks
2025-10-02 14:53:47 +00:00
b37ba15765 Merge pull request #74 from Quertyy/feat/block-precompila-data-rpc-method
feat(rpc): add HlBlockPrecompile rpc API
2025-09-19 02:42:21 -04:00
3080665702 style: pass clippy check 2025-09-19 13:23:49 +07:00
4896e4f0ea refactor: use BlockId as block type 2025-09-19 12:41:14 +07:00
458f506ad2 refactor: use BlockHashOrNumber as block type 2025-09-19 12:33:32 +07:00
1c7136bfab feat(rpc): add HlBlockPrecompile rpc API 2025-09-18 04:57:49 +07:00
491e902904 Merge pull request #69 from hl-archive-node/fix/call-and-estimate
fix: Apply precompiles for eth_call and eth_estimateGas
2025-09-15 02:22:21 -04:00
45648a7a98 fix: Apply precompiles for eth_call and eth_estimateGas 2025-09-15 02:21:45 -04:00
c87c5a055a Merge pull request #68 from hl-archive-node/fix/testnet-token
fix: Add a manual mapping for testnet
2025-09-14 23:31:19 -04:00
c9416a3948 fix: Add a manual mapping for testnet 2025-09-14 23:24:00 -04:00
db10c23c56 Merge pull request #66 from hl-archive-node/feat/nb-release
fix: Fix tag format
2025-09-13 16:48:00 -04:00
fc395123f3 fix: Fix tag format 2025-09-13 16:47:05 -04:00
84ea1af682 Merge pull request #64 from sentioxyz/node-builder
fix docker build args
2025-09-13 16:43:35 -04:00
bd3e0626ed fix docker build args 2025-09-13 15:28:36 +08:00
7d223a464e Merge pull request #63 from hl-archive-node/feat/nb-release
feat: Add nb tag to docker releases
2025-09-11 19:36:43 -04:00
afcc551f67 feat: Add nb tag to docker releases 2025-09-11 19:35:50 -04:00
0dfd7a4c7f Merge pull request #62 from hl-archive-node/doc/testnet
doc: Update testnet instruction, add support channel
2025-09-11 19:33:50 -04:00
8faac526b7 doc: Add support channel 2025-09-11 19:32:55 -04:00
acfabf969c doc: Update testnet block number 2025-09-11 19:31:37 -04:00
fccf877a3a Merge pull request #61 from hl-archive-node/chore/v1.7.0
chore: Upgrade to reth v1.7.0
2025-09-11 19:26:47 -04:00
9e3f0c722e chore: Upgrade to reth v1.7.0 2025-09-11 19:25:48 -04:00
cd5bcc4cb0 chore: Add issue templates from reth 2025-09-11 19:00:09 -04:00
d831a459bb Merge pull request #60 from hl-archive-node/feat/block-metrics
feat: Add block source metrics
2025-09-11 18:56:18 -04:00
66c2ee654c feat: Add block source metrics 2025-09-11 18:50:22 -04:00
701e6a25e6 refactor: Remove duplications 2025-09-11 18:47:58 -04:00
ab11ce513f Merge pull request #57 from Quertyy/chore/reth-hl-version
chore(build): add reth-hl version output
2025-09-09 09:43:12 -04:00
37b852e810 chore(build): add reth-hl version output 2025-09-09 20:19:52 +07:00
51c43d6dbd Create a docker release github action (#54)
* create docker release action
2025-09-08 10:26:20 -04:00
3f08b0a4e6 Merge pull request #55 from hl-archive-node/fix/txenv-on-trace
fix: Fill precompiles when tracing
2025-09-04 20:39:16 -04:00
d7992ab8ff remove: Remove unnecessary trait implementation 2025-09-04 20:38:41 -04:00
b37a30fb37 fix: Fill precompiles in tracing APIs 2025-09-04 20:37:10 -04:00
f6432498d8 refactor: Relax apply_precompiles and expose 2025-09-04 20:37:07 -04:00
772ff250ce Merge pull request #52 from hl-archive-node/fix/avoid-crash-on-eth-failure
fix: Do not crash when collect_block failed
2025-08-29 02:51:10 +09:00
5ee9053286 fix: Do not crash when collect_block failed
Gracefully return it as an error and log it instead.
2025-08-28 13:47:44 -04:00
29e6972d58 Merge pull request #51 from hl-archive-node/feat/no-eth-proof
fix: Disable eth_getProof by default
2025-08-29 02:07:24 +09:00
e87b9232cc fix: Disable eth_getProof by default
No need to expose a malfunctioning feature by default. Issue #15 affects the
StoragesTrie and AccountsTrie tables, which are used only for state root and
proof generation. Clearing those tables does not affect any other part of the
reth node.

Meanwhile, add an --experimental-eth-get-proof flag to forcefully enable
eth_getProof.
2025-08-28 10:27:32 -04:00
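A hedged sketch of opting back in despite the caveats above (flag name from the commit message):

```sh
# Re-enable eth_getProof despite the known StoragesTrie/AccountsTrie issue
reth-hl node --experimental-eth-get-proof
```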
b004263f82 Merge pull request #50 from Quertyy/feat/rpc-system-tx-receipts
chore(rpc): add eth_getEvmSystemTxsReceiptsByBlockHash and eth_getEvmSystemTxsReceiptsByBlockNumber rpc methods
2025-08-28 23:26:05 +09:00
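The new methods are plain JSON-RPC calls; a hedged example against a local node (method name from the PR, default HTTP port assumed):

```sh
curl -s -X POST http://localhost:8545 -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"eth_getEvmSystemTxsReceiptsByBlockNumber","params":["latest"]}'
```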
74e27b5ee2 refactor(rpc): extract common logic for getting system txs 2025-08-28 16:10:41 +02:00
09fcf0751f chore(rpc): add eth_getSystemTxsReceiptsByBlockNumber and eth_getSystemTxsReceiptsByBlockHash rpc methods 2025-08-28 15:39:37 +02:00
8f2eca4754 Merge pull request #48 from Quertyy/feat/rpc-block-system-tx
chore(rpc): add eth_getEvmSystemTxsByBlockNumber and eth_getEvmSystemTxsByBlockHash rpc methods
2025-08-28 17:45:43 +09:00
707b4fb709 chore(rpc): return types compliance 2025-08-27 10:34:34 +02:00
62dd5a71b5 chore(rpc): change methods name 2025-08-26 22:03:40 +02:00
412c38a8cd chore(rpc): add eth_getSystemTxsByBlockNumber and eth_getSystemTxsByBlockHash rpc methods 2025-08-26 21:24:28 +02:00
796ea518bd Merge pull request #47 from hl-archive-node/fix/issue-46
fix: Sort hl-node files correctly
2025-08-27 02:49:16 +09:00
dd2c925af2 fix: Sort hl-node files correctly 2025-08-26 13:47:34 -04:00
81 changed files with 3700 additions and 1358 deletions

.github/ISSUE_TEMPLATE/bug.yml (new file, +127)

@ -0,0 +1,127 @@
name: Bug Report
description: Create a bug report
labels: ["C-bug", "S-needs-triage"]
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to fill out this bug report! Please provide as much detail as possible.

        If you believe you have found a vulnerability, please provide details [here](mailto:georgios@paradigm.xyz) instead.
  - type: textarea
    id: what-happened
    attributes:
      label: Describe the bug
      description: |
        A clear and concise description of what the bug is.

        If the bug is in a crate you are using (i.e. you are not running the standard `reth` binary) please mention that as well.
    validations:
      required: true
  - type: textarea
    id: reproduction-steps
    attributes:
      label: Steps to reproduce
      description: Please provide any steps you think might be relevant to reproduce the bug.
      placeholder: |
        Steps to reproduce:

        1. Start '...'
        2. Then '...'
        3. Check '...'
        4. See error
    validations:
      required: true
  - type: textarea
    id: logs
    attributes:
      label: Node logs
      description: |
        If applicable, please provide the node logs leading up to the bug.

        **Please also provide debug logs.** By default, these can be found in:

        - `~/.cache/reth/logs` on Linux
        - `~/Library/Caches/reth/logs` on macOS
        - `%localAppData%/reth/logs` on Windows
      render: text
    validations:
      required: false
  - type: dropdown
    id: platform
    attributes:
      label: Platform(s)
      description: What platform(s) did this occur on?
      multiple: true
      options:
        - Linux (x86)
        - Linux (ARM)
        - Mac (Intel)
        - Mac (Apple Silicon)
        - Windows (x86)
        - Windows (ARM)
  - type: dropdown
    id: container_type
    attributes:
      label: Container Type
      description: Were you running it in a container?
      multiple: true
      options:
        - Not running in a container
        - Docker
        - Kubernetes
        - LXC/LXD
        - Other
    validations:
      required: true
  - type: textarea
    id: client-version
    attributes:
      label: What version/commit are you on?
      description: This can be obtained with `reth --version`
    validations:
      required: true
  - type: textarea
    id: database-version
    attributes:
      label: What database version are you on?
      description: This can be obtained with `reth db version`
    validations:
      required: true
  - type: textarea
    id: network
    attributes:
      label: Which chain / network are you on?
      description: This is the argument you pass to `reth --chain`. If you are using `--dev`, type in 'dev' here. If you are not running with `--chain` or `--dev` then it is mainnet.
    validations:
      required: true
  - type: dropdown
    id: node-type
    attributes:
      label: What type of node are you running?
      options:
        - Archive (default)
        - Full via --full flag
        - Pruned with custom reth.toml config
    validations:
      required: true
  - type: textarea
    id: prune-config
    attributes:
      label: What prune config do you use, if any?
      description: The `[prune]` section in `reth.toml` file
    validations:
      required: false
  - type: input
    attributes:
      label: If you've built Reth from source, provide the full command you used
    validations:
      required: false
  - type: checkboxes
    id: terms
    attributes:
      label: Code of Conduct
      description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/paradigmxyz/reth/blob/main/CONTRIBUTING.md#code-of-conduct)
      options:
        - label: I agree to follow the Code of Conduct
          required: true

.github/ISSUE_TEMPLATE/config.yml (new file, +5)

@ -0,0 +1,5 @@
blank_issues_enabled: false
contact_links:
  - name: GitHub Discussions
    url: https://github.com/paradigmxyz/reth/discussions
    about: Please ask and answer questions here to keep the issue tracker clean.

.github/ISSUE_TEMPLATE/docs.yml (new file, +19)

@ -0,0 +1,19 @@
name: Documentation
description: Suggest a change to our documentation
labels: ["C-docs", "S-needs-triage"]
body:
  - type: markdown
    attributes:
      value: |
        If you are unsure if the docs are relevant or needed, please open up a discussion first.
  - type: textarea
    attributes:
      label: Describe the change
      description: |
        Please describe the documentation you want to change or add, and if it is for end-users or contributors.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Additional context
      description: Add any other context to the feature (like screenshots, resources)

.github/ISSUE_TEMPLATE/feature.yml (new file, +21)

@ -0,0 +1,21 @@
name: Feature request
description: Suggest a feature
labels: ["C-enhancement", "S-needs-triage"]
body:
  - type: markdown
    attributes:
      value: |
        Please ensure that the feature has not already been requested in the issue tracker.
  - type: textarea
    attributes:
      label: Describe the feature
      description: |
        Please describe the feature and what it is aiming to solve, if relevant.

        If the feature is for a crate, please include a proposed API surface.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Additional context
      description: Add any other context to the feature (like screenshots, resources)

.github/workflows/docker.yml (new file, +38)

@ -0,0 +1,38 @@
# Publishes the Docker image.

name: docker

on:
  push:
    tags:
      - v*
      - nb-*

env:
  IMAGE_NAME: ${{ github.repository_owner }}/nanoreth
  CARGO_TERM_COLOR: always
  DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/nanoreth
  DOCKER_USERNAME: ${{ github.actor }}

jobs:
  build:
    name: build and push as latest
    runs-on: ubuntu-24.04
    permissions:
      packages: write
      contents: read
    steps:
      - uses: actions/checkout@v5
      - uses: rui314/setup-mold@v1
      - uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2
        with:
          cache-on-failure: true
      - name: Log in to Docker
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin
      - name: Set up Docker builder
        run: |
          docker buildx create --use --name builder
      - name: Build and push nanoreth image
        run: make IMAGE_NAME=$IMAGE_NAME DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME PROFILE=maxperf docker-build-push-latest
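Since the workflow fires only on `v*` and `nb-*` tags, a release is triggered by pushing a matching tag (tag name illustrative):

```sh
git tag nb-20250908
git push origin nb-20250908
```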

Cargo.lock (generated; 838 lines changed, diff suppressed because it is too large)

Cargo.toml

@@ -1,7 +1,8 @@
 [package]
 name = "reth_hl"
 version = "0.1.0"
-edition = "2021"
+edition = "2024"
+build = "build.rs"

 [lib]
 name = "reth_hl"
@@ -25,67 +26,73 @@
 lto = "fat"
 codegen-units = 1

 [dependencies]
-reth = { git = "https://github.com/sprites0/reth", rev = "a690ef25b56039195e7e4a4abd01c78aedcc73fb" }
+reth = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
 (the same git URL and rev change applies to reth-cli, reth-cli-commands, reth-basic-payload-builder, reth-db, reth-db-api, reth-chainspec, reth-cli-util, reth-discv4, reth-engine-primitives, reth-ethereum-forks, reth-ethereum-payload-builder, reth-ethereum-primitives, reth-eth-wire, reth-eth-wire-types, reth-evm, reth-evm-ethereum, reth-node-core, reth-revm, reth-network, reth-network-p2p, reth-network-api, reth-node-ethereum, reth-network-peers, reth-payload-primitives, reth-primitives, reth-primitives-traits, reth-provider, reth-rpc, reth-rpc-eth-api, reth-rpc-engine-api, reth-tracing, reth-trie-common, reth-trie-db, reth-codecs, reth-transaction-pool, and reth-stages-types)
+reth-storage-api = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
+reth-errors = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
+reth-rpc-convert = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
+reth-rpc-eth-types = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
+reth-rpc-server-types = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
+reth-metrics = { git = "https://github.com/hl-archive-node/reth", rev = "416c2e26756f1c8ee86e6b8e4081f434952b3a1a" }
-revm = { version = "28.0.1", default-features = false }
+revm = { version = "29.0.1", default-features = false }

 # alloy dependencies
-alloy-genesis = { version = "1.0.23", default-features = false }
+alloy-genesis = { version = "1.0.37", default-features = false }
-alloy-consensus = { version = "1.0.23", default-features = false }
+alloy-consensus = { version = "1.0.37", default-features = false }
 alloy-chains = { version = "0.2.5", default-features = false }
-alloy-eips = { version = "1.0.23", default-features = false }
+alloy-eips = { version = "1.0.37", default-features = false }
-alloy-evm = { version = "0.18.2", default-features = false }
+alloy-evm = { version = "0.21.0", default-features = false }
 alloy-json-abi = { version = "1.3.1", default-features = false }
-alloy-json-rpc = { version = "1.0.23", default-features = false }
+alloy-json-rpc = { version = "1.0.37", default-features = false }
 alloy-dyn-abi = "1.3.1"
-alloy-network = { version = "1.0.23", default-features = false }
+alloy-network = { version = "1.0.37", default-features = false }
 alloy-primitives = { version = "1.3.1", default-features = false, features = ["map-foldhash"] }
 alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] }
-alloy-rpc-types = { version = "1.0.23", features = ["eth"], default-features = false }
+alloy-rpc-types = { version = "1.0.37", features = ["eth"], default-features = false }
-alloy-rpc-types-eth = { version = "1.0.23", default-features = false }
+alloy-rpc-types-eth = { version = "1.0.37", default-features = false }
-alloy-rpc-types-engine = { version = "1.0.23", default-features = false }
+alloy-rpc-types-engine = { version = "1.0.37", default-features = false }
-alloy-signer = { version = "1.0.23", default-features = false }
+alloy-signer = { version = "1.0.37", default-features = false }
 alloy-sol-macro = "1.3.1"
 alloy-sol-types = { version = "1.3.1", default-features = false }
-jsonrpsee = "0.25.1"
+jsonrpsee = "0.26.0"
-jsonrpsee-core = "0.25.1"
+jsonrpsee-core = "0.26.0"
-jsonrpsee-types = "0.25.1"
+jsonrpsee-types = "0.26.0"

 # misc dependencies
 auto_impl = "1"
@@ -166,3 +173,7 @@ client = [

 [dev-dependencies]
 tempfile = "3.20.0"

+[build-dependencies]
+vergen = { version = "9.0.4", features = ["build", "cargo", "emit_and_set"] }
+vergen-git2 = "1.0.5"

Makefile

@@ -1,6 +1,8 @@
 # Modified from reth Makefile
 .DEFAULT_GOAL := help

+GIT_SHA ?= $(shell git rev-parse HEAD)
+GIT_TAG ?= $(shell git describe --tags --abbrev=0 2>/dev/null)
 BIN_DIR = "dist/bin"

 # List of features to use when building. Can be overridden via the environment.
@@ -17,6 +19,9 @@ PROFILE ?= release

 # Extra flags for Cargo
 CARGO_INSTALL_EXTRA_FLAGS ?=

+# The docker image name
+DOCKER_IMAGE_NAME ?= ghcr.io/hl-archive-node/nanoreth
+
 ##@ Help

 .PHONY: help
@@ -207,3 +212,49 @@ check-features:
 	--package reth-primitives-traits \
 	--package reth-primitives \
 	--feature-powerset
##@ Docker

# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --driver docker-container --name cross-builder`
.PHONY: docker-build-push
docker-build-push: ## Build and push a cross-arch Docker image tagged with the latest git tag.
	$(call docker_build_push,$(GIT_TAG),$(GIT_TAG))

# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --driver docker-container --name cross-builder`
.PHONY: docker-build-push-git-sha
docker-build-push-git-sha: ## Build and push a cross-arch Docker image tagged with the latest git sha.
	$(call docker_build_push,$(GIT_SHA),$(GIT_SHA))

# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --driver docker-container --name cross-builder`
.PHONY: docker-build-push-latest
docker-build-push-latest: ## Build and push a cross-arch Docker image tagged with the latest git tag and `latest`.
	$(call docker_build_push,$(GIT_TAG),latest)

# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --name cross-builder`
.PHONY: docker-build-push-nightly
docker-build-push-nightly: ## Build and push cross-arch Docker images tagged with the latest git tag with a `-nightly` suffix, and `latest-nightly`.
	$(call docker_build_push,nightly,nightly)

# Create a Docker image using the main Dockerfile
define docker_build_push
	docker buildx build --file ./Dockerfile . \
		--platform linux/amd64 \
		--tag $(DOCKER_IMAGE_NAME):$(1) \
		--tag $(DOCKER_IMAGE_NAME):$(2) \
		--build-arg BUILD_PROFILE="$(PROFILE)" \
		--build-arg FEATURES="jemalloc,asm-keccak" \
		--provenance=false \
		--push
endef
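Putting the targets together, a manual cross-arch push would look roughly like this (builder setup taken from the notes above; using the git-sha target is an arbitrary choice):

```sh
docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64
docker buildx create --use --driver docker-container --name cross-builder
make PROFILE=maxperf docker-build-push-git-sha
```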

README.md

@@ -3,6 +3,8 @@
 HyperEVM archive node implementation based on [reth](https://github.com/paradigmxyz/reth).
 NodeBuilder API version is heavily inspired by [reth-bsc](https://github.com/loocapro/reth-bsc).

+Got questions? Drop by the [Hyperliquid Discord](https://discord.gg/hyperliquid) #node-operators channel.
+
 ## ⚠️ IMPORTANT: System Transactions Appear as Pseudo Transactions

 Deposit transactions from [System Addresses](https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/hyperevm/hypercore-less-than-greater-than-hyperevm-transfers#system-addresses) like `0x222..22` / `0x200..xx` to user addresses are intentionally recorded as pseudo transactions.
@@ -58,19 +60,19 @@ $ reth-hl node --http --http.addr 0.0.0.0 --http.api eth,ots,net,web3 \

 ## How to run (testnet)

-Testnet is supported since block 21304281.
+Testnet is supported since block 30281484.

 ```sh
-# Get testnet genesis at block 21304281
+# Get testnet genesis at block 30281484
 $ cd ~
 $ git clone https://github.com/sprites0/hl-testnet-genesis
 $ zstd --rm -d ~/hl-testnet-genesis/*.zst

 # Init node
 $ make install
-$ reth-hl init-state --without-evm --chain testnet --header ~/hl-testnet-genesis/21304281.rlp \
-    --header-hash 0x5b10856d2b1ad241c9bd6136bcc60ef7e8553560ca53995a590db65f809269b4 \
-    ~/hl-testnet-genesis/21304281.jsonl --total-difficulty 0
+$ reth-hl init-state --without-evm --chain testnet --header ~/hl-testnet-genesis/30281484.rlp \
+    --header-hash 0x147cc3c09e9ddbb11799c826758db284f77099478ab5f528d3a57a6105516c21 \
+    ~/hl-testnet-genesis/30281484.jsonl --total-difficulty 0

 # Run node
 $ reth-hl node --chain testnet --http --http.addr 0.0.0.0 --http.api eth,ots,net,web3 \

build.rs (new file, +91)

@ -0,0 +1,91 @@
use std::{env, error::Error};

use vergen::{BuildBuilder, CargoBuilder, Emitter};
use vergen_git2::Git2Builder;

fn main() -> Result<(), Box<dyn Error>> {
    let mut emitter = Emitter::default();

    let build_builder = BuildBuilder::default().build_timestamp(true).build()?;
    emitter.add_instructions(&build_builder)?;

    let cargo_builder = CargoBuilder::default().features(true).target_triple(true).build()?;
    emitter.add_instructions(&cargo_builder)?;

    let git_builder =
        Git2Builder::default().describe(false, true, None).dirty(true).sha(false).build()?;
    emitter.add_instructions(&git_builder)?;

    emitter.emit_and_set()?;

    let sha = env::var("VERGEN_GIT_SHA")?;
    let sha_short = &sha[0..7];
    let is_dirty = env::var("VERGEN_GIT_DIRTY")? == "true";

    // > git describe --always --tags
    // if not on a tag: v0.2.0-beta.3-82-g1939939b
    // if on a tag: v0.2.0-beta.3
    let not_on_tag = env::var("VERGEN_GIT_DESCRIBE")?.ends_with(&format!("-g{sha_short}"));
    let version_suffix = if is_dirty || not_on_tag { "-dev" } else { "" };
    println!("cargo:rustc-env=RETH_HL_VERSION_SUFFIX={version_suffix}");

    // Set short SHA
    println!("cargo:rustc-env=VERGEN_GIT_SHA_SHORT={}", &sha[..8]);

    // Set the build profile
    let out_dir = env::var("OUT_DIR").unwrap();
    let profile = out_dir.rsplit(std::path::MAIN_SEPARATOR).nth(3).unwrap();
    println!("cargo:rustc-env=RETH_HL_BUILD_PROFILE={profile}");

    // Set formatted version strings
    let pkg_version = env!("CARGO_PKG_VERSION");

    // The short version information for reth.
    // - The latest version from Cargo.toml
    // - The short SHA of the latest commit.
    // Example: 0.1.0 (defa64b2)
    println!("cargo:rustc-env=RETH_HL_SHORT_VERSION={pkg_version}{version_suffix} ({sha_short})");

    // LONG_VERSION
    // The long version information for reth.
    //
    // - The latest version from Cargo.toml + version suffix (if any)
    // - The full SHA of the latest commit
    // - The build datetime
    // - The build features
    // - The build profile
    //
    // Example:
    //
    // ```text
    // Version: 0.1.0
    // Commit SHA: defa64b2
    // Build Timestamp: 2023-05-19T01:47:19.815651705Z
    // Build Features: jemalloc
    // Build Profile: maxperf
    // ```
    println!("cargo:rustc-env=RETH_HL_LONG_VERSION_0=Version: {pkg_version}{version_suffix}");
    println!("cargo:rustc-env=RETH_HL_LONG_VERSION_1=Commit SHA: {sha}");
    println!(
        "cargo:rustc-env=RETH_HL_LONG_VERSION_2=Build Timestamp: {}",
        env::var("VERGEN_BUILD_TIMESTAMP")?
    );
    println!(
        "cargo:rustc-env=RETH_HL_LONG_VERSION_3=Build Features: {}",
        env::var("VERGEN_CARGO_FEATURES")?
    );
    println!("cargo:rustc-env=RETH_HL_LONG_VERSION_4=Build Profile: {profile}");

    // The version information for reth formatted for P2P (devp2p).
    // - The latest version from Cargo.toml
    // - The target triple
    //
    // Example: reth/v0.1.0-alpha.1-428a6dc2f/aarch64-apple-darwin
    println!(
        "cargo:rustc-env=RETH_HL_P2P_CLIENT_VERSION={}",
        format_args!("reth/v{pkg_version}-{sha_short}/{}", env::var("VERGEN_CARGO_TARGET_TRIPLE")?)
    );

    Ok(())
}
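Once built, the emitted variables surface in the binary's version output; a hedged example whose shape follows the LONG_VERSION comment above (actual values will differ):

```sh
reth-hl --version
# Version: 0.1.0
# Commit SHA: defa64b2...
# Build Timestamp: 2023-05-19T01:47:19.815651705Z
# Build Features: jemalloc
# Build Profile: maxperf
```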

src/addons/call_forwarder.rs

@@ -2,18 +2,18 @@ use alloy_eips::BlockId;
 use alloy_json_rpc::RpcObject;
 use alloy_primitives::{Bytes, U256};
 use alloy_rpc_types_eth::{
-    state::{EvmOverrides, StateOverride},
     BlockOverrides,
+    state::{EvmOverrides, StateOverride},
 };
 use jsonrpsee::{
     http_client::{HttpClient, HttpClientBuilder},
     proc_macros::rpc,
     rpc_params,
-    types::{error::INTERNAL_ERROR_CODE, ErrorObject},
+    types::{ErrorObject, error::INTERNAL_ERROR_CODE},
 };
-use jsonrpsee_core::{async_trait, client::ClientT, ClientError, RpcResult};
+use jsonrpsee_core::{ClientError, RpcResult, async_trait, client::ClientT};
 use reth_rpc::eth::EthApiTypes;
-use reth_rpc_eth_api::{helpers::EthCall, RpcTxReq};
+use reth_rpc_eth_api::{RpcTxReq, helpers::EthCall};

 #[rpc(server, namespace = "eth")]
 pub(crate) trait CallForwarderApi<TxReq: RpcObject> {

src/addons/hl_node_compliance.rs

@@ -7,68 +7,245 @@
 //! For non-system transactions, we can just return the log as is, and the client will
 //! adjust the transaction index accordingly.

-use alloy_consensus::{transaction::TransactionMeta, TxReceipt};
+use alloy_consensus::{
+    BlockHeader, TxReceipt,
+    transaction::{TransactionMeta, TxHashRef},
+};
 use alloy_eips::{BlockId, BlockNumberOrTag};
 use alloy_json_rpc::RpcObject;
 use alloy_primitives::{B256, U256};
 use alloy_rpc_types::{
     BlockTransactions, Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind,
+    TransactionInfo,
     pubsub::{Params, SubscriptionKind},
 };
-use jsonrpsee::{proc_macros::rpc, PendingSubscriptionSink, SubscriptionMessage, SubscriptionSink};
-use jsonrpsee_core::{async_trait, RpcResult};
-use jsonrpsee_types::ErrorObject;
+use jsonrpsee::{PendingSubscriptionSink, proc_macros::rpc};
+use jsonrpsee_core::{RpcResult, async_trait};
+use jsonrpsee_types::{ErrorObject, error::INTERNAL_ERROR_CODE};
 use reth::{api::FullNodeComponents, builder::rpc::RpcContext, tasks::TaskSpawner};
-use reth_primitives_traits::{BlockBody as _, SignedTransaction};
+use reth_primitives_traits::SignedTransaction;
 use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, ReceiptProvider};
-use reth_rpc::{eth::pubsub::SubscriptionSerializeError, EthFilter, EthPubSub, RpcTypes};
+use reth_rpc::{EthFilter, EthPubSub};
 use reth_rpc_eth_api::{
-    helpers::{EthBlocks, EthTransactions, LoadReceipt},
-    transaction::ConvertReceiptInput,
-    EthApiServer, EthApiTypes, EthFilterApiServer, EthPubSubApiServer, FullEthApiTypes, RpcBlock,
-    RpcConvert, RpcHeader, RpcNodeCoreExt, RpcReceipt, RpcTransaction, RpcTxReq,
+    EthApiTypes, EthFilterApiServer, EthPubSubApiServer, RpcBlock, RpcConvert, RpcReceipt,
+    RpcTransaction, helpers::EthBlocks, transaction::ConvertReceiptInput,
 };
-use serde::Serialize;
-use std::{borrow::Cow, marker::PhantomData, sync::Arc};
-use tokio_stream::{Stream, StreamExt};
-use tracing::{trace, Instrument};
+use reth_rpc_eth_types::EthApiError;
+use std::{marker::PhantomData, sync::Arc};
+use tokio_stream::StreamExt;
+use tracing::{Instrument, trace};

-use crate::{node::primitives::HlPrimitives, HlBlock};
+use crate::addons::utils::{EthWrapper, new_headers_stream, pipe_from_stream};

-pub trait EthWrapper: EthApiServer<…> + FullEthApiTypes<…> + RpcNodeCoreExt<…> + EthBlocks + EthTransactions + LoadReceipt + 'static {}
-impl<T> EthWrapper for T where T: (same bounds) {}
 (the EthWrapper trait and its blanket impl moved verbatim to src/addons/utils.rs, reproduced below)

#[rpc(server, namespace = "eth")]
#[async_trait]
pub trait EthSystemTransactionApi<T: RpcObject, R: RpcObject> {
    #[method(name = "getEvmSystemTxsByBlockHash")]
    async fn get_evm_system_txs_by_block_hash(&self, hash: B256) -> RpcResult<Option<Vec<T>>>;

    #[method(name = "getEvmSystemTxsByBlockNumber")]
    async fn get_evm_system_txs_by_block_number(
        &self,
        block_id: Option<BlockId>,
    ) -> RpcResult<Option<Vec<T>>>;

    #[method(name = "getEvmSystemTxsReceiptsByBlockHash")]
    async fn get_evm_system_txs_receipts_by_block_hash(
        &self,
        hash: B256,
    ) -> RpcResult<Option<Vec<R>>>;

    #[method(name = "getEvmSystemTxsReceiptsByBlockNumber")]
    async fn get_evm_system_txs_receipts_by_block_number(
        &self,
        block_id: Option<BlockId>,
    ) -> RpcResult<Option<Vec<R>>>;
}

pub struct HlSystemTransactionExt<Eth: EthWrapper> {
    eth_api: Eth,
    _marker: PhantomData<Eth>,
}

impl<Eth: EthWrapper> HlSystemTransactionExt<Eth> {
    pub fn new(eth_api: Eth) -> Self {
        Self { eth_api, _marker: PhantomData }
    }

    async fn get_system_txs_by_block_id(
        &self,
        block_id: BlockId,
    ) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>>
    where
        jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
if let Some(block) = self.eth_api.recovered_block(block_id).await? {
let block_hash = block.hash();
let block_number = block.number();
let base_fee_per_gas = block.base_fee_per_gas();
let system_txs = block
.transactions_with_sender()
.enumerate()
.filter_map(|(index, (signer, tx))| {
if tx.is_system_transaction() {
let tx_info = TransactionInfo {
hash: Some(*tx.tx_hash()),
block_hash: Some(block_hash),
block_number: Some(block_number),
base_fee: base_fee_per_gas,
index: Some(index as u64),
};
self.eth_api
.tx_resp_builder()
.fill(tx.clone().with_signer(*signer), tx_info)
.ok()
} else {
None
}
})
.collect();
Ok(Some(system_txs))
} else {
Ok(None)
}
}
async fn get_system_txs_receipts_by_block_id(
&self,
block_id: BlockId,
) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>>
where
jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
if let Some((block, receipts)) =
EthBlocks::load_block_and_receipts(&self.eth_api, block_id).await?
{
let block_number = block.number;
let base_fee = block.base_fee_per_gas;
let block_hash = block.hash();
let excess_blob_gas = block.excess_blob_gas;
let timestamp = block.timestamp;
let mut gas_used = 0;
let mut next_log_index = 0;
let mut inputs = Vec::new();
for (idx, (tx, receipt)) in
block.transactions_recovered().zip(receipts.iter()).enumerate()
{
if receipt.cumulative_gas_used() != 0 {
break;
}
let meta = TransactionMeta {
tx_hash: *tx.tx_hash(),
index: idx as u64,
block_hash,
block_number,
base_fee,
excess_blob_gas,
timestamp,
};
let input = ConvertReceiptInput {
receipt: receipt.clone(),
tx,
gas_used: receipt.cumulative_gas_used() - gas_used,
next_log_index,
meta,
};
gas_used = receipt.cumulative_gas_used();
next_log_index += receipt.logs().len();
inputs.push(input);
}
let receipts = self.eth_api.tx_resp_builder().convert_receipts(inputs)?;
Ok(Some(receipts))
} else {
Ok(None)
}
}
}
#[async_trait]
impl<Eth: EthWrapper>
EthSystemTransactionApiServer<RpcTransaction<Eth::NetworkTypes>, RpcReceipt<Eth::NetworkTypes>>
for HlSystemTransactionExt<Eth>
where
jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{ {
/// Returns the system transactions for a given block hash.
/// Semi-compliance with the `eth_getSystemTxsByBlockHash` RPC method introduced by hl-node.
/// https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/hyperevm/json-rpc
///
/// NOTE: Method name differs from hl-node because we retrieve transaction data from EVM
/// (signature recovery for 'from' address, EVM hash calculation) rather than HyperCore.
async fn get_evm_system_txs_by_block_hash(
&self,
hash: B256,
) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?hash, "Serving eth_getEvmSystemTxsByBlockHash");
match self.get_system_txs_by_block_id(BlockId::Hash(hash.into())).await {
Ok(txs) => Ok(txs),
// hl-node returns none if the block is not found
Err(_) => Ok(None),
}
}
/// Returns the system transactions for a given block number, or the latest block if no block
/// number is provided. Semi-compliance with the `eth_getSystemTxsByBlockNumber` RPC method
/// introduced by hl-node. https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/hyperevm/json-rpc
///
/// NOTE: Method name differs from hl-node because we retrieve transaction data from EVM
/// (signature recovery for 'from' address, EVM hash calculation) rather than HyperCore.
async fn get_evm_system_txs_by_block_number(
&self,
id: Option<BlockId>,
) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?id, "Serving eth_getEvmSystemTxsByBlockNumber");
match self.get_system_txs_by_block_id(id.unwrap_or_default()).await? {
Some(txs) => Ok(Some(txs)),
None => {
// hl-node returns an error if the block is not found
Err(ErrorObject::owned(
INTERNAL_ERROR_CODE,
format!("invalid block height: {id:?}"),
Some(()),
))
}
}
}
/// Returns the receipts for the system transactions for a given block hash.
async fn get_evm_system_txs_receipts_by_block_hash(
&self,
hash: B256,
) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?hash, "Serving eth_getEvmSystemTxsReceiptsByBlockHash");
match self.get_system_txs_receipts_by_block_id(BlockId::Hash(hash.into())).await {
Ok(receipts) => Ok(receipts),
// hl-node returns none if the block is not found
Err(_) => Ok(None),
}
}
/// Returns the receipts for the system transactions for a given block number, or the latest
/// block if no block number is provided.
async fn get_evm_system_txs_receipts_by_block_number(
&self,
block_id: Option<BlockId>,
) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?block_id, "Serving eth_getEvmSystemTxsReceiptsByBlockNumber");
match self.get_system_txs_receipts_by_block_id(block_id.unwrap_or_default()).await? {
Some(receipts) => Ok(Some(receipts)),
None => Err(ErrorObject::owned(
INTERNAL_ERROR_CODE,
format!("invalid block height: {block_id:?}"),
Some(()),
)),
}
}
} }
pub struct HlNodeFilterHttp<Eth: EthWrapper> { pub struct HlNodeFilterHttp<Eth: EthWrapper> {
@ -146,8 +323,9 @@ impl<Eth: EthWrapper> HlNodeFilterWs<Eth> {
} }
 #[async_trait]
-impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>>
-    for HlNodeFilterWs<Eth>
+impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>> for HlNodeFilterWs<Eth>
+where
+    jsonrpsee_types::error::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
 {
async fn subscribe( async fn subscribe(
&self, &self,
@ -169,6 +347,8 @@ impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>>
pubsub.log_stream(filter).filter_map(|log| adjust_log::<Eth>(log, &provider)), pubsub.log_stream(filter).filter_map(|log| adjust_log::<Eth>(log, &provider)),
) )
.await; .await;
} else if kind == SubscriptionKind::NewHeads {
let _ = pipe_from_stream(sink, new_headers_stream::<Eth>(&provider)).await;
} else { } else {
let _ = pubsub.handle_accepted(sink, kind, params).await; let _ = pubsub.handle_accepted(sink, kind, params).await;
} }
@ -195,23 +375,6 @@ fn adjust_log<Eth: EthWrapper>(mut log: Log, provider: &Eth::Provider) -> Option
Some(log) Some(log)
} }
async fn pipe_from_stream<T: Serialize, St: Stream<Item = T> + Unpin>(
sink: SubscriptionSink,
mut stream: St,
) -> Result<(), ErrorObject<'static>> {
loop {
tokio::select! {
_ = sink.closed() => break Ok(()),
maybe_item = stream.next() => {
let Some(item) = maybe_item else { break Ok(()) };
let msg = SubscriptionMessage::new(sink.method_name(), sink.subscription_id(), &item)
.map_err(SubscriptionSerializeError::from)?;
if sink.send(msg).await.is_err() { break Ok(()); }
}
}
}
}
pub struct HlNodeBlockFilterHttp<Eth: EthWrapper> { pub struct HlNodeBlockFilterHttp<Eth: EthWrapper> {
eth_api: Arc<Eth>, eth_api: Arc<Eth>,
_marker: PhantomData<Eth>, _marker: PhantomData<Eth>,
@ -317,7 +480,7 @@ async fn adjust_block_receipts<Eth: EthWrapper>(
}; };
 let input = ConvertReceiptInput {
-    receipt: Cow::Borrowed(receipt),
+    receipt: receipt.clone(),
tx, tx,
gas_used: receipt.cumulative_gas_used() - gas_used, gas_used: receipt.cumulative_gas_used() - gas_used,
next_log_index, next_log_index,
@ -362,10 +525,9 @@ async fn adjust_transaction_receipt<Eth: EthWrapper>(
 // This function assumes that `block_id` is already validated by the caller.
 fn system_tx_count_for_block<Eth: EthWrapper>(eth_api: &Eth, block_id: BlockId) -> usize {
     let provider = eth_api.provider();
-    let block = provider.block_by_id(block_id).unwrap().unwrap();
-    let system_tx_count =
-        block.body.transactions().iter().filter(|tx| tx.is_system_transaction()).count();
-    system_tx_count
+    let header = provider.header_by_id(block_id).unwrap().unwrap();
+    header.extras.system_tx_count.try_into().unwrap()
 }
#[async_trait] #[async_trait]
@ -439,6 +601,9 @@ where
block_id: BlockId, block_id: BlockId,
) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> { ) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?block_id, "Serving eth_getBlockReceipts"); trace!(target: "rpc::eth", ?block_id, "Serving eth_getBlockReceipts");
if self.eth_api.provider().block_by_id(block_id).map_err(EthApiError::from)?.is_none() {
return Ok(None);
}
let result = let result =
adjust_block_receipts(block_id, &*self.eth_api).instrument(engine_span!()).await?; adjust_block_receipts(block_id, &*self.eth_api).instrument(engine_span!()).await?;
Ok(result.map(|(_, receipts)| receipts)) Ok(result.map(|(_, receipts)| receipts))
@ -446,7 +611,7 @@ where
} }
pub fn install_hl_node_compliance<Node, EthApi>( pub fn install_hl_node_compliance<Node, EthApi>(
-    ctx: RpcContext<Node, EthApi>,
+    ctx: &mut RpcContext<Node, EthApi>,
) -> Result<(), eyre::Error> ) -> Result<(), eyre::Error>
where where
Node: FullNodeComponents, Node: FullNodeComponents,
@ -473,5 +638,9 @@ where
ctx.modules.replace_configured( ctx.modules.replace_configured(
HlNodeBlockFilterHttp::new(Arc::new(ctx.registry.eth_api().clone())).into_rpc(), HlNodeBlockFilterHttp::new(Arc::new(ctx.registry.eth_api().clone())).into_rpc(),
)?; )?;
ctx.modules
.merge_configured(HlSystemTransactionExt::new(ctx.registry.eth_api().clone()).into_rpc())?;
Ok(()) Ok(())
} }

src/addons/mod.rs

@@ -1,3 +1,5 @@
 pub mod call_forwarder;
 pub mod hl_node_compliance;
 pub mod tx_forwarder;
+pub mod subscribe_fixup;
+mod utils;

src/addons/subscribe_fixup.rs (new file, +54)

@ -0,0 +1,54 @@
use crate::addons::utils::{EthWrapper, new_headers_stream, pipe_from_stream};
use alloy_rpc_types::pubsub::{Params, SubscriptionKind};
use async_trait::async_trait;
use jsonrpsee::PendingSubscriptionSink;
use jsonrpsee_types::ErrorObject;
use reth::tasks::TaskSpawner;
use reth_rpc::EthPubSub;
use reth_rpc_convert::RpcTransaction;
use reth_rpc_eth_api::{EthApiTypes, EthPubSubApiServer};
use std::sync::Arc;

pub struct SubscribeFixup<Eth: EthWrapper> {
    pubsub: Arc<EthPubSub<Eth>>,
    provider: Arc<Eth::Provider>,
    subscription_task_spawner: Box<dyn TaskSpawner + 'static>,
}

#[async_trait]
impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>> for SubscribeFixup<Eth>
where
    ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
    async fn subscribe(
        &self,
        pending: PendingSubscriptionSink,
        kind: SubscriptionKind,
        params: Option<Params>,
    ) -> jsonrpsee::core::SubscriptionResult {
        let sink = pending.accept().await?;
        let (pubsub, provider) = (self.pubsub.clone(), self.provider.clone());
        self.subscription_task_spawner.spawn(Box::pin(async move {
            if kind == SubscriptionKind::NewHeads {
                let _ = pipe_from_stream(sink, new_headers_stream::<Eth>(&provider)).await;
            } else {
                let _ = pubsub.handle_accepted(sink, kind, params).await;
            }
        }));
        Ok(())
    }
}

impl<Eth: EthWrapper> SubscribeFixup<Eth> {
    pub fn new(
        pubsub: Arc<EthPubSub<Eth>>,
        provider: Arc<Eth::Provider>,
        subscription_task_spawner: Box<dyn TaskSpawner + 'static>,
    ) -> Self
    where
        Eth: EthWrapper,
        ErrorObject<'static>: From<Eth::Error>,
    {
        Self { pubsub, provider, subscription_task_spawner }
    }
}

src/addons/tx_forwarder.rs

@@ -2,14 +2,14 @@ use std::time::Duration;

 use alloy_json_rpc::RpcObject;
 use alloy_network::Ethereum;
-use alloy_primitives::{Bytes, B256};
+use alloy_primitives::{B256, Bytes};
 use alloy_rpc_types::TransactionRequest;
 use jsonrpsee::{
     http_client::{HttpClient, HttpClientBuilder},
     proc_macros::rpc,
-    types::{error::INTERNAL_ERROR_CODE, ErrorObject},
+    types::{ErrorObject, error::INTERNAL_ERROR_CODE},
 };
-use jsonrpsee_core::{async_trait, client::ClientT, ClientError, RpcResult};
+use jsonrpsee_core::{ClientError, RpcResult, async_trait, client::ClientT};
 use reth::rpc::{result::internal_rpc_err, server_types::eth::EthApiError};
 use reth_rpc_eth_api::RpcReceipt;

src/addons/utils.rs (new file, +90)

@ -0,0 +1,90 @@
use std::sync::Arc;
use crate::{HlBlock, HlPrimitives};
use alloy_primitives::U256;
use alloy_rpc_types::Header;
use futures::StreamExt;
use jsonrpsee::{SubscriptionMessage, SubscriptionSink};
use jsonrpsee_types::ErrorObject;
use reth_primitives::SealedHeader;
use reth_provider::{BlockReader, CanonStateSubscriptions};
use reth_rpc::{RpcTypes, eth::pubsub::SubscriptionSerializeError};
use reth_rpc_convert::{RpcBlock, RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq};
use reth_rpc_eth_api::{
EthApiServer, FullEthApiTypes, RpcNodeCoreExt,
helpers::{EthBlocks, EthTransactions, LoadReceipt},
};
use serde::Serialize;
use tokio_stream::Stream;
pub trait EthWrapper:
EthApiServer<
RpcTxReq<Self::NetworkTypes>,
RpcTransaction<Self::NetworkTypes>,
RpcBlock<Self::NetworkTypes>,
RpcReceipt<Self::NetworkTypes>,
RpcHeader<Self::NetworkTypes>,
> + FullEthApiTypes<
Primitives = HlPrimitives,
NetworkTypes: RpcTypes<TransactionResponse = alloy_rpc_types_eth::Transaction>,
> + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
+ EthBlocks
+ EthTransactions
+ LoadReceipt
+ 'static
{
}
impl<T> EthWrapper for T where
T: EthApiServer<
RpcTxReq<Self::NetworkTypes>,
RpcTransaction<Self::NetworkTypes>,
RpcBlock<Self::NetworkTypes>,
RpcReceipt<Self::NetworkTypes>,
RpcHeader<Self::NetworkTypes>,
> + FullEthApiTypes<
Primitives = HlPrimitives,
NetworkTypes: RpcTypes<TransactionResponse = alloy_rpc_types_eth::Transaction>,
> + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
+ EthBlocks
+ EthTransactions
+ LoadReceipt
+ 'static
{
}
pub(super) async fn pipe_from_stream<T: Serialize, St: Stream<Item = T> + Unpin>(
sink: SubscriptionSink,
mut stream: St,
) -> Result<(), ErrorObject<'static>> {
loop {
tokio::select! {
_ = sink.closed() => break Ok(()),
maybe_item = stream.next() => {
let Some(item) = maybe_item else { break Ok(()) };
let msg = SubscriptionMessage::new(sink.method_name(), sink.subscription_id(), &item)
.map_err(SubscriptionSerializeError::from)?;
if sink.send(msg).await.is_err() { break Ok(()); }
}
}
}
}
pub(super) fn new_headers_stream<Eth: EthWrapper>(
provider: &Arc<Eth::Provider>,
) -> impl Stream<Item = Header<alloy_consensus::Header>> {
provider.canonical_state_stream().flat_map(|new_chain| {
let headers = new_chain
.committed()
.blocks_iter()
.map(|block| {
Header::from_consensus(
SealedHeader::new(block.header().inner.clone(), block.hash()).into(),
None,
Some(U256::from(block.rlp_length())),
)
})
.collect::<Vec<_>>();
futures::stream::iter(headers)
})
}
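
`EthWrapper` above is the standard trait-alias idiom: an empty trait plus a blanket impl, so the long bound set only has to be written twice. A minimal self-contained illustration of the same pattern (all names below are invented for the demo):

    trait SendClone: Clone + Send + 'static {}
    impl<T> SendClone for T where T: Clone + Send + 'static {}

    fn spawn_with<T: SendClone>(value: T) {
        // Any type meeting the aliased bounds can be moved into a thread.
        std::thread::spawn(move || drop(value.clone()));
    }

    fn main() {
        spawn_with(String::from("qualifies: Clone + Send + 'static"));
    }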

View File

@ -1,5 +1,5 @@
use alloy_chains::{Chain, NamedChain}; use alloy_chains::{Chain, NamedChain};
use alloy_primitives::{b256, Address, Bytes, B256, B64, U256}; use alloy_primitives::{Address, B64, B256, Bytes, U256, b256};
use reth_chainspec::{ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition, Hardfork}; use reth_chainspec::{ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition, Hardfork};
use reth_primitives::{Header, SealedHeader}; use reth_primitives::{Header, SealedHeader};
use std::sync::LazyLock; use std::sync::LazyLock;

View File

@@ -1,8 +1,7 @@
 pub mod hl;
 pub mod parser;
-use crate::hardforks::HlHardforks;
-use alloy_consensus::Header;
+use crate::{hardforks::HlHardforks, node::primitives::{header::HlHeaderExtras, HlHeader}};
 use alloy_eips::eip7840::BlobParams;
 use alloy_genesis::Genesis;
 use alloy_primitives::{Address, B256, U256};

@@ -20,10 +19,11 @@ pub const TESTNET_CHAIN_ID: u64 = 998;
 #[derive(Debug, Default, Clone, PartialEq, Eq)]
 pub struct HlChainSpec {
     pub inner: ChainSpec,
+    pub genesis_header: HlHeader,
 }

 impl EthChainSpec for HlChainSpec {
-    type Header = Header;
+    type Header = HlHeader;

     fn blob_params_at_timestamp(&self, timestamp: u64) -> Option<BlobParams> {
         self.inner.blob_params_at_timestamp(timestamp)

@@ -37,10 +37,6 @@ impl EthChainSpec for HlChainSpec {
         self.inner.chain()
     }

-    fn base_fee_params_at_block(&self, block_number: u64) -> BaseFeeParams {
-        self.inner.base_fee_params_at_block(block_number)
-    }
-
     fn base_fee_params_at_timestamp(&self, timestamp: u64) -> BaseFeeParams {
         self.inner.base_fee_params_at_timestamp(timestamp)
     }

@@ -61,8 +57,8 @@ impl EthChainSpec for HlChainSpec {
         Box::new(self.inner.display_hardforks())
     }

-    fn genesis_header(&self) -> &Header {
-        self.inner.genesis_header()
+    fn genesis_header(&self) -> &HlHeader {
+        &self.genesis_header
     }

     fn genesis(&self) -> &Genesis {

@@ -131,4 +127,10 @@ impl HlChainSpec {
             _ => unreachable!("Unreachable since ChainSpecParser won't return other chains"),
         }
     }
+
+    fn new(inner: ChainSpec) -> Self {
+        let genesis_header =
+            HlHeader { inner: inner.genesis_header().clone(), extras: HlHeaderExtras::default() };
+        Self { inner, genesis_header }
+    }
 }
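
A hedged sketch of the caching pattern `HlChainSpec::new` introduces above: the genesis header is wrapped into the new header type once at construction, so `genesis_header()` can return a reference instead of converting on every call. All names below are illustrative, not the crate's real types.

    #[derive(Clone, Default)]
    struct DemoExtras;

    #[derive(Clone)]
    struct DemoHeader<H> {
        inner: H,
        extras: DemoExtras,
    }

    struct DemoSpec<H: Clone> {
        inner: H,
        genesis: DemoHeader<H>,
    }

    impl<H: Clone> DemoSpec<H> {
        fn new(inner: H) -> Self {
            // Build the wrapped genesis header once, up front.
            let genesis = DemoHeader { inner: inner.clone(), extras: DemoExtras };
            Self { inner, genesis }
        }

        fn genesis_header(&self) -> &DemoHeader<H> {
            &self.genesis
        }
    }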

View File

@@ -1,4 +1,4 @@
-use crate::chainspec::{hl::hl_testnet, HlChainSpec};
+use crate::chainspec::{HlChainSpec, hl::hl_testnet};

 use super::hl::hl_mainnet;
 use reth_cli::chainspec::ChainSpecParser;

@@ -26,8 +26,8 @@ impl ChainSpecParser for HlChainSpecParser {
 /// Currently only mainnet is supported.
 pub fn chain_value_parser(s: &str) -> eyre::Result<Arc<HlChainSpec>> {
     match s {
-        "mainnet" => Ok(Arc::new(HlChainSpec { inner: hl_mainnet() })),
-        "testnet" => Ok(Arc::new(HlChainSpec { inner: hl_testnet() })),
+        "mainnet" => Ok(Arc::new(HlChainSpec::new(hl_mainnet()))),
+        "testnet" => Ok(Arc::new(HlChainSpec::new(hl_testnet()))),
         _ => Err(eyre::eyre!("Unsupported chain: {}", s)),
     }
 }
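
Hypothetical usage of `chain_value_parser` as wired above; only "mainnet" and "testnet" parse, and anything else surfaces the eyre error from the fallback arm:

    fn demo() -> eyre::Result<()> {
        let mainnet = chain_value_parser("mainnet")?;   // Arc<HlChainSpec> via HlChainSpec::new
        assert!(chain_value_parser("devnet").is_err()); // "Unsupported chain: devnet"
        drop(mainnet);
        Ok(())
    }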

View File

@@ -1,4 +1,4 @@
-use alloy_primitives::{BlockNumber, B256};
+use alloy_primitives::{B256, BlockNumber};
 use reth_provider::{BlockNumReader, ProviderError};
 use std::cmp::Ordering;

View File

@@ -2,8 +2,8 @@ use super::HlEvmInner;
 use crate::evm::{spec::HlSpecId, transaction::HlTxTr};
 use reth_revm::context::ContextTr;
 use revm::{
-    context::Cfg, context_interface::Block, handler::instructions::EthInstructions,
-    interpreter::interpreter::EthInterpreter, Context, Database,
+    Context, Database, context::Cfg, context_interface::Block,
+    handler::instructions::EthInstructions, interpreter::interpreter::EthInterpreter,
 };

 /// Trait that allows for hl HlEvm to be built.

View File

@@ -1,8 +1,8 @@
 use crate::evm::{spec::HlSpecId, transaction::HlTxEnv};
 use revm::{
+    Context, Journal, MainContext,
     context::{BlockEnv, CfgEnv, TxEnv},
     database_interface::EmptyDB,
-    Context, Journal, MainContext,
 };

 /// Type alias for the default context type of the HlEvm.

View File

@@ -1,16 +1,16 @@
 use super::HlEvmInner;
 use crate::evm::{spec::HlSpecId, transaction::HlTxTr};
 use revm::{
-    context::{result::HaltReason, ContextSetters},
-    context_interface::{
-        result::{EVMError, ExecutionResult, ResultAndState},
-        Cfg, ContextTr, Database, JournalTr,
-    },
-    handler::{instructions::EthInstructions, PrecompileProvider},
-    inspector::{InspectCommitEvm, InspectEvm, Inspector, JournalExt},
-    interpreter::{interpreter::EthInterpreter, InterpreterResult},
-    state::EvmState,
     DatabaseCommit, ExecuteCommitEvm, ExecuteEvm,
+    context::{ContextSetters, result::HaltReason},
+    context_interface::{
+        Cfg, ContextTr, Database, JournalTr,
+        result::{EVMError, ExecutionResult, ResultAndState},
+    },
+    handler::{PrecompileProvider, instructions::EthInstructions},
+    inspector::{InspectCommitEvm, InspectEvm, Inspector, JournalExt},
+    interpreter::{InterpreterResult, interpreter::EthInterpreter},
+    state::EvmState,
 };

 // Type alias for HL context

View File

@@ -1,15 +1,15 @@
 use revm::{
+    Inspector,
     bytecode::opcode::BLOCKHASH,
     context::{ContextSetters, Evm, FrameStack},
     context_interface::ContextTr,
     handler::{
+        EthFrame, EthPrecompiles, EvmTr, FrameInitOrResult, FrameTr, PrecompileProvider,
         evm::{ContextDbError, FrameInitResult},
         instructions::{EthInstructions, InstructionProvider},
-        EthFrame, EthPrecompiles, EvmTr, FrameInitOrResult, FrameTr, PrecompileProvider,
     },
     inspector::{InspectorEvmTr, JournalExt},
-    interpreter::{interpreter::EthInterpreter, Instruction, InterpreterResult},
-    Inspector,
+    interpreter::{Instruction, InterpreterResult, interpreter::EthInterpreter},
 };

 use crate::chainspec::MAINNET_CHAIN_ID;

View File

@@ -7,36 +7,12 @@ use alloy_primitives::keccak256;
 use revm::{
     context::Host,
     interpreter::{
-        as_u64_saturated, interpreter_types::StackTr, popn_top, InstructionContext,
-        InterpreterTypes,
+        _count, InstructionContext, InterpreterTypes, as_u64_saturated, interpreter_types::StackTr,
+        popn_top,
     },
     primitives::{BLOCK_HASH_HISTORY, U256},
 };

-#[doc(hidden)]
-#[macro_export]
-#[collapse_debuginfo(yes)]
-macro_rules! _count {
-    (@count) => { 0 };
-    (@count $head:tt $($tail:tt)*) => { 1 + _count!(@count $($tail)*) };
-    ($($arg:tt)*) => { _count!(@count $($arg)*) };
-}
-
-/// Pops n values from the stack and returns the top value. Fails the instruction if n values can't
-/// be popped.
-#[macro_export]
-#[collapse_debuginfo(yes)]
-macro_rules! popn_top {
-    ([ $($x:ident),* ], $top:ident, $interpreter:expr $(,$ret:expr)? ) => {
-        // Workaround for https://github.com/rust-lang/rust/issues/144329.
-        if $interpreter.stack.len() < (1 + $crate::_count!($($x)*)) {
-            $interpreter.halt_underflow();
-            return $($ret)?;
-        }
-        let ([$( $x ),*], $top) = unsafe { $interpreter.stack.popn_top().unwrap_unchecked() };
-    };
-}
-
 /// Implements the BLOCKHASH instruction.
 ///
 /// Gets the hash of one of the 256 most recent complete blocks.
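
The removed `_count`/`popn_top` macros are now imported from revm's interpreter instead of being maintained as local copies. For readers unfamiliar with the trick, here is a standalone demonstration of the token-counting recursion the `_count` macro used (renamed for the demo; not the crate's code):

    macro_rules! count_tokens {
        (@count) => { 0usize };
        (@count $head:tt $($tail:tt)*) => { 1usize + count_tokens!(@count $($tail)*) };
        ($($arg:tt)*) => { count_tokens!(@count $($arg)*) };
    }

    fn main() {
        // Each token contributes 1; the recursion bottoms out at the empty case.
        assert_eq!(count_tokens!(a b c), 3);
        assert_eq!(count_tokens!(), 0);
    }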

View File

@@ -7,7 +7,7 @@ use reth_primitives_traits::SignerRecoverable;
 use revm::{
     context::TxEnv,
     context_interface::transaction::Transaction,
-    primitives::{Address, Bytes, TxKind, B256, U256},
+    primitives::{Address, B256, Bytes, TxKind, U256},
 };

 #[auto_impl(&, &mut, Box, Arc)]

View File

@@ -2,7 +2,7 @@
 use alloy_chains::{Chain, NamedChain};
 use core::any::Any;
 use reth_chainspec::ForkCondition;
-use reth_ethereum_forks::{hardfork, ChainHardforks, EthereumHardfork, Hardfork};
+use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, Hardfork, hardfork};

 hardfork!(
     /// The name of a hl hardfork.

View File

@@ -5,5 +5,6 @@ mod evm;
 mod hardforks;
 pub mod node;
 pub mod pseudo_peer;
+pub mod version;

-pub use node::primitives::{HlBlock, HlBlockBody, HlPrimitives};
+pub use node::primitives::{HlBlock, HlBlockBody, HlHeader, HlPrimitives};

View File

@@ -1,19 +1,26 @@
 use std::sync::Arc;

 use clap::Parser;
-use reth::builder::{NodeBuilder, NodeHandle, WithLaunchContext};
+use reth::{
+    builder::{NodeBuilder, NodeHandle, WithLaunchContext},
+    rpc::{api::EthPubSubApiServer, eth::RpcNodeCore},
+};
 use reth_db::DatabaseEnv;
 use reth_hl::{
     addons::{
         call_forwarder::{self, CallForwarderApiServer},
         hl_node_compliance::install_hl_node_compliance,
+        subscribe_fixup::SubscribeFixup,
         tx_forwarder::{self, EthForwarderApiServer},
     },
-    chainspec::{parser::HlChainSpecParser, HlChainSpec},
+    chainspec::{HlChainSpec, parser::HlChainSpecParser},
     node::{
-        cli::{Cli, HlNodeArgs},
-        storage::tables::Tables,
         HlNode,
+        cli::{Cli, HlNodeArgs},
+        rpc::precompile::{HlBlockPrecompileApiServer, HlBlockPrecompileExt},
+        spot_meta::init as spot_meta_init,
+        storage::tables::Tables,
+        types::set_spot_metadata_db,
     },
 };
 use tracing::info;

@@ -26,20 +33,19 @@ static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
 fn main() -> eyre::Result<()> {
     reth_cli_util::sigsegv_handler::install();

-    // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided.
-    if std::env::var_os("RUST_BACKTRACE").is_none() {
-        std::env::set_var("RUST_BACKTRACE", "1");
-    }
+    // Initialize custom version metadata before parsing CLI so --version uses reth-hl values
+    reth_hl::version::init_reth_hl_version();

     Cli::<HlChainSpecParser, HlNodeArgs>::parse().run(
         |builder: WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, HlChainSpec>>,
          ext: HlNodeArgs| async move {
             let default_upstream_rpc_url = builder.config().chain.official_rpc_url();
-            let (node, engine_handle_tx) = HlNode::new(ext.block_source_args.parse().await?);
+            let (node, engine_handle_tx) =
+                HlNode::new(ext.block_source_args.parse().await?, ext.debug_cutoff_height);

             let NodeHandle { node, node_exit_future: exit_future } = builder
                 .node(node)
-                .extend_rpc_modules(move |ctx| {
+                .extend_rpc_modules(move |mut ctx| {
                     let upstream_rpc_url =
                         ext.upstream_rpc_url.unwrap_or_else(|| default_upstream_rpc_url.to_owned());

@@ -59,15 +65,45 @@ fn main() -> eyre::Result<()> {
                         info!("Call/gas estimation will be forwarded to {}", upstream_rpc_url);
                     }

+                    // This is a temporary workaround for the issue where custom headers
+                    // break `eth_subscribe[type=newHeads]`
+                    ctx.modules.replace_configured(
+                        SubscribeFixup::new(
+                            Arc::new(ctx.registry.eth_handlers().pubsub.clone()),
+                            Arc::new(ctx.registry.eth_api().provider().clone()),
+                            Box::new(ctx.node().task_executor.clone()),
+                        )
+                        .into_rpc(),
+                    )?;
+
                     if ext.hl_node_compliant {
-                        install_hl_node_compliance(ctx)?;
+                        install_hl_node_compliance(&mut ctx)?;
                         info!("hl-node compliant mode enabled");
                     }

+                    if !ext.experimental_eth_get_proof {
+                        ctx.modules.remove_method_from_configured("eth_getProof");
+                        info!("eth_getProof is disabled by default");
+                    }
+
+                    ctx.modules.merge_configured(
+                        HlBlockPrecompileExt::new(ctx.registry.eth_api().clone()).into_rpc(),
+                    )?;
+
                     Ok(())
                 })
-                .apply(|builder| {
-                    builder.db().create_tables_for::<Tables>().expect("create tables");
+                .apply(|mut builder| {
+                    builder.db_mut().create_tables_for::<Tables>().expect("create tables");
+
+                    let chain_id = builder.config().chain.inner.chain().id();
+                    let db = builder.db_mut().clone();
+
+                    // Set database handle for on-demand persistence
+                    set_spot_metadata_db(db.clone());
+
+                    // Load spot metadata from database and initialize cache
+                    spot_meta_init::load_spot_metadata_cache(&db, chain_id);
+
                     builder
                 })
                 .launch()
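
A hedged smoke test for the eth_getProof gating above (URL and port hypothetical): when --experimental-eth-get-proof is not set, the method is removed from the configured modules, so a request should fail with a method-not-found error.

    use jsonrpsee::core::client::ClientT;
    use jsonrpsee::http_client::HttpClientBuilder;
    use jsonrpsee::rpc_params;

    #[tokio::main]
    async fn main() -> eyre::Result<()> {
        let client = HttpClientBuilder::default().build("http://localhost:8545")?;
        // Any well-formed call works as a probe; an error suggests the method was removed.
        let res: Result<serde_json::Value, _> = client
            .request(
                "eth_getProof",
                rpc_params![
                    "0x0000000000000000000000000000000000000000",
                    Vec::<String>::new(),
                    "latest"
                ],
            )
            .await;
        println!("eth_getProof available: {}", res.is_ok());
        Ok(())
    }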

View File

@@ -1,26 +1,30 @@
 use crate::{
-    chainspec::{parser::HlChainSpecParser, HlChainSpec},
-    node::{consensus::HlConsensus, evm::config::HlEvmConfig, storage::tables::Tables, HlNode},
+    chainspec::{HlChainSpec, parser::HlChainSpecParser},
+    node::{
+        HlNode,
+        consensus::HlConsensus,
+        evm::config::HlEvmConfig,
+        migrate::Migrator,
+        spot_meta::init as spot_meta_init,
+        storage::tables::Tables,
+    },
     pseudo_peer::BlockSourceArgs,
 };
 use clap::{Args, Parser};
 use reth::{
-    args::LogArgs,
+    CliRunner,
+    args::{DatabaseArgs, DatadirArgs, LogArgs},
     builder::{NodeBuilder, WithLaunchContext},
     cli::Commands,
     prometheus_exporter::install_prometheus_recorder,
     version::version_metadata,
-    CliRunner,
 };
 use reth_chainspec::EthChainSpec;
 use reth_cli::chainspec::ChainSpecParser;
 use reth_cli_commands::{common::EnvironmentArgs, launcher::FnLauncher};
-use reth_db::{init_db, mdbx::init_db_for, DatabaseEnv};
+use reth_db::{DatabaseEnv, init_db, mdbx::init_db_for};
 use reth_tracing::FileWorkerGuard;
-use std::{
-    fmt::{self},
-    sync::Arc,
-};
+use std::{fmt::{self}, sync::Arc};
 use tracing::info;

 macro_rules! not_applicable {

@@ -35,6 +39,12 @@ pub struct HlNodeArgs {
     #[command(flatten)]
     pub block_source_args: BlockSourceArgs,

+    /// Debug cutoff height.
+    ///
+    /// This option is used to cut off the block import at a specific height.
+    #[arg(long, env = "DEBUG_CUTOFF_HEIGHT")]
+    pub debug_cutoff_height: Option<u64>,
+
     /// Upstream RPC URL to forward incoming transactions.
     ///
     /// Default to Hyperliquid's RPC URL when not provided (https://rpc.hyperliquid.xyz/evm).

@@ -55,6 +65,24 @@ pub struct HlNodeArgs {
     /// This is useful when read precompile is needed for gas estimation.
     #[arg(long, env = "FORWARD_CALL")]
     pub forward_call: bool,
+
+    /// Experimental: enables the eth_getProof RPC method.
+    ///
+    /// Note: Due to the state root difference, trie updates* may not function correctly in all
+    /// scenarios. For example, incremental root updates are not possible, which can cause
+    /// eth_getProof to malfunction in some cases.
+    ///
+    /// This limitation does not impact normal node functionality, except for state root (which is
+    /// unused) and eth_getProof. The archival state is maintained by block order, not by trie
+    /// updates. As a precaution, nanoreth disables eth_getProof by default to prevent
+    /// potential issues.
+    ///
+    /// Use --experimental-eth-get-proof to forcibly enable eth_getProof, assuming trie updates are
+    /// working as intended. Enabling this by default will be tracked in #15.
+    ///
+    /// * Refers to the Merkle trie used for eth_getProof and state root, not actual state values.
+    #[arg(long, env = "EXPERIMENTAL_ETH_GET_PROOF")]
+    pub experimental_eth_get_proof: bool,
 }

 /// The main reth_hl cli interface.

@@ -112,11 +140,18 @@ where
         // Install the prometheus recorder to be sure to record all metrics
         let _ = install_prometheus_recorder();

-        let components =
-            |spec: Arc<C::ChainSpec>| (HlEvmConfig::new(spec.clone()), HlConsensus::new(spec));
+        let components = |spec: Arc<C::ChainSpec>| {
+            (HlEvmConfig::new(spec.clone()), Arc::new(HlConsensus::new(spec)))
+        };

         match self.command {
             Commands::Node(command) => runner.run_command_until_exit(|ctx| {
+                // NOTE: This handles the one-time migration around the Oct 10 upgrade.
+                // It is no longer necessary by default, so it sits behind an
+                // environment variable gate.
+                if std::env::var("CHECK_DB_MIGRATION").is_ok() {
+                    Self::migrate_db(&command.chain, &command.datadir, &command.db)
+                        .expect("Failed to migrate database");
+                }
                 command.execute(ctx, FnLauncher::new::<C, Ext>(launcher))
             }),
             Commands::Init(command) => {

@@ -133,9 +168,6 @@ where
                 runner.run_command_until_exit(|ctx| command.execute::<HlNode, _>(ctx, components))
             }
             Commands::Config(command) => runner.run_until_ctrl_c(command.execute()),
-            Commands::Recover(command) => {
-                runner.run_command_until_exit(|ctx| command.execute::<HlNode>(ctx))
-            }
             Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::<HlNode>()),
             Commands::Import(command) => {
                 runner.run_blocking_until_ctrl_c(command.execute::<HlNode, _>(components))

@@ -163,7 +195,21 @@ where
         let data_dir = env.datadir.clone().resolve_datadir(env.chain.chain());
         let db_path = data_dir.db();
         init_db(db_path.clone(), env.db.database_args())?;
-        init_db_for::<_, Tables>(db_path, env.db.database_args())?;
+        init_db_for::<_, Tables>(db_path.clone(), env.db.database_args())?;
+
+        // Initialize spot metadata in database
+        let chain_id = env.chain.chain().id();
+        spot_meta_init::init_spot_metadata(db_path, env.db.database_args(), chain_id)?;
+
+        Ok(())
+    }
+
+    fn migrate_db(
+        chain: &HlChainSpec,
+        datadir: &DatadirArgs,
+        db: &DatabaseArgs,
+    ) -> eyre::Result<()> {
+        Migrator::<HlNode>::new(chain.clone(), datadir.clone(), *db)?.migrate_db()?;
         Ok(())
     }
 }
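
The two new flags double as environment variables via clap's env attribute. A minimal standalone sketch of the same pattern (assumes clap with the derive and env features enabled; the struct is hypothetical):

    use clap::Parser;

    #[derive(Parser, Debug)]
    struct DemoArgs {
        /// Mirrors --debug-cutoff-height / DEBUG_CUTOFF_HEIGHT above.
        #[arg(long, env = "DEBUG_CUTOFF_HEIGHT")]
        debug_cutoff_height: Option<u64>,

        /// Mirrors --experimental-eth-get-proof / EXPERIMENTAL_ETH_GET_PROOF above.
        #[arg(long, env = "EXPERIMENTAL_ETH_GET_PROOF")]
        experimental_eth_get_proof: bool,
    }

    fn main() {
        // Either `--debug-cutoff-height 123` or DEBUG_CUTOFF_HEIGHT=123 populates the field.
        let args = DemoArgs::parse();
        println!("{args:?}");
    }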

View File

@@ -1,9 +1,8 @@
-use crate::{hardforks::HlHardforks, node::HlNode, HlBlock, HlBlockBody, HlPrimitives};
-use alloy_consensus::Header;
+use crate::{hardforks::HlHardforks, node::{primitives::HlHeader, HlNode}, HlBlock, HlBlockBody, HlPrimitives};
 use reth::{
-    api::FullNodeTypes,
+    api::{FullNodeTypes, NodeTypes},
     beacon_consensus::EthBeaconConsensus,
-    builder::{components::ConsensusBuilder, BuilderContext},
+    builder::{BuilderContext, components::ConsensusBuilder},
     consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator},
     consensus_common::validation::{
         validate_against_parent_4844, validate_against_parent_hash_number,

@@ -24,7 +23,7 @@ impl<Node> ConsensusBuilder<Node> for HlConsensusBuilder
 where
     Node: FullNodeTypes<Types = HlNode>,
 {
-    type Consensus = Arc<dyn FullConsensus<HlPrimitives, Error = ConsensusError>>;
+    type Consensus = Arc<HlConsensus<<Node::Types as NodeTypes>::ChainSpec>>;

     async fn build_consensus(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Consensus> {
         Ok(Arc::new(HlConsensus::new(ctx.chain_spec())))

@@ -101,14 +100,14 @@ where

 impl<ChainSpec> Consensus<HlBlock> for HlConsensus<ChainSpec>
 where
-    ChainSpec: EthChainSpec<Header = Header> + HlHardforks,
+    ChainSpec: EthChainSpec<Header = HlHeader> + HlHardforks,
 {
     type Error = ConsensusError;

     fn validate_body_against_header(
         &self,
         body: &HlBlockBody,
-        header: &SealedHeader,
+        header: &SealedHeader<HlHeader>,
     ) -> Result<(), ConsensusError> {
         Consensus::<HlBlock>::validate_body_against_header(&self.inner, body, header)
     }

@@ -148,7 +147,7 @@ mod reth_copy;

 impl<ChainSpec> FullConsensus<HlPrimitives> for HlConsensus<ChainSpec>
 where
-    ChainSpec: EthChainSpec<Header = Header> + HlHardforks,
+    ChainSpec: EthChainSpec<Header = HlHeader> + HlHardforks,
 {
     fn validate_block_post_execution(

View File

@@ -1,21 +1,21 @@
 //! Copy of reth codebase.
-use alloy_consensus::{proofs::calculate_receipt_root, BlockHeader, TxReceipt};
+use crate::HlBlock;
+use alloy_consensus::{BlockHeader, TxReceipt, proofs::calculate_receipt_root};
 use alloy_eips::eip7685::Requests;
-use alloy_primitives::{Bloom, B256};
+use alloy_primitives::{B256, Bloom};
 use reth::consensus::ConsensusError;
 use reth_chainspec::EthereumHardforks;
-use reth_primitives::{gas_spent_by_transactions, GotExpected, RecoveredBlock};
-use reth_primitives_traits::{Block, Receipt as ReceiptTrait};
+use reth_primitives::{GotExpected, RecoveredBlock, gas_spent_by_transactions};
+use reth_primitives_traits::Receipt as ReceiptTrait;

-pub fn validate_block_post_execution<B, R, ChainSpec>(
-    block: &RecoveredBlock<B>,
+pub fn validate_block_post_execution<R, ChainSpec>(
+    block: &RecoveredBlock<HlBlock>,
     chain_spec: &ChainSpec,
     receipts: &[R],
     requests: &Requests,
 ) -> Result<(), ConsensusError>
 where
-    B: Block,
     R: ReceiptTrait,
     ChainSpec: EthereumHardforks,
 {

@@ -42,7 +42,7 @@ where
         receipts.iter().filter(|&r| r.cumulative_gas_used() != 0).cloned().collect::<Vec<_>>();
     if let Err(error) = verify_receipts(
         block.header().receipts_root(),
-        block.header().logs_bloom(),
+        block.header().inner.logs_bloom(),
         &receipts_for_root,
     ) {
         tracing::debug!(%error, ?receipts, "receipts verification failed");

View File

@@ -1,8 +1,6 @@
 use crate::{
-    node::evm::config::{HlBlockExecutorFactory, HlEvmConfig},
-    HlBlock,
+    node::evm::config::{HlBlockExecutorFactory, HlEvmConfig}, HlBlock, HlHeader
 };
-use alloy_consensus::Header;
 use reth_evm::{
     block::BlockExecutionError,
     execute::{BlockAssembler, BlockAssemblerInput},

@@ -13,7 +11,7 @@ impl BlockAssembler<HlBlockExecutorFactory> for HlEvmConfig {
     fn assemble_block(
         &self,
-        input: BlockAssemblerInput<'_, '_, HlBlockExecutorFactory, Header>,
+        input: BlockAssemblerInput<'_, '_, HlBlockExecutorFactory, HlHeader>,
     ) -> Result<Self::Block, BlockExecutionError> {
         let HlBlock { header, body } = self.block_assembler.assemble_block(input)?;
         Ok(HlBlock { header, body })

View File

@@ -1,5 +1,6 @@
 use super::{executor::HlBlockExecutor, factory::HlEvmFactory};
 use crate::{
+    HlBlock, HlBlockBody, HlHeader, HlPrimitives,
     chainspec::HlChainSpec,
     evm::{spec::HlSpecId, transaction::HlTxEnv},
     hardforks::HlHardforks,

@@ -9,31 +10,30 @@ use crate::{
         rpc::engine_api::validator::HlExecutionData,
         types::HlExtras,
     },
-    HlBlock, HlBlockBody, HlPrimitives,
 };
-use alloy_consensus::{BlockHeader, Header, Transaction as _, TxReceipt, EMPTY_OMMER_ROOT_HASH};
+use alloy_consensus::{BlockHeader, EMPTY_OMMER_ROOT_HASH, Header, Transaction as _, TxReceipt};
-use alloy_eips::{merge::BEACON_NONCE, Encodable2718};
+use alloy_eips::{Encodable2718, merge::BEACON_NONCE};
 use alloy_primitives::{Log, U256};
 use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
 use reth_evm::{
-    block::{BlockExecutionError, BlockExecutorFactory, BlockExecutorFor},
-    eth::{receipt_builder::ReceiptBuilder, EthBlockExecutionCtx},
-    execute::{BlockAssembler, BlockAssemblerInput},
-    precompiles::PrecompilesMap,
     ConfigureEngineEvm, ConfigureEvm, EvmEnv, EvmEnvFor, EvmFactory, ExecutableTxIterator,
     ExecutionCtxFor, FromRecoveredTx, FromTxWithEncoded, IntoTxEnv, NextBlockEnvAttributes,
+    block::{BlockExecutionError, BlockExecutorFactory, BlockExecutorFor},
+    eth::{EthBlockExecutionCtx, receipt_builder::ReceiptBuilder},
+    execute::{BlockAssembler, BlockAssemblerInput},
+    precompiles::PrecompilesMap,
 };
 use reth_evm_ethereum::EthBlockAssembler;
 use reth_payload_primitives::NewPayloadError;
-use reth_primitives::{logs_bloom, BlockTy, HeaderTy, Receipt, SealedBlock, SealedHeader};
+use reth_primitives::{BlockTy, HeaderTy, Receipt, SealedBlock, SealedHeader, logs_bloom};
-use reth_primitives_traits::{proofs, SignerRecoverable, WithEncoded};
+use reth_primitives_traits::{SignerRecoverable, WithEncoded, proofs};
 use reth_provider::BlockExecutionResult;
 use reth_revm::State;
 use revm::{
+    Inspector,
     context::{BlockEnv, CfgEnv, TxEnv},
     context_interface::block::BlobExcessGasAndPrice,
     primitives::hardfork::SpecId,
-    Inspector,
 };
 use std::{borrow::Cow, convert::Infallible, sync::Arc};

@@ -45,16 +45,16 @@ pub struct HlBlockAssembler {
 impl<F> BlockAssembler<F> for HlBlockAssembler
 where
     F: for<'a> BlockExecutorFactory<
         ExecutionCtx<'a> = HlBlockExecutionCtx<'a>,
         Transaction = TransactionSigned,
         Receipt = Receipt,
     >,
 {
     type Block = HlBlock;

     fn assemble_block(
         &self,
-        input: BlockAssemblerInput<'_, '_, F>,
+        input: BlockAssemblerInput<'_, '_, F, HlHeader>,
     ) -> Result<Self::Block, BlockExecutionError> {
         // TODO: Copy of EthBlockAssembler::assemble_block
         let inner = &self.inner;

@@ -106,7 +106,10 @@ where
         } else {
             // for the first post-fork block, both parent.blob_gas_used and
             // parent.excess_blob_gas are evaluated as 0
-            Some(alloy_eips::eip7840::BlobParams::cancun().next_block_excess_blob_gas(0, 0))
+            Some(
+                alloy_eips::eip7840::BlobParams::cancun()
+                    .next_block_excess_blob_gas_osaka(0, 0, 0),
+            )
         };
     }

@@ -133,6 +136,9 @@ where
             excess_blob_gas,
             requests_hash,
         };
+        let system_tx_count =
+            transactions.iter().filter(|t| is_system_transaction(t)).count() as u64;
+        let header = HlHeader::from_ethereum_header(header, receipts, system_tx_count);

         Ok(Self::Block {
             header,

@@ -237,9 +243,9 @@ where
     R: ReceiptBuilder<Transaction = TransactionSigned, Receipt: TxReceipt<Log = Log>>,
     Spec: EthereumHardforks + HlHardforks + EthChainSpec + Hardforks + Clone,
     EvmF: EvmFactory<
         Tx: FromRecoveredTx<TransactionSigned> + FromTxWithEncoded<TransactionSigned>,
         Precompiles = PrecompilesMap,
     >,
     R::Transaction: From<TransactionSigned> + Clone,
     Self: 'static,
     HlTxEnv<TxEnv>: IntoTxEnv<<EvmF as EvmFactory>::Tx>,

@@ -266,6 +272,8 @@ where
     }
 }

+static EMPTY_OMMERS: [Header; 0] = [];
+
 impl ConfigureEvm for HlEvmConfig
 where
     Self: Send + Sync + Unpin + Clone + 'static,

@@ -284,7 +292,7 @@ where
         self
     }

-    fn evm_env(&self, header: &Header) -> EvmEnv<HlSpecId> {
+    fn evm_env(&self, header: &HlHeader) -> Result<EvmEnv<HlSpecId>, Self::Error> {
         let blob_params = self.chain_spec().blob_params_at_timestamp(header.timestamp);
         let spec = revm_spec_by_timestamp_and_block_number(
             self.chain_spec().clone(),

@@ -324,12 +332,12 @@ where
             blob_excess_gas_and_price,
         };

-        EvmEnv { cfg_env, block_env }
+        Ok(EvmEnv { cfg_env, block_env })
     }

     fn next_evm_env(
         &self,
-        parent: &Header,
+        parent: &HlHeader,
         attributes: &Self::NextBlockEnvCtx,
     ) -> Result<EvmEnv<HlSpecId>, Self::Error> {
         // ensure we're not missing any timestamp based hardforks

@@ -373,28 +381,28 @@ where
     fn context_for_block<'a>(
         &self,
         block: &'a SealedBlock<BlockTy<Self::Primitives>>,
-    ) -> ExecutionCtxFor<'a, Self> {
+    ) -> Result<ExecutionCtxFor<'a, Self>, Self::Error> {
         let block_body = block.body();
-        HlBlockExecutionCtx {
+        Ok(HlBlockExecutionCtx {
             ctx: EthBlockExecutionCtx {
                 parent_hash: block.header().parent_hash,
                 parent_beacon_block_root: block.header().parent_beacon_block_root,
-                ommers: &block.body().ommers,
+                ommers: &EMPTY_OMMERS,
                 withdrawals: block.body().withdrawals.as_ref().map(Cow::Borrowed),
             },
             extras: HlExtras {
                 read_precompile_calls: block_body.read_precompile_calls.clone(),
                 highest_precompile_address: block_body.highest_precompile_address,
             },
-        }
+        })
     }

     fn context_for_next_block(
         &self,
         parent: &SealedHeader<HeaderTy<Self::Primitives>>,
         attributes: Self::NextBlockEnvCtx,
-    ) -> ExecutionCtxFor<'_, Self> {
-        HlBlockExecutionCtx {
+    ) -> Result<ExecutionCtxFor<'_, Self>, Self::Error> {
+        Ok(HlBlockExecutionCtx {
             ctx: EthBlockExecutionCtx {
                 parent_hash: parent.hash(),
                 parent_beacon_block_root: attributes.parent_beacon_block_root,

@@ -402,13 +410,13 @@ where
                 withdrawals: attributes.withdrawals.map(Cow::Owned),
             },
             extras: HlExtras::default(), // TODO: hacky, double check if this is correct
-        }
+        })
     }
 }

 impl ConfigureEngineEvm<HlExecutionData> for HlEvmConfig {
     fn evm_env_for_payload(&self, payload: &HlExecutionData) -> EvmEnvFor<Self> {
-        self.evm_env(&payload.0.header)
+        self.evm_env(&payload.0.header).unwrap()
     }

     fn context_for_payload<'a>(&self, payload: &'a HlExecutionData) -> ExecutionCtxFor<'a, Self> {

@@ -417,7 +425,7 @@ impl ConfigureEngineEvm<HlExecutionData> for HlEvmConfig {
             ctx: EthBlockExecutionCtx {
                 parent_hash: block.header.parent_hash,
                 parent_beacon_block_root: block.header.parent_beacon_block_root,
-                ommers: &block.body.ommers,
+                ommers: &EMPTY_OMMERS,
                 withdrawals: block.body.withdrawals.as_ref().map(Cow::Borrowed),
             },
             extras: HlExtras {

View File

@@ -4,33 +4,30 @@ use crate::{
     hardforks::HlHardforks,
     node::{
         primitives::TransactionSigned,
-        types::{ReadPrecompileInput, ReadPrecompileResult},
+        types::{HlExtras, ReadPrecompileInput, ReadPrecompileResult},
     },
 };
 use alloy_consensus::{Transaction, TxReceipt};
-use alloy_eips::{eip7685::Requests, Encodable2718};
+use alloy_eips::{Encodable2718, eip7685::Requests};
 use alloy_evm::{block::ExecutableTx, eth::receipt_builder::ReceiptBuilderCtx};
-use alloy_primitives::{address, hex, Address, Bytes, U160, U256};
+use alloy_primitives::{Address, Bytes, U160, U256, address, hex};
 use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
 use reth_evm::{
-    block::{BlockValidationError, CommitChanges},
+    Database, Evm, FromRecoveredTx, FromTxWithEncoded, IntoTxEnv, OnStateHook,
+    block::BlockValidationError,
     eth::receipt_builder::ReceiptBuilder,
     execute::{BlockExecutionError, BlockExecutor},
     precompiles::{DynPrecompile, PrecompileInput, PrecompilesMap},
-    Database, Evm, FromRecoveredTx, FromTxWithEncoded, IntoTxEnv, OnStateHook,
 };
 use reth_provider::BlockExecutionResult;
 use reth_revm::State;
 use revm::{
-    context::{
-        result::{ExecutionResult, ResultAndState},
-        TxEnv,
-    },
+    DatabaseCommit,
+    context::{TxEnv, result::ResultAndState},
     interpreter::instructions::utility::IntoU256,
     precompile::{PrecompileError, PrecompileOutput, PrecompileResult},
     primitives::HashMap,
     state::Bytecode,
-    DatabaseCommit,
 };

 pub fn is_system_transaction(tx: &TransactionSigned) -> bool {

@@ -87,12 +84,12 @@ impl<'a, DB, EVM, Spec, R: ReceiptBuilder> HlBlockExecutor<'a, EVM, Spec, R>
 where
     DB: Database + 'a,
     EVM: Evm<
         DB = &'a mut State<DB>,
         Precompiles = PrecompilesMap,
         Tx: FromRecoveredTx<R::Transaction>
             + FromRecoveredTx<TransactionSigned>
             + FromTxWithEncoded<TransactionSigned>,
     >,
     Spec: EthereumHardforks + HlHardforks + EthChainSpec + Hardforks + Clone,
     R: ReceiptBuilder<Transaction = TransactionSigned, Receipt: TxReceipt>,
     <R as ReceiptBuilder>::Transaction: Unpin + From<TransactionSigned>,

@@ -102,7 +99,7 @@ where
 {
     /// Creates a new HlBlockExecutor.
     pub fn new(mut evm: EVM, ctx: HlBlockExecutionCtx<'a>, spec: Spec, receipt_builder: R) -> Self {
-        apply_precompiles(&mut evm, &ctx);
+        apply_precompiles(&mut evm, &ctx.extras);
         Self { spec, evm, gas_used: 0, receipts: vec![], receipt_builder, ctx }
     }

@@ -110,7 +107,9 @@ where
         const COREWRITER_ENABLED_BLOCK_NUMBER: u64 = 7578300;
         const COREWRITER_CONTRACT_ADDRESS: Address =
             address!("0x3333333333333333333333333333333333333333");
-        const COREWRITER_CODE: &[u8] = &hex!("608060405234801561000f575f5ffd5b5060043610610029575f3560e01c806317938e131461002d575b5f5ffd5b61004760048036038101906100429190610123565b610049565b005b5f5f90505b61019081101561006557808060010191505061004e565b503373ffffffffffffffffffffffffffffffffffffffff167f8c7f585fb295f7eb1e6aeb8fba61b23a4fe60beda405f0045073b185c74412e383836040516100ae9291906101c8565b60405180910390a25050565b5f5ffd5b5f5ffd5b5f5ffd5b5f5ffd5b5f5ffd5b5f5f83601f8401126100e3576100e26100c2565b5b8235905067ffffffffffffffff811115610100576100ff6100c6565b5b60208301915083600182028301111561011c5761011b6100ca565b5b9250929050565b5f5f60208385031215610139576101386100ba565b5b5f83013567ffffffffffffffff811115610156576101556100be565b5b610162858286016100ce565b92509250509250929050565b5f82825260208201905092915050565b828183375f83830152505050565b5f601f19601f8301169050919050565b5f6101a7838561016e565b93506101b483858461017e565b6101bd8361018c565b840190509392505050565b5f6020820190508181035f8301526101e181848661019c565b9050939250505056fea2646970667358221220f01517e1fbaff8af4bd72cb063cccecbacbb00b07354eea7dd52265d355474fb64736f6c634300081c0033");
+        const COREWRITER_CODE: &[u8] = &hex!(
+            "608060405234801561000f575f5ffd5b5060043610610029575f3560e01c806317938e131461002d575b5f5ffd5b61004760048036038101906100429190610123565b610049565b005b5f5f90505b61019081101561006557808060010191505061004e565b503373ffffffffffffffffffffffffffffffffffffffff167f8c7f585fb295f7eb1e6aeb8fba61b23a4fe60beda405f0045073b185c74412e383836040516100ae9291906101c8565b60405180910390a25050565b5f5ffd5b5f5ffd5b5f5ffd5b5f5ffd5b5f5ffd5b5f5f83601f8401126100e3576100e26100c2565b5b8235905067ffffffffffffffff811115610100576100ff6100c6565b5b60208301915083600182028301111561011c5761011b6100ca565b5b9250929050565b5f5f60208385031215610139576101386100ba565b5b5f83013567ffffffffffffffff811115610156576101556100be565b5b610162858286016100ce565b92509250509250929050565b5f82825260208201905092915050565b828183375f83830152505050565b5f601f19601f8301169050919050565b5f6101a7838561016e565b93506101b483858461017e565b6101bd8361018c565b840190509392505050565b5f6020820190508181035f8301526101e181848661019c565b9050939250505056fea2646970667358221220f01517e1fbaff8af4bd72cb063cccecbacbb00b07354eea7dd52265d355474fb64736f6c634300081c0033"
+        );

         if self.evm.block().number != U256::from(COREWRITER_ENABLED_BLOCK_NUMBER) {
             return Ok(());

@@ -137,12 +136,12 @@ impl<'a, DB, E, Spec, R> BlockExecutor for HlBlockExecutor<'a, E, Spec, R>
 where
     DB: Database + 'a,
     E: Evm<
         DB = &'a mut State<DB>,
         Tx: FromRecoveredTx<R::Transaction>
             + FromRecoveredTx<TransactionSigned>
             + FromTxWithEncoded<TransactionSigned>,
         Precompiles = PrecompilesMap,
     >,
     Spec: EthereumHardforks + HlHardforks + EthChainSpec + Hardforks,
     R: ReceiptBuilder<Transaction = TransactionSigned, Receipt: TxReceipt>,
     <R as ReceiptBuilder>::Transaction: Unpin + From<TransactionSigned>,

@@ -155,17 +154,16 @@ where
     type Evm = E;

     fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> {
-        apply_precompiles(&mut self.evm, &self.ctx);
+        apply_precompiles(&mut self.evm, &self.ctx.extras);
         self.deploy_corewriter_contract()?;
         Ok(())
     }

-    fn execute_transaction_with_commit_condition(
+    fn execute_transaction_without_commit(
         &mut self,
         tx: impl ExecutableTx<Self>,
-        f: impl FnOnce(&ExecutionResult<<Self::Evm as Evm>::HaltReason>) -> CommitChanges,
-    ) -> Result<Option<u64>, BlockExecutionError> {
+    ) -> Result<ResultAndState<<Self::Evm as Evm>::HaltReason>, BlockExecutionError> {
         // The sum of the transaction's gas limit, Tg, and the gas utilized in this block prior,
         // must be no greater than the block's gasLimit.
         let block_available_gas = self.evm.block().gas_limit - self.gas_used;

@@ -178,15 +176,19 @@ where
                 .into());
         }

-        // Execute transaction.
-        let ResultAndState { result, mut state } = self
-            .evm
-            .transact(&tx)
-            .map_err(|err| BlockExecutionError::evm(err, tx.tx().trie_hash()))?;
-
-        if !f(&result).should_commit() {
-            return Ok(None);
-        }
+        // Execute transaction and return the result
+        self.evm.transact(&tx).map_err(|err| {
+            let hash = tx.tx().trie_hash();
+            BlockExecutionError::evm(err, hash)
+        })
+    }
+
+    fn commit_transaction(
+        &mut self,
+        output: ResultAndState<<Self::Evm as Evm>::HaltReason>,
+        tx: impl ExecutableTx<Self>,
+    ) -> Result<u64, BlockExecutionError> {
+        let ResultAndState { result, mut state } = output;

         let gas_used = result.gas_used();

@@ -215,7 +217,7 @@ where
         // Commit the state changes.
         self.evm.db_mut().commit(state);

-        Ok(Some(gas_used))
+        Ok(gas_used)
     }

     fn finish(self) -> Result<(Self::Evm, BlockExecutionResult<R::Receipt>), BlockExecutionError> {

@@ -240,10 +242,9 @@ where
     }
 }

-fn apply_precompiles<'a, DB, EVM>(evm: &mut EVM, ctx: &HlBlockExecutionCtx<'a>)
+pub fn apply_precompiles<EVM>(evm: &mut EVM, extras: &HlExtras)
 where
-    EVM: Evm<DB = &'a mut State<DB>, Precompiles = PrecompilesMap>,
-    DB: Database + 'a,
+    EVM: Evm<Precompiles = PrecompilesMap>,
 {
     let block_number = evm.block().number;
     let precompiles_mut = evm.precompiles_mut();

@@ -255,9 +256,7 @@ where
             precompiles_mut.apply_precompile(&address, |_| None);
         }
     }

-    for (address, precompile) in
-        ctx.extras.read_precompile_calls.clone().unwrap_or_default().0.iter()
-    {
+    for (address, precompile) in extras.read_precompile_calls.clone().unwrap_or_default().0.iter() {
         let precompile = precompile.clone();
         precompiles_mut.apply_precompile(address, |_| {
             let precompiles_map: HashMap<ReadPrecompileInput, ReadPrecompileResult> =

@@ -271,7 +270,7 @@ where
     // NOTE: This is adapted from hyperliquid-dex/hyper-evm-sync#5
     const WARM_PRECOMPILES_BLOCK_NUMBER: u64 = 8_197_684;
     if block_number >= U256::from(WARM_PRECOMPILES_BLOCK_NUMBER) {
-        fill_all_precompiles(ctx, precompiles_mut);
+        fill_all_precompiles(extras, precompiles_mut);
     }
 }

@@ -279,9 +278,9 @@ fn address_to_u64(address: Address) -> u64 {
     address.into_u256().try_into().unwrap()
 }

-fn fill_all_precompiles<'a>(ctx: &HlBlockExecutionCtx<'a>, precompiles_mut: &mut PrecompilesMap) {
+fn fill_all_precompiles(extras: &HlExtras, precompiles_mut: &mut PrecompilesMap) {
     let lowest_address = 0x800;
-    let highest_address = ctx.extras.highest_precompile_address.map_or(0x80D, address_to_u64);
+    let highest_address = extras.highest_precompile_address.map_or(0x80D, address_to_u64);
     for address in lowest_address..=highest_address {
         let address = Address::from(U160::from(address));
         precompiles_mut.apply_precompile(&address, |f| {

View File

@@ -7,16 +7,16 @@ use crate::evm::{
     spec::HlSpecId,
     transaction::HlTxEnv,
 };
-use reth_evm::{precompiles::PrecompilesMap, Database, EvmEnv, EvmFactory};
+use reth_evm::{Database, EvmEnv, EvmFactory, precompiles::PrecompilesMap};
 use reth_revm::Context;
 use revm::{
+    Inspector,
     context::{
-        result::{EVMError, HaltReason},
         TxEnv,
+        result::{EVMError, HaltReason},
     },
     inspector::NoOpInspector,
     precompile::{PrecompileSpecId, Precompiles},
-    Inspector,
 };

 /// Factory producing [`HlEvm`].

View File

@@ -1,6 +1,6 @@
 use crate::{
     evm::{
-        api::{ctx::HlContext, HlEvmInner},
+        api::{HlEvmInner, ctx::HlContext},
         spec::HlSpecId,
         transaction::HlTxEnv,
     },

@@ -10,18 +10,18 @@ use alloy_primitives::{Address, Bytes};
 use config::HlEvmConfig;
 use reth::{
     api::FullNodeTypes,
-    builder::{components::ExecutorBuilder, BuilderContext},
+    builder::{BuilderContext, components::ExecutorBuilder},
 };
 use reth_evm::{Database, Evm, EvmEnv};
 use revm::{
-    context::{
-        result::{EVMError, ExecutionResult, HaltReason, Output, ResultAndState, SuccessReason},
-        BlockEnv, TxEnv,
-    },
-    handler::{instructions::EthInstructions, EthPrecompiles, PrecompileProvider},
-    interpreter::{interpreter::EthInterpreter, InterpreterResult},
-    state::EvmState,
     Context, ExecuteEvm, InspectEvm, Inspector,
+    context::{
+        BlockEnv, TxEnv,
+        result::{EVMError, ExecutionResult, HaltReason, Output, ResultAndState, SuccessReason},
+    },
+    handler::{EthPrecompiles, PrecompileProvider, instructions::EthInstructions},
+    interpreter::{InterpreterResult, interpreter::EthInterpreter},
+    state::EvmState,
 };
 use std::ops::{Deref, DerefMut};

@@ -32,6 +32,8 @@ mod factory;
 mod patch;
 pub mod receipt_builder;

+pub use executor::apply_precompiles;
+
 /// HL EVM implementation.
 ///
 /// This is a wrapper type around the `revm` evm with optional [`Inspector`] (tracing)

@@ -96,11 +98,7 @@ where
         &mut self,
         tx: Self::Tx,
     ) -> Result<ResultAndState<Self::HaltReason>, Self::Error> {
-        if self.inspect {
-            self.inner.inspect_tx(tx)
-        } else {
-            self.inner.transact(tx)
-        }
+        if self.inspect { self.inner.inspect_tx(tx) } else { self.inner.transact(tx) }
     }

     fn transact_system_call(

View File

@@ -1,4 +1,4 @@
-use alloy_primitives::{address, Address};
+use alloy_primitives::{Address, address};
 use reth_evm::block::BlockExecutionError;
 use revm::{primitives::HashMap, state::Account};

View File

@@ -1,5 +1,6 @@
 use crate::node::primitives::TransactionSigned;
 use alloy_evm::eth::receipt_builder::{ReceiptBuilder, ReceiptBuilderCtx};
+use reth_codecs::alloy::transaction::Envelope;
 use reth_evm::Evm;
 use reth_primitives::Receipt;

src/node/migrate.rs Normal file (429 lines)
View File

@@ -0,0 +1,429 @@
use alloy_consensus::Header;
use alloy_primitives::{B256, BlockHash, Bytes, U256, b256, hex::ToHexExt};
use reth::{
api::NodeTypesWithDBAdapter,
args::{DatabaseArgs, DatadirArgs},
dirs::{ChainPath, DataDirPath},
};
use reth_chainspec::EthChainSpec;
use reth_db::{
DatabaseEnv,
mdbx::{RO, tx::Tx},
models::CompactU256,
static_file::iter_static_files,
table::Decompress,
tables,
};
use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW},
transaction::{DbTx, DbTxMut},
};
use reth_errors::ProviderResult;
use reth_ethereum_primitives::EthereumReceipt;
use reth_provider::{
DatabaseProvider, ProviderFactory, ReceiptProvider, StaticFileProviderFactory,
StaticFileSegment, StaticFileWriter,
providers::{NodeTypesForProvider, StaticFileProvider},
static_file::SegmentRangeInclusive,
};
use std::{fs::File, io::Write, path::PathBuf, sync::Arc};
use tracing::{info, warn};
use crate::{HlHeader, HlPrimitives, chainspec::HlChainSpec};
pub(crate) trait HlNodeType:
NodeTypesForProvider<ChainSpec = HlChainSpec, Primitives = HlPrimitives>
{
}
impl<N: NodeTypesForProvider<ChainSpec = HlChainSpec, Primitives = HlPrimitives>> HlNodeType for N {}
pub(super) struct Migrator<N: HlNodeType> {
data_dir: ChainPath<DataDirPath>,
provider_factory: ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>,
}
impl<N: HlNodeType> Migrator<N> {
const MIGRATION_PATH_SUFFIX: &'static str = "migration-tmp";
pub fn new(
chain_spec: HlChainSpec,
datadir: DatadirArgs,
database_args: DatabaseArgs,
) -> eyre::Result<Self> {
let data_dir = datadir.clone().resolve_datadir(chain_spec.chain());
let provider_factory = Self::provider_factory(chain_spec, datadir, database_args)?;
Ok(Self { data_dir, provider_factory })
}
pub fn sf_provider(&self) -> StaticFileProvider<HlPrimitives> {
self.provider_factory.static_file_provider()
}
pub fn migrate_db(&self) -> eyre::Result<()> {
let is_empty = Self::highest_block_number(&self.sf_provider()).is_none();
if is_empty {
return Ok(());
}
self.migrate_db_inner()
}
fn highest_block_number(sf_provider: &StaticFileProvider<HlPrimitives>) -> Option<u64> {
sf_provider.get_highest_static_file_block(StaticFileSegment::Headers)
}
fn migrate_db_inner(&self) -> eyre::Result<()> {
let migrated_mdbx = MigratorMdbx::<N>(self).migrate_mdbx()?;
let migrated_static_files = MigrateStaticFiles::<N>(self).migrate_static_files()?;
if migrated_mdbx || migrated_static_files {
info!("Database migrated successfully");
}
Ok(())
}
fn conversion_tmp_dir(&self) -> PathBuf {
self.data_dir.data_dir().join(Self::MIGRATION_PATH_SUFFIX)
}
fn provider_factory(
chain_spec: HlChainSpec,
datadir: DatadirArgs,
database_args: DatabaseArgs,
) -> eyre::Result<ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>> {
let data_dir = datadir.clone().resolve_datadir(chain_spec.chain());
let db_env = reth_db::init_db(data_dir.db(), database_args.database_args())?;
let static_file_provider = StaticFileProvider::read_only(data_dir.static_files(), false)?;
let db = Arc::new(db_env);
Ok(ProviderFactory::new(db, Arc::new(chain_spec), static_file_provider))
}
}
struct MigratorMdbx<'a, N: HlNodeType>(&'a Migrator<N>);
impl<'a, N: HlNodeType> MigratorMdbx<'a, N> {
fn migrate_mdbx(&self) -> eyre::Result<bool> {
// If any header is in the old format we need to migrate, so check the first and the last one
let db_env = self.0.provider_factory.provider()?;
let mut cursor = db_env.tx_ref().cursor_read::<tables::Headers<Bytes>>()?;
let migration_needed = {
let first_is_old = match cursor.first()? {
Some((number, header)) => using_old_header(number, &header),
None => false,
};
let last_is_old = match cursor.last()? {
Some((number, header)) => using_old_header(number, &header),
None => false,
};
first_is_old || last_is_old
};
if !migration_needed {
return Ok(false);
}
check_if_migration_enabled()?;
self.migrate_mdbx_inner()?;
Ok(true)
}
fn migrate_mdbx_inner(&self) -> eyre::Result<()> {
// There shouldn't be many headers in mdbx, but we still stage them through a file for safety.
info!("Old database detected, migrating mdbx...");
let conversion_tmp = self.0.conversion_tmp_dir();
let tmp_path = conversion_tmp.join("headers.rmp");
if conversion_tmp.exists() {
std::fs::remove_dir_all(&conversion_tmp)?;
}
std::fs::create_dir_all(&conversion_tmp)?;
let count = self.export_old_headers(&tmp_path)?;
self.import_new_headers(tmp_path, count)?;
Ok(())
}
fn export_old_headers(&self, tmp_path: &PathBuf) -> Result<i32, eyre::Error> {
let db_env = self.0.provider_factory.provider()?;
let mut cursor_read = db_env.tx_ref().cursor_read::<tables::Headers<Bytes>>()?;
let mut tmp_writer = File::create(tmp_path)?;
let mut count = 0;
let old_headers = cursor_read.walk(None)?.filter_map(|row| {
let (block_number, header) = row.ok()?;
if !using_old_header(block_number, &header) {
None
} else {
Some((block_number, Header::decompress(&header).ok()?))
}
});
for (block_number, header) in old_headers {
let receipt =
db_env.receipts_by_block(block_number.into())?.expect("Receipt not found");
let new_header = to_hl_header(receipt, header);
tmp_writer.write_all(&rmp_serde::to_vec(&(block_number, new_header))?)?;
count += 1;
}
Ok(count)
}
fn import_new_headers(&self, tmp_path: PathBuf, count: i32) -> Result<(), eyre::Error> {
let mut tmp_reader = File::open(tmp_path)?;
let db_env = self.0.provider_factory.provider_rw()?;
let mut cursor_write = db_env.tx_ref().cursor_write::<tables::Headers<Bytes>>()?;
for _ in 0..count {
let (number, header) = rmp_serde::from_read::<_, (u64, HlHeader)>(&mut tmp_reader)?;
cursor_write.upsert(number, &rmp_serde::to_vec(&header)?.into())?;
}
db_env.commit()?;
Ok(())
}
}
fn check_if_migration_enabled() -> Result<(), eyre::Error> {
if std::env::var("EXPERIMENTAL_MIGRATE_DB").is_err() {
let err_msg = concat!(
"Detected an old database format but experimental database migration is currently disabled. ",
"To enable migration, set EXPERIMENTAL_MIGRATE_DB=1, or alternatively, resync your node (safest option)."
);
warn!("{}", err_msg);
return Err(eyre::eyre!("{}", err_msg));
}
Ok(())
}
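// Invocation sketch (hypothetical wiring; `chain_spec`, `datadir` and `db_args`
// would come from CLI parsing): the operator opts in via the environment variable,
// then the migration runs before the node opens the database read-write.
//
//     std::env::set_var("EXPERIMENTAL_MIGRATE_DB", "1");
//     Migrator::<N>::new(chain_spec, datadir, db_args)?.migrate_db()?;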
struct MigrateStaticFiles<'a, N: HlNodeType>(&'a Migrator<N>);
impl<'a, N: HlNodeType> MigrateStaticFiles<'a, N> {
fn iterate_files_for_segment(
&self,
block_range: SegmentRangeInclusive,
dir: &PathBuf,
) -> eyre::Result<Vec<(PathBuf, String)>> {
let prefix = StaticFileSegment::Headers.filename(&block_range);
let entries = std::fs::read_dir(dir)?
.map(|res| res.map(|e| e.path()))
.collect::<Result<Vec<_>, _>>()?;
Ok(entries
.into_iter()
.filter_map(|path| {
let file_name = path.file_name().and_then(|f| f.to_str())?;
if file_name.starts_with(&prefix) {
Some((path.clone(), file_name.to_string()))
} else {
None
}
})
.collect())
}
fn create_placeholder(&self, block_range: SegmentRangeInclusive) -> eyre::Result<()> {
// Note: the direction is reversed here relative to `move_static_files_for_segment`:
// we leave symlinks in the tmp dir that point at the live static files.
let src = self.0.data_dir.static_files();
let dst = self.0.conversion_tmp_dir();
for (src_path, file_name) in self.iterate_files_for_segment(block_range, &src)? {
let dst_path = dst.join(file_name);
if dst_path.exists() {
std::fs::remove_file(&dst_path)?;
}
std::os::unix::fs::symlink(src_path, dst_path)?;
}
Ok(())
}
fn move_static_files_for_segment(
&self,
block_range: SegmentRangeInclusive,
) -> eyre::Result<()> {
let src = self.0.conversion_tmp_dir();
let dst = self.0.data_dir.static_files();
for (src_path, file_name) in self.iterate_files_for_segment(block_range, &src)? {
let dst_path = dst.join(file_name);
std::fs::remove_file(&dst_path)?;
std::fs::rename(&src_path, &dst_path)?;
}
// The StaticFileProvider opened over the tmp dir still needs the file to exist, so we leave a symlink behind.
self.create_placeholder(block_range)
}
fn migrate_static_files(&self) -> eyre::Result<bool> {
let conversion_tmp = self.0.conversion_tmp_dir();
let old_path = self.0.data_dir.static_files();
if conversion_tmp.exists() {
std::fs::remove_dir_all(&conversion_tmp)?;
}
std::fs::create_dir_all(&conversion_tmp)?;
let mut all_static_files = iter_static_files(&old_path)?;
let all_static_files =
all_static_files.remove(&StaticFileSegment::Headers).unwrap_or_default();
let mut first = true;
for (block_range, _tx_ranges) in all_static_files {
let migration_needed = self.using_old_header(block_range.start())?
|| self.using_old_header(block_range.end())?;
if !migration_needed {
// Create a placeholder symlink
self.create_placeholder(block_range)?;
continue;
}
if first {
check_if_migration_enabled()?;
info!("Old database detected, migrating static files...");
first = false;
}
let sf_provider = self.0.sf_provider();
let sf_tmp_provider = StaticFileProvider::<HlPrimitives>::read_write(&conversion_tmp)?;
let provider = self.0.provider_factory.provider()?;
let block_range_for_filename = sf_provider.find_fixed_range(block_range.start());
migrate_single_static_file(&sf_tmp_provider, &sf_provider, &provider, block_range)?;
self.move_static_files_for_segment(block_range_for_filename)?;
}
Ok(!first)
}
fn using_old_header(&self, number: u64) -> eyre::Result<bool> {
let sf_provider = self.0.sf_provider();
let content = old_headers_range(&sf_provider, number..=number)?;
let &[row] = &content.as_slice() else {
warn!("No header found for block {}", number);
return Ok(false);
};
Ok(using_old_header(number, &row[0]))
}
}
// Problem: `Decompress` panics rather than returning an error when the header bytes
// are not in the expected format, so we fall back to heuristics to tell the two formats apart.
fn is_old_header(header: &[u8]) -> bool {
const SHA3_UNCLE_OFFSET: usize = 0x24;
const SHA3_UNCLE_HASH: B256 =
b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347");
const GENESIS_PREFIX: [u8; 4] = [0x01, 0x20, 0x00, 0xf8];
let Some(sha3_uncle_hash) = header.get(SHA3_UNCLE_OFFSET..SHA3_UNCLE_OFFSET + 32) else {
return false;
};
if sha3_uncle_hash == SHA3_UNCLE_HASH {
return true;
}
// the genesis block header is encoded differently, so match on its known prefix instead
if header.starts_with(&GENESIS_PREFIX) {
return true;
}
false
}
fn is_new_header(header: &[u8]) -> bool {
rmp_serde::from_slice::<HlHeader>(header).is_ok()
}
fn migrate_single_static_file<N: HlNodeType>(
sf_out: &StaticFileProvider<HlPrimitives>,
sf_in: &StaticFileProvider<HlPrimitives>,
provider: &DatabaseProvider<Tx<RO>, NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>,
block_range: SegmentRangeInclusive,
) -> Result<(), eyre::Error> {
info!("Migrating block range {}...", block_range);
// Split the block range into chunks of 50,000 blocks.
const CHUNK_SIZE: u64 = 50000;
for chunk in (block_range.start()..=block_range.end()).step_by(CHUNK_SIZE as usize) {
let end = std::cmp::min(chunk + CHUNK_SIZE - 1, block_range.end());
let block_range = chunk..=end;
let headers = old_headers_range(sf_in, block_range.clone())?;
let receipts = provider.receipts_by_block_range(block_range.clone())?;
assert_eq!(headers.len(), receipts.len());
let mut writer = sf_out.get_writer(*block_range.start(), StaticFileSegment::Headers)?;
let new_headers = std::iter::zip(headers, receipts)
.map(|(header, receipts)| {
let eth_header = Header::decompress(&header[0]).unwrap();
let hl_header = to_hl_header(receipts, eth_header);
let difficulty: U256 = CompactU256::decompress(&header[1]).unwrap().into();
let hash = BlockHash::decompress(&header[2]).unwrap();
(hl_header, difficulty, hash)
})
.collect::<Vec<_>>();
for header in new_headers {
writer.append_header(&header.0, header.1, &header.2)?;
}
writer.commit().unwrap();
info!("Migrated block range {:?}...", block_range);
}
Ok(())
}
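// Worked example of the chunking above (illustrative): for a range 0..=120_000
// with CHUNK_SIZE = 50_000, the loop visits chunk starts 0, 50_000 and 100_000
// with inclusive ends 49_999, 99_999 and 120_000, so each writer commit covers
// at most 50_000 headers.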
fn to_hl_header(receipts: Vec<EthereumReceipt>, eth_header: Header) -> HlHeader {
let system_tx_count = receipts.iter().filter(|r| r.cumulative_gas_used == 0).count();
HlHeader::from_ethereum_header(eth_header, &receipts, system_tx_count as u64)
}
fn old_headers_range(
provider: &StaticFileProvider<HlPrimitives>,
block_range: impl std::ops::RangeBounds<u64>,
) -> ProviderResult<Vec<Vec<Vec<u8>>>> {
Ok(provider
.fetch_range_with_predicate(
StaticFileSegment::Headers,
to_range(block_range),
|cursor, number| {
cursor.get(number.into(), 0b111).map(|rows| {
rows.map(|columns| columns.into_iter().map(|column| column.to_vec()).collect())
})
},
|_| true,
)?
.into_iter()
.collect())
}
// Copied from reth
fn to_range<R: std::ops::RangeBounds<u64>>(bounds: R) -> std::ops::Range<u64> {
let start = match bounds.start_bound() {
std::ops::Bound::Included(&v) => v,
std::ops::Bound::Excluded(&v) => v + 1,
std::ops::Bound::Unbounded => 0,
};
let end = match bounds.end_bound() {
std::ops::Bound::Included(&v) => v + 1,
std::ops::Bound::Excluded(&v) => v,
std::ops::Bound::Unbounded => u64::MAX,
};
start..end
}
fn using_old_header(number: u64, header: &[u8]) -> bool {
let deserialized_old = is_old_header(header);
let deserialized_new = is_new_header(header);
assert!(
deserialized_old ^ deserialized_new,
"Header is not valid: {} {}\ndeserialized_old: {}\ndeserialized_new: {}",
number,
header.encode_hex(),
deserialized_old,
deserialized_new
);
deserialized_old && !deserialized_new
}
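// A minimal sketch (hypothetical test, not part of this file) exercising the
// format-detection heuristics above: a freshly rmp-serialized HlHeader should be
// classified as new-format only, which also satisfies the XOR assertion in
// `using_old_header`.
#[cfg(test)]
mod format_detection_sketch {
    use super::*;

    #[test]
    fn rmp_serialized_header_is_new_format() {
        let bytes = rmp_serde::to_vec(&HlHeader::default()).unwrap();
        assert!(is_new_header(&bytes));
        assert!(!is_old_header(&bytes));
        assert!(!using_old_header(0, &bytes));
    }
}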

View File

@@ -4,11 +4,11 @@ use crate::{
         pool::HlPoolBuilder,
         primitives::{HlBlock, HlPrimitives},
         rpc::{
+            HlEthApiBuilder,
             engine_api::{
                 builder::HlEngineApiBuilder, payload::HlPayloadTypes,
                 validator::HlPayloadValidatorBuilder,
             },
-            HlEthApiBuilder,
         },
         storage::HlStorage,
     },
@@ -20,19 +20,20 @@ use network::HlNetworkBuilder;
 use reth::{
     api::{FullNodeTypes, NodeTypes},
     builder::{
+        Node, NodeAdapter,
         components::{ComponentsBuilder, NoopPayloadServiceBuilder},
         rpc::RpcAddOns,
-        Node, NodeAdapter,
     },
 };
 use reth_engine_primitives::ConsensusEngineHandle;
 use std::{marker::PhantomData, sync::Arc};
-use tokio::sync::{oneshot, Mutex};
+use tokio::sync::{Mutex, oneshot};
 pub mod cli;
 pub mod consensus;
 pub mod engine;
 pub mod evm;
+pub mod migrate;
 pub mod network;
 pub mod primitives;
 pub mod rpc;
@@ -49,14 +50,23 @@ pub type HlNodeAddOns<N> =
 pub struct HlNode {
     engine_handle_rx: Arc<Mutex<Option<oneshot::Receiver<ConsensusEngineHandle<HlPayloadTypes>>>>>,
     block_source_config: BlockSourceConfig,
+    debug_cutoff_height: Option<u64>,
 }
 impl HlNode {
     pub fn new(
         block_source_config: BlockSourceConfig,
+        debug_cutoff_height: Option<u64>,
     ) -> (Self, oneshot::Sender<ConsensusEngineHandle<HlPayloadTypes>>) {
         let (tx, rx) = oneshot::channel();
-        (Self { engine_handle_rx: Arc::new(Mutex::new(Some(rx))), block_source_config }, tx)
+        (
+            Self {
+                engine_handle_rx: Arc::new(Mutex::new(Some(rx))),
+                block_source_config,
+                debug_cutoff_height,
+            },
+            tx,
+        )
     }
 }
@@ -84,6 +94,7 @@ impl HlNode {
             .network(HlNetworkBuilder {
                 engine_handle_rx: self.engine_handle_rx.clone(),
                 block_source_config: self.block_source_config.clone(),
+                debug_cutoff_height: self.debug_cutoff_height,
             })
             .consensus(HlConsensusBuilder::default())
     }
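The constructor change above threads the optional debug cutoff height through to the network builder. A call-site sketch (values and surrounding wiring hypothetical):

    let (node, engine_handle_tx) = HlNode::new(block_source_config, Some(1_000_000));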

View File

@@ -8,7 +8,7 @@ use reth_primitives::NodePrimitives;
 use service::{BlockMsg, ImportEvent, Outcome};
 use std::{
     fmt,
-    task::{ready, Context, Poll},
+    task::{Context, Poll, ready},
 };
 use crate::node::network::HlNewBlock;

View File

@@ -1,17 +1,17 @@
 use super::handle::ImportHandle;
 use crate::{
+    HlBlock, HlBlockBody,
     consensus::HlConsensus,
     node::{
         network::HlNewBlock,
         rpc::engine_api::payload::HlPayloadTypes,
         types::{BlockAndReceipts, EvmBlock},
     },
-    HlBlock, HlBlockBody,
 };
 use alloy_consensus::{BlockBody, Header};
 use alloy_primitives::U128;
 use alloy_rpc_types::engine::{ForkchoiceState, PayloadStatusEnum};
-use futures::{future::Either, stream::FuturesUnordered, StreamExt};
+use futures::{StreamExt, future::Either, stream::FuturesUnordered};
 use reth_engine_primitives::{ConsensusEngineHandle, EngineTypes};
 use reth_eth_wire::NewBlock;
 use reth_network::{
@@ -179,7 +179,7 @@ where
 #[cfg(test)]
 mod tests {
-    use crate::chainspec::hl::hl_mainnet;
+    use crate::{chainspec::hl::hl_mainnet, HlHeader};
     use super::*;
     use alloy_primitives::{B256, U128};
@@ -355,7 +355,7 @@ mod tests {
     /// Creates a test block message
     fn create_test_block() -> NewBlockMessage<HlNewBlock> {
         let block = HlBlock {
-            header: Header::default(),
+            header: HlHeader::default(),
             body: HlBlockBody {
                 inner: BlockBody {
                     transactions: Vec::new(),

View File

@@ -1,20 +1,20 @@
 #![allow(clippy::owned_cow)]
 use crate::{
+    HlBlock,
     consensus::HlConsensus,
     node::{
-        network::block_import::{handle::ImportHandle, service::ImportService, HlBlockImport},
+        HlNode,
+        network::block_import::{HlBlockImport, handle::ImportHandle, service::ImportService},
         primitives::HlPrimitives,
         rpc::engine_api::payload::HlPayloadTypes,
         types::ReadPrecompileCalls,
-        HlNode,
     },
-    pseudo_peer::{start_pseudo_peer, BlockSourceConfig},
-    HlBlock,
+    pseudo_peer::{BlockSourceConfig, start_pseudo_peer},
 };
 use alloy_rlp::{Decodable, Encodable};
 use reth::{
     api::{FullNodeTypes, TxTy},
-    builder::{components::NetworkBuilder, BuilderContext},
+    builder::{BuilderContext, components::NetworkBuilder},
     transaction_pool::{PoolTransaction, TransactionPool},
 };
 use reth_discv4::NodeRecord;
@@ -26,7 +26,7 @@ use reth_network_api::PeersInfo;
 use reth_provider::StageCheckpointReader;
 use reth_stages_types::StageId;
 use std::sync::Arc;
-use tokio::sync::{mpsc, oneshot, Mutex};
+use tokio::sync::{Mutex, mpsc, oneshot};
 use tracing::info;
 pub mod block_import;
@@ -38,10 +38,10 @@ pub struct HlNewBlock(pub NewBlock<HlBlock>);
 mod rlp {
     use super::*;
     use crate::{
+        HlBlockBody, HlHeader,
         node::primitives::{BlockBody, TransactionSigned},
-        HlBlockBody,
     };
-    use alloy_consensus::{BlobTransactionSidecar, Header};
+    use alloy_consensus::BlobTransactionSidecar;
     use alloy_primitives::{Address, U128};
     use alloy_rlp::{RlpDecodable, RlpEncodable};
     use alloy_rpc_types::Withdrawals;
@@ -50,9 +50,9 @@ mod rlp {
     #[derive(RlpEncodable, RlpDecodable)]
     #[rlp(trailing)]
     struct BlockHelper<'a> {
-        header: Cow<'a, Header>,
+        header: Cow<'a, HlHeader>,
         transactions: Cow<'a, Vec<TransactionSigned>>,
-        ommers: Cow<'a, Vec<Header>>,
+        ommers: Cow<'a, Vec<HlHeader>>,
         withdrawals: Option<Cow<'a, Withdrawals>>,
     }
@@ -142,6 +142,8 @@ pub struct HlNetworkBuilder {
         Arc<Mutex<Option<oneshot::Receiver<ConsensusEngineHandle<HlPayloadTypes>>>>>,
     pub(crate) block_source_config: BlockSourceConfig,
+    pub(crate) debug_cutoff_height: Option<u64>,
 }
 impl HlNetworkBuilder {
@@ -203,6 +205,7 @@ where
         pool: Pool,
     ) -> eyre::Result<Self::Network> {
         let block_source_config = self.block_source_config.clone();
+        let debug_cutoff_height = self.debug_cutoff_height;
         let handle =
             ctx.start_network(NetworkManager::builder(self.network_config(ctx)?).await?, pool);
         let local_node_record = handle.local_node_record();
@@ -223,6 +226,7 @@ where
                 block_source_config
                     .create_cached_block_source((*chain_spec).clone(), next_block_number)
                     .await,
+                debug_cutoff_height,
             )
             .await
             .unwrap();

View File

@@ -6,12 +6,12 @@
 //! Ethereum transaction pool only supports TransactionSigned (EthereumTxEnvelope<TxEip4844>),
 //! hence this placeholder for the transaction pool.
-use crate::node::{primitives::TransactionSigned, HlNode};
+use crate::node::{HlNode, primitives::TransactionSigned};
 use alloy_consensus::{
-    error::ValueError, EthereumTxEnvelope, Transaction as TransactionTrait, TxEip4844,
+    EthereumTxEnvelope, Transaction as TransactionTrait, TxEip4844, error::ValueError,
 };
-use alloy_eips::{eip7702::SignedAuthorization, Typed2718};
-use alloy_primitives::{Address, Bytes, ChainId, TxHash, TxKind, B256, U256};
+use alloy_eips::{Typed2718, eip7702::SignedAuthorization};
+use alloy_primitives::{Address, B256, Bytes, ChainId, TxHash, TxKind, U256};
 use alloy_rpc_types::AccessList;
 use reth::{
     api::FullNodeTypes, builder::components::PoolBuilder, transaction_pool::PoolTransaction,
@@ -19,7 +19,7 @@ use reth::{
 use reth_ethereum_primitives::PooledTransactionVariant;
 use reth_primitives::Recovered;
 use reth_primitives_traits::InMemorySize;
-use reth_transaction_pool::{noop::NoopTransactionPool, EthPoolTransaction};
+use reth_transaction_pool::{EthPoolTransaction, noop::NoopTransactionPool};
 use std::sync::Arc;
 pub struct HlPoolBuilder;

View File

@@ -0,0 +1,49 @@
use super::{HlBlockBody, HlHeader, rlp};
use alloy_rlp::Encodable;
use reth_primitives_traits::{Block, InMemorySize};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
/// Block for HL
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct HlBlock {
pub header: HlHeader,
pub body: HlBlockBody,
}
impl InMemorySize for HlBlock {
fn size(&self) -> usize {
self.header.size() + self.body.size()
}
}
impl Block for HlBlock {
type Header = HlHeader;
type Body = HlBlockBody;
fn new(header: Self::Header, body: Self::Body) -> Self {
Self { header, body }
}
fn header(&self) -> &Self::Header {
&self.header
}
fn body(&self) -> &Self::Body {
&self.body
}
fn split(self) -> (Self::Header, Self::Body) {
(self.header, self.body)
}
fn rlp_length(header: &Self::Header, body: &Self::Body) -> usize {
rlp::BlockHelper {
header: Cow::Borrowed(header),
transactions: Cow::Borrowed(&body.inner.transactions),
ommers: Cow::Borrowed(&body.inner.ommers),
withdrawals: body.inner.withdrawals.as_ref().map(Cow::Borrowed),
sidecars: body.sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: body.read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: body.highest_precompile_address.as_ref().map(Cow::Borrowed),
}
.length()
}
}

View File

@@ -0,0 +1,77 @@
use alloy_consensus::BlobTransactionSidecar;
use alloy_primitives::Address;
use reth_primitives_traits::{BlockBody as BlockBodyTrait, InMemorySize};
use serde::{Deserialize, Serialize};
use crate::node::types::{ReadPrecompileCall, ReadPrecompileCalls};
use crate::{HlHeader, node::primitives::TransactionSigned};
/// Block body for HL. It is equivalent to the Ethereum [`BlockBody`] but additionally stores
/// blob-transaction sidecars and HL-specific read-precompile data.
#[derive(
Debug,
Clone,
Default,
PartialEq,
Eq,
Serialize,
Deserialize,
derive_more::Deref,
derive_more::DerefMut,
)]
pub struct HlBlockBody {
#[serde(flatten)]
#[deref]
#[deref_mut]
pub inner: BlockBody,
pub sidecars: Option<Vec<BlobTransactionSidecar>>,
pub read_precompile_calls: Option<ReadPrecompileCalls>,
pub highest_precompile_address: Option<Address>,
}
pub type BlockBody = alloy_consensus::BlockBody<TransactionSigned, HlHeader>;
impl InMemorySize for HlBlockBody {
fn size(&self) -> usize {
self.inner.size()
+ self
.sidecars
.as_ref()
.map_or(0, |s| s.capacity() * core::mem::size_of::<BlobTransactionSidecar>())
+ self
.read_precompile_calls
.as_ref()
.map_or(0, |s| s.0.capacity() * core::mem::size_of::<ReadPrecompileCall>())
}
}
impl BlockBodyTrait for HlBlockBody {
type Transaction = TransactionSigned;
type OmmerHeader = super::HlHeader;
fn transactions(&self) -> &[Self::Transaction] {
BlockBodyTrait::transactions(&self.inner)
}
fn into_ethereum_body(self) -> BlockBody {
self.inner
}
fn into_transactions(self) -> Vec<Self::Transaction> {
self.inner.into_transactions()
}
fn withdrawals(&self) -> Option<&alloy_rpc_types::Withdrawals> {
self.inner.withdrawals()
}
fn ommers(&self) -> Option<&[Self::OmmerHeader]> {
self.inner.ommers()
}
fn calculate_tx_root(&self) -> alloy_primitives::B256 {
alloy_consensus::proofs::calculate_transaction_root(
&self
.transactions()
.iter()
.filter(|tx| !tx.is_system_transaction())
.collect::<Vec<_>>(),
)
}
}
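Restated as a standalone sketch (helper name hypothetical): only user transactions participate in the transaction root above; system transactions are recognized by their zero gas price and skipped.

    fn root_participants(body: &HlBlockBody) -> Vec<&TransactionSigned> {
        body.transactions().iter().filter(|tx| !tx.is_system_transaction()).collect()
    }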

View File

@@ -0,0 +1,241 @@
use alloy_consensus::Header;
use alloy_primitives::{Address, B64, B256, BlockNumber, Bloom, Bytes, Sealable, U256};
use alloy_rlp::{RlpDecodable, RlpEncodable};
use reth_cli_commands::common::CliHeader;
use reth_codecs::Compact;
use reth_ethereum_primitives::EthereumReceipt;
use reth_primitives::{SealedHeader, logs_bloom};
use reth_primitives_traits::{BlockHeader, InMemorySize, serde_bincode_compat::RlpBincode};
use reth_rpc_convert::transaction::FromConsensusHeader;
use serde::{Deserialize, Serialize};
/// The header type of this node.
///
/// This type extends the regular Ethereum header with HL-specific extra fields.
#[derive(
Clone,
Debug,
PartialEq,
Eq,
Hash,
derive_more::AsRef,
derive_more::Deref,
Default,
RlpEncodable,
RlpDecodable,
Serialize,
Deserialize,
)]
#[serde(rename_all = "camelCase")]
pub struct HlHeader {
/// The regular eth header
#[as_ref]
#[deref]
pub inner: Header,
/// The extended header fields that are not part of the block hash
pub extras: HlHeaderExtras,
}
#[derive(
Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, RlpEncodable, RlpDecodable, Hash,
)]
pub struct HlHeaderExtras {
pub logs_bloom_with_system_txs: Bloom,
pub system_tx_count: u64,
}
impl HlHeader {
pub(crate) fn from_ethereum_header(header: Header, receipts: &[EthereumReceipt], system_tx_count: u64) -> HlHeader {
let logs_bloom = logs_bloom(receipts.iter().flat_map(|r| &r.logs));
HlHeader {
inner: header,
extras: HlHeaderExtras { logs_bloom_with_system_txs: logs_bloom, system_tx_count },
}
}
}
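// Presumably required only to satisfy upstream trait bounds: converting a bare
// `Header` into an `HlHeader` would have to invent the extras, so it is unreachable.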
impl From<Header> for HlHeader {
fn from(_value: Header) -> Self {
unreachable!()
}
}
impl AsRef<Self> for HlHeader {
fn as_ref(&self) -> &Self {
self
}
}
impl Sealable for HlHeader {
fn hash_slow(&self) -> B256 {
self.inner.hash_slow()
}
}
impl alloy_consensus::BlockHeader for HlHeader {
fn parent_hash(&self) -> B256 {
self.inner.parent_hash()
}
fn ommers_hash(&self) -> B256 {
self.inner.ommers_hash()
}
fn beneficiary(&self) -> Address {
self.inner.beneficiary()
}
fn state_root(&self) -> B256 {
self.inner.state_root()
}
fn transactions_root(&self) -> B256 {
self.inner.transactions_root()
}
fn receipts_root(&self) -> B256 {
self.inner.receipts_root()
}
fn withdrawals_root(&self) -> Option<B256> {
self.inner.withdrawals_root()
}
fn logs_bloom(&self) -> Bloom {
self.extras.logs_bloom_with_system_txs
}
fn difficulty(&self) -> U256 {
self.inner.difficulty()
}
fn number(&self) -> BlockNumber {
self.inner.number()
}
fn gas_limit(&self) -> u64 {
self.inner.gas_limit()
}
fn gas_used(&self) -> u64 {
self.inner.gas_used()
}
fn timestamp(&self) -> u64 {
self.inner.timestamp()
}
fn mix_hash(&self) -> Option<B256> {
self.inner.mix_hash()
}
fn nonce(&self) -> Option<B64> {
self.inner.nonce()
}
fn base_fee_per_gas(&self) -> Option<u64> {
self.inner.base_fee_per_gas()
}
fn blob_gas_used(&self) -> Option<u64> {
self.inner.blob_gas_used()
}
fn excess_blob_gas(&self) -> Option<u64> {
self.inner.excess_blob_gas()
}
fn parent_beacon_block_root(&self) -> Option<B256> {
self.inner.parent_beacon_block_root()
}
fn requests_hash(&self) -> Option<B256> {
self.inner.requests_hash()
}
fn extra_data(&self) -> &Bytes {
self.inner.extra_data()
}
fn is_empty(&self) -> bool {
self.extras.system_tx_count == 0 && self.inner.is_empty()
}
}
impl InMemorySize for HlHeader {
fn size(&self) -> usize {
self.inner.size() + self.extras.size()
}
}
impl InMemorySize for HlHeaderExtras {
fn size(&self) -> usize {
self.logs_bloom_with_system_txs.data().len() + self.system_tx_count.size()
}
}
impl reth_codecs::Compact for HlHeader {
fn to_compact<B>(&self, buf: &mut B) -> usize
where
B: alloy_rlp::bytes::BufMut + AsMut<[u8]>,
{
// Because `Header` ends with `extra_data`, which is `Bytes`, we can't use `to_compact`
// for the extras: the `Compact` trait requires a `Bytes` field to be the last field of
// the struct, and `Bytes::from_compact` simply reads all trailing data as that field.
//
// Hence we use another form of serialization, since the extended header is not
// Compact-compatible: the whole header is treated as a single rmp-serialized `Bytes` field.
let result: Bytes = rmp_serde::to_vec(&self).unwrap().into();
result.to_compact(buf)
}
fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
let (bytes, remaining) = Bytes::from_compact(buf, len);
let header: HlHeader = rmp_serde::from_slice(&bytes).unwrap();
(header, remaining)
}
}
impl reth_db_api::table::Compress for HlHeader {
type Compressed = Vec<u8>;
fn compress_to_buf<B: alloy_primitives::bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
let _ = Compact::to_compact(self, buf);
}
}
impl reth_db_api::table::Decompress for HlHeader {
fn decompress(value: &[u8]) -> Result<Self, reth_db_api::DatabaseError> {
let (obj, _) = Compact::from_compact(value, value.len());
Ok(obj)
}
}
impl BlockHeader for HlHeader {}
impl RlpBincode for HlHeader {}
impl CliHeader for HlHeader {
fn set_number(&mut self, number: u64) {
self.inner.set_number(number);
}
}
impl From<HlHeader> for Header {
fn from(value: HlHeader) -> Self {
value.inner
}
}
pub fn to_ethereum_ommers(ommers: &[HlHeader]) -> Vec<Header> {
ommers.iter().map(|ommer| ommer.clone().into()).collect()
}
impl FromConsensusHeader<HlHeader> for alloy_rpc_types::Header {
fn from_consensus_header(header: SealedHeader<HlHeader>, block_size: usize) -> Self {
FromConsensusHeader::<Header>::from_consensus_header(
SealedHeader::<Header>::new(header.inner.clone(), header.hash()),
block_size,
)
}
}
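A round-trip sketch (hypothetical test, not part of the diff) for the Compact workaround above: the header is stored as opaque rmp bytes inside a single `Bytes` column, so `to_compact` followed by `from_compact` must reproduce the header exactly.

    #[cfg(test)]
    mod compact_roundtrip_sketch {
        use super::*;

        #[test]
        fn hl_header_compact_roundtrip() {
            let header = HlHeader::default();
            let mut buf = Vec::new();
            header.to_compact(&mut buf);
            let (decoded, rest) = HlHeader::from_compact(&buf, buf.len());
            assert_eq!(decoded, header);
            assert!(rest.is_empty());
        }
    }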

View File

@@ -1,17 +1,18 @@
-#![allow(clippy::owned_cow)]
-use alloy_consensus::{BlobTransactionSidecar, Header};
-use alloy_primitives::Address;
-use alloy_rlp::{Encodable, RlpDecodable, RlpEncodable};
 use reth_ethereum_primitives::Receipt;
 use reth_primitives::NodePrimitives;
-use reth_primitives_traits::{Block, BlockBody as BlockBodyTrait, InMemorySize};
-use serde::{Deserialize, Serialize};
-use std::borrow::Cow;
-use crate::node::types::{ReadPrecompileCall, ReadPrecompileCalls};
-pub mod tx_wrapper;
-pub use tx_wrapper::{BlockBody, TransactionSigned};
+pub mod transaction;
+pub use transaction::TransactionSigned;
+pub mod block;
+pub use block::HlBlock;
+pub mod body;
+pub use body::{BlockBody, HlBlockBody};
+pub mod header;
+pub use header::HlHeader;
+pub mod rlp;
+pub mod serde_bincode_compat;
 /// Primitive types for HyperEVM.
 #[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
@@ -20,321 +21,8 @@ pub struct HlPrimitives;
 impl NodePrimitives for HlPrimitives {
     type Block = HlBlock;
-    type BlockHeader = Header;
+    type BlockHeader = HlHeader;
     type BlockBody = HlBlockBody;
     type SignedTx = TransactionSigned;
     type Receipt = Receipt;
 }
/// Block body for HL. It is equivalent to Ethereum [`BlockBody`] but additionally stores sidecars
/// for blob transactions.
#[derive(
Debug,
Clone,
Default,
PartialEq,
Eq,
Serialize,
Deserialize,
derive_more::Deref,
derive_more::DerefMut,
)]
pub struct HlBlockBody {
#[serde(flatten)]
#[deref]
#[deref_mut]
pub inner: BlockBody,
pub sidecars: Option<Vec<BlobTransactionSidecar>>,
pub read_precompile_calls: Option<ReadPrecompileCalls>,
pub highest_precompile_address: Option<Address>,
}
impl InMemorySize for HlBlockBody {
fn size(&self) -> usize {
self.inner.size() +
self.sidecars
.as_ref()
.map_or(0, |s| s.capacity() * core::mem::size_of::<BlobTransactionSidecar>()) +
self.read_precompile_calls
.as_ref()
.map_or(0, |s| s.0.capacity() * core::mem::size_of::<ReadPrecompileCall>())
}
}
impl BlockBodyTrait for HlBlockBody {
type Transaction = TransactionSigned;
type OmmerHeader = Header;
fn transactions(&self) -> &[Self::Transaction] {
BlockBodyTrait::transactions(&self.inner)
}
fn into_ethereum_body(self) -> BlockBody {
self.inner
}
fn into_transactions(self) -> Vec<Self::Transaction> {
self.inner.into_transactions()
}
fn withdrawals(&self) -> Option<&alloy_rpc_types::Withdrawals> {
self.inner.withdrawals()
}
fn ommers(&self) -> Option<&[Self::OmmerHeader]> {
self.inner.ommers()
}
fn calculate_tx_root(&self) -> alloy_primitives::B256 {
alloy_consensus::proofs::calculate_transaction_root(
&self
.transactions()
.iter()
.filter(|tx| !tx.is_system_transaction())
.collect::<Vec<_>>(),
)
}
}
/// Block for HL
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct HlBlock {
pub header: Header,
pub body: HlBlockBody,
}
impl InMemorySize for HlBlock {
fn size(&self) -> usize {
self.header.size() + self.body.size()
}
}
impl Block for HlBlock {
type Header = Header;
type Body = HlBlockBody;
fn new(header: Self::Header, body: Self::Body) -> Self {
Self { header, body }
}
fn header(&self) -> &Self::Header {
&self.header
}
fn body(&self) -> &Self::Body {
&self.body
}
fn split(self) -> (Self::Header, Self::Body) {
(self.header, self.body)
}
fn rlp_length(header: &Self::Header, body: &Self::Body) -> usize {
rlp::BlockHelper {
header: Cow::Borrowed(header),
transactions: Cow::Borrowed(&body.inner.transactions),
ommers: Cow::Borrowed(&body.inner.ommers),
withdrawals: body.inner.withdrawals.as_ref().map(Cow::Borrowed),
sidecars: body.sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: body.read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: body.highest_precompile_address.as_ref().map(Cow::Borrowed),
}
.length()
}
}
mod rlp {
use super::*;
use alloy_eips::eip4895::Withdrawals;
use alloy_rlp::Decodable;
#[derive(RlpEncodable, RlpDecodable)]
#[rlp(trailing)]
struct BlockBodyHelper<'a> {
transactions: Cow<'a, Vec<TransactionSigned>>,
ommers: Cow<'a, Vec<Header>>,
withdrawals: Option<Cow<'a, Withdrawals>>,
sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
highest_precompile_address: Option<Cow<'a, Address>>,
}
#[derive(RlpEncodable, RlpDecodable)]
#[rlp(trailing)]
pub(crate) struct BlockHelper<'a> {
pub(crate) header: Cow<'a, Header>,
pub(crate) transactions: Cow<'a, Vec<TransactionSigned>>,
pub(crate) ommers: Cow<'a, Vec<Header>>,
pub(crate) withdrawals: Option<Cow<'a, Withdrawals>>,
pub(crate) sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
pub(crate) read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
pub(crate) highest_precompile_address: Option<Cow<'a, Address>>,
}
impl<'a> From<&'a HlBlockBody> for BlockBodyHelper<'a> {
fn from(value: &'a HlBlockBody) -> Self {
let HlBlockBody {
inner: BlockBody { transactions, ommers, withdrawals },
sidecars,
read_precompile_calls,
highest_precompile_address,
} = value;
Self {
transactions: Cow::Borrowed(transactions),
ommers: Cow::Borrowed(ommers),
withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
sidecars: sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
}
}
}
impl<'a> From<&'a HlBlock> for BlockHelper<'a> {
fn from(value: &'a HlBlock) -> Self {
let HlBlock {
header,
body:
HlBlockBody {
inner: BlockBody { transactions, ommers, withdrawals },
sidecars,
read_precompile_calls,
highest_precompile_address,
},
} = value;
Self {
header: Cow::Borrowed(header),
transactions: Cow::Borrowed(transactions),
ommers: Cow::Borrowed(ommers),
withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
sidecars: sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
}
}
}
impl Encodable for HlBlockBody {
fn encode(&self, out: &mut dyn bytes::BufMut) {
BlockBodyHelper::from(self).encode(out);
}
fn length(&self) -> usize {
BlockBodyHelper::from(self).length()
}
}
impl Decodable for HlBlockBody {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let BlockBodyHelper {
transactions,
ommers,
withdrawals,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = BlockBodyHelper::decode(buf)?;
Ok(Self {
inner: BlockBody {
transactions: transactions.into_owned(),
ommers: ommers.into_owned(),
withdrawals: withdrawals.map(|w| w.into_owned()),
},
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
})
}
}
impl Encodable for HlBlock {
fn encode(&self, out: &mut dyn bytes::BufMut) {
BlockHelper::from(self).encode(out);
}
fn length(&self) -> usize {
BlockHelper::from(self).length()
}
}
impl Decodable for HlBlock {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let BlockHelper {
header,
transactions,
ommers,
withdrawals,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = BlockHelper::decode(buf)?;
Ok(Self {
header: header.into_owned(),
body: HlBlockBody {
inner: BlockBody {
transactions: transactions.into_owned(),
ommers: ommers.into_owned(),
withdrawals: withdrawals.map(|w| w.into_owned()),
},
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
},
})
}
}
}
pub mod serde_bincode_compat {
use super::*;
use reth_primitives_traits::serde_bincode_compat::{BincodeReprFor, SerdeBincodeCompat};
#[derive(Debug, Serialize, Deserialize)]
pub struct HlBlockBodyBincode<'a> {
inner: BincodeReprFor<'a, BlockBody>,
sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
highest_precompile_address: Option<Cow<'a, Address>>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct HlBlockBincode<'a> {
header: BincodeReprFor<'a, Header>,
body: BincodeReprFor<'a, HlBlockBody>,
}
impl SerdeBincodeCompat for HlBlockBody {
type BincodeRepr<'a> = HlBlockBodyBincode<'a>;
fn as_repr(&self) -> Self::BincodeRepr<'_> {
HlBlockBodyBincode {
inner: self.inner.as_repr(),
sidecars: self.sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: self.read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: self
.highest_precompile_address
.as_ref()
.map(Cow::Borrowed),
}
}
fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
let HlBlockBodyBincode {
inner,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = repr;
Self {
inner: BlockBody::from_repr(inner),
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
}
}
}
impl SerdeBincodeCompat for HlBlock {
type BincodeRepr<'a> = HlBlockBincode<'a>;
fn as_repr(&self) -> Self::BincodeRepr<'_> {
HlBlockBincode { header: self.header.as_repr(), body: self.body.as_repr() }
}
fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
let HlBlockBincode { header, body } = repr;
Self { header: Header::from_repr(header), body: HlBlockBody::from_repr(body) }
}
}
}

src/node/primitives/rlp.rs Normal file
View File

@@ -0,0 +1,142 @@
#![allow(clippy::owned_cow)]
use super::{HlBlock, HlBlockBody, TransactionSigned};
use crate::{node::types::ReadPrecompileCalls, HlHeader};
use alloy_consensus::{BlobTransactionSidecar, BlockBody};
use alloy_eips::eip4895::Withdrawals;
use alloy_primitives::Address;
use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable};
use std::borrow::Cow;
#[derive(RlpEncodable, RlpDecodable)]
#[rlp(trailing)]
struct BlockBodyHelper<'a> {
transactions: Cow<'a, Vec<TransactionSigned>>,
ommers: Cow<'a, Vec<HlHeader>>,
withdrawals: Option<Cow<'a, Withdrawals>>,
sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
highest_precompile_address: Option<Cow<'a, Address>>,
}
#[derive(RlpEncodable, RlpDecodable)]
#[rlp(trailing)]
pub(crate) struct BlockHelper<'a> {
pub(crate) header: Cow<'a, HlHeader>,
pub(crate) transactions: Cow<'a, Vec<TransactionSigned>>,
pub(crate) ommers: Cow<'a, Vec<HlHeader>>,
pub(crate) withdrawals: Option<Cow<'a, Withdrawals>>,
pub(crate) sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
pub(crate) read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
pub(crate) highest_precompile_address: Option<Cow<'a, Address>>,
}
impl<'a> From<&'a HlBlockBody> for BlockBodyHelper<'a> {
fn from(value: &'a HlBlockBody) -> Self {
let HlBlockBody {
inner: BlockBody { transactions, ommers, withdrawals },
sidecars,
read_precompile_calls,
highest_precompile_address,
} = value;
Self {
transactions: Cow::Borrowed(transactions),
ommers: Cow::Borrowed(ommers),
withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
sidecars: sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
}
}
}
impl<'a> From<&'a HlBlock> for BlockHelper<'a> {
fn from(value: &'a HlBlock) -> Self {
let HlBlock {
header,
body:
HlBlockBody {
inner: BlockBody { transactions, ommers, withdrawals },
sidecars,
read_precompile_calls,
highest_precompile_address,
},
} = value;
Self {
header: Cow::Borrowed(header),
transactions: Cow::Borrowed(transactions),
ommers: Cow::Borrowed(ommers),
withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
sidecars: sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
}
}
}
impl Encodable for HlBlockBody {
fn encode(&self, out: &mut dyn bytes::BufMut) {
BlockBodyHelper::from(self).encode(out);
}
fn length(&self) -> usize {
BlockBodyHelper::from(self).length()
}
}
impl Decodable for HlBlockBody {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let BlockBodyHelper {
transactions,
ommers,
withdrawals,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = BlockBodyHelper::decode(buf)?;
Ok(Self {
inner: BlockBody {
transactions: transactions.into_owned(),
ommers: ommers.into_owned(),
withdrawals: withdrawals.map(|w| w.into_owned()),
},
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
})
}
}
impl Encodable for HlBlock {
fn encode(&self, out: &mut dyn bytes::BufMut) {
BlockHelper::from(self).encode(out);
}
fn length(&self) -> usize {
BlockHelper::from(self).length()
}
}
impl Decodable for HlBlock {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let BlockHelper {
header,
transactions,
ommers,
withdrawals,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = BlockHelper::decode(buf)?;
Ok(Self {
header: header.into_owned(),
body: HlBlockBody {
inner: BlockBody {
transactions: transactions.into_owned(),
ommers: ommers.into_owned(),
withdrawals: withdrawals.map(|w| w.into_owned()),
},
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
},
})
}
}

View File

@@ -0,0 +1,64 @@
#![allow(clippy::owned_cow)]
use alloy_consensus::BlobTransactionSidecar;
use alloy_primitives::Address;
use reth_primitives_traits::serde_bincode_compat::{BincodeReprFor, SerdeBincodeCompat};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use super::{HlBlock, HlBlockBody};
use crate::{node::{primitives::BlockBody, types::ReadPrecompileCalls}, HlHeader};
#[derive(Debug, Serialize, Deserialize)]
pub struct HlBlockBodyBincode<'a> {
inner: BincodeReprFor<'a, BlockBody>,
sidecars: Option<Cow<'a, Vec<BlobTransactionSidecar>>>,
read_precompile_calls: Option<Cow<'a, ReadPrecompileCalls>>,
highest_precompile_address: Option<Cow<'a, Address>>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct HlBlockBincode<'a> {
header: BincodeReprFor<'a, HlHeader>,
body: BincodeReprFor<'a, HlBlockBody>,
}
impl SerdeBincodeCompat for HlBlockBody {
type BincodeRepr<'a> = HlBlockBodyBincode<'a>;
fn as_repr(&self) -> Self::BincodeRepr<'_> {
HlBlockBodyBincode {
inner: self.inner.as_repr(),
sidecars: self.sidecars.as_ref().map(Cow::Borrowed),
read_precompile_calls: self.read_precompile_calls.as_ref().map(Cow::Borrowed),
highest_precompile_address: self.highest_precompile_address.as_ref().map(Cow::Borrowed),
}
}
fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
let HlBlockBodyBincode {
inner,
sidecars,
read_precompile_calls,
highest_precompile_address,
} = repr;
Self {
inner: BlockBody::from_repr(inner),
sidecars: sidecars.map(|s| s.into_owned()),
read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
highest_precompile_address: highest_precompile_address.map(|s| s.into_owned()),
}
}
}
impl SerdeBincodeCompat for HlBlock {
type BincodeRepr<'a> = HlBlockBincode<'a>;
fn as_repr(&self) -> Self::BincodeRepr<'_> {
HlBlockBincode { header: self.header.as_repr(), body: self.body.as_repr() }
}
fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
let HlBlockBincode { header, body } = repr;
Self { header: HlHeader::from_repr(header), body: HlBlockBody::from_repr(body) }
}
}

View File

@@ -1,33 +1,35 @@
 //! HlNodePrimitives::TransactionSigned; it's the same as ethereum transaction type,
 //! except that it supports pseudo signer for system transactions.
+use std::convert::Infallible;
+use crate::evm::transaction::HlTxEnv;
 use alloy_consensus::{
-    crypto::RecoveryError, error::ValueError, EthereumTxEnvelope, EthereumTypedTransaction,
     SignableTransaction, Signed, Transaction as TransactionTrait, TransactionEnvelope, TxEip1559,
-    TxEip2930, TxEip4844, TxEip4844WithSidecar, TxEip7702, TxLegacy, TxType, TypedTransaction,
+    TxEip2930, TxEip4844, TxEip7702, TxLegacy, TxType, TypedTransaction, crypto::RecoveryError,
+    error::ValueError, transaction::TxHashRef,
 };
-use alloy_eips::{eip7594::BlobTransactionSidecarVariant, Encodable2718};
+use alloy_eips::Encodable2718;
 use alloy_network::TxSigner;
-use alloy_primitives::{address, Address, TxHash, U256};
+use alloy_primitives::{Address, TxHash, U256, address};
 use alloy_rpc_types::{Transaction, TransactionInfo, TransactionRequest};
 use alloy_signer::Signature;
-use reth_codecs::alloy::transaction::FromTxCompact;
+use reth_codecs::alloy::transaction::{Envelope, FromTxCompact};
 use reth_db::{
-    table::{Compress, Decompress},
     DatabaseError,
+    table::{Compress, Decompress},
 };
+use reth_ethereum_primitives::PooledTransactionVariant;
 use reth_evm::FromRecoveredTx;
 use reth_primitives::Recovered;
 use reth_primitives_traits::{
-    serde_bincode_compat::SerdeBincodeCompat, InMemorySize, SignedTransaction, SignerRecoverable,
+    InMemorySize, SignedTransaction, SignerRecoverable, serde_bincode_compat::SerdeBincodeCompat,
 };
 use reth_rpc_eth_api::{
-    transaction::{FromConsensusTx, TryIntoTxEnv},
     EthTxEnvError, SignTxRequestError, SignableTxRequest, TryIntoSimTx,
+    transaction::{FromConsensusTx, TryIntoTxEnv},
 };
 use revm::context::{BlockEnv, CfgEnv, TxEnv};
-use crate::evm::transaction::HlTxEnv;
 type InnerType = alloy_consensus::EthereumTxEnvelope<TxEip4844>;
 #[derive(Debug, Clone, TransactionEnvelope)]
@@ -46,6 +48,12 @@ fn s_to_address(s: U256) -> Address {
     Address::from_slice(&buf)
 }
+impl TxHashRef for TransactionSigned {
+    fn tx_hash(&self) -> &TxHash {
+        self.inner().tx_hash()
+    }
+}
 impl SignerRecoverable for TransactionSigned {
     fn recover_signer(&self) -> Result<Address, RecoveryError> {
         if self.is_system_transaction() {
@@ -69,11 +77,7 @@ impl SignerRecoverable for TransactionSigned {
     }
 }
-impl SignedTransaction for TransactionSigned {
-    fn tx_hash(&self) -> &TxHash {
-        self.inner().tx_hash()
-    }
-}
+impl SignedTransaction for TransactionSigned {}
 // ------------------------------------------------------------
 // NOTE: All lines below are just wrappers for the inner type.
@@ -157,16 +161,8 @@ impl TransactionSigned {
         }
     }
-    pub fn signature(&self) -> &Signature {
-        self.inner().signature()
-    }
-    pub const fn tx_type(&self) -> TxType {
-        self.inner().tx_type()
-    }
     pub fn is_system_transaction(&self) -> bool {
-        self.gas_price().is_some() && self.gas_price().unwrap() == 0
+        matches!(self.gas_price(), Some(0))
     }
 }
@@ -185,26 +181,16 @@ impl SerdeBincodeCompat for TransactionSigned {
     }
 }
-pub type BlockBody = alloy_consensus::BlockBody<TransactionSigned>;
-impl TryFrom<TransactionSigned>
-    for EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>
-{
-    type Error = <InnerType as TryInto<
-        EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>,
-    >>::Error;
+impl TryFrom<TransactionSigned> for PooledTransactionVariant {
+    type Error = <InnerType as TryInto<PooledTransactionVariant>>::Error;
     fn try_from(value: TransactionSigned) -> Result<Self, Self::Error> {
         value.into_inner().try_into()
     }
 }
-impl From<EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>>
-    for TransactionSigned
-{
-    fn from(
-        value: EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>,
-    ) -> Self {
+impl From<PooledTransactionVariant> for TransactionSigned {
+    fn from(value: PooledTransactionVariant) -> Self {
         Self::Default(value.into())
     }
 }
@@ -212,10 +198,6 @@ impl From<EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>
 impl Compress for TransactionSigned {
     type Compressed = Vec<u8>;
-    fn compress(self) -> Self::Compressed {
-        self.into_inner().compress()
-    }
     fn compress_to_buf<B: bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
         self.inner().compress_to_buf(buf);
     }
@@ -227,22 +209,6 @@ impl Decompress for TransactionSigned {
     }
 }
-pub fn convert_to_eth_block_body(value: BlockBody) -> alloy_consensus::BlockBody<InnerType> {
-    alloy_consensus::BlockBody {
-        transactions: value.transactions.into_iter().map(|tx| tx.into_inner()).collect(),
-        ommers: value.ommers,
-        withdrawals: value.withdrawals,
-    }
-}
-pub fn convert_to_hl_block_body(value: alloy_consensus::BlockBody<InnerType>) -> BlockBody {
-    BlockBody {
-        transactions: value.transactions.into_iter().map(TransactionSigned::Default).collect(),
-        ommers: value.ommers,
-        withdrawals: value.withdrawals,
-    }
-}
 impl TryIntoSimTx<TransactionSigned> for TransactionRequest {
     fn try_into_sim_tx(self) -> Result<TransactionSigned, ValueError<Self>> {
         let tx = self
@@ -270,9 +236,17 @@ impl TryIntoTxEnv<HlTxEnv<TxEnv>> for TransactionRequest {
 impl FromConsensusTx<TransactionSigned> for Transaction {
     type TxInfo = TransactionInfo;
+    type Err = Infallible;
-    fn from_consensus_tx(tx: TransactionSigned, signer: Address, tx_info: Self::TxInfo) -> Self {
-        Self::from_transaction(Recovered::new_unchecked(tx.into_inner().into(), signer), tx_info)
+    fn from_consensus_tx(
+        tx: TransactionSigned,
+        signer: Address,
+        tx_info: Self::TxInfo,
+    ) -> Result<Self, Self::Err> {
+        Ok(Self::from_transaction(
+            Recovered::new_unchecked(tx.into_inner().into(), signer),
+            tx_info,
+        ))
     }
 }
@@ -281,26 +255,7 @@ impl SignableTxRequest<TransactionSigned> for TransactionRequest {
         self,
         signer: impl TxSigner<Signature> + Send,
     ) -> Result<TransactionSigned, SignTxRequestError> {
-        let mut tx =
-            self.build_typed_tx().map_err(|_| SignTxRequestError::InvalidTransactionRequest)?;
-        let signature = signer.sign_transaction(&mut tx).await?;
-        let signed = match tx {
-            EthereumTypedTransaction::Legacy(tx) => {
-                EthereumTxEnvelope::Legacy(tx.into_signed(signature))
-            }
-            EthereumTypedTransaction::Eip2930(tx) => {
-                EthereumTxEnvelope::Eip2930(tx.into_signed(signature))
-            }
-            EthereumTypedTransaction::Eip1559(tx) => {
-                EthereumTxEnvelope::Eip1559(tx.into_signed(signature))
-            }
-            EthereumTypedTransaction::Eip4844(tx) => {
-                EthereumTxEnvelope::Eip4844(TxEip4844::from(tx).into_signed(signature))
-            }
-            EthereumTypedTransaction::Eip7702(tx) => {
-                EthereumTxEnvelope::Eip7702(tx.into_signed(signature))
-            }
-        };
+        let signed = SignableTxRequest::<InnerType>::try_build_and_sign(self, signer).await?;
         Ok(TransactionSigned::Default(signed))
     }
 }
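The `is_system_transaction` rewrite above is purely mechanical. An illustrative equivalence check for any gas price `gp` (function name hypothetical):

    fn same_semantics(gp: Option<u128>) -> bool {
        matches!(gp, Some(0)) == (gp.is_some() && gp.unwrap() == 0)
    }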

View File

@@ -1,17 +1,17 @@
-use crate::node::rpc::HlEthApi;
+use crate::node::rpc::{HlEthApi, HlRpcNodeCore};
 use reth::rpc::server_types::eth::{
-    builder::config::PendingBlockKind, error::FromEvmError, EthApiError, PendingBlock,
+    EthApiError, PendingBlock, builder::config::PendingBlockKind, error::FromEvmError,
 };
 use reth_rpc_eth_api::{
+    RpcConvert,
     helpers::{
-        pending_block::PendingEnvBuilder, EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt,
+        EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, pending_block::PendingEnvBuilder,
     },
-    RpcConvert, RpcNodeCore,
 };
 impl<N, Rpc> EthBlocks for HlEthApi<N, Rpc>
 where
-    N: RpcNodeCore,
+    N: HlRpcNodeCore,
     EthApiError: FromEvmError<N::Evm>,
     Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
 {
@@ -19,7 +19,7 @@ where
 impl<N, Rpc> LoadBlock for HlEthApi<N, Rpc>
 where
-    N: RpcNodeCore,
+    N: HlRpcNodeCore,
     EthApiError: FromEvmError<N::Evm>,
     Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
 {
@@ -27,9 +27,9 @@ where
 impl<N, Rpc> LoadPendingBlock for HlEthApi<N, Rpc>
 where
-    N: RpcNodeCore,
+    N: HlRpcNodeCore,
     EthApiError: FromEvmError<N::Evm>,
-    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
+    Rpc: RpcConvert<Primitives = N::Primitives>,
 {
     #[inline]
     fn pending_block(&self) -> &tokio::sync::Mutex<Option<PendingBlock<N::Primitives>>> {
@@ -49,8 +49,7 @@ where
 impl<N, Rpc> LoadReceipt for HlEthApi<N, Rpc>
 where
-    N: RpcNodeCore,
-    EthApiError: FromEvmError<N::Evm>,
+    N: HlRpcNodeCore,
     Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
 {
 }

View File

@@ -1,32 +1,45 @@
-use super::HlEthApi;
+use core::fmt;
+use super::{HlEthApi, HlRpcNodeCore};
+use crate::{HlBlock, node::evm::apply_precompiles};
+use alloy_consensus::transaction::TxHashRef;
+use alloy_evm::Evm;
+use alloy_primitives::B256;
 use reth::rpc::server_types::eth::EthApiError;
-use reth_evm::TxEnvFor;
+use reth_evm::{ConfigureEvm, Database, EvmEnvFor, HaltReasonFor, InspectorFor, SpecFor, TxEnvFor};
+use reth_primitives::{NodePrimitives, Recovered};
+use reth_provider::{ProviderError, ProviderTx};
 use reth_rpc_eth_api::{
-    helpers::{estimate::EstimateCall, Call, EthCall},
     FromEvmError, RpcConvert, RpcNodeCore,
+    helpers::{Call, EthCall},
 };
+use revm::{DatabaseCommit, context::result::ResultAndState};
+impl<N> HlRpcNodeCore for N where N: RpcNodeCore<Primitives: NodePrimitives<Block = HlBlock>> {}
 impl<N, Rpc> EthCall for HlEthApi<N, Rpc>
 where
-    N: RpcNodeCore,
+    N: HlRpcNodeCore,
     EthApiError: FromEvmError<N::Evm>,
-    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, TxEnv = TxEnvFor<N::Evm>>,
-{
-}
-impl<N, Rpc> EstimateCall for HlEthApi<N, Rpc>
-where
-    N: RpcNodeCore,
-    EthApiError: FromEvmError<N::Evm>,
-    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, TxEnv = TxEnvFor<N::Evm>>,
+    Rpc: RpcConvert<
+        Primitives = N::Primitives,
+        Error = EthApiError,
+        TxEnv = TxEnvFor<N::Evm>,
+        Spec = SpecFor<N::Evm>,
+    >,
 {
 }
 impl<N, Rpc> Call for HlEthApi<N, Rpc>
 where
-    N: RpcNodeCore,
+    N: HlRpcNodeCore,
     EthApiError: FromEvmError<N::Evm>,
-    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, TxEnv = TxEnvFor<N::Evm>>,
+    Rpc: RpcConvert<
+        Primitives = N::Primitives,
+        Error = EthApiError,
+        TxEnv = TxEnvFor<N::Evm>,
+        Spec = SpecFor<N::Evm>,
+    >,
 {
     #[inline]
     fn call_gas_limit(&self) -> u64 {
@@ -37,4 +50,75 @@ where
     fn max_simulate_blocks(&self) -> u64 {
         self.inner.eth_api.max_simulate_blocks()
     }
fn transact<DB>(
&self,
db: DB,
evm_env: EvmEnvFor<Self::Evm>,
tx_env: TxEnvFor<Self::Evm>,
) -> Result<ResultAndState<HaltReasonFor<Self::Evm>>, Self::Error>
where
DB: Database<Error = ProviderError> + fmt::Debug,
{
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.to::<u64>().into())?;
let mut evm = self.evm_config().evm_with_env(db, evm_env);
apply_precompiles(&mut evm, &hl_extras);
let res = evm.transact(tx_env).map_err(Self::Error::from_evm_err)?;
Ok(res)
}
fn transact_with_inspector<DB, I>(
&self,
db: DB,
evm_env: EvmEnvFor<Self::Evm>,
tx_env: TxEnvFor<Self::Evm>,
inspector: I,
) -> Result<ResultAndState<HaltReasonFor<Self::Evm>>, Self::Error>
where
DB: Database<Error = ProviderError> + fmt::Debug,
I: InspectorFor<Self::Evm, DB>,
{
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.to::<u64>().into())?;
let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, inspector);
apply_precompiles(&mut evm, &hl_extras);
let res = evm.transact(tx_env).map_err(Self::Error::from_evm_err)?;
Ok(res)
}
fn replay_transactions_until<'a, DB, I>(
&self,
db: &mut DB,
evm_env: EvmEnvFor<Self::Evm>,
transactions: I,
target_tx_hash: B256,
) -> Result<usize, Self::Error>
where
DB: Database<Error = ProviderError> + DatabaseCommit + core::fmt::Debug,
I: IntoIterator<Item = Recovered<&'a ProviderTx<Self::Provider>>>,
{
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.to::<u64>().into())?;
let mut evm = self.evm_config().evm_with_env(db, evm_env);
apply_precompiles(&mut evm, &hl_extras);
let mut index = 0;
for tx in transactions {
if *tx.tx_hash() == target_tx_hash {
// reached the target transaction
break;
}
let tx_env = self.evm_config().tx_env(tx);
evm.transact_commit(tx_env).map_err(Self::Error::from_evm_err)?;
index += 1;
}
Ok(index)
}
}
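The three overrides above share one shape: resolve the block's `HlExtras`, build the EVM, install the recorded precompile results, then execute. A condensed sketch of that flow with stand-in types (the real signatures are generic over reth's EVM traits):

// Stand-ins so the sketch compiles on its own; in the diff these roles are
// played by HlExtras, the configured EVM, and apply_precompiles.
struct Extras;
struct Evm;
impl Evm {
    fn transact(&mut self, _tx: ()) -> Result<(), String> { Ok(()) }
}
fn apply_precompiles(_evm: &mut Evm, _extras: &Extras) {}

// The shared preamble: per-block precompile data is applied before execution.
fn transact_with_extras(
    get_extras: impl Fn(u64) -> Result<Extras, String>, // e.g. get_hl_extras
    mut evm: Evm,
    block_number: u64,
) -> Result<(), String> {
    let extras = get_extras(block_number)?;
    apply_precompiles(&mut evm, &extras); // install cached precompile results
    evm.transact(())                      // then run the transaction as usual
}

fn main() {
    let _ = transact_with_extras(|_| Ok(Extras), Evm, 1);
}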

View File

@@ -9,7 +9,7 @@ use alloy_primitives::B256;
use alloy_rpc_types_engine::PayloadError;
use reth::{
api::{FullNodeComponents, NodeTypes},
- builder::{rpc::PayloadValidatorBuilder, AddOnsContext},
builder::{AddOnsContext, rpc::PayloadValidatorBuilder},
};
use reth_engine_primitives::{ExecutionPayload, PayloadValidator};
use reth_payload_primitives::NewPayloadError;

214
src/node/rpc/estimate.rs Normal file
View File

@@ -0,0 +1,214 @@
use super::{HlEthApi, HlRpcNodeCore, apply_precompiles};
use alloy_evm::overrides::{StateOverrideError, apply_state_overrides};
use alloy_network::TransactionBuilder;
use alloy_primitives::{TxKind, U256};
use alloy_rpc_types_eth::state::StateOverride;
use reth_chainspec::MIN_TRANSACTION_GAS;
use reth_errors::ProviderError;
use reth_evm::{ConfigureEvm, Evm, EvmEnvFor, SpecFor, TransactionEnv, TxEnvFor};
use reth_revm::{database::StateProviderDatabase, db::CacheDB};
use reth_rpc_convert::{RpcConvert, RpcTxReq};
use reth_rpc_eth_api::{
AsEthApiError, IntoEthApiError, RpcNodeCore,
helpers::{
Call,
estimate::{EstimateCall, update_estimated_gas_range},
},
};
use reth_rpc_eth_types::{
EthApiError, RevertError, RpcInvalidTransactionError,
error::{FromEvmError, api::FromEvmHalt},
};
use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO};
use reth_storage_api::StateProvider;
use revm::context_interface::{Transaction, result::ExecutionResult};
use tracing::trace;
impl<N, Rpc> EstimateCall for HlEthApi<N, Rpc>
where
Self: Call,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm> + From<StateOverrideError<ProviderError>>,
Rpc: RpcConvert<
Primitives = N::Primitives,
Error = EthApiError,
TxEnv = TxEnvFor<N::Evm>,
Spec = SpecFor<N::Evm>,
>,
{
// Modified version that adds `apply_precompiles`; comments are stripped out.
fn estimate_gas_with<S>(
&self,
mut evm_env: EvmEnvFor<Self::Evm>,
mut request: RpcTxReq<<Self::RpcConvert as RpcConvert>::Network>,
state: S,
state_override: Option<StateOverride>,
) -> Result<U256, Self::Error>
where
S: StateProvider,
{
evm_env.cfg_env.disable_eip3607 = true;
evm_env.cfg_env.disable_base_fee = true;
request.as_mut().take_nonce();
let tx_request_gas_limit = request.as_ref().gas_limit();
let tx_request_gas_price = request.as_ref().gas_price();
let max_gas_limit = evm_env
.cfg_env
.tx_gas_limit_cap
.map_or(evm_env.block_env.gas_limit, |cap| cap.min(evm_env.block_env.gas_limit));
let mut highest_gas_limit = tx_request_gas_limit
.map(|mut tx_gas_limit| {
if max_gas_limit < tx_gas_limit {
tx_gas_limit = max_gas_limit;
}
tx_gas_limit
})
.unwrap_or(max_gas_limit);
let mut db = CacheDB::new(StateProviderDatabase::new(state));
if let Some(state_override) = state_override {
apply_state_overrides(state_override, &mut db).map_err(
|err: StateOverrideError<ProviderError>| {
let eth_api_error: EthApiError = EthApiError::from(err);
Self::Error::from(eth_api_error)
},
)?;
}
let mut tx_env = self.create_txn_env(&evm_env, request, &mut db)?;
let mut is_basic_transfer = false;
if tx_env.input().is_empty() &&
let TxKind::Call(to) = tx_env.kind() &&
let Ok(code) = db.db.account_code(&to)
{
is_basic_transfer = code.map(|code| code.is_empty()).unwrap_or(true);
}
if tx_env.gas_price() > 0 {
highest_gas_limit =
highest_gas_limit.min(self.caller_gas_allowance(&mut db, &evm_env, &tx_env)?);
}
tx_env.set_gas_limit(tx_env.gas_limit().min(highest_gas_limit));
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.to::<u64>().into())?;
let mut evm = self.evm_config().evm_with_env(&mut db, evm_env);
apply_precompiles(&mut evm, &hl_extras);
if is_basic_transfer {
let mut min_tx_env = tx_env.clone();
min_tx_env.set_gas_limit(MIN_TRANSACTION_GAS);
if let Ok(res) = evm.transact(min_tx_env).map_err(Self::Error::from_evm_err) &&
res.result.is_success()
{
return Ok(U256::from(MIN_TRANSACTION_GAS));
}
}
trace!(target: "rpc::eth::estimate", ?tx_env, gas_limit = tx_env.gas_limit(), is_basic_transfer, "Starting gas estimation");
let mut res = match evm.transact(tx_env.clone()).map_err(Self::Error::from_evm_err) {
Err(err)
if err.is_gas_too_high() &&
(tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) =>
{
return Self::map_out_of_gas_err(&mut evm, tx_env, max_gas_limit);
}
Err(err) if err.is_gas_too_low() => {
return Err(RpcInvalidTransactionError::GasRequiredExceedsAllowance {
gas_limit: tx_env.gas_limit(),
}
.into_eth_err());
}
ethres => ethres?,
};
let gas_refund = match res.result {
ExecutionResult::Success { gas_refunded, .. } => gas_refunded,
ExecutionResult::Halt { reason, .. } => {
return Err(Self::Error::from_evm_halt(reason, tx_env.gas_limit()));
}
ExecutionResult::Revert { output, .. } => {
return if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() {
Self::map_out_of_gas_err(&mut evm, tx_env, max_gas_limit)
} else {
Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err())
};
}
};
highest_gas_limit = tx_env.gas_limit();
let mut gas_used = res.result.gas_used();
let mut lowest_gas_limit = gas_used.saturating_sub(1);
let optimistic_gas_limit = (gas_used + gas_refund + CALL_STIPEND_GAS) * 64 / 63;
if optimistic_gas_limit < highest_gas_limit {
let mut optimistic_tx_env = tx_env.clone();
optimistic_tx_env.set_gas_limit(optimistic_gas_limit);
res = evm.transact(optimistic_tx_env).map_err(Self::Error::from_evm_err)?;
gas_used = res.result.gas_used();
update_estimated_gas_range(
res.result,
optimistic_gas_limit,
&mut highest_gas_limit,
&mut lowest_gas_limit,
)?;
};
let mut mid_gas_limit = std::cmp::min(
gas_used * 3,
((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64,
);
trace!(target: "rpc::eth::estimate", ?highest_gas_limit, ?lowest_gas_limit, ?mid_gas_limit, "Starting binary search for gas");
while lowest_gas_limit + 1 < highest_gas_limit {
if (highest_gas_limit - lowest_gas_limit) as f64 / (highest_gas_limit as f64) <
ESTIMATE_GAS_ERROR_RATIO
{
break;
};
let mut mid_tx_env = tx_env.clone();
mid_tx_env.set_gas_limit(mid_gas_limit);
match evm.transact(mid_tx_env).map_err(Self::Error::from_evm_err) {
Err(err) if err.is_gas_too_high() => {
highest_gas_limit = mid_gas_limit;
}
Err(err) if err.is_gas_too_low() => {
lowest_gas_limit = mid_gas_limit;
}
ethres => {
res = ethres?;
update_estimated_gas_range(
res.result,
mid_gas_limit,
&mut highest_gas_limit,
&mut lowest_gas_limit,
)?;
}
}
mid_gas_limit = ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64;
}
Ok(U256::from(highest_gas_limit))
}
}
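For orientation, the narrowing logic above is ordinary bisection over the gas limit (the optimistic 64/63 probe aside). A toy model with a synthetic success predicate in place of EVM execution; the error-ratio constant here is illustrative, the real one is reth's ESTIMATE_GAS_ERROR_RATIO:

/// Toy model of the gas bisection in `estimate_gas_with` above.
/// `succeeds` stands in for executing the transaction at a given gas limit.
fn bisect_gas(mut lowest: u64, mut highest: u64, succeeds: impl Fn(u64) -> bool) -> u64 {
    const ERROR_RATIO: f64 = 0.015; // illustrative stand-in

    while lowest + 1 < highest {
        // Stop early once the remaining window is within the error ratio.
        if (highest - lowest) as f64 / (highest as f64) < ERROR_RATIO {
            break;
        }
        let mid = ((highest as u128 + lowest as u128) / 2) as u64;
        if succeeds(mid) {
            highest = mid; // mid was enough gas: tighten the upper bound
        } else {
            lowest = mid; // mid ran out of gas: raise the lower bound
        }
    }
    highest
}

fn main() {
    // A call that needs exactly 142_371 gas converges in ~20 probes.
    let estimate = bisect_gas(21_000, 30_000_000, |gas| gas >= 142_371);
    assert!(estimate >= 142_371);
    println!("estimated gas limit: {estimate}");
}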

View File

@@ -1,45 +1,58 @@
use crate::{
HlBlock, HlPrimitives,
chainspec::HlChainSpec,
node::{evm::apply_precompiles, types::HlExtras},
};
use alloy_eips::BlockId;
use alloy_evm::Evm;
use alloy_network::Ethereum;
use alloy_primitives::U256;
use reth::{
api::{FullNodeTypes, HeaderTy, NodeTypes, PrimitivesTy},
builder::{
- rpc::{EthApiBuilder, EthApiCtx},
FullNodeComponents,
rpc::{EthApiBuilder, EthApiCtx},
},
rpc::{
- eth::{core::EthApiInner, DevSigner, FullEthApiServer},
eth::{DevSigner, FullEthApiServer, core::EthApiInner},
server_types::eth::{
- receipt::EthReceiptConverter, EthApiError, EthStateCache, FeeHistoryCache,
- GasPriceOracle,
EthApiError, EthStateCache, FeeHistoryCache, GasPriceOracle,
receipt::EthReceiptConverter,
},
},
tasks::{
- pool::{BlockingTaskGuard, BlockingTaskPool},
TaskSpawner,
pool::{BlockingTaskGuard, BlockingTaskPool},
},
};
- use reth_evm::ConfigureEvm;
use reth_evm::{ConfigureEvm, Database, EvmEnvFor, HaltReasonFor, InspectorFor, TxEnvFor};
use reth_primitives::NodePrimitives;
- use reth_provider::{ChainSpecProvider, ProviderHeader, ProviderTx};
use reth_provider::{
BlockReaderIdExt, ChainSpecProvider, ProviderError, ProviderHeader, ProviderTx,
};
use reth_rpc::RpcTypes;
use reth_rpc_eth_api::{
- helpers::{
- pending_block::BuildPendingEnv, spec::SignersForApi, AddDevSigners, EthApiSpec, EthFees,
- EthState, LoadFee, LoadState, SpawnBlocking, Trace,
- },
EthApiTypes, FromEvmError, RpcConvert, RpcConverter, RpcNodeCore, RpcNodeCoreExt,
SignableTxRequest,
helpers::{
AddDevSigners, EthApiSpec, EthFees, EthState, LoadFee, LoadPendingBlock, LoadState,
SpawnBlocking, Trace, pending_block::BuildPendingEnv, spec::SignersForApi,
},
};
use revm::context::result::ResultAndState;
use std::{fmt, marker::PhantomData, sync::Arc};
- use crate::chainspec::HlChainSpec;

mod block;
mod call;
pub mod engine_api;
mod estimate;
pub mod precompile;
mod transaction;

pub trait HlRpcNodeCore: RpcNodeCore<Primitives: NodePrimitives<Block = HlBlock>> {}

/// Container type `HlEthApi`
- pub(crate) struct HlEthApiInner<N: RpcNodeCore, Rpc: RpcConvert> {
pub(crate) struct HlEthApiInner<N: HlRpcNodeCore, Rpc: RpcConvert> {
/// Gateway to node's core components.
pub(crate) eth_api: EthApiInner<N, Rpc>,
}
@@ -47,15 +60,20 @@ pub(crate) struct HlEthApiInner<N: RpcNodeCore, Rpc: RpcConvert> {
type HlRpcConvert<N, NetworkT> =
RpcConverter<NetworkT, <N as FullNodeComponents>::Evm, EthReceiptConverter<HlChainSpec>>;

- #[derive(Clone)]
- pub struct HlEthApi<N: RpcNodeCore, Rpc: RpcConvert> {
pub struct HlEthApi<N: HlRpcNodeCore, Rpc: RpcConvert> {
/// Gateway to node's core components.
pub(crate) inner: Arc<HlEthApiInner<N, Rpc>>,
}

impl<N: HlRpcNodeCore, Rpc: RpcConvert> Clone for HlEthApi<N, Rpc> {
fn clone(&self) -> Self {
Self { inner: self.inner.clone() }
}
}

impl<N, Rpc> fmt::Debug for HlEthApi<N, Rpc>
where
- N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -65,8 +83,8 @@ where
impl<N, Rpc> EthApiTypes for HlEthApi<N, Rpc>
where
- N: RpcNodeCore,
- Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
type Error = EthApiError;
type NetworkTypes = Rpc::Network;
@@ -79,7 +97,7 @@ where
impl<N, Rpc> RpcNodeCore for HlEthApi<N, Rpc>
where
- N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
type Primitives = N::Primitives;
@@ -111,7 +129,7 @@ where
impl<N, Rpc> RpcNodeCoreExt for HlEthApi<N, Rpc>
where
- N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
#[inline]
@@ -122,7 +140,7 @@ where
impl<N, Rpc> EthApiSpec for HlEthApi<N, Rpc>
where
- N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
type Transaction = ProviderTx<Self::Provider>;
@@ -141,8 +159,8 @@ where
impl<N, Rpc> SpawnBlocking for HlEthApi<N, Rpc>
where
- N: RpcNodeCore,
- Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
#[inline]
fn io_task_spawner(&self) -> impl TaskSpawner {
@@ -162,7 +180,7 @@ where
impl<N, Rpc> LoadFee for HlEthApi<N, Rpc>
where
- N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
@@ -179,15 +197,17 @@ where
impl<N, Rpc> LoadState for HlEthApi<N, Rpc>
where
- N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
Self: LoadPendingBlock,
{
}

impl<N, Rpc> EthState for HlEthApi<N, Rpc>
where
- N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
Self: LoadPendingBlock,
{
#[inline]
fn max_proof_window(&self) -> u64 {
@@ -197,7 +217,7 @@ where
impl<N, Rpc> EthFees for HlEthApi<N, Rpc>
where
- N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
@@ -205,15 +225,50 @@ where
impl<N, Rpc> Trace for HlEthApi<N, Rpc>
where
- N: RpcNodeCore,
N: HlRpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn inspect<DB, I>(
&self,
db: DB,
evm_env: EvmEnvFor<Self::Evm>,
tx_env: TxEnvFor<Self::Evm>,
inspector: I,
) -> Result<ResultAndState<HaltReasonFor<Self::Evm>>, Self::Error>
where
DB: Database<Error = ProviderError>,
I: InspectorFor<Self::Evm, DB>,
{
let block_number = evm_env.block_env().number;
let hl_extras = self.get_hl_extras(block_number.to::<u64>().into())?;
let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, inspector);
apply_precompiles(&mut evm, &hl_extras);
evm.transact(tx_env).map_err(Self::Error::from_evm_err)
}
}
impl<N, Rpc> HlEthApi<N, Rpc>
where
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn get_hl_extras(&self, block: BlockId) -> Result<HlExtras, ProviderError> {
Ok(self
.provider()
.block_by_id(block)?
.map(|block| HlExtras {
read_precompile_calls: block.body.read_precompile_calls.clone(),
highest_precompile_address: block.body.highest_precompile_address,
})
.unwrap_or_default())
}
}

impl<N, Rpc> AddDevSigners for HlEthApi<N, Rpc>
where
- N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<
Network: RpcTypes<TransactionRequest: SignableTxRequest<ProviderTx<N::Provider>>>,
>,
@@ -239,7 +294,7 @@ impl<NetworkT> Default for HlEthApiBuilder<NetworkT> {
impl<N, NetworkT> EthApiBuilder<N> for HlEthApiBuilder<NetworkT>
where
- N: FullNodeComponents<Types: NodeTypes<ChainSpec = HlChainSpec>>
N: FullNodeComponents<Types: NodeTypes<ChainSpec = HlChainSpec, Primitives = HlPrimitives>>
+ RpcNodeCore<
Primitives = PrimitivesTy<N::Types>,
Evm: ConfigureEvm<NextBlockEnvCtx: BuildPendingEnv<HeaderTy<N::Types>>>,

View File

@@ -0,0 +1,44 @@
use alloy_eips::BlockId;
use jsonrpsee::proc_macros::rpc;
use jsonrpsee_core::{RpcResult, async_trait};
use reth_rpc_convert::RpcConvert;
use reth_rpc_eth_types::EthApiError;
use tracing::trace;
use crate::node::{
rpc::{HlEthApi, HlRpcNodeCore},
types::HlExtras,
};
/// A custom RPC trait for fetching block precompile data.
#[rpc(server, namespace = "eth")]
#[async_trait]
pub trait HlBlockPrecompileApi {
/// Fetches precompile data for a given block.
#[method(name = "blockPrecompileData")]
async fn block_precompile_data(&self, block: BlockId) -> RpcResult<HlExtras>;
}
pub struct HlBlockPrecompileExt<N: HlRpcNodeCore, Rpc: RpcConvert> {
eth_api: HlEthApi<N, Rpc>,
}
impl<N: HlRpcNodeCore, Rpc: RpcConvert> HlBlockPrecompileExt<N, Rpc> {
/// Creates a new instance of the [`HlBlockPrecompileExt`].
pub fn new(eth_api: HlEthApi<N, Rpc>) -> Self {
Self { eth_api }
}
}
#[async_trait]
impl<N, Rpc> HlBlockPrecompileApiServer for HlBlockPrecompileExt<N, Rpc>
where
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
async fn block_precompile_data(&self, block: BlockId) -> RpcResult<HlExtras> {
trace!(target: "rpc::eth", ?block, "Serving eth_blockPrecompileData");
let hl_extras = self.eth_api.get_hl_extras(block).map_err(EthApiError::from)?;
Ok(hl_extras)
}
}
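Once the extension is merged into the node's RPC modules, the method is callable like any other `eth_` endpoint. A hypothetical client-side sketch using jsonrpsee's HTTP client (the endpoint URL and block tag are examples; the response deserializes from the serde representation of `HlExtras`, shown here as raw JSON):

use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params};

#[tokio::main]
async fn main() -> eyre::Result<()> {
    // Example endpoint; point this at a node running with the extension installed.
    let client = HttpClientBuilder::default().build("http://localhost:8545")?;

    // Fetch the precompile data recorded for the latest block.
    let extras: serde_json::Value =
        client.request("eth_blockPrecompileData", rpc_params!["latest"]).await?;
    println!("{extras:#}");
    Ok(())
}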

View File

@@ -1,21 +1,23 @@
- use crate::node::rpc::HlEthApi;
- use alloy_primitives::{Bytes, B256};
use std::time::Duration;

use crate::node::rpc::{HlEthApi, HlRpcNodeCore};
use alloy_primitives::{B256, Bytes};
use reth::rpc::server_types::eth::EthApiError;
use reth_rpc_eth_api::{
- helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction},
- RpcConvert, RpcNodeCore,
RpcConvert,
helpers::{EthTransactions, LoadTransaction, spec::SignersForRpc},
};

impl<N, Rpc> LoadTransaction for HlEthApi<N, Rpc>
where
- N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
}

impl<N, Rpc> EthTransactions for HlEthApi<N, Rpc>
where
- N: RpcNodeCore,
N: HlRpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
fn signers(&self) -> &SignersForRpc<Self::Provider, Self::NetworkTypes> {
@@ -25,4 +27,8 @@ where
async fn send_raw_transaction(&self, _tx: Bytes) -> Result<B256, Self::Error> {
unreachable!()
}
fn send_raw_transaction_sync_timeout(&self) -> Duration {
self.inner.eth_api.send_raw_transaction_sync_timeout()
}
}

106
src/node/spot_meta/init.rs Normal file
View File

@@ -0,0 +1,106 @@
use crate::node::{
spot_meta::{SpotId, erc20_contract_to_spot_token},
storage::tables::{self, SPOT_METADATA_KEY},
types::reth_compat,
};
use alloy_primitives::{Address, Bytes};
use reth_db::{DatabaseEnv, cursor::{DbCursorRO, DbCursorRW}};
use reth_db_api::{Database, transaction::{DbTx, DbTxMut}};
use std::{collections::BTreeMap, sync::Arc};
use tracing::info;
/// Load spot metadata from database and initialize cache
pub fn load_spot_metadata_cache(db: &Arc<DatabaseEnv>, chain_id: u64) {
// Try to read from database
let data = match db.view(|tx| -> Result<Option<Vec<u8>>, reth_db::DatabaseError> {
let mut cursor = tx.cursor_read::<tables::SpotMetadata>()?;
Ok(cursor.seek_exact(SPOT_METADATA_KEY)?.map(|(_, data)| data.to_vec()))
}) {
Ok(Ok(data)) => data,
Ok(Err(e)) => {
info!("Failed to read spot metadata from database: {}. Will fetch on-demand from API.", e);
return;
}
Err(e) => {
info!("Database view error while loading spot metadata: {}. Will fetch on-demand from API.", e);
return;
}
};
// Check if data exists
let Some(data) = data else {
info!(
"No spot metadata found in database for chain {}. Run 'init-state' to populate, or it will be fetched on-demand from API.",
chain_id
);
return;
};
// Deserialize metadata
let serializable_map = match rmp_serde::from_slice::<BTreeMap<Address, u64>>(&data) {
Ok(map) => map,
Err(e) => {
info!("Failed to deserialize spot metadata: {}. Will fetch on-demand from API.", e);
return;
}
};
// Convert and initialize cache
let metadata: BTreeMap<Address, SpotId> = serializable_map
.into_iter()
.map(|(addr, index)| (addr, SpotId { index }))
.collect();
info!("Loaded spot metadata from database ({} entries)", metadata.len());
reth_compat::initialize_spot_metadata_cache(metadata);
}
/// Initialize spot metadata in database from API
pub fn init_spot_metadata(
db_path: impl AsRef<std::path::Path>,
db_args: reth_db::mdbx::DatabaseArguments,
chain_id: u64,
) -> eyre::Result<()> {
info!("Initializing spot metadata for chain {}", chain_id);
let db = Arc::new(reth_db::open_db(db_path.as_ref(), db_args)?);
// Check if spot metadata already exists
let exists = db.view(|tx| -> Result<bool, reth_db::DatabaseError> {
let mut cursor = tx.cursor_read::<tables::SpotMetadata>()?;
Ok(cursor.seek_exact(SPOT_METADATA_KEY)?.is_some())
})??;
if exists {
info!("Spot metadata already exists in database");
return Ok(());
}
// Fetch from API
let metadata = match erc20_contract_to_spot_token(chain_id) {
Ok(m) => m,
Err(e) => {
info!("Failed to fetch spot metadata from API: {}. Will be fetched on-demand.", e);
return Ok(());
}
};
// Serialize and store
let serializable_map: BTreeMap<Address, u64> =
metadata.iter().map(|(addr, spot)| (*addr, spot.index)).collect();
db.update(|tx| -> Result<(), reth_db::DatabaseError> {
let mut cursor = tx.cursor_write::<tables::SpotMetadata>()?;
cursor.upsert(
SPOT_METADATA_KEY,
&Bytes::from(
rmp_serde::to_vec(&serializable_map)
.expect("Failed to serialize spot metadata"),
),
)?;
Ok(())
})??;
info!("Successfully fetched and stored spot metadata for chain {}", chain_id);
Ok(())
}
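Taken together with the cache hooks re-exported from `types` (see `initialize_spot_metadata_cache` and `set_spot_metadata_db` below), a plausible startup sequence looks like the following sketch. The path handling, `DatabaseArguments::default()`, and error handling are assumptions for illustration, not code from this repository:

use std::{path::Path, sync::Arc};

use reth_db::mdbx::DatabaseArguments;

// Hypothetical wiring; only the three called functions come from this diff.
fn wire_spot_metadata(db_path: &Path, chain_id: u64) -> eyre::Result<()> {
    // One-time population from the API (skipped if the table already has data).
    init_spot_metadata(db_path, DatabaseArguments::default(), chain_id)?;

    // Open the env for the running node and warm the in-memory cache.
    let db = Arc::new(reth_db::open_db(db_path, DatabaseArguments::default())?);
    load_spot_metadata_cache(&db, chain_id);

    // Let later on-demand API fetches persist their results.
    set_spot_metadata_db(db);
    Ok(())
}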

View File

@@ -5,6 +5,9 @@ use std::collections::BTreeMap;
use crate::chainspec::{MAINNET_CHAIN_ID, TESTNET_CHAIN_ID};

pub mod init;
mod patch;

#[derive(Debug, Clone, Serialize, Deserialize)]
struct EvmContract {
address: Address,
@@ -23,7 +26,7 @@ pub struct SpotMeta {
}

#[derive(Debug, Clone)]
- pub(crate) struct SpotId {
pub struct SpotId {
pub index: u64,
}
@@ -58,5 +61,10 @@ pub(crate) fn erc20_contract_to_spot_token(chain_id: u64) -> Result<BTreeMap<Add
map.insert(evm_contract.address, SpotId { index: token.index });
}
}

if chain_id == TESTNET_CHAIN_ID {
patch::patch_testnet_spot_meta(&mut map);
}

Ok(map)
}

View File

@@ -0,0 +1,8 @@
use crate::node::spot_meta::SpotId;
use alloy_primitives::{Address, address};
use std::collections::BTreeMap;
/// Testnet-specific fix for #67
pub(super) fn patch_testnet_spot_meta(map: &mut BTreeMap<Address, SpotId>) {
map.insert(address!("0xd9cbec81df392a88aeff575e962d149d57f4d6bc"), SpotId { index: 0 });
}

View File

@@ -1,29 +1,27 @@
use crate::{
- node::{
- primitives::tx_wrapper::{convert_to_eth_block_body, convert_to_hl_block_body},
- types::HlExtras,
- },
- HlBlock, HlBlockBody, HlPrimitives,
HlBlock, HlBlockBody, HlHeader, HlPrimitives,
node::{primitives::TransactionSigned, types::HlExtras},
};
use alloy_consensus::BlockHeader;
use alloy_primitives::Bytes;
use reth_chainspec::EthereumHardforks;
use reth_db::{
DbTxUnwindExt,
cursor::{DbCursorRO, DbCursorRW},
transaction::{DbTx, DbTxMut},
- DbTxUnwindExt,
};
use reth_primitives_traits::Block;
use reth_provider::{
BlockBodyReader, BlockBodyWriter, ChainSpecProvider, ChainStorageReader, ChainStorageWriter,
DBProvider, DatabaseProvider, EthStorage, ProviderResult, ReadBodyInput, StorageLocation,
providers::{ChainStorage, NodeTypesForProvider},
};

pub mod tables;

#[derive(Debug, Clone, Default)]
#[non_exhaustive]
- pub struct HlStorage(EthStorage);
pub struct HlStorage(EthStorage<TransactionSigned, HlHeader>);

impl HlStorage {
fn write_precompile_calls<Provider>(
@@ -89,30 +87,17 @@ where
let mut read_precompile_calls = Vec::with_capacity(bodies.len());

for (block_number, body) in bodies {
- match body {
let (inner_opt, extras) = match body {
Some(HlBlockBody {
inner,
sidecars: _,
- read_precompile_calls: rpc,
read_precompile_calls,
highest_precompile_address,
- }) => {
- eth_bodies.push((block_number, Some(convert_to_eth_block_body(inner))));
- read_precompile_calls.push((
- block_number,
- HlExtras { read_precompile_calls: rpc, highest_precompile_address },
- ));
- }
- None => {
- eth_bodies.push((block_number, None));
- read_precompile_calls.push((
- block_number,
- HlExtras {
- read_precompile_calls: Default::default(),
- highest_precompile_address: None,
- },
- ));
- }
- }
}) => (Some(inner), HlExtras { read_precompile_calls, highest_precompile_address }),
None => Default::default(),
};
eth_bodies.push((block_number, inner_opt));
read_precompile_calls.push((block_number, extras));
}

self.0.write_block_bodies(provider, eth_bodies, write_to)?;
@@ -146,22 +131,16 @@ where
inputs: Vec<ReadBodyInput<'_, Self::Block>>,
) -> ProviderResult<Vec<HlBlockBody>> {
let read_precompile_calls = self.read_precompile_calls(provider, &inputs)?;
- let eth_bodies = self.0.read_block_bodies(
- provider,
- inputs
- .into_iter()
- .map(|(header, transactions)| {
- (header, transactions.into_iter().map(|tx| tx.into_inner()).collect())
- })
- .collect(),
- )?;
let inputs: Vec<(&<Self::Block as Block>::Header, _)> = inputs;
let eth_bodies = self.0.read_block_bodies(provider, inputs)?;
let eth_bodies: Vec<alloy_consensus::BlockBody<_, HlHeader>> = eth_bodies;

// NOTE: sidecars are not used in HyperEVM yet.
Ok(eth_bodies
.into_iter()
.zip(read_precompile_calls)
.map(|(inner, extra)| HlBlockBody {
- inner: convert_to_hl_block_body(inner),
inner,
sidecars: None,
read_precompile_calls: extra.read_precompile_calls,
highest_precompile_address: extra.highest_precompile_address,

View File

@@ -1,11 +1,22 @@
use alloy_primitives::{BlockNumber, Bytes};
- use reth_db::{table::TableInfo, tables, TableSet, TableType, TableViewer};
use reth_db::{TableSet, TableType, TableViewer, table::TableInfo, tables};
use std::fmt;

/// Static key used for spot metadata, as the database is unique to each chain.
/// This may later serve as a versioning key to assist with future database migrations.
pub const SPOT_METADATA_KEY: u64 = 0;

tables! {
/// Read precompile calls for each block.
table BlockReadPrecompileCalls {
type Key = BlockNumber;
type Value = Bytes;
}

/// Spot metadata mapping (EVM address to spot token index).
/// Uses a constant key since the database is chain-specific.
table SpotMetadata {
type Key = u64;
type Value = Bytes;
}
}

View File

@@ -2,26 +2,39 @@
//!
//! Changes:
//! - ReadPrecompileCalls supports RLP encoding / decoding

- use alloy_primitives::{Address, Bytes, Log, B256};
use alloy_consensus::TxType;
use alloy_primitives::{Address, B256, Bytes, Log};
use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable};
use bytes::BufMut;
use reth_ethereum_primitives::EthereumReceipt;
use reth_primitives_traits::InMemorySize;
use serde::{Deserialize, Serialize};

use crate::HlBlock;

pub type ReadPrecompileCall = (Address, Vec<(ReadPrecompileInput, ReadPrecompileResult)>);

- #[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Default)]
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Default, Hash)]
pub struct ReadPrecompileCalls(pub Vec<ReadPrecompileCall>);

pub(crate) mod reth_compat;

// Re-export spot metadata functions
pub use reth_compat::{initialize_spot_metadata_cache, set_spot_metadata_db};

#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct HlExtras {
pub read_precompile_calls: Option<ReadPrecompileCalls>,
pub highest_precompile_address: Option<Address>,
}

impl InMemorySize for HlExtras {
fn size(&self) -> usize {
self.read_precompile_calls.as_ref().map_or(0, |s| s.0.len()) +
self.highest_precompile_address.as_ref().map_or(0, |_| 20)
}
}

impl Encodable for ReadPrecompileCalls {
fn encode(&self, out: &mut dyn BufMut) {
let buf: Bytes = rmp_serde::to_vec(&self.0).unwrap().into();
@@ -56,6 +69,7 @@ impl BlockAndReceipts {
self.read_precompile_calls.clone(),
self.highest_precompile_address,
self.system_txs.clone(),
self.receipts.clone(),
chain_id,
)
}
@@ -84,6 +98,23 @@ pub struct LegacyReceipt {
logs: Vec<Log>,
}
impl From<LegacyReceipt> for EthereumReceipt {
fn from(r: LegacyReceipt) -> Self {
EthereumReceipt {
tx_type: match r.tx_type {
LegacyTxType::Legacy => TxType::Legacy,
LegacyTxType::Eip2930 => TxType::Eip2930,
LegacyTxType::Eip1559 => TxType::Eip1559,
LegacyTxType::Eip4844 => TxType::Eip4844,
LegacyTxType::Eip7702 => TxType::Eip7702,
},
success: r.success,
cumulative_gas_used: r.cumulative_gas_used,
logs: r.logs,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
enum LegacyTxType {
Legacy = 0,
@@ -99,6 +130,19 @@ pub struct SystemTx {
pub receipt: Option<LegacyReceipt>,
}
impl SystemTx {
pub fn gas_limit(&self) -> u64 {
use reth_compat::Transaction;
match &self.tx {
Transaction::Legacy(tx) => tx.gas_limit,
Transaction::Eip2930(tx) => tx.gas_limit,
Transaction::Eip1559(tx) => tx.gas_limit,
Transaction::Eip4844(tx) => tx.gas_limit,
Transaction::Eip7702(tx) => tx.gas_limit,
}
}
}
#[derive(
Debug,
Clone,
@@ -117,7 +161,7 @@ pub struct ReadPrecompileInput {
pub gas_limit: u64,
}

- #[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Hash)]
pub enum ReadPrecompileResult {
Ok { gas_used: u64, bytes: Bytes },
OutOfGas,

View File

@@ -1,21 +1,25 @@
//! Copy of reth codebase to preserve serialization compatibility

use crate::chainspec::TESTNET_CHAIN_ID;
use crate::node::storage::tables::{SPOT_METADATA_KEY, SpotMetadata};
use alloy_consensus::{Header, Signed, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy};
- use alloy_primitives::{Address, BlockHash, Signature, TxKind, U256};
use alloy_primitives::{Address, BlockHash, Bytes, Signature, TxKind, U256};
use reth_db::cursor::DbCursorRW;
use reth_db_api::{Database, transaction::DbTxMut};
use reth_primitives::TransactionSigned as RethTxSigned;
use serde::{Deserialize, Serialize};
use std::{
collections::BTreeMap,
- sync::{Arc, LazyLock, RwLock},
sync::{Arc, LazyLock, Mutex, RwLock},
};
use tracing::info;

use crate::{
HlBlock, HlBlockBody, HlHeader,
node::{
primitives::TransactionSigned as TxSigned,
- spot_meta::{erc20_contract_to_spot_token, SpotId},
spot_meta::{SpotId, erc20_contract_to_spot_token},
- types::{ReadPrecompileCalls, SystemTx},
types::{LegacyReceipt, ReadPrecompileCalls, SystemTx},
},
- HlBlock, HlBlockBody,
};

/// A raw transaction.
@@ -81,55 +85,119 @@ pub struct SealedBlock {
pub body: BlockBody,
}

- fn system_tx_to_reth_transaction(transaction: &SystemTx, chain_id: u64) -> TxSigned {
- static EVM_MAP: LazyLock<Arc<RwLock<BTreeMap<Address, SpotId>>>> =
- LazyLock::new(|| Arc::new(RwLock::new(BTreeMap::new())));
- let Transaction::Legacy(tx) = &transaction.tx else {
- panic!("Unexpected transaction type");
- };
- let TxKind::Call(to) = tx.to else {
- panic!("Unexpected contract creation");
- };
- let s = if tx.input.is_empty() {
- U256::from(0x1)
- } else {
- loop {
- if let Some(spot) = EVM_MAP.read().unwrap().get(&to) {
- break spot.to_s();
- }
- info!("Contract not found: {to:?} from spot mapping, fetching again...");
- *EVM_MAP.write().unwrap() = erc20_contract_to_spot_token(chain_id).unwrap();
- }
- };
- let signature = Signature::new(U256::from(0x1), s, true);
- TxSigned::Default(RethTxSigned::Legacy(Signed::new_unhashed(tx.clone(), signature)))
- }

static EVM_MAP: LazyLock<Arc<RwLock<BTreeMap<Address, SpotId>>>> =
LazyLock::new(|| Arc::new(RwLock::new(BTreeMap::new())));

// Optional database handle for persisting on-demand fetches
static DB_HANDLE: LazyLock<Mutex<Option<Arc<reth_db::DatabaseEnv>>>> =
LazyLock::new(|| Mutex::new(None));

/// Set the database handle for persisting spot metadata
pub fn set_spot_metadata_db(db: Arc<reth_db::DatabaseEnv>) {
*DB_HANDLE.lock().unwrap() = Some(db);
}
/// Initialize the spot metadata cache with data loaded from database.
/// This should be called during node initialization.
pub fn initialize_spot_metadata_cache(metadata: BTreeMap<Address, SpotId>) {
*EVM_MAP.write().unwrap() = metadata;
}
/// Persist spot metadata to database if handle is available
fn persist_spot_metadata_to_db(metadata: &BTreeMap<Address, SpotId>) {
if let Some(db) = DB_HANDLE.lock().unwrap().as_ref() {
let result = db.update(|tx| -> Result<(), reth_db::DatabaseError> {
let mut cursor = tx.cursor_write::<SpotMetadata>()?;
// Serialize to BTreeMap<Address, u64>
let serializable_map: BTreeMap<Address, u64> =
metadata.iter().map(|(addr, spot)| (*addr, spot.index)).collect();
cursor.upsert(
SPOT_METADATA_KEY,
&Bytes::from(
rmp_serde::to_vec(&serializable_map)
.expect("Failed to serialize spot metadata"),
),
)?;
Ok(())
});
match result {
Ok(_) => info!("Persisted spot metadata to database"),
Err(e) => info!("Failed to persist spot metadata to database: {}", e),
}
}
}
fn system_tx_to_reth_transaction(transaction: &SystemTx, chain_id: u64) -> TxSigned {
let Transaction::Legacy(tx) = &transaction.tx else {
panic!("Unexpected transaction type");
};
let TxKind::Call(to) = tx.to else {
panic!("Unexpected contract creation");
};
let s = if tx.input.is_empty() {
U256::from(0x1)
} else {
loop {
if let Some(spot) = EVM_MAP.read().unwrap().get(&to) {
break spot.to_s();
}
// Cache miss - fetch from API, update cache, and persist to database
info!("Contract not found: {to:?} from spot mapping, fetching from API...");
let metadata = erc20_contract_to_spot_token(chain_id).unwrap();
*EVM_MAP.write().unwrap() = metadata.clone();
persist_spot_metadata_to_db(&metadata);
}
};
let signature = Signature::new(U256::from(0x1), s, true);
TxSigned::Default(RethTxSigned::Legacy(Signed::new_unhashed(tx.clone(), signature)))
}
impl SealedBlock {
pub fn to_reth_block(
&self,
read_precompile_calls: ReadPrecompileCalls,
highest_precompile_address: Option<Address>,
- system_txs: Vec<super::SystemTx>,
mut system_txs: Vec<super::SystemTx>,
receipts: Vec<LegacyReceipt>,
chain_id: u64,
) -> HlBlock {
// NOTE: Filter out system transactions that may be rejected by the EVM (tracked by #97,
// testnet only).
if chain_id == TESTNET_CHAIN_ID {
system_txs = system_txs.into_iter().filter(|tx| tx.receipt.is_some()).collect();
}

let mut merged_txs = vec![];
merged_txs.extend(system_txs.iter().map(|tx| system_tx_to_reth_transaction(tx, chain_id)));
merged_txs.extend(self.body.transactions.iter().map(|tx| tx.to_reth_transaction()));

let mut merged_receipts = vec![];
merged_receipts.extend(system_txs.iter().map(|tx| tx.receipt.clone().unwrap().into()));
merged_receipts.extend(receipts.into_iter().map(From::from));

let block_body = HlBlockBody {
inner: reth_primitives::BlockBody {
transactions: merged_txs,
withdrawals: self.body.withdrawals.clone(),
- ommers: self.body.ommers.clone(),
ommers: vec![],
},
sidecars: None,
read_precompile_calls: Some(read_precompile_calls),
highest_precompile_address,
};

- HlBlock { header: self.header.header.clone(), body: block_body }
let system_tx_count = system_txs.len() as u64;
HlBlock {
header: HlHeader::from_ethereum_header(
self.header.header.clone(),
&merged_receipts,
system_tx_count,
),
body: block_body,
}
}
}

View File

@@ -46,7 +46,7 @@ impl BlockSourceConfig {
.expect("home dir not found")
.join("hl")
.join("data")
- .join("evm_blocks_and_receipts"),
.join("evm_block_and_receipts"),
},
block_source_from_node: None,
}

View File

@@ -12,7 +12,7 @@ pub mod utils;
use std::sync::Arc;
use tokio::sync::mpsc;
- use tracing::info;
use tracing::{error, info};

pub use cli::*;
pub use config::*;
@@ -37,6 +37,7 @@ pub async fn start_pseudo_peer(
chain_spec: Arc<HlChainSpec>,
destination_peer: String,
block_source: BlockSourceBoxed,
debug_cutoff_height: Option<u64>,
) -> eyre::Result<()> {
let blockhash_cache = new_blockhash_cache();
@@ -46,6 +47,7 @@ pub async fn start_pseudo_peer(
destination_peer,
block_source.clone(),
blockhash_cache.clone(),
debug_cutoff_height,
)
.await?;
@@ -78,8 +80,11 @@ pub async fn start_pseudo_peer(
_ = transaction_rx.recv() => {}
Some(eth_req) = eth_rx.recv() => {
- service.process_eth_request(eth_req).await?;
- info!("Processed eth request");
if let Err(e) = service.process_eth_request(eth_req).await {
error!("Error processing eth request: {e:?}");
} else {
info!("Processed eth request");
}
}
}
}

View File

@@ -1,8 +1,8 @@
use super::service::{BlockHashCache, BlockPoller};
- use crate::{chainspec::HlChainSpec, node::network::HlNetworkPrimitives, HlPrimitives};
use crate::{HlPrimitives, chainspec::HlChainSpec, node::network::HlNetworkPrimitives};
use reth_network::{
- config::{rng_secret_key, SecretKey},
NetworkConfig, NetworkManager, PeersConfig,
config::{SecretKey, rng_secret_key},
};
use reth_network_peers::TrustedPeer;
use reth_provider::test_utils::NoopProvider;
@@ -20,6 +20,7 @@ pub struct NetworkBuilder {
discovery_port: u16,
listener_port: u16,
chain_spec: HlChainSpec,
debug_cutoff_height: Option<u64>,
}

impl Default for NetworkBuilder {
@@ -31,6 +32,7 @@ impl Default for NetworkBuilder {
discovery_port: 0,
listener_port: 0,
chain_spec: HlChainSpec::default(),
debug_cutoff_height: None,
}
}
}
@@ -46,6 +48,11 @@ impl NetworkBuilder {
self
}

pub fn with_debug_cutoff_height(mut self, debug_cutoff_height: Option<u64>) -> Self {
self.debug_cutoff_height = debug_cutoff_height;
self
}

pub async fn build<BS>(
self,
block_source: Arc<Box<dyn super::sources::BlockSource>>,
@@ -58,8 +65,12 @@ impl NetworkBuilder {
.listener_addr(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), self.listener_port));
let chain_id = self.chain_spec.inner.chain().id();

- let (block_poller, start_tx) =
- BlockPoller::new_suspended(chain_id, block_source, blockhash_cache);
let (block_poller, start_tx) = BlockPoller::new_suspended(
chain_id,
block_source,
blockhash_cache,
self.debug_cutoff_height,
);
let config = builder.block_import(Box::new(block_poller)).build(Arc::new(NoopProvider::<
HlChainSpec,
HlPrimitives,
@@ -77,10 +88,12 @@ pub async fn create_network_manager<BS>(
destination_peer: String,
block_source: Arc<Box<dyn super::sources::BlockSource>>,
blockhash_cache: BlockHashCache,
debug_cutoff_height: Option<u64>,
) -> eyre::Result<(NetworkManager<HlNetworkPrimitives>, mpsc::Sender<()>)> {
NetworkBuilder::default()
.with_boot_nodes(vec![TrustedPeer::from_str(&destination_peer).unwrap()])
.with_chain_spec(chain_spec)
.with_debug_cutoff_height(debug_cutoff_height)
.build::<BS>(block_source, blockhash_cache)
.await
}

View File

@@ -52,12 +52,12 @@ impl BlockPoller {
chain_id: u64,
block_source: BS,
blockhash_cache: BlockHashCache,
debug_cutoff_height: Option<u64>,
) -> (Self, mpsc::Sender<()>) {
let block_source = Arc::new(block_source);
let (start_tx, start_rx) = mpsc::channel(1);
let (block_tx, block_rx) = mpsc::channel(100);
- let block_tx_clone = block_tx.clone();
- let task = tokio::spawn(Self::task(start_rx, block_source, block_tx_clone));
let task = tokio::spawn(Self::task(start_rx, block_source, block_tx, debug_cutoff_height));
(Self { chain_id, block_rx, task, blockhash_cache: blockhash_cache.clone() }, start_tx)
}
@@ -69,7 +69,8 @@ impl BlockPoller {
async fn task<BS: BlockSource>(
mut start_rx: mpsc::Receiver<()>,
block_source: Arc<BS>,
- block_tx_clone: mpsc::Sender<(u64, BlockAndReceipts)>,
block_tx: mpsc::Sender<(u64, BlockAndReceipts)>,
debug_cutoff_height: Option<u64>,
) -> eyre::Result<()> {
start_rx.recv().await.ok_or(eyre::eyre!("Failed to receive start signal"))?;
info!("Starting block poller");
@@ -81,9 +82,15 @@ impl BlockPoller {
.ok_or(eyre::eyre!("Failed to find latest block number"))?;

loop {
if let Some(debug_cutoff_height) = debug_cutoff_height
&& next_block_number > debug_cutoff_height
{
next_block_number = debug_cutoff_height;
}

match block_source.collect_block(next_block_number).await {
Ok(block) => {
- block_tx_clone.send((next_block_number, block)).await?;
block_tx.send((next_block_number, block)).await?;
next_block_number += 1;
}
Err(_) => tokio::time::sleep(polling_interval).await,
@@ -152,13 +159,14 @@ impl<BS: BlockSource> PseudoPeer<BS> {
async fn collect_blocks(
&self,
block_numbers: impl IntoIterator<Item = u64>,
- ) -> Vec<BlockAndReceipts> {
) -> eyre::Result<Vec<BlockAndReceipts>> {
let block_numbers = block_numbers.into_iter().collect::<Vec<_>>();
- futures::stream::iter(block_numbers)
- .map(async |number| self.collect_block(number).await.unwrap())
let res = futures::stream::iter(block_numbers)
.map(async |number| self.collect_block(number).await)
.buffered(self.block_source.recommended_chunk_size() as usize)
.collect::<Vec<_>>()
- .await
.await;
res.into_iter().collect()
}

pub async fn process_eth_request(
@@ -185,7 +193,7 @@ impl<BS: BlockSource> PseudoPeer<BS> {
HeadersDirection::Falling => {
self.collect_blocks((number + 1 - limit..number + 1).rev()).await
}
- }
}?
.into_par_iter()
.map(|block| block.to_reth_block(chain_id).header.clone())
.collect::<Vec<_>>();
@@ -203,7 +211,7 @@ impl<BS: BlockSource> PseudoPeer<BS> {
let block_bodies = self
.collect_blocks(numbers)
- .await
.await?
.into_iter()
.map(|block| block.to_reth_block(chain_id).body)
.collect::<Vec<_>>();
@@ -340,7 +348,7 @@ impl<BS: BlockSource> PseudoPeer<BS> {
debug!("Backfilling from {start_number} to {end_number}");

// Collect blocks and cache them
- let blocks = self.collect_blocks(uncached_block_numbers).await;
let blocks = self.collect_blocks(uncached_block_numbers).await?;
let block_map: HashMap<B256, u64> =
blocks.into_iter().map(|block| (block.hash(), block.number())).collect();
let maybe_block_number = block_map.get(&target_hash).copied();

View File

@@ -1,6 +1,6 @@
use super::{BlockSource, BlockSourceBoxed};
use crate::node::types::BlockAndReceipts;
- use futures::{future::BoxFuture, FutureExt};
use futures::{FutureExt, future::BoxFuture};
use reth_network::cache::LruMap;
use std::sync::{Arc, RwLock};

View File

@@ -27,7 +27,7 @@ impl LocalBlocksCache {
}

pub fn get_block(&mut self, height: u64) -> Option<BlockAndReceipts> {
- self.cache.remove(&height)
self.cache.get(&height).cloned()
}

pub fn get_path_for_height(&self, height: u64) -> Option<PathBuf> {

View File

@@ -1,4 +1,4 @@
- use super::{scan::Scanner, time_utils::TimeUtils, HOURLY_SUBDIR};
use super::{HOURLY_SUBDIR, scan::Scanner, time_utils::TimeUtils};
use crate::node::types::BlockAndReceipts;
use std::{
fs::File,
@@ -17,12 +17,12 @@ impl FileOperations {
files.extend(
subentries
.filter_map(|f| f.ok().map(|f| f.path()))
- .filter(|p| TimeUtils::datetime_from_path(p).is_some()),
.filter_map(|p| TimeUtils::datetime_from_path(&p).map(|dt| (dt, p))),
);
}
}
files.sort();
- Some(files)
Some(files.into_iter().map(|(_, p)| p).collect())
}

pub fn find_latest_hourly_file(root: &Path) -> Option<PathBuf> {

View File

@@ -8,12 +8,13 @@ mod time_utils;
use self::{
cache::LocalBlocksCache,
file_ops::FileOperations,
- scan::{ScanOptions, Scanner},
scan::{LineStream, ScanOptions, Scanner},
time_utils::TimeUtils,
};
use super::{BlockSource, BlockSourceBoxed};
use crate::node::types::BlockAndReceipts;
use futures::future::BoxFuture;
use reth_metrics::{Metrics, metrics, metrics::Counter};
use std::{
path::{Path, PathBuf},
sync::Arc,
@@ -41,6 +42,18 @@ pub struct HlNodeBlockSource {
pub local_blocks_cache: Arc<Mutex<LocalBlocksCache>>,
pub last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>,
pub args: HlNodeBlockSourceArgs,
pub metrics: HlNodeBlockSourceMetrics,
}

#[derive(Metrics, Clone)]
#[metrics(scope = "block_source.hl_node")]
pub struct HlNodeBlockSourceMetrics {
/// How many times a block was served from local hl-node data
pub fetched_from_hl_node: Counter,
/// How many times a block was fetched from the fallback source
pub fetched_from_fallback: Counter,
/// How many times `try_collect_local_block` was faster than the ingest loop
pub file_read_triggered: Counter,
}
impl BlockSource for HlNodeBlockSource { impl BlockSource for HlNodeBlockSource {
@@ -49,11 +62,15 @@ impl BlockSource for HlNodeBlockSource {
let args = self.args.clone();
let local_blocks_cache = self.local_blocks_cache.clone();
let last_local_fetch = self.last_local_fetch.clone();
let metrics = self.metrics.clone();
Box::pin(async move {
let now = OffsetDateTime::now_utc();
- if let Some(block) = Self::try_collect_local_block(local_blocks_cache, height).await {
if let Some(block) =
Self::try_collect_local_block(&metrics, local_blocks_cache, height).await
{
Self::update_last_fetch(last_local_fetch, height, now).await;
metrics.fetched_from_hl_node.increment(1);
return Ok(block);
}
@@ -62,12 +79,13 @@ impl BlockSource for HlNodeBlockSource {
let too_soon = now - last_poll_time < args.fallback_threshold;
if more_recent && too_soon {
return Err(eyre::eyre!(
"Not found locally; limiting polling rate before fallback so that hl-node has chance to catch up"
));
}
}

let block = fallback.collect_block(height).await?;
metrics.fetched_from_fallback.increment(1);
Self::update_last_fetch(last_local_fetch, height, now).await;
Ok(block)
})
@@ -106,6 +124,28 @@ impl BlockSource for HlNodeBlockSource {
}
}
struct CurrentFile {
path: PathBuf,
line_stream: Option<LineStream>,
}
impl CurrentFile {
pub fn from_datetime(dt: OffsetDateTime, root: &Path) -> Self {
let (hour, day_str) = (dt.hour(), TimeUtils::date_from_datetime(dt));
let path = root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{}", hour));
Self { path, line_stream: None }
}
pub fn open(&mut self) -> eyre::Result<()> {
if self.line_stream.is_some() {
return Ok(());
}
self.line_stream = Some(LineStream::from_path(&self.path)?);
Ok(())
}
}
impl HlNodeBlockSource { impl HlNodeBlockSource {
async fn update_last_fetch( async fn update_last_fetch(
last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>, last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>,
@ -119,6 +159,7 @@ impl HlNodeBlockSource {
} }
async fn try_collect_local_block( async fn try_collect_local_block(
metrics: &HlNodeBlockSourceMetrics,
local_blocks_cache: Arc<Mutex<LocalBlocksCache>>, local_blocks_cache: Arc<Mutex<LocalBlocksCache>>,
height: u64, height: u64,
) -> Option<BlockAndReceipts> { ) -> Option<BlockAndReceipts> {
@ -128,9 +169,10 @@ impl HlNodeBlockSource {
} }
let path = u_cache.get_path_for_height(height)?; let path = u_cache.get_path_for_height(height)?;
info!("Loading block data from {:?}", path); info!("Loading block data from {:?}", path);
metrics.file_read_triggered.increment(1);
let mut line_stream = LineStream::from_path(&path).ok()?;
let scan_result = Scanner::scan_hour_file( let scan_result = Scanner::scan_hour_file(
&path, &mut line_stream,
&mut 0,
ScanOptions { start_height: 0, only_load_ranges: false }, ScanOptions { start_height: 0, only_load_ranges: false },
); );
u_cache.load_scan_result(scan_result); u_cache.load_scan_result(scan_result);
@ -151,9 +193,10 @@ impl HlNodeBlockSource {
} else { } else {
warn!("Failed to parse last line of file: {:?}", subfile); warn!("Failed to parse last line of file: {:?}", subfile);
} }
let mut line_stream =
LineStream::from_path(&subfile).expect("Failed to open line stream");
let mut scan_result = Scanner::scan_hour_file( let mut scan_result = Scanner::scan_hour_file(
&subfile, &mut line_stream,
&mut 0,
ScanOptions { start_height: cutoff_height, only_load_ranges: true }, ScanOptions { start_height: cutoff_height, only_load_ranges: true },
); );
scan_result.new_blocks.clear(); // Only store ranges, load data lazily scan_result.new_blocks.clear(); // Only store ranges, load data lazily
@ -174,15 +217,13 @@ impl HlNodeBlockSource {
} }
tokio::time::sleep(TAIL_INTERVAL).await; tokio::time::sleep(TAIL_INTERVAL).await;
}; };
let (mut hour, mut day_str, mut last_line) = let mut current_file = CurrentFile::from_datetime(dt, &root);
(dt.hour(), TimeUtils::date_from_datetime(dt), 0);
info!("Starting local ingest loop from height: {}", current_head); info!("Starting local ingest loop from height: {}", current_head);
loop { loop {
let hour_file = root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}")); let _ = current_file.open();
if hour_file.exists() { if let Some(line_stream) = &mut current_file.line_stream {
let scan_result = Scanner::scan_hour_file( let scan_result = Scanner::scan_hour_file(
&hour_file, line_stream,
&mut last_line,
ScanOptions { start_height: next_height, only_load_ranges: false }, ScanOptions { start_height: next_height, only_load_ranges: false },
); );
next_height = scan_result.next_expected_height; next_height = scan_result.next_expected_height;
@ -191,11 +232,8 @@ impl HlNodeBlockSource {
let now = OffsetDateTime::now_utc(); let now = OffsetDateTime::now_utc();
if dt + ONE_HOUR < now { if dt + ONE_HOUR < now {
dt += ONE_HOUR; dt += ONE_HOUR;
(hour, day_str, last_line) = (dt.hour(), TimeUtils::date_from_datetime(dt), 0); current_file = CurrentFile::from_datetime(dt, &root);
info!( info!("Moving to new file: {:?}", current_file.path);
"Moving to new file: {:?}",
root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"))
);
continue; continue;
} }
tokio::time::sleep(TAIL_INTERVAL).await; tokio::time::sleep(TAIL_INTERVAL).await;
@ -224,6 +262,7 @@ impl HlNodeBlockSource {
args, args,
local_blocks_cache: Arc::new(Mutex::new(LocalBlocksCache::new(CACHE_SIZE))), local_blocks_cache: Arc::new(Mutex::new(LocalBlocksCache::new(CACHE_SIZE))),
last_local_fetch: Arc::new(Mutex::new(None)), last_local_fetch: Arc::new(Mutex::new(None)),
metrics: HlNodeBlockSourceMetrics::default(),
}; };
block_source.run(next_block_number).await.unwrap(); block_source.run(next_block_number).await.unwrap();
block_source block_source
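
The `more_recent && too_soon` guard in the second hunk is the whole fallback rate limiter: a fallback fetch is deferred only while the requested height lies beyond the last locally served one and the local source answered within `fallback_threshold`. Below is a minimal sketch of that decision as a pure function, assuming `more_recent` (defined outside this hunk) compares the requested height against the last locally fetched height:

use std::time::{Duration, Instant};

// Hypothetical helper mirroring the `more_recent && too_soon` check in
// `collect_block`; `last_fetch` stands in for the `last_local_fetch` state.
fn should_defer_fallback(
    last_fetch: Option<(u64, Instant)>,
    requested_height: u64,
    now: Instant,
    fallback_threshold: Duration,
) -> bool {
    match last_fetch {
        // more_recent: the request is past the last locally served block;
        // too_soon: that block arrived within the threshold.
        Some((height, at)) => requested_height > height && now - at < fallback_threshold,
        None => false,
    }
}

fn main() {
    let now = Instant::now();
    let threshold = Duration::from_secs(5);
    // Height 11 was just served locally, so a request for 12 is deferred.
    assert!(should_defer_fallback(Some((11, now)), 12, now, threshold));
    // Once the threshold has elapsed, the fallback is allowed through.
    assert!(!should_defer_fallback(Some((11, now)), 12, now + threshold, threshold));
}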


@@ -2,7 +2,7 @@ use crate::node::types::{BlockAndReceipts, EvmBlock};
 use serde::{Deserialize, Serialize};
 use std::{
     fs::File,
-    io::{BufRead, BufReader},
+    io::{BufRead, BufReader, Seek, SeekFrom},
     ops::RangeInclusive,
     path::{Path, PathBuf},
 };
@@ -25,6 +25,57 @@ pub struct ScanOptions {
 
 pub struct Scanner;
 
+/// Stream for sequentially reading lines from a file.
+///
+/// This struct allows sequential iteration over lines via the [Self::next] method.
+/// It is resilient to cases where the line-producing process is interrupted while writing:
+/// - If a line is incomplete but still ends with a line ending, it is skipped; the fallback
+///   block source will later be used to retrieve the missing block.
+/// - If a line does not end with a newline (i.e., the write is still in progress), the method
+///   returns `None` to break out of the loop and avoid reading partial data.
+/// - If a temporary I/O error occurs, the stream exits the loop without rewinding the cursor,
+///   which results in skipping ahead to the next unread bytes.
+pub struct LineStream {
+    path: PathBuf,
+    reader: BufReader<File>,
+}
+
+impl LineStream {
+    pub fn from_path(path: &Path) -> std::io::Result<Self> {
+        let reader = BufReader::with_capacity(1024 * 1024, File::open(path)?);
+        Ok(Self { path: path.to_path_buf(), reader })
+    }
+
+    pub fn next(&mut self) -> Option<String> {
+        let mut line_buffer = vec![];
+        let Ok(size) = self.reader.read_until(b'\n', &mut line_buffer) else {
+            // Temporary I/O error; return None so the caller can retry later
+            return None;
+        };
+        // The cursor now sits right after the end of the line.
+        // On a UTF-8 error, skip the line by returning an empty string.
+        let Ok(mut line) = String::from_utf8(line_buffer) else {
+            return Some(String::new());
+        };
+        // A complete line ends with '\n'; strip the line ending (and a
+        // preceding '\r', if any) before returning it.
+        if line.ends_with('\n') {
+            line.pop();
+            if line.ends_with('\r') {
+                line.pop();
+            }
+            return Some(line);
+        }
+        // The line is not complete yet: rewind so it is re-read once fully
+        // written, and return None so the caller breaks out of its loop.
+        if size != 0 {
+            self.reader.seek(SeekFrom::Current(-(size as i64))).unwrap();
+        }
+        None
+    }
+}
+
 impl Scanner {
     pub fn line_to_evm_block(line: &str) -> serde_json::Result<(BlockAndReceipts, u64)> {
         let LocalBlockAndReceipts(_, parsed_block): LocalBlockAndReceipts =
@@ -35,31 +86,20 @@ impl Scanner {
         Ok((parsed_block, height))
     }
 
-    pub fn scan_hour_file(path: &Path, last_line: &mut usize, options: ScanOptions) -> ScanResult {
-        let lines: Vec<String> =
-            BufReader::new(File::open(path).expect("Failed to open hour file"))
-                .lines()
-                .collect::<Result<_, _>>()
-                .unwrap();
-        let skip = if *last_line == 0 { 0 } else { *last_line - 1 };
+    pub fn scan_hour_file(line_stream: &mut LineStream, options: ScanOptions) -> ScanResult {
         let mut new_blocks = Vec::new();
         let mut last_height = options.start_height;
         let mut block_ranges = Vec::new();
         let mut current_range: Option<(u64, u64)> = None;
-        for (line_idx, line) in lines.iter().enumerate().skip(skip) {
-            if line_idx < *last_line || line.trim().is_empty() {
-                continue;
-            }
-            match Self::line_to_evm_block(line) {
+        while let Some(line) = line_stream.next() {
+            match Self::line_to_evm_block(&line) {
                 Ok((parsed_block, height)) => {
                     if height >= options.start_height {
                         last_height = last_height.max(height);
                         if !options.only_load_ranges {
                             new_blocks.push(parsed_block);
                         }
-                        *last_line = line_idx;
                     }
 
                     match current_range {
@@ -74,16 +114,17 @@ impl Scanner {
                         }
                     }
                 }
-                Err(_) => warn!("Failed to parse line: {}...", line.get(0..50).unwrap_or(line)),
+                Err(_) => warn!("Failed to parse line: {}...", line.get(0..50).unwrap_or(&line)),
             }
         }
         if let Some((start, end)) = current_range {
             block_ranges.push(start..=end);
         }
 
         ScanResult {
-            path: path.to_path_buf(),
-            next_expected_height: last_height + 1,
+            path: line_stream.path.clone(),
+            next_expected_height: last_height + current_range.is_some() as u64,
             new_blocks,
             new_block_ranges: block_ranges,
         }
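
The subtle part of `LineStream` is the tailing behaviour: a trailing write without a newline must be rewound and picked up on a later call. A regression-style sketch of that contract, assuming it sits in the same module as `LineStream` and that `tempfile` is available as a dev-dependency (it already is in the tests below):

#[cfg(test)]
mod line_stream_tail_tests {
    use super::*;
    use std::io::Write;

    #[test]
    fn partial_line_is_reread_after_completion() -> eyre::Result<()> {
        let dir = tempfile::tempdir()?;
        let path = dir.path().join("hour");
        let mut file = std::fs::File::create(&path)?;
        write!(file, "complete line\npartial")?;
        file.sync_all()?;

        let mut stream = LineStream::from_path(&path)?;
        assert_eq!(stream.next().as_deref(), Some("complete line"));
        // No newline after "partial" yet: the stream rewinds and yields None.
        assert_eq!(stream.next(), None);

        // Once the producer finishes the line, the same stream picks it up.
        write!(file, " line\n")?;
        file.sync_all()?;
        assert_eq!(stream.next().as_deref(), Some("partial line"));
        Ok(())
    }
}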


@@ -1,10 +1,10 @@
 use super::*;
 use crate::{
-    node::types::{reth_compat, ReadPrecompileCalls},
-    pseudo_peer::sources::{hl_node::scan::LocalBlockAndReceipts, LocalBlockSource},
+    node::types::{ReadPrecompileCalls, reth_compat},
+    pseudo_peer::sources::{LocalBlockSource, hl_node::scan::LocalBlockAndReceipts},
 };
 use alloy_consensus::{BlockBody, Header};
-use alloy_primitives::{Address, Bloom, Bytes, B256, B64, U256};
+use alloy_primitives::{Address, B64, B256, Bloom, Bytes, U256};
 use std::{io::Write, time::Duration};
 
 const DEFAULT_FALLBACK_THRESHOLD_FOR_TEST: Duration = Duration::from_millis(5000);
@@ -193,3 +193,22 @@ async fn test_update_last_fetch_fallback() -> eyre::Result<()> {
     Ok(())
 }
+
+#[test]
+fn test_hourly_files_sort() -> eyre::Result<()> {
+    let temp_dir = tempfile::tempdir()?;
+    // Create 20250826/9 and 20250826/14
+    let targets = [("20250826", "9"), ("20250826", "14")];
+    for (date, hour) in targets {
+        let hourly_file = temp_dir.path().join(HOURLY_SUBDIR).join(date).join(hour);
+        let parent = hourly_file.parent().unwrap();
+        std::fs::create_dir_all(parent)?;
+        std::fs::File::create(hourly_file)?;
+    }
+    let files = FileOperations::all_hourly_files(temp_dir.path()).unwrap();
+    let file_names: Vec<_> =
+        files.into_iter().map(|p| p.file_name().unwrap().to_string_lossy().into_owned()).collect();
+    // Hour files must sort numerically: 9 before 14.
+    assert_eq!(file_names, ["9", "14"]);
+    Ok(())
+}
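
The assertion above only holds if hour files are compared numerically; a plain string sort would put "14" before "9". A tiny illustration of the difference (parsing the names to integers before sorting is an assumption about how `all_hourly_files` orders them internally):

fn main() {
    let mut lexicographic = vec!["14", "9"];
    lexicographic.sort();
    assert_eq!(lexicographic, ["14", "9"]); // '1' < '9' byte-wise

    let mut numeric: Vec<u64> = ["14", "9"].iter().map(|s| s.parse().unwrap()).collect();
    numeric.sort_unstable();
    assert_eq!(numeric, [9, 14]);
}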


@@ -1,5 +1,5 @@
 use std::path::Path;
-use time::{macros::format_description, Date, OffsetDateTime, Time};
+use time::{Date, OffsetDateTime, Time, macros::format_description};
 
 pub struct TimeUtils;


@@ -1,7 +1,8 @@
-use super::{utils, BlockSource};
+use super::{BlockSource, utils};
 use crate::node::types::BlockAndReceipts;
 use eyre::Context;
-use futures::{future::BoxFuture, FutureExt};
+use futures::{FutureExt, future::BoxFuture};
+use reth_metrics::{Metrics, metrics, metrics::Counter};
 use std::path::PathBuf;
 use tracing::info;
@@ -9,11 +10,21 @@ use tracing::info;
 #[derive(Debug, Clone)]
 pub struct LocalBlockSource {
     dir: PathBuf,
+    metrics: LocalBlockSourceMetrics,
+}
+
+#[derive(Metrics, Clone)]
+#[metrics(scope = "block_source.local")]
+pub struct LocalBlockSourceMetrics {
+    /// How many times the local block source polled for a block
+    pub polling_attempt: Counter,
+    /// How many times a block was fetched from the local filesystem
+    pub fetched: Counter,
 }
 
 impl LocalBlockSource {
     pub fn new(dir: impl Into<PathBuf>) -> Self {
-        Self { dir: dir.into() }
+        Self { dir: dir.into(), metrics: LocalBlockSourceMetrics::default() }
     }
 
     async fn pick_path_with_highest_number(dir: PathBuf, is_dir: bool) -> Option<(u64, String)> {
@@ -31,13 +42,17 @@ impl LocalBlockSource {
 impl BlockSource for LocalBlockSource {
     fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
         let dir = self.dir.clone();
+        let metrics = self.metrics.clone();
         async move {
             let path = dir.join(utils::rmp_path(height));
+            metrics.polling_attempt.increment(1);
             let file = tokio::fs::read(&path)
                 .await
                 .wrap_err_with(|| format!("Failed to read block from {path:?}"))?;
             let mut decoder = lz4_flex::frame::FrameDecoder::new(&file[..]);
             let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
+            metrics.fetched.increment(1);
             Ok(blocks[0].clone())
         }
         .boxed()


@@ -1,7 +1,8 @@
-use super::{utils, BlockSource};
+use super::{BlockSource, utils};
 use crate::node::types::BlockAndReceipts;
 use aws_sdk_s3::types::RequestPayer;
-use futures::{future::BoxFuture, FutureExt};
+use futures::{FutureExt, future::BoxFuture};
+use reth_metrics::{Metrics, metrics, metrics::Counter};
 use std::{sync::Arc, time::Duration};
 use tracing::info;
@@ -11,11 +12,26 @@ pub struct S3BlockSource {
     client: Arc<aws_sdk_s3::Client>,
     bucket: String,
     polling_interval: Duration,
+    metrics: S3BlockSourceMetrics,
+}
+
+#[derive(Metrics, Clone)]
+#[metrics(scope = "block_source.s3")]
+pub struct S3BlockSourceMetrics {
+    /// How many times the S3 block source polled for a block
+    pub polling_attempt: Counter,
+    /// How many times a block was fetched from S3
+    pub fetched: Counter,
 }
 
 impl S3BlockSource {
     pub fn new(client: aws_sdk_s3::Client, bucket: String, polling_interval: Duration) -> Self {
-        Self { client: client.into(), bucket, polling_interval }
+        Self {
+            client: client.into(),
+            bucket,
+            polling_interval,
+            metrics: S3BlockSourceMetrics::default(),
+        }
     }
 
     async fn pick_path_with_highest_number(
@@ -52,14 +68,18 @@ impl BlockSource for S3BlockSource {
     fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
         let client = self.client.clone();
         let bucket = self.bucket.clone();
+        let metrics = self.metrics.clone();
         async move {
             let path = utils::rmp_path(height);
+            metrics.polling_attempt.increment(1);
             let request = client
                 .get_object()
                 .request_payer(RequestPayer::Requester)
                 .bucket(&bucket)
                 .key(path);
             let response = request.send().await?;
+            metrics.fetched.increment(1);
             let bytes = response.body.collect().await?.into_bytes();
             let mut decoder = lz4_flex::frame::FrameDecoder::new(&bytes[..]);
             let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
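
Both the local and the S3 source decode payloads identically: an lz4 frame wrapping a MessagePack-encoded `Vec<BlockAndReceipts>`, of which only the first element is used. A sketch of that shared path as a free function (the helper itself is hypothetical; the sources inline these calls as shown above):

use crate::node::types::BlockAndReceipts;

// Hypothetical shared decode helper for .rmp block payloads.
fn decode_block_payload(bytes: &[u8]) -> eyre::Result<BlockAndReceipts> {
    let mut decoder = lz4_flex::frame::FrameDecoder::new(bytes);
    let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
    // Each payload carries a single-element Vec; take the first block.
    blocks.into_iter().next().ok_or_else(|| eyre::eyre!("empty block payload"))
}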

src/version.rs (new file, 35 lines)

@@ -0,0 +1,35 @@
+use std::borrow::Cow;
+
+use reth_node_core::version::{RethCliVersionConsts, try_init_version_metadata};
+
+pub fn init_reth_hl_version() {
+    let cargo_pkg_version = env!("CARGO_PKG_VERSION").to_string();
+    let short = env!("RETH_HL_SHORT_VERSION").to_string();
+    let long = format!(
+        "{}\n{}\n{}\n{}\n{}",
+        env!("RETH_HL_LONG_VERSION_0"),
+        env!("RETH_HL_LONG_VERSION_1"),
+        env!("RETH_HL_LONG_VERSION_2"),
+        env!("RETH_HL_LONG_VERSION_3"),
+        env!("RETH_HL_LONG_VERSION_4"),
+    );
+    let p2p = env!("RETH_HL_P2P_CLIENT_VERSION").to_string();
+
+    let meta = RethCliVersionConsts {
+        name_client: Cow::Borrowed("reth_hl"),
+        cargo_pkg_version: Cow::Owned(cargo_pkg_version.clone()),
+        vergen_git_sha_long: Cow::Owned(env!("VERGEN_GIT_SHA").to_string()),
+        vergen_git_sha: Cow::Owned(env!("VERGEN_GIT_SHA_SHORT").to_string()),
+        vergen_build_timestamp: Cow::Owned(env!("VERGEN_BUILD_TIMESTAMP").to_string()),
+        vergen_cargo_target_triple: Cow::Owned(env!("VERGEN_CARGO_TARGET_TRIPLE").to_string()),
+        vergen_cargo_features: Cow::Owned(env!("VERGEN_CARGO_FEATURES").to_string()),
+        short_version: Cow::Owned(short),
+        long_version: Cow::Owned(long),
+        build_profile_name: Cow::Owned(env!("RETH_HL_BUILD_PROFILE").to_string()),
+        p2p_client_version: Cow::Owned(p2p),
+        extra_data: Cow::Owned(format!("reth_hl/v{}/{}", cargo_pkg_version, std::env::consts::OS)),
+    };
+
+    let _ = try_init_version_metadata(meta);
+}
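
Since `try_init_version_metadata` appears to register the metadata once and ignore later calls, the registration belongs at the very top of the binary. A hypothetical call site, with the `reth_hl::version` module path assumed:

fn main() {
    // Register reth_hl's version strings before anything formats or
    // reports a client version; later registrations would be ignored.
    reth_hl::version::init_reth_hl_version();

    // ... parse CLI arguments and launch the node as usual ...
}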

tests/run_tests.sh (new file, 49 lines)

@@ -0,0 +1,49 @@
+#!/bin/bash
+set -e
+
+export ETH_RPC_URL="${ETH_RPC_URL:-wss://hl-archive-node.xyz}"
+
+success() {
+    echo "Success: $1"
+}
+
+fail() {
+    echo "Failed: $1"
+    exit 1
+}
+
+ensure_cmd() {
+    command -v "$1" > /dev/null 2>&1 || fail "$1 is required"
+}
+
+ensure_cmd jq
+ensure_cmd cast
+ensure_cmd wscat
+
+if [[ ! "$ETH_RPC_URL" =~ ^wss?:// ]]; then
+    fail "ETH_RPC_URL must be a websocket url"
+fi
+
+TITLE="Issue #78 - eth_getLogs should return system transactions"
+cast logs \
+    --rpc-url "$ETH_RPC_URL" \
+    --from-block 15312567 \
+    --to-block 15312570 \
+    --address 0x9fdbda0a5e284c32744d2f17ee5c74b284993463 \
+    0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef \
+    | grep -q "0x00000000000000000000000020000000000000000000000000000000000000c5" \
+    && success "$TITLE" || fail "$TITLE"
+
+TITLE="Issue #78 - eth_getBlockByNumber should return the same logsBloom as the official RPC"
+OFFICIAL_RPC="https://rpc.hyperliquid.xyz/evm"
+A=$(cast block 1394092 --rpc-url "$ETH_RPC_URL" -f logsBloom | md5sum)
+B=$(cast block 1394092 --rpc-url "$OFFICIAL_RPC" -f logsBloom | md5sum)
+echo node "$A"
+echo rpc\ "$B"
+[[ "$A" == "$B" ]] && success "$TITLE" || fail "$TITLE"
+
+TITLE="eth_subscribe newHeads via wscat"
+CMD='{"jsonrpc":"2.0","id":1,"method":"eth_subscribe","params":["newHeads"]}'
+wscat -w 2 -c "$ETH_RPC_URL" -x "$CMD" | tail -1 | jq -r .params.result.nonce | grep 0x \
+    && success "$TITLE" || fail "$TITLE"
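
To run the suite against your own node rather than the public endpoint, set `ETH_RPC_URL` to its WebSocket address (for example `ws://localhost:8546`) before invoking `tests/run_tests.sh`; plain HTTP URLs are rejected up front because the `newHeads` subscription check requires a WebSocket connection.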