feat(p2p): Include forkid file from Apache-licensed repo (#68)

* Start using reth-rlp
* Include Apache-licensed forkid file in primitives
* Move forkid to eth-wire
This commit is contained in:
rakita
2022-10-14 16:52:59 +02:00
committed by GitHub
parent 19d001fbdd
commit a644318c32
11 changed files with 963 additions and 76 deletions

74
Cargo.lock generated
View File

@ -630,7 +630,7 @@ version = "17.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e4966fba78396ff92db3b817ee71143eccd98acf0f876b8d600e585a670c5d1b" checksum = "e4966fba78396ff92db3b817ee71143eccd98acf0f876b8d600e585a670c5d1b"
dependencies = [ dependencies = [
"ethereum-types 0.13.1", "ethereum-types",
"hex", "hex",
"once_cell", "once_cell",
"regex", "regex",
@ -648,25 +648,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "11da94e443c60508eb62cf256243a64da87304c2802ac2528847f79d750007ef" checksum = "11da94e443c60508eb62cf256243a64da87304c2802ac2528847f79d750007ef"
dependencies = [ dependencies = [
"crunchy", "crunchy",
"fixed-hash 0.7.0", "fixed-hash",
"impl-rlp", "impl-rlp",
"impl-serde", "impl-serde",
"tiny-keccak", "tiny-keccak",
] ]
[[package]]
name = "ethereum-forkid"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70b823f6b913b97e58a2bd67a7beeb48b0338d4aa8e3cc21d9cdab457716e4d4"
dependencies = [
"crc",
"fastrlp",
"maplit",
"primitive-types 0.11.1",
"thiserror",
]
[[package]] [[package]]
name = "ethereum-types" name = "ethereum-types"
version = "0.13.1" version = "0.13.1"
@ -674,21 +661,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2827b94c556145446fcce834ca86b7abf0c39a805883fe20e72c5bfdb5a0dc6" checksum = "b2827b94c556145446fcce834ca86b7abf0c39a805883fe20e72c5bfdb5a0dc6"
dependencies = [ dependencies = [
"ethbloom", "ethbloom",
"fixed-hash 0.7.0", "fixed-hash",
"impl-rlp", "impl-rlp",
"impl-serde", "impl-serde",
"primitive-types 0.11.1", "primitive-types",
"uint",
]
[[package]]
name = "ethereum-types"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81224dc661606574f5a0f28c9947d0ee1d93ff11c5f1c4e7272f52e8c0b5483c"
dependencies = [
"fixed-hash 0.8.0",
"primitive-types 0.12.0",
"uint", "uint",
] ]
@ -758,7 +734,7 @@ dependencies = [
"arrayvec", "arrayvec",
"auto_impl", "auto_impl",
"bytes", "bytes",
"ethereum-types 0.13.1", "ethereum-types",
"fastrlp-derive", "fastrlp-derive",
] ]
@ -796,17 +772,6 @@ dependencies = [
"static_assertions", "static_assertions",
] ]
[[package]]
name = "fixed-hash"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534"
dependencies = [
"byteorder",
"rustc-hex",
"static_assertions",
]
[[package]] [[package]]
name = "fnv" name = "fnv"
version = "1.0.7" version = "1.0.7"
@ -1877,23 +1842,13 @@ version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e28720988bff275df1f51b171e1b2a18c30d194c4d2b61defdacecd625a5d94a" checksum = "e28720988bff275df1f51b171e1b2a18c30d194c4d2b61defdacecd625a5d94a"
dependencies = [ dependencies = [
"fixed-hash 0.7.0", "fixed-hash",
"impl-codec", "impl-codec",
"impl-rlp", "impl-rlp",
"impl-serde", "impl-serde",
"uint", "uint",
] ]
[[package]]
name = "primitive-types"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5cfd65aea0c5fa0bfcc7c9e7ca828c921ef778f43d325325ec84bda371bfa75a"
dependencies = [
"fixed-hash 0.8.0",
"uint",
]
[[package]] [[package]]
name = "proc-macro-crate" name = "proc-macro-crate"
version = "1.2.1" version = "1.2.1"
@ -2079,12 +2034,13 @@ name = "reth-eth-wire"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"bytes", "bytes",
"ethereum-forkid", "crc",
"ethers-core", "ethers-core",
"fastrlp",
"hex", "hex",
"hex-literal", "hex-literal",
"maplit",
"reth-primitives", "reth-primitives",
"reth-rlp",
"thiserror", "thiserror",
] ]
@ -2131,10 +2087,14 @@ name = "reth-primitives"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"bytes", "bytes",
"crc",
"ethers-core", "ethers-core",
"fastrlp", "hex-literal",
"maplit",
"reth-rlp",
"serde", "serde",
"serde_json", "serde_json",
"thiserror",
] ]
[[package]] [[package]]
@ -2145,7 +2105,7 @@ dependencies = [
"auto_impl", "auto_impl",
"bytes", "bytes",
"criterion", "criterion",
"ethereum-types 0.14.0", "ethereum-types",
"ethnum", "ethnum",
"hex-literal", "hex-literal",
"reth-rlp", "reth-rlp",
@ -2239,7 +2199,7 @@ dependencies = [
"bytes", "bytes",
"hashbrown", "hashbrown",
"num_enum", "num_enum",
"primitive-types 0.11.1", "primitive-types",
"revm_precompiles", "revm_precompiles",
"rlp", "rlp",
"sha3", "sha3",
@ -2255,7 +2215,7 @@ dependencies = [
"hashbrown", "hashbrown",
"num", "num",
"once_cell", "once_cell",
"primitive-types 0.11.1", "primitive-types",
"ripemd", "ripemd",
"secp256k1", "secp256k1",
"sha2", "sha2",

View File

@ -3,7 +3,7 @@ name = "reth-rlp-derive"
version = "0.1.1" version = "0.1.1"
license = "Apache-2.0" license = "Apache-2.0"
edition = "2021" edition = "2021"
description = "Procedural macros for fastrlp" description = "Procedural macros for reth-rlp"
repository = "https://github.com/foundry-rs/reth" repository = "https://github.com/foundry-rs/reth"
[lib] [lib]

View File

@ -11,7 +11,7 @@ arrayvec = { version = "0.7", default-features = false }
auto_impl = "1" auto_impl = "1"
bytes = { version = "1", default-features = false } bytes = { version = "1", default-features = false }
ethnum = { version = "1", default-features = false, optional = true } ethnum = { version = "1", default-features = false, optional = true }
ethereum-types = { version = "0.14", default-features = false, optional = true } ethereum-types = { version = "0.13", default-features = false, optional = true }
reth-rlp-derive = { version = "0.1", path = "../rlp-derive", optional = true } reth-rlp-derive = { version = "0.1", path = "../rlp-derive", optional = true }
[dev-dependencies] [dev-dependencies]

View File

@ -8,15 +8,16 @@ readme = "README.md"
[dependencies] [dependencies]
bytes = { version = "1.1" } bytes = { version = "1.1" }
# can remove these restrictions once ethereum-types is bumped across the board
fastrlp = { version = "0.1.3", features = ["alloc", "derive", "std", "ethereum-types"] }
ethereum-forkid = "=0.10"
hex = "0.4" hex = "0.4"
thiserror = "1" thiserror = "1"
# reth # reth
reth-primitives = { path = "../../primitives" } reth-primitives = { path = "../../primitives" }
reth-rlp = { path = "../../common/rlp", features = ["alloc", "derive", "std", "ethereum-types"] }
#used for forkid
crc = "1"
maplit = "1"
[dev-dependencies] [dev-dependencies]
hex-literal = "0.3" hex-literal = "0.3"

View File

@ -0,0 +1,458 @@
//! EIP-2124 implementation based on <https://eips.ethereum.org/EIPS/eip-2124>.
#![deny(missing_docs)]
#![allow(clippy::redundant_else, clippy::too_many_lines)]
use crc::crc32;
use maplit::btreemap;
use reth_primitives::{BlockNumber, H256};
use reth_rlp::*;
use std::{
collections::{BTreeMap, BTreeSet},
ops::{Add, AddAssign},
};
use thiserror::Error;
/// `CRC32` hash of all previous forks starting from genesis block.
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    Hash,
    // Wrapper derives: RLP-encodes/decodes as the inner 4-byte array itself,
    // not as a one-element list.
    RlpEncodableWrapper,
    RlpDecodableWrapper,
    RlpMaxEncodedLen,
)]
pub struct ForkHash(pub [u8; 4]); // big-endian CRC32 state bytes (see `From<H256>` / `AddAssign` below)
impl From<H256> for ForkHash {
    /// Seeds the fork hash from the genesis block hash: the initial value is
    /// the IEEE CRC32 checksum of the 32 genesis-hash bytes, stored big-endian.
    fn from(genesis: H256) -> Self {
        Self(crc32::checksum_ieee(&genesis[..]).to_be_bytes())
    }
}
impl AddAssign<BlockNumber> for ForkHash {
    /// Folds a fork block number into the rolling checksum (EIP-2124):
    /// the CRC is resumed from the current 4-byte state and updated with the
    /// big-endian bytes of the block number.
    fn add_assign(&mut self, block: BlockNumber) {
        let blob = block.to_be_bytes();
        self.0 = crc32::update(u32::from_be_bytes(self.0), &crc32::IEEE_TABLE, &blob).to_be_bytes();
    }
}
impl Add<BlockNumber> for ForkHash {
    type Output = Self;

    /// By-value variant of `+=`: folds `block` into the checksum and returns
    /// the updated hash, leaving the original binding untouched.
    fn add(self, block: BlockNumber) -> Self {
        let mut updated = self;
        updated += block;
        updated
    }
}
/// A fork identifier as defined by EIP-2124.
/// Serves as the chain compatibility identifier.
// RlpMaxEncodedLen: the encoded size is statically bounded (4-byte hash + u64),
// enabling fixed-size encoding (see `encode_fixed_size` in the tests).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, RlpEncodable, RlpDecodable, RlpMaxEncodedLen)]
pub struct ForkId {
    /// CRC32 checksum of the all fork blocks from genesis.
    pub hash: ForkHash,
    /// Next upcoming fork block number, 0 if not yet known.
    pub next: BlockNumber,
}
/// Reason for rejecting provided `ForkId`.
// Returned by `ForkFilter::validate`; the two variants mirror the EIP-2124
// disconnect reasons.
#[derive(Clone, Copy, Debug, Error, PartialEq, Eq, Hash)]
pub enum ValidationError {
    /// Remote node is outdated and needs a software update.
    #[error("remote node is outdated and needs a software update")]
    RemoteStale,
    /// Local node is on an incompatible chain or needs a software update.
    #[error("local node is on an incompatible chain or needs a software update")]
    LocalIncompatibleOrStale,
}
/// Filter that describes the state of blockchain and can be used to check incoming `ForkId`s for
/// compatibility.
#[derive(Clone, Debug, PartialEq)]
pub struct ForkFilter {
    /// Map of fork block number -> cumulative fork hash; genesis lives at key 0.
    forks: BTreeMap<BlockNumber, ForkHash>,
    /// Current local head block number.
    head: BlockNumber,
    /// Precomputed per-epoch state; rebuilt when `head` crosses an epoch boundary.
    cache: Cache,
}
#[derive(Clone, Debug, PartialEq)]
struct Cache {
    // An epoch is a period between forks.
    // When we progress from one fork to the next one we move to the next epoch.
    epoch_start: BlockNumber,
    // First fork block strictly beyond the head; `None` if no future fork is known.
    epoch_end: Option<BlockNumber>,
    // Forks already passed by the head, in ascending block order.
    past: Vec<(BlockNumber, ForkHash)>,
    // Hashes of forks still ahead of the head.
    future: Vec<ForkHash>,
    // Fork id advertised for the current epoch.
    fork_id: ForkId,
}
impl Cache {
    /// Compute cache.
    // Partitions `forks` around `head` into past/future and derives the
    // advertised `ForkId` for the current epoch.
    fn compute_cache(forks: &BTreeMap<BlockNumber, ForkHash>, head: BlockNumber) -> Self {
        let mut past = Vec::with_capacity(forks.len());
        let mut future = Vec::with_capacity(forks.len());
        let mut epoch_start = 0;
        let mut epoch_end = None;
        // BTreeMap iterates in ascending block order, so `past` stays sorted and
        // `epoch_end` captures the first fork block strictly beyond `head`.
        for (block, hash) in forks {
            if *block <= head {
                epoch_start = *block;
                past.push((*block, *hash));
            } else {
                if epoch_end.is_none() {
                    epoch_end = Some(*block);
                }
                future.push(*hash);
            }
        }
        let fork_id = ForkId {
            // `forks` always contains the genesis entry at key 0 (inserted by
            // `ForkFilter::new`), so `past` is never empty.
            hash: past.last().expect("there is always at least one - genesis - fork hash; qed").1,
            // 0 signals "no known upcoming fork" per EIP-2124.
            next: epoch_end.unwrap_or(0),
        };
        Self { epoch_start, epoch_end, past, future, fork_id }
    }
}
impl ForkFilter {
    /// Create the filter from provided head, genesis block hash, past forks and expected future
    /// forks.
    pub fn new<F>(head: BlockNumber, genesis: H256, forks: F) -> Self
    where
        F: IntoIterator<Item = BlockNumber>,
    {
        let genesis_fork_hash = ForkHash::from(genesis);
        // Dedup/sort the fork blocks; drop block 0 since genesis is seeded separately.
        let mut forks = forks.into_iter().collect::<BTreeSet<_>>();
        forks.remove(&0);
        // Build the block -> cumulative fork-hash map, threading the rolling
        // hash through the fold as the accumulator's second element.
        let forks = forks
            .into_iter()
            .fold(
                (btreemap! { 0 => genesis_fork_hash }, genesis_fork_hash),
                |(mut acc, base_hash), block| {
                    let fork_hash = base_hash + block;
                    acc.insert(block, fork_hash);
                    (acc, fork_hash)
                },
            )
            .0;
        let cache = Cache::compute_cache(&forks, head);
        Self { forks, head, cache }
    }
    // Advance the head; recompute the cache only when the head leaves the
    // current epoch (backwards before its start or forwards past its end).
    // Returns `true` if the cache was recomputed.
    fn set_head_priv(&mut self, head: BlockNumber) -> bool {
        #[allow(clippy::option_if_let_else)]
        let recompute_cache = {
            if head < self.cache.epoch_start {
                true
            } else if let Some(epoch_end) = self.cache.epoch_end {
                head >= epoch_end
            } else {
                false
            }
        };
        if recompute_cache {
            self.cache = Cache::compute_cache(&self.forks, head);
        }
        self.head = head;
        recompute_cache
    }
    /// Set the current head
    pub fn set_head(&mut self, head: BlockNumber) {
        self.set_head_priv(head);
    }
    /// Return current fork id
    #[must_use]
    pub const fn current(&self) -> ForkId {
        self.cache.fork_id
    }
    /// Check whether the provided `ForkId` is compatible based on the validation rules in
    /// `EIP-2124`.
    ///
    /// # Errors
    /// Returns a `ValidationError` if the `ForkId` is not compatible.
    pub fn validate(&self, fork_id: ForkId) -> Result<(), ValidationError> {
        // 1) If local and remote FORK_HASH matches...
        if self.current().hash == fork_id.hash {
            if fork_id.next == 0 {
                // 1b) No remotely announced fork, connect.
                return Ok(())
            }
            //... compare local head to FORK_NEXT.
            if self.head >= fork_id.next {
                // 1a) A remotely announced but remotely not passed block is already passed locally,
                // disconnect, since the chains are incompatible.
                return Err(ValidationError::LocalIncompatibleOrStale)
            } else {
                // 1b) Remotely announced fork not yet passed locally, connect.
                return Ok(())
            }
        }
        // 2) If the remote FORK_HASH is a subset of the local past forks...
        // `past` is in ascending block order, so the iterator entry after a
        // matching hash is the locally-known next fork.
        let mut it = self.cache.past.iter();
        while let Some((_, hash)) = it.next() {
            if *hash == fork_id.hash {
                // ...and the remote FORK_NEXT matches with the locally following fork block number,
                // connect.
                if let Some((actual_fork_block, _)) = it.next() {
                    if *actual_fork_block == fork_id.next {
                        return Ok(())
                    } else {
                        return Err(ValidationError::RemoteStale)
                    }
                }
                break
            }
        }
        // 3) If the remote FORK_HASH is a superset of the local past forks and can be completed
        // with locally known future forks, connect.
        for future_fork_hash in &self.cache.future {
            if *future_fork_hash == fork_id.hash {
                return Ok(())
            }
        }
        // 4) Reject in all other cases.
        Err(ValidationError::LocalIncompatibleOrStale)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use hex_literal::hex;
    // Genesis block hash shared by all EIP-2124 test vectors below.
    const GENESIS_HASH: H256 =
        H256(hex!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"));
    // EIP test vectors.
    #[test]
    fn forkhash() {
        // Rolling CRC32: seed from genesis, then fold in each fork block number.
        let mut fork_hash = ForkHash::from(GENESIS_HASH);
        assert_eq!(fork_hash.0, hex!("fc64ec04"));
        fork_hash += 1_150_000;
        assert_eq!(fork_hash.0, hex!("97c2c34c"));
        fork_hash += 1_920_000;
        assert_eq!(fork_hash.0, hex!("91d1f948"));
    }
    #[test]
    fn compatibility_check() {
        let mut filter = ForkFilter::new(
            0,
            GENESIS_HASH,
            vec![1_150_000, 1_920_000, 2_463_000, 2_675_000, 4_370_000, 7_280_000],
        );
        // Local is mainnet Petersburg, remote announces the same. No future fork is announced.
        filter.set_head(7_987_396);
        assert_eq!(filter.validate(ForkId { hash: ForkHash(hex!("668db0af")), next: 0 }), Ok(()));
        // Local is mainnet Petersburg, remote announces the same. Remote also announces a next fork
        // at block 0xffffffff, but that is uncertain.
        filter.set_head(7_987_396);
        assert_eq!(
            filter.validate(ForkId {
                hash: ForkHash(hex!("668db0af")),
                next: BlockNumber::max_value()
            }),
            Ok(())
        );
        // Local is mainnet currently in Byzantium only (so it's aware of Petersburg),remote
        // announces also Byzantium, but it's not yet aware of Petersburg (e.g. non updated
        // node before the fork). In this case we don't know if Petersburg passed yet or
        // not.
        filter.set_head(7_279_999);
        assert_eq!(filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 0 }), Ok(()));
        // Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote
        // announces also Byzantium, and it's also aware of Petersburg (e.g. updated node
        // before the fork). We don't know if Petersburg passed yet (will pass) or not.
        filter.set_head(7_279_999);
        assert_eq!(
            filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 7_280_000 }),
            Ok(())
        );
        // Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote
        // announces also Byzantium, and it's also aware of some random fork (e.g.
        // misconfigured Petersburg). As neither forks passed at neither nodes, they may
        // mismatch, but we still connect for now.
        filter.set_head(7_279_999);
        assert_eq!(
            filter.validate(ForkId {
                hash: ForkHash(hex!("a00bc324")),
                next: BlockNumber::max_value()
            }),
            Ok(())
        );
        // Local is mainnet Petersburg, remote announces Byzantium + knowledge about Petersburg.
        // Remote is simply out of sync, accept.
        filter.set_head(7_987_396);
        assert_eq!(
            filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 7_280_000 }),
            Ok(())
        );
        // Local is mainnet Petersburg, remote announces Spurious + knowledge about Byzantium.
        // Remote is definitely out of sync. It may or may not need the Petersburg update,
        // we don't know yet.
        filter.set_head(7_987_396);
        assert_eq!(
            filter.validate(ForkId { hash: ForkHash(hex!("3edd5b10")), next: 4_370_000 }),
            Ok(())
        );
        // Local is mainnet Byzantium, remote announces Petersburg. Local is out of sync, accept.
        filter.set_head(7_279_999);
        assert_eq!(filter.validate(ForkId { hash: ForkHash(hex!("668db0af")), next: 0 }), Ok(()));
        // Local is mainnet Spurious, remote announces Byzantium, but is not aware of Petersburg.
        // Local out of sync. Local also knows about a future fork, but that is uncertain
        // yet.
        filter.set_head(4_369_999);
        assert_eq!(filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 0 }), Ok(()));
        // Local is mainnet Petersburg. remote announces Byzantium but is not aware of further
        // forks. Remote needs software update.
        filter.set_head(7_987_396);
        assert_eq!(
            filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 0 }),
            Err(ValidationError::RemoteStale)
        );
        // Local is mainnet Petersburg, and isn't aware of more forks. Remote announces Petersburg +
        // 0xffffffff. Local needs software update, reject.
        filter.set_head(7_987_396);
        assert_eq!(
            filter.validate(ForkId { hash: ForkHash(hex!("5cddc0e1")), next: 0 }),
            Err(ValidationError::LocalIncompatibleOrStale)
        );
        // Local is mainnet Byzantium, and is aware of Petersburg. Remote announces Petersburg +
        // 0xffffffff. Local needs software update, reject.
        filter.set_head(7_279_999);
        assert_eq!(
            filter.validate(ForkId { hash: ForkHash(hex!("5cddc0e1")), next: 0 }),
            Err(ValidationError::LocalIncompatibleOrStale)
        );
        // Local is mainnet Petersburg, remote is Rinkeby Petersburg.
        filter.set_head(7_987_396);
        assert_eq!(
            filter.validate(ForkId { hash: ForkHash(hex!("afec6b27")), next: 0 }),
            Err(ValidationError::LocalIncompatibleOrStale)
        );
        // Local is mainnet Petersburg, far in the future. Remote announces Gopherium (non existing
        // fork) at some future block 88888888, for itself, but past block for local. Local
        // is incompatible.
        //
        // This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
        filter.set_head(88_888_888);
        assert_eq!(
            filter.validate(ForkId { hash: ForkHash(hex!("668db0af")), next: 88_888_888 }),
            Err(ValidationError::LocalIncompatibleOrStale)
        );
        // Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non
        // existing fork) at block 7279999, before Petersburg. Local is incompatible.
        filter.set_head(7_279_999);
        assert_eq!(
            filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 7_279_999 }),
            Err(ValidationError::LocalIncompatibleOrStale)
        );
    }
    #[test]
    fn forkid_serialization() {
        // Round-trips the EIP-2124 RLP encoding vectors in both directions.
        assert_eq!(
            &*reth_rlp::encode_fixed_size(&ForkId { hash: ForkHash(hex!("00000000")), next: 0 }),
            hex!("c6840000000080")
        );
        assert_eq!(
            &*reth_rlp::encode_fixed_size(&ForkId {
                hash: ForkHash(hex!("deadbeef")),
                next: 0xBADD_CAFE
            }),
            hex!("ca84deadbeef84baddcafe")
        );
        assert_eq!(
            &*reth_rlp::encode_fixed_size(&ForkId {
                hash: ForkHash(hex!("ffffffff")),
                next: u64::max_value()
            }),
            hex!("ce84ffffffff88ffffffffffffffff")
        );
        assert_eq!(
            ForkId::decode(&mut (&hex!("c6840000000080") as &[u8])).unwrap(),
            ForkId { hash: ForkHash(hex!("00000000")), next: 0 }
        );
        assert_eq!(
            ForkId::decode(&mut (&hex!("ca84deadbeef84baddcafe") as &[u8])).unwrap(),
            ForkId { hash: ForkHash(hex!("deadbeef")), next: 0xBADD_CAFE }
        );
        assert_eq!(
            ForkId::decode(&mut (&hex!("ce84ffffffff88ffffffffffffffff") as &[u8])).unwrap(),
            ForkId { hash: ForkHash(hex!("ffffffff")), next: u64::max_value() }
        );
    }
    #[test]
    fn compute_cache() {
        // Checks that the cache is only recomputed when the head crosses an
        // epoch boundary (fork block), in either direction.
        let b1 = 1_150_000;
        let b2 = 1_920_000;
        let h0 = ForkId { hash: ForkHash(hex!("fc64ec04")), next: b1 };
        let h1 = ForkId { hash: ForkHash(hex!("97c2c34c")), next: b2 };
        let h2 = ForkId { hash: ForkHash(hex!("91d1f948")), next: 0 };
        let mut fork_filter = ForkFilter::new(0, GENESIS_HASH, vec![b1, b2]);
        assert!(!fork_filter.set_head_priv(0));
        assert_eq!(fork_filter.current(), h0);
        assert!(!fork_filter.set_head_priv(1));
        assert_eq!(fork_filter.current(), h0);
        assert!(fork_filter.set_head_priv(b1 + 1));
        assert_eq!(fork_filter.current(), h1);
        assert!(!fork_filter.set_head_priv(b1));
        assert_eq!(fork_filter.current(), h1);
        assert!(fork_filter.set_head_priv(b1 - 1));
        assert_eq!(fork_filter.current(), h0);
        assert!(fork_filter.set_head_priv(b1));
        assert_eq!(fork_filter.current(), h1);
        assert!(!fork_filter.set_head_priv(b2 - 1));
        assert_eq!(fork_filter.current(), h1);
        assert!(fork_filter.set_head_priv(b2));
        assert_eq!(fork_filter.current(), h2);
    }
}

View File

@ -12,3 +12,5 @@ pub use status::Status;
mod version; mod version;
pub use version::EthVersion; pub use version::EthVersion;
pub mod forkid;

View File

@ -1,6 +1,6 @@
use ethereum_forkid::ForkId; use crate::forkid::ForkId;
use fastrlp::{RlpDecodable, RlpEncodable};
use reth_primitives::{Chain, H256, U256}; use reth_primitives::{Chain, H256, U256};
use reth_rlp::{RlpDecodable, RlpEncodable};
use std::fmt::{Debug, Display}; use std::fmt::{Debug, Display};
/// The status message is used in the eth protocol handshake to ensure that peers are on the same /// The status message is used in the eth protocol handshake to ensure that peers are on the same
@ -86,15 +86,16 @@ impl Debug for Status {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::str::FromStr;
use ethereum_forkid::{ForkHash, ForkId};
use ethers_core::types::Chain as NamedChain; use ethers_core::types::Chain as NamedChain;
use fastrlp::{Decodable, Encodable};
use hex_literal::hex; use hex_literal::hex;
use reth_primitives::{Chain, H256, U256}; use reth_primitives::{Chain, H256, U256};
use reth_rlp::{Decodable, Encodable};
use std::str::FromStr;
use crate::{EthVersion, Status}; use crate::{
forkid::{ForkHash, ForkId},
EthVersion, Status,
};
#[test] #[test]
fn encode_eth_status_message() { fn encode_eth_status_message() {

View File

@ -8,10 +8,16 @@ readme = "README.md"
description = "Commonly used types in reth." description = "Commonly used types in reth."
[dependencies] [dependencies]
fastrlp = { version = "0.1.3" }
ethers-core = { git = "https://github.com/gakonst/ethers-rs", default-features = false } ethers-core = { git = "https://github.com/gakonst/ethers-rs", default-features = false }
bytes = "1.2" bytes = "1.2"
serde = "1.0" serde = "1.0"
thiserror = "1"
reth-rlp = { path = "../common/rlp", features = ["derive"]}
#used for forkid
crc = "1"
maplit = "1"
[dev-dependencies] [dev-dependencies]
serde_json = "1.0" serde_json = "1.0"
hex-literal = "0.3"

View File

@ -1,6 +1,6 @@
use crate::U256; use crate::U256;
use ethers_core::types::{ParseChainError, U64}; use ethers_core::types::{ParseChainError, U64};
use fastrlp::{Decodable, Encodable}; use reth_rlp::{Decodable, Encodable};
use std::{fmt, str::FromStr}; use std::{fmt, str::FromStr};
/// Either a named or chain id or the actual id value /// Either a named or chain id or the actual id value
@ -117,7 +117,7 @@ impl Encodable for Chain {
Self::Id(id) => id.length(), Self::Id(id) => id.length(),
} }
} }
fn encode(&self, out: &mut dyn fastrlp::BufMut) { fn encode(&self, out: &mut dyn reth_rlp::BufMut) {
match self { match self {
Self::Named(chain) => u64::from(*chain).encode(out), Self::Named(chain) => u64::from(*chain).encode(out),
Self::Id(id) => id.encode(out), Self::Id(id) => id.encode(out),
@ -126,7 +126,7 @@ impl Encodable for Chain {
} }
impl Decodable for Chain { impl Decodable for Chain {
fn decode(buf: &mut &[u8]) -> Result<Self, fastrlp::DecodeError> { fn decode(buf: &mut &[u8]) -> Result<Self, reth_rlp::DecodeError> {
Ok(u64::decode(buf)?.into()) Ok(u64::decode(buf)?.into())
} }
} }

View File

@ -0,0 +1,457 @@
//! EIP-2124 implementation based on <https://eips.ethereum.org/EIPS/eip-2124>.
#![deny(missing_docs)]
#![allow(clippy::redundant_else, clippy::too_many_lines)]
use crate::{BlockNumber, H256};
use crc::crc32;
use maplit::btreemap;
use reth_rlp::*;
use std::{
collections::{BTreeMap, BTreeSet},
ops::{Add, AddAssign},
};
use thiserror::Error;
/// `CRC32` hash of all previous forks starting from genesis block.
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    Hash,
    // Wrapper derives: RLP-encodes/decodes as the inner 4-byte array itself,
    // not as a one-element list.
    RlpEncodableWrapper,
    RlpDecodableWrapper,
    RlpMaxEncodedLen,
)]
pub struct ForkHash(pub [u8; 4]); // big-endian CRC32 state bytes (see the impls below)
impl From<H256> for ForkHash {
    // Seed: IEEE CRC32 checksum of the 32-byte genesis hash, stored big-endian.
    fn from(genesis: H256) -> Self {
        Self(crc32::checksum_ieee(&genesis[..]).to_be_bytes())
    }
}
impl AddAssign<BlockNumber> for ForkHash {
    // Fold a fork block number (as big-endian bytes) into the rolling CRC32,
    // resuming from the current state per EIP-2124.
    fn add_assign(&mut self, block: BlockNumber) {
        let blob = block.to_be_bytes();
        self.0 = crc32::update(u32::from_be_bytes(self.0), &crc32::IEEE_TABLE, &blob).to_be_bytes();
    }
}
impl Add<BlockNumber> for ForkHash {
    type Output = Self;
    // By-value form of the `+=` operator above.
    fn add(mut self, block: BlockNumber) -> Self {
        self += block;
        self
    }
}
/// A fork identifier as defined by EIP-2124.
/// Serves as the chain compatibility identifier.
// RlpMaxEncodedLen: encoded size is statically bounded (4-byte hash + u64),
// enabling fixed-size encoding.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, RlpEncodable, RlpDecodable, RlpMaxEncodedLen)]
pub struct ForkId {
    /// CRC32 checksum of the all fork blocks from genesis.
    pub hash: ForkHash,
    /// Next upcoming fork block number, 0 if not yet known.
    pub next: BlockNumber,
}
/// Reason for rejecting provided `ForkId`.
// Returned by `ForkFilter::validate`; the variants mirror the EIP-2124
// disconnect reasons.
#[derive(Clone, Copy, Debug, Error, PartialEq, Eq, Hash)]
pub enum ValidationError {
    /// Remote node is outdated and needs a software update.
    #[error("remote node is outdated and needs a software update")]
    RemoteStale,
    /// Local node is on an incompatible chain or needs a software update.
    #[error("local node is on an incompatible chain or needs a software update")]
    LocalIncompatibleOrStale,
}
/// Filter that describes the state of blockchain and can be used to check incoming `ForkId`s for
/// compatibility.
#[derive(Clone, Debug, PartialEq)]
pub struct ForkFilter {
    /// Map of fork block number -> cumulative fork hash; genesis lives at key 0.
    forks: BTreeMap<BlockNumber, ForkHash>,
    /// Current local head block number.
    head: BlockNumber,
    /// Precomputed per-epoch state; rebuilt when `head` crosses an epoch boundary.
    cache: Cache,
}
#[derive(Clone, Debug, PartialEq)]
struct Cache {
    // An epoch is a period between forks.
    // When we progress from one fork to the next one we move to the next epoch.
    epoch_start: BlockNumber,
    // First fork block strictly beyond the head; `None` if no future fork is known.
    epoch_end: Option<BlockNumber>,
    // Forks already passed by the head, in ascending block order.
    past: Vec<(BlockNumber, ForkHash)>,
    // Hashes of forks still ahead of the head.
    future: Vec<ForkHash>,
    // Fork id advertised for the current epoch.
    fork_id: ForkId,
}
impl Cache {
    /// Compute the cache: partition `forks` around `head` into past/future and
    /// derive the advertised `ForkId` for the current epoch.
    fn compute_cache(forks: &BTreeMap<BlockNumber, ForkHash>, head: BlockNumber) -> Self {
        let mut past = Vec::with_capacity(forks.len());
        let mut future = Vec::with_capacity(forks.len());
        let mut epoch_start = 0;
        let mut epoch_end = None;
        // BTreeMap iterates in ascending block order, so `past` stays sorted and
        // `epoch_end` captures the first fork block strictly beyond `head`.
        for (block, hash) in forks {
            if *block <= head {
                epoch_start = *block;
                past.push((*block, *hash));
            } else {
                if epoch_end.is_none() {
                    epoch_end = Some(*block);
                }
                future.push(*hash);
            }
        }
        let fork_id = ForkId {
            // `forks` always contains the genesis entry at key 0 (inserted by
            // `ForkFilter::new`), so `past` is never empty.
            hash: past.last().expect("there is always at least one - genesis - fork hash; qed").1,
            // 0 signals "no known upcoming fork" per EIP-2124.
            next: epoch_end.unwrap_or(0),
        };
        Self { epoch_start, epoch_end, past, future, fork_id }
    }
}
impl ForkFilter {
    /// Create the filter from provided head, genesis block hash, past forks and expected future
    /// forks.
    pub fn new<F>(head: BlockNumber, genesis: H256, forks: F) -> Self
    where
        F: IntoIterator<Item = BlockNumber>,
    {
        let genesis_fork_hash = ForkHash::from(genesis);
        // Dedup/sort the fork blocks; drop block 0 since genesis is seeded separately.
        let mut forks = forks.into_iter().collect::<BTreeSet<_>>();
        forks.remove(&0);
        // Build the block -> cumulative fork-hash map, threading the rolling
        // hash through the fold as the accumulator's second element.
        let forks = forks
            .into_iter()
            .fold(
                (btreemap! { 0 => genesis_fork_hash }, genesis_fork_hash),
                |(mut acc, base_hash), block| {
                    let fork_hash = base_hash + block;
                    acc.insert(block, fork_hash);
                    (acc, fork_hash)
                },
            )
            .0;
        let cache = Cache::compute_cache(&forks, head);
        Self { forks, head, cache }
    }
    // Advance the head; recompute the cache only when the head leaves the
    // current epoch (backwards before its start or forwards past its end).
    // Returns `true` if the cache was recomputed.
    fn set_head_priv(&mut self, head: BlockNumber) -> bool {
        #[allow(clippy::option_if_let_else)]
        let recompute_cache = {
            if head < self.cache.epoch_start {
                true
            } else if let Some(epoch_end) = self.cache.epoch_end {
                head >= epoch_end
            } else {
                false
            }
        };
        if recompute_cache {
            self.cache = Cache::compute_cache(&self.forks, head);
        }
        self.head = head;
        recompute_cache
    }
    /// Set the current head
    pub fn set_head(&mut self, head: BlockNumber) {
        self.set_head_priv(head);
    }
    /// Return current fork id
    #[must_use]
    pub const fn current(&self) -> ForkId {
        self.cache.fork_id
    }
    /// Check whether the provided `ForkId` is compatible based on the validation rules in
    /// `EIP-2124`.
    ///
    /// # Errors
    /// Returns a `ValidationError` if the `ForkId` is not compatible.
    pub fn validate(&self, fork_id: ForkId) -> Result<(), ValidationError> {
        // 1) If local and remote FORK_HASH matches...
        if self.current().hash == fork_id.hash {
            if fork_id.next == 0 {
                // 1b) No remotely announced fork, connect.
                return Ok(())
            }
            //... compare local head to FORK_NEXT.
            if self.head >= fork_id.next {
                // 1a) A remotely announced but remotely not passed block is already passed locally,
                // disconnect, since the chains are incompatible.
                return Err(ValidationError::LocalIncompatibleOrStale)
            } else {
                // 1b) Remotely announced fork not yet passed locally, connect.
                return Ok(())
            }
        }
        // 2) If the remote FORK_HASH is a subset of the local past forks...
        // `past` is in ascending block order, so the iterator entry after a
        // matching hash is the locally-known next fork.
        let mut it = self.cache.past.iter();
        while let Some((_, hash)) = it.next() {
            if *hash == fork_id.hash {
                // ...and the remote FORK_NEXT matches with the locally following fork block number,
                // connect.
                if let Some((actual_fork_block, _)) = it.next() {
                    if *actual_fork_block == fork_id.next {
                        return Ok(())
                    } else {
                        return Err(ValidationError::RemoteStale)
                    }
                }
                break
            }
        }
        // 3) If the remote FORK_HASH is a superset of the local past forks and can be completed
        // with locally known future forks, connect.
        for future_fork_hash in &self.cache.future {
            if *future_fork_hash == fork_id.hash {
                return Ok(())
            }
        }
        // 4) Reject in all other cases.
        Err(ValidationError::LocalIncompatibleOrStale)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use hex_literal::hex;
const GENESIS_HASH: H256 =
H256(hex!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"));
// EIP test vectors.
#[test]
fn forkhash() {
let mut fork_hash = ForkHash::from(GENESIS_HASH);
assert_eq!(fork_hash.0, hex!("fc64ec04"));
fork_hash += 1_150_000;
assert_eq!(fork_hash.0, hex!("97c2c34c"));
fork_hash += 1_920_000;
assert_eq!(fork_hash.0, hex!("91d1f948"));
}
#[test]
fn compatibility_check() {
let mut filter = ForkFilter::new(
0,
GENESIS_HASH,
vec![1_150_000, 1_920_000, 2_463_000, 2_675_000, 4_370_000, 7_280_000],
);
// Local is mainnet Petersburg, remote announces the same. No future fork is announced.
filter.set_head(7_987_396);
assert_eq!(filter.validate(ForkId { hash: ForkHash(hex!("668db0af")), next: 0 }), Ok(()));
// Local is mainnet Petersburg, remote announces the same. Remote also announces a next fork
// at block 0xffffffff, but that is uncertain.
filter.set_head(7_987_396);
assert_eq!(
filter.validate(ForkId {
hash: ForkHash(hex!("668db0af")),
next: BlockNumber::max_value()
}),
Ok(())
);
// Local is mainnet currently in Byzantium only (so it's aware of Petersburg),remote
// announces also Byzantium, but it's not yet aware of Petersburg (e.g. non updated
// node before the fork). In this case we don't know if Petersburg passed yet or
// not.
filter.set_head(7_279_999);
assert_eq!(filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 0 }), Ok(()));
// Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote
// announces also Byzantium, and it's also aware of Petersburg (e.g. updated node
// before the fork). We don't know if Petersburg passed yet (will pass) or not.
filter.set_head(7_279_999);
assert_eq!(
filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 7_280_000 }),
Ok(())
);
// Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote
// announces also Byzantium, and it's also aware of some random fork (e.g.
// misconfigured Petersburg). As neither forks passed at neither nodes, they may
// mismatch, but we still connect for now.
filter.set_head(7_279_999);
assert_eq!(
filter.validate(ForkId {
hash: ForkHash(hex!("a00bc324")),
next: BlockNumber::max_value()
}),
Ok(())
);
// Local is mainnet Petersburg, remote announces Byzantium + knowledge about Petersburg.
// Remote is simply out of sync, accept.
filter.set_head(7_987_396);
assert_eq!(
filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 7_280_000 }),
Ok(())
);
// Local is mainnet Petersburg, remote announces Spurious + knowledge about Byzantium.
// Remote is definitely out of sync. It may or may not need the Petersburg update,
// we don't know yet.
filter.set_head(7_987_396);
assert_eq!(
filter.validate(ForkId { hash: ForkHash(hex!("3edd5b10")), next: 4_370_000 }),
Ok(())
);
// Local is mainnet Byzantium, remote announces Petersburg. Local is out of sync, accept.
filter.set_head(7_279_999);
assert_eq!(filter.validate(ForkId { hash: ForkHash(hex!("668db0af")), next: 0 }), Ok(()));
// Local is mainnet Spurious, remote announces Byzantium, but is not aware of Petersburg.
// Local out of sync. Local also knows about a future fork, but that is uncertain
// yet.
filter.set_head(4_369_999);
assert_eq!(filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 0 }), Ok(()));
// Local is mainnet Petersburg. remote announces Byzantium but is not aware of further
// forks. Remote needs software update.
filter.set_head(7_987_396);
assert_eq!(
filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 0 }),
Err(ValidationError::RemoteStale)
);
// Local is mainnet Petersburg, and isn't aware of more forks. Remote announces Petersburg +
// 0xffffffff. Local needs software update, reject.
filter.set_head(7_987_396);
assert_eq!(
filter.validate(ForkId { hash: ForkHash(hex!("5cddc0e1")), next: 0 }),
Err(ValidationError::LocalIncompatibleOrStale)
);
// Local is mainnet Byzantium, and is aware of Petersburg. Remote announces Petersburg +
// 0xffffffff. Local needs software update, reject.
filter.set_head(7_279_999);
assert_eq!(
filter.validate(ForkId { hash: ForkHash(hex!("5cddc0e1")), next: 0 }),
Err(ValidationError::LocalIncompatibleOrStale)
);
// Local is mainnet Petersburg, remote is Rinkeby Petersburg.
filter.set_head(7_987_396);
assert_eq!(
filter.validate(ForkId { hash: ForkHash(hex!("afec6b27")), next: 0 }),
Err(ValidationError::LocalIncompatibleOrStale)
);
// Local is mainnet Petersburg, far in the future. Remote announces Gopherium (non existing
// fork) at some future block 88888888, for itself, but past block for local. Local
// is incompatible.
//
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
filter.set_head(88_888_888);
assert_eq!(
filter.validate(ForkId { hash: ForkHash(hex!("668db0af")), next: 88_888_888 }),
Err(ValidationError::LocalIncompatibleOrStale)
);
// Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non
// existing fork) at block 7279999, before Petersburg. Local is incompatible.
filter.set_head(7_279_999);
assert_eq!(
filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 7_279_999 }),
Err(ValidationError::LocalIncompatibleOrStale)
);
}
#[test]
fn forkid_serialization() {
    // RLP round-trip vectors for `ForkId`: (decoded value, expected encoding).
    // Encodings are taken from the EIP-2124 test vectors.
    let cases: [(ForkId, &[u8]); 3] = [
        (ForkId { hash: ForkHash(hex!("00000000")), next: 0 }, &hex!("c6840000000080")),
        (
            ForkId { hash: ForkHash(hex!("deadbeef")), next: 0xBADD_CAFE },
            &hex!("ca84deadbeef84baddcafe"),
        ),
        (
            ForkId { hash: ForkHash(hex!("ffffffff")), next: u64::MAX },
            &hex!("ce84ffffffff88ffffffffffffffff"),
        ),
    ];

    for (value, encoded) in cases.iter() {
        // Encoding must produce exactly the expected byte string…
        assert_eq!(&*reth_rlp::encode_fixed_size(value), *encoded);
        // …and decoding that byte string must reproduce the original value.
        assert_eq!(ForkId::decode(&mut &encoded[..]).unwrap(), *value);
    }
}
#[test]
fn compute_cache() {
    // Two scheduled forks on top of genesis, and the fork id for each era.
    let b1 = 1_150_000;
    let b2 = 1_920_000;
    let h0 = ForkId { hash: ForkHash(hex!("fc64ec04")), next: b1 };
    let h1 = ForkId { hash: ForkHash(hex!("97c2c34c")), next: b2 };
    let h2 = ForkId { hash: ForkHash(hex!("91d1f948")), next: 0 };

    let mut fork_filter = ForkFilter::new(0, GENESIS_HASH, vec![b1, b2]);

    // (new head, whether `set_head_priv` reports a fork-id change, expected current fork id).
    // Walks the head back and forth across both fork boundaries to exercise the cache.
    let transitions = [
        (0, false, &h0),
        (1, false, &h0),
        (b1 + 1, true, &h1),
        (b1, false, &h1),
        (b1 - 1, true, &h0),
        (b1, true, &h1),
        (b2 - 1, false, &h1),
        (b2, true, &h2),
    ];

    for &(head, changed, fork_id) in transitions.iter() {
        assert_eq!(fork_filter.set_head_priv(head), changed);
        assert_eq!(&fork_filter.current(), fork_id);
    }
}
}

View File

@ -10,6 +10,7 @@
mod account;
mod block;
mod chain;
mod forkid;
mod header;
mod jsonu256;
mod log;
@ -19,6 +20,7 @@ mod transaction;
pub use account::Account;
pub use block::{Block, BlockLocked};
pub use chain::Chain;
pub use forkid::{ForkFilter, ForkHash, ForkId};
pub use header::{Header, HeaderLocked};
pub use jsonu256::JsonU256;
pub use log::Log;