Mirror of https://github.com/hl-archive-node/nanoreth.git, synced 2025-12-06 10:59:55 +00:00
feat(net): authenticate sessions (#178)
* Switch stream type of ActiveSession to EthStream
* Start `StatusBuilder` for initializing the `Status` message required for the handshake
* Add `Hardfork` for `Status` default forkid
* Add `MAINNET_GENESIS` constant
* finish `StatusBuilder`
* initialize eth streams in session
* add status, hello, and fork filter to session manager
* fix status builder example
* add status and hello to network config
* will probably remove
* removing status and hello from networkconfig
* move forkid to primitives
* change imports for forkid
* add hardfork to primitives
* remove hardfork and forkid from eth-wire
* fix remaining eth-wire forkid references
* put mainnet genesis in constants, remove NodeId
* replace NodeId with PeerId
* the only NodeId remaining is inherited from enr
* PeerId still needs to be documented
* also run cargo fmt
* replace loop with iter().any()
* ignore missing docs for hardforks
* use correct PeerId for Discv4::bind example test
* document PeerId as secp256k1 public key
* cargo fmt
* temporarily allow too_many_arguments
* the authenticate and start_pending_incoming_session methods have many arguments, we can reconsider the lint or fix the methods at a later point
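Editor's note: as a companion to the "replace NodeId with PeerId" and "document PeerId as secp256k1 public key" items above, here is a minimal sketch (not part of the commit) of how the diff below derives a `PeerId` from a secp256k1 keypair, mirroring the `Discv4::bind` doc example; the `rand` dependency is assumed to be available as it is elsewhere in the crate:

// Sketch only: a PeerId is the 64-byte uncompressed secp256k1 public key with
// the leading 0x04 tag byte stripped, as used throughout the diff below.
use reth_primitives::PeerId;
use secp256k1::SECP256K1;

fn derive_peer_id() -> PeerId {
    let mut rng = rand::thread_rng();
    let (_secret_key, pk) = SECP256K1.generate_keypair(&mut rng);
    // serialize_uncompressed() yields 65 bytes: 0x04 || x || y; drop the tag byte
    PeerId::from_slice(&pk.serialize_uncompressed()[1..])
}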
@@ -24,7 +24,7 @@ pub struct Discv4Config {
 pub find_node_timeout: Duration,
 /// The duration we set for neighbours responses
 pub neighbours_timeout: Duration,
-/// A set of lists that permit or ban IP's or NodeIds from the server. See
+/// A set of lists that permit or ban IP's or PeerIds from the server. See
 /// `crate::PermitBanList`.
 pub permit_ban_list: PermitBanList,
 /// Set the default duration for which nodes are banned for. This timeouts are checked every 5
@@ -110,7 +110,7 @@ impl Discv4ConfigBuilder {
 self
 }

-/// A set of lists that permit or ban IP's or NodeIds from the server. See
+/// A set of lists that permit or ban IP's or PeerIds from the server. See
 /// `crate::PermitBanList`.
 pub fn permit_ban_list(&mut self, list: PermitBanList) -> &mut Self {
 self.config.permit_ban_list = list;
@@ -30,7 +30,7 @@ use discv5::{
 },
 ConnectionDirection, ConnectionState,
 };
-use reth_primitives::{H256, H512};
+use reth_primitives::{PeerId, H256};
 use secp256k1::SecretKey;
 use std::{
 cell::RefCell,
@@ -67,9 +67,6 @@ pub mod mock;
 /// reexport to get public ip.
 pub use public_ip;

-/// Identifier for nodes.
-pub type NodeId = H512;
-
 /// The default port for discv4 via UDP
 ///
 /// Note: the default TCP port is the same.
@@ -140,12 +137,13 @@ impl Discv4 {
 /// use std::str::FromStr;
 /// use rand::thread_rng;
 /// use secp256k1::SECP256K1;
-/// use reth_discv4::{Discv4, Discv4Config, NodeId, NodeRecord};
+/// use reth_primitives::PeerId;
+/// use reth_discv4::{Discv4, Discv4Config, NodeRecord};
 /// # async fn t() -> io::Result<()> {
 /// // generate a (random) keypair
 /// let mut rng = thread_rng();
 /// let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng);
-/// let id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]);
+/// let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);
 ///
 /// let socket = SocketAddr::from_str("0.0.0.0:0").unwrap();
 /// let local_enr = NodeRecord {
@@ -212,11 +210,11 @@ impl Discv4 {
 }

 /// Looks up the given node id
-pub async fn lookup(&self, node_id: NodeId) -> Result<Vec<NodeRecord>, Discv4Error> {
+pub async fn lookup(&self, node_id: PeerId) -> Result<Vec<NodeRecord>, Discv4Error> {
 self.lookup_node(Some(node_id)).await
 }

-async fn lookup_node(&self, node_id: Option<NodeId>) -> Result<Vec<NodeRecord>, Discv4Error> {
+async fn lookup_node(&self, node_id: Option<PeerId>) -> Result<Vec<NodeRecord>, Discv4Error> {
 let (tx, rx) = oneshot::channel();
 let cmd = Discv4Command::Lookup { node_id, tx: Some(tx) };
 self.to_service.send(cmd).await?;
@@ -269,9 +267,9 @@ pub struct Discv4Service {
 /// followup `FindNode` requests.... Buffering them effectively prevents high `Ping` peaks.
 queued_pings: VecDeque<(NodeRecord, PingReason)>,
 /// Currently active pings to specific nodes.
-pending_pings: HashMap<NodeId, PingRequest>,
+pending_pings: HashMap<PeerId, PingRequest>,
 /// Currently active FindNode requests
-pending_find_nodes: HashMap<NodeId, FindNodeRequest>,
+pending_find_nodes: HashMap<PeerId, FindNodeRequest>,
 /// Commands listener
 commands_rx: Option<mpsc::Receiver<Discv4Command>>,
 /// All subscribers for table updates
@@ -377,8 +375,8 @@ impl Discv4Service {
 &mut self.local_enr
 }

-/// Returns true if the given NodeId is currently in the bucket
-pub fn contains_node(&self, id: NodeId) -> bool {
+/// Returns true if the given PeerId is currently in the bucket
+pub fn contains_node(&self, id: PeerId) -> bool {
 let key = kad_key(id);
 self.kbuckets.get_index(&key).is_some()
 }
@@ -431,7 +429,7 @@ impl Discv4Service {
 //
 // To guard against traffic amplification attacks, Neighbors replies should only be sent if the
 // sender of FindNode has been verified by the endpoint proof procedure.
-pub fn lookup(&mut self, target: NodeId) {
+pub fn lookup(&mut self, target: PeerId) {
 self.lookup_with(target, None)
 }

@@ -445,7 +443,7 @@ impl Discv4Service {
 /// This takes an optional Sender through which all successfully discovered nodes are sent once
 /// the request has finished.
 #[instrument(skip_all, fields(?target), target = "net::discv4")]
-fn lookup_with(&mut self, target: NodeId, tx: Option<NodeRecordSender>) {
+fn lookup_with(&mut self, target: PeerId, tx: Option<NodeRecordSender>) {
 trace!("Starting lookup");
 let key = kad_key(target);

@@ -499,7 +497,7 @@ impl Discv4Service {
 ///
 /// This allows applications, for whatever reason, to remove nodes from the local routing
 /// table. Returns `true` if the node was in the table and `false` otherwise.
-pub fn remove_node(&mut self, node_id: NodeId) -> bool {
+pub fn remove_node(&mut self, node_id: PeerId) -> bool {
 let key = kad_key(node_id);
 let removed = self.kbuckets.remove(&key);
 if removed {
@@ -559,7 +557,7 @@ impl Discv4Service {
 }

 /// Message handler for an incoming `Ping`
-fn on_ping(&mut self, ping: Ping, remote_addr: SocketAddr, remote_id: NodeId, hash: H256) {
+fn on_ping(&mut self, ping: Ping, remote_addr: SocketAddr, remote_id: PeerId, hash: H256) {
 // update the record
 let record = NodeRecord {
 address: ping.from.address,
@@ -611,7 +609,7 @@ impl Discv4Service {
 }

 /// Message handler for an incoming `Pong`.
-fn on_pong(&mut self, pong: Pong, remote_addr: SocketAddr, remote_id: NodeId) {
+fn on_pong(&mut self, pong: Pong, remote_addr: SocketAddr, remote_id: PeerId) {
 if self.is_expired(pong.expire) {
 return
 }
@@ -654,7 +652,7 @@ impl Discv4Service {
 }

 /// Handler for incoming `FindNode` message
-fn on_find_node(&mut self, msg: FindNode, remote_addr: SocketAddr, node_id: NodeId) {
+fn on_find_node(&mut self, msg: FindNode, remote_addr: SocketAddr, node_id: PeerId) {
 match self.node_status(node_id, remote_addr) {
 NodeEntryStatus::IsLocal => {
 // received address from self
@@ -675,7 +673,7 @@ impl Discv4Service {

 /// Handler for incoming `Neighbours` messages that are handled if they're responses to
 /// `FindNode` requests
-fn on_neighbours(&mut self, msg: Neighbours, remote_addr: SocketAddr, node_id: NodeId) {
+fn on_neighbours(&mut self, msg: Neighbours, remote_addr: SocketAddr, node_id: PeerId) {
 // check if this request was expected
 let ctx = match self.pending_find_nodes.entry(node_id) {
 Entry::Occupied(mut entry) => {
@@ -732,7 +730,7 @@ impl Discv4Service {
 }

 /// Sends a Neighbours packet for `target` to the given addr
-fn respond_closest(&mut self, target: NodeId, to: SocketAddr) {
+fn respond_closest(&mut self, target: PeerId, to: SocketAddr) {
 let key = kad_key(target);
 let expire = self.send_neighbours_timeout();
 let all_nodes = self.kbuckets.closest_values(&key).collect::<Vec<_>>();
@@ -746,7 +744,7 @@ impl Discv4Service {
 }

 /// Returns the current status of the node
-fn node_status(&mut self, node: NodeId, addr: SocketAddr) -> NodeEntryStatus {
+fn node_status(&mut self, node: PeerId, addr: SocketAddr) -> NodeEntryStatus {
 if node == self.local_enr.id {
 debug!(?node, target = "net::disc", "Got an incoming discovery request from self");
 return NodeEntryStatus::IsLocal
@@ -807,7 +805,7 @@ impl Discv4Service {
 }

 /// Removes the node from the table
-fn expire_node_request(&mut self, node_id: NodeId) {
+fn expire_node_request(&mut self, node_id: PeerId) {
 let key = kad_key(node_id);
 self.kbuckets.remove(&key);
 }
@@ -976,7 +974,7 @@ pub(crate) async fn send_loop(udp: Arc<UdpSocket>, rx: EgressReceiver) {
 }

 /// Continuously awaits new incoming messages and sends them back through the channel.
-pub(crate) async fn receive_loop(udp: Arc<UdpSocket>, tx: IngressSender, local_id: NodeId) {
+pub(crate) async fn receive_loop(udp: Arc<UdpSocket>, tx: IngressSender, local_id: PeerId) {
 loop {
 let mut buf = [0; MAX_PACKET_SIZE];
 let res = udp.recv_from(&mut buf).await;
@@ -1010,7 +1008,7 @@ pub(crate) async fn receive_loop(udp: Arc<UdpSocket>, tx: IngressSender, local_i

 /// The commands sent from the frontend to the service
 enum Discv4Command {
-Lookup { node_id: Option<NodeId>, tx: Option<NodeRecordSender> },
+Lookup { node_id: Option<PeerId>, tx: Option<NodeRecordSender> },
 Updates(OneshotSender<ReceiverStream<TableUpdate>>),
 }

@@ -1036,7 +1034,7 @@ struct PingRequest {
 reason: PingReason,
 }

-/// Rotates the NodeId that is periodically looked up.
+/// Rotates the PeerId that is periodically looked up.
 ///
 /// By selecting different targets, the lookups will be seeded with different ALPHA seed nodes.
 #[derive(Debug)]
@@ -1066,13 +1064,13 @@ impl Default for LookupTargetRotator {

 impl LookupTargetRotator {
 /// this will return the next node id to lookup
-fn next(&mut self, local: &NodeId) -> NodeId {
+fn next(&mut self, local: &PeerId) -> PeerId {
 self.counter += 1;
 self.counter %= self.interval;
 if self.counter == 0 {
 return *local
 }
-NodeId::random()
+PeerId::random()
 }
 }

@@ -1087,7 +1085,7 @@ struct LookupContext {
 impl LookupContext {
 /// Create new context for a recursive lookup
 fn new(
-target: NodeId,
+target: PeerId,
 nearest_nodes: impl IntoIterator<Item = (Distance, NodeRecord)>,
 listener: Option<NodeRecordSender>,
 ) -> Self {
@@ -1107,7 +1105,7 @@ impl LookupContext {
 }

 /// Returns the target of this lookup
-fn target(&self) -> NodeId {
+fn target(&self) -> PeerId {
 self.inner.target
 }

@@ -1132,7 +1130,7 @@ impl LookupContext {
 }

 /// Marks the node as queried
-fn mark_queried(&self, id: NodeId) {
+fn mark_queried(&self, id: PeerId) {
 if let Some((_, node)) =
 self.inner.closest_nodes.borrow_mut().iter_mut().find(|(_, node)| node.record.id == id)
 {
@@ -1141,7 +1139,7 @@ impl LookupContext {
 }

 /// Marks the node as responded
-fn mark_responded(&self, id: NodeId) {
+fn mark_responded(&self, id: PeerId) {
 if let Some((_, node)) =
 self.inner.closest_nodes.borrow_mut().iter_mut().find(|(_, node)| node.record.id == id)
 {
@@ -1159,7 +1157,7 @@ impl LookupContext {
 unsafe impl Send for LookupContext {}

 struct LookupContextInner {
-target: NodeId,
+target: PeerId,
 /// The closest nodes
 closest_nodes: RefCell<BTreeMap<Distance, QueryNode>>,
 /// A listener for all the nodes retrieved in this lookup
@@ -1249,7 +1247,7 @@ enum PingReason {
 ///
 /// Once the expected PONG is received, the endpoint proof is complete and the find node can be
 /// answered.
-FindNode(NodeId, NodeEntryStatus),
+FindNode(PeerId, NodeEntryStatus),
 /// Part of a lookup to ensure endpoint is proven.
 Lookup(NodeRecord, LookupContext),
 }
@@ -1260,7 +1258,7 @@ pub enum TableUpdate {
 /// A new node was inserted to the table.
 Added(NodeRecord),
 /// Node that was removed from the table
-Removed(NodeId),
+Removed(PeerId),
 /// A series of updates
 Batch(Vec<TableUpdate>),
 }
@@ -1276,7 +1274,7 @@ mod tests {

 #[test]
 fn test_local_rotator() {
-let id = NodeId::random();
+let id = PeerId::random();
 let mut rotator = LookupTargetRotator::local_only();
 assert_eq!(rotator.next(&id), id);
 assert_eq!(rotator.next(&id), id);
@@ -1284,7 +1282,7 @@ mod tests {

 #[test]
 fn test_rotator() {
-let id = NodeId::random();
+let id = PeerId::random();
 let mut rotator = LookupTargetRotator::default();
 assert_eq!(rotator.next(&id), id);
 assert_ne!(rotator.next(&id), id);
@@ -1301,7 +1299,7 @@ mod tests {
 let local_addr = service.local_addr();

 for idx in 0..MAX_NODES_PING {
-let node = NodeRecord::new(local_addr, NodeId::random());
+let node = NodeRecord::new(local_addr, PeerId::random());
 service.add_node(node);
 assert!(service.pending_pings.contains_key(&node.id));
 assert_eq!(service.pending_pings.len(), idx + 1);
@@ -6,7 +6,7 @@ use crate::{
 node::NodeRecord,
 proto::{FindNode, Message, Neighbours, NodeEndpoint, Packet, Ping, Pong},
 receive_loop, send_loop, Discv4, Discv4Config, Discv4Service, EgressSender, IngressEvent,
-IngressReceiver, NodeId, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS,
+IngressReceiver, PeerId, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS,
 };
 use rand::{thread_rng, Rng, RngCore};
 use reth_primitives::H256;
@@ -40,8 +40,8 @@ pub struct MockDiscovery {
 ingress: IngressReceiver,
 /// Sender for sending outgoing messages
 egress: EgressSender,
-pending_pongs: HashSet<NodeId>,
-pending_neighbours: HashMap<NodeId, Vec<NodeRecord>>,
+pending_pongs: HashSet<PeerId>,
+pending_neighbours: HashMap<PeerId, Vec<NodeRecord>>,
 command_rx: mpsc::Receiver<MockCommand>,
 }

@@ -51,7 +51,7 @@ impl MockDiscovery {
 let mut rng = thread_rng();
 let socket = SocketAddr::from_str("0.0.0.0:0").unwrap();
 let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng);
-let id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]);
+let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);
 let socket = Arc::new(UdpSocket::bind(socket).await?);
 let local_addr = socket.local_addr()?;
 let local_enr = NodeRecord {
@@ -95,12 +95,12 @@ impl MockDiscovery {
 }

 /// Queue a pending pong.
-pub fn queue_pong(&mut self, from: NodeId) {
+pub fn queue_pong(&mut self, from: PeerId) {
 self.pending_pongs.insert(from);
 }

 /// Queue a pending Neighbours response.
-pub fn queue_neighbours(&mut self, target: NodeId, nodes: Vec<NodeRecord>) {
+pub fn queue_neighbours(&mut self, target: PeerId, nodes: Vec<NodeRecord>) {
 self.pending_neighbours.insert(target, nodes);
 }

@@ -195,8 +195,8 @@ pub enum MockEvent {

 /// Command for interacting with the `MockDiscovery` service
 pub enum MockCommand {
-MockPong { node_id: NodeId },
-MockNeighbours { target: NodeId, nodes: Vec<NodeRecord> },
+MockPong { node_id: PeerId },
+MockNeighbours { target: PeerId, nodes: Vec<NodeRecord> },
 }

 /// Creates a new testing instance for [`Discv4`] and its service
@@ -209,7 +209,7 @@ pub async fn create_discv4_with_config(config: Discv4Config) -> (Discv4, Discv4S
 let mut rng = thread_rng();
 let socket = SocketAddr::from_str("0.0.0.0:0").unwrap();
 let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng);
-let id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]);
+let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);
 let external_addr = public_ip::addr().await.unwrap_or_else(|| socket.ip());
 let local_enr =
 NodeRecord { address: external_addr, tcp_port: socket.port(), udp_port: socket.port(), id };
@@ -231,21 +231,21 @@ pub fn rng_endpoint(rng: &mut impl Rng) -> NodeEndpoint {

 pub fn rng_record(rng: &mut impl RngCore) -> NodeRecord {
 let NodeEndpoint { address, udp_port, tcp_port } = rng_endpoint(rng);
-NodeRecord { address, tcp_port, udp_port, id: NodeId::random() }
+NodeRecord { address, tcp_port, udp_port, id: PeerId::random() }
 }

 pub fn rng_ipv6_record(rng: &mut impl RngCore) -> NodeRecord {
 let mut ip = [0u8; 16];
 rng.fill_bytes(&mut ip);
 let address = IpAddr::V6(ip.into());
-NodeRecord { address, tcp_port: rng.gen(), udp_port: rng.gen(), id: NodeId::random() }
+NodeRecord { address, tcp_port: rng.gen(), udp_port: rng.gen(), id: PeerId::random() }
 }

 pub fn rng_ipv4_record(rng: &mut impl RngCore) -> NodeRecord {
 let mut ip = [0u8; 4];
 rng.fill_bytes(&mut ip);
 let address = IpAddr::V4(ip.into());
-NodeRecord { address, tcp_port: rng.gen(), udp_port: rng.gen(), id: NodeId::random() }
+NodeRecord { address, tcp_port: rng.gen(), udp_port: rng.gen(), id: PeerId::random() }
 }

 pub fn rng_message(rng: &mut impl RngCore) -> Message {
@@ -256,7 +256,7 @@ pub fn rng_message(rng: &mut impl RngCore) -> Message {
 expire: rng.gen(),
 }),
 2 => Message::Pong(Pong { to: rng_endpoint(rng), echo: H256::random(), expire: rng.gen() }),
-3 => Message::FindNode(FindNode { id: NodeId::random(), expire: rng.gen() }),
+3 => Message::FindNode(FindNode { id: PeerId::random(), expire: rng.gen() }),
 4 => {
 let num: usize = rng.gen_range(1..=SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS);
 Message::Neighbours(Neighbours {
@@ -1,4 +1,4 @@
-use crate::{proto::Octets, NodeId};
+use crate::{proto::Octets, PeerId};
 use bytes::{Buf, BufMut};
 use generic_array::GenericArray;
 use reth_primitives::keccak256;
@@ -13,10 +13,10 @@ use url::{Host, Url};

 /// The key type for the table.
 #[derive(Debug, Copy, Clone, Eq, PartialEq)]
-pub(crate) struct NodeKey(pub(crate) NodeId);
+pub(crate) struct NodeKey(pub(crate) PeerId);

-impl From<NodeId> for NodeKey {
-fn from(value: NodeId) -> Self {
+impl From<PeerId> for NodeKey {
+fn from(value: PeerId) -> Self {
 NodeKey(value)
 }
 }
@@ -29,9 +29,9 @@ impl From<NodeKey> for discv5::Key<NodeKey> {
 }
 }

-/// Converts a `NodeId` into the required `Key` type for the table
+/// Converts a `PeerId` into the required `Key` type for the table
 #[inline]
-pub(crate) fn kad_key(node: NodeId) -> discv5::Key<NodeKey> {
+pub(crate) fn kad_key(node: PeerId) -> discv5::Key<NodeKey> {
 discv5::kbucket::Key::from(NodeKey::from(node))
 }

@@ -45,20 +45,20 @@ pub struct NodeRecord {
 /// UDP discovery port.
 pub udp_port: u16,
 /// Public key of the discovery service
-pub id: NodeId,
+pub id: PeerId,
 }

 impl NodeRecord {
 /// Derive the [`NodeRecord`] from the secret key and addr
 pub fn from_secret_key(addr: SocketAddr, sk: &SecretKey) -> Self {
 let pk = secp256k1::PublicKey::from_secret_key(SECP256K1, sk);
-let id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]);
+let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);
 Self::new(addr, id)
 }

 /// Creates a new record
 #[allow(unused)]
-pub(crate) fn new(addr: SocketAddr, id: NodeId) -> Self {
+pub(crate) fn new(addr: SocketAddr, id: PeerId) -> Self {
 Self { address: addr.ip(), tcp_port: addr.port(), udp_port: addr.port(), id }
 }

@@ -112,7 +112,7 @@ impl FromStr for NodeRecord {

 let id = url
 .username()
-.parse::<NodeId>()
+.parse::<PeerId>()
 .map_err(|e| NodeRecordParseError::InvalidId(e.to_string()))?;

 Ok(Self { address, id, tcp_port: port, udp_port: port })
@@ -126,7 +126,7 @@ impl Encodable for NodeRecord {
 octets: Octets,
 udp_port: u16,
 tcp_port: u16,
-id: NodeId,
+id: PeerId,
 }

 let octets = match self.address {
@@ -185,7 +185,7 @@ mod tests {
 address: IpAddr::V4(ip.into()),
 tcp_port: rng.gen(),
 udp_port: rng.gen(),
-id: NodeId::random(),
+id: PeerId::random(),
 };

 let mut buf = BytesMut::new();
@@ -206,7 +206,7 @@ mod tests {
 address: IpAddr::V6(ip.into()),
 tcp_port: rng.gen(),
 udp_port: rng.gen(),
-id: NodeId::random(),
+id: PeerId::random(),
 };

 let mut buf = BytesMut::new();
@@ -1,6 +1,6 @@
 #![allow(missing_docs)]

-use crate::{error::DecodePacketError, node::NodeRecord, NodeId, MAX_PACKET_SIZE, MIN_PACKET_SIZE};
+use crate::{error::DecodePacketError, node::NodeRecord, PeerId, MAX_PACKET_SIZE, MIN_PACKET_SIZE};
 use bytes::{Buf, BufMut, Bytes, BytesMut};
 use reth_primitives::{keccak256, H256};
 use reth_rlp::{Decodable, DecodeError, Encodable, Header};
@@ -136,7 +136,7 @@ impl Message {
 let msg = secp256k1::Message::from_slice(keccak256(&packet[97..]).as_bytes())?;

 let pk = SECP256K1.recover_ecdsa(&msg, &recoverable_sig)?;
-let node_id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]);
+let node_id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);

 let msg_type = packet[97];
 let payload = &mut &packet[98..];
@@ -156,7 +156,7 @@ impl Message {
 #[derive(Debug)]
 pub struct Packet {
 pub msg: Message,
-pub node_id: NodeId,
+pub node_id: PeerId,
 pub hash: H256,
 }

@@ -223,7 +223,7 @@ impl From<NodeRecord> for NodeEndpoint {
 /// A [FindNode packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#findnode-packet-0x03).).
 #[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)]
 pub struct FindNode {
-pub id: NodeId,
+pub id: PeerId,
 pub expire: u64,
 }

@@ -499,7 +499,7 @@ mod tests {
 for _ in 0..100 {
 let msg = rng_message(&mut rng);
 let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng);
-let sender_id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]);
+let sender_id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);

 let (buf, _) = msg.encode(&secret_key);
@@ -17,6 +17,9 @@ reth-ecies = { path = "../ecies" }
 reth-primitives = { path = "../../primitives" }
 reth-rlp = { path = "../../common/rlp", features = ["alloc", "derive", "std", "ethereum-types", "smol_str"] }

+# used for Chain and builders
+ethers-core = { git = "https://github.com/gakonst/ethers-rs", default-features = false }
+
 #used for forkid
 crc = "1"
 maplit = "1"
crates/net/eth-wire/src/builder.rs (new file, 145 lines)
@@ -0,0 +1,145 @@
+//! Builder structs for [`Status`](crate::types::Status) and [`Hello`](crate::types::Hello)
+//! messages.
+
+use crate::{
+capability::Capability,
+p2pstream::{HelloMessage, ProtocolVersion},
+EthVersion, Status,
+};
+use reth_primitives::{Chain, ForkId, PeerId, H256, U256};
+
+/// Builder for [`Status`](crate::types::Status) messages.
+///
+/// # Example
+/// ```
+/// use reth_eth_wire::EthVersion;
+/// use reth_primitives::{Chain, U256, H256, MAINNET_GENESIS, Hardfork};
+/// use reth_eth_wire::types::Status;
+///
+/// // this is just an example status message!
+/// let status = Status::builder()
+/// .version(EthVersion::Eth66.into())
+/// .chain(Chain::Named(ethers_core::types::Chain::Mainnet))
+/// .total_difficulty(U256::from(100))
+/// .blockhash(H256::from(MAINNET_GENESIS))
+/// .genesis(H256::from(MAINNET_GENESIS))
+/// .forkid(Hardfork::Latest.fork_id())
+/// .build();
+///
+/// assert_eq!(
+/// status,
+/// Status {
+/// version: EthVersion::Eth66.into(),
+/// chain: Chain::Named(ethers_core::types::Chain::Mainnet),
+/// total_difficulty: U256::from(100),
+/// blockhash: H256::from(MAINNET_GENESIS),
+/// genesis: H256::from(MAINNET_GENESIS),
+/// forkid: Hardfork::Latest.fork_id(),
+/// }
+/// );
+/// ```
+#[derive(Debug, Default)]
+pub struct StatusBuilder {
+status: Status,
+}
+
+impl StatusBuilder {
+/// Consumes the type and creates the actual [`Status`](crate::types::Status) message.
+pub fn build(self) -> Status {
+self.status
+}
+
+/// Sets the protocol version.
+pub fn version(mut self, version: u8) -> Self {
+self.status.version = version;
+self
+}
+
+/// Sets the chain id.
+pub fn chain(mut self, chain: Chain) -> Self {
+self.status.chain = chain;
+self
+}
+
+/// Sets the total difficulty.
+pub fn total_difficulty(mut self, total_difficulty: U256) -> Self {
+self.status.total_difficulty = total_difficulty;
+self
+}
+
+/// Sets the block hash.
+pub fn blockhash(mut self, blockhash: H256) -> Self {
+self.status.blockhash = blockhash;
+self
+}
+
+/// Sets the genesis hash.
+pub fn genesis(mut self, genesis: H256) -> Self {
+self.status.genesis = genesis;
+self
+}
+
+/// Sets the fork id.
+pub fn forkid(mut self, forkid: ForkId) -> Self {
+self.status.forkid = forkid;
+self
+}
+}
+
+/// Builder for [`Hello`](crate::types::Hello) messages.
+pub struct HelloBuilder {
+hello: HelloMessage,
+}
+
+impl HelloBuilder {
+/// Creates a new [`HelloBuilder`](crate::builder::HelloBuilder) with default [`Hello`] values,
+/// and a `PeerId` corresponding to the given pubkey.
+pub fn new(pubkey: PeerId) -> Self {
+Self {
+hello: HelloMessage {
+protocol_version: ProtocolVersion::V5,
+// TODO: proper client versioning
+client_version: "Ethereum/1.0.0".to_string(),
+capabilities: vec![EthVersion::Eth67.into()],
+// TODO: default port config
+port: 30303,
+id: pubkey,
+},
+}
+}
+
+/// Consumes the type and creates the actual [`Hello`](crate::types::Hello) message.
+pub fn build(self) -> HelloMessage {
+self.hello
+}
+
+/// Sets the protocol version.
+pub fn protocol_version(mut self, protocol_version: ProtocolVersion) -> Self {
+self.hello.protocol_version = protocol_version;
+self
+}
+
+/// Sets the client version.
+pub fn client_version(mut self, client_version: String) -> Self {
+self.hello.client_version = client_version;
+self
+}
+
+/// Sets the capabilities.
+pub fn capabilities(mut self, capabilities: Vec<Capability>) -> Self {
+self.hello.capabilities = capabilities;
+self
+}
+
+/// Sets the port.
+pub fn port(mut self, port: u16) -> Self {
+self.hello.port = port;
+self
+}
+
+/// Sets the node id.
+pub fn id(mut self, id: PeerId) -> Self {
+self.hello.id = id;
+self
+}
+}
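Editor's note: the new file above carries a doc example for `StatusBuilder` but none for `HelloBuilder`. A minimal usage sketch, assuming only the builder API added in this file and the crate-root re-exports added further below in this diff (the client string and `rand` usage are illustrative, not part of the commit):

// Sketch only: assemble a `Hello` handshake message for a freshly generated key.
use reth_eth_wire::{HelloBuilder, HelloMessage};
use reth_primitives::PeerId;
use secp256k1::SECP256K1;

fn local_hello() -> HelloMessage {
    // the PeerId is the uncompressed secp256k1 public key without the 0x04 prefix byte
    let (_secret_key, pk) = SECP256K1.generate_keypair(&mut rand::thread_rng());
    let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);

    HelloBuilder::new(id)
        // "example/0.1.0" is an arbitrary placeholder client version string
        .client_version("example/0.1.0".to_string())
        .port(30303)
        .build()
}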
@@ -91,6 +91,16 @@ impl Capabilities {
 }
 }

+impl From<Vec<Capability>> for Capabilities {
+fn from(value: Vec<Capability>) -> Self {
+Self {
+eth_66: value.iter().any(Capability::is_eth_v66),
+eth_67: value.iter().any(Capability::is_eth_v67),
+inner: value,
+}
+}
+}
+
 impl Encodable for Capabilities {
 fn encode(&self, out: &mut dyn BufMut) {
 self.inner.encode(out)
|
|||||||
//! Error cases when handling a [`crate::EthStream`]
|
//! Error cases when handling a [`crate::EthStream`]
|
||||||
use std::io;
|
use std::io;
|
||||||
|
|
||||||
use reth_primitives::{Chain, H256};
|
use reth_primitives::{Chain, ValidationError, H256};
|
||||||
|
|
||||||
use crate::{capability::SharedCapabilityError, types::forkid::ValidationError};
|
use crate::capability::SharedCapabilityError;
|
||||||
|
|
||||||
/// Errors when sending/receiving messages
|
/// Errors when sending/receiving messages
|
||||||
#[derive(thiserror::Error, Debug)]
|
#[derive(thiserror::Error, Debug)]
|
||||||
|
|||||||
@@ -1,10 +1,11 @@
 use crate::{
 error::{EthStreamError, HandshakeError},
-types::{forkid::ForkFilter, EthMessage, ProtocolMessage, Status},
+types::{EthMessage, ProtocolMessage, Status},
 };
 use bytes::{Bytes, BytesMut};
 use futures::{ready, Sink, SinkExt, StreamExt};
 use pin_project::pin_project;
+use reth_primitives::ForkFilter;
 use reth_rlp::{Decodable, Encodable};
 use std::{
 pin::Pin,
@@ -117,6 +118,7 @@ where
 /// An `EthStream` wraps over any `Stream` that yields bytes and makes it
 /// compatible with eth-networking protocol messages, which get RLP encoded/decoded.
 #[pin_project]
+#[derive(Debug)]
 pub struct EthStream<S> {
 #[pin]
 inner: S,
@@ -203,7 +205,7 @@ where
 mod tests {
 use crate::{
 p2pstream::{HelloMessage, ProtocolVersion, UnauthedP2PStream},
-types::{broadcast::BlockHashNumber, forkid::ForkFilter, EthMessage, Status},
+types::{broadcast::BlockHashNumber, EthMessage, Status},
 EthStream, PassthroughCodec,
 };
 use futures::{SinkExt, StreamExt};
@@ -214,7 +216,7 @@ mod tests {

 use crate::{capability::Capability, types::EthVersion};
 use ethers_core::types::Chain;
-use reth_primitives::{H256, U256};
+use reth_primitives::{ForkFilter, H256, U256};

 use super::UnauthedEthStream;
@@ -11,12 +11,17 @@
 pub use tokio_util::codec::{
 LengthDelimitedCodec as PassthroughCodec, LengthDelimitedCodecError as PassthroughCodecError,
 };
+pub mod builder;
 pub mod capability;
 pub mod error;
 mod ethstream;
 mod p2pstream;
 mod pinger;
+pub use builder::*;
 pub mod types;
 pub use types::*;

-pub use ethstream::{EthStream, UnauthedEthStream};
+pub use crate::{
+ethstream::{EthStream, UnauthedEthStream},
+p2pstream::{HelloMessage, P2PStream, UnauthedP2PStream},
+};
@@ -138,6 +138,7 @@ where
 /// A P2PStream wraps over any `Stream` that yields bytes and makes it compatible with `p2p`
 /// protocol messages.
 #[pin_project]
+#[derive(Debug)]
 pub struct P2PStream<S> {
 #[pin]
 inner: S,
@@ -6,8 +6,6 @@ pub use status::Status;
 pub mod version;
 pub use version::EthVersion;

-pub mod forkid;
-
 pub mod message;
 pub use message::{EthMessage, EthMessageID, ProtocolMessage};
@@ -1,5 +1,6 @@
-use super::forkid::ForkId;
-use reth_primitives::{Chain, H256, U256};
+use crate::{EthVersion, StatusBuilder};
+use reth_primitives::{Chain, ForkId, Hardfork, H256, MAINNET_GENESIS, U256};
 use reth_rlp::{RlpDecodable, RlpEncodable};
 use std::fmt::{Debug, Display};

@@ -37,6 +38,13 @@ pub struct Status {
 pub forkid: ForkId,
 }

+impl Status {
+/// Helper for returning a builder for the status message.
+pub fn builder() -> StatusBuilder {
+Default::default()
+}
+}
+
 impl Display for Status {
 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 let hexed_blockhash = hex::encode(self.blockhash);
@@ -84,18 +92,28 @@ impl Debug for Status {
 }
 }

+impl Default for Status {
+fn default() -> Self {
+Status {
+version: EthVersion::Eth67 as u8,
+chain: Chain::Named(ethers_core::types::Chain::Mainnet),
+total_difficulty: U256::zero(),
+blockhash: MAINNET_GENESIS,
+genesis: MAINNET_GENESIS,
+forkid: Hardfork::Homestead.fork_id(),
+}
+}
+}
+
 #[cfg(test)]
 mod tests {
 use ethers_core::types::Chain as NamedChain;
 use hex_literal::hex;
-use reth_primitives::{Chain, H256, U256};
+use reth_primitives::{Chain, ForkHash, ForkId, H256, U256};
 use reth_rlp::{Decodable, Encodable};
 use std::str::FromStr;

-use crate::types::{
-forkid::{ForkHash, ForkId},
-EthVersion, Status,
-};
+use crate::types::{EthVersion, Status};

 #[test]
 fn encode_eth_status_message() {
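Editor's note: because `StatusBuilder` derives `Default` and simply wraps a `Status`, `Status::builder()` starts from the new `Default` impl above (Eth67, mainnet chain, zero total difficulty, mainnet genesis, Homestead fork id). A small sketch of that relationship, assuming the crate-root re-export of `Status` from `reth_eth_wire`:

// Sketch only: a builder with no setters applied yields the default Status.
use reth_eth_wire::Status;

fn default_status_roundtrip() {
    let built = Status::builder().build();
    assert_eq!(built, Status::default());
}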
@@ -1,7 +1,6 @@
 use crate::{peers::PeersConfig, session::SessionsConfig};
 use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_PORT};
-use reth_eth_wire::forkid::ForkId;
-use reth_primitives::{Chain, H256};
+use reth_primitives::{Chain, ForkId, H256};
 use secp256k1::SecretKey;
 use std::{
 net::{Ipv4Addr, SocketAddr, SocketAddrV4},
@@ -76,8 +75,12 @@ pub struct NetworkConfigBuilder<C> {
 peers_config: Option<PeersConfig>,
 /// How to configure the sessions manager
 sessions_config: Option<SessionsConfig>,
+/// A fork identifier as defined by EIP-2124.
+/// Serves as the chain compatibility identifier.
 fork_id: Option<ForkId>,
+/// The network's chain id
 chain: Chain,
+/// Network genesis hash
 genesis_hash: H256,
 }
@@ -1,8 +1,9 @@
 //! Discovery support for the network.

-use crate::{error::NetworkError, NodeId};
+use crate::error::NetworkError;
 use futures::StreamExt;
 use reth_discv4::{Discv4, Discv4Config, NodeRecord, TableUpdate};
+use reth_primitives::PeerId;
 use secp256k1::SecretKey;
 use std::{
 collections::{hash_map::Entry, HashMap, VecDeque},
@@ -19,7 +20,7 @@ pub struct Discovery {
 /// All nodes discovered via discovery protocol.
 ///
 /// These nodes can be ephemeral and are updated via the discovery protocol.
-discovered_nodes: HashMap<NodeId, SocketAddr>,
+discovered_nodes: HashMap<PeerId, SocketAddr>,
 /// Local ENR of the discovery service.
 local_enr: NodeRecord,
 /// Handler to interact with the Discovery v4 service
@@ -66,12 +67,12 @@ impl Discovery {
 }

 /// Returns the id with which the local identifies itself in the network
-pub(crate) fn local_id(&self) -> NodeId {
+pub(crate) fn local_id(&self) -> PeerId {
 self.local_enr.id
 }

 /// Manually adds an address to the set.
-pub(crate) fn add_known_address(&mut self, node_id: NodeId, addr: SocketAddr) {
+pub(crate) fn add_known_address(&mut self, node_id: PeerId, addr: SocketAddr) {
 self.on_discv4_update(TableUpdate::Added(NodeRecord {
 address: addr.ip(),
 tcp_port: addr.port(),
@@ -81,7 +82,7 @@ impl Discovery {
 }

 /// Returns all nodes we know exist in the network.
-pub fn known_nodes(&mut self) -> &HashMap<NodeId, SocketAddr> {
+pub fn known_nodes(&mut self) -> &HashMap<PeerId, SocketAddr> {
 &self.discovered_nodes
 }

@@ -131,7 +132,7 @@ impl Discovery {
 /// Events produced by the [`Discovery`] manager.
 pub enum DiscoveryEvent {
 /// A new node was discovered
-Discovered(NodeId, SocketAddr),
+Discovered(PeerId, SocketAddr),
 }

 #[cfg(test)]
@@ -1,10 +1,10 @@
 //! Fetch data from the network.

-use crate::{message::BlockRequest, NodeId};
+use crate::message::BlockRequest;
 use futures::StreamExt;
 use reth_eth_wire::{BlockBody, EthMessage};
 use reth_interfaces::p2p::{error::RequestResult, headers::client::HeadersRequest};
-use reth_primitives::{Header, H256, U256};
+use reth_primitives::{Header, PeerId, H256, U256};
 use std::{
 collections::{HashMap, VecDeque},
 task::{Context, Poll},
@@ -19,9 +19,9 @@ use tokio_stream::wrappers::UnboundedReceiverStream;
 /// peers and sends the response once ready.
 pub struct StateFetcher {
 /// Currently active [`GetBlockHeaders`] requests
-inflight_headers_requests: HashMap<NodeId, Request<HeadersRequest, RequestResult<Vec<Header>>>>,
+inflight_headers_requests: HashMap<PeerId, Request<HeadersRequest, RequestResult<Vec<Header>>>>,
 /// The list of available peers for requests.
-peers: HashMap<NodeId, Peer>,
+peers: HashMap<PeerId, Peer>,
 /// Requests queued for processing
 queued_requests: VecDeque<DownloadRequest>,
 /// Receiver for new incoming download requests
@@ -34,13 +34,13 @@ pub struct StateFetcher {

 impl StateFetcher {
 /// Invoked when connected to a new peer.
-pub(crate) fn new_connected_peer(&mut self, _node_id: NodeId, _best_hash: H256) {}
+pub(crate) fn new_connected_peer(&mut self, _node_id: PeerId, _best_hash: H256) {}

 /// Invoked when an active session was closed.
-pub(crate) fn on_session_closed(&mut self, _peer: &NodeId) {}
+pub(crate) fn on_session_closed(&mut self, _peer: &PeerId) {}

 /// Invoked when an active session is about to be disconnected.
-pub(crate) fn on_pending_disconnect(&mut self, _peer: &NodeId) {}
+pub(crate) fn on_pending_disconnect(&mut self, _peer: &PeerId) {}

 /// Returns the next action to return
 fn poll_action(&mut self) -> Option<FetchAction> {
@@ -94,7 +94,7 @@ impl StateFetcher {
 /// Called on a `GetBlockHeaders` response from a peer
 pub(crate) fn on_block_headers_response(
 &mut self,
-_peer: NodeId,
+_peer: PeerId,
 _res: RequestResult<Vec<Header>>,
 ) -> Option<BlockResponseOutcome> {
 None
@@ -103,7 +103,7 @@ impl StateFetcher {
 /// Called on a `GetBlockBodies` response from a peer
 pub(crate) fn on_block_bodies_response(
 &mut self,
-_peer: NodeId,
+_peer: PeerId,
 _res: RequestResult<Vec<BlockBody>>,
 ) -> Option<BlockResponseOutcome> {
 None
@@ -189,7 +189,7 @@ enum DownloadRequest {
 pub(crate) enum FetchAction {
 /// Dispatch an eth request to the given peer.
 EthRequest {
-node_id: NodeId,
+node_id: PeerId,
 /// The request to send
 request: EthMessage,
 },
@@ -201,8 +201,8 @@ pub(crate) enum FetchAction {
 #[derive(Debug)]
 pub(crate) enum BlockResponseOutcome {
 /// Continue with another request to the peer.
-Request(NodeId, BlockRequest),
+Request(PeerId, BlockRequest),
 /// How to handle a bad response
 // TODO this should include some form of reputation change
-BadResponse(NodeId),
+BadResponse(PeerId),
 }
@ -5,7 +5,7 @@
|
|||||||
attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
|
attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
|
||||||
))]
|
))]
|
||||||
// TODO remove later
|
// TODO remove later
|
||||||
#![allow(dead_code)]
|
#![allow(dead_code, clippy::too_many_arguments)]
|
||||||
|
|
||||||
//! reth P2P networking.
|
//! reth P2P networking.
|
||||||
//!
|
//!
|
||||||
@ -29,9 +29,6 @@ mod state;
|
|||||||
mod swarm;
|
mod swarm;
|
||||||
mod transactions;
|
mod transactions;
|
||||||
|
|
||||||
/// Identifier for a unique node
|
|
||||||
pub type NodeId = reth_discv4::NodeId;
|
|
||||||
|
|
||||||
pub use config::NetworkConfig;
|
pub use config::NetworkConfig;
|
||||||
pub use manager::NetworkManager;
|
pub use manager::NetworkManager;
|
||||||
pub use network::NetworkHandle;
|
pub use network::NetworkHandle;
|
||||||
|
|||||||
@ -25,7 +25,6 @@ use crate::{
|
|||||||
session::SessionManager,
|
session::SessionManager,
|
||||||
state::NetworkState,
|
state::NetworkState,
|
||||||
swarm::{Swarm, SwarmEvent},
|
swarm::{Swarm, SwarmEvent},
|
||||||
NodeId,
|
|
||||||
};
|
};
|
||||||
use futures::{Future, StreamExt};
|
use futures::{Future, StreamExt};
|
||||||
use parking_lot::Mutex;
|
use parking_lot::Mutex;
|
||||||
@ -34,6 +33,7 @@ use reth_eth_wire::{
|
|||||||
EthMessage,
|
EthMessage,
|
||||||
};
|
};
|
||||||
use reth_interfaces::provider::BlockProvider;
|
use reth_interfaces::provider::BlockProvider;
|
||||||
|
use reth_primitives::PeerId;
|
||||||
use std::{
|
use std::{
|
||||||
net::SocketAddr,
|
net::SocketAddr,
|
||||||
pin::Pin,
|
pin::Pin,
|
||||||
@ -88,8 +88,8 @@ pub struct NetworkManager<C> {
|
|||||||
/// This is updated via internal events and shared via `Arc` with the [`NetworkHandle`]
|
/// This is updated via internal events and shared via `Arc` with the [`NetworkHandle`]
|
||||||
/// Updated by the `NetworkWorker` and loaded by the `NetworkService`.
|
/// Updated by the `NetworkWorker` and loaded by the `NetworkService`.
|
||||||
num_active_peers: Arc<AtomicUsize>,
|
num_active_peers: Arc<AtomicUsize>,
|
||||||
/// Local copy of the `NodeId` of the local node.
|
/// Local copy of the `PeerId` of the local node.
|
||||||
local_node_id: NodeId,
|
local_node_id: PeerId,
|
||||||
}
|
}
|
||||||
|
|
||||||
// === impl NetworkManager ===
|
// === impl NetworkManager ===
|
||||||
@ -163,7 +163,7 @@ where
|
|||||||
/// Event hook for an unexpected message from the peer.
|
/// Event hook for an unexpected message from the peer.
|
||||||
fn on_invalid_message(
|
fn on_invalid_message(
|
||||||
&self,
|
&self,
|
||||||
node_id: NodeId,
|
node_id: PeerId,
|
||||||
_capabilities: Arc<Capabilities>,
|
_capabilities: Arc<Capabilities>,
|
||||||
_message: CapabilityMessage,
|
_message: CapabilityMessage,
|
||||||
) {
|
) {
|
||||||
@ -172,7 +172,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Handles a received [`CapabilityMessage`] from the peer.
|
/// Handles a received [`CapabilityMessage`] from the peer.
|
||||||
fn on_capability_message(&mut self, _node_id: NodeId, msg: CapabilityMessage) {
|
fn on_capability_message(&mut self, _node_id: PeerId, msg: CapabilityMessage) {
|
||||||
match msg {
|
match msg {
|
||||||
CapabilityMessage::Eth(eth) => {
|
CapabilityMessage::Eth(eth) => {
|
||||||
match eth {
|
match eth {
|
||||||
@ -299,7 +299,7 @@ where
|
|||||||
/// Events emitted by the network that are of interest for subscribers.
|
/// Events emitted by the network that are of interest for subscribers.
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub enum NetworkEvent {
|
pub enum NetworkEvent {
|
||||||
EthMessage { node_id: NodeId, message: EthMessage },
|
EthMessage { node_id: PeerId, message: EthMessage },
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Bundles all listeners for [`NetworkEvent`]s.
|
/// Bundles all listeners for [`NetworkEvent`]s.
|
||||||
|
|||||||
@ -11,10 +11,9 @@ use reth_eth_wire::{
|
|||||||
};
|
};
|
||||||
use std::task::{ready, Context, Poll};
|
use std::task::{ready, Context, Poll};
|
||||||
|
|
||||||
use crate::NodeId;
|
|
||||||
use reth_eth_wire::capability::CapabilityMessage;
|
use reth_eth_wire::capability::CapabilityMessage;
|
||||||
use reth_interfaces::p2p::error::RequestResult;
|
use reth_interfaces::p2p::error::RequestResult;
|
||||||
use reth_primitives::{Header, Receipt, TransactionSigned};
|
use reth_primitives::{Header, PeerId, Receipt, TransactionSigned};
|
||||||
use tokio::sync::{mpsc, mpsc::error::TrySendError, oneshot};
|
use tokio::sync::{mpsc, mpsc::error::TrySendError, oneshot};
|
||||||
|
|
||||||
/// Represents all messages that can be sent to a peer session
|
/// Represents all messages that can be sent to a peer session
|
||||||
@ -180,7 +179,7 @@ impl PeerResponseResult {
|
|||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct PeerRequestSender {
|
pub struct PeerRequestSender {
|
||||||
/// id of the remote node.
|
/// id of the remote node.
|
||||||
pub(crate) peer: NodeId,
|
pub(crate) peer: PeerId,
|
||||||
/// The Sender half connected to a session.
|
/// The Sender half connected to a session.
|
||||||
pub(crate) to_session_tx: mpsc::Sender<PeerRequest>,
|
pub(crate) to_session_tx: mpsc::Sender<PeerRequest>,
|
||||||
}
|
}
|
||||||
|
|||||||
@ -1,6 +1,6 @@
|
|||||||
use crate::{manager::NetworkEvent, peers::PeersHandle, NodeId};
|
use crate::{manager::NetworkEvent, peers::PeersHandle};
|
||||||
use parking_lot::Mutex;
|
use parking_lot::Mutex;
|
||||||
use reth_primitives::{H256, U256};
|
use reth_primitives::{PeerId, H256, U256};
|
||||||
use std::{
|
use std::{
|
||||||
net::SocketAddr,
|
net::SocketAddr,
|
||||||
sync::{atomic::AtomicUsize, Arc},
|
sync::{atomic::AtomicUsize, Arc},
|
||||||
@ -24,7 +24,7 @@ impl NetworkHandle {
|
|||||||
num_active_peers: Arc<AtomicUsize>,
|
num_active_peers: Arc<AtomicUsize>,
|
||||||
listener_address: Arc<Mutex<SocketAddr>>,
|
listener_address: Arc<Mutex<SocketAddr>>,
|
||||||
to_manager_tx: UnboundedSender<NetworkHandleMessage>,
|
to_manager_tx: UnboundedSender<NetworkHandleMessage>,
|
||||||
local_node_id: NodeId,
|
local_node_id: PeerId,
|
||||||
peers: PeersHandle,
|
peers: PeersHandle,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
let inner = NetworkInner {
|
let inner = NetworkInner {
|
||||||
@ -57,7 +57,7 @@ struct NetworkInner {
|
|||||||
/// The local address that accepts incoming connections.
|
/// The local address that accepts incoming connections.
|
||||||
listener_address: Arc<Mutex<SocketAddr>>,
|
listener_address: Arc<Mutex<SocketAddr>>,
|
||||||
/// The identifier used by this node.
|
/// The identifier used by this node.
|
||||||
local_node_id: NodeId,
|
local_node_id: PeerId,
|
||||||
/// Access to all the nodes
|
/// Access to all the nodes
|
||||||
peers: PeersHandle, // TODO need something to access
|
peers: PeersHandle, // TODO need something to access
|
||||||
}
|
}
|
||||||
|
|||||||
@ -1,5 +1,5 @@
|
|||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use reth_discv4::NodeId;
|
use reth_primitives::PeerId;
|
||||||
use std::{
|
use std::{
|
||||||
collections::{hash_map::Entry, HashMap, VecDeque},
|
collections::{hash_map::Entry, HashMap, VecDeque},
|
||||||
net::SocketAddr,
|
net::SocketAddr,
|
||||||
@ -32,7 +32,7 @@ pub struct PeersHandle {
|
|||||||
/// The [`PeersManager`] will be notified on peer related changes
|
/// The [`PeersManager`] will be notified on peer related changes
|
||||||
pub(crate) struct PeersManager {
|
pub(crate) struct PeersManager {
|
||||||
/// All peers known to the network
|
/// All peers known to the network
|
||||||
peers: HashMap<NodeId, Peer>,
|
peers: HashMap<PeerId, Peer>,
|
||||||
/// Copy of the receiver half, so new [`PeersHandle`] can be created on demand.
|
/// Copy of the receiver half, so new [`PeersHandle`] can be created on demand.
|
||||||
manager_tx: mpsc::UnboundedSender<PeerCommand>,
|
manager_tx: mpsc::UnboundedSender<PeerCommand>,
|
||||||
/// Receiver half of the command channel.
|
/// Receiver half of the command channel.
|
||||||
@ -74,7 +74,7 @@ impl PeersManager {
|
|||||||
///
|
///
|
||||||
/// If the reputation of the peer is below the `BANNED_REPUTATION` threshold, a disconnect will
|
/// If the reputation of the peer is below the `BANNED_REPUTATION` threshold, a disconnect will
|
||||||
/// be scheduled.
|
/// be scheduled.
|
||||||
pub(crate) fn on_active_session(&mut self, peer_id: NodeId, addr: SocketAddr) {
|
pub(crate) fn on_active_session(&mut self, peer_id: PeerId, addr: SocketAddr) {
|
||||||
match self.peers.entry(peer_id) {
|
match self.peers.entry(peer_id) {
|
||||||
Entry::Occupied(mut entry) => {
|
Entry::Occupied(mut entry) => {
|
||||||
let value = entry.get_mut();
|
let value = entry.get_mut();
|
||||||
@ -96,7 +96,7 @@ impl PeersManager {
|
|||||||
/// Called when a session to a peer was disconnected.
|
/// Called when a session to a peer was disconnected.
|
||||||
///
|
///
|
||||||
/// Accepts an additional [`ReputationChange`] value to apply to the peer.
|
/// Accepts an additional [`ReputationChange`] value to apply to the peer.
|
||||||
pub(crate) fn on_disconnected(&mut self, peer: NodeId, reputation_change: ReputationChange) {
|
pub(crate) fn on_disconnected(&mut self, peer: PeerId, reputation_change: ReputationChange) {
|
||||||
if let Some(mut peer) = self.peers.get_mut(&peer) {
|
if let Some(mut peer) = self.peers.get_mut(&peer) {
|
||||||
self.connection_info.decr_state(peer.state);
|
self.connection_info.decr_state(peer.state);
|
||||||
peer.state = PeerConnectionState::Idle;
|
peer.state = PeerConnectionState::Idle;
|
||||||
@ -108,7 +108,7 @@ impl PeersManager {
|
|||||||
///
|
///
|
||||||
/// If the peer already exists, then the address will be updated. If the addresses differ, the
|
/// If the peer already exists, then the address will be updated. If the addresses differ, the
|
||||||
/// old address is returned
|
/// old address is returned
|
||||||
pub(crate) fn add_discovered_node(&mut self, peer_id: NodeId, addr: SocketAddr) {
|
pub(crate) fn add_discovered_node(&mut self, peer_id: PeerId, addr: SocketAddr) {
|
||||||
match self.peers.entry(peer_id) {
|
match self.peers.entry(peer_id) {
|
||||||
Entry::Occupied(mut entry) => {
|
Entry::Occupied(mut entry) => {
|
||||||
let node = entry.get_mut();
|
let node = entry.get_mut();
|
||||||
@ -121,7 +121,7 @@ impl PeersManager {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Removes the tracked node from the set.
|
/// Removes the tracked node from the set.
|
||||||
pub(crate) fn remove_discovered_node(&mut self, peer_id: NodeId) {
|
pub(crate) fn remove_discovered_node(&mut self, peer_id: PeerId) {
|
||||||
if let Some(entry) = self.peers.remove(&peer_id) {
|
if let Some(entry) = self.peers.remove(&peer_id) {
|
||||||
if entry.state.is_connected() {
|
if entry.state.is_connected() {
|
||||||
self.connection_info.decr_state(entry.state);
|
self.connection_info.decr_state(entry.state);
|
||||||
@ -133,11 +133,11 @@ impl PeersManager {
|
|||||||
/// Returns the idle peer with the highest reputation.
|
/// Returns the idle peer with the highest reputation.
|
||||||
///
|
///
|
||||||
/// Returns `None` if no peer is available.
|
/// Returns `None` if no peer is available.
|
||||||
fn best_unconnected(&mut self) -> Option<(NodeId, &mut Peer)> {
|
fn best_unconnected(&mut self) -> Option<(PeerId, &mut Peer)> {
|
||||||
self.peers
|
self.peers
|
||||||
.iter_mut()
|
.iter_mut()
|
||||||
.filter(|(_, peer)| peer.state.is_unconnected())
|
.filter(|(_, peer)| peer.state.is_unconnected())
|
||||||
.fold(None::<(&NodeId, &mut Peer)>, |mut best_peer, candidate| {
|
.fold(None::<(&PeerId, &mut Peer)>, |mut best_peer, candidate| {
|
||||||
if let Some(best_peer) = best_peer.take() {
|
if let Some(best_peer) = best_peer.take() {
|
||||||
if best_peer.1.reputation >= candidate.1.reputation {
|
if best_peer.1.reputation >= candidate.1.reputation {
|
||||||
return Some(best_peer)
|
return Some(best_peer)
|
||||||
@ -331,14 +331,14 @@ pub(crate) enum PeerCommand {
|
|||||||
/// Command to manually add a peer
|
/// Command to manually add a peer
|
||||||
Add {
|
Add {
|
||||||
/// Identifier of the peer.
|
/// Identifier of the peer.
|
||||||
peer_id: NodeId,
|
peer_id: PeerId,
|
||||||
/// The address of the peer
|
/// The address of the peer
|
||||||
addr: SocketAddr,
|
addr: SocketAddr,
|
||||||
},
|
},
|
||||||
/// Remove a peer from the set
|
/// Remove a peer from the set
|
||||||
///
|
///
|
||||||
/// If currently connected, this will disconnect the session
|
/// If currently connected, this will disconnect the session
|
||||||
Remove(NodeId),
|
Remove(PeerId),
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Actions the peer manager can trigger.
|
/// Actions the peer manager can trigger.
|
||||||
@ -347,17 +347,17 @@ pub enum PeerAction {
|
|||||||
/// Start a new connection to a peer.
|
/// Start a new connection to a peer.
|
||||||
Connect {
|
Connect {
|
||||||
/// The peer to connect to.
|
/// The peer to connect to.
|
||||||
peer_id: NodeId,
|
peer_id: PeerId,
|
||||||
/// Where to reach the node
|
/// Where to reach the node
|
||||||
remote_addr: SocketAddr,
|
remote_addr: SocketAddr,
|
||||||
},
|
},
|
||||||
/// Disconnect an existing connection.
|
/// Disconnect an existing connection.
|
||||||
Disconnect { peer_id: NodeId },
|
Disconnect { peer_id: PeerId },
|
||||||
/// Disconnect an existing incoming connection, because the peer's reputation is below the
|
/// Disconnect an existing incoming connection, because the peer's reputation is below the
|
||||||
/// banned threshold.
|
/// banned threshold.
|
||||||
DisconnectBannedIncoming {
|
DisconnectBannedIncoming {
|
||||||
/// Peer id of the established connection.
|
/// Peer id of the established connection.
|
||||||
peer_id: NodeId,
|
peer_id: PeerId,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -6,13 +6,16 @@ use crate::{
|
|||||||
handle::{ActiveSessionMessage, SessionCommand},
|
handle::{ActiveSessionMessage, SessionCommand},
|
||||||
SessionId,
|
SessionId,
|
||||||
},
|
},
|
||||||
NodeId,
|
|
||||||
};
|
};
|
||||||
use fnv::FnvHashMap;
|
use fnv::FnvHashMap;
|
||||||
use futures::{stream::Fuse, Sink, Stream};
|
use futures::{stream::Fuse, Sink, Stream};
|
||||||
use pin_project::pin_project;
|
use pin_project::pin_project;
|
||||||
use reth_ecies::stream::ECIESStream;
|
use reth_ecies::stream::ECIESStream;
|
||||||
use reth_eth_wire::capability::{Capabilities, CapabilityMessage};
|
use reth_eth_wire::{
|
||||||
|
capability::{Capabilities, CapabilityMessage},
|
||||||
|
EthStream, P2PStream,
|
||||||
|
};
|
||||||
|
use reth_primitives::PeerId;
|
||||||
use std::{
|
use std::{
|
||||||
collections::VecDeque,
|
collections::VecDeque,
|
||||||
future::Future,
|
future::Future,
|
||||||
@ -31,9 +34,9 @@ pub(crate) struct ActiveSession {
|
|||||||
pub(crate) next_id: usize,
|
pub(crate) next_id: usize,
|
||||||
/// The underlying connection.
|
/// The underlying connection.
|
||||||
#[pin]
|
#[pin]
|
||||||
pub(crate) conn: ECIESStream<TcpStream>,
|
pub(crate) conn: EthStream<P2PStream<ECIESStream<TcpStream>>>,
|
||||||
/// Identifier of the node we're connected to.
|
/// Identifier of the node we're connected to.
|
||||||
pub(crate) remote_node_id: NodeId,
|
pub(crate) remote_node_id: PeerId,
|
||||||
/// All capabilities the peer announced
|
/// All capabilities the peer announced
|
||||||
pub(crate) remote_capabilities: Arc<Capabilities>,
|
pub(crate) remote_capabilities: Arc<Capabilities>,
|
||||||
/// Internal identifier of this session
|
/// Internal identifier of this session
|
||||||
|
|||||||
@ -1,13 +1,12 @@
|
|||||||
//! Session handles
|
//! Session handles
|
||||||
use crate::{
|
use crate::session::{Direction, SessionId};
|
||||||
session::{Direction, SessionId},
|
|
||||||
NodeId,
|
|
||||||
};
|
|
||||||
use reth_ecies::{stream::ECIESStream, ECIESError};
|
use reth_ecies::{stream::ECIESStream, ECIESError};
|
||||||
use reth_eth_wire::{
|
use reth_eth_wire::{
|
||||||
capability::{Capabilities, CapabilityMessage},
|
capability::{Capabilities, CapabilityMessage},
|
||||||
Status,
|
error::EthStreamError,
|
||||||
|
EthStream, P2PStream, Status,
|
||||||
};
|
};
|
||||||
|
use reth_primitives::PeerId;
|
||||||
use std::{io, net::SocketAddr, sync::Arc, time::Instant};
|
use std::{io, net::SocketAddr, sync::Arc, time::Instant};
|
||||||
use tokio::{
|
use tokio::{
|
||||||
net::TcpStream,
|
net::TcpStream,
|
||||||
@ -33,7 +32,7 @@ pub(crate) struct ActiveSessionHandle {
|
|||||||
/// The assigned id for this session
|
/// The assigned id for this session
|
||||||
pub(crate) session_id: SessionId,
|
pub(crate) session_id: SessionId,
|
||||||
/// The identifier of the remote peer
|
/// The identifier of the remote peer
|
||||||
pub(crate) remote_id: NodeId,
|
pub(crate) remote_id: PeerId,
|
||||||
/// The timestamp when the session has been established.
|
/// The timestamp when the session has been established.
|
||||||
pub(crate) established: Instant,
|
pub(crate) established: Instant,
|
||||||
/// Announced capabilities of the peer.
|
/// Announced capabilities of the peer.
|
||||||
@ -65,23 +64,24 @@ pub(crate) enum PendingSessionEvent {
|
|||||||
Established {
|
Established {
|
||||||
session_id: SessionId,
|
session_id: SessionId,
|
||||||
remote_addr: SocketAddr,
|
remote_addr: SocketAddr,
|
||||||
node_id: NodeId,
|
/// The remote node's public key
|
||||||
|
node_id: PeerId,
|
||||||
capabilities: Arc<Capabilities>,
|
capabilities: Arc<Capabilities>,
|
||||||
status: Status,
|
status: Status,
|
||||||
conn: ECIESStream<TcpStream>,
|
conn: EthStream<P2PStream<ECIESStream<TcpStream>>>,
|
||||||
},
|
},
|
||||||
/// Handshake unsuccessful, session was disconnected.
|
/// Handshake unsuccessful, session was disconnected.
|
||||||
Disconnected {
|
Disconnected {
|
||||||
remote_addr: SocketAddr,
|
remote_addr: SocketAddr,
|
||||||
session_id: SessionId,
|
session_id: SessionId,
|
||||||
direction: Direction,
|
direction: Direction,
|
||||||
error: Option<ECIESError>,
|
error: Option<EthStreamError>,
|
||||||
},
|
},
|
||||||
/// Thrown when unable to establish a [`TcpStream`].
|
/// Thrown when unable to establish a [`TcpStream`].
|
||||||
OutgoingConnectionError {
|
OutgoingConnectionError {
|
||||||
remote_addr: SocketAddr,
|
remote_addr: SocketAddr,
|
||||||
session_id: SessionId,
|
session_id: SessionId,
|
||||||
node_id: NodeId,
|
node_id: PeerId,
|
||||||
error: io::Error,
|
error: io::Error,
|
||||||
},
|
},
|
||||||
/// Thrown when authentication via Ecies failed.
|
/// Thrown when authentication via Ecies failed.
|
||||||
@ -101,18 +101,18 @@ pub(crate) enum SessionCommand {
|
|||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub(crate) enum ActiveSessionMessage {
|
pub(crate) enum ActiveSessionMessage {
|
||||||
/// Session disconnected.
|
/// Session disconnected.
|
||||||
Closed { node_id: NodeId, remote_addr: SocketAddr },
|
Closed { node_id: PeerId, remote_addr: SocketAddr },
|
||||||
/// A session received a valid message via RLPx.
|
/// A session received a valid message via RLPx.
|
||||||
ValidMessage {
|
ValidMessage {
|
||||||
/// Identifier of the remote peer.
|
/// Identifier of the remote peer.
|
||||||
node_id: NodeId,
|
node_id: PeerId,
|
||||||
/// Message received from the peer.
|
/// Message received from the peer.
|
||||||
message: CapabilityMessage,
|
message: CapabilityMessage,
|
||||||
},
|
},
|
||||||
/// Received a message that does not match the announced capabilities of the peer.
|
/// Received a message that does not match the announced capabilities of the peer.
|
||||||
InvalidMessage {
|
InvalidMessage {
|
||||||
/// Identifier of the remote peer.
|
/// Identifier of the remote peer.
|
||||||
node_id: NodeId,
|
node_id: PeerId,
|
||||||
/// Announced capabilities of the remote peer.
|
/// Announced capabilities of the remote peer.
|
||||||
capabilities: Arc<Capabilities>,
|
capabilities: Arc<Capabilities>,
|
||||||
/// Message received from the peer.
|
/// Message received from the peer.
|
||||||
|
|||||||
@ -1,21 +1,20 @@
|
|||||||
//! Support for handling peer sessions.
|
//! Support for handling peer sessions.
|
||||||
pub use crate::message::PeerRequestSender;
|
pub use crate::message::PeerRequestSender;
|
||||||
use crate::{
|
use crate::session::{
|
||||||
session::{
|
active::ActiveSession,
|
||||||
active::ActiveSession,
|
handle::{
|
||||||
handle::{
|
ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle,
|
||||||
ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
NodeId,
|
|
||||||
};
|
};
|
||||||
use fnv::FnvHashMap;
|
use fnv::FnvHashMap;
|
||||||
use futures::{future::Either, io, FutureExt, StreamExt};
|
use futures::{future::Either, io, FutureExt, StreamExt};
|
||||||
use reth_ecies::{stream::ECIESStream, ECIESError};
|
use reth_ecies::stream::ECIESStream;
|
||||||
use reth_eth_wire::{
|
use reth_eth_wire::{
|
||||||
capability::{Capabilities, CapabilityMessage},
|
capability::{Capabilities, CapabilityMessage},
|
||||||
Status, UnauthedEthStream,
|
error::EthStreamError,
|
||||||
|
HelloBuilder, HelloMessage, Status, StatusBuilder, UnauthedEthStream, UnauthedP2PStream,
|
||||||
};
|
};
|
||||||
|
use reth_primitives::{ForkFilter, Hardfork, PeerId};
|
||||||
use secp256k1::{SecretKey, SECP256K1};
|
use secp256k1::{SecretKey, SECP256K1};
|
||||||
use std::{
|
use std::{
|
||||||
collections::HashMap,
|
collections::HashMap,
|
||||||
@ -48,7 +47,13 @@ pub(crate) struct SessionManager {
|
|||||||
/// The secret key used for authenticating sessions.
|
/// The secret key used for authenticating sessions.
|
||||||
secret_key: SecretKey,
|
secret_key: SecretKey,
|
||||||
/// The node id of this node
|
/// The node id of this node
|
||||||
node_id: NodeId,
|
node_id: PeerId,
|
||||||
|
/// The `Status` message to send to peers.
|
||||||
|
status: Status,
|
||||||
|
/// The `Hello` message to send to peers.
|
||||||
|
hello: HelloMessage,
|
||||||
|
/// The [`ForkFilter`] used to validate the peer's `Status` message.
|
||||||
|
fork_filter: ForkFilter,
|
||||||
/// Size of the command buffer per session.
|
/// Size of the command buffer per session.
|
||||||
session_command_buffer: usize,
|
session_command_buffer: usize,
|
||||||
/// All spawned session tasks.
|
/// All spawned session tasks.
|
||||||
@ -61,7 +66,7 @@ pub(crate) struct SessionManager {
|
|||||||
/// session is authenticated, it can be moved to the `active_session` set.
|
/// session is authenticated, it can be moved to the `active_session` set.
|
||||||
pending_sessions: FnvHashMap<SessionId, PendingSessionHandle>,
|
pending_sessions: FnvHashMap<SessionId, PendingSessionHandle>,
|
||||||
/// All active sessions that are ready to exchange messages.
|
/// All active sessions that are ready to exchange messages.
|
||||||
active_sessions: HashMap<NodeId, ActiveSessionHandle>,
|
active_sessions: HashMap<PeerId, ActiveSessionHandle>,
|
||||||
/// The original Sender half of the [`PendingSessionEvent`] channel.
|
/// The original Sender half of the [`PendingSessionEvent`] channel.
|
||||||
///
|
///
|
||||||
/// When a new (pending) session is created, the corresponding [`PendingSessionHandle`] will
|
/// When a new (pending) session is created, the corresponding [`PendingSessionHandle`] will
|
||||||
@ -87,12 +92,21 @@ impl SessionManager {
|
|||||||
let (active_session_tx, active_session_rx) = mpsc::channel(config.session_event_buffer);
|
let (active_session_tx, active_session_rx) = mpsc::channel(config.session_event_buffer);
|
||||||
|
|
||||||
let pk = secret_key.public_key(SECP256K1);
|
let pk = secret_key.public_key(SECP256K1);
|
||||||
let node_id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]);
|
let node_id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);
|
||||||
|
|
||||||
|
// TODO: make sure this is the right place to put these builders - maybe per-Network rather
|
||||||
|
// than per-Session?
|
||||||
|
let hello = HelloBuilder::new(node_id).build();
|
||||||
|
let status = StatusBuilder::default().build();
|
||||||
|
let fork_filter = Hardfork::Frontier.fork_filter();
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
next_id: 0,
|
next_id: 0,
|
||||||
secret_key,
|
secret_key,
|
||||||
node_id,
|
node_id,
|
||||||
|
status,
|
||||||
|
hello,
|
||||||
|
fork_filter,
|
||||||
session_command_buffer: config.session_command_buffer,
|
session_command_buffer: config.session_command_buffer,
|
||||||
spawned_tasks: Default::default(),
|
spawned_tasks: Default::default(),
|
||||||
pending_sessions: Default::default(),
|
pending_sessions: Default::default(),
|
||||||
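
The hunk above wires the new handshake inputs into `SessionManager::new`. As a condensed sketch of that setup (assuming the `HelloBuilder`/`StatusBuilder` APIs and the `Hardfork::fork_filter` helper introduced in this commit, not a definitive implementation), the local `PeerId` is derived from the secret key and the default `Hello`/`Status` messages plus the mainnet fork filter are built once per manager:

use reth_eth_wire::{HelloBuilder, HelloMessage, Status, StatusBuilder};
use reth_primitives::{ForkFilter, Hardfork, PeerId};
use secp256k1::{SecretKey, SECP256K1};

/// Builds the per-manager handshake inputs, mirroring `SessionManager::new` above.
fn handshake_inputs(secret_key: SecretKey) -> (PeerId, HelloMessage, Status, ForkFilter) {
    // The PeerId is the uncompressed secp256k1 public key with the 0x04 tag stripped.
    let pk = secret_key.public_key(SECP256K1);
    let node_id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);

    // Defaults for mainnet: Hello advertises this node's id, Status comes from the
    // builder defaults, and the fork filter is seeded at the Frontier (genesis) fork.
    let hello = HelloBuilder::new(node_id).build();
    let status = StatusBuilder::default().build();
    let fork_filter = Hardfork::Frontier.fork_filter();
    (node_id, hello, status, fork_filter)
}
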
@ -139,6 +153,9 @@ impl SessionManager {
|
|||||||
pending_events,
|
pending_events,
|
||||||
remote_addr,
|
remote_addr,
|
||||||
self.secret_key,
|
self.secret_key,
|
||||||
|
self.hello.clone(),
|
||||||
|
self.status,
|
||||||
|
self.fork_filter.clone(),
|
||||||
));
|
));
|
||||||
|
|
||||||
let handle = PendingSessionHandle { disconnect_tx };
|
let handle = PendingSessionHandle { disconnect_tx };
|
||||||
@ -147,7 +164,7 @@ impl SessionManager {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Starts a new pending session from the local node to the given remote node.
|
/// Starts a new pending session from the local node to the given remote node.
|
||||||
pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_node_id: NodeId) {
|
pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_node_id: PeerId) {
|
||||||
let session_id = self.next_id();
|
let session_id = self.next_id();
|
||||||
let (disconnect_tx, disconnect_rx) = oneshot::channel();
|
let (disconnect_tx, disconnect_rx) = oneshot::channel();
|
||||||
let pending_events = self.pending_sessions_tx.clone();
|
let pending_events = self.pending_sessions_tx.clone();
|
||||||
@ -158,6 +175,9 @@ impl SessionManager {
|
|||||||
remote_addr,
|
remote_addr,
|
||||||
remote_node_id,
|
remote_node_id,
|
||||||
self.secret_key,
|
self.secret_key,
|
||||||
|
self.hello.clone(),
|
||||||
|
self.status,
|
||||||
|
self.fork_filter.clone(),
|
||||||
));
|
));
|
||||||
|
|
||||||
let handle = PendingSessionHandle { disconnect_tx };
|
let handle = PendingSessionHandle { disconnect_tx };
|
||||||
@ -168,7 +188,7 @@ impl SessionManager {
|
|||||||
///
|
///
|
||||||
/// This will trigger the disconnect on the session task to gracefully terminate. The result
|
/// This will trigger the disconnect on the session task to gracefully terminate. The result
|
||||||
/// will be picked up by the receiver.
|
/// will be picked up by the receiver.
|
||||||
pub(crate) fn disconnect(&self, node: NodeId) {
|
pub(crate) fn disconnect(&self, node: PeerId) {
|
||||||
if let Some(session) = self.active_sessions.get(&node) {
|
if let Some(session) = self.active_sessions.get(&node) {
|
||||||
session.disconnect();
|
session.disconnect();
|
||||||
}
|
}
|
||||||
@ -376,7 +396,7 @@ pub(crate) enum SessionEvent {
|
|||||||
///
|
///
|
||||||
/// This session is now able to exchange data.
|
/// This session is now able to exchange data.
|
||||||
SessionEstablished {
|
SessionEstablished {
|
||||||
node_id: NodeId,
|
node_id: PeerId,
|
||||||
remote_addr: SocketAddr,
|
remote_addr: SocketAddr,
|
||||||
capabilities: Arc<Capabilities>,
|
capabilities: Arc<Capabilities>,
|
||||||
status: Status,
|
status: Status,
|
||||||
@ -384,30 +404,30 @@ pub(crate) enum SessionEvent {
|
|||||||
},
|
},
|
||||||
/// A session received a valid message via RLPx.
|
/// A session received a valid message via RLPx.
|
||||||
ValidMessage {
|
ValidMessage {
|
||||||
node_id: NodeId,
|
node_id: PeerId,
|
||||||
/// Message received from the peer.
|
/// Message received from the peer.
|
||||||
message: CapabilityMessage,
|
message: CapabilityMessage,
|
||||||
},
|
},
|
||||||
/// Received a message that does not match the announced capabilities of the peer.
|
/// Received a message that does not match the announced capabilities of the peer.
|
||||||
InvalidMessage {
|
InvalidMessage {
|
||||||
node_id: NodeId,
|
node_id: PeerId,
|
||||||
/// Announced capabilities of the remote peer.
|
/// Announced capabilities of the remote peer.
|
||||||
capabilities: Arc<Capabilities>,
|
capabilities: Arc<Capabilities>,
|
||||||
/// Message received from the peer.
|
/// Message received from the peer.
|
||||||
message: CapabilityMessage,
|
message: CapabilityMessage,
|
||||||
},
|
},
|
||||||
/// Closed an incoming pending session during authentication.
|
/// Closed an incoming pending session during authentication.
|
||||||
IncomingPendingSessionClosed { remote_addr: SocketAddr, error: Option<ECIESError> },
|
IncomingPendingSessionClosed { remote_addr: SocketAddr, error: Option<EthStreamError> },
|
||||||
/// Closed an outgoing pending session during authentication.
|
/// Closed an outgoing pending session during authentication.
|
||||||
OutgoingPendingSessionClosed {
|
OutgoingPendingSessionClosed {
|
||||||
remote_addr: SocketAddr,
|
remote_addr: SocketAddr,
|
||||||
node_id: NodeId,
|
node_id: PeerId,
|
||||||
error: Option<ECIESError>,
|
error: Option<EthStreamError>,
|
||||||
},
|
},
|
||||||
/// Failed to establish a tcp stream
|
/// Failed to establish a tcp stream
|
||||||
OutgoingConnectionError { remote_addr: SocketAddr, node_id: NodeId, error: io::Error },
|
OutgoingConnectionError { remote_addr: SocketAddr, node_id: PeerId, error: io::Error },
|
||||||
/// Active session was disconnected.
|
/// Active session was disconnected.
|
||||||
Disconnected { node_id: NodeId, remote_addr: SocketAddr },
|
Disconnected { node_id: PeerId, remote_addr: SocketAddr },
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The error thrown when the max configured limit has been reached and no more connections are
|
/// The error thrown when the max configured limit has been reached and no more connections are
|
||||||
@ -426,6 +446,9 @@ async fn start_pending_incoming_session(
|
|||||||
events: mpsc::Sender<PendingSessionEvent>,
|
events: mpsc::Sender<PendingSessionEvent>,
|
||||||
remote_addr: SocketAddr,
|
remote_addr: SocketAddr,
|
||||||
secret_key: SecretKey,
|
secret_key: SecretKey,
|
||||||
|
hello: HelloMessage,
|
||||||
|
status: Status,
|
||||||
|
fork_filter: ForkFilter,
|
||||||
) {
|
) {
|
||||||
authenticate(
|
authenticate(
|
||||||
disconnect_rx,
|
disconnect_rx,
|
||||||
@ -435,6 +458,9 @@ async fn start_pending_incoming_session(
|
|||||||
remote_addr,
|
remote_addr,
|
||||||
secret_key,
|
secret_key,
|
||||||
Direction::Incoming,
|
Direction::Incoming,
|
||||||
|
hello,
|
||||||
|
status,
|
||||||
|
fork_filter,
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
@ -446,8 +472,11 @@ async fn start_pending_outbound_session(
|
|||||||
events: mpsc::Sender<PendingSessionEvent>,
|
events: mpsc::Sender<PendingSessionEvent>,
|
||||||
session_id: SessionId,
|
session_id: SessionId,
|
||||||
remote_addr: SocketAddr,
|
remote_addr: SocketAddr,
|
||||||
remote_node_id: NodeId,
|
remote_node_id: PeerId,
|
||||||
secret_key: SecretKey,
|
secret_key: SecretKey,
|
||||||
|
hello: HelloMessage,
|
||||||
|
status: Status,
|
||||||
|
fork_filter: ForkFilter,
|
||||||
) {
|
) {
|
||||||
let stream = match TcpStream::connect(remote_addr).await {
|
let stream = match TcpStream::connect(remote_addr).await {
|
||||||
Ok(stream) => stream,
|
Ok(stream) => stream,
|
||||||
@ -471,6 +500,9 @@ async fn start_pending_outbound_session(
|
|||||||
remote_addr,
|
remote_addr,
|
||||||
secret_key,
|
secret_key,
|
||||||
Direction::Outgoing(remote_node_id),
|
Direction::Outgoing(remote_node_id),
|
||||||
|
hello,
|
||||||
|
status,
|
||||||
|
fork_filter,
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
@ -481,7 +513,7 @@ pub(crate) enum Direction {
|
|||||||
/// Incoming connection.
|
/// Incoming connection.
|
||||||
Incoming,
|
Incoming,
|
||||||
/// Outgoing connection to a specific node.
|
/// Outgoing connection to a specific node.
|
||||||
Outgoing(NodeId),
|
Outgoing(PeerId),
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn authenticate(
|
async fn authenticate(
|
||||||
@ -492,6 +524,9 @@ async fn authenticate(
|
|||||||
remote_addr: SocketAddr,
|
remote_addr: SocketAddr,
|
||||||
secret_key: SecretKey,
|
secret_key: SecretKey,
|
||||||
direction: Direction,
|
direction: Direction,
|
||||||
|
hello: HelloMessage,
|
||||||
|
status: Status,
|
||||||
|
fork_filter: ForkFilter,
|
||||||
) {
|
) {
|
||||||
let stream = match direction {
|
let stream = match direction {
|
||||||
Direction::Incoming => match ECIESStream::incoming(stream, secret_key).await {
|
Direction::Incoming => match ECIESStream::incoming(stream, secret_key).await {
|
||||||
@ -520,8 +555,17 @@ async fn authenticate(
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let unauthed = UnauthedEthStream::new(stream);
|
let unauthed = UnauthedP2PStream::new(stream);
|
||||||
let auth = authenticate_stream(unauthed, session_id, remote_addr, direction).boxed();
|
let auth = authenticate_stream(
|
||||||
|
unauthed,
|
||||||
|
session_id,
|
||||||
|
remote_addr,
|
||||||
|
direction,
|
||||||
|
hello,
|
||||||
|
status,
|
||||||
|
fork_filter,
|
||||||
|
)
|
||||||
|
.boxed();
|
||||||
|
|
||||||
match futures::future::select(disconnect_rx, auth).await {
|
match futures::future::select(disconnect_rx, auth).await {
|
||||||
Either::Left((_, _)) => {
|
Either::Left((_, _)) => {
|
||||||
@ -544,10 +588,47 @@ async fn authenticate(
|
|||||||
///
|
///
|
||||||
/// On Success return the authenticated stream as [`PendingSessionEvent`]
|
/// On Success return the authenticated stream as [`PendingSessionEvent`]
|
||||||
async fn authenticate_stream(
|
async fn authenticate_stream(
|
||||||
_stream: UnauthedEthStream<ECIESStream<TcpStream>>,
|
stream: UnauthedP2PStream<ECIESStream<TcpStream>>,
|
||||||
_session_id: SessionId,
|
session_id: SessionId,
|
||||||
_remote_addr: SocketAddr,
|
remote_addr: SocketAddr,
|
||||||
_direction: Direction,
|
direction: Direction,
|
||||||
|
hello: HelloMessage,
|
||||||
|
status: Status,
|
||||||
|
fork_filter: ForkFilter,
|
||||||
) -> PendingSessionEvent {
|
) -> PendingSessionEvent {
|
||||||
todo!()
|
// conduct the p2p handshake and return the authenticated stream
|
||||||
|
let (p2p_stream, their_hello) = match stream.handshake(hello).await {
|
||||||
|
Ok(stream_res) => stream_res,
|
||||||
|
Err(err) => {
|
||||||
|
return PendingSessionEvent::Disconnected {
|
||||||
|
remote_addr,
|
||||||
|
session_id,
|
||||||
|
direction,
|
||||||
|
error: Some(err.into()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// if the hello handshake was successful we can try status handshake
|
||||||
|
let eth_unauthed = UnauthedEthStream::new(p2p_stream);
|
||||||
|
let (eth_stream, their_status) = match eth_unauthed.handshake(status, fork_filter).await {
|
||||||
|
Ok(stream_res) => stream_res,
|
||||||
|
Err(err) => {
|
||||||
|
return PendingSessionEvent::Disconnected {
|
||||||
|
remote_addr,
|
||||||
|
session_id,
|
||||||
|
direction,
|
||||||
|
error: Some(err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
PendingSessionEvent::Established {
|
||||||
|
session_id,
|
||||||
|
remote_addr,
|
||||||
|
node_id: their_hello.id,
|
||||||
|
capabilities: Arc::new(Capabilities::from(their_hello.capabilities)),
|
||||||
|
status: their_status,
|
||||||
|
conn: eth_stream,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
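
The `authenticate_stream` change above layers the handshake: the RLPx `Hello` exchange authenticates the `P2PStream`, and only then is the eth `Status` exchange performed and validated against the `ForkFilter`. A minimal sketch of that sequence over an already-established ECIES transport, assuming the eth-wire types imported in this file and the conversion of the p2p handshake error into `EthStreamError` used above:

use reth_ecies::stream::ECIESStream;
use reth_eth_wire::{
    error::EthStreamError, EthStream, HelloMessage, P2PStream, Status, UnauthedEthStream,
    UnauthedP2PStream,
};
use reth_primitives::ForkFilter;
use tokio::net::TcpStream;

/// Runs the p2p `Hello` handshake and then the eth `Status` handshake,
/// returning the fully authenticated stream on success.
async fn handshake(
    ecies: ECIESStream<TcpStream>,
    hello: HelloMessage,
    status: Status,
    fork_filter: ForkFilter,
) -> Result<EthStream<P2PStream<ECIESStream<TcpStream>>>, EthStreamError> {
    // Stage 1: p2p handshake yields the peer's Hello and an authenticated P2PStream.
    let (p2p_stream, _their_hello) = UnauthedP2PStream::new(ecies).handshake(hello).await?;
    // Stage 2: eth handshake exchanges Status and checks the peer's fork id
    // against the ForkFilter.
    let (eth_stream, _their_status) =
        UnauthedEthStream::new(p2p_stream).handshake(status, fork_filter).await?;
    Ok(eth_stream)
}
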
|
|||||||
@ -5,12 +5,11 @@ use crate::{
|
|||||||
fetch::StateFetcher,
|
fetch::StateFetcher,
|
||||||
message::{PeerRequestSender, PeerResponse},
|
message::{PeerRequestSender, PeerResponse},
|
||||||
peers::{PeerAction, PeersManager},
|
peers::{PeerAction, PeersManager},
|
||||||
NodeId,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
use reth_eth_wire::{capability::Capabilities, Status};
|
use reth_eth_wire::{capability::Capabilities, Status};
|
||||||
use reth_interfaces::provider::BlockProvider;
|
use reth_interfaces::provider::BlockProvider;
|
||||||
use reth_primitives::H256;
|
use reth_primitives::{PeerId, H256};
|
||||||
use std::{
|
use std::{
|
||||||
collections::{HashMap, VecDeque},
|
collections::{HashMap, VecDeque},
|
||||||
net::SocketAddr,
|
net::SocketAddr,
|
||||||
@ -37,7 +36,7 @@ use tracing::trace;
|
|||||||
/// This type is also responsible for responding to received requests.
|
/// This type is also responsible for responding to received requests.
|
||||||
pub struct NetworkState<C> {
|
pub struct NetworkState<C> {
|
||||||
/// All connected peers and their state.
|
/// All connected peers and their state.
|
||||||
connected_peers: HashMap<NodeId, ConnectedPeer>,
|
connected_peers: HashMap<PeerId, ConnectedPeer>,
|
||||||
/// Manages connections to peers.
|
/// Manages connections to peers.
|
||||||
peers_manager: PeersManager,
|
peers_manager: PeersManager,
|
||||||
/// Buffered messages until polled.
|
/// Buffered messages until polled.
|
||||||
@ -83,7 +82,7 @@ where
|
|||||||
/// should be rejected.
|
/// should be rejected.
|
||||||
pub(crate) fn on_session_activated(
|
pub(crate) fn on_session_activated(
|
||||||
&mut self,
|
&mut self,
|
||||||
peer: NodeId,
|
peer: PeerId,
|
||||||
capabilities: Arc<Capabilities>,
|
capabilities: Arc<Capabilities>,
|
||||||
status: Status,
|
status: Status,
|
||||||
request_tx: PeerRequestSender,
|
request_tx: PeerRequestSender,
|
||||||
@ -107,7 +106,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Event hook for a disconnected session for the peer.
|
/// Event hook for a disconnected session for the peer.
|
||||||
pub(crate) fn on_session_closed(&mut self, peer: NodeId) {
|
pub(crate) fn on_session_closed(&mut self, peer: PeerId) {
|
||||||
self.connected_peers.remove(&peer);
|
self.connected_peers.remove(&peer);
|
||||||
self.state_fetcher.on_session_closed(&peer);
|
self.state_fetcher.on_session_closed(&peer);
|
||||||
}
|
}
|
||||||
@ -149,7 +148,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Disconnect the session
|
/// Disconnect the session
|
||||||
fn on_session_disconnected(&mut self, peer: NodeId) {
|
fn on_session_disconnected(&mut self, peer: PeerId) {
|
||||||
self.connected_peers.remove(&peer);
|
self.connected_peers.remove(&peer);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -157,7 +156,7 @@ where
|
|||||||
///
|
///
|
||||||
/// Caution: this will replace an already pending response. It's the responsibility of the
|
/// Caution: this will replace an already pending response. It's the responsibility of the
|
||||||
/// caller to select the peer.
|
/// caller to select the peer.
|
||||||
fn handle_block_request(&mut self, peer: NodeId, request: BlockRequest) {
|
fn handle_block_request(&mut self, peer: PeerId, request: BlockRequest) {
|
||||||
if let Some(ref mut peer) = self.connected_peers.get_mut(&peer) {
|
if let Some(ref mut peer) = self.connected_peers.get_mut(&peer) {
|
||||||
let (request, response) = match request {
|
let (request, response) = match request {
|
||||||
BlockRequest::GetBlockHeaders(request) => {
|
BlockRequest::GetBlockHeaders(request) => {
|
||||||
@ -192,7 +191,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Invoked when a response is received from a connected peer.
|
/// Invoked when a response is received from a connected peer.
|
||||||
fn on_eth_response(&mut self, peer: NodeId, resp: PeerResponseResult) -> Option<StateAction> {
|
fn on_eth_response(&mut self, peer: PeerId, resp: PeerResponseResult) -> Option<StateAction> {
|
||||||
match resp {
|
match resp {
|
||||||
PeerResponseResult::BlockHeaders(res) => {
|
PeerResponseResult::BlockHeaders(res) => {
|
||||||
let outcome = self.state_fetcher.on_block_headers_response(peer, res)?;
|
let outcome = self.state_fetcher.on_block_headers_response(peer, res)?;
|
||||||
@ -283,9 +282,9 @@ pub struct ConnectedPeer {
|
|||||||
/// Message variants triggered by the [`State`]
|
/// Message variants triggered by the [`State`]
|
||||||
pub enum StateAction {
|
pub enum StateAction {
|
||||||
/// Create a new connection to the given node.
|
/// Create a new connection to the given node.
|
||||||
Connect { remote_addr: SocketAddr, node_id: NodeId },
|
Connect { remote_addr: SocketAddr, node_id: PeerId },
|
||||||
/// Disconnect an existing connection
|
/// Disconnect an existing connection
|
||||||
Disconnect { node_id: NodeId },
|
Disconnect { node_id: PeerId },
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, thiserror::Error)]
|
#[derive(Debug, thiserror::Error)]
|
||||||
@ -293,6 +292,6 @@ pub enum AddSessionError {
|
|||||||
#[error("No capacity for new sessions")]
|
#[error("No capacity for new sessions")]
|
||||||
AtCapacity {
|
AtCapacity {
|
||||||
/// The peer of the session
|
/// The peer of the session
|
||||||
peer: NodeId,
|
peer: PeerId,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|||||||
@ -2,12 +2,14 @@ use crate::{
|
|||||||
listener::{ConnectionListener, ListenerEvent},
|
listener::{ConnectionListener, ListenerEvent},
|
||||||
session::{SessionEvent, SessionId, SessionManager},
|
session::{SessionEvent, SessionId, SessionManager},
|
||||||
state::{AddSessionError, NetworkState, StateAction},
|
state::{AddSessionError, NetworkState, StateAction},
|
||||||
NodeId,
|
|
||||||
};
|
};
|
||||||
use futures::Stream;
|
use futures::Stream;
|
||||||
use reth_ecies::ECIESError;
|
use reth_eth_wire::{
|
||||||
use reth_eth_wire::capability::{Capabilities, CapabilityMessage};
|
capability::{Capabilities, CapabilityMessage},
|
||||||
|
error::EthStreamError,
|
||||||
|
};
|
||||||
use reth_interfaces::provider::BlockProvider;
|
use reth_interfaces::provider::BlockProvider;
|
||||||
|
use reth_primitives::PeerId;
|
||||||
use std::{
|
use std::{
|
||||||
io,
|
io,
|
||||||
net::SocketAddr,
|
net::SocketAddr,
|
||||||
@ -55,7 +57,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Triggers a new outgoing connection to the given node
|
/// Triggers a new outgoing connection to the given node
|
||||||
pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_id: NodeId) {
|
pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_id: PeerId) {
|
||||||
self.sessions.dial_outbound(remote_addr, remote_id)
|
self.sessions.dial_outbound(remote_addr, remote_id)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -191,13 +193,13 @@ pub enum SwarmEvent {
|
|||||||
/// Events related to the actual network protocol.
|
/// Events related to the actual network protocol.
|
||||||
CapabilityMessage {
|
CapabilityMessage {
|
||||||
/// The peer that sent the message
|
/// The peer that sent the message
|
||||||
node_id: NodeId,
|
node_id: PeerId,
|
||||||
/// Message received from the peer
|
/// Message received from the peer
|
||||||
message: CapabilityMessage,
|
message: CapabilityMessage,
|
||||||
},
|
},
|
||||||
/// Received a message that does not match the announced capabilities of the peer.
|
/// Received a message that does not match the announced capabilities of the peer.
|
||||||
InvalidCapabilityMessage {
|
InvalidCapabilityMessage {
|
||||||
node_id: NodeId,
|
node_id: PeerId,
|
||||||
/// Announced capabilities of the remote peer.
|
/// Announced capabilities of the remote peer.
|
||||||
capabilities: Arc<Capabilities>,
|
capabilities: Arc<Capabilities>,
|
||||||
/// Message received from the peer.
|
/// Message received from the peer.
|
||||||
@ -226,28 +228,28 @@ pub enum SwarmEvent {
|
|||||||
remote_addr: SocketAddr,
|
remote_addr: SocketAddr,
|
||||||
},
|
},
|
||||||
SessionEstablished {
|
SessionEstablished {
|
||||||
node_id: NodeId,
|
node_id: PeerId,
|
||||||
remote_addr: SocketAddr,
|
remote_addr: SocketAddr,
|
||||||
},
|
},
|
||||||
SessionClosed {
|
SessionClosed {
|
||||||
node_id: NodeId,
|
node_id: PeerId,
|
||||||
remote_addr: SocketAddr,
|
remote_addr: SocketAddr,
|
||||||
},
|
},
|
||||||
/// Closed an incoming pending session during authentication.
|
/// Closed an incoming pending session during authentication.
|
||||||
IncomingPendingSessionClosed {
|
IncomingPendingSessionClosed {
|
||||||
remote_addr: SocketAddr,
|
remote_addr: SocketAddr,
|
||||||
error: Option<ECIESError>,
|
error: Option<EthStreamError>,
|
||||||
},
|
},
|
||||||
/// Closed an outgoing pending session during authentication.
|
/// Closed an outgoing pending session during authentication.
|
||||||
OutgoingPendingSessionClosed {
|
OutgoingPendingSessionClosed {
|
||||||
remote_addr: SocketAddr,
|
remote_addr: SocketAddr,
|
||||||
node_id: NodeId,
|
node_id: PeerId,
|
||||||
error: Option<ECIESError>,
|
error: Option<EthStreamError>,
|
||||||
},
|
},
|
||||||
/// Failed to establish a tcp stream to the given address/node
|
/// Failed to establish a tcp stream to the given address/node
|
||||||
OutgoingConnectionError {
|
OutgoingConnectionError {
|
||||||
remote_addr: SocketAddr,
|
remote_addr: SocketAddr,
|
||||||
node_id: NodeId,
|
node_id: PeerId,
|
||||||
error: io::Error,
|
error: io::Error,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|||||||
5 crates/primitives/src/constants.rs (new file)
@ -0,0 +1,5 @@
|
|||||||
|
use crate::H256;
|
||||||
|
|
||||||
|
/// The Ethereum mainnet genesis hash.
|
||||||
|
pub const MAINNET_GENESIS: H256 =
|
||||||
|
H256(hex_literal::hex!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"));
|
||||||
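
The new `MAINNET_GENESIS` constant feeds the fork-id machinery: per EIP-2124, the fork hash at genesis is the CRC32 checksum of the genesis hash, which is what the `Frontier` fork hash below encodes. A small check of that relationship, assuming the `crc` 1.x API already used by the hardfork tests in this commit:

use crc::crc32;
use reth_primitives::MAINNET_GENESIS;

fn main() {
    // EIP-2124: the genesis fork hash is CRC32(genesis hash).
    let frontier = crc32::checksum_ieee(MAINNET_GENESIS.as_bytes()).to_be_bytes();
    assert_eq!(frontier, [0xfc, 0x64, 0xec, 0x04]);
}
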
@ -3,9 +3,9 @@
|
|||||||
#![deny(missing_docs)]
|
#![deny(missing_docs)]
|
||||||
#![allow(clippy::redundant_else, clippy::too_many_lines)]
|
#![allow(clippy::redundant_else, clippy::too_many_lines)]
|
||||||
|
|
||||||
|
use crate::{BlockNumber, H256};
|
||||||
use crc::crc32;
|
use crc::crc32;
|
||||||
use maplit::btreemap;
|
use maplit::btreemap;
|
||||||
use reth_primitives::{BlockNumber, H256};
|
|
||||||
use reth_rlp::*;
|
use reth_rlp::*;
|
||||||
use std::{
|
use std::{
|
||||||
collections::{BTreeMap, BTreeSet},
|
collections::{BTreeMap, BTreeSet},
|
||||||
225 crates/primitives/src/hardfork.rs (new file)
@ -0,0 +1,225 @@
|
|||||||
|
use crate::{BlockNumber, ForkFilter, ForkHash, ForkId, MAINNET_GENESIS};
|
||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
/// Ethereum mainnet hardforks
|
||||||
|
#[allow(missing_docs)]
|
||||||
|
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
|
||||||
|
pub enum Hardfork {
|
||||||
|
Frontier,
|
||||||
|
Homestead,
|
||||||
|
Dao,
|
||||||
|
Tangerine,
|
||||||
|
SpuriousDragon,
|
||||||
|
Byzantium,
|
||||||
|
Constantinople,
|
||||||
|
Petersburg,
|
||||||
|
Istanbul,
|
||||||
|
Muirglacier,
|
||||||
|
Berlin,
|
||||||
|
London,
|
||||||
|
ArrowGlacier,
|
||||||
|
GrayGlacier,
|
||||||
|
Latest,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Hardfork {
|
||||||
|
/// Get the first block number of the hardfork.
|
||||||
|
pub fn fork_block(&self) -> u64 {
|
||||||
|
match *self {
|
||||||
|
Hardfork::Frontier => 0,
|
||||||
|
Hardfork::Homestead => 1150000,
|
||||||
|
Hardfork::Dao => 1920000,
|
||||||
|
Hardfork::Tangerine => 2463000,
|
||||||
|
Hardfork::SpuriousDragon => 2675000,
|
||||||
|
Hardfork::Byzantium => 4370000,
|
||||||
|
Hardfork::Constantinople | Hardfork::Petersburg => 7280000,
|
||||||
|
Hardfork::Istanbul => 9069000,
|
||||||
|
Hardfork::Muirglacier => 9200000,
|
||||||
|
Hardfork::Berlin => 12244000,
|
||||||
|
Hardfork::London => 12965000,
|
||||||
|
Hardfork::ArrowGlacier => 13773000,
|
||||||
|
Hardfork::GrayGlacier | Hardfork::Latest => 15050000,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the EIP-2124 fork id for a given hardfork
|
||||||
|
///
|
||||||
|
/// The [`ForkId`](ethereum_forkid::ForkId) includes a CRC32 checksum of the all fork block
|
||||||
|
/// numbers from genesis, and the next upcoming fork block number.
|
||||||
|
/// If the next fork block number is not yet known, it is set to 0.
|
||||||
|
pub fn fork_id(&self) -> ForkId {
|
||||||
|
match *self {
|
||||||
|
Hardfork::Frontier => {
|
||||||
|
ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 }
|
||||||
|
}
|
||||||
|
Hardfork::Homestead => {
|
||||||
|
ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 }
|
||||||
|
}
|
||||||
|
Hardfork::Dao => ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 },
|
||||||
|
Hardfork::Tangerine => {
|
||||||
|
ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 }
|
||||||
|
}
|
||||||
|
Hardfork::SpuriousDragon => {
|
||||||
|
ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 }
|
||||||
|
}
|
||||||
|
Hardfork::Byzantium => {
|
||||||
|
ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 }
|
||||||
|
}
|
||||||
|
Hardfork::Constantinople | Hardfork::Petersburg => {
|
||||||
|
ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 }
|
||||||
|
}
|
||||||
|
Hardfork::Istanbul => {
|
||||||
|
ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 }
|
||||||
|
}
|
||||||
|
Hardfork::Muirglacier => {
|
||||||
|
ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 }
|
||||||
|
}
|
||||||
|
Hardfork::Berlin => ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 },
|
||||||
|
Hardfork::London => ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 },
|
||||||
|
Hardfork::ArrowGlacier => {
|
||||||
|
ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 }
|
||||||
|
}
|
||||||
|
Hardfork::Latest | Hardfork::GrayGlacier => {
|
||||||
|
// update `next` when another fork block num is known
|
||||||
|
ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 0 }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This returns all known hardforks in order.
|
||||||
|
pub fn all_forks() -> Vec<Self> {
|
||||||
|
vec![
|
||||||
|
Hardfork::Homestead,
|
||||||
|
Hardfork::Dao,
|
||||||
|
Hardfork::Tangerine,
|
||||||
|
Hardfork::SpuriousDragon,
|
||||||
|
Hardfork::Byzantium,
|
||||||
|
Hardfork::Constantinople, /* petersburg is skipped because it's the same block num
|
||||||
|
* as constantinople */
|
||||||
|
Hardfork::Istanbul,
|
||||||
|
Hardfork::Muirglacier,
|
||||||
|
Hardfork::Berlin,
|
||||||
|
Hardfork::London,
|
||||||
|
Hardfork::ArrowGlacier,
|
||||||
|
Hardfork::GrayGlacier,
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This returns all known hardfork block numbers as a vector.
|
||||||
|
pub fn all_fork_blocks() -> Vec<BlockNumber> {
|
||||||
|
Hardfork::all_forks().iter().map(|f| f.fork_block()).collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates a [`ForkFilter`](crate::ForkFilter) for the given hardfork.
|
||||||
|
/// This assumes the current hardfork's block number is the current head and uses all known
|
||||||
|
/// future hardforks to initialize the filter.
|
||||||
|
pub fn fork_filter(&self) -> ForkFilter {
|
||||||
|
let all_forks = Hardfork::all_forks();
|
||||||
|
let future_forks: Vec<BlockNumber> = all_forks
|
||||||
|
.iter()
|
||||||
|
.filter(|f| f.fork_block() > self.fork_block())
|
||||||
|
.map(|f| f.fork_block())
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
// this data structure is not chain-agnostic, so we can pass in the constant mainnet
|
||||||
|
// genesis
|
||||||
|
ForkFilter::new(self.fork_block(), MAINNET_GENESIS, future_forks)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for Hardfork {
|
||||||
|
type Err = String;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
let s = s.to_lowercase();
|
||||||
|
let hardfork = match s.as_str() {
|
||||||
|
"frontier" | "1" => Hardfork::Frontier,
|
||||||
|
"homestead" | "2" => Hardfork::Homestead,
|
||||||
|
"dao" | "3" => Hardfork::Dao,
|
||||||
|
"tangerine" | "4" => Hardfork::Tangerine,
|
||||||
|
"spuriousdragon" | "5" => Hardfork::SpuriousDragon,
|
||||||
|
"byzantium" | "6" => Hardfork::Byzantium,
|
||||||
|
"constantinople" | "7" => Hardfork::Constantinople,
|
||||||
|
"petersburg" | "8" => Hardfork::Petersburg,
|
||||||
|
"istanbul" | "9" => Hardfork::Istanbul,
|
||||||
|
"muirglacier" | "10" => Hardfork::Muirglacier,
|
||||||
|
"berlin" | "11" => Hardfork::Berlin,
|
||||||
|
"london" | "12" => Hardfork::London,
|
||||||
|
"arrowglacier" | "13" => Hardfork::ArrowGlacier,
|
||||||
|
"grayglacier" => Hardfork::GrayGlacier,
|
||||||
|
"latest" | "14" => Hardfork::Latest,
|
||||||
|
_ => return Err(format!("Unknown hardfork {s}")),
|
||||||
|
};
|
||||||
|
Ok(hardfork)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for Hardfork {
|
||||||
|
fn default() -> Self {
|
||||||
|
Hardfork::Latest
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<BlockNumber> for Hardfork {
|
||||||
|
fn from(num: BlockNumber) -> Hardfork {
|
||||||
|
match num {
|
||||||
|
_i if num < 1_150_000 => Hardfork::Frontier,
|
||||||
|
_i if num < 1_920_000 => Hardfork::Dao,
|
||||||
|
_i if num < 2_463_000 => Hardfork::Homestead,
|
||||||
|
_i if num < 2_675_000 => Hardfork::Tangerine,
|
||||||
|
_i if num < 4_370_000 => Hardfork::SpuriousDragon,
|
||||||
|
_i if num < 7_280_000 => Hardfork::Byzantium,
|
||||||
|
_i if num < 9_069_000 => Hardfork::Constantinople,
|
||||||
|
_i if num < 9_200_000 => Hardfork::Istanbul,
|
||||||
|
_i if num < 12_244_000 => Hardfork::Muirglacier,
|
||||||
|
_i if num < 12_965_000 => Hardfork::Berlin,
|
||||||
|
_i if num < 13_773_000 => Hardfork::London,
|
||||||
|
_i if num < 15_050_000 => Hardfork::ArrowGlacier,
|
||||||
|
|
||||||
|
_ => Hardfork::Latest,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use crate::{forkid::ForkHash, hardfork::Hardfork};
|
||||||
|
use crc::crc32;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_hardfork_blocks() {
|
||||||
|
let hf: Hardfork = 12_965_000u64.into();
|
||||||
|
assert_eq!(hf, Hardfork::London);
|
||||||
|
|
||||||
|
let hf: Hardfork = 4370000u64.into();
|
||||||
|
assert_eq!(hf, Hardfork::Byzantium);
|
||||||
|
|
||||||
|
let hf: Hardfork = 12244000u64.into();
|
||||||
|
assert_eq!(hf, Hardfork::Berlin);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
// this test checks that the fork hash assigned to forks accurately map to the fork_id method
|
||||||
|
fn test_forkhash_from_fork_blocks() {
|
||||||
|
// set the genesis hash
|
||||||
|
let genesis =
|
||||||
|
hex::decode("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// set the frontier forkhash
|
||||||
|
let mut curr_forkhash = ForkHash(crc32::checksum_ieee(&genesis[..]).to_be_bytes());
|
||||||
|
|
||||||
|
// now we go through enum members
|
||||||
|
let frontier_forkid = Hardfork::Frontier.fork_id();
|
||||||
|
assert_eq!(curr_forkhash, frontier_forkid.hash);
|
||||||
|
|
||||||
|
// list of the above hardforks
|
||||||
|
let hardforks = Hardfork::all_forks();
|
||||||
|
|
||||||
|
// check that the curr_forkhash we compute matches the output of each fork_id returned
|
||||||
|
for hardfork in hardforks {
|
||||||
|
curr_forkhash += hardfork.fork_block();
|
||||||
|
assert_eq!(curr_forkhash, hardfork.fork_id().hash);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@ -10,7 +10,10 @@
|
|||||||
mod account;
|
mod account;
|
||||||
mod block;
|
mod block;
|
||||||
mod chain;
|
mod chain;
|
||||||
|
mod constants;
|
||||||
mod error;
|
mod error;
|
||||||
|
mod forkid;
|
||||||
|
mod hardfork;
|
||||||
mod header;
|
mod header;
|
||||||
mod hex_bytes;
|
mod hex_bytes;
|
||||||
mod integer_list;
|
mod integer_list;
|
||||||
@ -23,6 +26,9 @@ mod transaction;
|
|||||||
pub use account::Account;
|
pub use account::Account;
|
||||||
pub use block::{Block, BlockLocked};
|
pub use block::{Block, BlockLocked};
|
||||||
pub use chain::Chain;
|
pub use chain::Chain;
|
||||||
|
pub use constants::MAINNET_GENESIS;
|
||||||
|
pub use forkid::{ForkFilter, ForkHash, ForkId, ValidationError};
|
||||||
|
pub use hardfork::Hardfork;
|
||||||
pub use header::{Header, SealedHeader};
|
pub use header::{Header, SealedHeader};
|
||||||
pub use hex_bytes::Bytes;
|
pub use hex_bytes::Bytes;
|
||||||
pub use integer_list::IntegerList;
|
pub use integer_list::IntegerList;
|
||||||
@ -56,6 +62,12 @@ pub type StorageKey = H256;
|
|||||||
/// Storage value
|
/// Storage value
|
||||||
pub type StorageValue = U256;
|
pub type StorageValue = U256;
|
||||||
|
|
||||||
|
// TODO: should we use `PublicKey` for this? Even when dealing with public keys we should try to
|
||||||
|
// prevent misuse
|
||||||
|
/// This represents an uncompressed secp256k1 public key.
|
||||||
|
/// This encodes the concatenation of the x and y components of the affine point in bytes.
|
||||||
|
pub type PeerId = H512;
|
||||||
|
|
||||||
pub use ethers_core::{
|
pub use ethers_core::{
|
||||||
types as rpc,
|
types as rpc,
|
||||||
types::{BigEndianHash, Bloom, H128, H160, H256, H512, H64, U128, U256, U64},
|
types::{BigEndianHash, Bloom, H128, H160, H256, H512, H64, U128, U256, U64},
|
||||||