Mirror of https://github.com/hl-archive-node/nanoreth.git, synced 2025-12-06 10:59:55 +00:00
feat(p2p): integrate txpool in p2p (#208)
@@ -50,7 +50,7 @@ impl NetworkHandle {
     }
 
     /// Sends a [`NetworkHandleMessage`] to the manager
-    fn send_message(&self, msg: NetworkHandleMessage) {
+    pub(crate) fn send_message(&self, msg: NetworkHandleMessage) {
         let _ = self.inner.to_manager_tx.send(msg);
     }
 
@@ -1,24 +1,35 @@
 //! Transaction management for the p2p network.
 
-use crate::{cache::LruCache, manager::NetworkEvent, message::PeerRequestSender, NetworkHandle};
-use futures::stream::FuturesUnordered;
-use reth_primitives::{PeerId, Transaction, H256};
-use reth_transaction_pool::TransactionPool;
+use crate::{
+    cache::LruCache,
+    manager::NetworkEvent,
+    message::{PeerRequest, PeerRequestSender},
+    network::NetworkHandleMessage,
+    NetworkHandle,
+};
+use futures::{stream::FuturesUnordered, FutureExt, StreamExt};
+use reth_eth_wire::{GetPooledTransactions, NewPooledTransactionHashes, PooledTransactions};
+use reth_interfaces::p2p::error::RequestResult;
+use reth_primitives::{
+    FromRecoveredTransaction, IntoRecoveredTransaction, PeerId, TransactionSigned, TxHash, H256,
+};
+use reth_transaction_pool::{error::PoolResult, TransactionPool};
 use std::{
     collections::{hash_map::Entry, HashMap},
     future::Future,
     num::NonZeroUsize,
     pin::Pin,
     sync::Arc,
     task::{Context, Poll},
 };
-use tokio::sync::mpsc;
-use tokio_stream::wrappers::UnboundedReceiverStream;
+use tokio::sync::{mpsc, oneshot, oneshot::Sender};
+use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream};
 
 /// Cache limit of transactions to keep track of for a single peer.
-const PEER_TRANSACTION_CACHE_LIMIT: usize = 1024;
+const PEER_TRANSACTION_CACHE_LIMIT: usize = 1024 * 10;
 
 /// The future for inserting a function into the pool
-pub type PoolImportFuture = Pin<Box<dyn Future<Output = ()> + Send>>;
+pub type PoolImportFuture = Pin<Box<dyn Future<Output = PoolResult<TxHash>> + Send + 'static>>;
 
 /// Api to interact with [`TransactionsManager`] task.
 pub struct TransactionsHandle {
@@ -52,11 +63,13 @@ pub struct TransactionsManager<Pool> {
     ///
     /// From which we get all new incoming transaction related messages.
     network_events: UnboundedReceiverStream<NetworkEvent>,
+    /// All currently active requests for pooled transactions.
+    inflight_requests: Vec<GetPooledTxRequest>,
     /// All currently pending transactions grouped by peers.
     ///
     /// This way we can track incoming transactions and prevent multiple pool imports for the same
     /// transaction
-    transactions_by_peers: HashMap<H256, Vec<PeerId>>,
+    transactions_by_peers: HashMap<TxHash, Vec<PeerId>>,
     /// Transactions that are currently imported into the `Pool`
     pool_imports: FuturesUnordered<PoolImportFuture>,
     /// All the connected peers.
@@ -65,28 +78,36 @@ pub struct TransactionsManager<Pool> {
     command_tx: mpsc::UnboundedSender<TransactionsCommand>,
     /// Incoming commands from [`TransactionsHandle`].
     command_rx: UnboundedReceiverStream<TransactionsCommand>,
+    /// Incoming commands from [`TransactionsHandle`].
+    pending_transactions: ReceiverStream<TxHash>,
 }
 
 // === impl TransactionsManager ===
 
 impl<Pool> TransactionsManager<Pool>
 where
-    Pool: TransactionPool<Transaction = Transaction>,
+    Pool: TransactionPool + Clone,
+    <Pool as TransactionPool>::Transaction: IntoRecoveredTransaction,
 {
     /// Sets up a new instance.
     pub fn new(network: NetworkHandle, pool: Pool) -> Self {
         let network_events = network.event_listener();
         let (command_tx, command_rx) = mpsc::unbounded_channel();
 
+        // install a listener for new transactions
+        let pending = pool.pending_transactions_listener();
+
         Self {
             pool,
             network,
             network_events: UnboundedReceiverStream::new(network_events),
+            inflight_requests: Default::default(),
             transactions_by_peers: Default::default(),
             pool_imports: Default::default(),
             peers: Default::default(),
             command_tx,
             command_rx: UnboundedReceiverStream::new(command_rx),
+            pending_transactions: ReceiverStream::new(pending),
         }
     }
 
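Note (not part of the diff): in the constructor above, the receiver returned by `pool.pending_transactions_listener()` is wrapped in a `ReceiverStream` so the manager can poll newly pending transaction hashes like any other stream. The stand-alone sketch below shows that adapter pattern with plain tokio types only; the bounded channel, the `[u8; 32]` stand-in for `TxHash`, and the task split are illustrative assumptions, not the pool's actual API.

```rust
// Illustrative sketch: turning an mpsc receiver (like the one assumed to back
// `pending_transactions_listener()`) into a pollable `Stream` via `ReceiverStream`.
use tokio::sync::mpsc;
use tokio_stream::{wrappers::ReceiverStream, StreamExt};

type TxHash = [u8; 32]; // stand-in for the real 32-byte transaction hash type

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel::<TxHash>(64);

    // "pool" side: announce newly pending transaction hashes
    tokio::spawn(async move {
        let _ = tx.send([0u8; 32]).await;
        let _ = tx.send([1u8; 32]).await;
    });

    // "manager" side: wrap the receiver so it can be polled as a stream
    // (the real manager does this inside its `Future::poll` loop).
    let mut pending = ReceiverStream::new(rx);
    while let Some(hash) = pending.next().await {
        println!("new pending tx hash prefix: {:?}", &hash[..4]);
    }
}
```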
@@ -95,8 +116,63 @@ where
         TransactionsHandle { manager_tx: self.command_tx.clone() }
     }
 
+    /// Request handler for an incoming request for transactions
+    fn on_get_pooled_transactions(
+        &mut self,
+        peer_id: PeerId,
+        request: GetPooledTransactions,
+        response: Sender<RequestResult<PooledTransactions>>,
+    ) {
+        if let Some(peer) = self.peers.get_mut(&peer_id) {
+            let transactions = self
+                .pool
+                .get_all(request.0)
+                .into_iter()
+                .map(|tx| tx.transaction.to_recovered_transaction().into_signed())
+                .collect::<Vec<_>>();
+
+            // we sent a response at which point we assume that the peer is aware of the transaction
+            peer.transactions.extend(transactions.iter().map(|tx| tx.hash()));
+
+            let resp = PooledTransactions(transactions);
+            let _ = response.send(Ok(resp));
+        }
+    }
+
+    /// Request handler for an incoming `NewPooledTransactionHashes`
+    fn on_new_pooled_transactions(
+        &mut self,
+        peer_id: PeerId,
+        msg: Arc<NewPooledTransactionHashes>,
+    ) {
+        if let Some(peer) = self.peers.get_mut(&peer_id) {
+            let mut transactions = Arc::try_unwrap(msg).unwrap_or_else(|arc| (*arc).clone()).0;
+
+            // keep track of the transactions the peer knows
+            peer.transactions.extend(transactions.clone());
+
+            self.pool.retain_unknown(&mut transactions);
+
+            if transactions.is_empty() {
+                // nothing to request
+                return
+            }
+
+            // request the missing transactions
+            let (response, rx) = oneshot::channel();
+            let req = PeerRequest::GetPooledTransactions {
+                request: GetPooledTransactions(transactions),
+                response,
+            };
+
+            if peer.request_tx.try_send(req).is_ok() {
+                self.inflight_requests.push(GetPooledTxRequest { peer_id, response: rx })
+            }
+        }
+    }
+
     /// Handles a received event
-    async fn on_event(&mut self, event: NetworkEvent) {
+    fn on_event(&mut self, event: NetworkEvent) {
         match event {
             NetworkEvent::SessionClosed { peer_id } => {
                 // remove the peer
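Note (not part of the diff): `on_new_pooled_transactions` above asks the peer's session for the unknown transactions and parks the `oneshot` receiver in `inflight_requests`, where the manager's `Future` impl later polls it. Reduced to plain tokio types, the request/response round trip looks roughly like this sketch; `Request`, its fields, and the string payloads are placeholders rather than the real wire types.

```rust
use tokio::sync::{mpsc, oneshot};

// Placeholder request type: `payload` stands in for the requested tx hashes and the
// oneshot sender stands in for the `PooledTransactions` response channel.
struct Request {
    payload: Vec<u64>,
    response: oneshot::Sender<Vec<String>>,
}

#[tokio::main]
async fn main() {
    let (req_tx, mut req_rx) = mpsc::channel::<Request>(8);

    // "session" side: answers each incoming request over its oneshot channel
    tokio::spawn(async move {
        while let Some(req) = req_rx.recv().await {
            let txs: Vec<String> = req.payload.iter().map(|h| format!("tx-{}", h)).collect();
            let _ = req.response.send(txs);
        }
    });

    // "manager" side: issue a request and keep the receiver as the in-flight entry
    let (response, rx) = oneshot::channel();
    req_tx.send(Request { payload: vec![1, 2, 3], response }).await.unwrap();

    let pooled = rx.await.unwrap();
    println!("received {} pooled transactions", pooled.len());
}
```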
@@ -114,35 +190,140 @@ where
                     },
                 );
 
-                // TODO send `NewPooledTransactionHashes
+                // Send a `NewPooledTransactionHashes` to the peer with _all_ transactions in the
+                // pool
+                let msg = Arc::new(NewPooledTransactionHashes(self.pool.pooled_transactions()));
+                self.network.send_message(NetworkHandleMessage::SendPooledTransactionHashes {
+                    peer_id,
+                    msg,
+                })
             }
             NetworkEvent::IncomingTransactions { peer_id, msg } => {
                 let transactions = Arc::try_unwrap(msg).unwrap_or_else(|arc| (*arc).clone());
 
-                if let Some(peer) = self.peers.get_mut(&peer_id) {
-                    for tx in transactions.0 {
-                        // track that the peer knows this transaction
-                        peer.transactions.insert(tx.hash);
-
-                        match self.transactions_by_peers.entry(tx.hash) {
-                            Entry::Occupied(mut entry) => {
-                                // transaction was already inserted
-                                entry.get_mut().push(peer_id);
-                            }
-                            Entry::Vacant(_) => {
-                                // TODO import into the pool
-                            }
-                        }
-                    }
-                }
+                self.import_transactions(peer_id, transactions.0);
             }
-            NetworkEvent::IncomingPooledTransactionHashes { .. } => {}
-            NetworkEvent::GetPooledTransactions { .. } => {}
+            NetworkEvent::IncomingPooledTransactionHashes { peer_id, msg } => {
+                self.on_new_pooled_transactions(peer_id, msg)
+            }
+            NetworkEvent::GetPooledTransactions { peer_id, request, response } => {
+                if let Ok(response) = Arc::try_unwrap(response) {
+                    // TODO(mattsse): there should be a dedicated channel for the transaction
+                    // manager instead
+                    self.on_get_pooled_transactions(peer_id, request, response)
+                }
+            }
         }
     }
 
-    /// Executes an endless future
-    pub async fn run(self) {}
+    /// Starts the import process for the given transactions.
+    fn import_transactions(&mut self, peer_id: PeerId, transactions: Vec<TransactionSigned>) {
+        if let Some(peer) = self.peers.get_mut(&peer_id) {
+            for tx in transactions {
+                // recover transaction
+                let tx = if let Some(tx) = tx.into_ecrecovered() {
+                    tx
+                } else {
+                    // TODO: report peer?
+                    continue
+                };
+
+                // track that the peer knows this transaction
+                peer.transactions.insert(tx.hash);
+
+                match self.transactions_by_peers.entry(tx.hash) {
+                    Entry::Occupied(mut entry) => {
+                        // transaction was already inserted
+                        entry.get_mut().push(peer_id);
+                    }
+                    Entry::Vacant(entry) => {
+                        // this is a new transaction that should be imported into the pool
+                        let pool_transaction = <Pool::Transaction as FromRecoveredTransaction>::from_recovered_transaction(tx);
+
+                        let pool = self.pool.clone();
+                        let import = Box::pin(async move {
+                            pool.add_external_transaction(pool_transaction).await
+                        });
+
+                        self.pool_imports.push(import);
+                        entry.insert(vec![peer_id]);
+                    }
+                }
+            }
+        }
+    }
+
+    fn on_good_import(&mut self, hash: TxHash) {
+        if let Some(_peers) = self.transactions_by_peers.remove(&hash) {
+            // TODO report good peer?
+        }
+    }
+
+    fn on_bad_import(&mut self, hash: TxHash) {
+        if let Some(_peers) = self.transactions_by_peers.remove(&hash) {
+            // TODO report bad peer?
+        }
+    }
 }
 
+/// An endless future.
+///
+/// This should be spawned or used as part of `tokio::select!`.
+impl<Pool> Future for TransactionsManager<Pool>
+where
+    Pool: TransactionPool + Clone + Unpin,
+    <Pool as TransactionPool>::Transaction: IntoRecoveredTransaction,
+{
+    type Output = ();
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let this = self.get_mut();
+
+        // Advance all imports
+        while let Poll::Ready(Some(import_res)) = this.pool_imports.poll_next_unpin(cx) {
+            match import_res {
+                Ok(hash) => {
+                    this.on_good_import(hash);
+                }
+                Err(err) => {
+                    this.on_bad_import(*err.hash());
+                }
+            }
+        }
+
+        // handle new transactions
+        while let Poll::Ready(Some(_hash)) = this.pending_transactions.poll_next_unpin(cx) {
+            // TODO(mattsse): propagate new transactions
+        }
+
+        // Advance all requests.
+        // We remove each request one by one and add them back.
+        for idx in (0..this.inflight_requests.len()).rev() {
+            let mut req = this.inflight_requests.swap_remove(idx);
+            match req.response.poll_unpin(cx) {
+                Poll::Pending => {
+                    this.inflight_requests.push(req);
+                }
+                Poll::Ready(Ok(Ok(txs))) => {
+                    this.import_transactions(req.peer_id, txs.0);
+                }
+                Poll::Ready(Ok(Err(_))) => {
+                    // TODO report bad peer
+                }
+                Poll::Ready(Err(_)) => {
+                    // TODO report bad peer
+                }
+            }
+        }
+
+        Poll::Pending
+    }
+}
+
+/// An inflight request for `PooledTransactions` from a peer
+#[allow(missing_docs)]
+struct GetPooledTxRequest {
+    peer_id: PeerId,
+    response: oneshot::Receiver<RequestResult<PooledTransactions>>,
+}
+
 /// Tracks a single peer
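Note (not part of the diff): taken together, the new `TransactionsManager` is a self-driving task: it owns the pool handle, the network event stream, the in-flight peer requests, and the pending-transaction stream, and its `Future` impl polls all of them. A minimal wiring sketch follows, written as if it lived next to the manager so the imports shown in the diff are in scope; the extra `Send`/`Sync`/`'static` bounds and the assumption that `network` and `pool` are already constructed elsewhere are mine, not part of the commit.

```rust
// Hypothetical wiring sketch, assumed to sit in the same module as `TransactionsManager`
// so `NetworkHandle`, `TransactionPool`, and `IntoRecoveredTransaction` are already imported.
fn spawn_transactions_manager<Pool>(network: NetworkHandle, pool: Pool)
where
    // Bounds required by the manager and its `Future` impl (see the diff above),
    // plus `Send + Sync + 'static`, which this sketch assumes so the task can be spawned.
    Pool: TransactionPool + Clone + Unpin + Send + Sync + 'static,
    <Pool as TransactionPool>::Transaction: IntoRecoveredTransaction,
{
    // The constructor installs the pool's pending-transaction listener and subscribes
    // to network events.
    let manager = TransactionsManager::new(network, pool);

    // The manager implements `Future<Output = ()>` and never resolves on its own,
    // so it is driven as a long-running background task.
    tokio::spawn(manager);
}
```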