chore: downgrade debug! to trace! (#5186)

Author: Matthias Seitz
Date: 2023-10-26 11:13:26 +02:00
Committed by: GitHub
Parent: 4891559acc
Commit: f782a33310
5 changed files with 12 additions and 12 deletions
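For context: in the `tracing` crate, TRACE is one severity level below DEBUG, so the downgraded events disappear from output unless the subscriber explicitly enables TRACE. A minimal sketch of that behavior (not part of this commit; assumes a plain `tracing-subscriber` fmt setup):

    use tracing::{debug, trace};

    fn main() {
        // Subscriber capped at DEBUG: debug! events are emitted,
        // trace! events are filtered out.
        tracing_subscriber::fmt()
            .with_max_level(tracing::Level::DEBUG)
            .init();

        debug!(target: "net", "visible at the default DEBUG verbosity");
        trace!(target: "net", "only visible when TRACE is enabled");
    }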


@@ -678,7 +678,7 @@ where
     let total_active =
         this.num_active_peers.fetch_add(1, Ordering::Relaxed) + 1;
     this.metrics.connected_peers.set(total_active as f64);
-    debug!(
+    trace!(
         target: "net",
         ?remote_addr,
         %client_version,
@@ -768,7 +768,7 @@ where
         .notify(NetworkEvent::SessionClosed { peer_id, reason });
 }
 SwarmEvent::IncomingPendingSessionClosed { remote_addr, error } => {
-    debug!(
+    trace!(
         target : "net",
         ?remote_addr,
         ?error,
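The field shorthand in these call sites is `tracing`'s capture syntax: `?value` records the field with its `Debug` impl, `%value` with its `Display` impl. A small illustrative sketch (the helper function is hypothetical, not from this diff):

    use std::net::SocketAddr;
    use tracing::trace;

    // `?remote_addr` logs via Debug, `%client_version` via Display.
    fn log_session_closed(remote_addr: SocketAddr, client_version: &str) {
        trace!(target: "net", ?remote_addr, %client_version, "session closed");
    }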


@@ -27,7 +27,7 @@ use tokio::{
     time::{Instant, Interval},
 };
 use tokio_stream::wrappers::UnboundedReceiverStream;
-use tracing::{debug, info, trace};
+use tracing::{info, trace};

 /// A communication channel to the [`PeersManager`] to apply manual changes to the peer set.
 #[derive(Clone, Debug)]
@@ -610,7 +610,7 @@ impl PeersManager {
     self.queued_actions.push_back(PeerAction::PeerRemoved(peer_id));
     if peer.state.is_connected() {
-        debug!(target : "net::peers", ?peer_id, "disconnecting on remove from discovery");
+        trace!(target : "net::peers", ?peer_id, "disconnecting on remove from discovery");
         // we terminate the active session here, but only remove the peer after the session
         // was disconnected, this prevents the case where the session is scheduled for
         // disconnect but the node is immediately rediscovered, See also


@@ -29,7 +29,7 @@ use std::{
     task::{Context, Poll},
 };
 use tokio::sync::oneshot;
-use tracing::debug;
+use tracing::{debug, trace};

 /// Cache limit of blocks to keep track of for a single peer.
 const PEER_BLOCK_CACHE_LIMIT: usize = 512;
@@ -259,13 +259,13 @@ where
     /// Bans the [`IpAddr`] in the discovery service.
     pub(crate) fn ban_ip_discovery(&self, ip: IpAddr) {
-        debug!(target: "net", ?ip, "Banning discovery");
+        trace!(target: "net", ?ip, "Banning discovery");
         self.discovery.ban_ip(ip)
     }

     /// Bans the [`PeerId`] and [`IpAddr`] in the discovery service.
     pub(crate) fn ban_discovery(&self, peer_id: PeerId, ip: IpAddr) {
-        debug!(target: "net", ?peer_id, ?ip, "Banning discovery");
+        trace!(target: "net", ?peer_id, ?ip, "Banning discovery");
         self.discovery.ban(peer_id, ip)
     }


@@ -20,7 +20,7 @@ use std::{
     sync::Arc,
     task::{Context, Poll},
 };
-use tracing::{debug, trace};
+use tracing::trace;

 /// Contains the connectivity related state of the network.
 ///
@@ -226,7 +226,7 @@ where
         return Some(SwarmEvent::IncomingTcpConnection { session_id, remote_addr })
     }
     Err(err) => {
-        debug!(target: "net", ?err, "Incoming connection rejected, capacity already reached.");
+        trace!(target: "net", ?err, "Incoming connection rejected, capacity already reached.");
         self.state_mut()
             .peers_mut()
             .on_incoming_pending_session_rejected_internally();


@@ -35,7 +35,7 @@ use std::{
 };
 use tokio::sync::{mpsc, oneshot, oneshot::error::RecvError};
 use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream};
-use tracing::{debug, trace};
+use tracing::trace;

 /// Cache limit of transactions to keep track of for a single peer.
 const PEER_TRANSACTION_CACHE_LIMIT: usize = 1024 * 10;
@@ -519,7 +519,7 @@ where
     if num_already_seen > 0 {
         self.metrics.messages_with_already_seen_hashes.increment(1);
-        debug!(target: "net::tx", num_hashes=%num_already_seen, ?peer_id, client=?peer.client_version, "Peer sent already seen hashes");
+        trace!(target: "net::tx", num_hashes=%num_already_seen, ?peer_id, client=?peer.client_version, "Peer sent already seen hashes");
     }
 }
@@ -698,7 +698,7 @@ where
     if num_already_seen > 0 {
         self.metrics.messages_with_already_seen_transactions.increment(1);
-        debug!(target: "net::tx", num_txs=%num_already_seen, ?peer_id, client=?peer.client_version, "Peer sent already seen transactions");
+        trace!(target: "net::tx", num_txs=%num_already_seen, ?peer_id, client=?peer.client_version, "Peer sent already seen transactions");
     }
 }
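Operationally, the downgraded messages stay reachable per target. With `tracing-subscriber`'s `EnvFilter` (the `env-filter` feature), a directive such as `RUST_LOG="info,net::tx=trace"` re-enables a single target without turning on TRACE globally. A minimal sketch of that setup, not specific to this repository:

    use tracing_subscriber::EnvFilter;

    fn main() {
        // Reads RUST_LOG; e.g. RUST_LOG="info,net::tx=trace" keeps output
        // at INFO overall while re-enabling the net::tx trace events.
        tracing_subscriber::fmt()
            .with_env_filter(EnvFilter::from_default_env())
            .init();
    }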