chore: downgrade error and warn traces to debug (#2541)

This commit is contained in:
Matthias Seitz
2023-05-03 15:53:05 +02:00
committed by GitHub
parent 03977950c8
commit 10b97b29af
7 changed files with 16 additions and 16 deletions

View File

@@ -13,7 +13,7 @@ use std::{
time::Duration,
};
use tokio::time::Interval;
use tracing::{info, warn};
use tracing::{debug, info};
/// The current high-level state of the node.
struct NodeState {
@@ -67,7 +67,7 @@ impl NodeState {
}
NetworkEvent::SessionClosed { peer_id, reason } => {
let reason = reason.map(|s| s.to_string()).unwrap_or_else(|| "None".to_string());
warn!(target: "reth::cli", connected_peers = self.num_connected_peers(), peer_id = %peer_id, %reason, "Peer disconnected.");
debug!(target: "reth::cli", connected_peers = self.num_connected_peers(), peer_id = %peer_id, %reason, "Peer disconnected.");
}
_ => (),
}

View File

@@ -1491,7 +1491,7 @@ impl Discv4Service {
match event {
IngressEvent::RecvError(_) => {}
IngressEvent::BadPacket(from, err, data) => {
warn!(target : "discv4", ?from, ?err, packet=?hex::encode(&data), "bad packet");
debug!(target : "discv4", ?from, ?err, packet=?hex::encode(&data), "bad packet");
}
IngressEvent::Packet(remote_addr, Packet { msg, node_id, hash }) => {
trace!( target : "discv4", r#type=?msg.msg_type(), from=?remote_addr,"received packet");
@@ -1602,7 +1602,7 @@ pub(crate) async fn receive_loop(udp: Arc<UdpSocket>, tx: IngressSender, local_i
let res = udp.recv_from(&mut buf).await;
match res {
Err(err) => {
warn!(target : "discv4", ?err, "Failed to read datagram.");
debug!(target : "discv4", ?err, "Failed to read datagram.");
send(IngressEvent::RecvError(err)).await;
}
Ok((read, remote_addr)) => {
@@ -1611,13 +1611,13 @@ pub(crate) async fn receive_loop(udp: Arc<UdpSocket>, tx: IngressSender, local_i
Ok(packet) => {
if packet.node_id == local_id {
// received our own message
warn!(target : "discv4", ?remote_addr, "Received own packet.");
debug!(target : "discv4", ?remote_addr, "Received own packet.");
continue
}
send(IngressEvent::Packet(remote_addr, packet)).await;
}
Err(err) => {
warn!( target : "discv4", ?err,"Failed to decode packet");
debug!( target : "discv4", ?err,"Failed to decode packet");
send(IngressEvent::BadPacket(remote_addr, err, packet.to_vec())).await
}
}

View File

@@ -336,7 +336,7 @@ where
this.metrics.buffered_responses.increment(1.);
}
Err(error) => {
tracing::error!(target: "downloaders::bodies", ?error, "Request failed");
tracing::debug!(target: "downloaders::bodies", ?error, "Request failed");
this.clear();
return Poll::Ready(Some(Err(error)))
}
@@ -363,7 +363,7 @@ where
}
Ok(None) => break 'inner,
Err(error) => {
tracing::error!(target: "downloaders::bodies", ?error, "Failed to form next request");
tracing::debug!(target: "downloaders::bodies", ?error, "Failed to form next request");
this.clear();
return Poll::Ready(Some(Err(error)))
}

View File

@@ -83,7 +83,7 @@ where
fn on_error(&mut self, error: DownloadError, peer_id: Option<PeerId>) {
self.metrics.increment_errors(&error);
tracing::error!(target: "downloaders::bodies", ?peer_id, %error, "Error requesting bodies");
tracing::debug!(target: "downloaders::bodies", ?peer_id, %error, "Error requesting bodies");
if let Some(peer_id) = peer_id {
self.client.report_bad_message(peer_id);
}

View File

@@ -54,7 +54,7 @@ use std::{
};
use tokio::sync::mpsc;
use tokio_stream::wrappers::UnboundedReceiverStream;
use tracing::{debug, error, info, trace, warn};
use tracing::{debug, error, info, trace};
/// Manages the _entire_ state of the network.
///
/// This is an endless [`Future`] that consistently drives the state of the entire network forward.
@@ -742,7 +742,7 @@ where
.notify(NetworkEvent::SessionClosed { peer_id, reason });
}
SwarmEvent::IncomingPendingSessionClosed { remote_addr, error } => {
warn!(
debug!(
target : "net",
?remote_addr,
?error,

View File

@@ -37,7 +37,7 @@ use tokio::{
time::Interval,
};
use tokio_stream::wrappers::ReceiverStream;
use tracing::{debug, error, info, trace, warn};
use tracing::{debug, error, info, trace};
/// Constants for timeout updating
@@ -415,7 +415,7 @@ impl ActiveSession {
for (id, req) in self.inflight_requests.iter_mut() {
if req.is_timed_out(now) {
if req.is_waiting() {
warn!(target: "net::session", ?id, remote_peer_id=?self.remote_peer_id, "timed out outgoing request");
debug!(target: "net::session", ?id, remote_peer_id=?self.remote_peer_id, "timed out outgoing request");
req.timeout();
} else if now - req.timestamp > self.protocol_breach_request_timeout {
return true
@@ -585,7 +585,7 @@ impl Future for ActiveSession {
}
}
Err(err) => {
error!(target: "net::session", ?err, remote_peer_id=?this.remote_peer_id, "failed to receive message");
debug!(target: "net::session", ?err, remote_peer_id=?this.remote_peer_id, "failed to receive message");
this.close_on_error(err);
return Poll::Ready(())
}

View File

@@ -28,7 +28,7 @@ use std::{
task::{Context, Poll},
};
use tokio::sync::oneshot;
use tracing::{debug, error};
use tracing::debug;
/// Cache limit of blocks to keep track of for a single peer.
const PEER_BLOCK_CACHE_LIMIT: usize = 512;
@@ -405,7 +405,7 @@ where
Poll::Ready(res) => {
// check if the error is due to a closed channel to the session
if res.err().map(|err| err.is_channel_closed()).unwrap_or_default() {
error!(
debug!(
target : "net",
?id,
"Request canceled, response channel from session closed."