Mirror of https://github.com/hl-archive-node/nanoreth.git (synced 2025-12-06 10:59:55 +00:00)
chore: use Display instead of Debug for most errors (#6777)
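The change is mechanical: in the `tracing` macros, a `?` sigil records a field with its `Debug` implementation, while a `%` sigil records it with its `Display` implementation, so switching error fields from `?err` to `%err` makes logs print the human-readable error message instead of the raw struct/enum shape. A minimal sketch of the difference (illustrative, not from this commit; assumes the `tracing` and `tracing-subscriber` crates):

    use std::fmt;

    #[derive(Debug)]
    struct DecodeError {
        offset: usize,
    }

    impl fmt::Display for DecodeError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "failed to decode input at byte {}", self.offset)
        }
    }

    fn main() {
        tracing_subscriber::fmt().with_max_level(tracing::Level::DEBUG).init();
        let err = DecodeError { offset: 42 };
        // Debug capture (old style): logs `err=DecodeError { offset: 42 }`
        tracing::debug!(?err, "decoding failed");
        // Display capture (new style): logs `err=failed to decode input at byte 42`
        tracing::debug!(%err, "decoding failed");
    }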
@@ -837,7 +837,7 @@ impl<DB: Database, EVM: ExecutorFactory> BlockchainTree<DB, EVM> {
             .try_insert_validated_block(block, BlockValidationKind::SkipStateRootValidation)
             .map_err(|err| {
                 debug!(
-                    target: "blockchain_tree", ?err,
+                    target: "blockchain_tree", %err,
                     "Failed to insert buffered block",
                 );
                 err

@@ -182,7 +182,7 @@ where
                 }
             }
             Err(err) => {
-                error!(target: "consensus::auto", ?err, "Autoseal fork choice update failed");
+                error!(target: "consensus::auto", %err, "Autoseal fork choice update failed");
                 return None
             }
         }

@@ -219,7 +219,7 @@ where
                     .send(reth_provider::CanonStateNotification::Commit { new: chain });
             }
             Err(err) => {
-                warn!(target: "consensus::auto", ?err, "failed to execute block")
+                warn!(target: "consensus::auto", %err, "failed to execute block")
             }
         }

@@ -427,7 +427,7 @@ where
             Err(error) => {
                 if let RethError::Canonical(ref err) = error {
                     if err.is_fatal() {
-                        tracing::error!(target: "consensus::engine", ?err, "Encountered fatal error");
+                        tracing::error!(target: "consensus::engine", %err, "Encountered fatal error");
                         return Err(error)
                     }
                 }

@@ -658,7 +658,7 @@ where
         // skip the pipeline run
         match self.blockchain.header_by_hash_or_number(state.finalized_block_hash.into()) {
             Err(err) => {
-                warn!(target: "consensus::engine", ?err, "Failed to get finalized block header");
+                warn!(target: "consensus::engine", %err, "Failed to get finalized block header");
            }
            Ok(None) => {
                // we don't have the block yet and the distance exceeds the allowed

@@ -984,8 +984,8 @@ where
        // check if the new head was previously invalidated, if so then we deem this FCU
        // as invalid
        if let Some(invalid_ancestor) = self.check_invalid_ancestor(state.head_block_hash) {
-            warn!(target: "consensus::engine", ?error, ?state, ?invalid_ancestor, head=?state.head_block_hash, "Failed to canonicalize the head hash, head is also considered invalid");
-            debug!(target: "consensus::engine", head=?state.head_block_hash, current_error=?error, "Head was previously marked as invalid");
+            warn!(target: "consensus::engine", %error, ?state, ?invalid_ancestor, head=?state.head_block_hash, "Failed to canonicalize the head hash, head is also considered invalid");
+            debug!(target: "consensus::engine", head=?state.head_block_hash, current_error=%error, "Head was previously marked as invalid");
            return invalid_ancestor
        }

@@ -993,7 +993,7 @@ where
            RethError::Canonical(
                error @ CanonicalError::Validation(BlockValidationError::BlockPreMerge { .. }),
            ) => {
-                warn!(target: "consensus::engine", ?error, ?state, "Failed to canonicalize the head hash");
+                warn!(target: "consensus::engine", %error, ?state, "Failed to canonicalize the head hash");
                return PayloadStatus::from_status(PayloadStatusEnum::Invalid {
                    validation_error: error.to_string(),
                })

@@ -1007,7 +1007,7 @@ where
                // to a new target and is considered normal operation during sync
            }
            _ => {
-                warn!(target: "consensus::engine", ?error, ?state, "Failed to canonicalize the head hash");
+                warn!(target: "consensus::engine", %error, ?state, "Failed to canonicalize the head hash");
                // TODO(mattsse) better error handling before attempting to sync (FCU could be
                // invalid): only trigger sync if we can't determine whether the FCU is invalid
            }

@@ -1127,7 +1127,7 @@ where
                Ok(status)
            }
            Err(error) => {
-                warn!(target: "consensus::engine", ?error, "Error while processing payload");
+                warn!(target: "consensus::engine", %error, "Error while processing payload");
                self.map_insert_error(error)
            }
        };

@@ -1170,7 +1170,7 @@ where
        match self.payload_validator.ensure_well_formed_payload(payload, cancun_fields.into()) {
            Ok(block) => Ok(block),
            Err(error) => {
-                error!(target: "consensus::engine", ?error, "Invalid payload");
+                error!(target: "consensus::engine", %error, "Invalid payload");
                // we need to convert the error to a payload status (response to the CL)

                let latest_valid_hash =

@@ -1322,7 +1322,7 @@ where
                let (block, error) = err.split();

                if error.is_invalid_block() {
-                    warn!(target: "consensus::engine", invalid_hash=?block.hash(), invalid_number=?block.number, ?error, "Invalid block error on new payload");
+                    warn!(target: "consensus::engine", invalid_hash=?block.hash(), invalid_number=?block.number, %error, "Invalid block error on new payload");

                    // all of these occurred if the payload is invalid
                    let parent_hash = block.parent_hash;

@@ -1412,10 +1412,10 @@ where
                    }
                }
                Err(err) => {
-                    warn!(target: "consensus::engine", ?err, "Failed to insert downloaded block");
+                    warn!(target: "consensus::engine", %err, "Failed to insert downloaded block");
                    if err.kind().is_invalid_block() {
                        let (block, err) = err.split();
-                        warn!(target: "consensus::engine", invalid_number=?block.number, invalid_hash=?block.hash(), ?err, "Marking block as invalid");
+                        warn!(target: "consensus::engine", invalid_number=?block.number, invalid_hash=?block.hash(), %err, "Marking block as invalid");

                        self.invalid_headers.insert(block.header);
                    }

@@ -1608,7 +1608,7 @@ where
                }
            },
            Err(error) => {
-                error!(target: "consensus::engine", ?error, "Error getting canonical header for continuous sync");
+                error!(target: "consensus::engine", %error, "Error getting canonical header for continuous sync");
                return Some(Err(RethError::Provider(error).into()))
            }
        };

@@ -1690,7 +1690,7 @@ where
                }
            }
            Err(error) => {
-                error!(target: "consensus::engine", ?error, "Error restoring blockchain tree state");
+                error!(target: "consensus::engine", %error, "Error restoring blockchain tree state");
                return Some(Err(error.into()))
            }
        };

@@ -1724,7 +1724,7 @@ where
                    if let Err(error) =
                        self.blockchain.connect_buffered_blocks_to_canonical_hashes()
                    {
-                        error!(target: "consensus::engine", ?error, "Error connecting buffered blocks to canonical hashes on hook result");
+                        error!(target: "consensus::engine", %error, "Error connecting buffered blocks to canonical hashes on hook result");
                        return Err(error.into())
                    }
                }

@@ -150,7 +150,7 @@ where
            BodyResponse::PendingValidation(resp) => {
                // ensure the block is valid, else retry
                if let Err(err) = ensure_valid_body_response(&header, resp.data()) {
-                    debug!(target: "downloaders", ?err, hash=?header.hash(), "Received wrong body");
+                    debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body");
                    self.client.report_bad_message(resp.peer_id());
                    self.header = Some(header);
                    self.request.body = Some(self.client.get_block_body(self.hash));

@@ -164,7 +164,7 @@ where
    fn on_block_response(&mut self, resp: WithPeerId<BlockBody>) {
        if let Some(ref header) = self.header {
            if let Err(err) = ensure_valid_body_response(header, resp.data()) {
-                debug!(target: "downloaders", ?err, hash=?header.hash(), "Received wrong body");
+                debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body");
                self.client.report_bad_message(resp.peer_id());
                return
            }

@@ -438,7 +438,7 @@ where
            BodyResponse::PendingValidation(resp) => {
                // ensure the block is valid, else retry
                if let Err(err) = ensure_valid_body_response(header, resp.data()) {
-                    debug!(target: "downloaders", ?err, hash=?header.hash(), "Received wrong body in range response");
+                    debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body in range response");
                    self.client.report_bad_message(resp.peer_id());

                    // get body that doesn't match, put back into vecdeque, and retry it

@@ -1639,7 +1639,7 @@ impl Discv4Service {
                    debug!(target: "discv4", %err, "failed to read datagram");
                }
                IngressEvent::BadPacket(from, err, data) => {
-                    debug!(target: "discv4", ?from, ?err, packet=?hex::encode(&data), "bad packet");
+                    debug!(target: "discv4", ?from, %err, packet=?hex::encode(&data), "bad packet");
                }
                IngressEvent::Packet(remote_addr, Packet { msg, node_id, hash }) => {
                    trace!(target: "discv4", r#type=?msg.msg_type(), from=?remote_addr,"received packet");

@@ -1765,7 +1765,7 @@ pub(crate) async fn send_loop(udp: Arc<UdpSocket>, rx: EgressReceiver) {
                trace!(target: "discv4", ?to, ?size,"sent payload");
            }
            Err(err) => {
-                debug!(target: "discv4", ?to, ?err,"Failed to send datagram.");
+                debug!(target: "discv4", ?to, %err,"Failed to send datagram.");
            }
        }
    }

@@ -1788,7 +1788,7 @@ pub(crate) async fn receive_loop(udp: Arc<UdpSocket>, tx: IngressSender, local_i
        let res = udp.recv_from(&mut buf).await;
        match res {
            Err(err) => {
-                debug!(target: "discv4", ?err, "Failed to read datagram.");
+                debug!(target: "discv4", %err, "Failed to read datagram.");
                send(IngressEvent::RecvError(err)).await;
            }
            Ok((read, remote_addr)) => {

@@ -1803,7 +1803,7 @@ pub(crate) async fn receive_loop(udp: Arc<UdpSocket>, tx: IngressSender, local_i
                        send(IngressEvent::Packet(remote_addr, packet)).await;
                    }
                    Err(err) => {
-                        debug!(target: "discv4", ?err,"Failed to decode packet");
+                        debug!(target: "discv4", %err,"Failed to decode packet");
                        send(IngressEvent::BadPacket(remote_addr, err, packet.to_vec())).await
                    }
                }

@@ -146,7 +146,7 @@ impl Stream for MockDiscovery {
            match event {
                IngressEvent::RecvError(_) => {}
                IngressEvent::BadPacket(from, err, data) => {
-                    debug!(target: "discv4", ?from, ?err, packet=?hex::encode(&data), "bad packet");
+                    debug!(target: "discv4", ?from, %err, packet=?hex::encode(&data), "bad packet");
                }
                IngressEvent::Packet(remote_addr, Packet { msg, node_id, hash }) => match msg {
                    Message::Ping(ping) => {

@@ -234,7 +234,7 @@ impl<R: Resolver> DnsDiscoveryService<R> {
                    }
                },
                Err((err, link)) => {
-                    debug!(target: "disc::dns",?err, ?link, "Failed to lookup root")
+                    debug!(target: "disc::dns",%err, ?link, "Failed to lookup root")
                }
            }
        }

@@ -251,7 +251,7 @@ impl<R: Resolver> DnsDiscoveryService<R> {

            match entry {
                Some(Err(err)) => {
-                    debug!(target: "disc::dns",?err, domain=%link.domain, ?hash, "Failed to lookup entry")
+                    debug!(target: "disc::dns",%err, domain=%link.domain, ?hash, "Failed to lookup entry")
                }
                None => {
                    debug!(target: "disc::dns",domain=%link.domain, ?hash, "No dns entry")

@@ -19,7 +19,7 @@ impl<P: ConnectionProvider> Resolver for AsyncResolver<P> {
        let fqn = if query.ends_with('.') { query.to_string() } else { format!("{query}.") };
        match self.txt_lookup(fqn).await {
            Err(err) => {
-                trace!(target: "disc::dns", ?err, ?query, "dns lookup failed");
+                trace!(target: "disc::dns", %err, ?query, "dns lookup failed");
                None
            }
            Ok(lookup) => {

@@ -360,7 +360,7 @@ where
                    this.buffer_bodies_response(response);
                }
                Err(error) => {
-                    tracing::debug!(target: "downloaders::bodies", ?error, "Request failed");
+                    tracing::debug!(target: "downloaders::bodies", %error, "Request failed");
                    this.clear();
                    return Poll::Ready(Some(Err(error)))
                }

@@ -386,7 +386,7 @@ where
                    }
                    Ok(None) => break 'inner,
                    Err(error) => {
-                        tracing::error!(target: "downloaders::bodies", ?error, "Failed to download from next request");
+                        tracing::error!(target: "downloaders::bodies", %error, "Failed to download from next request");
                        this.clear();
                        return Poll::Ready(Some(Err(error)))
                    }

@@ -117,7 +117,7 @@ impl<T: BodyDownloader> Future for SpawnedDownloader<T> {
            while let Poll::Ready(update) = this.updates.poll_next_unpin(cx) {
                if let Some(range) = update {
                    if let Err(err) = this.downloader.set_download_range(range) {
-                        tracing::error!(target: "downloaders::bodies", ?err, "Failed to set bodies download range");
+                        tracing::error!(target: "downloaders::bodies", %err, "Failed to set bodies download range");

                        // Clone the sender ensure its availability. See [PollSender::clone].
                        let mut bodies_tx = this.bodies_tx.clone();

@@ -258,7 +258,7 @@ where
            validated.last().or_else(|| self.lowest_validated_header())
        {
            if let Err(error) = self.validate(validated_header, &parent) {
-                trace!(target: "downloaders::headers", ?error ,"Failed to validate header");
+                trace!(target: "downloaders::headers", %error ,"Failed to validate header");
                return Err(
                    HeadersResponseError { request, peer_id: Some(peer_id), error }.into()
                )

@@ -278,7 +278,7 @@ where
        {
            // Every header must be valid on its own
            if let Err(error) = self.consensus.validate_header(last_header) {
-                trace!(target: "downloaders::headers", ?error, "Failed to validate header");
+                trace!(target: "downloaders::headers", %error, "Failed to validate header");
                return Err(HeadersResponseError {
                    request,
                    peer_id: Some(peer_id),

@@ -294,7 +294,7 @@ where
            // detached head error.
            if let Err(error) = self.consensus.validate_header_against_parent(last_header, head) {
                // Replace the last header with a detached variant
-                error!(target: "downloaders::headers", ?error, number = last_header.number, hash = ?last_header.hash(), "Header cannot be attached to known canonical chain");
+                error!(target: "downloaders::headers", %error, number = last_header.number, hash = ?last_header.hash(), "Header cannot be attached to known canonical chain");
                return Err(HeadersDownloaderError::DetachedHead {
                    local_head: Box::new(head.clone()),
                    header: Box::new(last_header.clone()),

@@ -529,7 +529,7 @@ where
    fn penalize_peer(&self, peer_id: Option<PeerId>, error: &DownloadError) {
        // Penalize the peer for bad response
        if let Some(peer_id) = peer_id {
-            trace!(target: "downloaders::headers", ?peer_id, ?error, "Penalizing peer");
+            trace!(target: "downloaders::headers", ?peer_id, %error, "Penalizing peer");
            self.client.report_bad_message(peer_id);
        }
    }

@@ -775,7 +775,7 @@ where
            match this.on_sync_target_outcome(outcome) {
                Ok(()) => break,
                Err(ReverseHeadersDownloaderError::Response(error)) => {
-                    trace!(target: "downloaders::headers", ?error, "invalid sync target response");
+                    trace!(target: "downloaders::headers", %error, "invalid sync target response");
                    if error.is_channel_closed() {
                        // download channel closed which means the network was dropped
                        return Poll::Ready(None)

@@ -11,7 +11,6 @@ use std::io;

 /// Errors when sending/receiving messages
 #[derive(thiserror::Error, Debug)]
-
 pub enum EthStreamError {
     #[error(transparent)]
     /// Error of the underlying P2P connection.

@@ -134,7 +134,7 @@ where
                Err(P2PStreamError::HandshakeError(P2PHandshakeError::Disconnected(reason)))
            }
            Err(err) => {
-                debug!(?err, msg=%hex::encode(&first_message_bytes), "Failed to decode first message from peer");
+                debug!(%err, msg=%hex::encode(&first_message_bytes), "Failed to decode first message from peer");
                Err(P2PStreamError::HandshakeError(err.into()))
            }
            Ok(msg) => {

@@ -349,7 +349,7 @@ impl<S> DisconnectP2P for P2PStream<S> {
        let compressed_size =
            self.encoder.compress(&buf[1..], &mut compressed[1..]).map_err(|err| {
                debug!(
-                    ?err,
+                    %err,
                    msg=%hex::encode(&buf[1..]),
                    "error compressing disconnect"
                );

@@ -436,7 +436,7 @@ where
        // to decompress the message before we can decode it.
        this.decoder.decompress(&bytes[1..], &mut decompress_buf[1..]).map_err(|err| {
            debug!(
-                ?err,
+                %err,
                msg=%hex::encode(&bytes[1..]),
                "error decompressing p2p message"
            );

@@ -455,7 +455,7 @@ where
            _ if id == P2PMessageID::Disconnect as u8 => {
                let reason = DisconnectReason::decode(&mut &decompress_buf[1..]).map_err(|err| {
                    debug!(
-                        ?err, msg=%hex::encode(&decompress_buf[1..]), "Failed to decode disconnect message from peer"
+                        %err, msg=%hex::encode(&decompress_buf[1..]), "Failed to decode disconnect message from peer"
                    );
                    err
                })?;

@@ -573,7 +573,7 @@ where
        let compressed_size =
            this.encoder.compress(&item[1..], &mut compressed[1..]).map_err(|err| {
                debug!(
-                    ?err,
+                    %err,
                    msg=%hex::encode(&item[1..]),
                    "error compressing p2p message"
                );

@@ -244,14 +244,14 @@ async fn resolve_external_ip_upnp() -> Option<IpAddr> {
    search_gateway(Default::default())
        .await
        .map_err(|err| {
-            debug!(target: "net::nat", ?err, "Failed to resolve external IP via UPnP: failed to find gateway");
+            debug!(target: "net::nat", %err, "Failed to resolve external IP via UPnP: failed to find gateway");
            err
        })
        .ok()?
        .get_external_ip()
        .await
        .map_err(|err| {
-            debug!(target: "net::nat", ?err, "Failed to resolve external IP via UPnP");
+            debug!(target: "net::nat", %err, "Failed to resolve external IP via UPnP");
            err
        })
        .ok()

@@ -77,7 +77,7 @@ impl NetworkError {

 /// Abstraction over errors that can lead to a failed session
 #[auto_impl::auto_impl(&)]
-pub(crate) trait SessionError: fmt::Debug {
+pub(crate) trait SessionError: fmt::Debug + fmt::Display {
     /// Returns true if the error indicates that the corresponding peer should be removed from peer
     /// discovery, for example if it's using a different genesis hash.
     fn merits_discovery_ban(&self) -> bool;
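Note the trait-bound change just above: `%err` desugars to a `Display`-based field capture, so a generic `impl SessionError` value can only be logged with `%` once `fmt::Display` is a supertrait. A hedged sketch of the pattern (names illustrative, trimmed to one method):

    use std::fmt;

    trait SessionError: fmt::Debug + fmt::Display {
        fn is_fatal_protocol_error(&self) -> bool;
    }

    fn on_failed_connection(err: impl SessionError) {
        // Compiles only because `fmt::Display` is a supertrait of `SessionError`;
        // `%err` requires the value to implement `Display`.
        tracing::trace!(%err, "handling failed connection");
    }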
@@ -690,7 +690,7 @@ where
                trace!(target: "net", ?remote_addr, "TCP listener closed.");
            }
            SwarmEvent::TcpListenerError(err) => {
-                trace!(target: "net", ?err, "TCP connection error.");
+                trace!(target: "net", %err, "TCP connection error.");
            }
            SwarmEvent::IncomingTcpConnection { remote_addr, session_id } => {
                trace!(target: "net", ?session_id, ?remote_addr, "Incoming connection");

@@ -883,7 +883,7 @@ where
                    target: "net",
                    ?remote_addr,
                    ?peer_id,
-                    ?error,
+                    %error,
                    "Outgoing connection error"
                );

@@ -460,10 +460,10 @@ impl PeersManager {
        err: impl SessionError,
        reputation_change: ReputationChangeKind,
    ) {
-        trace!(target: "net::peers", ?remote_addr, ?peer_id, ?err, "handling failed connection");
+        trace!(target: "net::peers", ?remote_addr, ?peer_id, %err, "handling failed connection");

        if err.is_fatal_protocol_error() {
-            trace!(target: "net::peers", ?remote_addr, ?peer_id, ?err, "fatal connection error");
+            trace!(target: "net::peers", ?remote_addr, ?peer_id, %err, "fatal connection error");
            // remove the peer to which we can't establish a connection due to protocol related
            // issues.
            if let Some((peer_id, peer)) = self.peers.remove_entry(peer_id) {

@@ -293,7 +293,7 @@ impl ActiveSession {
                self.queued_outgoing.push_back(msg.into());
            }
            Err(err) => {
-                debug!(target: "net", ?err, "Failed to respond to received request");
+                debug!(target: "net", %err, "Failed to respond to received request");
            }
        }
    }

@@ -408,7 +408,7 @@ impl ActiveSession {
                self.poll_disconnect(cx)
            }
            Err(err) => {
-                debug!(target: "net::session", ?err, remote_peer_id=?self.remote_peer_id, "could not send disconnect");
+                debug!(target: "net::session", %err, remote_peer_id=?self.remote_peer_id, "could not send disconnect");
                self.close_on_error(err, cx)
            }
        }

@@ -557,7 +557,7 @@ impl Future for ActiveSession {
                    OutgoingMessage::Broadcast(msg) => this.conn.start_send_broadcast(msg),
                };
                if let Err(err) = res {
-                    debug!(target: "net::session", ?err, remote_peer_id=?this.remote_peer_id, "failed to send message");
+                    debug!(target: "net::session", %err, remote_peer_id=?this.remote_peer_id, "failed to send message");
                    // notify the manager
                    return this.close_on_error(err, cx)
                }

@@ -614,7 +614,7 @@ impl Future for ActiveSession {
                        progress = true;
                    }
                    OnIncomingMessageOutcome::BadMessage { error, message } => {
-                        debug!(target: "net::session", ?error, msg=?message, remote_peer_id=?this.remote_peer_id, "received invalid protocol message");
+                        debug!(target: "net::session", %error, msg=?message, remote_peer_id=?this.remote_peer_id, "received invalid protocol message");
                        return this.close_on_error(error, cx)
                    }
                    OnIncomingMessageOutcome::NoCapacity(msg) => {

@@ -625,7 +625,7 @@ impl Future for ActiveSession {
                    }
                }
                Err(err) => {
-                    debug!(target: "net::session", ?err, remote_peer_id=?this.remote_peer_id, "failed to receive message");
+                    debug!(target: "net::session", %err, remote_peer_id=?this.remote_peer_id, "failed to receive message");
                    return this.close_on_error(err, cx)
                }
            }

@@ -395,7 +395,7 @@ impl SessionManager {
                remote_addr,
                error,
            } => {
-                trace!(target: "net::session", ?peer_id, ?error,"closed session.");
+                trace!(target: "net::session", ?peer_id, %error,"closed session.");
                self.remove_active_session(&peer_id);
                Poll::Ready(SessionEvent::SessionClosedOnConnectionError {
                    remote_addr,

@@ -568,7 +568,7 @@ impl SessionManager {
            } => {
                trace!(
                    target: "net::session",
-                    ?error,
+                    %error,
                    ?session_id,
                    ?remote_addr,
                    ?peer_id,

@@ -580,7 +580,7 @@ impl SessionManager {
            PendingSessionEvent::EciesAuthError { remote_addr, session_id, error, direction } => {
                trace!(
                    target: "net::session",
-                    ?error,
+                    %error,
                    ?session_id,
                    ?remote_addr,
                    "ecies auth failed"

@@ -753,11 +753,13 @@ pub enum SessionEvent {
 }

 /// Errors that can occur during handshaking/authenticating the underlying streams.
-#[derive(Debug)]
+#[derive(Debug, thiserror::Error)]
 pub enum PendingSessionHandshakeError {
     /// The pending session failed due to an error while establishing the `eth` stream
+    #[error(transparent)]
     Eth(EthStreamError),
     /// The pending session failed due to an error while establishing the ECIES stream
+    #[error(transparent)]
     Ecies(ECIESError),
 }
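`PendingSessionHandshakeError` above gains a derived `Display` via `thiserror`: `#[error(transparent)]` forwards both `Display` and `Error::source` straight to the wrapped error, which is what lets the enum be logged with `%`. A small self-contained sketch of the pattern (the `Inner` type is made up; the real variants wrap `EthStreamError` and `ECIESError`):

    /// A made-up inner error with its own Display message.
    #[derive(Debug, thiserror::Error)]
    #[error("inner failure: {0}")]
    struct Inner(String);

    /// Wrapper whose Display (and source) forward to the inner error.
    #[derive(Debug, thiserror::Error)]
    enum Wrapper {
        #[error(transparent)]
        Inner(#[from] Inner),
    }

    fn main() {
        let err = Wrapper::from(Inner("bad handshake".to_string()));
        // Prints the inner error's Display output: "inner failure: bad handshake"
        println!("{err}");
    }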
@@ -220,7 +220,7 @@ where
                return Some(SwarmEvent::IncomingTcpConnection { session_id, remote_addr })
            }
            Err(err) => {
-                trace!(target: "net", ?err, "Incoming connection rejected, capacity already reached.");
+                trace!(target: "net", %err, "Incoming connection rejected, capacity already reached.");
                self.state_mut()
                    .peers_mut()
                    .on_incoming_pending_session_rejected_internally();

@@ -1234,7 +1234,7 @@ where
                    );
                }
                FetchEvent::FetchError { peer_id, error } => {
-                    trace!(target: "net::tx", ?peer_id, ?error, "requesting transactions from peer failed");
+                    trace!(target: "net::tx", ?peer_id, %error, "requesting transactions from peer failed");
                    this.on_request_error(peer_id, error);
                }
            }

@@ -1265,7 +1265,7 @@ where
                    // known that this transaction is bad. (e.g. consensus
                    // rules)
                    if err.is_bad_transaction() && !this.network.is_syncing() {
-                        debug!(target: "net::tx", ?err, "bad pool transaction import");
+                        debug!(target: "net::tx", %err, "bad pool transaction import");
                        this.on_bad_import(err.hash);
                        continue
                    }

@@ -116,43 +116,43 @@ fn collect_memory_stats() {
    use metrics::gauge;
    use tracing::error;

-    if epoch::advance().map_err(|error| error!(?error, "Failed to advance jemalloc epoch")).is_err()
+    if epoch::advance().map_err(|error| error!(%error, "Failed to advance jemalloc epoch")).is_err()
    {
        return
    }

    if let Ok(value) = stats::active::read()
-        .map_err(|error| error!(?error, "Failed to read jemalloc.stats.active"))
+        .map_err(|error| error!(%error, "Failed to read jemalloc.stats.active"))
    {
        gauge!("jemalloc.active", value as f64);
    }

    if let Ok(value) = stats::allocated::read()
-        .map_err(|error| error!(?error, "Failed to read jemalloc.stats.allocated"))
+        .map_err(|error| error!(%error, "Failed to read jemalloc.stats.allocated"))
    {
        gauge!("jemalloc.allocated", value as f64);
    }

    if let Ok(value) = stats::mapped::read()
-        .map_err(|error| error!(?error, "Failed to read jemalloc.stats.mapped"))
+        .map_err(|error| error!(%error, "Failed to read jemalloc.stats.mapped"))
    {
        gauge!("jemalloc.mapped", value as f64);
    }

    if let Ok(value) = stats::metadata::read()
-        .map_err(|error| error!(?error, "Failed to read jemalloc.stats.metadata"))
+        .map_err(|error| error!(%error, "Failed to read jemalloc.stats.metadata"))
    {
        gauge!("jemalloc.metadata", value as f64);
    }

    if let Ok(value) = stats::resident::read()
-        .map_err(|error| error!(?error, "Failed to read jemalloc.stats.resident"))
+        .map_err(|error| error!(%error, "Failed to read jemalloc.stats.resident"))
    {
        gauge!("jemalloc.resident", value as f64);
    }

    if let Ok(value) = stats::retained::read()
-        .map_err(|error| error!(?error, "Failed to read jemalloc.stats.retained"))
+        .map_err(|error| error!(%error, "Failed to read jemalloc.stats.retained"))
    {
        gauge!("jemalloc.retained", value as f64);
    }

@@ -205,14 +205,14 @@ fn collect_io_stats() {
    use tracing::error;

    let Ok(process) = procfs::process::Process::myself()
-        .map_err(|error| error!(?error, "Failed to get currently running process"))
+        .map_err(|error| error!(%error, "Failed to get currently running process"))
    else {
        return
    };

-    let Ok(io) = process.io().map_err(|error| {
-        error!(?error, "Failed to get IO stats for the currently running process")
-    }) else {
+    let Ok(io) = process.io().map_err(
+        |error| error!(%error, "Failed to get IO stats for the currently running process"),
+    ) else {
        return
    };

@@ -54,7 +54,7 @@ where
                info!(target: "reth::cli", peers_file=?file_path, "Wrote network peers to file");
            }
            Err(err) => {
-                warn!(target: "reth::cli", ?err, peers_file=?file_path, "Failed to write network peers to file");
+                warn!(target: "reth::cli", %err, peers_file=?file_path, "Failed to write network peers to file");
            }
        }
    }

@@ -428,7 +428,7 @@ where
            }
            Poll::Ready(Err(error)) => {
                // job failed, but we simply try again next interval
-                debug!(target: "payload_builder", ?error, "payload build attempt failed");
+                debug!(target: "payload_builder", %error, "payload build attempt failed");
                this.metrics.inc_failed_payload_builds();
            }
            Poll::Pending => {

@@ -568,7 +568,7 @@ where
                match empty_payload.poll_unpin(cx) {
                    Poll::Ready(Ok(res)) => {
                        if let Err(err) = &res {
-                            warn!(target: "payload_builder", ?err, "failed to resolve empty payload");
+                            warn!(target: "payload_builder", %err, "failed to resolve empty payload");
                        } else {
                            debug!(target: "payload_builder", "resolving empty payload");
                        }

@@ -391,7 +391,7 @@ where
                    trace!(%id, "payload job finished");
                }
                Poll::Ready(Err(err)) => {
-                    warn!(?err, ?id, "Payload builder job failed; resolving payload");
+                    warn!(%err, ?id, "Payload builder job failed; resolving payload");
                    this.metrics.inc_failed_jobs();
                    this.metrics.set_active_jobs(this.payload_jobs.len());
                }

@@ -426,7 +426,7 @@ where
            }
            Err(err) => {
                this.metrics.inc_failed_jobs();
-                warn!(?err, %id, "Failed to create payload builder job");
+                warn!(%err, %id, "Failed to create payload builder job");
                res = Err(err);
            }
        }

@@ -76,7 +76,7 @@ mod builder {
        debug!(target: "payload_builder", parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building empty payload");

        let state = client.state_by_block_hash(parent_block.hash()).map_err(|err| {
-            warn!(target: "payload_builder", parent_hash=%parent_block.hash(), ?err, "failed to get state for empty payload");
+            warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to get state for empty payload");
            err
        })?;
        let mut db = State::builder()

@@ -98,13 +98,13 @@ mod builder {
            &initialized_block_env,
            &attributes,
        ).map_err(|err| {
-            warn!(target: "payload_builder", parent_hash=%parent_block.hash(), ?err, "failed to apply beacon root contract call for empty payload");
+            warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to apply beacon root contract call for empty payload");
            err
        })?;

        let WithdrawalsOutcome { withdrawals_root, withdrawals } =
            commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, attributes.withdrawals.clone()).map_err(|err| {
-                warn!(target: "payload_builder", parent_hash=%parent_block.hash(), ?err, "failed to commit withdrawals for empty payload");
+                warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to commit withdrawals for empty payload");
                err
            })?;

@@ -116,7 +116,7 @@ mod builder {
        let bundle_state =
            BundleStateWithReceipts::new(db.take_bundle(), Receipts::new(), block_number);
        let state_root = state.state_root(&bundle_state).map_err(|err| {
-            warn!(target: "payload_builder", parent_hash=%parent_block.hash(), ?err, "failed to calculate state root for empty payload");
+            warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to calculate state root for empty payload");
            err
        })?;

@@ -278,11 +278,11 @@ mod builder {
                    EVMError::Transaction(err) => {
                        if matches!(err, InvalidTransaction::NonceTooLow { .. }) {
                            // if the nonce is too low, we can skip this transaction
-                            trace!(target: "payload_builder", ?err, ?tx, "skipping nonce too low transaction");
+                            trace!(target: "payload_builder", %err, ?tx, "skipping nonce too low transaction");
                        } else {
                            // if the transaction is invalid, we can skip it and all of its
                            // descendants
-                            trace!(target: "payload_builder", ?err, ?tx, "skipping invalid transaction and its descendants");
+                            trace!(target: "payload_builder", %err, ?tx, "skipping invalid transaction and its descendants");
                            best_txs.mark_invalid(&pool_tx);
                        }

@@ -115,7 +115,7 @@ mod builder {
        debug!(target: "payload_builder", parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building empty payload");

        let state = client.state_by_block_hash(parent_block.hash()).map_err(|err| {
-            warn!(target: "payload_builder", parent_hash=%parent_block.hash(), ?err, "failed to get state for empty payload");
+            warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to get state for empty payload");
            err
        })?;
        let mut db = State::builder()

@@ -137,13 +137,13 @@ mod builder {
            &initialized_block_env,
            &attributes,
        ).map_err(|err| {
-            warn!(target: "payload_builder", parent_hash=%parent_block.hash(), ?err, "failed to apply beacon root contract call for empty payload");
+            warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to apply beacon root contract call for empty payload");
            err
        })?;

        let WithdrawalsOutcome { withdrawals_root, withdrawals } =
            commit_withdrawals(&mut db, &chain_spec, attributes.payload_attributes.timestamp, attributes.payload_attributes.withdrawals.clone()).map_err(|err| {
-                warn!(target: "payload_builder", parent_hash=%parent_block.hash(), ?err, "failed to commit withdrawals for empty payload");
+                warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to commit withdrawals for empty payload");
                err
            })?;

@@ -155,7 +155,7 @@ mod builder {
        let bundle_state =
            BundleStateWithReceipts::new(db.take_bundle(), Receipts::new(), block_number);
        let state_root = state.state_root(&bundle_state).map_err(|err| {
-            warn!(target: "payload_builder", parent_hash=%parent_block.hash(), ?err, "failed to calculate state root for empty payload");
+            warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to calculate state root for empty payload");
            err
        })?;

@@ -345,7 +345,7 @@ mod builder {
                Err(err) => {
                    match err {
                        EVMError::Transaction(err) => {
-                            trace!(target: "payload_builder", ?err, ?sequencer_tx, "Error in sequencer transaction, skipping.");
+                            trace!(target: "payload_builder", %err, ?sequencer_tx, "Error in sequencer transaction, skipping.");
                            continue
                        }
                        err => {

@@ -431,11 +431,11 @@ mod builder {
                    EVMError::Transaction(err) => {
                        if matches!(err, InvalidTransaction::NonceTooLow { .. }) {
                            // if the nonce is too low, we can skip this transaction
-                            trace!(target: "payload_builder", ?err, ?tx, "skipping nonce too low transaction");
+                            trace!(target: "payload_builder", %err, ?tx, "skipping nonce too low transaction");
                        } else {
                            // if the transaction is invalid, we can skip it and all of its
                            // descendants
-                            trace!(target: "payload_builder", ?err, ?tx, "skipping invalid transaction and its descendants");
+                            trace!(target: "payload_builder", %err, ?tx, "skipping invalid transaction and its descendants");
                            best_txs.mark_invalid(&pool_tx);
                        }

@@ -74,7 +74,7 @@ where
            self.chain_spec.as_ref(),
            block.timestamp,
        ) {
-            debug!(target: "evm", ?error, ?receipts, "receipts verification failed");
+            debug!(target: "evm", %error, ?receipts, "receipts verification failed");
            return Err(error)
        };
        self.stats.receipt_root_duration += time.elapsed();

@@ -434,7 +434,7 @@ where
        if let Err(error) =
            verify_receipt(block.header.receipts_root, block.header.logs_bloom, receipts.iter())
        {
-            debug!(target: "evm", ?error, ?receipts, "receipts verification failed");
+            debug!(target: "evm", %error, ?receipts, "receipts verification failed");
            return Err(error)
        };
        self.stats.receipt_root_duration += time.elapsed();

@@ -403,7 +403,7 @@ fn on_stage_error<DB: Database>(
    err: StageError,
 ) -> Result<Option<ControlFlow>, PipelineError> {
    if let StageError::DetachedHead { local_head, header, error } = err {
-        warn!(target: "sync::pipeline", stage = %stage_id, ?local_head, ?header, ?error, "Stage encountered detached head");
+        warn!(target: "sync::pipeline", stage = %stage_id, ?local_head, ?header, %error, "Stage encountered detached head");

        // We unwind because of a detached head.
        let unwind_to =

@@ -164,7 +164,7 @@ where
                Ok(())
            }
            Some(Err(HeadersDownloaderError::DetachedHead { local_head, header, error })) => {
-                error!(target: "sync::stages::headers", ?error, "Cannot attach header to head");
+                error!(target: "sync::stages::headers", %error, "Cannot attach header to head");
                Err(StageError::DetachedHead { local_head, header, error })
            }
            None => Err(StageError::ChannelClosed),

@@ -167,10 +167,10 @@ impl DatabaseMetrics for DatabaseEnv {

                Ok::<(), eyre::Report>(())
            })
-            .map_err(|error| error!(?error, "Failed to read db table stats"));
+            .map_err(|error| error!(%error, "Failed to read db table stats"));

        if let Ok(freelist) =
-            self.freelist().map_err(|error| error!(?error, "Failed to read db.freelist"))
+            self.freelist().map_err(|error| error!(%error, "Failed to read db.freelist"))
        {
            metrics.push(("db.freelist", freelist as f64, vec![]));
        }

@@ -271,7 +271,7 @@ mod read_transactions {
                    if was_in_active && err != Error::BadSignature {
                        // If the transaction was in the list of active transactions and the
                        // error code is not `EBADSIGN`, then user didn't abort it.
-                        error!(target: "libmdbx", ?err, ?open_duration, "Failed to abort the long-lived read transactions");
+                        error!(target: "libmdbx", %err, ?open_duration, "Failed to abort the long-lived read transactions");
                    }
                } else {
                    // Happy path, the transaction has been aborted by us with no errors.

@@ -86,7 +86,7 @@ impl BlobStore for DiskFileBlobStore {
            });
            let _ = fs::remove_file(&path).map_err(|e| {
                let err = DiskFileBlobStoreError::DeleteFile(tx, path, e);
-                debug!(target:"txpool::blob", ?err);
+                debug!(target:"txpool::blob", %err);
            });
        }
        self.inner.size_tracker.sub_size(subsize as usize);

@@ -197,7 +197,7 @@ impl DiskFileBlobStoreInner {
        let _lock = self.file_lock.write();
        for (path, data) in raw {
            if let Err(err) = fs::write(&path, &data) {
-                debug!(target:"txpool::blob", ?err, ?path, "Failed to write blob file");
+                debug!(target:"txpool::blob", %err, ?path, "Failed to write blob file");
            } else {
                add += data.len();
                num += 1;

@@ -284,7 +284,7 @@ impl DiskFileBlobStoreInner {
                    res.push((tx, data));
                }
                Err(err) => {
-                    debug!(target:"txpool::blob", ?err, ?tx, "Failed to read blob file");
+                    debug!(target:"txpool::blob", %err, ?tx, "Failed to read blob file");
                }
            };
        }

@@ -233,7 +233,7 @@ pub async fn maintain_transaction_pool<Client, P, St, Tasks>(
                Some(Ok(Err(res))) => {
                    // Failed to load accounts from state
                    let (accs, err) = *res;
-                    debug!(target: "txpool", ?err, "failed to load accounts");
+                    debug!(target: "txpool", %err, "failed to load accounts");
                    dirty_addresses.extend(accs);
                }
                Some(Err(_)) => {

@@ -292,7 +292,7 @@ pub async fn maintain_transaction_pool<Client, P, St, Tasks>(
                    let (addresses, err) = *err;
                    debug!(
                        target: "txpool",
-                        ?err,
+                        %err,
                        "failed to load missing changed accounts at new tip: {:?}",
                        new_tip.hash()
                    );

@@ -761,7 +761,7 @@ where
    /// Inserts a blob transaction into the blob store
    fn insert_blob(&self, hash: TxHash, blob: BlobTransactionSidecar) {
        if let Err(err) = self.blob_store.insert(hash, blob) {
-            warn!(target: "txpool", ?err, "[{:?}] failed to insert blob", hash);
+            warn!(target: "txpool", %err, "[{:?}] failed to insert blob", hash);
            self.blob_store_metrics.blobstore_failed_inserts.increment(1);
        }
        self.update_blob_store_metrics();

@@ -770,7 +770,7 @@ where
    /// Delete a blob from the blob store
    pub(crate) fn delete_blob(&self, blob: TxHash) {
        if let Err(err) = self.blob_store.delete(blob) {
-            warn!(target: "txpool", ?err, "[{:?}] failed to delete blobs", blob);
+            warn!(target: "txpool", %err, "[{:?}] failed to delete blobs", blob);
            self.blob_store_metrics.blobstore_failed_deletes.increment(1);
        }
        self.update_blob_store_metrics();

@@ -780,7 +780,7 @@ where
    pub(crate) fn delete_blobs(&self, txs: Vec<TxHash>) {
        let num = txs.len();
        if let Err(err) = self.blob_store.delete_all(txs) {
-            warn!(target: "txpool", ?err,?num, "failed to delete blobs");
+            warn!(target: "txpool", %err,?num, "failed to delete blobs");
            self.blob_store_metrics.blobstore_failed_deletes.increment(num as u64);
        }
        self.update_blob_store_metrics();