diff --git a/Cargo.lock b/Cargo.lock
index 8c130152d96e..2fdc862bc031 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1896,6 +1896,31 @@ dependencies = [
"instant",
]
+[[package]]
+name = "fatality"
+version = "0.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2ad875162843b0d046276327afe0136e9ed3a23d5a754210fb6f1f33610d39ab"
+dependencies = [
+ "fatality-proc-macro",
+ "thiserror",
+]
+
+[[package]]
+name = "fatality-proc-macro"
+version = "0.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f5aa1e3ae159e592ad222dc90c5acbad632b527779ba88486abe92782ab268bd"
+dependencies = [
+ "expander",
+ "indexmap",
+ "proc-macro-crate 1.1.3",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "thiserror",
+]
+
[[package]]
name = "fdlimit"
version = "0.2.1"
@@ -6226,6 +6251,7 @@ version = "0.9.17"
dependencies = [
"assert_matches",
"derive_more",
+ "fatality",
"futures 0.3.21",
"futures-timer",
"lru 0.7.2",
@@ -6254,6 +6280,7 @@ version = "0.9.17"
dependencies = [
"assert_matches",
"env_logger 0.9.0",
+ "fatality",
"futures 0.3.21",
"futures-timer",
"log",
@@ -6337,8 +6364,8 @@ version = "0.9.17"
dependencies = [
"always-assert",
"assert_matches",
- "derive_more",
"env_logger 0.9.0",
+ "fatality",
"futures 0.3.21",
"futures-timer",
"log",
@@ -6378,6 +6405,7 @@ dependencies = [
"assert_matches",
"async-trait",
"derive_more",
+ "fatality",
"futures 0.3.21",
"futures-timer",
"lazy_static",
@@ -6656,6 +6684,7 @@ name = "polkadot-node-core-dispute-coordinator"
version = "0.9.17"
dependencies = [
"assert_matches",
+ "fatality",
"futures 0.3.21",
"kvdb",
"kvdb-memorydb",
@@ -6838,7 +6867,7 @@ name = "polkadot-node-network-protocol"
version = "0.9.17"
dependencies = [
"async-trait",
- "derive_more",
+ "fatality",
"futures 0.3.21",
"parity-scale-codec",
"polkadot-node-jaeger",
@@ -6925,6 +6954,7 @@ dependencies = [
"async-trait",
"derive_more",
"env_logger 0.9.0",
+ "fatality",
"futures 0.3.21",
"itertools",
"lazy_static",
@@ -7429,7 +7459,7 @@ version = "0.9.17"
dependencies = [
"arrayvec 0.5.2",
"assert_matches",
- "derive_more",
+ "fatality",
"futures 0.3.21",
"futures-timer",
"indexmap",
@@ -11558,7 +11588,7 @@ version = "1.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0"
dependencies = [
- "cfg-if 0.1.10",
+ "cfg-if 1.0.0",
"digest 0.10.2",
"rand 0.8.5",
"static_assertions",
diff --git a/node/core/dispute-coordinator/Cargo.toml b/node/core/dispute-coordinator/Cargo.toml
index 5dcc3d35b03e..e3a4ddfa7744 100644
--- a/node/core/dispute-coordinator/Cargo.toml
+++ b/node/core/dispute-coordinator/Cargo.toml
@@ -11,6 +11,7 @@ parity-scale-codec = "3.0.0"
kvdb = "0.11.0"
thiserror = "1.0.30"
lru = "0.7.2"
+fatality = "0.0.6"
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-primitives = { path = "../../primitives" }
@@ -19,6 +20,7 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" }
sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
+
[dev-dependencies]
kvdb-memorydb = "0.11.0"
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
diff --git a/node/core/dispute-coordinator/src/dummy.rs b/node/core/dispute-coordinator/src/dummy.rs
index 4f56a0fb9bba..ee58e617b0b5 100644
--- a/node/core/dispute-coordinator/src/dummy.rs
+++ b/node/core/dispute-coordinator/src/dummy.rs
@@ -24,7 +24,8 @@ use polkadot_primitives::v1::BlockNumber;
use futures::prelude::*;
-use crate::error::{Error, Result};
+use crate::error::Result;
+use fatality::Nested;
const LOG_TARGET: &str = "parachain::dispute-coordinator";
@@ -62,13 +63,16 @@ where
{
loop {
let res = run_until_error(&mut ctx, &subsystem).await;
- match res {
- Err(e) =>
- if let Error::Fatal(_) = e {
- break
- },
- Ok(()) => {
- tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
+ match res.into_nested() {
+ Err(fatal) => {
+ tracing::error!(target: LOG_TARGET, "Observed fatal issue: {:?}", fatal);
+ break
+ },
+ Ok(Err(jfyi)) => {
+ tracing::debug!(target: LOG_TARGET, "Observed issue: {:?}", jfyi);
+ },
+ Ok(Ok(())) => {
+ tracing::info!(target: LOG_TARGET, "Received `Conclude` signal, exiting");
break
},
}
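The hunk above is the heart of the migration: instead of pattern matching on a hand-written `Error::Fatal` wrapper, the main loop calls `fatality::Nested::into_nested()`, which layers the result as fatal on the outside and informational ("JFYI") on the inside. The standalone sketch below is not part of this PR and uses a made-up enum; it only illustrates that shape, assuming (as the rest of this diff does) that `#[fatality::fatality(splitable)]` generates companion `FatalError` and `JfyiError` types.

```rust
// Sketch only: hypothetical error enum, the real ones live in `error.rs`.
// Needs the `fatality` and `thiserror` crates as added in this PR.
use fatality::Nested;

#[fatality::fatality(splitable)]
enum Error {
    /// Marked `#[fatal]`: callers are expected to shut down.
    #[fatal]
    #[error("subsystem channel closed")]
    ChannelClosed,
    /// Unmarked: informational, callers log it and keep running.
    #[error("request timed out")]
    Timeout,
}

fn drive(res: Result<(), Error>) -> Result<(), FatalError> {
    // `into_nested()` maps `Result<T, Error>` to
    // `Result<Result<T, JfyiError>, FatalError>`, so the three cases of the
    // loop above become three plain match arms.
    match res.into_nested() {
        Err(fatal) => Err(fatal),
        Ok(Err(jfyi)) => {
            eprintln!("non-fatal: {}", jfyi);
            Ok(())
        },
        Ok(Ok(())) => Ok(()),
    }
}
```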
diff --git a/node/core/dispute-coordinator/src/error.rs b/node/core/dispute-coordinator/src/error.rs
index ad7115db45b0..f2445640a239 100644
--- a/node/core/dispute-coordinator/src/error.rs
+++ b/node/core/dispute-coordinator/src/error.rs
@@ -14,90 +14,64 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+use fatality::Nested;
use futures::channel::oneshot;
-use thiserror::Error;
-use polkadot_node_subsystem::{
- errors::{ChainApiError, RuntimeApiError},
- SubsystemError,
-};
+use polkadot_node_subsystem::{errors::ChainApiError, SubsystemError};
use polkadot_node_subsystem_util::{rolling_session_window::SessionsUnavailable, runtime};
-use crate::LOG_TARGET;
+use crate::{real::participation, LOG_TARGET};
use parity_scale_codec::Error as CodecError;
-/// Errors for this subsystem.
-#[derive(Debug, Error)]
-#[error(transparent)]
-pub enum Error {
- /// All fatal errors.
- Fatal(#[from] Fatal),
- /// All nonfatal/potentially recoverable errors.
- NonFatal(#[from] NonFatal),
-}
-
-/// General `Result` type for dispute coordinator.
-pub type Result<T> = std::result::Result<T, Error>;
-/// Result type with only fatal errors.
-pub type FatalResult<T> = std::result::Result<T, Fatal>;
-/// Result type with only non fatal errors.
-pub type NonFatalResult<T> = std::result::Result<T, NonFatal>;
-
-impl From<runtime::Error> for Error {
- fn from(o: runtime::Error) -> Self {
- match o {
- runtime::Error::Fatal(f) => Self::Fatal(Fatal::Runtime(f)),
- runtime::Error::NonFatal(f) => Self::NonFatal(NonFatal::Runtime(f)),
- }
- }
-}
-
-impl From<SubsystemError> for Error {
- fn from(o: SubsystemError) -> Self {
- match o {
- SubsystemError::Context(msg) => Self::Fatal(Fatal::SubsystemContext(msg)),
- _ => Self::NonFatal(NonFatal::Subsystem(o)),
- }
- }
-}
-
-/// Fatal errors of this subsystem.
-#[derive(Debug, Error)]
-pub enum Fatal {
- /// Errors coming from runtime::Runtime.
- #[error("Error while accessing runtime information {0}")]
- Runtime(#[from] runtime::Fatal),
+pub type Result<T> = std::result::Result<T, Error>;
+pub type FatalResult<T> = std::result::Result<T, FatalError>;
+pub type JfyiResult<T> = std::result::Result<T, JfyiError>;
+#[allow(missing_docs)]
+#[fatality::fatality(splitable)]
+pub enum Error {
/// We received a legacy `SubystemError::Context` error which is considered fatal.
+ #[fatal]
#[error("SubsystemError::Context error: {0}")]
SubsystemContext(String),
/// `ctx.spawn` failed with an error.
+ #[fatal]
#[error("Spawning a task failed: {0}")]
- SpawnFailed(SubsystemError),
+ SpawnFailed(#[source] SubsystemError),
+ #[fatal]
#[error("Participation worker receiver exhausted.")]
ParticipationWorkerReceiverExhausted,
/// Receiving subsystem message from overseer failed.
+ #[fatal]
#[error("Receiving message from overseer failed: {0}")]
SubsystemReceive(#[source] SubsystemError),
+ #[fatal]
#[error("Writing to database failed: {0}")]
DbWriteFailed(std::io::Error),
- #[error("Oneshot for receiving response from chain API got cancelled")]
- ChainApiSenderDropped,
+ #[fatal]
+ #[error("Oneshot for receiving block number from chain API got cancelled")]
+ CanceledBlockNumber,
- #[error("Retrieving response from chain API unexpectedly failed with error: {0}")]
- ChainApi(#[from] ChainApiError),
-}
+ #[fatal]
+ #[error("Retrieving block number from chain API failed with error: {0}")]
+ ChainApiBlockNumber(ChainApiError),
-#[derive(Debug, thiserror::Error)]
-#[allow(missing_docs)]
-pub enum NonFatal {
+ #[fatal]
#[error(transparent)]
- RuntimeApi(#[from] RuntimeApiError),
+ ChainApiAncestors(ChainApiError),
+
+ #[fatal]
+ #[error("Chain API dropped response channel sender")]
+ ChainApiSenderDropped,
+
+ #[fatal(forward)]
+ #[error("Error while accessing runtime information {0}")]
+ Runtime(#[from] runtime::Error),
#[error(transparent)]
ChainApi(#[from] ChainApiError),
@@ -112,7 +86,7 @@ pub enum NonFatal {
DisputeImportOneshotSend,
#[error(transparent)]
- Subsystem(SubsystemError),
+ Subsystem(#[from] SubsystemError),
#[error(transparent)]
Codec(#[from] CodecError),
@@ -121,36 +95,32 @@ pub enum NonFatal {
#[error("Sessions unavailable in `RollingSessionWindow`: {0}")]
RollingSessionWindow(#[from] SessionsUnavailable),
- /// Errors coming from runtime::Runtime.
- #[error("Error while accessing runtime information: {0}")]
- Runtime(#[from] runtime::NonFatal),
-
#[error(transparent)]
- QueueError(#[from] crate::real::participation::QueueError),
+ QueueError(#[from] participation::QueueError),
}
/// Utility for eating top level errors and log them.
///
/// We basically always want to try and continue on error. This utility function is meant to
/// consume top-level errors by simply logging them
-pub fn log_error(result: Result<()>) -> std::result::Result<(), Fatal> {
- match result {
- Err(Error::Fatal(f)) => Err(f),
- Err(Error::NonFatal(error)) => {
- error.log();
+pub fn log_error(result: Result<()>) -> std::result::Result<(), FatalError> {
+ match result.into_nested()? {
+ Ok(()) => Ok(()),
+ Err(jfyi) => {
+ jfyi.log();
Ok(())
},
- Ok(()) => Ok(()),
}
}
-impl NonFatal {
- /// Log a `NonFatal`.
+impl JfyiError {
+ /// Log a `JfyiError`.
pub fn log(self) {
match self {
// don't spam the log with spurious errors
- Self::RuntimeApi(_) | Self::Oneshot(_) =>
- tracing::debug!(target: LOG_TARGET, error = ?self),
+ Self::Runtime(_) | Self::Oneshot(_) => {
+ tracing::debug!(target: LOG_TARGET, error = ?self)
+ },
// it's worth reporting otherwise
_ => tracing::warn!(target: LOG_TARGET, error = ?self),
}
diff --git a/node/core/dispute-coordinator/src/real/db/v1.rs b/node/core/dispute-coordinator/src/real/db/v1.rs
index 05d58e88f286..bb8ce761281b 100644
--- a/node/core/dispute-coordinator/src/real/db/v1.rs
+++ b/node/core/dispute-coordinator/src/real/db/v1.rs
@@ -28,15 +28,14 @@ use kvdb::{DBTransaction, KeyValueDB};
use parity_scale_codec::{Decode, Encode};
use crate::{
- error::{Fatal, FatalResult},
+ error::{FatalError, FatalResult},
+ real::{
+ backend::{Backend, BackendWriteOp, OverlayedBackend},
+ DISPUTE_WINDOW,
+ },
status::DisputeStatus,
};
-use crate::real::{
- backend::{Backend, BackendWriteOp, OverlayedBackend},
- DISPUTE_WINDOW,
-};
-
const RECENT_DISPUTES_KEY: &[u8; 15] = b"recent-disputes";
const EARLIEST_SESSION_KEY: &[u8; 16] = b"earliest-session";
const CANDIDATE_VOTES_SUBKEY: &[u8; 15] = b"candidate-votes";
@@ -100,7 +99,7 @@ impl Backend for DbBackend {
}
}
- self.inner.write(tx).map_err(Fatal::DbWriteFailed)
+ self.inner.write(tx).map_err(FatalError::DbWriteFailed)
}
}
@@ -168,8 +167,8 @@ pub enum Error {
impl From<Error> for crate::error::Error {
fn from(err: Error) -> Self {
match err {
- Error::Io(io) => Self::NonFatal(crate::error::NonFatal::Io(io)),
- Error::Codec(e) => Self::NonFatal(crate::error::NonFatal::Codec(e)),
+ Error::Io(io) => Self::Io(io),
+ Error::Codec(e) => Self::Codec(e),
}
}
}
diff --git a/node/core/dispute-coordinator/src/real/initialized.rs b/node/core/dispute-coordinator/src/real/initialized.rs
index 8123aaada4ee..42ee0ae46217 100644
--- a/node/core/dispute-coordinator/src/real/initialized.rs
+++ b/node/core/dispute-coordinator/src/real/initialized.rs
@@ -53,7 +53,7 @@ use polkadot_primitives::{
};
use crate::{
- error::{log_error, Error, Fatal, FatalResult, NonFatal, NonFatalResult, Result},
+ error::{log_error, Error, FatalError, FatalResult, JfyiError, JfyiResult, Result},
metrics::Metrics,
real::{ordering::get_finalized_block_number, DisputeCoordinatorSubsystem},
status::{get_active_with_status, Clock, DisputeStatus, Timestamp},
@@ -610,7 +610,7 @@ impl Initialized {
overlay_db: &mut OverlayedBackend<'_, impl Backend>,
message: DisputeCoordinatorMessage,
now: Timestamp,
- ) -> Result<Box<dyn FnOnce() -> NonFatalResult<()>>> {
+ ) -> Result<Box<dyn FnOnce() -> JfyiResult<()>>> {
match message {
DisputeCoordinatorMessage::ImportStatements {
candidate_hash,
@@ -633,7 +633,7 @@ impl Initialized {
let report = move || {
pending_confirmation
.send(outcome)
- .map_err(|_| NonFatal::DisputeImportOneshotSend)
+ .map_err(|_| JfyiError::DisputeImportOneshotSend)
};
match outcome {
ImportStatementsResult::InvalidImport => {
@@ -733,7 +733,7 @@ impl Initialized {
// Helper function for checking subsystem errors in message processing.
fn ensure_available_session_info(&self) -> Result<()> {
if let Some(subsystem_error) = self.error.clone() {
- return Err(Error::NonFatal(NonFatal::RollingSessionWindow(subsystem_error)))
+ return Err(Error::RollingSessionWindow(subsystem_error))
}
Ok(())
@@ -1174,8 +1174,8 @@ impl MuxedMessage {
let from_overseer = ctx.recv().fuse();
futures::pin_mut!(from_overseer, from_sender);
futures::select!(
- msg = from_overseer => Ok(Self::Subsystem(msg.map_err(Fatal::SubsystemReceive)?)),
- msg = from_sender.next() => Ok(Self::Participation(msg.ok_or(Fatal::ParticipationWorkerReceiverExhausted)?)),
+ msg = from_overseer => Ok(Self::Subsystem(msg.map_err(FatalError::SubsystemReceive)?)),
+ msg = from_sender.next() => Ok(Self::Participation(msg.ok_or(FatalError::ParticipationWorkerReceiverExhausted)?)),
)
}
}
diff --git a/node/core/dispute-coordinator/src/real/mod.rs b/node/core/dispute-coordinator/src/real/mod.rs
index 6d6d7be85abc..064ead5b6b26 100644
--- a/node/core/dispute-coordinator/src/real/mod.rs
+++ b/node/core/dispute-coordinator/src/real/mod.rs
@@ -40,13 +40,13 @@ use polkadot_node_subsystem_util::rolling_session_window::RollingSessionWindow;
use polkadot_primitives::v1::{ValidatorIndex, ValidatorPair};
use crate::{
- error::{Error, FatalResult, NonFatal, Result},
+ error::{FatalResult, JfyiError, Result},
metrics::Metrics,
status::{get_active_with_status, SystemClock},
};
-
use backend::{Backend, OverlayedBackend};
use db::v1::DbBackend;
+use fatality::Split;
use self::{
ordering::CandidateComparator,
@@ -196,9 +196,8 @@ impl DisputeCoordinatorSubsystem {
tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
return Ok(None)
},
- Err(Error::Fatal(f)) => return Err(f),
- Err(Error::NonFatal(e)) => {
- e.log();
+ Err(e) => {
+ e.split()?.log();
continue
},
};
@@ -219,9 +218,8 @@ impl DisputeCoordinatorSubsystem {
.await
{
Ok(v) => v,
- Err(Error::Fatal(f)) => return Err(f),
- Err(Error::NonFatal(e)) => {
- e.log();
+ Err(e) => {
+ e.split()?.log();
continue
},
};
@@ -371,7 +369,7 @@ where
leaf.clone(),
RollingSessionWindow::new(ctx, DISPUTE_WINDOW, leaf.hash)
.await
- .map_err(NonFatal::RollingSessionWindow)?,
+ .map_err(JfyiError::RollingSessionWindow)?,
)))
} else {
Ok(None)
@@ -401,11 +399,13 @@ where
// available). So instead of telling subsystems, everything is fine, because of an
// hour old database state, we should rather cancel contained oneshots and delay
// finality until we are fully functional.
+ {
tracing::warn!(
target: LOG_TARGET,
?msg,
"Received msg before first active leaves update. This is not expected - message will be dropped."
- ),
+ )
+ },
}
}
}
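The two `Err(e) => { e.split()?.log(); continue }` arms above compress the old two-arm fatal/non-fatal match into one expression: `fatality::Split::split` peels the fatal half off so that `?` forwards it, and only the informational half gets logged before the loop continues. A minimal, self-contained sketch of that call-site shape follows, again with a hypothetical enum rather than this subsystem's real `Error`.

```rust
// Call-site sketch of `fatality::Split` as used in `real/mod.rs` above.
// The enum is made up; requires the `fatality` and `thiserror` crates.
use fatality::Split;

#[fatality::fatality(splitable)]
enum Error {
    #[fatal]
    #[error("database write failed")]
    DbWrite,
    #[error("stale response, ignoring")]
    Stale,
}

fn tolerate(res: Result<u32, Error>) -> Result<Option<u32>, FatalError> {
    match res {
        Ok(v) => Ok(Some(v)),
        Err(e) => {
            // `split()` yields `Result<JfyiError, FatalError>`; `?` re-raises
            // the fatal case, the informational value is merely reported.
            let jfyi = e.split()?;
            eprintln!("non-fatal: {}", jfyi);
            Ok(None)
        },
    }
}
```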
diff --git a/node/core/dispute-coordinator/src/real/ordering/mod.rs b/node/core/dispute-coordinator/src/real/ordering/mod.rs
index 52650a9cd252..3b7532135a11 100644
--- a/node/core/dispute-coordinator/src/real/ordering/mod.rs
+++ b/node/core/dispute-coordinator/src/real/ordering/mod.rs
@@ -29,7 +29,7 @@ use polkadot_node_subsystem_util::runtime::get_candidate_events;
use polkadot_primitives::v1::{BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, Hash};
use crate::{
- error::{Fatal, FatalResult, Result},
+ error::{FatalError, FatalResult, Result},
LOG_TARGET,
};
@@ -182,7 +182,7 @@ impl OrderingProvider {
&mut self,
sender: &mut Sender,
update: &ActiveLeavesUpdate,
- ) -> Result<()> {
+ ) -> crate::error::Result<()> {
if let Some(activated) = update.activated.as_ref() {
// Fetch last finalized block.
let ancestors = match get_finalized_block_number(sender).await {
@@ -299,7 +299,9 @@ impl OrderingProvider {
)
.await;
- rx.await.or(Err(Fatal::ChainApiSenderDropped))?.map_err(Fatal::ChainApi)?
+ rx.await
+ .or(Err(FatalError::ChainApiSenderDropped))?
+ .map_err(FatalError::ChainApiAncestors)?
};
let earliest_block_number = match head_number.checked_sub(hashes.len() as u32) {
@@ -356,8 +358,8 @@ where
receiver
.await
- .map_err(|_| Fatal::ChainApiSenderDropped)?
- .map_err(Fatal::ChainApi)
+ .map_err(|_| FatalError::ChainApiSenderDropped)?
+ .map_err(FatalError::ChainApiAncestors)
}
async fn get_block_number(
diff --git a/node/core/dispute-coordinator/src/real/participation/mod.rs b/node/core/dispute-coordinator/src/real/participation/mod.rs
index f2f3862ab5ed..1041a380a29f 100644
--- a/node/core/dispute-coordinator/src/real/participation/mod.rs
+++ b/node/core/dispute-coordinator/src/real/participation/mod.rs
@@ -29,12 +29,10 @@ use polkadot_node_subsystem::{
use polkadot_node_subsystem_util::runtime::get_validation_code_by_hash;
use polkadot_primitives::v1::{BlockNumber, CandidateHash, CandidateReceipt, Hash, SessionIndex};
-use crate::{
- error::{Fatal, FatalResult, NonFatal, Result},
- LOG_TARGET,
-};
+use crate::real::LOG_TARGET;
use super::ordering::CandidateComparator;
+use crate::error::{FatalError, FatalResult, JfyiError, Result};
#[cfg(test)]
mod tests;
@@ -43,7 +41,7 @@ pub use tests::{participation_full_happy_path, participation_missing_availabilit
mod queues;
use queues::Queues;
-pub use queues::{Error as QueueError, ParticipationRequest};
+pub use queues::{ParticipationRequest, QueueError};
/// How many participation processes do we want to run in parallel the most.
///
@@ -161,7 +159,7 @@ impl Participation {
}
}
// Out of capacity/no recent block yet - queue:
- Ok(self.queue.queue(comparator, req).map_err(NonFatal::QueueError)?)
+ Ok(self.queue.queue(comparator, req).map_err(JfyiError::QueueError)?)
}
/// Message from a worker task was received - get the outcome.
@@ -239,7 +237,7 @@ impl Participation {
"participation-worker",
participate(self.worker_sender.clone(), sender, recent_head, req).boxed(),
)
- .map_err(Fatal::SpawnFailed)?;
+ .map_err(FatalError::SpawnFailed)?;
}
Ok(())
}
diff --git a/node/core/dispute-coordinator/src/real/participation/queues/mod.rs b/node/core/dispute-coordinator/src/real/participation/queues/mod.rs
index e1cac851f4bc..dbdf00b77dae 100644
--- a/node/core/dispute-coordinator/src/real/participation/queues/mod.rs
+++ b/node/core/dispute-coordinator/src/real/participation/queues/mod.rs
@@ -16,8 +16,6 @@
use std::collections::{BTreeMap, HashMap};
-use thiserror::Error;
-
use polkadot_primitives::v1::{CandidateHash, CandidateReceipt, SessionIndex};
use crate::real::ordering::CandidateComparator;
@@ -83,8 +81,8 @@ struct BestEffortEntry {
}
/// What can go wrong when queuing a request.
-#[derive(Debug, Error)]
-pub enum Error {
+#[derive(Debug, thiserror::Error)]
+pub enum QueueError {
#[error("Request could not be queued, because best effort queue was already full.")]
BestEffortFull,
#[error("Request could not be queued, because priority queue was already full.")]
@@ -137,21 +135,21 @@ impl Queues {
&mut self,
comparator: Option<CandidateComparator>,
req: ParticipationRequest,
- ) -> Result<(), Error> {
+ ) -> Result<(), QueueError> {
debug_assert!(comparator
.map(|c| c.matches_candidate(req.candidate_hash()))
.unwrap_or(true));
if let Some(comparator) = comparator {
if self.priority.len() >= PRIORITY_QUEUE_SIZE {
- return Err(Error::PriorityFull)
+ return Err(QueueError::PriorityFull)
}
// Remove any best effort entry:
self.best_effort.remove(&req.candidate_hash);
self.priority.insert(comparator, req);
} else {
if self.best_effort.len() >= BEST_EFFORT_QUEUE_SIZE {
- return Err(Error::BestEffortFull)
+ return Err(QueueError::BestEffortFull)
}
// Note: The request might have been added to priority in a previous call already, we
// take care of that case in `dequeue` (more efficient).
diff --git a/node/core/dispute-coordinator/src/real/participation/queues/tests.rs b/node/core/dispute-coordinator/src/real/participation/queues/tests.rs
index 45c4dc5ded2e..3d5759a66d6b 100644
--- a/node/core/dispute-coordinator/src/real/participation/queues/tests.rs
+++ b/node/core/dispute-coordinator/src/real/participation/queues/tests.rs
@@ -20,7 +20,7 @@ use polkadot_primitives::v1::{BlockNumber, Hash};
use crate::real::ordering::CandidateComparator;
-use super::{Error, ParticipationRequest, Queues};
+use super::{ParticipationRequest, QueueError, Queues};
/// Make a `ParticipationRequest` based on the given commitments hash.
fn make_participation_request(hash: Hash) -> ParticipationRequest {
@@ -64,9 +64,9 @@ fn ordering_works_as_expected() {
queue.queue(None, req5.clone()).unwrap();
assert_matches!(
queue.queue(Some(make_dummy_comparator(&req_prio_full, 3)), req_prio_full),
- Err(Error::PriorityFull)
+ Err(QueueError::PriorityFull)
);
- assert_matches!(queue.queue(None, req_full), Err(Error::BestEffortFull));
+ assert_matches!(queue.queue(None, req_full), Err(QueueError::BestEffortFull));
assert_eq!(queue.dequeue(), Some(req_prio));
assert_eq!(queue.dequeue(), Some(req_prio_2));
diff --git a/node/core/dispute-coordinator/src/real/spam_slots.rs b/node/core/dispute-coordinator/src/real/spam_slots.rs
index b58b812b042a..7818b112411d 100644
--- a/node/core/dispute-coordinator/src/real/spam_slots.rs
+++ b/node/core/dispute-coordinator/src/real/spam_slots.rs
@@ -64,14 +64,14 @@ impl SpamSlots {
let mut slots: HashMap<(SessionIndex, ValidatorIndex), SpamCount> = HashMap::new();
for ((session, _), validators) in unconfirmed_disputes.iter() {
for validator in validators {
- let e = slots.entry((*session, *validator)).or_default();
- *e += 1;
- if *e > MAX_SPAM_VOTES {
+ let spam_vote_count = slots.entry((*session, *validator)).or_default();
+ *spam_vote_count += 1;
+ if *spam_vote_count > MAX_SPAM_VOTES {
tracing::debug!(
target: LOG_TARGET,
?session,
?validator,
- count = ?e,
+ count = ?spam_vote_count,
"Import exceeded spam slot for validator"
);
}
@@ -93,8 +93,8 @@ impl SpamSlots {
candidate: CandidateHash,
validator: ValidatorIndex,
) -> bool {
- let c = self.slots.entry((session, validator)).or_default();
- if *c >= MAX_SPAM_VOTES {
+ let spam_vote_count = self.slots.entry((session, validator)).or_default();
+ if *spam_vote_count >= MAX_SPAM_VOTES {
return false
}
let validators = self.unconfirmed.entry((session, candidate)).or_default();
@@ -103,7 +103,7 @@ impl SpamSlots {
// We only increment spam slots once per candidate, as each validator has to provide an
// opposing vote for sending out its own vote. Therefore, receiving multiple votes for
// a single candidate is expected and should not get punished here.
- *c += 1;
+ *spam_vote_count += 1;
}
true
@@ -118,8 +118,8 @@ impl SpamSlots {
if let Some(validators) = self.unconfirmed.remove(key) {
let (session, _) = key;
for validator in validators {
- if let Some(c) = self.slots.remove(&(*session, validator)) {
- let new = c - 1;
+ if let Some(spam_vote_count) = self.slots.remove(&(*session, validator)) {
+ let new = spam_vote_count - 1;
if new > 0 {
self.slots.insert((*session, validator), new);
}
diff --git a/node/core/parachains-inherent/src/lib.rs b/node/core/parachains-inherent/src/lib.rs
index b15afb831065..fdccb8321dd3 100644
--- a/node/core/parachains-inherent/src/lib.rs
+++ b/node/core/parachains-inherent/src/lib.rs
@@ -117,8 +117,8 @@ impl sp_inherents::InherentDataProvider for ParachainsInherentDataProvider {
async fn try_handle_error(
&self,
- _: &sp_inherents::InherentIdentifier,
- _: &[u8],
+ _identifier: &sp_inherents::InherentIdentifier,
+ _error: &[u8],
) -> Option<Result<(), sp_inherents::Error>> {
// Inherent isn't checked and can not return any error
None
diff --git a/node/network/availability-distribution/Cargo.toml b/node/network/availability-distribution/Cargo.toml
index 57cde892cb90..e87d12cb1221 100644
--- a/node/network/availability-distribution/Cargo.toml
+++ b/node/network/availability-distribution/Cargo.toml
@@ -20,6 +20,7 @@ thiserror = "1.0.30"
rand = "0.8.5"
derive_more = "0.99.17"
lru = "0.7.2"
+fatality = "0.0.6"
[dev-dependencies]
polkadot-subsystem-testhelpers = { package = "polkadot-node-subsystem-test-helpers", path = "../../subsystem-test-helpers" }
diff --git a/node/network/availability-distribution/src/error.rs b/node/network/availability-distribution/src/error.rs
index 4ecec30ae574..920eaef2395a 100644
--- a/node/network/availability-distribution/src/error.rs
+++ b/node/network/availability-distribution/src/error.rs
@@ -17,9 +17,9 @@
//! Error handling related code and Error/Result definitions.
+use fatality::Nested;
use polkadot_node_network_protocol::request_response::outgoing::RequestError;
use polkadot_primitives::v1::SessionIndex;
-use thiserror::Error;
use futures::channel::oneshot;
@@ -28,116 +28,86 @@ use polkadot_subsystem::{ChainApiError, SubsystemError};
use crate::LOG_TARGET;
-#[derive(Debug, Error, derive_more::From)]
-#[error(transparent)]
+#[allow(missing_docs)]
+#[fatality::fatality(splitable)]
pub enum Error {
- /// All fatal errors.
- Fatal(Fatal),
- /// All nonfatal/potentially recoverable errors.
- NonFatal(NonFatal),
-}
-
-impl From<runtime::Error> for Error {
- fn from(o: runtime::Error) -> Self {
- match o {
- runtime::Error::Fatal(f) => Self::Fatal(Fatal::Runtime(f)),
- runtime::Error::NonFatal(f) => Self::NonFatal(NonFatal::Runtime(f)),
- }
- }
-}
-
-/// Fatal errors of this subsystem.
-#[derive(Debug, Error)]
-pub enum Fatal {
- /// Spawning a running task failed.
+ #[fatal]
#[error("Spawning subsystem task failed: {0}")]
SpawnTask(#[source] SubsystemError),
- /// Requester stream exhausted.
+ #[fatal]
#[error("Erasure chunk requester stream exhausted")]
RequesterExhausted,
+ #[fatal]
#[error("Receive channel closed: {0}")]
IncomingMessageChannel(#[source] SubsystemError),
- /// Errors coming from runtime::Runtime.
+ #[fatal(forward)]
#[error("Error while accessing runtime information: {0}")]
- Runtime(#[from] runtime::Fatal),
+ Runtime(#[from] runtime::Error),
+ #[fatal]
#[error("Oneshot for receiving response from Chain API got cancelled")]
ChainApiSenderDropped(#[source] oneshot::Canceled),
+ #[fatal]
#[error("Retrieving response from Chain API unexpectedly failed with error: {0}")]
ChainApi(#[from] ChainApiError),
-}
-/// Non-fatal errors of this subsystem.
-#[derive(Debug, Error)]
-pub enum NonFatal {
- /// av-store will drop the sender on any error that happens.
+ // av-store will drop the sender on any error that happens.
#[error("Response channel to obtain chunk failed")]
QueryChunkResponseChannel(#[source] oneshot::Canceled),
- /// av-store will drop the sender on any error that happens.
+ // av-store will drop the sender on any error that happens.
#[error("Response channel to obtain available data failed")]
QueryAvailableDataResponseChannel(#[source] oneshot::Canceled),
- /// We tried accessing a session that was not cached.
+ // We tried accessing a session that was not cached.
#[error("Session {missing_session} is not cached, cached sessions: {available_sessions:?}.")]
NoSuchCachedSession { available_sessions: Vec<SessionIndex>, missing_session: SessionIndex },
- /// Sending request response failed (Can happen on timeouts for example).
+ // Sending request response failed (Can happen on timeouts for example).
#[error("Sending a request's response failed.")]
SendResponse,
- /// Fetching PoV failed with `RequestError`.
#[error("FetchPoV request error: {0}")]
FetchPoV(#[source] RequestError),
- /// Fetching PoV failed as the received PoV did not match the expected hash.
#[error("Fetched PoV does not match expected hash")]
UnexpectedPoV,
#[error("Remote responded with `NoSuchPoV`")]
NoSuchPoV,
- /// No validator with the index could be found in current session.
- #[error("Given validator index could not be found")]
+ #[error("Given validator index could not be found in current session")]
InvalidValidatorIndex,
-
- /// Errors coming from runtime::Runtime.
- #[error("Error while accessing runtime information: {0}")]
- Runtime(#[from] runtime::NonFatal),
}
-/// General result type for fatal/nonfatal errors.
+/// General result abbreviation type alias.
pub type Result<T> = std::result::Result<T, Error>;
-/// Results which are never fatal.
-pub type NonFatalResult<T> = std::result::Result<T, NonFatal>;
-
/// Utility for eating top level errors and log them.
///
/// We basically always want to try and continue on error. This utility function is meant to
/// consume top-level errors by simply logging them
-pub fn log_error(result: Result<()>, ctx: &'static str) -> std::result::Result<(), Fatal> {
- match result {
- Err(Error::Fatal(f)) => Err(f),
- Err(Error::NonFatal(error)) => {
- match error {
- NonFatal::UnexpectedPoV |
- NonFatal::InvalidValidatorIndex |
- NonFatal::NoSuchCachedSession { .. } |
- NonFatal::QueryAvailableDataResponseChannel(_) |
- NonFatal::QueryChunkResponseChannel(_) =>
- tracing::warn!(target: LOG_TARGET, error = %error, ctx),
- NonFatal::FetchPoV(_) |
- NonFatal::SendResponse |
- NonFatal::NoSuchPoV |
- NonFatal::Runtime(_) => tracing::debug!(target: LOG_TARGET, error = ?error, ctx),
+pub fn log_error(result: Result<()>, ctx: &'static str) -> std::result::Result<(), FatalError> {
+ match result.into_nested()? {
+ Ok(()) => Ok(()),
+ Err(jfyi) => {
+ match jfyi {
+ JfyiError::UnexpectedPoV |
+ JfyiError::InvalidValidatorIndex |
+ JfyiError::NoSuchCachedSession { .. } |
+ JfyiError::QueryAvailableDataResponseChannel(_) |
+ JfyiError::QueryChunkResponseChannel(_) =>
+ tracing::warn!(target: LOG_TARGET, error = %jfyi, ctx),
+ JfyiError::FetchPoV(_) |
+ JfyiError::SendResponse |
+ JfyiError::NoSuchPoV |
+ JfyiError::Runtime(_) => tracing::debug!(target: LOG_TARGET, error = ?jfyi, ctx),
}
Ok(())
},
- Ok(()) => Ok(()),
}
}
diff --git a/node/network/availability-distribution/src/lib.rs b/node/network/availability-distribution/src/lib.rs
index 2b0c76799edc..c09fb208df41 100644
--- a/node/network/availability-distribution/src/lib.rs
+++ b/node/network/availability-distribution/src/lib.rs
@@ -26,7 +26,7 @@ use polkadot_subsystem::{
/// Error and [`Result`] type for this subsystem.
mod error;
-use error::{log_error, Fatal, Result};
+use error::{log_error, FatalError, Result};
use polkadot_node_subsystem_util::runtime::RuntimeInfo;
@@ -95,7 +95,7 @@ impl AvailabilityDistributionSubsystem {
}
/// Start processing work as passed on from the Overseer.
- async fn run(self, mut ctx: Context) -> std::result::Result<(), Fatal>
+ async fn run(self, mut ctx: Context) -> std::result::Result<(), FatalError>
where
Context: SubsystemContext,
Context: overseer::SubsystemContext,
@@ -111,13 +111,13 @@ impl AvailabilityDistributionSubsystem {
"pov-receiver",
run_pov_receiver(sender.clone(), pov_req_receiver, metrics.clone()).boxed(),
)
- .map_err(Fatal::SpawnTask)?;
+ .map_err(FatalError::SpawnTask)?;
ctx.spawn(
"chunk-receiver",
run_chunk_receiver(sender, chunk_req_receiver, metrics.clone()).boxed(),
)
- .map_err(Fatal::SpawnTask)?;
+ .map_err(FatalError::SpawnTask)?;
}
loop {
@@ -132,9 +132,9 @@ impl AvailabilityDistributionSubsystem {
// Handle task messages sending:
let message = match action {
Either::Left(subsystem_msg) =>
- subsystem_msg.map_err(|e| Fatal::IncomingMessageChannel(e))?,
+ subsystem_msg.map_err(|e| FatalError::IncomingMessageChannel(e))?,
Either::Right(from_task) => {
- let from_task = from_task.ok_or(Fatal::RequesterExhausted)?;
+ let from_task = from_task.ok_or(FatalError::RequesterExhausted)?;
ctx.send_message(from_task).await;
continue
},
diff --git a/node/network/availability-distribution/src/pov_requester/mod.rs b/node/network/availability-distribution/src/pov_requester/mod.rs
index 1e860f56b9b0..1e5c8b25ec7a 100644
--- a/node/network/availability-distribution/src/pov_requester/mod.rs
+++ b/node/network/availability-distribution/src/pov_requester/mod.rs
@@ -33,7 +33,7 @@ use polkadot_subsystem::{
};
use crate::{
- error::{Fatal, NonFatal},
+ error::{Error, FatalError, JfyiError, Result},
metrics::{FAILED, NOT_FOUND, SUCCEEDED},
Metrics, LOG_TARGET,
};
@@ -48,7 +48,7 @@ pub async fn fetch_pov(
pov_hash: Hash,
tx: oneshot::Sender<PoV>,
metrics: Metrics,
-) -> super::Result<()>
+) -> Result<()>
where
Context: SubsystemContext,
{
@@ -56,7 +56,7 @@ where
let authority_id = info
.discovery_keys
.get(from_validator.0 as usize)
- .ok_or(NonFatal::InvalidValidatorIndex)?
+ .ok_or(JfyiError::InvalidValidatorIndex)?
.clone();
let (req, pending_response) = OutgoingRequest::new(
Recipient::Authority(authority_id.clone()),
@@ -77,7 +77,7 @@ where
"pov-fetcher",
fetch_pov_job(pov_hash, authority_id, pending_response.boxed(), span, tx, metrics).boxed(),
)
- .map_err(|e| Fatal::SpawnTask(e))?;
+ .map_err(|e| FatalError::SpawnTask(e))?;
Ok(())
}
@@ -85,7 +85,7 @@ where
async fn fetch_pov_job(
pov_hash: Hash,
authority_id: AuthorityDiscoveryId,
- pending_response: BoxFuture<'static, Result<PoVFetchingResponse, RequestError>>,
+ pending_response: BoxFuture<'static, std::result::Result<PoVFetchingResponse, RequestError>>,
span: jaeger::Span,
tx: oneshot::Sender<PoV>,
metrics: Metrics,
@@ -98,17 +98,17 @@ async fn fetch_pov_job(
/// Do the actual work of waiting for the response.
async fn do_fetch_pov(
pov_hash: Hash,
- pending_response: BoxFuture<'static, Result<PoVFetchingResponse, RequestError>>,
+ pending_response: BoxFuture<'static, std::result::Result<PoVFetchingResponse, RequestError>>,
_span: jaeger::Span,
tx: oneshot::Sender<PoV>,
metrics: Metrics,
-) -> std::result::Result<(), NonFatal> {
- let response = pending_response.await.map_err(NonFatal::FetchPoV);
+) -> Result<()> {
+ let response = pending_response.await.map_err(Error::FetchPoV);
let pov = match response {
Ok(PoVFetchingResponse::PoV(pov)) => pov,
Ok(PoVFetchingResponse::NoSuchPoV) => {
metrics.on_fetched_pov(NOT_FOUND);
- return Err(NonFatal::NoSuchPoV)
+ return Err(Error::NoSuchPoV)
},
Err(err) => {
metrics.on_fetched_pov(FAILED);
@@ -117,10 +117,10 @@ async fn do_fetch_pov(
};
if pov.hash() == pov_hash {
metrics.on_fetched_pov(SUCCEEDED);
- tx.send(pov).map_err(|_| NonFatal::SendResponse)
+ tx.send(pov).map_err(|_| Error::SendResponse)
} else {
metrics.on_fetched_pov(FAILED);
- Err(NonFatal::UnexpectedPoV)
+ Err(Error::UnexpectedPoV)
}
}
diff --git a/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/node/network/availability-distribution/src/requester/fetch_task/mod.rs
index a05ee0cd2d4c..480c3a889562 100644
--- a/node/network/availability-distribution/src/requester/fetch_task/mod.rs
+++ b/node/network/availability-distribution/src/requester/fetch_task/mod.rs
@@ -39,7 +39,7 @@ use polkadot_subsystem::{
};
use crate::{
- error::{Fatal, Result},
+ error::{FatalError, Result},
metrics::{Metrics, FAILED, SUCCEEDED},
requester::session_cache::{BadValidators, SessionInfo},
LOG_TARGET,
@@ -185,7 +185,7 @@ impl FetchTask {
let (handle, kill) = oneshot::channel();
ctx.spawn("chunk-fetcher", running.run(kill).boxed())
- .map_err(|e| Fatal::SpawnTask(e))?;
+ .map_err(|e| FatalError::SpawnTask(e))?;
Ok(FetchTask { live_in, state: FetchedState::Started(handle) })
} else {
diff --git a/node/network/availability-distribution/src/requester/mod.rs b/node/network/availability-distribution/src/requester/mod.rs
index 6a9a86321b12..14310b3384fd 100644
--- a/node/network/availability-distribution/src/requester/mod.rs
+++ b/node/network/availability-distribution/src/requester/mod.rs
@@ -39,8 +39,7 @@ use polkadot_subsystem::{
ActivatedLeaf, ActiveLeavesUpdate, LeafStatus, SubsystemContext,
};
-use super::{Metrics, Result, LOG_TARGET};
-use crate::error::Fatal;
+use super::{FatalError, Metrics, Result, LOG_TARGET};
#[cfg(test)]
mod tests;
@@ -324,6 +323,9 @@ where
})
.await;
- let ancestors = rx.await.map_err(Fatal::ChainApiSenderDropped)?.map_err(Fatal::ChainApi)?;
+ let ancestors = rx
+ .await
+ .map_err(FatalError::ChainApiSenderDropped)?
+ .map_err(FatalError::ChainApi)?;
Ok(ancestors)
}
diff --git a/node/network/availability-distribution/src/requester/session_cache.rs b/node/network/availability-distribution/src/requester/session_cache.rs
index 10fda8cd9c6a..7e3406e61769 100644
--- a/node/network/availability-distribution/src/requester/session_cache.rs
+++ b/node/network/availability-distribution/src/requester/session_cache.rs
@@ -26,7 +26,7 @@ use polkadot_primitives::v1::{
use polkadot_subsystem::SubsystemContext;
use crate::{
- error::{Error, NonFatal},
+ error::{Error, Result},
LOG_TARGET,
};
@@ -100,7 +100,7 @@ impl SessionCache {
runtime: &mut RuntimeInfo,
parent: Hash,
with_info: F,
- ) -> Result<Option<R>, Error>