diff --git a/Cargo.lock b/Cargo.lock
index a51ee4e6ce59..1ae7601a02de 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6210,6 +6210,7 @@ dependencies = [
name = "polkadot-availability-bitfield-distribution"
version = "0.9.25"
dependencies = [
+ "always-assert",
"assert_matches",
"bitvec 1.0.0",
"env_logger",
diff --git a/node/core/backing/src/error.rs b/node/core/backing/src/error.rs
index 604c6c0a0c37..2e10e75dad4e 100644
--- a/node/core/backing/src/error.rs
+++ b/node/core/backing/src/error.rs
@@ -17,9 +17,9 @@
use fatality::Nested;
use futures::channel::{mpsc, oneshot};
-use polkadot_node_subsystem::{messages::ValidationFailed, SubsystemError};
+use polkadot_node_subsystem::{messages::ValidationFailed, RuntimeApiError, SubsystemError};
use polkadot_node_subsystem_util::Error as UtilError;
-use polkadot_primitives::v2::BackedCandidate;
+use polkadot_primitives::v2::{BackedCandidate, ValidationCodeHash};
use crate::LOG_TARGET;
@@ -42,16 +42,28 @@ pub enum Error {
#[error("FetchPoV failed")]
FetchPoV,
+ #[error("Fetching validation code by hash failed {0:?}, {1:?}")]
+ FetchValidationCode(ValidationCodeHash, RuntimeApiError),
+
+ #[error("No validation code {0:?}")]
+ NoValidationCode(ValidationCodeHash),
+
+ #[error("Candidate rejected by prospective parachains subsystem")]
+ RejectedByProspectiveParachains,
+
#[fatal]
#[error("Failed to spawn background task")]
FailedToSpawnBackgroundTask,
- #[error("ValidateFromChainState channel closed before receipt")]
- ValidateFromChainState(#[source] oneshot::Canceled),
+ #[error("ValidateFromExhaustive channel closed before receipt")]
+ ValidateFromExhaustive(#[source] oneshot::Canceled),
#[error("StoreAvailableData channel closed before receipt")]
StoreAvailableData(#[source] oneshot::Canceled),
+ #[error("RuntimeAPISubsystem channel closed before receipt")]
+ RuntimeApiUnavailable(#[source] oneshot::Canceled),
+
#[error("a channel was closed before receipt in try_join!")]
JoinMultiple(#[source] oneshot::Canceled),
diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs
index a189b5955c89..d80dbcb65de8 100644
--- a/node/core/backing/src/lib.rs
+++ b/node/core/backing/src/lib.rs
@@ -14,44 +14,94 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-//! Implements a `CandidateBackingSubsystem`.
+//! Implements the `CandidateBackingSubsystem`.
+//!
+//! This subsystem maintains the entire responsibility of tracking parachain
+//! candidates which can be backed, as well as the issuance of statements
+//! about candidates when run on a validator node.
+//!
+//! There are two types of statements: `Seconded` and `Valid`.
+//! `Seconded` implies `Valid`, and nothing should be stated as
+//! `Valid` unless it has already been `Seconded`.
+//!
+//! Validators may only second candidates which fall under their own group
+//! assignment, and they may only second one candidate per depth per active leaf.
+//! Candidates which are stated as either `Seconded` or `Valid` by a majority of the
+//! assigned group of validators may be backed on-chain and proceed to the availability
+//! stage.
+//!
+//! Depth is a concept relating to asynchronous backing, in which short sub-chains
+//! of candidates are backed and extended off-chain by validators, and then placed
+//! asynchronously into blocks of the relay chain as those are authored and as the
+//! relay-chain state becomes ready for them. Asynchronous backing allows parachains to
+//! grow mostly independently from the state of the relay chain, which gives more time for
+//! parachains to be validated and thereby increases performance.
+//!
+//! Most of the work of asynchronous backing is handled by the Prospective Parachains
+//! subsystem. The 'depth' of a parachain block with respect to a relay chain block is
+//! a measure of how many parachain blocks are between the most recent included parachain block
+//! in the post-state of the relay-chain block and the candidate. For instance,
+//! a candidate that descends directly from the most recent parachain block in the relay-chain
+//! state has depth 0. The child of that candidate would have depth 1. And so on.
+//!
+//! The candidate backing subsystem keeps track of a set of 'active leaves' which are the
+//! most recent blocks in the relay-chain (which is in fact a tree) which could be built
+//! upon. Depth is always measured against active leaves, and the valid relay-parent that
+//! each candidate can have is determined by the active leaves. The Prospective Parachains
+//! subsystem enforces that the relay-parent increases monotonically, so that logic
+//! is not handled here. By communicating with the Prospective Parachains subsystem,
+//! this subsystem extrapolates an "implicit view" from the set of currently active leaves,
+//! which determines the set of all recent relay-chain block hashes which could be relay-parents
+//! for candidates backed in children of the active leaves.
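+//!
+//! As an illustrative sketch: given a single active leaf `L` whose allowed
+//! ancestry (as determined with the Prospective Parachains subsystem) reaches
+//! two blocks back, the implicit view would admit `L`, its parent, and its
+//! grandparent as relay-parents for candidates backed in children of `L`.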
+//!
+//! In fact, this subsystem relies on the Statement Distribution subsystem to prevent spam
+//! by enforcing the rule that each validator may second at most one candidate per depth per
+//! active leaf. This bounds the number of candidates that the system needs to consider and
+//! is not handled within this subsystem, except for candidates seconded locally.
+//!
+//! This subsystem also handles relay-chain heads which don't support asynchronous backing.
+//! For such active leaves, the only valid relay-parent is the leaf hash itself and the only
+//! allowed depth is 0.
#![deny(unused_crate_dependencies)]
use std::{
- collections::{HashMap, HashSet},
+ collections::{BTreeMap, HashMap, HashSet},
sync::Arc,
};
use bitvec::vec::BitVec;
use futures::{
channel::{mpsc, oneshot},
- FutureExt, SinkExt, StreamExt,
+ stream::FuturesOrdered,
+ FutureExt, SinkExt, StreamExt, TryFutureExt,
};
use error::{Error, FatalResult};
use polkadot_node_primitives::{
- AvailableData, InvalidCandidate, PoV, SignedDisputeStatement, SignedFullStatement, Statement,
- ValidationResult, BACKING_EXECUTION_TIMEOUT,
+ AvailableData, InvalidCandidate, PoV, SignedDisputeStatement, SignedFullStatementWithPVD,
+ StatementWithPVD, ValidationResult, BACKING_EXECUTION_TIMEOUT,
};
use polkadot_node_subsystem::{
- jaeger,
messages::{
AvailabilityDistributionMessage, AvailabilityStoreMessage, CandidateBackingMessage,
CandidateValidationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage,
- ProvisionableData, ProvisionerMessage, RuntimeApiRequest, StatementDistributionMessage,
+ HypotheticalDepthRequest, ProspectiveParachainsMessage, ProvisionableData,
+ ProvisionerMessage, RuntimeApiMessage, RuntimeApiRequest, StatementDistributionMessage,
},
- overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, SpawnedSubsystem,
- Stage, SubsystemError,
+ overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError,
};
use polkadot_node_subsystem_util::{
- self as util, request_from_runtime, request_session_index_for_child, request_validator_groups,
+ self as util,
+ backing_implicit_view::{FetchError as ImplicitViewFetchError, View as ImplicitView},
+ request_from_runtime, request_session_index_for_child, request_validator_groups,
request_validators, Validator,
};
use polkadot_primitives::v2::{
- BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CollatorId,
- CommittedCandidateReceipt, CoreIndex, CoreState, Hash, Id as ParaId, SessionIndex,
- SigningContext, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation,
+ BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt,
+ CommittedCandidateReceipt, CoreIndex, CoreState, Hash, Id as ParaId, PersistedValidationData,
+ SessionIndex, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature,
+ ValidityAttestation,
};
use sp_keystore::SyncCryptoStorePtr;
use statement_table::{
@@ -60,7 +110,7 @@ use statement_table::{
SignedStatement as TableSignedStatement, Statement as TableStatement,
Summary as TableSummary,
},
- Context as TableContextTrait, Table,
+ Config as TableConfig, Context as TableContextTrait, Table,
};
mod error;
@@ -108,9 +158,9 @@ impl std::fmt::Debug for ValidatedCandidateCommand {
impl ValidatedCandidateCommand {
fn candidate_hash(&self) -> CandidateHash {
match *self {
- ValidatedCandidateCommand::Second(Ok((ref candidate, _, _))) => candidate.hash(),
+ ValidatedCandidateCommand::Second(Ok(ref outputs)) => outputs.candidate.hash(),
ValidatedCandidateCommand::Second(Err(ref candidate)) => candidate.hash(),
- ValidatedCandidateCommand::Attest(Ok((ref candidate, _, _))) => candidate.hash(),
+ ValidatedCandidateCommand::Attest(Ok(ref outputs)) => outputs.candidate.hash(),
ValidatedCandidateCommand::Attest(Err(ref candidate)) => candidate.hash(),
ValidatedCandidateCommand::AttestNoPoV(candidate_hash) => candidate_hash,
}
@@ -147,6 +197,113 @@ where
}
}
+struct PerRelayParentState {
+ prospective_parachains_mode: ProspectiveParachainsMode,
+ /// The hash of the relay parent on top of which this job is doing its work.
+ parent: Hash,
+ /// The session index this corresponds to.
+ session_index: SessionIndex,
+ /// The `ParaId` assigned to the local validator at this relay parent.
+ assignment: Option<ParaId>,
+ /// The candidates that are backed by enough validators in their group, by hash.
+ backed: HashSet<CandidateHash>,
+ /// The table of candidates and statements under this relay-parent.
+ table: Table<TableContext>,
+ /// The table context, including groups.
+ table_context: TableContext,
+ /// We issued `Seconded` or `Valid` statements about these candidates.
+ issued_statements: HashSet<CandidateHash>,
+ /// These candidates are undergoing validation in the background.
+ awaiting_validation: HashSet<CandidateHash>,
+ /// Data needed for retrying in case of `ValidatedCandidateCommand::AttestNoPoV`.
+ fallbacks: HashMap<CandidateHash, AttestingData>,
+}
+
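+/// State tracked for a candidate relevant to the implicit view. See the
+/// `per_candidate` field of `State` for the guarantees attached to entries
+/// in this map.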
+struct PerCandidateState {
+ persisted_validation_data: PersistedValidationData,
+ seconded_locally: bool,
+ para_id: ParaId,
+ relay_parent: Hash,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+enum ProspectiveParachainsMode {
+ // v2 runtime API: no prospective parachains.
+ Disabled,
+ // vstaging runtime API: prospective parachains.
+ Enabled,
+}
+
+impl ProspectiveParachainsMode {
+ fn is_enabled(&self) -> bool {
+ self == &ProspectiveParachainsMode::Enabled
+ }
+}
+
+struct ActiveLeafState {
+ prospective_parachains_mode: ProspectiveParachainsMode,
+ /// The candidates seconded at various depths under this active
+ /// leaf. A candidate can only be seconded when its hypothetical
+ /// depth under every active leaf has an empty entry in this map.
+ ///
+ /// When prospective parachains are disabled, the only depth
+ /// which is allowed is '0'.
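+ ///
+ /// As an illustrative sketch: if this map holds `0 -> c0` and `2 -> c2`,
+ /// a new candidate whose only hypothetical depth under this leaf is 1 may
+ /// still be seconded, while one landing at depth 0 or 2 may not.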
+ seconded_at_depth: BTreeMap<usize, CandidateHash>,
+}
+
+/// The state of the subsystem.
+struct State {
+ /// The utility for managing the implicit and explicit views in a consistent way.
+ ///
+ /// We only feed leaves which have prospective parachains enabled to this view.
+ implicit_view: ImplicitView,
+ /// State tracked for all active leaves, whether or not they have prospective parachains
+ /// enabled.
+ per_leaf: HashMap<Hash, ActiveLeafState>,
+ /// State tracked for all relay-parents backing work is ongoing for. This includes
+ /// all active leaves.
+ ///
+ /// relay-parents fall into one of 3 categories.
+ /// 1. active leaves which do support prospective parachains
+ /// 2. active leaves which do not support prospective parachains
+ /// 3. relay-chain blocks which are ancestors of an active leaf and
+ /// do support prospective parachains.
+ ///
+ /// Relay-chain blocks which don't support prospective parachains are
+ /// never included in the fragment trees of active leaves which do.
+ ///
+ /// While it would be technically possible to support such leaves in
+ /// fragment trees, it only benefits the transition period when asynchronous
+ /// backing is being enabled and would complicate the code.
+ per_relay_parent: HashMap<Hash, PerRelayParentState>,
+ /// State tracked for all candidates relevant to the implicit view.
+ ///
+ /// This is guaranteed to have an entry for each candidate with a relay parent in the implicit
+ /// or explicit view for which a `Seconded` statement has been successfully imported.
+ per_candidate: HashMap<CandidateHash, PerCandidateState>,
+ /// A cloneable sender which is dispatched to background candidate validation tasks to inform
+ /// the main task of the result.
+ background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
+ /// The handle to the keystore used for signing.
+ keystore: SyncCryptoStorePtr,
+}
+
+impl State {
+ fn new(
+ background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
+ keystore: SyncCryptoStorePtr,
+ ) -> Self {
+ State {
+ implicit_view: ImplicitView::default(),
+ per_leaf: HashMap::default(),
+ per_relay_parent: HashMap::default(),
+ per_candidate: HashMap::new(),
+ background_validation_tx,
+ keystore,
+ }
+ }
+}
+
#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
async fn run<Context>(
mut ctx: Context,
@@ -154,18 +311,11 @@ async fn run<Context>(
metrics: Metrics,
) -> FatalResult<()> {
let (background_validation_tx, mut background_validation_rx) = mpsc::channel(16);
- let mut jobs = HashMap::new();
+ let mut state = State::new(background_validation_tx, keystore);
loop {
- let res = run_iteration(
- &mut ctx,
- keystore.clone(),
- &metrics,
- &mut jobs,
- background_validation_tx.clone(),
- &mut background_validation_rx,
- )
- .await;
+ let res =
+ run_iteration(&mut ctx, &mut state, &metrics, &mut background_validation_rx).await;
match res {
Ok(()) => break,
@@ -179,10 +329,8 @@ async fn run(
#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
async fn run_iteration<Context>(
ctx: &mut Context,
- keystore: SyncCryptoStorePtr,
+ state: &mut State,
metrics: &Metrics,
- jobs: &mut HashMap<Hash, JobAndSpan<Context>>,
- background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
background_validation_rx: &mut mpsc::Receiver<(Hash, ValidatedCandidateCommand)>,
) -> Result<(), Error> {
loop {
@@ -191,9 +339,10 @@ async fn run_iteration<Context>(
if let Some((relay_parent, command)) = validated_command {
handle_validated_candidate_command(
&mut *ctx,
- jobs,
+ state,
relay_parent,
command,
+ metrics,
).await?;
} else {
panic!("background_validation_tx always alive at this point; qed");
@@ -201,243 +350,24 @@ async fn run_iteration<Context>(
}
from_overseer = ctx.recv().fuse() => {
match from_overseer? {
- FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => handle_active_leaves_update(
- &mut *ctx,
- update,
- jobs,
- &keystore,
- &background_validation_tx,
- &metrics,
- ).await?,
+ FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => {
+ handle_active_leaves_update(
+ &mut *ctx,
+ update,
+ state,
+ ).await?;
+ }
FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}
FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()),
- FromOrchestra::Communication { msg } => handle_communication(&mut *ctx, jobs, msg).await?,
+ FromOrchestra::Communication { msg } => {
+ handle_communication(&mut *ctx, state, msg, metrics).await?;
+ }
}
}
)
}
}
-#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
-async fn handle_validated_candidate_command<Context>(
- ctx: &mut Context,
- jobs: &mut HashMap<Hash, JobAndSpan<Context>>,
- relay_parent: Hash,
- command: ValidatedCandidateCommand,
-) -> Result<(), Error> {
- if let Some(job) = jobs.get_mut(&relay_parent) {
- job.job.handle_validated_candidate_command(&job.span, ctx, command).await?;
- } else {
- // simple race condition; can be ignored - this relay-parent
- // is no longer relevant.
- }
-
- Ok(())
-}
-
-#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
-async fn handle_communication<Context>(
- ctx: &mut Context,
- jobs: &mut HashMap<Hash, JobAndSpan<Context>>,
- message: CandidateBackingMessage,
-) -> Result<(), Error> {
- match message {
- CandidateBackingMessage::Second(relay_parent, candidate, pov) => {
- if let Some(job) = jobs.get_mut(&relay_parent) {
- job.job.handle_second_msg(&job.span, ctx, candidate, pov).await?;
- }
- },
- CandidateBackingMessage::Statement(relay_parent, statement) => {
- if let Some(job) = jobs.get_mut(&relay_parent) {
- job.job.handle_statement_message(&job.span, ctx, statement).await?;
- }
- },
- CandidateBackingMessage::GetBackedCandidates(relay_parent, requested_candidates, tx) =>
- if let Some(job) = jobs.get_mut(&relay_parent) {
- job.job.handle_get_backed_candidates_message(requested_candidates, tx)?;
- },
- }
-
- Ok(())
-}
-
-#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
-async fn handle_active_leaves_update<Context>(
- ctx: &mut Context,
- update: ActiveLeavesUpdate,
- jobs: &mut HashMap<Hash, JobAndSpan<Context>>,
- keystore: &SyncCryptoStorePtr,
- background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
- metrics: &Metrics,
-) -> Result<(), Error> {
- for deactivated in update.deactivated {
- jobs.remove(&deactivated);
- }
-
- let leaf = match update.activated {
- None => return Ok(()),
- Some(a) => a,
- };
-
- macro_rules! try_runtime_api {
- ($x: expr) => {
- match $x {
- Ok(x) => x,
- Err(e) => {
- gum::warn!(
- target: LOG_TARGET,
- err = ?e,
- "Failed to fetch runtime API data for job",
- );
-
- // We can't do candidate validation work if we don't have the
- // requisite runtime API data. But these errors should not take
- // down the node.
- return Ok(());
- }
- }
- }
- }
-
- let parent = leaf.hash;
- let span = PerLeafSpan::new(leaf.span, "backing");
- let _span = span.child("runtime-apis");
-
- let (validators, groups, session_index, cores) = futures::try_join!(
- request_validators(parent, ctx.sender()).await,
- request_validator_groups(parent, ctx.sender()).await,
- request_session_index_for_child(parent, ctx.sender()).await,
- request_from_runtime(parent, ctx.sender(), |tx| {
- RuntimeApiRequest::AvailabilityCores(tx)
- },)
- .await,
- )
- .map_err(Error::JoinMultiple)?;
-
- let validators: Vec<_> = try_runtime_api!(validators);
- let (validator_groups, group_rotation_info) = try_runtime_api!(groups);
- let session_index = try_runtime_api!(session_index);
- let cores = try_runtime_api!(cores);
-
- drop(_span);
- let _span = span.child("validator-construction");
-
- let signing_context = SigningContext { parent_hash: parent, session_index };
- let validator =
- match Validator::construct(&validators, signing_context.clone(), keystore.clone()).await {
- Ok(v) => Some(v),
- Err(util::Error::NotAValidator) => None,
- Err(e) => {
- gum::warn!(
- target: LOG_TARGET,
- err = ?e,
- "Cannot participate in candidate backing",
- );
-
- return Ok(())
- },
- };
-
- drop(_span);
- let mut assignments_span = span.child("compute-assignments");
-
- let mut groups = HashMap::new();
-
- let n_cores = cores.len();
-
- let mut assignment = None;
-
- for (idx, core) in cores.into_iter().enumerate() {
- // Ignore prospective assignments on occupied cores for the time being.
- if let CoreState::Scheduled(scheduled) = core {
- let core_index = CoreIndex(idx as _);
- let group_index = group_rotation_info.group_for_core(core_index, n_cores);
- if let Some(g) = validator_groups.get(group_index.0 as usize) {
- if validator.as_ref().map_or(false, |v| g.contains(&v.index())) {
- assignment = Some((scheduled.para_id, scheduled.collator));
- }
- groups.insert(scheduled.para_id, g.clone());
- }
- }
- }
-
- let table_context = TableContext { groups, validators, validator };
-
- let (assignment, required_collator) = match assignment {
- None => {
- assignments_span.add_string_tag("assigned", "false");
- (None, None)
- },
- Some((assignment, required_collator)) => {
- assignments_span.add_string_tag("assigned", "true");
- assignments_span.add_para_id(assignment);
- (Some(assignment), required_collator)
- },
- };
-
- drop(assignments_span);
- let _span = span.child("wait-for-job");
-
- let job = CandidateBackingJob {
- parent,
- session_index,
- assignment,
- required_collator,
- issued_statements: HashSet::new(),
- awaiting_validation: HashSet::new(),
- fallbacks: HashMap::new(),
- seconded: None,
- unbacked_candidates: HashMap::new(),
- backed: HashSet::new(),
- keystore: keystore.clone(),
- table: Table::default(),
- table_context,
- background_validation_tx: background_validation_tx.clone(),
- metrics: metrics.clone(),
- _marker: std::marker::PhantomData,
- };
-
- jobs.insert(parent, JobAndSpan { job, span });
-
- Ok(())
-}
-
-struct JobAndSpan<Context> {
- job: CandidateBackingJob<Context>,
- span: PerLeafSpan,
-}
-
-/// Holds all data needed for candidate backing job operation.
-struct CandidateBackingJob<Context> {
- /// The hash of the relay parent on top of which this job is doing it's work.
- parent: Hash,
- /// The session index this corresponds to.
- session_index: SessionIndex,
- /// The `ParaId` assigned to this validator
- assignment: Option<ParaId>,
- /// The collator required to author the candidate, if any.
- required_collator: Option<CollatorId>,
- /// Spans for all candidates that are not yet backable.
- unbacked_candidates: HashMap<CandidateHash, jaeger::Span>,
- /// We issued `Seconded`, `Valid` or `Invalid` statements on about these candidates.
- issued_statements: HashSet<CandidateHash>,
- /// These candidates are undergoing validation in the background.
- awaiting_validation: HashSet<CandidateHash>,
- /// Data needed for retrying in case of `ValidatedCandidateCommand::AttestNoPoV`.
- fallbacks: HashMap<CandidateHash, (AttestingData, Option<jaeger::Span>)>,
- /// `Some(h)` if this job has already issued `Seconded` statement for some candidate with `h` hash.
- seconded: Option<CandidateHash>,
- /// The candidates that are includable, by hash. Each entry here indicates
- /// that we've sent the provisioner the backed candidate.
- backed: HashSet<CandidateHash>,
- keystore: SyncCryptoStorePtr,
- table: Table<TableContext>,
- table_context: TableContext,
- background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
- metrics: Metrics,
- _marker: std::marker::PhantomData<Context>,
-}
-
/// In case a backing validator does not provide a PoV, we need to retry with other backing
/// validators.
///
@@ -499,10 +429,10 @@ struct InvalidErasureRoot;
// It looks like it's not possible to do an `impl From` given the current state of
// the code. So this does the necessary conversion.
-fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement {
+fn primitive_statement_to_table(s: &SignedFullStatementWithPVD) -> TableSignedStatement {
let statement = match s.payload() {
- Statement::Seconded(c) => TableStatement::Seconded(c.clone()),
- Statement::Valid(h) => TableStatement::Valid(h.clone()),
+ StatementWithPVD::Seconded(c, _) => TableStatement::Seconded(c.clone()),
+ StatementWithPVD::Valid(h) => TableStatement::Valid(h.clone()),
};
TableSignedStatement {
@@ -586,21 +516,17 @@ async fn store_available_data(
//
// This will compute the erasure root internally and compare it to the expected erasure root.
// This returns `Err()` iff there is an internal error. Otherwise, it returns either `Ok(Ok(()))` or `Ok(Err(_))`.
-
async fn make_pov_available(
sender: &mut impl overseer::CandidateBackingSenderTrait,
n_validators: usize,
pov: Arc<PoV>,
candidate_hash: CandidateHash,
- validation_data: polkadot_primitives::v2::PersistedValidationData,
+ validation_data: PersistedValidationData,
expected_erasure_root: Hash,
- span: Option<&jaeger::Span>,
) -> Result<Result<(), InvalidErasureRoot>, Error> {
let available_data = AvailableData { pov, validation_data };
{
- let _span = span.as_ref().map(|s| s.child("erasure-coding").with_candidate(candidate_hash));
-
let chunks = erasure_coding::obtain_chunks_v1(n_validators, &available_data)?;
let branches = erasure_coding::branches(chunks.as_ref());
@@ -612,8 +538,6 @@ async fn make_pov_available(
}
{
- let _span = span.as_ref().map(|s| s.child("store-data").with_candidate(candidate_hash));
-
store_available_data(sender, n_validators as u32, candidate_hash, available_data).await?;
}
@@ -644,13 +568,17 @@ async fn request_pov(
async fn request_candidate_validation(
sender: &mut impl overseer::CandidateBackingSenderTrait,
+ pvd: PersistedValidationData,
+ code: ValidationCode,
candidate_receipt: CandidateReceipt,
pov: Arc<PoV>,
) -> Result<ValidationResult, Error> {
let (tx, rx) = oneshot::channel();
sender
- .send_message(CandidateValidationMessage::ValidateFromChainState(
+ .send_message(CandidateValidationMessage::ValidateFromExhaustive(
+ pvd,
+ code,
candidate_receipt,
pov,
BACKING_EXECUTION_TIMEOUT,
@@ -661,21 +589,26 @@ async fn request_candidate_validation(
match rx.await {
Ok(Ok(validation_result)) => Ok(validation_result),
Ok(Err(err)) => Err(Error::ValidationFailed(err)),
- Err(err) => Err(Error::ValidateFromChainState(err)),
+ Err(err) => Err(Error::ValidateFromExhaustive(err)),
}
}
-type BackgroundValidationResult =
- Result<(CandidateReceipt, CandidateCommitments, Arc<PoV>), CandidateReceipt>;
+struct BackgroundValidationOutputs {
+ candidate: CandidateReceipt,
+ commitments: CandidateCommitments,
+ persisted_validation_data: PersistedValidationData,
+}
+
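+/// On success, background validation yields the outputs needed to second or
+/// attest the candidate; on failure, it yields the candidate receipt so that
+/// the candidate can be reported to the collator protocol as invalid.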
+type BackgroundValidationResult = Result<BackgroundValidationOutputs, CandidateReceipt>;
struct BackgroundValidationParams<S: overseer::CandidateBackingSenderTrait, F> {
sender: S,
tx_command: mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
candidate: CandidateReceipt,
relay_parent: Hash,
+ persisted_validation_data: PersistedValidationData,
pov: PoVData,
n_validators: usize,
- span: Option<jaeger::Span>,
make_command: F,
}
@@ -690,16 +623,33 @@ async fn validate_and_make_available(
mut tx_command,
candidate,
relay_parent,
+ persisted_validation_data,
pov,
n_validators,
- span,
make_command,
} = params;
+ let validation_code = {
+ let validation_code_hash = candidate.descriptor().validation_code_hash;
+ let (tx, rx) = oneshot::channel();
+ sender
+ .send_message(RuntimeApiMessage::Request(
+ relay_parent,
+ RuntimeApiRequest::ValidationCodeByHash(validation_code_hash, tx),
+ ))
+ .await;
+
+ let code = rx.await.map_err(Error::RuntimeApiUnavailable)?;
+ match code {
+ Err(e) => return Err(Error::FetchValidationCode(validation_code_hash, e)),
+ Ok(None) => return Err(Error::NoValidationCode(validation_code_hash)),
+ Ok(Some(c)) => c,
+ }
+ };
+
let pov = match pov {
PoVData::Ready(pov) => pov,
- PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } => {
- let _span = span.as_ref().map(|s| s.child("request-pov"));
+ PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } =>
match request_pov(&mut sender, relay_parent, from_validator, candidate_hash, pov_hash)
.await
{
@@ -715,17 +665,18 @@ async fn validate_and_make_available(
},
Err(err) => return Err(err),
Ok(pov) => pov,
- }
- },
+ },
};
let v = {
- let _span = span.as_ref().map(|s| {
- s.child("request-validation")
- .with_pov(&pov)
- .with_para_id(candidate.descriptor().para_id)
- });
- request_candidate_validation(&mut sender, candidate.clone(), pov.clone()).await?
+ request_candidate_validation(
+ &mut sender,
+ persisted_validation_data,
+ validation_code,
+ candidate.clone(),
+ pov.clone(),
+ )
+ .await?
};
let res = match v {
@@ -741,14 +692,17 @@ async fn validate_and_make_available(
n_validators,
pov.clone(),
candidate.hash(),
- validation_data,
+ validation_data.clone(),
candidate.descriptor.erasure_root,
- span.as_ref(),
)
.await?;
match erasure_valid {
- Ok(()) => Ok((candidate, commitments, pov.clone())),
+ Ok(()) => Ok(BackgroundValidationOutputs {
+ candidate,
+ commitments,
+ persisted_validation_data: validation_data,
+ }),
Err(InvalidErasureRoot) => {
gum::debug!(
target: LOG_TARGET,
@@ -786,626 +740,1231 @@ async fn validate_and_make_available(
struct ValidatorIndexOutOfBounds;
#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
-impl<Context> CandidateBackingJob<Context> {
- async fn handle_validated_candidate_command(
- &mut self,
- root_span: &jaeger::Span,
- ctx: &mut Context,
- command: ValidatedCandidateCommand,
- ) -> Result<(), Error> {
- let candidate_hash = command.candidate_hash();
- self.awaiting_validation.remove(&candidate_hash);
-
- match command {
- ValidatedCandidateCommand::Second(res) => {
- match res {
- Ok((candidate, commitments, _)) => {
- // sanity check.
- if self.seconded.is_none() &&
- !self.issued_statements.contains(&candidate_hash)
- {
- self.seconded = Some(candidate_hash);
- self.issued_statements.insert(candidate_hash);
- self.metrics.on_candidate_seconded();
-
- let statement = Statement::Seconded(CommittedCandidateReceipt {
- descriptor: candidate.descriptor.clone(),
- commitments,
- });
- if let Some(stmt) = self
- .sign_import_and_distribute_statement(ctx, statement, root_span)
- .await?
- {
- ctx.send_message(CollatorProtocolMessage::Seconded(
- self.parent,
- stmt,
- ))
- .await;
- }
- }
- },
- Err(candidate) => {
- ctx.send_message(CollatorProtocolMessage::Invalid(self.parent, candidate))
- .await;
- },
- }
- },
- ValidatedCandidateCommand::Attest(res) => {
- // We are done - avoid new validation spawns:
- self.fallbacks.remove(&candidate_hash);
- // sanity check.
- if !self.issued_statements.contains(&candidate_hash) {
- if res.is_ok() {
- let statement = Statement::Valid(candidate_hash);
- self.sign_import_and_distribute_statement(ctx, statement, &root_span)
- .await?;
- }
- self.issued_statements.insert(candidate_hash);
- }
- },
- ValidatedCandidateCommand::AttestNoPoV(candidate_hash) => {
- if let Some((attesting, span)) = self.fallbacks.get_mut(&candidate_hash) {
- if let Some(index) = attesting.backing.pop() {
- attesting.from_validator = index;
- // Ok, another try:
- let c_span = span.as_ref().map(|s| s.child("try"));
- let attesting = attesting.clone();
- self.kick_off_validation_work(ctx, attesting, c_span).await?
- }
- } else {
- gum::warn!(
- target: LOG_TARGET,
- "AttestNoPoV was triggered without fallback being available."
- );
- debug_assert!(false);
- }
+async fn handle_communication<Context>(
+ ctx: &mut Context,
+ state: &mut State,
+ message: CandidateBackingMessage,
+ metrics: &Metrics,
+) -> Result<(), Error> {
+ match message {
+ CandidateBackingMessage::Second(_relay_parent, candidate, pvd, pov) => {
+ handle_second_message(ctx, state, candidate, pvd, pov, metrics).await?;
+ },
+ CandidateBackingMessage::Statement(relay_parent, statement) => {
+ handle_statement_message(ctx, state, relay_parent, statement, metrics).await?;
+ },
+ CandidateBackingMessage::GetBackedCandidates(relay_parent, requested_candidates, tx) =>
+ if let Some(rp_state) = state.per_relay_parent.get(&relay_parent) {
+ handle_get_backed_candidates_message(rp_state, requested_candidates, tx, metrics)?;
},
- }
-
- Ok(())
}
- async fn background_validate_and_make_available(
- &mut self,
- ctx: &mut Context,
- params: BackgroundValidationParams<
- impl overseer::CandidateBackingSenderTrait,
- impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Send + 'static + Sync,
- >,
- ) -> Result<(), Error> {
- let candidate_hash = params.candidate.hash();
- if self.awaiting_validation.insert(candidate_hash) {
- // spawn background task.
- let bg = async move {
- if let Err(e) = validate_and_make_available(params).await {
- if let Error::BackgroundValidationMpsc(error) = e {
- gum::debug!(
- target: LOG_TARGET,
- ?error,
- "Mpsc background validation mpsc died during validation- leaf no longer active?"
- );
- } else {
- gum::error!(
+ Ok(())
+}
+
+async fn prospective_parachains_mode<Context>(
+ _ctx: &mut Context,
+ _leaf_hash: Hash,
+) -> ProspectiveParachainsMode {
+ // TODO [now]: this should be a runtime API version call
+ // cc https://github.com/paritytech/substrate/discussions/11338
+ ProspectiveParachainsMode::Disabled
+}
+
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn handle_active_leaves_update<Context>(
+ ctx: &mut Context,
+ update: ActiveLeavesUpdate,
+ state: &mut State,
+) -> Result<(), Error> {
+ enum LeafHasProspectiveParachains {
+ Enabled(Result<Vec<Hash>, ImplicitViewFetchError>),
+ Disabled,
+ }
+
+ // Activate in implicit view before deactivating, per the docs
+ // on ImplicitView; this is more efficient.
+ let res = if let Some(leaf) = update.activated {
+ // Only activate in implicit view if prospective
+ // parachains are enabled.
+ let mode = prospective_parachains_mode(ctx, leaf.hash).await;
+
+ let leaf_hash = leaf.hash;
+ Some((
+ leaf,
+ match mode {
+ ProspectiveParachainsMode::Disabled => LeafHasProspectiveParachains::Disabled,
+ ProspectiveParachainsMode::Enabled => LeafHasProspectiveParachains::Enabled(
+ state.implicit_view.activate_leaf(ctx.sender(), leaf_hash).await,
+ ),
+ },
+ ))
+ } else {
+ None
+ };
+
+ for deactivated in update.deactivated {
+ state.per_leaf.remove(&deactivated);
+ state.implicit_view.deactivate_leaf(deactivated);
+ }
+
+ // clean up `per_relay_parent` according to the ancestry
+ // of leaves. we do this so we can clean up candidates right after.
+ //
+ // when prospective parachains are disabled, the implicit view is empty,
+ // which means we'll clean up everything. This is correct.
+ {
+ let remaining: HashSet<_> = state.implicit_view.all_allowed_relay_parents().collect();
+ state.per_relay_parent.retain(|r, _| remaining.contains(&r));
+ }
+
+ // clean up `per_candidate` according to which relay-parents
+ // are known.
+ //
+ // when prospective parachains are disabled, we clean up all candidates
+ // because we've cleaned up all relay parents. this is correct.
+ state
+ .per_candidate
+ .retain(|_, pc| state.per_relay_parent.contains_key(&pc.relay_parent));
+
+ // Get relay parents which might be fresh but might be known already
+ // that are explicit or implicit from the new active leaf.
+ let fresh_relay_parents = match res {
+ None => return Ok(()),
+ Some((leaf, LeafHasProspectiveParachains::Disabled)) => {
+ // defensive in this case - for enabled, this manifests as an error.
+ if state.per_leaf.contains_key(&leaf.hash) {
+ return Ok(())
+ }
+
+ state.per_leaf.insert(
+ leaf.hash,
+ ActiveLeafState {
+ prospective_parachains_mode: ProspectiveParachainsMode::Disabled,
+ // This is empty because the only allowed relay-parent and depth
+ // when prospective parachains are disabled is the leaf hash and 0,
+ // respectively. We've just learned about the leaf hash, so we cannot
+ // have any candidates seconded with it as a relay-parent yet.
+ seconded_at_depth: BTreeMap::new(),
+ },
+ );
+
+ vec![leaf.hash]
+ },
+ Some((leaf, LeafHasProspectiveParachains::Enabled(Ok(_)))) => {
+ let fresh_relay_parents =
+ state.implicit_view.known_allowed_relay_parents_under(&leaf.hash, None);
+
+ // At this point, all candidates outside of the implicit view
+ // have been cleaned up. For all which remain, which we've seconded,
+ // we ask the prospective parachains subsystem where they land in the fragment
+ // tree for the given active leaf. This comprises our `seconded_at_depth`.
+
+ let remaining_seconded = state
+ .per_candidate
+ .iter()
+ .filter(|(_, cd)| cd.seconded_locally)
+ .map(|(c_hash, cd)| (*c_hash, cd.para_id));
+
+ // one-to-one correspondence to remaining_seconded
+ let mut membership_answers = FuturesOrdered::new();
+
+ for (candidate_hash, para_id) in remaining_seconded {
+ let (tx, rx) = oneshot::channel();
+ membership_answers.push(rx.map_ok(move |membership| (candidate_hash, membership)));
+
+ ctx.send_message(ProspectiveParachainsMessage::GetTreeMembership(
+ para_id,
+ candidate_hash,
+ tx,
+ ))
+ .await;
+ }
+
+ let mut seconded_at_depth = BTreeMap::new();
+ while let Some(response) = membership_answers.next().await {
+ match response {
+ Err(oneshot::Canceled) => {
+ gum::warn!(
target: LOG_TARGET,
- "Failed to validate and make available: {:?}",
- e
+ "Prospective parachains subsystem unreachable for membership request",
);
- }
+
+ continue
+ },
+ Ok((candidate_hash, membership)) => {
+ // This request gives membership in all fragment trees. We have some
+ // wasted data here, and it can be optimized if it proves
+ // relevant to performance.
+ if let Some((_, depths)) =
+ membership.into_iter().find(|(leaf_hash, _)| leaf_hash == &leaf.hash)
+ {
+ for depth in depths {
+ seconded_at_depth.insert(depth, candidate_hash);
+ }
+ }
+ },
}
- };
+ }
- ctx.spawn("backing-validation", bg.boxed())
- .map_err(|_| Error::FailedToSpawnBackgroundTask)?;
- }
+ state.per_leaf.insert(
+ leaf.hash,
+ ActiveLeafState {
+ prospective_parachains_mode: ProspectiveParachainsMode::Enabled,
+ seconded_at_depth,
+ },
+ );
- Ok(())
- }
+ match fresh_relay_parents {
+ Some(f) => f.to_vec(),
+ None => {
+ gum::warn!(
+ target: LOG_TARGET,
+ leaf_hash = ?leaf.hash,
+ "Implicit view gave no relay-parents"
+ );
+
+ vec![leaf.hash]
+ },
+ }
+ },
+ Some((leaf, LeafHasProspectiveParachains::Enabled(Err(e)))) => {
+ gum::debug!(
+ target: LOG_TARGET,
+ leaf_hash = ?leaf.hash,
+ err = ?e,
+ "Failed to load implicit view for leaf."
+ );
- /// Kick off background validation with intent to second.
- async fn validate_and_second(
- &mut self,
- parent_span: &jaeger::Span,
- root_span: &jaeger::Span,
- ctx: &mut Context,
- candidate: &CandidateReceipt,
- pov: Arc<PoV>,
- ) -> Result<(), Error> {
- // Check that candidate is collated by the right collator.
- if self
- .required_collator
- .as_ref()
- .map_or(false, |c| c != &candidate.descriptor().collator)
- {
- ctx.send_message(CollatorProtocolMessage::Invalid(self.parent, candidate.clone()))
- .await;
return Ok(())
+ },
+ };
+
+ // add entries in `per_relay_parent`. for all new relay-parents.
+ for maybe_new in fresh_relay_parents {
+ if state.per_relay_parent.contains_key(&maybe_new) {
+ continue
}
- let candidate_hash = candidate.hash();
- let mut span = self.get_unbacked_validation_child(
- root_span,
- candidate_hash,
- candidate.descriptor().para_id,
- );
+ let mode = match state.per_leaf.get(&maybe_new) {
+ None => {
+ // If the relay-parent isn't a leaf itself,
+ // then it is guaranteed by the prospective parachains
+ // subsystem that it is an ancestor of a leaf which
+ // has prospective parachains enabled and that the
+ // block itself did.
+ ProspectiveParachainsMode::Enabled
+ },
+ Some(l) => l.prospective_parachains_mode,
+ };
- span.as_mut().map(|span| span.add_follows_from(parent_span));
+ // construct a `PerRelayParent` from the runtime API
+ // and insert it.
+ let per = construct_per_relay_parent_state(ctx, maybe_new, &state.keystore, mode).await?;
- gum::debug!(
- target: LOG_TARGET,
- candidate_hash = ?candidate_hash,
- candidate_receipt = ?candidate,
- "Validate and second candidate",
- );
+ if let Some(per) = per {
+ state.per_relay_parent.insert(maybe_new, per);
+ }
+ }
- let bg_sender = ctx.sender().clone();
- self.background_validate_and_make_available(
- ctx,
- BackgroundValidationParams {
- sender: bg_sender,
- tx_command: self.background_validation_tx.clone(),
- candidate: candidate.clone(),
- relay_parent: self.parent,
- pov: PoVData::Ready(pov),
- n_validators: self.table_context.validators.len(),
- span,
- make_command: ValidatedCandidateCommand::Second,
- },
- )
- .await?;
+ Ok(())
+}
- Ok(())
- }
+/// Load the data necessary to do backing work on top of a relay-parent.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn construct_per_relay_parent_state<Context>(
+ ctx: &mut Context,
+ relay_parent: Hash,
+ keystore: &SyncCryptoStorePtr,
+ mode: ProspectiveParachainsMode,
+) -> Result<Option<PerRelayParentState>, Error> {
+ macro_rules! try_runtime_api {
+ ($x: expr) => {
+ match $x {
+ Ok(x) => x,
+ Err(e) => {
+ gum::warn!(
+ target: LOG_TARGET,
+ err = ?e,
+ "Failed to fetch runtime API data for job",
+ );
- async fn sign_import_and_distribute_statement(
- &mut self,
- ctx: &mut Context,
- statement: Statement,
- root_span: &jaeger::Span,
- ) -> Result<Option<SignedFullStatement>, Error> {
- if let Some(signed_statement) = self.sign_statement(statement).await {
- self.import_statement(ctx, &signed_statement, root_span).await?;
- let smsg = StatementDistributionMessage::Share(self.parent, signed_statement.clone());
- ctx.send_unbounded_message(smsg);
-
- Ok(Some(signed_statement))
- } else {
- Ok(None)
+ // We can't do candidate validation work if we don't have the
+ // requisite runtime API data. But these errors should not take
+ // down the node.
+ return Ok(None);
+ }
+ }
}
}
- /// Check if there have happened any new misbehaviors and issue necessary messages.
- fn issue_new_misbehaviors(&mut self, sender: &mut impl overseer::CandidateBackingSenderTrait) {
- // collect the misbehaviors to avoid double mutable self borrow issues
- let misbehaviors: Vec<_> = self.table.drain_misbehaviors().collect();
- for (validator_id, report) in misbehaviors {
- // The provisioner waits on candidate-backing, which means
- // that we need to send unbounded messages to avoid cycles.
- //
- // Misbehaviors are bounded by the number of validators and
- // the block production protocol.
- sender.send_unbounded_message(ProvisionerMessage::ProvisionableData(
- self.parent,
- ProvisionableData::MisbehaviorReport(self.parent, validator_id, report),
- ));
+ let parent = relay_parent;
+
+ let (validators, groups, session_index, cores) = futures::try_join!(
+ request_validators(parent, ctx.sender()).await,
+ request_validator_groups(parent, ctx.sender()).await,
+ request_session_index_for_child(parent, ctx.sender()).await,
+ request_from_runtime(parent, ctx.sender(), |tx| {
+ RuntimeApiRequest::AvailabilityCores(tx)
+ },)
+ .await,
+ )
+ .map_err(Error::JoinMultiple)?;
+
+ let validators: Vec<_> = try_runtime_api!(validators);
+ let (validator_groups, group_rotation_info) = try_runtime_api!(groups);
+ let session_index = try_runtime_api!(session_index);
+ let cores = try_runtime_api!(cores);
+
+ let signing_context = SigningContext { parent_hash: parent, session_index };
+ let validator =
+ match Validator::construct(&validators, signing_context.clone(), keystore.clone()).await {
+ Ok(v) => Some(v),
+ Err(util::Error::NotAValidator) => None,
+ Err(e) => {
+ gum::warn!(
+ target: LOG_TARGET,
+ err = ?e,
+ "Cannot participate in candidate backing",
+ );
+
+ return Ok(None)
+ },
+ };
+
+ let mut groups = HashMap::new();
+ let n_cores = cores.len();
+ let mut assignment = None;
+
+ for (idx, core) in cores.into_iter().enumerate() {
+ // Ignore prospective assignments on occupied cores for the time being.
+ if let CoreState::Scheduled(scheduled) = core {
+ let core_index = CoreIndex(idx as _);
+ let group_index = group_rotation_info.group_for_core(core_index, n_cores);
+ if let Some(g) = validator_groups.get(group_index.0 as usize) {
+ if validator.as_ref().map_or(false, |v| g.contains(&v.index())) {
+ assignment = Some((scheduled.para_id, scheduled.collator));
+ }
+ groups.insert(scheduled.para_id, g.clone());
+ }
}
}
- /// Import a statement into the statement table and return the summary of the import.
- async fn import_statement(
- &mut self,
- ctx: &mut Context,
- statement: &SignedFullStatement,
- root_span: &jaeger::Span,
- ) -> Result<Option<TableSummary>, Error> {
- gum::debug!(
- target: LOG_TARGET,
- statement = ?statement.payload().to_compact(),
- validator_index = statement.validator_index().0,
- "Importing statement",
- );
+ let table_context = TableContext { groups, validators, validator };
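+ // Under asynchronous backing, a validator may legitimately second multiple
+ // candidates (one per depth per active leaf), so the statement table must
+ // accept multiple `Seconded` statements per validator in that mode.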
+ let table_config = TableConfig {
+ allow_multiple_seconded: match mode {
+ ProspectiveParachainsMode::Enabled => true,
+ ProspectiveParachainsMode::Disabled => false,
+ },
+ };
+
+ // TODO [now]: I've removed the `required_collator` more broadly,
+ // because it's not used in practice and was intended for parathreads.
+ //
+ // We should attempt parathreads another way, I think, so it makes sense
+ // to remove.
+ let assignment = assignment.map(|(a, _required_collator)| a);
- let candidate_hash = statement.payload().candidate_hash();
- let import_statement_span = {
- // create a span only for candidates we're already aware of.
- self.get_unbacked_statement_child(
- root_span,
+ Ok(Some(PerRelayParentState {
+ prospective_parachains_mode: mode,
+ parent,
+ session_index,
+ assignment,
+ backed: HashSet::new(),
+ table: Table::new(table_config),
+ table_context,
+ issued_statements: HashSet::new(),
+ awaiting_validation: HashSet::new(),
+ fallbacks: HashMap::new(),
+ }))
+}
+
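+/// Whether seconding a candidate is allowed and, if so, the fragment-tree
+/// membership justifying it: for each active leaf, the depths the candidate
+/// would occupy.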
+enum SecondingAllowed {
+ No,
+ Yes(Vec<(Hash, Vec<usize>)>),
+}
+
+/// Checks whether a candidate can be seconded based on its hypothetical
+/// depths in the fragment tree and what we've already seconded in all
+/// active leaves.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn seconding_sanity_check<Context>(
+ ctx: &mut Context,
+ active_leaves: &HashMap<Hash, ActiveLeafState>,
+ candidate_hash: CandidateHash,
+ candidate_para: ParaId,
+ parent_head_data_hash: Hash,
+ head_data_hash: Hash,
+ candidate_relay_parent: Hash,
+) -> SecondingAllowed {
+ // Note that `GetHypotheticalDepth` doesn't account for recursion,
+ // i.e. candidates can appear at multiple depths in the tree and in fact
+ // at all depths, and we don't know what depths a candidate will ultimately occupy
+ // because that's dependent on other candidates we haven't yet received.
+ //
+ // The only way to effectively rule this out is to have candidate receipts
+ // directly commit to the parachain block number or some other incrementing
+ // counter. That requires a major primitives format upgrade, so for now
+ // we just rule out trivial cycles.
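+ //
+ // As an illustrative sketch: a candidate whose output head data equals its
+ // parent head data could be its own child, and so could appear at every
+ // depth of the tree at once.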
+ if parent_head_data_hash == head_data_hash {
+ return SecondingAllowed::No
+ }
+
+ let mut membership = Vec::new();
+ let mut responses = FuturesOrdered::new();
+ for (head, leaf_state) in active_leaves {
+ let (tx, rx) = oneshot::channel();
+ ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalDepth(
+ HypotheticalDepthRequest {
candidate_hash,
- statement.validator_index(),
- )
- };
+ candidate_para,
+ parent_head_data_hash,
+ candidate_relay_parent,
+ fragment_tree_relay_parent: *head,
+ },
+ tx,
+ ))
+ .await;
+ responses.push(rx.map_ok(move |depths| (depths, head, leaf_state)));
+ }
- if let Err(ValidatorIndexOutOfBounds) = self
- .dispatch_new_statement_to_dispute_coordinator(ctx.sender(), candidate_hash, &statement)
- .await
- {
- gum::warn!(
- target: LOG_TARGET,
- session_index = ?self.session_index,
- relay_parent = ?self.parent,
- validator_index = statement.validator_index().0,
- "Supposedly 'Signed' statement has validator index out of bounds."
- );
+ while let Some(response) = responses.next().await {
+ match response {
+ Err(oneshot::Canceled) => {
+ gum::warn!(
+ target: LOG_TARGET,
+ "Failed to reach prospective parachains subsystem for hypothetical depths",
+ );
+
+ return SecondingAllowed::No
+ },
+ Ok((depths, head, leaf_state)) => {
+ for depth in &depths {
+ if leaf_state.seconded_at_depth.contains_key(&depth) {
+ gum::debug!(
+ target: LOG_TARGET,
+ ?candidate_hash,
+ depth,
+ leaf_hash = ?head,
+ "Refusing to second candidate at depth - already occupied."
+ );
+
+ return SecondingAllowed::No
+ }
+ }
- return Ok(None)
+ membership.push((*head, depths));
+ },
}
+ }
- let stmt = primitive_statement_to_table(statement);
+ // At this point we've checked the depths of the candidate against all active
+ // leaves.
+ SecondingAllowed::Yes(membership)
+}
- let summary = self.table.import_statement(&self.table_context, stmt);
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn handle_validated_candidate_command<Context>(
+ ctx: &mut Context,
+ state: &mut State,
+ relay_parent: Hash,
+ command: ValidatedCandidateCommand,
+ metrics: &Metrics,
+) -> Result<(), Error> {
+ match state.per_relay_parent.get_mut(&relay_parent) {
+ Some(rp_state) => {
+ let candidate_hash = command.candidate_hash();
+ rp_state.awaiting_validation.remove(&candidate_hash);
+
+ match command {
+ ValidatedCandidateCommand::Second(res) => match res {
+ Ok(outputs) => {
+ let BackgroundValidationOutputs {
+ candidate,
+ commitments,
+ persisted_validation_data,
+ } = outputs;
+
+ if rp_state.issued_statements.contains(&candidate_hash) {
+ return Ok(())
+ }
- let unbacked_span = if let Some(attested) = summary
- .as_ref()
- .and_then(|s| self.table.attested_candidate(&s.candidate, &self.table_context))
- {
- let candidate_hash = attested.candidate.hash();
- // `HashSet::insert` returns true if the thing wasn't in there already.
- if self.backed.insert(candidate_hash) {
- let span = self.remove_unbacked_span(&candidate_hash);
+ // sanity check that we're allowed to second the candidate
+ // and that it doesn't conflict with other candidates we've
+ // seconded.
+ let fragment_tree_membership = match seconding_sanity_check(
+ ctx,
+ &state.per_leaf,
+ candidate_hash,
+ candidate.descriptor().para_id,
+ persisted_validation_data.parent_head.hash(),
+ commitments.head_data.hash(),
+ candidate.descriptor().relay_parent,
+ )
+ .await
+ {
+ SecondingAllowed::No => return Ok(()),
+ SecondingAllowed::Yes(membership) => membership,
+ };
- if let Some(backed) = table_attested_to_backed(attested, &self.table_context) {
- gum::debug!(
- target: LOG_TARGET,
- candidate_hash = ?candidate_hash,
- relay_parent = ?self.parent,
- para_id = %backed.candidate.descriptor.para_id,
- "Candidate backed",
- );
+ let statement = StatementWithPVD::Seconded(
+ CommittedCandidateReceipt {
+ descriptor: candidate.descriptor.clone(),
+ commitments,
+ },
+ persisted_validation_data,
+ );
- // The provisioner waits on candidate-backing, which means
- // that we need to send unbounded messages to avoid cycles.
- //
- // Backed candidates are bounded by the number of validators,
- // parachains, and the block production rate of the relay chain.
- let message = ProvisionerMessage::ProvisionableData(
- self.parent,
- ProvisionableData::BackedCandidate(backed.receipt()),
- );
- ctx.send_unbounded_message(message);
+ // If we get an Error::RejectedByProspectiveParachains,
+ // then the statement has not been distributed or imported into
+ // the table.
+ let res = sign_import_and_distribute_statement(
+ ctx,
+ rp_state,
+ &mut state.per_candidate,
+ statement,
+ state.keystore.clone(),
+ metrics,
+ )
+ .await;
+
+ if let Err(Error::RejectedByProspectiveParachains) = res {
+ let candidate_hash = candidate.hash();
+ gum::debug!(
+ target: LOG_TARGET,
+ relay_parent = ?candidate.descriptor().relay_parent,
+ ?candidate_hash,
+ "Attempted to second candidate but was rejected by prospective parachains",
+ );
+
+ // Ensure the collator is reported.
+ ctx.send_message(CollatorProtocolMessage::Invalid(
+ candidate.descriptor().relay_parent,
+ candidate,
+ ))
+ .await;
- span.as_ref().map(|s| s.child("backed"));
- span
- } else {
- None
- }
- } else {
- None
- }
- } else {
- None
- };
+ return Ok(())
+ }
- self.issue_new_misbehaviors(ctx.sender());
+ if let Some(stmt) = res? {
+ match state.per_candidate.get_mut(&candidate_hash) {
+ None => {
+ gum::warn!(
+ target: LOG_TARGET,
+ ?candidate_hash,
+ "Missing `per_candidate` for seconded candidate.",
+ );
+ },
+ Some(p) => p.seconded_locally = true,
+ }
+
+ // update seconded depths in active leaves.
+ for (leaf, depths) in fragment_tree_membership {
+ let leaf_data = match state.per_leaf.get_mut(&leaf) {
+ None => {
+ gum::warn!(
+ target: LOG_TARGET,
+ leaf_hash = ?leaf,
+ "Missing `per_leaf` for known active leaf."
+ );
+
+ continue
+ },
+ Some(d) => d,
+ };
+
+ for depth in depths {
+ leaf_data.seconded_at_depth.insert(depth, candidate_hash);
+ }
+ }
- // It is important that the child span is dropped before its parent span (`unbacked_span`)
- drop(import_statement_span);
- drop(unbacked_span);
+ rp_state.issued_statements.insert(candidate_hash);
- Ok(summary)
+ metrics.on_candidate_seconded();
+ ctx.send_message(CollatorProtocolMessage::Seconded(
+ rp_state.parent,
+ StatementWithPVD::drop_pvd_from_signed(stmt),
+ ))
+ .await;
+ }
+ },
+ Err(candidate) => {
+ ctx.send_message(CollatorProtocolMessage::Invalid(
+ rp_state.parent,
+ candidate,
+ ))
+ .await;
+ },
+ },
+ ValidatedCandidateCommand::Attest(res) => {
+ // We are done - avoid new validation spawns:
+ rp_state.fallbacks.remove(&candidate_hash);
+ // sanity check.
+ if !rp_state.issued_statements.contains(&candidate_hash) {
+ if res.is_ok() {
+ let statement = StatementWithPVD::Valid(candidate_hash);
+
+ sign_import_and_distribute_statement(
+ ctx,
+ rp_state,
+ &mut state.per_candidate,
+ statement,
+ state.keystore.clone(),
+ metrics,
+ )
+ .await?;
+ }
+ rp_state.issued_statements.insert(candidate_hash);
+ }
+ },
+ ValidatedCandidateCommand::AttestNoPoV(candidate_hash) => {
+ if let Some(attesting) = rp_state.fallbacks.get_mut(&candidate_hash) {
+ if let Some(index) = attesting.backing.pop() {
+ attesting.from_validator = index;
+ let attesting = attesting.clone();
+
+ // The candidate state should be available because we've
+ // validated it before, the relay-parent is still around,
+ // and candidates are pruned on the basis of relay-parents.
+ //
+ // If it's not, then no point in validating it anyway.
+ if let Some(pvd) = state
+ .per_candidate
+ .get(&candidate_hash)
+ .map(|pc| pc.persisted_validation_data.clone())
+ {
+ kick_off_validation_work(
+ ctx,
+ rp_state,
+ pvd,
+ &state.background_validation_tx,
+ attesting,
+ )
+ .await?;
+ }
+ }
+ } else {
+ gum::warn!(
+ target: LOG_TARGET,
+ "AttestNoPoV was triggered without fallback being available."
+ );
+ debug_assert!(false);
+ }
+ },
+ }
+ },
+ None => {
+ // simple race condition; can be ignored - this relay-parent
+ // is no longer relevant.
+ },
}
- /// The dispute coordinator keeps track of all statements by validators about every recent
- /// candidate.
- ///
- /// When importing a statement, this should be called access the candidate receipt either
- /// from the statement itself or from the underlying statement table in order to craft
- /// and dispatch the notification to the dispute coordinator.
- ///
- /// This also does bounds-checking on the validator index and will return an error if the
- /// validator index is out of bounds for the current validator set. It's expected that
- /// this should never happen due to the interface of the candidate backing subsystem -
- /// the networking component responsible for feeding statements to the backing subsystem
- /// is meant to check the signature and provenance of all statements before submission.
- async fn dispatch_new_statement_to_dispute_coordinator(
- &self,
- sender: &mut impl overseer::CandidateBackingSenderTrait,
- candidate_hash: CandidateHash,
- statement: &SignedFullStatement,
- ) -> Result<(), ValidatorIndexOutOfBounds> {
- // Dispatch the statement to the dispute coordinator.
- let validator_index = statement.validator_index();
- let signing_context =
- SigningContext { parent_hash: self.parent, session_index: self.session_index };
-
- let validator_public = match self.table_context.validators.get(validator_index.0 as usize) {
- None => return Err(ValidatorIndexOutOfBounds),
- Some(v) => v,
- };
+ Ok(())
+}
- let maybe_candidate_receipt = match statement.payload() {
- Statement::Seconded(receipt) => Some(receipt.to_plain()),
- Statement::Valid(candidate_hash) => {
- // Valid statements are only supposed to be imported
- // once we've seen at least one `Seconded` statement.
- self.table.get_candidate(&candidate_hash).map(|c| c.to_plain())
- },
- };
+async fn sign_statement(
+ rp_state: &PerRelayParentState,
+ statement: StatementWithPVD,
+ keystore: SyncCryptoStorePtr,
+ metrics: &Metrics,
+) -> Option<SignedFullStatementWithPVD> {
+ let signed = rp_state
+ .table_context
+ .validator
+ .as_ref()?
+ .sign(keystore, statement)
+ .await
+ .ok()
+ .flatten()?;
+ metrics.on_statement_signed();
+ Some(signed)
+}
- let maybe_signed_dispute_statement = SignedDisputeStatement::from_backing_statement(
- statement.as_unchecked(),
- signing_context,
- validator_public.clone(),
- )
- .ok();
+/// The dispute coordinator keeps track of all statements by validators about every recent
+/// candidate.
+///
+/// When importing a statement, this should be called to access the candidate receipt either
+/// from the statement itself or from the underlying statement table in order to craft
+/// and dispatch the notification to the dispute coordinator.
+///
+/// This also does bounds-checking on the validator index and will return an error if the
+/// validator index is out of bounds for the current validator set. It's expected that
+/// this should never happen due to the interface of the candidate backing subsystem -
+/// the networking component responsible for feeding statements to the backing subsystem
+/// is meant to check the signature and provenance of all statements before submission.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn dispatch_new_statement_to_dispute_coordinator<Context>(
+ ctx: &mut Context,
+ rp_state: &PerRelayParentState,
+ candidate_hash: CandidateHash,
+ statement: &SignedFullStatementWithPVD,
+) -> Result<(), ValidatorIndexOutOfBounds> {
+ // Dispatch the statement to the dispute coordinator.
+ let validator_index = statement.validator_index();
+ let signing_context =
+ SigningContext { parent_hash: rp_state.parent, session_index: rp_state.session_index };
+
+ let validator_public = match rp_state.table_context.validators.get(validator_index.0 as usize) {
+ None => return Err(ValidatorIndexOutOfBounds),
+ Some(v) => v,
+ };
- if let (Some(candidate_receipt), Some(dispute_statement)) =
- (maybe_candidate_receipt, maybe_signed_dispute_statement)
- {
- sender
- .send_message(DisputeCoordinatorMessage::ImportStatements {
- candidate_hash,
- candidate_receipt,
- session: self.session_index,
- statements: vec![(dispute_statement, validator_index)],
- pending_confirmation: None,
- })
- .await;
- }
+ let maybe_candidate_receipt = match statement.payload() {
+ StatementWithPVD::Seconded(receipt, _) => Some(receipt.to_plain()),
+ StatementWithPVD::Valid(candidate_hash) => {
+ // Valid statements are only supposed to be imported
+ // once we've seen at least one `Seconded` statement.
+ rp_state.table.get_candidate(&candidate_hash).map(|c| c.to_plain())
+ },
+ };
- Ok(())
+ let maybe_signed_dispute_statement = SignedDisputeStatement::from_backing_statement(
+ statement.as_unchecked(),
+ signing_context,
+ validator_public.clone(),
+ )
+ .ok();
+
+ if let (Some(candidate_receipt), Some(dispute_statement)) =
+ (maybe_candidate_receipt, maybe_signed_dispute_statement)
+ {
+ ctx.send_message(DisputeCoordinatorMessage::ImportStatements {
+ candidate_hash,
+ candidate_receipt,
+ session: rp_state.session_index,
+ statements: vec![(dispute_statement, validator_index)],
+ pending_confirmation: None,
+ })
+ .await;
}
- async fn handle_second_msg(
- &mut self,
- root_span: &jaeger::Span,
- ctx: &mut Context,
- candidate: CandidateReceipt,
- pov: PoV,
- ) -> Result<(), Error> {
- let _timer = self.metrics.time_process_second();
-
- let candidate_hash = candidate.hash();
- let span = root_span
- .child("second")
- .with_stage(jaeger::Stage::CandidateBacking)
- .with_pov(&pov)
- .with_candidate(candidate_hash)
- .with_relay_parent(self.parent);
-
- // Sanity check that candidate is from our assignment.
- if Some(candidate.descriptor().para_id) != self.assignment {
- gum::debug!(
- target: LOG_TARGET,
- our_assignment = ?self.assignment,
- collation = ?candidate.descriptor().para_id,
- "Subsystem asked to second for para outside of our assignment",
+ Ok(())
+}
+
+/// Import a statement into the statement table and return the summary of the import.
+///
+/// This will fail with `Error::RejectedByProspectiveParachains` if the statement
+/// is `Seconded`, the candidate is fresh, and any of the following are true:
+/// 1. There is no `PersistedValidationData` attached.
+/// 2. Prospective parachains are enabled for the relay parent and the prospective parachains
+/// subsystem returned an empty `FragmentTreeMembership`, i.e. did not
+/// recognize the candidate as being applicable to any of the active leaves.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn import_statement<Context>(
+ ctx: &mut Context,
+ rp_state: &mut PerRelayParentState,
+ per_candidate: &mut HashMap<CandidateHash, PerCandidateState>,
+ statement: &SignedFullStatementWithPVD,
+) -> Result<Option<TableSummary>, Error> {
+ gum::debug!(
+ target: LOG_TARGET,
+ statement = ?statement.payload().to_compact(),
+ validator_index = statement.validator_index().0,
+ "Importing statement",
+ );
+
+ let candidate_hash = statement.payload().candidate_hash();
+
+ // If this is a new candidate (statement is 'seconded' and candidate is unknown),
+ // we need to create an entry in the `PerCandidateState` map.
+ //
+ // If the relay parent supports prospective parachains, we also need
+ // to inform the prospective parachains subsystem of the seconded candidate.
+ // If `ProspectiveParachainsMessage::CandidateSeconded` fails or reports
+ // empty membership, we return `Error::RejectedByProspectiveParachains`.
+ //
+ // Persisted Validation Data should be available - it may already be available
+ // if this is a candidate we are seconding.
+ //
+ // We should also not accept any candidates which have no valid depths under any of
+ // our active leaves.
+ if let StatementWithPVD::Seconded(candidate, pvd) = statement.payload() {
+ if !per_candidate.contains_key(&candidate_hash) {
+ per_candidate.insert(
+ candidate_hash,
+ PerCandidateState {
+ persisted_validation_data: pvd.clone(),
+ // This is set after importing when seconding locally.
+ seconded_locally: false,
+ para_id: candidate.descriptor().para_id,
+ relay_parent: candidate.descriptor().relay_parent,
+ },
);
- return Ok(())
- }
+ if rp_state.prospective_parachains_mode.is_enabled() {
+ let (tx, rx) = oneshot::channel();
+ ctx.send_message(ProspectiveParachainsMessage::CandidateSeconded(
+ candidate.descriptor().para_id,
+ candidate.clone(),
+ pvd.clone(),
+ tx,
+ ))
+ .await;
- // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a
- // Seconded statement only if we have not seconded any other candidate and
- // have not signed a Valid statement for the requested candidate.
- if self.seconded.is_none() {
- // This job has not seconded a candidate yet.
+ match rx.await {
+ Err(oneshot::Canceled) => {
+ gum::warn!(
+ target: LOG_TARGET,
+ "Could not reach the Prospective Parachains subsystem."
+ );
- if !self.issued_statements.contains(&candidate_hash) {
- let pov = Arc::new(pov);
- self.validate_and_second(&span, &root_span, ctx, &candidate, pov).await?;
+ return Err(Error::RejectedByProspectiveParachains)
+ },
+ Ok(membership) =>
+ if membership.is_empty() {
+ return Err(Error::RejectedByProspectiveParachains)
+ },
+ }
}
}
+ }
- Ok(())
+ if let Err(ValidatorIndexOutOfBounds) =
+ dispatch_new_statement_to_dispute_coordinator(ctx, rp_state, candidate_hash, statement)
+ .await
+ {
+ gum::warn!(
+ target: LOG_TARGET,
+ session_index = ?rp_state.session_index,
+ relay_parent = ?rp_state.parent,
+ validator_index = statement.validator_index().0,
+ "Supposedly 'Signed' statement has validator index out of bounds."
+ );
+
+ return Ok(None)
}
- async fn handle_statement_message(
- &mut self,
- root_span: &jaeger::Span,
- ctx: &mut Context,
- statement: SignedFullStatement,
- ) -> Result<(), Error> {
- let _timer = self.metrics.time_process_statement();
- let _span = root_span
- .child("statement")
- .with_stage(jaeger::Stage::CandidateBacking)
- .with_candidate(statement.payload().candidate_hash())
- .with_relay_parent(self.parent);
-
- match self.maybe_validate_and_import(&root_span, ctx, statement).await {
- Err(Error::ValidationFailed(_)) => Ok(()),
- Err(e) => Err(e),
- Ok(()) => Ok(()),
+ let stmt = primitive_statement_to_table(statement);
+
+ let summary = rp_state.table.import_statement(&rp_state.table_context, stmt);
+
+ if let Some(attested) = summary
+ .as_ref()
+ .and_then(|s| rp_state.table.attested_candidate(&s.candidate, &rp_state.table_context))
+ {
+ // `HashSet::insert` returns true if the value wasn't already present.
+ if rp_state.backed.insert(candidate_hash) {
+ if let Some(backed) = table_attested_to_backed(attested, &rp_state.table_context) {
+ let para_id = backed.candidate.descriptor.para_id;
+ gum::debug!(
+ target: LOG_TARGET,
+ candidate_hash = ?candidate_hash,
+ relay_parent = ?rp_state.parent,
+ %para_id,
+ "Candidate backed",
+ );
+
+ // Inform the prospective parachains subsystem
+ // that the candidate is now backed.
+ if rp_state.prospective_parachains_mode.is_enabled() {
+ ctx.send_message(ProspectiveParachainsMessage::CandidateBacked(
+ para_id,
+ candidate_hash,
+ ))
+ .await;
+ }
+
+ // The provisioner waits on candidate-backing, which means
+ // that we need to send unbounded messages to avoid cycles.
+ //
+ // Backed candidates are bounded by the number of validators,
+ // parachains, and the block production rate of the relay chain.
+ let message = ProvisionerMessage::ProvisionableData(
+ rp_state.parent,
+ ProvisionableData::BackedCandidate(backed.receipt()),
+ );
+ ctx.send_unbounded_message(message);
+ }
}
}
- fn handle_get_backed_candidates_message(
- &mut self,
- requested_candidates: Vec<CandidateHash>,
- tx: oneshot::Sender<Vec<BackedCandidate>>,
- ) -> Result<(), Error> {
- let _timer = self.metrics.time_get_backed_candidates();
+ issue_new_misbehaviors(ctx, rp_state.parent, &mut rp_state.table);
- let backed = requested_candidates
- .into_iter()
- .filter_map(|hash| {
- self.table
- .attested_candidate(&hash, &self.table_context)
- .and_then(|attested| table_attested_to_backed(attested, &self.table_context))
- })
- .collect();
-
- tx.send(backed).map_err(|data| Error::Send(data))?;
- Ok(())
+ Ok(summary)
+}
+
+/// Check if there have happened any new misbehaviors and issue necessary messages.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+fn issue_new_misbehaviors<Context>(
+ ctx: &mut Context,
+ relay_parent: Hash,
+ table: &mut Table<TableContext>,
+) {
+ // collect the misbehaviors to avoid double mutable self borrow issues
+ let misbehaviors: Vec<_> = table.drain_misbehaviors().collect();
+ for (validator_id, report) in misbehaviors {
+ // The provisioner waits on candidate-backing, which means
+ // that we need to send unbounded messages to avoid cycles.
+ //
+ // Misbehaviors are bounded by the number of validators and
+ // the block production protocol.
+ ctx.send_unbounded_message(ProvisionerMessage::ProvisionableData(
+ relay_parent,
+ ProvisionableData::MisbehaviorReport(relay_parent, validator_id, report),
+ ));
+ }
+}
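// The collect-then-iterate step above is worth calling out: draining into an
// owned `Vec` first ends the borrow of the statement table, leaving the loop
// body free to use the sending context. A minimal, self-contained sketch of
// the same pattern (toy types, not the real `Table` API):

struct ReportQueue {
    pending: Vec<(u32, &'static str)>,
    dispatched: usize,
}

impl ReportQueue {
    fn flush(&mut self) {
        // Ending the `drain` borrow here is what lets the loop below
        // mutate other fields of `self` without aliasing errors.
        let reports: Vec<_> = self.pending.drain(..).collect();
        for (validator, report) in reports {
            self.dispatched += 1;
            println!("validator {validator} misbehaved: {report}");
        }
    }
}

fn main() {
    let mut q = ReportQueue { pending: vec![(7, "double vote")], dispatched: 0 };
    q.flush();
    assert_eq!(q.dispatched, 1);
}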
+
+/// Sign, import, and distribute a statement.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn sign_import_and_distribute_statement<Context>(
+ ctx: &mut Context,
+ rp_state: &mut PerRelayParentState,
+ per_candidate: &mut HashMap<CandidateHash, PerCandidateState>,
+ statement: StatementWithPVD,
+ keystore: SyncCryptoStorePtr,
+ metrics: &Metrics,
+) -> Result<Option<SignedFullStatementWithPVD>, Error> {
+ if let Some(signed_statement) = sign_statement(&*rp_state, statement, keystore, metrics).await {
+ import_statement(ctx, rp_state, per_candidate, &signed_statement).await?;
+
+ let smsg = StatementDistributionMessage::Share(
+ rp_state.parent,
+ StatementWithPVD::drop_pvd_from_signed(signed_statement.clone()),
+ );
+ ctx.send_unbounded_message(smsg);
+
+ Ok(Some(signed_statement))
+ } else {
+ Ok(None)
+ }
+}
+
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn background_validate_and_make_available<Context>(
+ ctx: &mut Context,
+ rp_state: &mut PerRelayParentState,
+ params: BackgroundValidationParams<
+ impl overseer::CandidateBackingSenderTrait,
+ impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Send + 'static + Sync,
+ >,
+) -> Result<(), Error> {
+ let candidate_hash = params.candidate.hash();
+ if rp_state.awaiting_validation.insert(candidate_hash) {
+ // spawn background task.
+ let bg = async move {
+ if let Err(e) = validate_and_make_available(params).await {
+ if let Error::BackgroundValidationMpsc(error) = e {
+ gum::debug!(
+ target: LOG_TARGET,
+ ?error,
+ "Mpsc background validation mpsc died during validation- leaf no longer active?"
+ );
+ } else {
+ gum::error!(
+ target: LOG_TARGET,
+ "Failed to validate and make available: {:?}",
+ e
+ );
+ }
+ }
+ };
+
+ ctx.spawn("backing-validation", bg.boxed())
+ .map_err(|_| Error::FailedToSpawnBackgroundTask)?;
+ }
+
+ Ok(())
+}
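// Note that `awaiting_validation.insert` doubles as the deduplication check:
// `HashSet::insert` returns `false` when the value is already present, so a
// repeated request for the same candidate never spawns a second task. A
// standalone sketch of the idiom, with a plain string standing in for the
// candidate hash:

use std::collections::HashSet;

fn maybe_spawn(awaiting: &mut HashSet<String>, candidate: &str) -> bool {
    // `insert` returns true only when the value was absent, so the
    // "spawn" below runs at most once per candidate.
    if awaiting.insert(candidate.to_owned()) {
        println!("spawning background validation for {candidate}");
        return true
    }
    false
}

fn main() {
    let mut awaiting = HashSet::new();
    assert!(maybe_spawn(&mut awaiting, "candidate-a"));
    assert!(!maybe_spawn(&mut awaiting, "candidate-a")); // deduplicated
}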
+
+/// Kick off validation work and distribute the result as a signed statement.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn kick_off_validation_work<Context>(
+ ctx: &mut Context,
+ rp_state: &mut PerRelayParentState,
+ persisted_validation_data: PersistedValidationData,
+ background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
+ attesting: AttestingData,
+) -> Result<(), Error> {
+ let candidate_hash = attesting.candidate.hash();
+ if rp_state.issued_statements.contains(&candidate_hash) {
+ return Ok(())
}
- /// Kick off validation work and distribute the result as a signed statement.
- async fn kick_off_validation_work(
- &mut self,
- ctx: &mut Context,
- attesting: AttestingData,
- span: Option<jaeger::Span>,
- ) -> Result<(), Error> {
- let candidate_hash = attesting.candidate.hash();
- if self.issued_statements.contains(&candidate_hash) {
+ gum::debug!(
+ target: LOG_TARGET,
+ candidate_hash = ?candidate_hash,
+ candidate_receipt = ?attesting.candidate,
+ "Kicking off validation",
+ );
+
+ let bg_sender = ctx.sender().clone();
+ let pov = PoVData::FetchFromValidator {
+ from_validator: attesting.from_validator,
+ candidate_hash,
+ pov_hash: attesting.pov_hash,
+ };
+
+ background_validate_and_make_available(
+ ctx,
+ rp_state,
+ BackgroundValidationParams {
+ sender: bg_sender,
+ tx_command: background_validation_tx.clone(),
+ candidate: attesting.candidate,
+ relay_parent: rp_state.parent,
+ persisted_validation_data,
+ pov,
+ n_validators: rp_state.table_context.validators.len(),
+ make_command: ValidatedCandidateCommand::Attest,
+ },
+ )
+ .await
+}
+
+/// Import the statement and kick off validation work if it is a part of our assignment.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn maybe_validate_and_import<Context>(
+ ctx: &mut Context,
+ state: &mut State,
+ relay_parent: Hash,
+ statement: SignedFullStatementWithPVD,
+) -> Result<(), Error> {
+ let rp_state = match state.per_relay_parent.get_mut(&relay_parent) {
+ Some(r) => r,
+ None => {
+ gum::trace!(
+ target: LOG_TARGET,
+ ?relay_parent,
+ "Received statement for unknown relay-parent"
+ );
+
return Ok(())
- }
+ },
+ };
- let descriptor = attesting.candidate.descriptor().clone();
+ let res = import_statement(ctx, rp_state, &mut state.per_candidate, &statement).await;
+ // If we get `Error::RejectedByProspectiveParachains`,
+ // we log the rejection below and return without error.
+ if let Err(Error::RejectedByProspectiveParachains) = res {
gum::debug!(
target: LOG_TARGET,
- candidate_hash = ?candidate_hash,
- candidate_receipt = ?attesting.candidate,
- "Kicking off validation",
+ ?relay_parent,
+ "Statement rejected by prospective parachains."
);
- // Check that candidate is collated by the right collator.
- if self.required_collator.as_ref().map_or(false, |c| c != &descriptor.collator) {
- // If not, we've got the statement in the table but we will
- // not issue validation work for it.
- //
- // Act as though we've issued a statement.
- self.issued_statements.insert(candidate_hash);
+ return Ok(())
+ }
+
+ if let Some(summary) = res? {
+ // import_statement already takes care of communicating with the
+ // prospective parachains subsystem. At this point, the candidate
+ // has already been accepted into the fragment trees.
+
+ let candidate_hash = summary.candidate;
+
+ if Some(summary.group_id) != rp_state.assignment {
return Ok(())
}
+ let attesting = match statement.payload() {
+ StatementWithPVD::Seconded(receipt, _) => {
+ let attesting = AttestingData {
+ candidate: rp_state
+ .table
+ .get_candidate(&candidate_hash)
+ .ok_or(Error::CandidateNotFound)?
+ .to_plain(),
+ pov_hash: receipt.descriptor.pov_hash,
+ from_validator: statement.validator_index(),
+ backing: Vec::new(),
+ };
+ rp_state.fallbacks.insert(summary.candidate, attesting.clone());
+ attesting
+ },
+ StatementWithPVD::Valid(candidate_hash) => {
+ if let Some(attesting) = rp_state.fallbacks.get_mut(candidate_hash) {
+ let our_index = rp_state.table_context.validator.as_ref().map(|v| v.index());
+ if our_index == Some(statement.validator_index()) {
+ return Ok(())
+ }
- let bg_sender = ctx.sender().clone();
- let pov = PoVData::FetchFromValidator {
- from_validator: attesting.from_validator,
- candidate_hash,
- pov_hash: attesting.pov_hash,
- };
- self.background_validate_and_make_available(
- ctx,
- BackgroundValidationParams {
- sender: bg_sender,
- tx_command: self.background_validation_tx.clone(),
- candidate: attesting.candidate,
- relay_parent: self.parent,
- pov,
- n_validators: self.table_context.validators.len(),
- span,
- make_command: ValidatedCandidateCommand::Attest,
+ if rp_state.awaiting_validation.contains(candidate_hash) {
+ // Job already running:
+ attesting.backing.push(statement.validator_index());
+ return Ok(())
+ } else {
+ // No job, so start another with current validator:
+ attesting.from_validator = statement.validator_index();
+ attesting.clone()
+ }
+ } else {
+ return Ok(())
+ }
},
- )
- .await
+ };
+
+ // After `import_statement` succeeds, the candidate entry is guaranteed
+ // to exist.
+ if let Some(pvd) = state
+ .per_candidate
+ .get(&candidate_hash)
+ .map(|pc| pc.persisted_validation_data.clone())
+ {
+ kick_off_validation_work(
+ ctx,
+ rp_state,
+ pvd,
+ &state.background_validation_tx,
+ attesting,
+ )
+ .await?;
+ }
}
+ Ok(())
+}
- /// Import the statement and kick off validation work if it is a part of our assignment.
- async fn maybe_validate_and_import(
- &mut self,
- root_span: &jaeger::Span,
- ctx: &mut Context,
- statement: SignedFullStatement,
- ) -> Result<(), Error> {
- if let Some(summary) = self.import_statement(ctx, &statement, root_span).await? {
- if Some(summary.group_id) != self.assignment {
- return Ok(())
- }
- let (attesting, span) = match statement.payload() {
- Statement::Seconded(receipt) => {
- let candidate_hash = summary.candidate;
-
- let span = self.get_unbacked_validation_child(
- root_span,
- summary.candidate,
- summary.group_id,
- );
+/// Kick off background validation with intent to second.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn validate_and_second<Context>(
+ ctx: &mut Context,
+ rp_state: &mut PerRelayParentState,
+ persisted_validation_data: PersistedValidationData,
+ candidate: &CandidateReceipt,
+ pov: Arc<PoV>,
+ background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
+) -> Result<(), Error> {
+ let candidate_hash = candidate.hash();
+
+ gum::debug!(
+ target: LOG_TARGET,
+ candidate_hash = ?candidate_hash,
+ candidate_receipt = ?candidate,
+ "Validate and second candidate",
+ );
+
+ let bg_sender = ctx.sender().clone();
+ background_validate_and_make_available(
+ ctx,
+ rp_state,
+ BackgroundValidationParams {
+ sender: bg_sender,
+ tx_command: background_validation_tx.clone(),
+ candidate: candidate.clone(),
+ relay_parent: rp_state.parent,
+ persisted_validation_data,
+ pov: PoVData::Ready(pov),
+ n_validators: rp_state.table_context.validators.len(),
+ make_command: ValidatedCandidateCommand::Second,
+ },
+ )
+ .await?;
- let attesting = AttestingData {
- candidate: self
- .table
- .get_candidate(&candidate_hash)
- .ok_or(Error::CandidateNotFound)?
- .to_plain(),
- pov_hash: receipt.descriptor.pov_hash,
- from_validator: statement.validator_index(),
- backing: Vec::new(),
- };
- let child = span.as_ref().map(|s| s.child("try"));
- self.fallbacks.insert(summary.candidate, (attesting.clone(), span));
- (attesting, child)
- },
- Statement::Valid(candidate_hash) => {
- if let Some((attesting, span)) = self.fallbacks.get_mut(candidate_hash) {
- let our_index = self.table_context.validator.as_ref().map(|v| v.index());
- if our_index == Some(statement.validator_index()) {
- return Ok(())
- }
+ Ok(())
+}
- if self.awaiting_validation.contains(candidate_hash) {
- // Job already running:
- attesting.backing.push(statement.validator_index());
- return Ok(())
- } else {
- // No job, so start another with current validator:
- attesting.from_validator = statement.validator_index();
- (attesting.clone(), span.as_ref().map(|s| s.child("try")))
- }
- } else {
- return Ok(())
- }
- },
- };
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn handle_second_message<Context>(
+ ctx: &mut Context,
+ state: &mut State,
+ candidate: CandidateReceipt,
+ persisted_validation_data: PersistedValidationData,
+ pov: PoV,
+ metrics: &Metrics,
+) -> Result<(), Error> {
+ let _timer = metrics.time_process_second();
- self.kick_off_validation_work(ctx, attesting, span).await?;
- }
- Ok(())
+ let candidate_hash = candidate.hash();
+ let relay_parent = candidate.descriptor().relay_parent;
+
+ if candidate.descriptor().persisted_validation_data_hash != persisted_validation_data.hash() {
+ gum::warn!(
+ target: LOG_TARGET,
+ ?candidate_hash,
+ "Candidate backing was asked to second candidate with wrong PVD",
+ );
+
+ return Ok(())
}
- async fn sign_statement(&mut self, statement: Statement) -> Option<SignedFullStatement> {
- let signed = self
- .table_context
- .validator
- .as_ref()?
- .sign(self.keystore.clone(), statement)
- .await
- .ok()
- .flatten()?;
- self.metrics.on_statement_signed();
- Some(signed)
+ let rp_state = match state.per_relay_parent.get_mut(&relay_parent) {
+ None => {
+ gum::trace!(
+ target: LOG_TARGET,
+ ?relay_parent,
+ ?candidate_hash,
+ "We were asked to second a candidate outside of our view."
+ );
+
+ return Ok(())
+ },
+ Some(r) => r,
+ };
+
+ // Sanity check that candidate is from our assignment.
+ if Some(candidate.descriptor().para_id) != rp_state.assignment {
+ gum::debug!(
+ target: LOG_TARGET,
+ our_assignment = ?rp_state.assignment,
+ collation = ?candidate.descriptor().para_id,
+ "Subsystem asked to second for para outside of our assignment",
+ );
+
+ return Ok(())
}
- /// Insert or get the unbacked-span for the given candidate hash.
- fn insert_or_get_unbacked_span(
- &mut self,
- parent_span: &jaeger::Span,
- hash: CandidateHash,
- para_id: Option,
- ) -> Option<&jaeger::Span> {
- if !self.backed.contains(&hash) {
- // only add if we don't consider this backed.
- let span = self.unbacked_candidates.entry(hash).or_insert_with(|| {
- let s = parent_span.child("unbacked-candidate").with_candidate(hash);
- if let Some(para_id) = para_id {
- s.with_para_id(para_id)
- } else {
- s
- }
- });
- Some(span)
- } else {
- None
- }
+ // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a
+ // Seconded statement only if we have not signed a Valid statement for the requested candidate.
+ //
+ // The actual logic of issuing the signed statement checks that this isn't
+ // conflicting with other seconded candidates. Not doing that check here
+ // gives other subsystems the ability to get us to execute arbitrary candidates,
+ // but no more.
+ if !rp_state.issued_statements.contains(&candidate_hash) {
+ let pov = Arc::new(pov);
+
+ validate_and_second(
+ ctx,
+ rp_state,
+ persisted_validation_data,
+ &candidate,
+ pov,
+ &state.background_validation_tx,
+ )
+ .await?;
}
- fn get_unbacked_validation_child(
- &mut self,
- parent_span: &jaeger::Span,
- hash: CandidateHash,
- para_id: ParaId,
- ) -> Option<jaeger::Span> {
- self.insert_or_get_unbacked_span(parent_span, hash, Some(para_id)).map(|span| {
- span.child("validation")
- .with_candidate(hash)
- .with_stage(Stage::CandidateBacking)
- })
+ Ok(())
+}
+
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn handle_statement_message<Context>(
+ ctx: &mut Context,
+ state: &mut State,
+ relay_parent: Hash,
+ statement: SignedFullStatementWithPVD,
+ metrics: &Metrics,
+) -> Result<(), Error> {
+ let _timer = metrics.time_process_statement();
+
+ match maybe_validate_and_import(ctx, state, relay_parent, statement).await {
+ Err(Error::ValidationFailed(_)) => Ok(()),
+ Err(e) => Err(e),
+ Ok(()) => Ok(()),
}
+}
- fn get_unbacked_statement_child(
- &mut self,
- parent_span: &jaeger::Span,
- hash: CandidateHash,
- validator: ValidatorIndex,
- ) -> Option<jaeger::Span> {
- self.insert_or_get_unbacked_span(parent_span, hash, None).map(|span| {
- span.child("import-statement")
- .with_candidate(hash)
- .with_validator_index(validator)
+fn handle_get_backed_candidates_message(
+ rp_state: &PerRelayParentState,
+ requested_candidates: Vec<CandidateHash>,
+ tx: oneshot::Sender<Vec<BackedCandidate>>,
+ metrics: &Metrics,
+) -> Result<(), Error> {
+ let _timer = metrics.time_get_backed_candidates();
+
+ let backed = requested_candidates
+ .into_iter()
+ .filter_map(|hash| {
+ rp_state
+ .table
+ .attested_candidate(&hash, &rp_state.table_context)
+ .and_then(|attested| table_attested_to_backed(attested, &rp_state.table_context))
})
- }
+ .collect();
- fn remove_unbacked_span(&mut self, hash: &CandidateHash) -> Option<jaeger::Span> {
- self.unbacked_candidates.remove(hash)
- }
+ tx.send(backed).map_err(|data| Error::Send(data))?;
+ Ok(())
}
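// `handle_get_backed_candidates_message` above is a plain oneshot
// request/response: filter the table down to attested candidates and answer
// on the channel, where `send` only fails if the requester dropped the
// receiver. A self-contained sketch of that shape, with integers standing in
// for candidate hashes and `BackedCandidate`s:

use futures::channel::oneshot;
use futures::executor::block_on;

fn answer(requested: Vec<u32>, attested: &[u32], tx: oneshot::Sender<Vec<u32>>) {
    // Keep only the candidates we can actually attest to, in request order.
    let backed: Vec<u32> =
        requested.into_iter().filter(|hash| attested.contains(hash)).collect();
    // A failed send only means the requester went away; nothing to do then.
    let _ = tx.send(backed);
}

fn main() {
    let (tx, rx) = oneshot::channel();
    answer(vec![1, 2, 3], &[2, 3, 5], tx);
    assert_eq!(block_on(rx).unwrap(), vec![2, 3]);
}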
diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs
index 9972b60490a1..ab9d678f77b0 100644
--- a/node/core/prospective-parachains/src/fragment_tree.rs
+++ b/node/core/prospective-parachains/src/fragment_tree.rs
@@ -62,7 +62,7 @@ use polkadot_node_subsystem_util::inclusion_emulator::staging::{
ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo,
};
use polkadot_primitives::vstaging::{
- BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId,
+ BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, Id as ParaId,
PersistedValidationData,
};
@@ -158,6 +158,17 @@ impl CandidateStorage {
})
}
+ /// Get head-data by hash.
+ pub(crate) fn head_data_by_hash(&self, hash: &Hash) -> Option<&HeadData> {
+ // Get some candidate which has a parent-head with the same hash as requested.
+ let a_candidate_hash = self.by_parent_head.get(hash).and_then(|m| m.iter().next())?;
+
+ // Extract the full parent head from that candidate's `PersistedValidationData`.
+ self.by_candidate_hash
+ .get(a_candidate_hash)
+ .map(|e| &e.candidate.persisted_validation_data.parent_head)
+ }
+
fn iter_para_children<'a>(
&'a self,
parent_head_hash: &Hash,
@@ -271,13 +282,19 @@ impl Scope {
.unwrap_or_else(|| self.relay_parent.clone())
}
- fn ancestor_by_hash(&self, hash: &Hash) -> Option<RelayChainBlockInfo> {
+ /// Get the ancestor of the fragment tree by hash.
+ pub fn ancestor_by_hash(&self, hash: &Hash) -> Option<RelayChainBlockInfo> {
if hash == &self.relay_parent.hash {
return Some(self.relay_parent.clone())
}
self.ancestors_by_hash.get(hash).map(|info| info.clone())
}
+
+ /// Get the base constraints of the scope.
+ pub fn base_constraints(&self) -> &Constraints {
+ &self.base_constraints
+ }
}
// We use indices into a flat vector to refer to nodes in the tree.
diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs
index 963c99e0a743..ef629d4c5a79 100644
--- a/node/core/prospective-parachains/src/lib.rs
+++ b/node/core/prospective-parachains/src/lib.rs
@@ -34,7 +34,8 @@ use futures::{channel::oneshot, prelude::*};
use polkadot_node_subsystem::{
messages::{
ChainApiMessage, FragmentTreeMembership, HypotheticalDepthRequest,
- ProspectiveParachainsMessage, RuntimeApiMessage, RuntimeApiRequest,
+ ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage,
+ RuntimeApiRequest,
},
overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError,
};
@@ -137,8 +138,10 @@ async fn run_iteration(ctx: &mut Context, view: &mut View) -> Result<()
answer_hypothetical_depths_request(&view, request, tx),
ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) =>
answer_tree_membership_request(&view, para, candidate, tx),
- ProspectiveParachainsMessage::GetMinimumRelayParent(para, relay_parent, tx) =>
- answer_minimum_relay_parent_request(&view, para, relay_parent, tx),
+ ProspectiveParachainsMessage::GetMinimumRelayParents(relay_parent, tx) =>
+ answer_minimum_relay_parents_request(&view, relay_parent, tx),
+ ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx) =>
+ answer_prospective_validation_data_request(&view, request, tx),
},
}
}
@@ -160,6 +163,9 @@ async fn handle_active_leaves_update(
}
for activated in update.activated.into_iter() {
+ // TODO [now]: skip leaves which don't have prospective parachains
+ // enabled. This should be a runtime API version check.
+
let hash = activated.hash;
let scheduled_paras = fetch_upcoming_paras(&mut *ctx, hash).await?;
@@ -461,19 +467,75 @@ fn answer_tree_membership_request(
let _ = tx.send(membership);
}
-fn answer_minimum_relay_parent_request(
+fn answer_minimum_relay_parents_request(
view: &View,
- para: ParaId,
relay_parent: Hash,
- tx: oneshot::Sender<Option<BlockNumber>>,
+ tx: oneshot::Sender<Vec<(ParaId, BlockNumber)>>,
+) {
+ let mut v = Vec::new();
+ if let Some(leaf_data) = view.active_leaves.get(&relay_parent) {
+ for (para_id, fragment_tree) in &leaf_data.fragment_trees {
+ v.push((*para_id, fragment_tree.scope().earliest_relay_parent().number));
+ }
+ }
+
+ let _ = tx.send(v);
+}
+
+fn answer_prospective_validation_data_request(
+ view: &View,
+ request: ProspectiveValidationDataRequest,
+ tx: oneshot::Sender<Option<PersistedValidationData>>,
) {
- let res = view
+ // 1. Try to get the head-data from the candidate store if known.
+ // 2. Otherwise, it might exist as the base in some relay-parent and we can find it by
+ // iterating fragment trees.
+ // 3. Otherwise, it is unknown.
+ // 4. Also try to find the relay parent block info by scanning
+ // fragment trees.
+ // 5. If head data and relay parent block info are found - success. Otherwise, failure.
+
+ let storage = match view.candidate_storage.get(&request.para_id) {
+ None => {
+ let _ = tx.send(None);
+ return
+ },
+ Some(s) => s,
+ };
+
+ let mut head_data =
+ storage.head_data_by_hash(&request.parent_head_data_hash).map(|x| x.clone());
+ let mut relay_parent_info = None;
+
+ for fragment_tree in view
.active_leaves
- .get(&relay_parent)
- .and_then(|data| data.fragment_trees.get(&para))
- .map(|tree| tree.scope().earliest_relay_parent().number);
+ .values()
+ .filter_map(|x| x.fragment_trees.get(&request.para_id))
+ {
+ if head_data.is_some() && relay_parent_info.is_some() {
+ break
+ }
+ if relay_parent_info.is_none() {
+ relay_parent_info =
+ fragment_tree.scope().ancestor_by_hash(&request.candidate_relay_parent);
+ }
+ if head_data.is_none() {
+ let required_parent = &fragment_tree.scope().base_constraints().required_parent;
+ if required_parent.hash() == request.parent_head_data_hash {
+ head_data = Some(required_parent.clone());
+ }
+ }
+ }
- let _ = tx.send(res);
+ let _ = tx.send(match (head_data, relay_parent_info) {
+ (Some(h), Some(i)) => Some(PersistedValidationData {
+ parent_head: h,
+ relay_parent_number: i.number,
+ relay_parent_storage_root: i.storage_root,
+ max_pov_size: request.max_pov_size,
+ }),
+ _ => None,
+ });
}
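// The final `match` above is the whole contract of the request: an answer is
// produced only when both the parent head-data and the relay-parent info were
// recovered. A sketch of that assembly step with toy structs; the field names
// mirror the ones used above, and nothing else is assumed about the real types:

struct BlockInfo {
    number: u32,
    storage_root: u64,
}

struct ValidationData {
    parent_head: Vec<u8>,
    relay_parent_number: u32,
    relay_parent_storage_root: u64,
    max_pov_size: u32,
}

fn assemble(
    head_data: Option<Vec<u8>>,
    relay_parent_info: Option<BlockInfo>,
    max_pov_size: u32,
) -> Option<ValidationData> {
    // Both lookups must have succeeded; any miss yields `None` for the requester.
    match (head_data, relay_parent_info) {
        (Some(parent_head), Some(info)) => Some(ValidationData {
            parent_head,
            relay_parent_number: info.number,
            relay_parent_storage_root: info.storage_root,
            max_pov_size,
        }),
        _ => None,
    }
}

fn main() {
    let info = BlockInfo { number: 10, storage_root: 0xfeed };
    assert!(assemble(Some(vec![1, 2]), Some(info), 5 * 1024 * 1024).is_some());
    assert!(assemble(Some(vec![1, 2]), None, 0).is_none());
}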
#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
diff --git a/node/network/bitfield-distribution/Cargo.toml b/node/network/bitfield-distribution/Cargo.toml
index e79c72c4f575..0087741a1a6a 100644
--- a/node/network/bitfield-distribution/Cargo.toml
+++ b/node/network/bitfield-distribution/Cargo.toml
@@ -5,6 +5,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
edition = "2021"
[dependencies]
+always-assert = "0.1"
futures = "0.3.21"
gum = { package = "tracing-gum", path = "../../gum" }
polkadot-primitives = { path = "../../../primitives" }
diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs
index 06bad64911b1..f1dec02ae637 100644
--- a/node/network/bitfield-distribution/src/lib.rs
+++ b/node/network/bitfield-distribution/src/lib.rs
@@ -22,6 +22,7 @@
#![deny(unused_crate_dependencies)]
+use always_assert::never;
use futures::{channel::oneshot, FutureExt};
use polkadot_node_network_protocol::{
@@ -29,7 +30,8 @@ use polkadot_node_network_protocol::{
grid_topology::{
RandomRouting, RequiredRouting, SessionBoundGridTopologyStorage, SessionGridTopology,
},
- v1 as protocol_v1, OurView, PeerId, UnifiedReputationChange as Rep, Versioned, View,
+ v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, ProtocolVersion,
+ UnifiedReputationChange as Rep, Versioned, View,
};
use polkadot_node_subsystem::{
jaeger, messages::*, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan,
@@ -69,25 +71,63 @@ struct BitfieldGossipMessage {
}
impl BitfieldGossipMessage {
- fn into_validation_protocol(self) -> net_protocol::VersionedValidationProtocol {
- self.into_network_message().into()
+ fn into_validation_protocol(
+ self,
+ recipient_version: ProtocolVersion,
+ ) -> net_protocol::VersionedValidationProtocol {
+ self.into_network_message(recipient_version).into()
}
- fn into_network_message(self) -> net_protocol::BitfieldDistributionMessage {
- Versioned::V1(protocol_v1::BitfieldDistributionMessage::Bitfield(
- self.relay_parent,
- self.signed_availability.into(),
- ))
+ fn into_network_message(
+ self,
+ recipient_version: ProtocolVersion,
+ ) -> net_protocol::BitfieldDistributionMessage {
+ match recipient_version {
+ x if x == protocol_v1::VERSION =>
+ Versioned::V1(protocol_v1::BitfieldDistributionMessage::Bitfield(
+ self.relay_parent,
+ self.signed_availability.into(),
+ )),
+ x if x == protocol_vstaging::VERSION =>
+ Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield(
+ self.relay_parent,
+ self.signed_availability.into(),
+ )),
+ _ => {
+ never!("Peers should only have supported protocol versions.");
+
+ gum::warn!(
+ target: LOG_TARGET,
+ version = recipient_version,
+ "Unknown protocol version provided for message recipient"
+ );
+
+ // Fall back to v1 to avoid dropping the message.
+ Versioned::V1(protocol_v1::BitfieldDistributionMessage::Bitfield(
+ self.relay_parent,
+ self.signed_availability.into(),
+ ))
+ },
+ }
}
}
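// `into_network_message` above is a per-recipient version dispatch: encode for
// the peer's negotiated protocol version, assert on unknown versions in debug
// builds, and fall back to v1 rather than dropping the message. A standalone
// sketch of the shape, with integer constants standing in for the protocol
// version types:

const V1: u32 = 1;
const VSTAGING: u32 = 2;

enum Wire {
    V1(String),
    VStaging(String),
}

fn encode_for(payload: &str, recipient_version: u32) -> Wire {
    match recipient_version {
        v if v == V1 => Wire::V1(payload.to_owned()),
        v if v == VSTAGING => Wire::VStaging(payload.to_owned()),
        v => {
            // Should be unreachable: the bridge only admits known versions.
            debug_assert!(false, "unknown protocol version {v}");
            // Fall back to v1 rather than dropping the message.
            Wire::V1(payload.to_owned())
        },
    }
}

fn main() {
    assert!(matches!(encode_for("bitfield", VSTAGING), Wire::VStaging(_)));
    assert!(matches!(encode_for("bitfield", V1), Wire::V1(_)));
}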
+/// Data stored on a per-peer basis.
+#[derive(Debug)]
+pub struct PeerData {
+ /// The peer's view.
+ view: View,
+ /// The peer's protocol version.
+ version: ProtocolVersion,
+}
+
/// Data used to track information of peers and relay parents the
/// overseer ordered us to work on.
#[derive(Default, Debug)]
struct ProtocolState {
/// Track all active peers and their views
/// to determine what is relevant to them.
- peer_views: HashMap<PeerId, View>,
+ peer_data: HashMap<PeerId, PeerData>,
/// The current and previous gossip topologies
topologies: SessionBoundGridTopologyStorage,
@@ -334,7 +374,7 @@ async fn handle_bitfield_distribution(
ctx,
job_data,
topology,
- &mut state.peer_views,
+ &mut state.peer_data,
validator,
msg,
required_routing,
@@ -353,7 +393,7 @@ async fn relay_message(
ctx: &mut Context,
job_data: &mut PerRelayParentData,
topology: &SessionGridTopology,
- peer_views: &mut HashMap<PeerId, View>,
+ peers: &mut HashMap<PeerId, PeerData>,
validator: ValidatorId,
message: BitfieldGossipMessage,
required_routing: RequiredRouting,
@@ -371,16 +411,16 @@ async fn relay_message(
.await;
drop(_span);
- let total_peers = peer_views.len();
+ let total_peers = peers.len();
let mut random_routing: RandomRouting = Default::default();
let _span = span.child("interested-peers");
// pass on the bitfield distribution to all interested peers
- let interested_peers = peer_views
+ let interested_peers = peers
.iter()
- .filter_map(|(peer, view)| {
+ .filter_map(|(peer, data)| {
// check interest in the peer in this message's relay parent
- if view.contains(&message.relay_parent) {
+ if data.view.contains(&message.relay_parent) {
let message_needed =
job_data.message_from_validator_needed_by_peer(&peer, &validator);
if message_needed {
@@ -395,7 +435,7 @@ async fn relay_message(
};
if need_routing {
- Some(peer.clone())
+ Some((peer.clone(), data.version))
} else {
None
}
@@ -406,9 +446,9 @@ async fn relay_message(
None
}
})
- .collect::<Vec<PeerId>>();
+ .collect::<Vec<(PeerId, ProtocolVersion)>>();
- interested_peers.iter().for_each(|peer| {
+ interested_peers.iter().for_each(|(peer, _)| {
// track the message as sent for this peer
job_data
.message_sent_to_peer
@@ -427,11 +467,33 @@ async fn relay_message(
);
} else {
let _span = span.child("gossip");
- ctx.send_message(NetworkBridgeMessage::SendValidationMessage(
- interested_peers,
- message.into_validation_protocol(),
- ))
- .await;
+ let v1_interested_peers = interested_peers
+ .iter()
+ .filter(|(_, v)| v == &protocol_v1::VERSION)
+ .map(|(p, _)| p.clone())
+ .collect::<Vec<_>>();
+
+ let vstaging_interested_peers = interested_peers
+ .iter()
+ .filter(|(_, v)| v == &protocol_vstaging::VERSION)
+ .map(|(p, _)| p.clone())
+ .collect::<Vec<_>>();
+
+ if !v1_interested_peers.is_empty() {
+ ctx.send_message(NetworkBridgeMessage::SendValidationMessage(
+ v1_interested_peers,
+ message.clone().into_validation_protocol(protocol_v1::VERSION),
+ ))
+ .await;
+ }
+
+ if !vstaging_interested_peers.is_empty() {
+ ctx.send_message(NetworkBridgeMessage::SendValidationMessage(
+ vstaging_interested_peers,
+ message.into_validation_protocol(protocol_vstaging::VERSION),
+ ))
+ .await
+ }
}
}
@@ -442,10 +504,20 @@ async fn process_incoming_peer_message(
state: &mut ProtocolState,
metrics: &Metrics,
origin: PeerId,
- message: protocol_v1::BitfieldDistributionMessage,
+ message: net_protocol::BitfieldDistributionMessage,
rng: &mut (impl CryptoRng + Rng),
) {
- let protocol_v1::BitfieldDistributionMessage::Bitfield(relay_parent, bitfield) = message;
+ let (relay_parent, bitfield) = match message {
+ Versioned::V1(protocol_v1::BitfieldDistributionMessage::Bitfield(
+ relay_parent,
+ bitfield,
+ )) => (relay_parent, bitfield),
+ Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield(
+ relay_parent,
+ bitfield,
+ )) => (relay_parent, bitfield),
+ };
+
gum::trace!(
target: LOG_TARGET,
peer = %origin,
@@ -543,7 +615,7 @@ async fn process_incoming_peer_message(
ctx,
job_data,
topology,
- &mut state.peer_views,
+ &mut state.peer_data,
validator,
message,
required_routing,
@@ -567,15 +639,18 @@ async fn handle_network_msg(
let _timer = metrics.time_handle_network_msg();
match bridge_message {
- NetworkBridgeEvent::PeerConnected(peer, role, _, _) => {
+ NetworkBridgeEvent::PeerConnected(peer, role, version, _) => {
gum::trace!(target: LOG_TARGET, ?peer, ?role, "Peer connected");
// insert if none already present
- state.peer_views.entry(peer).or_default();
+ state
+ .peer_data
+ .entry(peer)
+ .or_insert_with(|| PeerData { view: View::default(), version });
},
NetworkBridgeEvent::PeerDisconnected(peer) => {
gum::trace!(target: LOG_TARGET, ?peer, "Peer disconnected");
// get rid of superfluous data
- state.peer_views.remove(&peer);
+ state.peer_data.remove(&peer);
},
NetworkBridgeEvent::NewGossipTopology(gossip_topology) => {
let session_index = gossip_topology.session;
@@ -590,12 +665,21 @@ async fn handle_network_msg(
);
for new_peer in newly_added {
- // in case we already knew that peer in the past
- // it might have had an existing view, we use to initialize
- // and minimize the delta on `PeerViewChange` to be sent
- if let Some(old_view) = state.peer_views.remove(&new_peer) {
- handle_peer_view_change(ctx, state, new_peer, old_view, rng).await;
- }
+ let old_view = match state.peer_data.get_mut(&new_peer) {
+ Some(d) => {
+ // In case we already knew that peer in the past, it might have had
+ // an existing view; we use it to initialize and minimize the delta
+ // on the `PeerViewChange` to be sent.
+ std::mem::replace(&mut d.view, Default::default())
+ },
+ None => {
+ // For peers which are currently unknown, we'll send topology-related
+ // messages to them when they connect and send their first view update.
+ continue
+ },
+ };
+
+ handle_peer_view_change(ctx, state, new_peer, old_view, rng).await;
}
},
NetworkBridgeEvent::PeerViewChange(peerid, new_view) => {
@@ -606,7 +690,7 @@ async fn handle_network_msg(
gum::trace!(target: LOG_TARGET, ?new_view, "Our view change");
handle_our_view_change(state, new_view);
},
- NetworkBridgeEvent::PeerMessage(remote, Versioned::V1(message)) =>
+ NetworkBridgeEvent::PeerMessage(remote, message) =>
process_incoming_peer_message(ctx, state, metrics, remote, message, rng).await,
}
}
@@ -635,6 +719,9 @@ fn handle_our_view_change(state: &mut ProtocolState, view: OurView) {
// Send the difference between two views which were not sent
// to that particular peer.
+//
+// This requires that there is an entry in the `peer_data` field for the
+// peer.
#[overseer::contextbounds(BitfieldDistribution, prefix=self::overseer)]
async fn handle_peer_view_change<Context>(
ctx: &mut Context,
@@ -643,13 +730,20 @@ async fn handle_peer_view_change(
view: View,
rng: &mut (impl CryptoRng + Rng),
) {
- let added = state
- .peer_views
- .entry(origin.clone())
- .or_default()
- .replace_difference(view)
- .cloned()
- .collect::<Vec<Hash>>();
+ let peer_data = match state.peer_data.get_mut(&origin) {
+ None => {
+ gum::warn!(
+ target: LOG_TARGET,
+ peer = ?origin,
+ "Attempted to update peer view for unknown peer."
+ );
+
+ return
+ },
+ Some(pd) => pd,
+ };
+
+ let added = peer_data.view.replace_difference(view).cloned().collect::<Vec<Hash>>();
let topology = state.topologies.get_current_topology();
let is_gossip_peer = topology.route_to_peer(RequiredRouting::GridXY, &origin);
@@ -716,6 +810,9 @@ async fn send_tracked_gossip_message(
"Sending gossip message"
);
+ let version =
+ if let Some(peer_data) = state.peer_data.get(&dest) { peer_data.version } else { return };
+
job_data
.message_sent_to_peer
.entry(dest.clone())
@@ -724,7 +821,7 @@ async fn send_tracked_gossip_message(
ctx.send_message(NetworkBridgeMessage::SendValidationMessage(
vec![dest],
- message.into_validation_protocol(),
+ message.into_validation_protocol(version),
))
.await;
}
diff --git a/node/network/bitfield-distribution/src/tests.rs b/node/network/bitfield-distribution/src/tests.rs
index 9bd17c542f8f..20ac01de0957 100644
--- a/node/network/bitfield-distribution/src/tests.rs
+++ b/node/network/bitfield-distribution/src/tests.rs
@@ -936,3 +936,10 @@ fn need_message_works() {
// also not ok for Bob
assert!(false == pretend_send(&mut state, peer_b, &validator_set[1]));
}
+
+// TODO [now]: vstaging peers are accepted.
+
+// TODO [now]: vstaging messages are accepted.
+
+// TODO [now]: always sends v1 messages to v1 peers.
+// TODO [now]: always sends vstaging messages to vstaging peers.
diff --git a/node/network/bridge/src/lib.rs b/node/network/bridge/src/lib.rs
index f10f7fa5b67a..90dc5a4c0d2c 100644
--- a/node/network/bridge/src/lib.rs
+++ b/node/network/bridge/src/lib.rs
@@ -30,8 +30,8 @@ use sp_consensus::SyncOracle;
use polkadot_node_network_protocol::{
self as net_protocol,
peer_set::{PeerSet, PerPeerSet},
- v1 as protocol_v1, ObservedRole, OurView, PeerId, ProtocolVersion,
- UnifiedReputationChange as Rep, Versioned, View,
+ v1 as protocol_v1, vstaging as protocol_vstaging, ObservedRole, OurView, PeerId,
+ ProtocolVersion, UnifiedReputationChange as Rep, Versioned, View,
};
use polkadot_node_subsystem::{
@@ -302,6 +302,12 @@ where
WireMessage::ProtocolMessage(msg),
&metrics,
),
+ Versioned::VStaging(msg) => send_validation_message_vstaging(
+ &mut network_service,
+ peers,
+ WireMessage::ProtocolMessage(msg),
+ &metrics,
+ ),
}
}
NetworkBridgeMessage::SendValidationMessages(msgs) => {
@@ -319,6 +325,12 @@ where
WireMessage::ProtocolMessage(msg),
&metrics,
),
+ Versioned::VStaging(msg) => send_validation_message_vstaging(
+ &mut network_service,
+ peers,
+ WireMessage::ProtocolMessage(msg),
+ &metrics,
+ ),
}
}
}
@@ -336,6 +348,12 @@ where
WireMessage::ProtocolMessage(msg),
&metrics,
),
+ Versioned::VStaging(msg) => send_collation_message_vstaging(
+ &mut network_service,
+ peers,
+ WireMessage::ProtocolMessage(msg),
+ &metrics,
+ ),
}
}
NetworkBridgeMessage::SendCollationMessages(msgs) => {
@@ -353,6 +371,12 @@ where
WireMessage::ProtocolMessage(msg),
&metrics,
),
+ Versioned::VStaging(msg) => send_collation_message_vstaging(
+ &mut network_service,
+ peers,
+ WireMessage::ProtocolMessage(msg),
+ &metrics,
+ ),
}
}
}
@@ -581,7 +605,7 @@ async fn handle_network_messages(
NetworkBridgeEvent::PeerConnected(
peer.clone(),
role,
- 1,
+ version,
maybe_authority,
),
NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()),
@@ -590,14 +614,25 @@ async fn handle_network_messages(
)
.await;
- send_message(
- &mut network_service,
- vec![peer],
- PeerSet::Validation,
- version,
- WireMessage::<protocol_v1::ValidationProtocol>::ViewUpdate(local_view),
- &metrics,
- );
+ match version {
+ x if x == protocol_v1::VERSION => send_message(
+ &mut network_service,
+ vec![peer],
+ PeerSet::Validation,
+ version,
+ WireMessage::<protocol_v1::ValidationProtocol>::ViewUpdate(local_view),
+ &metrics,
+ ),
+ x if x == protocol_vstaging::VERSION => send_message(
+ &mut network_service,
+ vec![peer],
+ PeerSet::Validation,
+ version,
+ WireMessage::<protocol_vstaging::ValidationProtocol>::ViewUpdate(local_view),
+ &metrics,
+ ),
+ _ => unreachable!("version has just been checked to fall into the allowable categories; qed"),
+ }
},
PeerSet::Collation => {
dispatch_collation_events_to_all(
@@ -605,7 +640,7 @@ async fn handle_network_messages(
NetworkBridgeEvent::PeerConnected(
peer.clone(),
role,
- 1,
+ version,
maybe_authority,
),
NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()),
@@ -614,14 +649,25 @@ async fn handle_network_messages(
)
.await;
- send_message(
- &mut network_service,
- vec![peer],
- PeerSet::Collation,
- version,
- WireMessage::<protocol_v1::CollationProtocol>::ViewUpdate(local_view),
- &metrics,
- );
+ match version {
+ x if x == protocol_v1::VERSION => send_message(
+ &mut network_service,
+ vec![peer],
+ PeerSet::Collation,
+ version,
+ WireMessage::<protocol_v1::CollationProtocol>::ViewUpdate(local_view),
+ &metrics,
+ ),
+ x if x == protocol_vstaging::VERSION => send_message(
+ &mut network_service,
+ vec![peer],
+ PeerSet::Collation,
+ version,
+ WireMessage::<protocol_vstaging::CollationProtocol>::ViewUpdate(local_view),
+ &metrics,
+ ),
+ _ => unreachable!("version has just been checked to fall into the allowable categories; qed"),
+ }
},
}
},
@@ -758,8 +804,18 @@ async fn handle_network_messages(
if !v_messages.is_empty() {
let (events, reports) =
- if expected_versions[PeerSet::Validation] == Some(1) {
- handle_v1_peer_messages::<protocol_v1::ValidationProtocol, _>(
+ if expected_versions[PeerSet::Validation] == Some(protocol_v1::VERSION) {
+ handle_peer_messages::<protocol_v1::ValidationProtocol, _>(
+ remote.clone(),
+ PeerSet::Validation,
+ &mut shared.0.lock().validation_peers,
+ v_messages,
+ &metrics,
+ )
+ } else if expected_versions[PeerSet::Validation] ==
+ Some(protocol_vstaging::VERSION)
+ {
+ handle_peer_messages::<protocol_vstaging::ValidationProtocol, _>(
remote.clone(),
PeerSet::Validation,
&mut shared.0.lock().validation_peers,
@@ -773,7 +829,7 @@ async fn handle_network_messages(
"Major logic bug. Peer somehow has unsupported validation protocol version."
);
- never!("Only version 1 is supported; peer set connection checked above; qed");
+ never!("Only versions 1 and 2 are supported; peer set connection checked above; qed");
// If a peer somehow triggers this, we'll disconnect them
// eventually.
@@ -789,8 +845,18 @@ async fn handle_network_messages(
if !c_messages.is_empty() {
let (events, reports) =
- if expected_versions[PeerSet::Collation] == Some(1) {
- handle_v1_peer_messages::<protocol_v1::CollationProtocol, _>(
+ if expected_versions[PeerSet::Collation] == Some(protocol_v1::VERSION) {
+ handle_peer_messages::<protocol_v1::CollationProtocol, _>(
+ remote.clone(),
+ PeerSet::Collation,
+ &mut shared.0.lock().collation_peers,
+ c_messages,
+ &metrics,
+ )
+ } else if expected_versions[PeerSet::Collation] ==
+ Some(protocol_vstaging::VERSION)
+ {
+ handle_peer_messages::<protocol_vstaging::CollationProtocol, _>(
remote.clone(),
PeerSet::Collation,
&mut shared.0.lock().collation_peers,
@@ -804,7 +870,7 @@ async fn handle_network_messages(
"Major logic bug. Peer somehow has unsupported collation protocol version."
);
- never!("Only version 1 is supported; peer set connection checked above; qed");
+ never!("Only versions 1 and 2 are supported; peer set connection checked above; qed");
// If a peer somehow triggers this, we'll disconnect them
// eventually.
@@ -939,19 +1005,61 @@ fn update_our_view(
}
(
- shared.validation_peers.keys().cloned().collect::<Vec<_>>(),
- shared.collation_peers.keys().cloned().collect::<Vec<_>>(),
+ shared
+ .validation_peers
+ .iter()
+ .map(|(peer_id, data)| (peer_id.clone(), data.version))
+ .collect::<Vec<_>>(),
+ shared
+ .collation_peers
+ .iter()
+ .map(|(peer_id, data)| (peer_id.clone(), data.version))
+ .collect::<Vec<_>>(),
)
};
+ let filter_by_version = |peers: &[(PeerId, ProtocolVersion)], version| {
+ peers
+ .iter()
+ .filter(|(_, v)| v == &version)
+ .map(|(p, _)| p.clone())
+ .collect::<Vec<_>>()
+ };
+
+ let v1_validation_peers = filter_by_version(&validation_peers, protocol_v1::VERSION);
+ let v1_collation_peers = filter_by_version(&collation_peers, protocol_v1::VERSION);
+
+ let vstaging_validation_peers =
+ filter_by_version(&validation_peers, protocol_vstaging::VERSION);
+ let vstaging_collation_peers = filter_by_version(&collation_peers, protocol_vstaging::VERSION);
+
send_validation_message_v1(
net,
- validation_peers,
+ v1_validation_peers,
+ WireMessage::ViewUpdate(new_view.clone()),
+ metrics,
+ );
+
+ send_validation_message_vstaging(
+ net,
+ vstaging_validation_peers,
WireMessage::ViewUpdate(new_view.clone()),
metrics,
);
- send_collation_message_v1(net, collation_peers, WireMessage::ViewUpdate(new_view), metrics);
+ send_collation_message_v1(
+ net,
+ v1_collation_peers,
+ WireMessage::ViewUpdate(new_view.clone()),
+ metrics,
+ );
+
+ send_collation_message_vstaging(
+ net,
+ vstaging_collation_peers,
+ WireMessage::ViewUpdate(new_view),
+ metrics,
+ );
let our_view = OurView::new(
live_heads.iter().take(MAX_VIEW_HEADS).cloned().map(|a| (a.hash, a.span)),
@@ -971,7 +1079,7 @@ fn update_our_view(
// Handle messages on a specific v1 peer-set. The peer is expected to be connected on that
// peer-set.
-fn handle_v1_peer_messages<RawMessage: Decode, OutMessage: From<RawMessage>>(
+fn handle_peer_messages<RawMessage: Decode, OutMessage: From<RawMessage>>(
peer: PeerId,
peer_set: PeerSet,
 peers: &mut HashMap<PeerId, PeerData>,
@@ -1028,7 +1136,7 @@ fn send_validation_message_v1(
 message: WireMessage<protocol_v1::ValidationProtocol>,
metrics: &Metrics,
) {
- send_message(net, peers, PeerSet::Validation, 1, message, metrics);
+ send_message(net, peers, PeerSet::Validation, protocol_v1::VERSION, message, metrics);
}
fn send_collation_message_v1(
@@ -1037,7 +1145,25 @@ fn send_collation_message_v1(
 message: WireMessage<protocol_v1::CollationProtocol>,
metrics: &Metrics,
) {
- send_message(net, peers, PeerSet::Collation, 1, message, metrics)
+ send_message(net, peers, PeerSet::Collation, protocol_v1::VERSION, message, metrics)
+}
+
+fn send_validation_message_vstaging(
+ net: &mut impl Network,
+ peers: Vec<PeerId>,
+ message: WireMessage<protocol_vstaging::ValidationProtocol>,
+ metrics: &Metrics,
+) {
+ send_message(net, peers, PeerSet::Validation, protocol_vstaging::VERSION, message, metrics);
+}
+
+fn send_collation_message_vstaging(
+ net: &mut impl Network,
+ peers: Vec<PeerId>,
+ message: WireMessage<protocol_vstaging::CollationProtocol>,
+ metrics: &Metrics,
+) {
+ send_message(net, peers, PeerSet::Collation, protocol_vstaging::VERSION, message, metrics)
}
async fn dispatch_validation_event_to_all(
diff --git a/node/network/bridge/src/tests.rs b/node/network/bridge/src/tests.rs
index 80929580d165..60fdcf5dc2e4 100644
--- a/node/network/bridge/src/tests.rs
+++ b/node/network/bridge/src/tests.rs
@@ -1243,3 +1243,12 @@ fn our_view_updates_decreasing_order_and_limited_to_max() {
virtual_overseer
});
}
+
+// TODO [now]: test that vstaging peers are accepted.
+
+// TODO [now]: test that vstaging peers are sent view update wire messages with the
+// vstaging format.
+
+// TODO [now]: check that v2 messages are sent to the correct subsystem.
+
+// TODO [now]: check that `send_validation_message_vstaging` and `send_collation_message_vstaging` work correctly.
diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs
index 66b404551c52..af7e068a367a 100644
--- a/node/network/collator-protocol/src/collator_side/mod.rs
+++ b/node/network/collator-protocol/src/collator_side/mod.rs
@@ -33,7 +33,8 @@ use polkadot_node_network_protocol::{
v1::{self as request_v1, CollationFetchingRequest, CollationFetchingResponse},
IncomingRequest, IncomingRequestReceiver,
},
- v1 as protocol_v1, OurView, PeerId, UnifiedReputationChange as Rep, Versioned, View,
+ v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, ProtocolVersion,
+ UnifiedReputationChange as Rep, Versioned, View,
};
use polkadot_node_primitives::{CollationSecondedSignal, PoV, Statement};
use polkadot_node_subsystem::{
@@ -272,6 +273,11 @@ struct WaitingCollationFetches {
type ActiveCollationFetches =
 FuturesUnordered<Pin<Box<dyn Future<Output = (Hash, PeerId)> + Send + 'static>>>;
+struct PeerData {
+ view: View,
+ version: ProtocolVersion,
+}
+
struct State {
/// Our network peer id.
local_peer_id: PeerId,
@@ -285,7 +291,7 @@ struct State {
/// Track all active peers and their views
/// to determine what is relevant to them.
- peer_views: HashMap<PeerId, View>,
+ peer_data: HashMap<PeerId, PeerData>,
/// Our own view.
view: OurView,
@@ -332,7 +338,7 @@ impl State {
collator_pair,
metrics,
collating_on: Default::default(),
- peer_views: Default::default(),
+ peer_data: Default::default(),
view: Default::default(),
span_per_relay_parent: Default::default(),
collations: Default::default(),
@@ -344,12 +350,13 @@ impl State {
}
}
- /// Get all peers which have the given relay parent in their view.
- fn peers_interested_in_leaf(&self, relay_parent: &Hash) -> Vec<PeerId> {
- self.peer_views
+ /// Get all peers which have the given relay parent in their view along
+ /// with their protocol version.
+ fn peers_interested_in_leaf(&self, relay_parent: &Hash) -> Vec<(PeerId, ProtocolVersion)> {
+ self.peer_data
.iter()
- .filter(|(_, v)| v.contains(relay_parent))
- .map(|(peer, _)| *peer)
+ .filter(|(_, data)| data.view.contains(relay_parent))
+ .map(|(peer, data)| (*peer, data.version))
.collect()
}
}
@@ -451,8 +458,8 @@ async fn distribute_collation(
let interested = state.peers_interested_in_leaf(&relay_parent);
// Make sure already connected peers get collations:
- for peer_id in interested {
- advertise_collation(ctx, state, relay_parent, peer_id).await;
+ for (peer_id, version) in interested {
+ advertise_collation(ctx, state, relay_parent, peer_id, version).await;
}
Ok(())
@@ -521,9 +528,10 @@ async fn determine_our_validators(
Ok(current_validators)
}
-/// Issue a `Declare` collation message to the given `peer`.
+/// Issue a `Declare` collation message to the given `peer` on protocol version
+/// v1.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
-async fn declare<Context>(ctx: &mut Context, state: &mut State, peer: PeerId) {
+async fn declare_v1<Context>(ctx: &mut Context, state: &mut State, peer: PeerId) {
let declare_signature_payload = protocol_v1::declare_signature_payload(&state.local_peer_id);
if let Some(para_id) = state.collating_on {
@@ -541,6 +549,30 @@ async fn declare(ctx: &mut Context, state: &mut State, peer: PeerId) {
}
}
+/// Issue a `Declare` collation message to the given `peer` on protocol version
+/// vstaging.
+#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
+async fn declare_vstaging<Context>(ctx: &mut Context, state: &mut State, peer: PeerId) {
+ let declare_signature_payload =
+ protocol_vstaging::declare_signature_payload(&state.local_peer_id);
+
+ if let Some(para_id) = state.collating_on {
+ let wire_message = protocol_vstaging::CollatorProtocolMessage::Declare(
+ state.collator_pair.public(),
+ para_id,
+ state.collator_pair.sign(&declare_signature_payload),
+ );
+
+ ctx.send_message(NetworkBridgeMessage::SendCollationMessage(
+ vec![peer],
+ Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol(
+ wire_message,
+ )),
+ ))
+ .await;
+ }
+}
+
/// Issue a connection request to a set of validators and
/// revoke the previous connection request.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
@@ -569,6 +601,7 @@ async fn advertise_collation(
state: &mut State,
relay_parent: Hash,
peer: PeerId,
+ version: ProtocolVersion,
) {
let should_advertise = state
.our_validators_groups
@@ -606,13 +639,26 @@ async fn advertise_collation(
},
}
- let wire_message = protocol_v1::CollatorProtocolMessage::AdvertiseCollation(relay_parent);
+ if version == protocol_v1::VERSION {
+ let wire_message = protocol_v1::CollatorProtocolMessage::AdvertiseCollation(relay_parent);
- ctx.send_message(NetworkBridgeMessage::SendCollationMessage(
- vec![peer.clone()],
- Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)),
- ))
- .await;
+ ctx.send_message(NetworkBridgeMessage::SendCollationMessage(
+ vec![peer.clone()],
+ Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)),
+ ))
+ .await;
+ } else if version == protocol_vstaging::VERSION {
+ let wire_message =
+ protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation(relay_parent);
+
+ ctx.send_message(NetworkBridgeMessage::SendCollationMessage(
+ vec![peer.clone()],
+ Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol(
+ wire_message,
+ )),
+ ))
+ .await;
+ }
if let Some(validators) = state.our_validators_groups.get_mut(&relay_parent) {
validators.advertised_to_peer(&state.peer_ids, &peer);
@@ -738,12 +784,11 @@ async fn handle_incoming_peer_message(
runtime: &mut RuntimeInfo,
state: &mut State,
origin: PeerId,
- msg: protocol_v1::CollatorProtocolMessage,
+ msg: net_protocol::CollatorProtocolMessage,
) -> Result<()> {
- use protocol_v1::CollatorProtocolMessage::*;
-
match msg {
- Declare(_, _, _) => {
+ Versioned::V1(protocol_v1::CollatorProtocolMessage::Declare(_, _, _)) |
+ Versioned::VStaging(protocol_vstaging::CollatorProtocolMessage::Declare(_, _, _)) => {
gum::trace!(
target: LOG_TARGET,
?origin,
@@ -754,7 +799,8 @@ async fn handle_incoming_peer_message(
ctx.send_message(NetworkBridgeMessage::DisconnectPeer(origin, PeerSet::Collation))
.await;
},
- AdvertiseCollation(_) => {
+ Versioned::V1(protocol_v1::CollatorProtocolMessage::AdvertiseCollation(_)) |
+ Versioned::VStaging(protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation(_)) => {
gum::trace!(
target: LOG_TARGET,
?origin,
@@ -771,7 +817,14 @@ async fn handle_incoming_peer_message(
ctx.send_message(NetworkBridgeMessage::DisconnectPeer(origin, PeerSet::Collation))
.await;
},
- CollationSeconded(relay_parent, statement) => {
+ Versioned::V1(protocol_v1::CollatorProtocolMessage::CollationSeconded(
+ relay_parent,
+ statement,
+ )) |
+ Versioned::VStaging(protocol_vstaging::CollatorProtocolMessage::CollationSeconded(
+ relay_parent,
+ statement,
+ )) =>
if !matches!(statement.unchecked_payload(), Statement::Seconded(_)) {
gum::warn!(
target: LOG_TARGET,
@@ -804,8 +857,7 @@ async fn handle_incoming_peer_message(
"received an unexpected `CollationSeconded`: unknown statement",
);
}
- }
- },
+ },
}
Ok(())
@@ -892,14 +944,23 @@ async fn handle_peer_view_change(
peer_id: PeerId,
view: View,
) {
- let current = state.peer_views.entry(peer_id.clone()).or_default();
+ let (added, version) = {
+ let peer_data = match state.peer_data.get_mut(&peer_id) {
+ Some(pd) => pd,
+ None => return,
+ };
- let added: Vec<Hash> = view.difference(&*current).cloned().collect();
+ let current = &mut peer_data.view;
- *current = view;
+ let added: Vec<Hash> = view.difference(&*current).cloned().collect();
+
+ *current = view;
+
+ (added, peer_data.version)
+ };
for added in added.into_iter() {
- advertise_collation(ctx, state, added, peer_id.clone()).await;
+ advertise_collation(ctx, state, added, peer_id.clone(), version).await;
}
}
@@ -914,7 +975,7 @@ async fn handle_network_msg(
use NetworkBridgeEvent::*;
match bridge_message {
- PeerConnected(peer_id, observed_role, _, maybe_authority) => {
+ PeerConnected(peer_id, observed_role, version, maybe_authority) => {
// If it is possible that a disconnected validator would attempt a reconnect
// it should be handled here.
gum::trace!(target: LOG_TARGET, ?peer_id, ?observed_role, "Peer connected");
@@ -926,8 +987,13 @@ async fn handle_network_msg(
"Connected to requested validator"
);
state.peer_ids.insert(peer_id, authority_ids);
+ state.peer_data.insert(peer_id, PeerData { view: View::default(), version });
- declare(ctx, state, peer_id).await;
+ if version == protocol_v1::VERSION {
+ declare_v1(ctx, state, peer_id).await;
+ } else if version == protocol_vstaging::VERSION {
+ declare_vstaging(ctx, state, peer_id).await;
+ }
}
},
PeerViewChange(peer_id, view) => {
@@ -936,14 +1002,14 @@ async fn handle_network_msg(
},
PeerDisconnected(peer_id) => {
gum::trace!(target: LOG_TARGET, ?peer_id, "Peer disconnected");
- state.peer_views.remove(&peer_id);
+ state.peer_data.remove(&peer_id);
state.peer_ids.remove(&peer_id);
},
OurViewChange(view) => {
gum::trace!(target: LOG_TARGET, ?view, "Own view change");
handle_our_view_change(state, view).await?;
},
- PeerMessage(remote, Versioned::V1(msg)) => {
+ PeerMessage(remote, msg) => {
handle_incoming_peer_message(ctx, runtime, state, remote, msg).await?;
},
NewGossipTopology { .. } => {
diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs
index 592feaf9124a..f65a3aa4a0bc 100644
--- a/node/network/collator-protocol/src/validator_side/mod.rs
+++ b/node/network/collator-protocol/src/validator_side/mod.rs
@@ -41,7 +41,8 @@ use polkadot_node_network_protocol::{
v1::{CollationFetchingRequest, CollationFetchingResponse},
OutgoingRequest, Requests,
},
- v1 as protocol_v1, OurView, PeerId, UnifiedReputationChange as Rep, Versioned, View,
+ v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, ProtocolVersion,
+ UnifiedReputationChange as Rep, Versioned, View,
};
use polkadot_node_primitives::{PoV, SignedFullStatement};
use polkadot_node_subsystem::{
@@ -53,7 +54,10 @@ use polkadot_node_subsystem::{
overseer, FromOrchestra, OverseerSignal, PerLeafSpan, SubsystemSender,
};
use polkadot_node_subsystem_util::metrics::{self, prometheus};
-use polkadot_primitives::v2::{CandidateReceipt, CollatorId, Hash, Id as ParaId};
+use polkadot_primitives::v2::{
+ CandidateReceipt, CollatorId, Hash, Id as ParaId, OccupiedCoreAssumption,
+ PersistedValidationData,
+};
use crate::error::Result;
@@ -244,11 +248,12 @@ enum AdvertisementError {
struct PeerData {
view: View,
state: PeerState,
+ version: ProtocolVersion,
}
impl PeerData {
- fn new(view: View) -> Self {
- PeerData { view, state: PeerState::Connected(Instant::now()) }
+ fn new(view: View, version: ProtocolVersion) -> Self {
+ PeerData { view, state: PeerState::Connected(Instant::now()), version }
}
/// Update the view, clearing all advertisements that are no longer in the
@@ -343,12 +348,6 @@ impl PeerData {
}
}
-impl Default for PeerData {
- fn default() -> Self {
- PeerData::new(Default::default())
- }
-}
-
struct GroupAssignments {
current: Option<ParaId>,
}
@@ -706,17 +705,33 @@ async fn note_good_collation(
async fn notify_collation_seconded(
sender: &mut impl overseer::CollatorProtocolSenderTrait,
peer_id: PeerId,
+ peer_version: ProtocolVersion,
relay_parent: Hash,
statement: SignedFullStatement,
) {
- let wire_message =
- protocol_v1::CollatorProtocolMessage::CollationSeconded(relay_parent, statement.into());
- sender
- .send_message(NetworkBridgeMessage::SendCollationMessage(
- vec![peer_id],
- Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)),
- ))
- .await;
+ if peer_version == protocol_v1::VERSION {
+ let wire_message =
+ protocol_v1::CollatorProtocolMessage::CollationSeconded(relay_parent, statement.into());
+ sender
+ .send_message(NetworkBridgeMessage::SendCollationMessage(
+ vec![peer_id],
+ Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)),
+ ))
+ .await;
+ } else if peer_version == protocol_vstaging::VERSION {
+ let wire_message = protocol_vstaging::CollatorProtocolMessage::CollationSeconded(
+ relay_parent,
+ statement.into(),
+ );
+ sender
+ .send_message(NetworkBridgeMessage::SendCollationMessage(
+ vec![peer_id],
+ Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol(
+ wire_message,
+ )),
+ ))
+ .await;
+ }
modify_reputation(sender, peer_id, BENEFIT_NOTIFY_GOOD).await;
}
@@ -724,8 +739,13 @@ async fn notify_collation_seconded(
/// A peer's view has changed. A number of things should be done:
/// - Ongoing collation requests have to be canceled.
/// - Advertisements by this peer that are no longer relevant have to be removed.
+///
+/// This requires that the peer has an entry in the peer-data map.
async fn handle_peer_view_change(state: &mut State, peer_id: PeerId, view: View) -> Result<()> {
- let peer_data = state.peer_data.entry(peer_id.clone()).or_default();
+ let peer_data = match state.peer_data.get_mut(&peer_id) {
+ None => return Ok(()),
+ Some(pd) => pd,
+ };
peer_data.update_view(view);
state
@@ -813,12 +833,20 @@ async fn process_incoming_peer_message(
ctx: &mut Context,
state: &mut State,
origin: PeerId,
- msg: protocol_v1::CollatorProtocolMessage,
+ msg: net_protocol::CollatorProtocolMessage,
) {
- use protocol_v1::CollatorProtocolMessage::*;
use sp_runtime::traits::AppVerify;
match msg {
- Declare(collator_id, para_id, signature) => {
+ Versioned::V1(protocol_v1::CollatorProtocolMessage::Declare(
+ collator_id,
+ para_id,
+ signature,
+ )) |
+ Versioned::VStaging(protocol_vstaging::CollatorProtocolMessage::Declare(
+ collator_id,
+ para_id,
+ signature,
+ )) => {
if collator_peer_id(&state.peer_data, &collator_id).is_some() {
modify_reputation(ctx.sender(), origin, COST_UNEXPECTED_MESSAGE).await;
return
@@ -884,7 +912,10 @@ async fn process_incoming_peer_message(
disconnect_peer(ctx.sender(), origin).await;
}
},
- AdvertiseCollation(relay_parent) => {
+ Versioned::V1(protocol_v1::CollatorProtocolMessage::AdvertiseCollation(relay_parent)) |
+ Versioned::VStaging(protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation(
+ relay_parent,
+ )) => {
let _span = state
.span_per_relay_parent
.get(&relay_parent)
@@ -972,7 +1003,11 @@ async fn process_incoming_peer_message(
},
}
},
- CollationSeconded(_, _) => {
+ Versioned::V1(protocol_v1::CollatorProtocolMessage::CollationSeconded(_, _)) |
+ Versioned::VStaging(protocol_vstaging::CollatorProtocolMessage::CollationSeconded(
+ _,
+ _,
+ )) => {
gum::warn!(
target: LOG_TARGET,
peer_id = ?origin,
@@ -1061,8 +1096,11 @@ async fn handle_network_msg(
use NetworkBridgeEvent::*;
match bridge_message {
- PeerConnected(peer_id, _role, _version, _) => {
- state.peer_data.entry(peer_id).or_default();
+ PeerConnected(peer_id, _role, version, _) => {
+ state
+ .peer_data
+ .entry(peer_id)
+ .or_insert_with(|| PeerData::new(View::default(), version));
state.metrics.note_collator_peer_count(state.peer_data.len());
},
PeerDisconnected(peer_id) => {
@@ -1078,7 +1116,7 @@ async fn handle_network_msg(
OurViewChange(view) => {
handle_our_view_change(ctx, state, keystore, view).await?;
},
- PeerMessage(remote, Versioned::V1(msg)) => {
+ PeerMessage(remote, msg) => {
process_incoming_peer_message(ctx, state, remote, msg).await;
},
}
@@ -1129,7 +1167,16 @@ async fn process_msg(
let (collator_id, pending_collation) = collation_event;
let PendingCollation { relay_parent, peer_id, .. } = pending_collation;
note_good_collation(ctx.sender(), &state.peer_data, collator_id).await;
- notify_collation_seconded(ctx.sender(), peer_id, relay_parent, stmt).await;
+ if let Some(pd) = state.peer_data.get(&peer_id) {
+ notify_collation_seconded(
+ ctx.sender(),
+ peer_id,
+ pd.version,
+ relay_parent,
+ stmt,
+ )
+ .await;
+ }
if let Some(collations) = state.collations_per_relay_parent.get_mut(&parent) {
collations.status = CollationStatus::Seconded;
@@ -1307,6 +1354,39 @@ async fn dequeue_next_collation_and_fetch(
}
}
+#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
+async fn request_persisted_validation_data(
+ ctx: &mut Context,
+ relay_parent: Hash,
+ para_id: ParaId,
+) -> Option<PersistedValidationData> {
+ // TODO [https://github.com/paritytech/polkadot/issues/5054]
+ //
+ // As of https://github.com/paritytech/polkadot/pull/5557 the
+ // `Second` message requires the `PersistedValidationData` to be
+ // supplied.
+ //
+ // Without asynchronous backing, this can be easily fetched from the
+ // chain state.
+ //
+ // This assumes the core is _scheduled_, in keeping with the effective
+ // current behavior. If the core is occupied, we simply don't return
+ // anything. Likewise with runtime API errors, which are rare.
+ let res = polkadot_node_subsystem_util::request_persisted_validation_data(
+ relay_parent,
+ para_id,
+ OccupiedCoreAssumption::Free,
+ ctx.sender(),
+ )
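+ // The first `.await` issues the runtime API request and returns the
+ // response receiver; the second `.await` waits for the response itself.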
+ .await
+ .await;
+
+ match res {
+ Ok(Ok(Some(pvd))) => Some(pvd),
+ _ => None,
+ }
+}
+
/// Handle a fetched collation result.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn handle_collation_fetched_result(
@@ -1351,13 +1431,31 @@ async fn handle_collation_fetched_result(
if let Entry::Vacant(entry) = state.pending_candidates.entry(relay_parent) {
collation_event.1.commitments_hash = Some(candidate_receipt.commitments_hash);
- ctx.sender()
- .send_message(CandidateBackingMessage::Second(
+
+ if let Some(pvd) = request_persisted_validation_data(
+ ctx,
+ candidate_receipt.descriptor().relay_parent,
+ candidate_receipt.descriptor().para_id,
+ )
+ .await
+ {
+ // TODO [https://github.com/paritytech/polkadot/issues/5054]
+ //
+ // If PVD isn't available (core occupied) then we'll silently
+ // just not second this. But prior to asynchronous backing
+ // we wouldn't second anyway because the core is occupied.
+ //
+ // The proper refactoring would be to accept declares from collators
+ // but not even fetch from them if the core is occupied. Given issue 5054,
+ // there's no reason to do this right now.
+ ctx.send_message(CandidateBackingMessage::Second(
relay_parent.clone(),
candidate_receipt,
+ pvd,
pov,
))
.await;
+ }
entry.insert(collation_event);
} else {
diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs
index 1cebcc64f78e..eaf42c550b86 100644
--- a/node/network/gossip-support/src/lib.rs
+++ b/node/network/gossip-support/src/lib.rs
@@ -402,8 +402,12 @@ where
NetworkBridgeEvent::OurViewChange(_) => {},
NetworkBridgeEvent::PeerViewChange(_, _) => {},
NetworkBridgeEvent::NewGossipTopology { .. } => {},
- NetworkBridgeEvent::PeerMessage(_, Versioned::V1(v)) => {
- match v {};
+ NetworkBridgeEvent::PeerMessage(_, message) => {
+ // Matching on void (uninhabited) enums: no variants exist, so these
+ // arms are exhaustive and compile down to LLVM `unreachable`.
+ match message {
+ Versioned::V1(m) => match m {},
+ Versioned::VStaging(m) => match m {},
+ }
},
}
}
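To see why the empty inner matches type-check: the per-version gossip-support message enums have no variants, so a match with no arms already covers every (zero) case. A standalone illustration of the trick (the `Void` type here is hypothetical):

	// An enum with no variants can never be constructed.
	enum Void {}

	// Since no value of `Void` exists, the empty match is exhaustive and the
	// function body is provably unreachable.
	fn absurd(v: Void) -> ! {
		match v {}
	}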
diff --git a/node/network/protocol/Cargo.toml b/node/network/protocol/Cargo.toml
index a6554d048ba0..973be709ba6e 100644
--- a/node/network/protocol/Cargo.toml
+++ b/node/network/protocol/Cargo.toml
@@ -23,3 +23,6 @@ gum = { package = "tracing-gum", path = "../../gum" }
[dev-dependencies]
rand_chacha = "0.3.1"
+
+[features]
+network-protocol-staging = []
diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs
index 78727ae67e83..0fb51f8f4315 100644
--- a/node/network/protocol/src/lib.rs
+++ b/node/network/protocol/src/lib.rs
@@ -253,22 +253,26 @@ impl View {
/// A protocol-versioned type.
#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum Versioned<V1> {
+pub enum Versioned<V1, VStaging> {
/// V1 type.
V1(V1),
+ /// VStaging type.
+ VStaging(VStaging),
}
-impl<V1: Clone> Versioned<&'_ V1> {
+impl<V1: Clone, VStaging: Clone> Versioned<&'_ V1, &'_ VStaging> {
/// Convert to a fully-owned version of the message.
- pub fn clone_inner(&self) -> Versioned<V1> {
+ pub fn clone_inner(&self) -> Versioned<V1, VStaging> {
match *self {
Versioned::V1(inner) => Versioned::V1(inner.clone()),
+ Versioned::VStaging(inner) => Versioned::VStaging(inner.clone()),
}
}
}
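A small usage sketch of the widened enum; `V1Msg` and `VStagingMsg` are hypothetical stand-ins for the real per-version protocol types:

	#[derive(Debug, Clone, PartialEq, Eq)]
	struct V1Msg(u32);
	#[derive(Debug, Clone, PartialEq, Eq)]
	struct VStagingMsg(u32);

	// Dispatch on the negotiated version.
	fn version_label(msg: &Versioned<V1Msg, VStagingMsg>) -> &'static str {
		match msg {
			Versioned::V1(_) => "v1",
			Versioned::VStaging(_) => "vstaging",
		}
	}

	// `clone_inner` converts a by-reference value into a fully-owned one.
	fn to_owned(msg: Versioned<&V1Msg, &VStagingMsg>) -> Versioned<V1Msg, VStagingMsg> {
		msg.clone_inner()
	}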
/// All supported versions of the validation protocol message.
-pub type VersionedValidationProtocol = Versioned<v1::ValidationProtocol>;
+pub type VersionedValidationProtocol =
+ Versioned<v1::ValidationProtocol, vstaging::ValidationProtocol>;
impl From<v1::ValidationProtocol> for VersionedValidationProtocol {
fn from(v1: v1::ValidationProtocol) -> Self {
@@ -276,8 +280,14 @@ impl From<v1::ValidationProtocol> for VersionedValidationProtocol {
}
}
+impl From<vstaging::ValidationProtocol> for VersionedValidationProtocol {
+ fn from(vstaging: vstaging::ValidationProtocol) -> Self {
+ VersionedValidationProtocol::VStaging(vstaging)
+ }
+}
+
/// All supported versions of the collation protocol message.
-pub type VersionedCollationProtocol = Versioned<v1::CollationProtocol>;
+pub type VersionedCollationProtocol = Versioned<v1::CollationProtocol, vstaging::CollationProtocol>;
impl From<v1::CollationProtocol> for VersionedCollationProtocol {
fn from(v1: v1::CollationProtocol) -> Self {
@@ -285,12 +295,19 @@ impl From<v1::CollationProtocol> for VersionedCollationProtocol {
}
}
+impl From<vstaging::CollationProtocol> for VersionedCollationProtocol {
+ fn from(vstaging: vstaging::CollationProtocol) -> Self {
+ VersionedCollationProtocol::VStaging(vstaging)
+ }
+}
+
macro_rules! impl_versioned_full_protocol_from {
($from:ty, $out:ty, $variant:ident) => {
impl From<$from> for $out {
fn from(versioned_from: $from) -> $out {
match versioned_from {
Versioned::V1(x) => Versioned::V1(x.into()),
+ Versioned::VStaging(x) => Versioned::VStaging(x.into()),
}
}
}
@@ -300,7 +317,12 @@ macro_rules! impl_versioned_full_protocol_from {
/// Implement `TryFrom` for one versioned enum variant into the inner type.
/// `$m_ty::$variant(inner) -> Ok(inner)`
macro_rules! impl_versioned_try_from {
- ($from:ty, $out:ty, $v1_pat:pat => $v1_out:expr) => {
+ (
+ $from:ty,
+ $out:ty,
+ $v1_pat:pat => $v1_out:expr,
+ $vstaging_pat:pat => $vstaging_out:expr
+ ) => {
impl TryFrom<$from> for $out {
type Error = crate::WrongVariant;
@@ -308,6 +330,7 @@ macro_rules! impl_versioned_try_from {
#[allow(unreachable_patterns)] // when there is only one variant
match x {
Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out)),
+ Versioned::VStaging($vstaging_pat) => Ok(Versioned::VStaging($vstaging_out)),
_ => Err(crate::WrongVariant),
}
}
@@ -320,6 +343,8 @@ macro_rules! impl_versioned_try_from {
#[allow(unreachable_patterns)] // when there is only one variant
match x {
Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out.clone())),
+ Versioned::VStaging($vstaging_pat) =>
+ Ok(Versioned::VStaging($vstaging_out.clone())),
_ => Err(crate::WrongVariant),
}
}
@@ -328,7 +353,8 @@ macro_rules! impl_versioned_try_from {
}
/// Version-annotated messages used by the bitfield distribution subsystem.
-pub type BitfieldDistributionMessage = Versioned<v1::BitfieldDistributionMessage>;
+pub type BitfieldDistributionMessage =
+ Versioned<v1::BitfieldDistributionMessage, vstaging::BitfieldDistributionMessage>;
impl_versioned_full_protocol_from!(
BitfieldDistributionMessage,
VersionedValidationProtocol,
@@ -337,11 +363,13 @@ impl_versioned_full_protocol_from!(
impl_versioned_try_from!(
VersionedValidationProtocol,
BitfieldDistributionMessage,
- v1::ValidationProtocol::BitfieldDistribution(x) => x
+ v1::ValidationProtocol::BitfieldDistribution(x) => x,
+ vstaging::ValidationProtocol::BitfieldDistribution(x) => x
);
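For clarity, roughly what this invocation expands to for the owned case (the macro also emits an analogous impl over `&'a VersionedValidationProtocol`):

	impl TryFrom<VersionedValidationProtocol> for BitfieldDistributionMessage {
		type Error = crate::WrongVariant;

		fn try_from(x: VersionedValidationProtocol) -> Result<Self, Self::Error> {
			#[allow(unreachable_patterns)]
			match x {
				Versioned::V1(v1::ValidationProtocol::BitfieldDistribution(x)) =>
					Ok(Versioned::V1(x)),
				Versioned::VStaging(vstaging::ValidationProtocol::BitfieldDistribution(x)) =>
					Ok(Versioned::VStaging(x)),
				_ => Err(crate::WrongVariant),
			}
		}
	}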
/// Version-annotated messages used by the statement distribution subsystem.
-pub type StatementDistributionMessage = Versioned<v1::StatementDistributionMessage>;
+pub type StatementDistributionMessage =
+ Versioned<v1::StatementDistributionMessage, vstaging::StatementDistributionMessage>;
impl_versioned_full_protocol_from!(
StatementDistributionMessage,
VersionedValidationProtocol,
@@ -350,11 +378,13 @@ impl_versioned_full_protocol_from!(
impl_versioned_try_from!(
VersionedValidationProtocol,
StatementDistributionMessage,
- v1::ValidationProtocol::StatementDistribution(x) => x
+ v1::ValidationProtocol::StatementDistribution(x) => x,
+ vstaging::ValidationProtocol::StatementDistribution(x) => x
);
/// Version-annotated messages used by the approval distribution subsystem.
-pub type ApprovalDistributionMessage = Versioned<v1::ApprovalDistributionMessage>;
+pub type ApprovalDistributionMessage =
+ Versioned<v1::ApprovalDistributionMessage, vstaging::ApprovalDistributionMessage>;
impl_versioned_full_protocol_from!(
ApprovalDistributionMessage,
VersionedValidationProtocol,
@@ -363,11 +393,14 @@ impl_versioned_full_protocol_from!(
impl_versioned_try_from!(
VersionedValidationProtocol,
ApprovalDistributionMessage,
- v1::ValidationProtocol::ApprovalDistribution(x) => x
+ v1::ValidationProtocol::ApprovalDistribution(x) => x,
+ vstaging::ValidationProtocol::ApprovalDistribution(x) => x
);
/// Version-annotated messages used by the gossip-support subsystem (this is void).
-pub type GossipSupportNetworkMessage = Versioned<v1::GossipSupportNetworkMessage>;
+pub type GossipSupportNetworkMessage =
+ Versioned<v1::GossipSupportNetworkMessage, vstaging::GossipSupportNetworkMessage>;
// This is a void enum placeholder, so never gets sent over the wire.
impl TryFrom<VersionedValidationProtocol> for GossipSupportNetworkMessage {
type Error = WrongVariant;
@@ -384,7 +417,8 @@ impl<'a> TryFrom<&'a VersionedValidationProtocol> for GossipSupportNetworkMessage
}
/// Version-annotated messages used by the collator protocol subsystem.
-pub type CollatorProtocolMessage = Versioned<v1::CollatorProtocolMessage>;
+pub type CollatorProtocolMessage =
+ Versioned<v1::CollatorProtocolMessage, vstaging::CollatorProtocolMessage>;
impl_versioned_full_protocol_from!(
CollatorProtocolMessage,
VersionedCollationProtocol,
@@ -393,7 +427,8 @@ impl_versioned_full_protocol_from!(
impl_versioned_try_from!(
VersionedCollationProtocol,
CollatorProtocolMessage,
- v1::CollationProtocol::CollatorProtocol(x) => x
+ v1::CollationProtocol::CollatorProtocol(x) => x,
+ vstaging::CollationProtocol::CollatorProtocol(x) => x
);
/// v1 notification protocol types.
@@ -410,6 +445,174 @@ pub mod v1 {
UncheckedSignedFullStatement,
};
+ use super::ProtocolVersion;
+
+ /// The version of the v1 network protocol.
+ pub const VERSION: ProtocolVersion = 1;
+
+ /// Network messages used by the bitfield distribution subsystem.
+ #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
+ pub enum BitfieldDistributionMessage {
+ /// A signed availability bitfield for a given relay-parent hash.
+ #[codec(index = 0)]
+ Bitfield(Hash, UncheckedSignedAvailabilityBitfield),
+ }
+
+ /// Network messages used by the statement distribution subsystem.
+ #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
+ pub enum StatementDistributionMessage {
+ /// A signed full statement under a given relay-parent.
+ #[codec(index = 0)]
+ Statement(Hash, UncheckedSignedFullStatement),
+ /// Seconded statement with large payload (e.g. containing a runtime upgrade).
+ ///
+ /// We only gossip the hash in that case; the actual payload can be fetched from the
+ /// sending node via request/response.
+ #[codec(index = 1)]
+ LargeStatement(StatementMetadata),
+ }
+
+ /// Data that makes a statement unique.
+ #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq, Hash)]
+ pub struct StatementMetadata {
+ /// Relay parent this statement is relevant under.
+ pub relay_parent: Hash,
+ /// Hash of the candidate that got validated.
+ pub candidate_hash: CandidateHash,
+ /// Validator that attested the validity.
+ pub signed_by: ValidatorIndex,
+ /// Signature of seconding validator.
+ pub signature: ValidatorSignature,
+ }
+
+ impl StatementDistributionMessage {
+ /// Get fingerprint describing the contained statement uniquely.
+ pub fn get_fingerprint(&self) -> (CompactStatement, ValidatorIndex) {
+ match self {
+ Self::Statement(_, statement) => (
+ statement.unchecked_payload().to_compact(),
+ statement.unchecked_validator_index(),
+ ),
+ Self::LargeStatement(meta) =>
+ (CompactStatement::Seconded(meta.candidate_hash), meta.signed_by),
+ }
+ }
+
+ /// Get the signature from the statement.
+ pub fn get_signature(&self) -> ValidatorSignature {
+ match self {
+ Self::Statement(_, statement) => statement.unchecked_signature().clone(),
+ Self::LargeStatement(metadata) => metadata.signature.clone(),
+ }
+ }
+
+ /// Get contained relay parent.
+ pub fn get_relay_parent(&self) -> Hash {
+ match self {
+ Self::Statement(r, _) => *r,
+ Self::LargeStatement(meta) => meta.relay_parent,
+ }
+ }
+
+ /// Whether this message contains a large statement.
+ pub fn is_large_statement(&self) -> bool {
+ if let Self::LargeStatement(_) = self {
+ true
+ } else {
+ false
+ }
+ }
+ }
+
+ /// Network messages used by the approval distribution subsystem.
+ #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
+ pub enum ApprovalDistributionMessage {
+ /// Assignments for candidates in recent, unfinalized blocks.
+ ///
+ /// Actually checking the assignment may yield a different result.
+ #[codec(index = 0)]
+ Assignments(Vec<(IndirectAssignmentCert, CandidateIndex)>),
+ /// Approvals for candidates in some recent, unfinalized block.
+ #[codec(index = 1)]
+ Approvals(Vec<IndirectSignedApprovalVote>),
+ }
+
+ /// Dummy network message type, so we will receive connect/disconnect events.
+ #[derive(Debug, Clone, PartialEq, Eq)]
+ pub enum GossipSupportNetworkMessage {}
+
+ /// Network messages used by the collator protocol subsystem
+ #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
+ pub enum CollatorProtocolMessage {
+ /// Declare the intent to advertise collations under a collator ID, attaching a
+ /// signature of the `PeerId` of the node using the given collator ID key.
+ #[codec(index = 0)]
+ Declare(CollatorId, ParaId, CollatorSignature),
+ /// Advertise a collation to a validator. Can only be sent once the peer has
+ /// declared that they are a collator with given ID.
+ #[codec(index = 1)]
+ AdvertiseCollation(Hash),
+ /// A collation sent to a validator was seconded.
+ #[codec(index = 4)]
+ CollationSeconded(Hash, UncheckedSignedFullStatement),
+ }
+
+ /// All network messages on the validation peer-set.
+ #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq, derive_more::From)]
+ pub enum ValidationProtocol {
+ /// Bitfield distribution messages
+ #[codec(index = 1)]
+ #[from]
+ BitfieldDistribution(BitfieldDistributionMessage),
+ /// Statement distribution messages
+ #[codec(index = 3)]
+ #[from]
+ StatementDistribution(StatementDistributionMessage),
+ /// Approval distribution messages
+ #[codec(index = 4)]
+ #[from]
+ ApprovalDistribution(ApprovalDistributionMessage),
+ }
+
+ /// All network messages on the collation peer-set.
+ #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq, derive_more::From)]
+ pub enum CollationProtocol {
+ /// Collator protocol messages
+ #[codec(index = 0)]
+ #[from]
+ CollatorProtocol(CollatorProtocolMessage),
+ }
+
+ /// Get the payload that should be signed and included in a `Declare` message.
+ ///
+ /// The payload is the local peer id of the node, which serves to prove that it
+ /// controls the collator key it is declaring an intention to collate under.
+ pub fn declare_signature_payload(peer_id: &sc_network::PeerId) -> Vec<u8> {
+ let mut payload = peer_id.to_bytes();
+ payload.extend_from_slice(b"COLL");
+ payload
+ }
+}
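To make the signing scheme concrete, a sketch of the matching check on the validator side (mirroring the `AppVerify`-based verification in `process_incoming_peer_message`; the function itself is illustrative, not part of this diff):

	use polkadot_primitives::v2::{CollatorId, CollatorSignature};
	use sp_runtime::traits::AppVerify;

	// Returns true iff `signature` signs the `Declare` payload for the
	// sending peer under the declared collator key.
	fn declare_is_valid(
		origin: &sc_network::PeerId,
		collator_id: &CollatorId,
		signature: &CollatorSignature,
	) -> bool {
		let payload = v1::declare_signature_payload(origin);
		signature.verify(&*payload, collator_id)
	}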
+
+/// vstaging network protocol types.
+pub mod vstaging {
+ use parity_scale_codec::{Decode, Encode};
+
+ use polkadot_primitives::vstaging::{
+ CandidateHash, CandidateIndex, CollatorId, CollatorSignature, CompactStatement, Hash,
+ Id as ParaId, UncheckedSignedAvailabilityBitfield, ValidatorIndex, ValidatorSignature,
+ };
+
+ use polkadot_node_primitives::{
+ approval::{IndirectAssignmentCert, IndirectSignedApprovalVote},
+ UncheckedSignedFullStatement,
+ };
+
+ use super::ProtocolVersion;
+
+ /// The version of the vstaging network protocol.
+ pub const VERSION: ProtocolVersion = 2;
+
/// Network messages used by the bitfield distribution subsystem.
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
pub enum BitfieldDistributionMessage {
diff --git a/node/network/protocol/src/peer_set.rs b/node/network/protocol/src/peer_set.rs
index 400b36e3d4c5..4a13fb766554 100644
--- a/node/network/protocol/src/peer_set.rs
+++ b/node/network/protocol/src/peer_set.rs
@@ -16,7 +16,7 @@
//! All peersets and protocols used for parachains.
-use super::ProtocolVersion;
+use super::{v1 as protocol_v1, vstaging as protocol_vstaging, ProtocolVersion};
use sc_network::config::{NonDefaultSetConfig, SetConfig};
use std::{
borrow::Cow,
@@ -28,11 +28,24 @@ use strum::{EnumIter, IntoEnumIterator};
const VALIDATION_PROTOCOL_V1: &str = "/polkadot/validation/1";
const COLLATION_PROTOCOL_V1: &str = "/polkadot/collation/1";
+const VALIDATION_PROTOCOL_VSTAGING: &str = "/polkadot/validation/2";
+const COLLATION_PROTOCOL_VSTAGING: &str = "/polkadot/collation/2";
+
/// The default validation protocol version.
-pub const DEFAULT_VALIDATION_PROTOCOL_VERSION: ProtocolVersion = 1;
+pub const DEFAULT_VALIDATION_PROTOCOL_VERSION: ProtocolVersion =
+ if cfg!(feature = "network-protocol-staging") {
+ protocol_vstaging::VERSION
+ } else {
+ protocol_v1::VERSION
+ };
/// The default collation protocol version.
-pub const DEFAULT_COLLATION_PROTOCOL_VERSION: ProtocolVersion = 1;
+pub const DEFAULT_COLLATION_PROTOCOL_VERSION: ProtocolVersion =
+ if cfg!(feature = "network-protocol-staging") {
+ protocol_vstaging::VERSION
+ } else {
+ protocol_v1::VERSION
+ };
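A note on the mechanism: `cfg!(...)` expands to a literal `true`/`false`, and `if`/`else` is allowed in `const` initializers, so the default version is fixed entirely at compile time with no runtime branching. A minimal standalone sketch of the same pattern (names illustrative):

	type ProtocolVersion = u32;

	const STAGING: bool = cfg!(feature = "network-protocol-staging");

	// Resolved at compile time; no runtime branch remains.
	pub const DEFAULT_VERSION: ProtocolVersion = if STAGING { 2 } else { 1 };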
/// The peer-sets and thus the protocols which are used for the network.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumIter)]
@@ -70,7 +83,11 @@ impl PeerSet {
match self {
PeerSet::Validation => NonDefaultSetConfig {
notifications_protocol: protocol,
- fallback_names: Vec::new(),
+ fallback_names: if cfg!(feature = "network-protocol-staging") {
+ vec![VALIDATION_PROTOCOL_V1.into()]
+ } else {
+ Vec::new()
+ },
max_notification_size,
set_config: sc_network::config::SetConfig {
// we allow full nodes to connect to validators for gossip
@@ -85,7 +102,11 @@ impl PeerSet {
},
PeerSet::Collation => NonDefaultSetConfig {
notifications_protocol: protocol,
- fallback_names: Vec::new(),
+ fallback_names: if cfg!(feature = "network-protocol-staging") {
+ vec![COLLATION_PROTOCOL_V1.into()]
+ } else {
+ Vec::new()
+ },
max_notification_size,
set_config: SetConfig {
// Non-authority nodes don't need to accept incoming connections on this peer set:
@@ -112,18 +133,31 @@ impl PeerSet {
/// Get the default protocol name as a static str.
pub const fn get_default_protocol_name(self) -> &'static str {
+ #[cfg(not(feature = "network-protocol-staging"))]
match self {
PeerSet::Validation => VALIDATION_PROTOCOL_V1,
PeerSet::Collation => COLLATION_PROTOCOL_V1,
}
+
+ #[cfg(feature = "network-protocol-staging")]
+ match self {
+ PeerSet::Validation => VALIDATION_PROTOCOL_VSTAGING,
+ PeerSet::Collation => COLLATION_PROTOCOL_VSTAGING,
+ }
}
/// Get the protocol name associated with each peer set
/// and the given version, if any, as static str.
pub const fn get_protocol_name_static(self, version: ProtocolVersion) -> Option<&'static str> {
match (self, version) {
+ // v1
(PeerSet::Validation, 1) => Some(VALIDATION_PROTOCOL_V1),
(PeerSet::Collation, 1) => Some(COLLATION_PROTOCOL_V1),
+
+ // vstaging
+ (PeerSet::Validation, 2) => Some(VALIDATION_PROTOCOL_VSTAGING),
+ (PeerSet::Collation, 2) => Some(COLLATION_PROTOCOL_VSTAGING),
+
_ => None,
}
}
@@ -144,8 +178,16 @@ impl PeerSet {
/// This only succeeds on supported versions.
pub fn try_from_protocol_name(name: &Cow<'static, str>) -> Option<(PeerSet, ProtocolVersion)> {
match name {
- n if n == VALIDATION_PROTOCOL_V1 => Some((PeerSet::Validation, 1)),
- n if n == COLLATION_PROTOCOL_V1 => Some((PeerSet::Collation, 1)),
+ // v1
+ n if n == VALIDATION_PROTOCOL_V1 => Some((PeerSet::Validation, protocol_v1::VERSION)),
+ n if n == COLLATION_PROTOCOL_V1 => Some((PeerSet::Collation, protocol_v1::VERSION)),
+
+ // vstaging
+ n if n == VALIDATION_PROTOCOL_VSTAGING =>
+ Some((PeerSet::Validation, protocol_vstaging::VERSION)),
+ n if n == COLLATION_PROTOCOL_VSTAGING =>
+ Some((PeerSet::Collation, protocol_vstaging::VERSION)),
+
_ => None,
}
}
diff --git a/node/network/statement-distribution/src/error.rs b/node/network/statement-distribution/src/error.rs
index 01b2efd53b86..869652a1974d 100644
--- a/node/network/statement-distribution/src/error.rs
+++ b/node/network/statement-distribution/src/error.rs
@@ -19,7 +19,9 @@
use polkadot_node_network_protocol::PeerId;
use polkadot_node_subsystem::SubsystemError;
-use polkadot_node_subsystem_util::runtime;
+use polkadot_node_subsystem_util::{
+ backing_implicit_view::FetchError as ImplicitViewFetchError, runtime,
+};
use polkadot_primitives::v2::{CandidateHash, Hash};
use crate::LOG_TARGET;
@@ -76,6 +78,9 @@ pub enum Error {
// Responder no longer waits for our data. (Should not happen right now.)
#[error("Oneshot `GetData` channel closed")]
ResponderGetDataCanceled,
+
+ #[error("Failed to load implicit view for leaf")]
+ ImplicitViewFetchError(Hash, ImplicitViewFetchError),
}
/// Utility for eating top level errors and log them.
diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs
index 2abb765f392b..e5ca6a44c744 100644
--- a/node/network/statement-distribution/src/lib.rs
+++ b/node/network/statement-distribution/src/lib.rs
@@ -31,7 +31,8 @@ use polkadot_node_network_protocol::{
peer_set::{IsAuthority, PeerSet},
request_response::{v1 as request_v1, IncomingRequestReceiver},
v1::{self as protocol_v1, StatementMetadata},
- IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View,
+ vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep,
+ Versioned, View,
};
use polkadot_node_primitives::{SignedFullStatement, Statement, UncheckedSignedFullStatement};
use polkadot_node_subsystem_util::{self as util, rand, MIN_GOSSIP_PEERS};
@@ -75,6 +76,8 @@ use requester::{fetch, RequesterMessage};
mod responder;
use responder::{respond, ResponderMessage};
+mod view;
+
/// Metrics for the statement distribution
pub(crate) mod metrics;
use metrics::Metrics;
@@ -605,7 +608,7 @@ struct FetchingInfo {
///
/// We use an `IndexMap` here to preserve the ordering of peers sending us messages. This is
/// desirable because we reward first sending peers with reputation.
- available_peers: IndexMap<PeerId, Vec<protocol_v1::StatementDistributionMessage>>,
+ available_peers: IndexMap<PeerId, Vec<net_protocol::StatementDistributionMessage>>,
/// Peers left to try in case the background task needs it.
peers_to_try: Vec<PeerId>,
/// Sender for sending fresh peers to the fetching task in case of failure.
@@ -1207,11 +1210,11 @@ async fn retrieve_statement_from_message<'a, Context>(
let is_new_peer = match info.available_peers.entry(peer) {
IEntry::Occupied(mut occupied) => {
- occupied.get_mut().push(message);
+ occupied.get_mut().push(Versioned::V1(message));
false
},
IEntry::Vacant(vacant) => {
- vacant.insert(vec![message]);
+ vacant.insert(vec![Versioned::V1(message)]);
true
},
};
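The first-sender reward mentioned above leans on `IndexMap`'s insertion-order iteration; a tiny standalone illustration:

	use indexmap::IndexMap;

	// Unlike `HashMap`, `IndexMap` iterates in insertion order, so the first
	// peer to send us a statement stays first when rewards are handed out.
	fn main() {
		let mut available_peers: IndexMap<&str, u32> = IndexMap::new();
		available_peers.insert("first-sender", 1);
		available_peers.insert("second-sender", 1);
		assert_eq!(available_peers.keys().next(), Some(&"first-sender"));
	}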
@@ -1289,7 +1292,10 @@ async fn launch_request(
}
let available_peers = {
let mut m = IndexMap::new();
- m.insert(peer, vec![protocol_v1::StatementDistributionMessage::LargeStatement(meta)]);
+ m.insert(
+ peer,
+ vec![Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement(meta))],
+ );
m
};
Some(LargeStatementStatus::Fetching(FetchingInfo {
@@ -1309,7 +1315,7 @@ async fn handle_incoming_message_and_circulate<'a, Context, R>(
active_heads: &'a mut HashMap<Hash, ActiveHeadData>,
recent_outdated_heads: &RecentOutdatedHeads,
ctx: &mut Context,
- message: protocol_v1::StatementDistributionMessage,
+ message: net_protocol::StatementDistributionMessage,
req_sender: &mpsc::Sender<RequesterMessage>,
metrics: &Metrics,
runtime: &mut RuntimeInfo,
@@ -1342,7 +1348,7 @@ async fn handle_incoming_message_and_circulate<'a, Context, R>(
// statement before a `Seconded` statement. `Seconded` statements are the only ones
// that require dependents. Thus, if this is a `Seconded` statement for a candidate we
// were not aware of before, we cannot have any dependent statements from the candidate.
- let _ = metrics.time_network_bridge_update_v1("circulate_statement");
+ let _ = metrics.time_network_bridge_update("circulate_statement");
let session_index = runtime.get_session_index_for_child(ctx.sender(), relay_parent).await;
let topology = match session_index {
@@ -1388,12 +1394,19 @@ async fn handle_incoming_message<'a, Context>(
active_heads: &'a mut HashMap<Hash, ActiveHeadData>,
recent_outdated_heads: &RecentOutdatedHeads,
ctx: &mut Context,
- message: protocol_v1::StatementDistributionMessage,
+ message: net_protocol::StatementDistributionMessage,
req_sender: &mpsc::Sender<RequesterMessage>,
metrics: &Metrics,
) -> Option<(Hash, StoredStatement<'a>)> {
+ let _ = metrics.time_network_bridge_update("handle_incoming_message");
+
+ // TODO [now] handle vstaging messages
+ let message = match message {
+ Versioned::V1(m) => m,
+ Versioned::VStaging(_) => unimplemented!(),
+ };
+
let relay_parent = message.get_relay_parent();
- let _ = metrics.time_network_bridge_update_v1("handle_incoming_message");
let active_head = match active_heads.get_mut(&relay_parent) {
Some(h) => h,
@@ -1569,7 +1582,7 @@ async fn handle_incoming_message<'a, Context>(
// candidate backing subsystem.
ctx.send_message(CandidateBackingMessage::Statement(
relay_parent,
- statement.statement.clone(),
+ unimplemented!(), // TODO [now]: fixme
))
.await;
@@ -1664,7 +1677,7 @@ async fn handle_network_update(
}
},
NetworkBridgeEvent::NewGossipTopology(topology) => {
- let _ = metrics.time_network_bridge_update_v1("new_gossip_topology");
+ let _ = metrics.time_network_bridge_update("new_gossip_topology");
let new_session_index = topology.session;
let new_topology: SessionGridTopology = topology.into();
@@ -1688,7 +1701,7 @@ async fn handle_network_update(
}
}
},
- NetworkBridgeEvent::PeerMessage(peer, Versioned::V1(message)) => {
+ NetworkBridgeEvent::PeerMessage(peer, message) => {
handle_incoming_message_and_circulate(
peer,
topology_storage,
@@ -1705,7 +1718,7 @@ async fn handle_network_update(
.await;
},
NetworkBridgeEvent::PeerViewChange(peer, view) => {
- let _ = metrics.time_network_bridge_update_v1("peer_view_change");
+ let _ = metrics.time_network_bridge_update("peer_view_change");
gum::trace!(target: LOG_TARGET, ?peer, ?view, "Peer view change");
match peers.get_mut(&peer) {
Some(data) =>
diff --git a/node/network/statement-distribution/src/metrics.rs b/node/network/statement-distribution/src/metrics.rs
index 6bc6f724ae09..f0e9d3be7efb 100644
--- a/node/network/statement-distribution/src/metrics.rs
+++ b/node/network/statement-distribution/src/metrics.rs
@@ -27,7 +27,7 @@ struct MetricsInner {
received_responses: prometheus::CounterVec<prometheus::U64>,
active_leaves_update: prometheus::Histogram,
share: prometheus::Histogram,
- network_bridge_update_v1: prometheus::HistogramVec,
+ network_bridge_update: prometheus::HistogramVec,
statements_unexpected: prometheus::CounterVec<prometheus::U64>,
created_message_size: prometheus::Gauge<prometheus::U64>,
}
@@ -75,16 +75,13 @@ impl Metrics {
self.0.as_ref().map(|metrics| metrics.share.start_timer())
}
- /// Provide a timer for `network_bridge_update_v1` which observes on drop.
- pub fn time_network_bridge_update_v1(
+ /// Provide a timer for `network_bridge_update` which observes on drop.
+ pub fn time_network_bridge_update(
&self,
message_type: &'static str,
) -> Option<prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| {
- metrics
- .network_bridge_update_v1
- .with_label_values(&[message_type])
- .start_timer()
+ metrics.network_bridge_update.with_label_values(&[message_type]).start_timer()
})
}
@@ -166,11 +163,11 @@ impl metrics::Metrics for Metrics {
)?,
registry,
)?,
- network_bridge_update_v1: prometheus::register(
+ network_bridge_update: prometheus::register(
prometheus::HistogramVec::new(
prometheus::HistogramOpts::new(
- "polkadot_parachain_statement_distribution_network_bridge_update_v1",
- "Time spent within `statement_distribution::network_bridge_update_v1`",
+ "polkadot_parachain_statement_distribution_network_bridge_update",
+ "Time spent within `statement_distribution::network_bridge_update`",
)
.buckets(HISTOGRAM_LATENCY_BUCKETS.into()),
&["message_type"],
diff --git a/node/network/statement-distribution/src/tests.rs b/node/network/statement-distribution/src/tests.rs
index 9f5b4f6de326..25cb1a983178 100644
--- a/node/network/statement-distribution/src/tests.rs
+++ b/node/network/statement-distribution/src/tests.rs
@@ -44,2256 +44,2266 @@ use sp_keyring::Sr25519Keyring;
use sp_keystore::{CryptoStore, SyncCryptoStore, SyncCryptoStorePtr};
use std::{iter::FromIterator as _, sync::Arc, time::Duration};
-#[test]
-fn active_head_accepts_only_2_seconded_per_validator() {
- let validators = vec![
- Sr25519Keyring::Alice.public().into(),
- Sr25519Keyring::Bob.public().into(),
- Sr25519Keyring::Charlie.public().into(),
- ];
- let parent_hash: Hash = [1; 32].into();
-
- let session_index = 1;
- let signing_context = SigningContext { parent_hash, session_index };
-
- let candidate_a = {
- let mut c = dummy_committed_candidate_receipt(dummy_hash());
- c.descriptor.relay_parent = parent_hash;
- c.descriptor.para_id = 1.into();
- c
- };
-
- let candidate_b = {
- let mut c = dummy_committed_candidate_receipt(dummy_hash());
- c.descriptor.relay_parent = parent_hash;
- c.descriptor.para_id = 2.into();
- c
- };
-
- let candidate_c = {
- let mut c = dummy_committed_candidate_receipt(dummy_hash());
- c.descriptor.relay_parent = parent_hash;
- c.descriptor.para_id = 3.into();
- c
- };
-
- let mut head_data = ActiveHeadData::new(
- validators,
- session_index,
- PerLeafSpan::new(Arc::new(jaeger::Span::Disabled), "test"),
- );
-
- let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
- let alice_public = SyncCryptoStore::sr25519_generate_new(
- &*keystore,
- ValidatorId::ID,
- Some(&Sr25519Keyring::Alice.to_seed()),
- )
- .unwrap();
- let bob_public = SyncCryptoStore::sr25519_generate_new(
- &*keystore,
- ValidatorId::ID,
- Some(&Sr25519Keyring::Bob.to_seed()),
- )
- .unwrap();
-
- // note A
- let a_seconded_val_0 = block_on(SignedFullStatement::sign(
- &keystore,
- Statement::Seconded(candidate_a.clone()),
- &signing_context,
- ValidatorIndex(0),
- &alice_public.into(),
- ))
- .ok()
- .flatten()
- .expect("should be signed");
- assert!(head_data
- .check_useful_or_unknown(&a_seconded_val_0.clone().convert_payload().into())
- .is_ok());
- let noted = head_data.note_statement(a_seconded_val_0.clone());
-
- assert_matches!(noted, NotedStatement::Fresh(_));
-
- // note A (duplicate)
- assert_eq!(
- head_data.check_useful_or_unknown(&a_seconded_val_0.clone().convert_payload().into()),
- Err(DeniedStatement::UsefulButKnown),
- );
- let noted = head_data.note_statement(a_seconded_val_0);
-
- assert_matches!(noted, NotedStatement::UsefulButKnown);
-
- // note B
- let statement = block_on(SignedFullStatement::sign(
- &keystore,
- Statement::Seconded(candidate_b.clone()),
- &signing_context,
- ValidatorIndex(0),
- &alice_public.into(),
- ))
- .ok()
- .flatten()
- .expect("should be signed");
- assert!(head_data
- .check_useful_or_unknown(&statement.clone().convert_payload().into())
- .is_ok());
- let noted = head_data.note_statement(statement);
- assert_matches!(noted, NotedStatement::Fresh(_));
-
- // note C (beyond 2 - ignored)
- let statement = block_on(SignedFullStatement::sign(
- &keystore,
- Statement::Seconded(candidate_c.clone()),
- &signing_context,
- ValidatorIndex(0),
- &alice_public.into(),
- ))
- .ok()
- .flatten()
- .expect("should be signed");
- assert_eq!(
- head_data.check_useful_or_unknown(&statement.clone().convert_payload().into()),
- Err(DeniedStatement::NotUseful),
- );
- let noted = head_data.note_statement(statement);
- assert_matches!(noted, NotedStatement::NotUseful);
-
- // note B (new validator)
- let statement = block_on(SignedFullStatement::sign(
- &keystore,
- Statement::Seconded(candidate_b.clone()),
- &signing_context,
- ValidatorIndex(1),
- &bob_public.into(),
- ))
- .ok()
- .flatten()
- .expect("should be signed");
- assert!(head_data
- .check_useful_or_unknown(&statement.clone().convert_payload().into())
- .is_ok());
- let noted = head_data.note_statement(statement);
- assert_matches!(noted, NotedStatement::Fresh(_));
-
- // note C (new validator)
- let statement = block_on(SignedFullStatement::sign(
- &keystore,
- Statement::Seconded(candidate_c.clone()),
- &signing_context,
- ValidatorIndex(1),
- &bob_public.into(),
- ))
- .ok()
- .flatten()
- .expect("should be signed");
- assert!(head_data
- .check_useful_or_unknown(&statement.clone().convert_payload().into())
- .is_ok());
- let noted = head_data.note_statement(statement);
- assert_matches!(noted, NotedStatement::Fresh(_));
-}
-
-#[test]
-fn note_local_works() {
- let hash_a = CandidateHash([1; 32].into());
- let hash_b = CandidateHash([2; 32].into());
-
- let mut per_peer_tracker = VcPerPeerTracker::default();
- per_peer_tracker.note_local(hash_a.clone());
- per_peer_tracker.note_local(hash_b.clone());
-
- assert!(per_peer_tracker.local_observed.contains(&hash_a));
- assert!(per_peer_tracker.local_observed.contains(&hash_b));
-
- assert!(!per_peer_tracker.remote_observed.contains(&hash_a));
- assert!(!per_peer_tracker.remote_observed.contains(&hash_b));
-}
-
-#[test]
-fn note_remote_works() {
- let hash_a = CandidateHash([1; 32].into());
- let hash_b = CandidateHash([2; 32].into());
- let hash_c = CandidateHash([3; 32].into());
-
- let mut per_peer_tracker = VcPerPeerTracker::default();
- assert!(per_peer_tracker.note_remote(hash_a.clone()));
- assert!(per_peer_tracker.note_remote(hash_b.clone()));
- assert!(!per_peer_tracker.note_remote(hash_c.clone()));
-
- assert!(per_peer_tracker.remote_observed.contains(&hash_a));
- assert!(per_peer_tracker.remote_observed.contains(&hash_b));
- assert!(!per_peer_tracker.remote_observed.contains(&hash_c));
-
- assert!(!per_peer_tracker.local_observed.contains(&hash_a));
- assert!(!per_peer_tracker.local_observed.contains(&hash_b));
- assert!(!per_peer_tracker.local_observed.contains(&hash_c));
-}
-
-#[test]
-fn per_peer_relay_parent_knowledge_send() {
- let mut knowledge = PeerRelayParentKnowledge::default();
-
- let hash_a = CandidateHash([1; 32].into());
-
- // Sending an un-pinned statement should not work and should have no effect.
- assert!(!knowledge.can_send(&(CompactStatement::Valid(hash_a), ValidatorIndex(0))));
- assert!(!knowledge.is_known_candidate(&hash_a));
- assert!(knowledge.sent_statements.is_empty());
- assert!(knowledge.received_statements.is_empty());
- assert!(knowledge.seconded_counts.is_empty());
- assert!(knowledge.received_message_count.is_empty());
-
- // Make the peer aware of the candidate.
- assert_eq!(knowledge.send(&(CompactStatement::Seconded(hash_a), ValidatorIndex(0))), true);
- assert_eq!(knowledge.send(&(CompactStatement::Seconded(hash_a), ValidatorIndex(1))), false);
- assert!(knowledge.is_known_candidate(&hash_a));
- assert_eq!(knowledge.sent_statements.len(), 2);
- assert!(knowledge.received_statements.is_empty());
- assert_eq!(knowledge.seconded_counts.len(), 2);
- assert!(knowledge.received_message_count.get(&hash_a).is_none());
-
- // And now it should accept the dependent message.
- assert_eq!(knowledge.send(&(CompactStatement::Valid(hash_a), ValidatorIndex(0))), false);
- assert!(knowledge.is_known_candidate(&hash_a));
- assert_eq!(knowledge.sent_statements.len(), 3);
- assert!(knowledge.received_statements.is_empty());
- assert_eq!(knowledge.seconded_counts.len(), 2);
- assert!(knowledge.received_message_count.get(&hash_a).is_none());
-}
-
-#[test]
-fn cant_send_after_receiving() {
- let mut knowledge = PeerRelayParentKnowledge::default();
-
- let hash_a = CandidateHash([1; 32].into());
- assert!(knowledge
- .check_can_receive(&(CompactStatement::Seconded(hash_a), ValidatorIndex(0)), 3)
- .is_ok());
- assert!(knowledge
- .receive(&(CompactStatement::Seconded(hash_a), ValidatorIndex(0)), 3)
- .unwrap());
- assert!(!knowledge.can_send(&(CompactStatement::Seconded(hash_a), ValidatorIndex(0))));
-}
-
-#[test]
-fn per_peer_relay_parent_knowledge_receive() {
- let mut knowledge = PeerRelayParentKnowledge::default();
-
- let hash_a = CandidateHash([1; 32].into());
-
- assert_eq!(
- knowledge.check_can_receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(0)), 3),
- Err(COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE),
- );
- assert_eq!(
- knowledge.receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(0)), 3),
- Err(COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE),
- );
-
- assert!(knowledge
- .check_can_receive(&(CompactStatement::Seconded(hash_a), ValidatorIndex(0)), 3)
- .is_ok());
- assert_eq!(
- knowledge.receive(&(CompactStatement::Seconded(hash_a), ValidatorIndex(0)), 3),
- Ok(true),
- );
-
- // Push statements up to the flood limit.
- assert!(knowledge
- .check_can_receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(1)), 3)
- .is_ok());
- assert_eq!(
- knowledge.receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(1)), 3),
- Ok(false),
- );
-
- assert!(knowledge.is_known_candidate(&hash_a));
- assert_eq!(*knowledge.received_message_count.get(&hash_a).unwrap(), 2);
-
- assert!(knowledge
- .check_can_receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(2)), 3)
- .is_ok());
- assert_eq!(
- knowledge.receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(2)), 3),
- Ok(false),
- );
-
- assert_eq!(*knowledge.received_message_count.get(&hash_a).unwrap(), 3);
-
- assert_eq!(
- knowledge.check_can_receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(7)), 3),
- Err(COST_APPARENT_FLOOD),
- );
- assert_eq!(
- knowledge.receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(7)), 3),
- Err(COST_APPARENT_FLOOD),
- );
-
- assert_eq!(*knowledge.received_message_count.get(&hash_a).unwrap(), 3);
- assert_eq!(knowledge.received_statements.len(), 3); // number of prior `Ok`s.
-
- // Now make sure that the seconding limit is respected.
- let hash_b = CandidateHash([2; 32].into());
- let hash_c = CandidateHash([3; 32].into());
-
- assert!(knowledge
- .check_can_receive(&(CompactStatement::Seconded(hash_b), ValidatorIndex(0)), 3)
- .is_ok());
- assert_eq!(
- knowledge.receive(&(CompactStatement::Seconded(hash_b), ValidatorIndex(0)), 3),
- Ok(true),
- );
-
- assert_eq!(
- knowledge.check_can_receive(&(CompactStatement::Seconded(hash_c), ValidatorIndex(0)), 3),
- Err(COST_UNEXPECTED_STATEMENT_REMOTE),
- );
- assert_eq!(
- knowledge.receive(&(CompactStatement::Seconded(hash_c), ValidatorIndex(0)), 3),
- Err(COST_UNEXPECTED_STATEMENT_REMOTE),
- );
-
- // Last, make sure that already-known statements are disregarded.
- assert_eq!(
- knowledge.check_can_receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(2)), 3),
- Err(COST_DUPLICATE_STATEMENT),
- );
- assert_eq!(
- knowledge.receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(2)), 3),
- Err(COST_DUPLICATE_STATEMENT),
- );
-
- assert_eq!(
- knowledge.check_can_receive(&(CompactStatement::Seconded(hash_b), ValidatorIndex(0)), 3),
- Err(COST_DUPLICATE_STATEMENT),
- );
- assert_eq!(
- knowledge.receive(&(CompactStatement::Seconded(hash_b), ValidatorIndex(0)), 3),
- Err(COST_DUPLICATE_STATEMENT),
- );
-}
-
-#[test]
-fn peer_view_update_sends_messages() {
- let hash_a = Hash::repeat_byte(1);
- let hash_b = Hash::repeat_byte(2);
- let hash_c = Hash::repeat_byte(3);
-
- let candidate = {
- let mut c = dummy_committed_candidate_receipt(dummy_hash());
- c.descriptor.relay_parent = hash_c;
- c.descriptor.para_id = 1.into();
- c
- };
- let candidate_hash = candidate.hash();
-
- let old_view = view![hash_a, hash_b];
- let new_view = view![hash_b, hash_c];
-
- let mut active_heads = HashMap::new();
- let validators = vec![
- Sr25519Keyring::Alice.public().into(),
- Sr25519Keyring::Bob.public().into(),
- Sr25519Keyring::Charlie.public().into(),
- ];
-
- let session_index = 1;
- let signing_context = SigningContext { parent_hash: hash_c, session_index };
-
- let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
-
- let alice_public = SyncCryptoStore::sr25519_generate_new(
- &*keystore,
- ValidatorId::ID,
- Some(&Sr25519Keyring::Alice.to_seed()),
- )
- .unwrap();
- let bob_public = SyncCryptoStore::sr25519_generate_new(
- &*keystore,
- ValidatorId::ID,
- Some(&Sr25519Keyring::Bob.to_seed()),
- )
- .unwrap();
- let charlie_public = SyncCryptoStore::sr25519_generate_new(
- &*keystore,
- ValidatorId::ID,
- Some(&Sr25519Keyring::Charlie.to_seed()),
- )
- .unwrap();
-
- let new_head_data = {
- let mut data = ActiveHeadData::new(
- validators,
- session_index,
- PerLeafSpan::new(Arc::new(jaeger::Span::Disabled), "test"),
- );
-
- let statement = block_on(SignedFullStatement::sign(
- &keystore,
- Statement::Seconded(candidate.clone()),
- &signing_context,
- ValidatorIndex(0),
- &alice_public.into(),
- ))
- .ok()
- .flatten()
- .expect("should be signed");
- assert!(data
- .check_useful_or_unknown(&statement.clone().convert_payload().into())
- .is_ok());
- let noted = data.note_statement(statement);
-
- assert_matches!(noted, NotedStatement::Fresh(_));
-
- let statement = block_on(SignedFullStatement::sign(
- &keystore,
- Statement::Valid(candidate_hash),
- &signing_context,
- ValidatorIndex(1),
- &bob_public.into(),
- ))
- .ok()
- .flatten()
- .expect("should be signed");
- assert!(data
- .check_useful_or_unknown(&statement.clone().convert_payload().into())
- .is_ok());
- let noted = data.note_statement(statement);
-
- assert_matches!(noted, NotedStatement::Fresh(_));
-
- let statement = block_on(SignedFullStatement::sign(
- &keystore,
- Statement::Valid(candidate_hash),
- &signing_context,
- ValidatorIndex(2),
- &charlie_public.into(),
- ))
- .ok()
- .flatten()
- .expect("should be signed");
- assert!(data
- .check_useful_or_unknown(&statement.clone().convert_payload().into())
- .is_ok());
- let noted = data.note_statement(statement);
- assert_matches!(noted, NotedStatement::Fresh(_));
-
- data
- };
-
- active_heads.insert(hash_c, new_head_data);
-
- let mut peer_data = PeerData {
- view: old_view,
- view_knowledge: {
- let mut k = HashMap::new();
-
- k.insert(hash_a, Default::default());
- k.insert(hash_b, Default::default());
-
- k
- },
- maybe_authority: None,
- };
-
- let pool = sp_core::testing::TaskExecutor::new();
- let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::<
- StatementDistributionMessage,
- _,
- >(pool);
- let peer = PeerId::random();
-
- executor::block_on(async move {
- let mut topology: SessionGridTopology = Default::default();
- topology.peers_x = HashSet::from_iter(vec![peer.clone()].into_iter());
- update_peer_view_and_maybe_send_unlocked(
- peer.clone(),
- &topology,
- &mut peer_data,
- &mut ctx,
- &active_heads,
- new_view.clone(),
- &Default::default(),
- &mut AlwaysZeroRng,
- )
- .await;
-
- assert_eq!(peer_data.view, new_view);
- assert!(!peer_data.view_knowledge.contains_key(&hash_a));
- assert!(peer_data.view_knowledge.contains_key(&hash_b));
-
- let c_knowledge = peer_data.view_knowledge.get(&hash_c).unwrap();
-
- assert!(c_knowledge.is_known_candidate(&candidate_hash));
- assert!(c_knowledge
- .sent_statements
- .contains(&(CompactStatement::Seconded(candidate_hash), ValidatorIndex(0))));
- assert!(c_knowledge
- .sent_statements
- .contains(&(CompactStatement::Valid(candidate_hash), ValidatorIndex(1))));
- assert!(c_knowledge
- .sent_statements
- .contains(&(CompactStatement::Valid(candidate_hash), ValidatorIndex(2))));
-
- // now see if we got the 3 messages from the active head data.
- let active_head = active_heads.get(&hash_c).unwrap();
-
- // semi-fragile because hashmap iterator ordering is undefined, but in practice
- // it will not change between runs of the program.
- for statement in active_head.statements_about(candidate_hash) {
- let message = handle.recv().await;
- let expected_to = vec![peer.clone()];
- let expected_payload =
- statement_message(hash_c, statement.statement.clone(), &Metrics::default());
-
- assert_matches!(
- message,
- AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage(
- to,
- payload,
- )) => {
- assert_eq!(to, expected_to);
- assert_eq!(payload, expected_payload)
- }
- )
- }
- });
-}
-
-#[test]
-fn circulated_statement_goes_to_all_peers_with_view() {
- let hash_a = Hash::repeat_byte(1);
- let hash_b = Hash::repeat_byte(2);
- let hash_c = Hash::repeat_byte(3);
-
- let candidate = {
- let mut c = dummy_committed_candidate_receipt(dummy_hash());
- c.descriptor.relay_parent = hash_b;
- c.descriptor.para_id = 1.into();
- c
- };
-
- let peer_a = PeerId::random();
- let peer_b = PeerId::random();
- let peer_c = PeerId::random();
-
- let peer_a_view = view![hash_a];
- let peer_b_view = view![hash_a, hash_b];
- let peer_c_view = view![hash_b, hash_c];
-
- let session_index = 1;
-
- let peer_data_from_view = |view: View| PeerData {
- view: view.clone(),
- view_knowledge: view.iter().map(|v| (v.clone(), Default::default())).collect(),
- maybe_authority: None,
- };
-
- let mut peer_data: HashMap<_, _> = vec![
- (peer_a.clone(), peer_data_from_view(peer_a_view)),
- (peer_b.clone(), peer_data_from_view(peer_b_view)),
- (peer_c.clone(), peer_data_from_view(peer_c_view)),
- ]
- .into_iter()
- .collect();
-
- let pool = sp_core::testing::TaskExecutor::new();
- let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::<
- StatementDistributionMessage,
- _,
- >(pool);
-
- executor::block_on(async move {
- let signing_context = SigningContext { parent_hash: hash_b, session_index };
-
- let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
- let alice_public = CryptoStore::sr25519_generate_new(
- &*keystore,
- ValidatorId::ID,
- Some(&Sr25519Keyring::Alice.to_seed()),
- )
- .await
- .unwrap();
-
- let statement = SignedFullStatement::sign(
- &keystore,
- Statement::Seconded(candidate),
- &signing_context,
- ValidatorIndex(0),
- &alice_public.into(),
- )
- .await
- .ok()
- .flatten()
- .expect("should be signed");
-
- let comparator = StoredStatementComparator {
- compact: statement.payload().to_compact(),
- validator_index: ValidatorIndex(0),
- signature: statement.signature().clone(),
- };
- let statement = StoredStatement { comparator: &comparator, statement: &statement };
-
- let mut topology: SessionGridTopology = Default::default();
- topology.peers_x =
- HashSet::from_iter(vec![peer_a.clone(), peer_b.clone(), peer_c.clone()].into_iter());
- let needs_dependents = circulate_statement(
- RequiredRouting::GridXY,
- &topology,
- &mut peer_data,
- &mut ctx,
- hash_b,
- statement,
- Vec::new(),
- &Metrics::default(),
- &mut AlwaysZeroRng,
- )
- .await;
-
- {
- assert_eq!(needs_dependents.len(), 2);
- assert!(needs_dependents.contains(&peer_b));
- assert!(needs_dependents.contains(&peer_c));
- }
-
- let fingerprint = (statement.compact().clone(), ValidatorIndex(0));
-
- assert!(peer_data
- .get(&peer_b)
- .unwrap()
- .view_knowledge
- .get(&hash_b)
- .unwrap()
- .sent_statements
- .contains(&fingerprint));
-
- assert!(peer_data
- .get(&peer_c)
- .unwrap()
- .view_knowledge
- .get(&hash_b)
- .unwrap()
- .sent_statements
- .contains(&fingerprint));
-
- let message = handle.recv().await;
- assert_matches!(
- message,
- AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage(
- to,
- payload,
- )) => {
- assert_eq!(to.len(), 2);
- assert!(to.contains(&peer_b));
- assert!(to.contains(&peer_c));
-
- assert_eq!(
- payload,
- statement_message(hash_b, statement.statement.clone(), &Metrics::default()),
- );
- }
- )
- });
-}
-
-#[test]
-fn receiving_from_one_sends_to_another_and_to_candidate_backing() {
- let hash_a = Hash::repeat_byte(1);
-
- let candidate = {
- let mut c = dummy_committed_candidate_receipt(dummy_hash());
- c.descriptor.relay_parent = hash_a;
- c.descriptor.para_id = 1.into();
- c
- };
-
- let peer_a = PeerId::random();
- let peer_b = PeerId::random();
-
- let validators = vec![
- Sr25519Keyring::Alice.pair(),
- Sr25519Keyring::Bob.pair(),
- Sr25519Keyring::Charlie.pair(),
- ];
-
- let session_info = make_session_info(validators, vec![]);
-
- let session_index = 1;
-
- let pool = sp_core::testing::TaskExecutor::new();
- let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
-
- let (statement_req_receiver, _) = IncomingRequest::get_config_receiver();
-
- let bg = async move {
- let s = StatementDistributionSubsystem::new(
- Arc::new(LocalKeystore::in_memory()),
- statement_req_receiver,
- Default::default(),
- AlwaysZeroRng,
- );
- s.run(ctx).await.unwrap();
- };
-
- let test_fut = async move {
- // register our active heads.
- handle
- .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
- ActiveLeavesUpdate::start_work(ActivatedLeaf {
- hash: hash_a,
- number: 1,
- status: LeafStatus::Fresh,
- span: Arc::new(jaeger::Span::Disabled),
- }),
- )))
- .await;
-
- assert_matches!(
- handle.recv().await,
- AllMessages::RuntimeApi(
- RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionIndexForChild(tx))
- )
- if r == hash_a
- => {
- let _ = tx.send(Ok(session_index));
- }
- );
-
- assert_matches!(
- handle.recv().await,
- AllMessages::RuntimeApi(
- RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionInfo(sess_index, tx))
- )
- if r == hash_a && sess_index == session_index
- => {
- let _ = tx.send(Ok(Some(session_info)));
- }
- );
-
- // notify of peers and view
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerConnected(peer_a.clone(), ObservedRole::Full, 1, None),
- ),
- })
- .await;
-
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerConnected(peer_b.clone(), ObservedRole::Full, 1, None),
- ),
- })
- .await;
-
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerViewChange(peer_a.clone(), view![hash_a]),
- ),
- })
- .await;
-
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerViewChange(peer_b.clone(), view![hash_a]),
- ),
- })
- .await;
-
-		// receive a seconded statement from peer A. It should be propagated onwards to peer B and to
- // candidate backing.
- let statement = {
- let signing_context = SigningContext { parent_hash: hash_a, session_index };
-
- let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
- let alice_public = CryptoStore::sr25519_generate_new(
- &*keystore,
- ValidatorId::ID,
- Some(&Sr25519Keyring::Alice.to_seed()),
- )
- .await
- .unwrap();
-
- SignedFullStatement::sign(
- &keystore,
- Statement::Seconded(candidate),
- &signing_context,
- ValidatorIndex(0),
- &alice_public.into(),
- )
- .await
- .ok()
- .flatten()
- .expect("should be signed")
- };
-
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerMessage(
- peer_a.clone(),
- Versioned::V1(protocol_v1::StatementDistributionMessage::Statement(
- hash_a,
- statement.clone().into(),
- )),
- ),
- ),
- })
- .await;
-
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::ReportPeer(p, r)
- ) if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => {}
- );
-
- assert_matches!(
- handle.recv().await,
- AllMessages::CandidateBacking(
- CandidateBackingMessage::Statement(r, s)
- ) if r == hash_a && s == statement => {}
- );
-
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::SendValidationMessage(
- recipients,
- Versioned::V1(protocol_v1::ValidationProtocol::StatementDistribution(
- protocol_v1::StatementDistributionMessage::Statement(r, s)
- )),
- )
- ) => {
- assert_eq!(recipients, vec![peer_b.clone()]);
- assert_eq!(r, hash_a);
- assert_eq!(s, statement.into());
- }
- );
- handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
- };
-
- futures::pin_mut!(test_fut);
- futures::pin_mut!(bg);
-
- executor::block_on(future::join(test_fut, bg));
-}
-
-#[test]
-fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing() {
- sp_tracing::try_init_simple();
- let hash_a = Hash::repeat_byte(1);
- let hash_b = Hash::repeat_byte(2);
-
- let candidate = {
- let mut c = dummy_committed_candidate_receipt(dummy_hash());
- c.descriptor.relay_parent = hash_a;
- c.descriptor.para_id = 1.into();
- c.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3]));
- c
- };
-
- let peer_a = PeerId::random(); // Alice
- let peer_b = PeerId::random(); // Bob
- let peer_c = PeerId::random(); // Charlie
- let peer_bad = PeerId::random(); // No validator
-
- let validators = vec![
- Sr25519Keyring::Alice.pair(),
- Sr25519Keyring::Bob.pair(),
- Sr25519Keyring::Charlie.pair(),
- // We:
- Sr25519Keyring::Ferdie.pair(),
- ];
-
- let session_info = make_session_info(validators, vec![vec![0, 1, 2, 4], vec![3]]);
-
- let session_index = 1;
-
- let pool = sp_core::testing::TaskExecutor::new();
- let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
-
- let (statement_req_receiver, mut req_cfg) = IncomingRequest::get_config_receiver();
-
- let bg = async move {
- let s = StatementDistributionSubsystem::new(
- make_ferdie_keystore(),
- statement_req_receiver,
- Default::default(),
- AlwaysZeroRng,
- );
- s.run(ctx).await.unwrap();
- };
-
- let test_fut = async move {
- // register our active heads.
- handle
- .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
- ActiveLeavesUpdate::start_work(ActivatedLeaf {
- hash: hash_a,
- number: 1,
- status: LeafStatus::Fresh,
- span: Arc::new(jaeger::Span::Disabled),
- }),
- )))
- .await;
-
- assert_matches!(
- handle.recv().await,
- AllMessages::RuntimeApi(
- RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionIndexForChild(tx))
- )
- if r == hash_a
- => {
- let _ = tx.send(Ok(session_index));
- }
- );
-
- assert_matches!(
- handle.recv().await,
- AllMessages::RuntimeApi(
- RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionInfo(sess_index, tx))
- )
- if r == hash_a && sess_index == session_index
- => {
- let _ = tx.send(Ok(Some(session_info)));
- }
- );
-
- // notify of peers and view
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerConnected(
- peer_a.clone(),
- ObservedRole::Full,
- 1,
- Some(HashSet::from([Sr25519Keyring::Alice.public().into()])),
- ),
- ),
- })
- .await;
-
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerConnected(
- peer_b.clone(),
- ObservedRole::Full,
- 1,
- Some(HashSet::from([Sr25519Keyring::Bob.public().into()])),
- ),
- ),
- })
- .await;
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerConnected(
- peer_c.clone(),
- ObservedRole::Full,
- 1,
- Some(HashSet::from([Sr25519Keyring::Charlie.public().into()])),
- ),
- ),
- })
- .await;
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerConnected(
- peer_bad.clone(),
- ObservedRole::Full,
- 1,
- None,
- ),
- ),
- })
- .await;
-
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerViewChange(peer_a.clone(), view![hash_a]),
- ),
- })
- .await;
-
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerViewChange(peer_b.clone(), view![hash_a]),
- ),
- })
- .await;
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerViewChange(peer_c.clone(), view![hash_a]),
- ),
- })
- .await;
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerViewChange(peer_bad.clone(), view![hash_a]),
- ),
- })
- .await;
-
- // receive a seconded statement from peer A, which does not provide the request data,
- // then get that data from peer C. It should be propagated onwards to peer B and to
- // candidate backing.
- let statement = {
- let signing_context = SigningContext { parent_hash: hash_a, session_index };
-
- let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
- let alice_public = CryptoStore::sr25519_generate_new(
- &*keystore,
- ValidatorId::ID,
- Some(&Sr25519Keyring::Alice.to_seed()),
- )
- .await
- .unwrap();
-
- SignedFullStatement::sign(
- &keystore,
- Statement::Seconded(candidate.clone()),
- &signing_context,
- ValidatorIndex(0),
- &alice_public.into(),
- )
- .await
- .ok()
- .flatten()
- .expect("should be signed")
- };
-
- let metadata = derive_metadata_assuming_seconded(hash_a, statement.clone().into());
-
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerMessage(
- peer_a.clone(),
- Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement(
- metadata.clone(),
- )),
- ),
- ),
- })
- .await;
-
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::SendRequests(
- mut reqs, IfDisconnected::ImmediateError
- )
- ) => {
- let reqs = reqs.pop().unwrap();
- let outgoing = match reqs {
- Requests::StatementFetchingV1(outgoing) => outgoing,
- _ => panic!("Unexpected request"),
- };
- let req = outgoing.payload;
- assert_eq!(req.relay_parent, metadata.relay_parent);
- assert_eq!(req.candidate_hash, metadata.candidate_hash);
- assert_eq!(outgoing.peer, Recipient::Peer(peer_a));
- // Just drop request - should trigger error.
- }
- );
-
- // There is a race between request handler asking for more peers and processing of the
- // coming `PeerMessage`s, we want the request handler to ask first here for better test
- // coverage:
- Delay::new(Duration::from_millis(20)).await;
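-		// (20ms is assumed to be enough for the requester task to be scheduled on the
-		// test executor before the messages below are processed.)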
-
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerMessage(
- peer_c.clone(),
- Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement(
- metadata.clone(),
- )),
- ),
- ),
- })
- .await;
-
- // Malicious peer:
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerMessage(
- peer_bad.clone(),
- Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement(
- metadata.clone(),
- )),
- ),
- ),
- })
- .await;
-
- // Let c fail once too:
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::SendRequests(
- mut reqs, IfDisconnected::ImmediateError
- )
- ) => {
- let reqs = reqs.pop().unwrap();
- let outgoing = match reqs {
- Requests::StatementFetchingV1(outgoing) => outgoing,
- _ => panic!("Unexpected request"),
- };
- let req = outgoing.payload;
- assert_eq!(req.relay_parent, metadata.relay_parent);
- assert_eq!(req.candidate_hash, metadata.candidate_hash);
- assert_eq!(outgoing.peer, Recipient::Peer(peer_c));
- }
- );
-
- // a fails again:
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::SendRequests(
- mut reqs, IfDisconnected::ImmediateError
- )
- ) => {
- let reqs = reqs.pop().unwrap();
- let outgoing = match reqs {
- Requests::StatementFetchingV1(outgoing) => outgoing,
- _ => panic!("Unexpected request"),
- };
- let req = outgoing.payload;
- assert_eq!(req.relay_parent, metadata.relay_parent);
- assert_eq!(req.candidate_hash, metadata.candidate_hash);
- // On retry, we should have reverse order:
- assert_eq!(outgoing.peer, Recipient::Peer(peer_a));
- }
- );
-
- // Send invalid response (all other peers have been tried now):
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::SendRequests(
- mut reqs, IfDisconnected::ImmediateError
- )
- ) => {
- let reqs = reqs.pop().unwrap();
- let outgoing = match reqs {
- Requests::StatementFetchingV1(outgoing) => outgoing,
- _ => panic!("Unexpected request"),
- };
- let req = outgoing.payload;
- assert_eq!(req.relay_parent, metadata.relay_parent);
- assert_eq!(req.candidate_hash, metadata.candidate_hash);
- assert_eq!(outgoing.peer, Recipient::Peer(peer_bad));
- let bad_candidate = {
- let mut bad = candidate.clone();
- bad.descriptor.para_id = 0xeadbeaf.into();
- bad
- };
- let response = StatementFetchingResponse::Statement(bad_candidate);
- outgoing.pending_response.send(Ok(response.encode())).unwrap();
- }
- );
-
- // Should get punished and never tried again:
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::ReportPeer(p, r)
- ) if p == peer_bad && r == COST_WRONG_HASH => {}
- );
-
- // a is tried again (retried in reverse order):
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::SendRequests(
- mut reqs, IfDisconnected::ImmediateError
- )
- ) => {
- let reqs = reqs.pop().unwrap();
- let outgoing = match reqs {
- Requests::StatementFetchingV1(outgoing) => outgoing,
- _ => panic!("Unexpected request"),
- };
- let req = outgoing.payload;
- assert_eq!(req.relay_parent, metadata.relay_parent);
- assert_eq!(req.candidate_hash, metadata.candidate_hash);
- // On retry, we should have reverse order:
- assert_eq!(outgoing.peer, Recipient::Peer(peer_a));
- }
- );
-
- // c succeeds now:
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::SendRequests(
- mut reqs, IfDisconnected::ImmediateError
- )
- ) => {
- let reqs = reqs.pop().unwrap();
- let outgoing = match reqs {
- Requests::StatementFetchingV1(outgoing) => outgoing,
- _ => panic!("Unexpected request"),
- };
- let req = outgoing.payload;
- assert_eq!(req.relay_parent, metadata.relay_parent);
- assert_eq!(req.candidate_hash, metadata.candidate_hash);
- // On retry, we should have reverse order:
- assert_eq!(outgoing.peer, Recipient::Peer(peer_c));
- let response = StatementFetchingResponse::Statement(candidate.clone());
- outgoing.pending_response.send(Ok(response.encode())).unwrap();
- }
- );
-
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::ReportPeer(p, r)
- ) if p == peer_a && r == COST_FETCH_FAIL => {}
- );
-
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::ReportPeer(p, r)
- ) if p == peer_c && r == BENEFIT_VALID_RESPONSE => {}
- );
-
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::ReportPeer(p, r)
- ) if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => {}
- );
-
- assert_matches!(
- handle.recv().await,
- AllMessages::CandidateBacking(
- CandidateBackingMessage::Statement(r, s)
- ) if r == hash_a && s == statement => {}
- );
-
- // Now messages should go out:
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::SendValidationMessage(
- mut recipients,
- Versioned::V1(protocol_v1::ValidationProtocol::StatementDistribution(
- protocol_v1::StatementDistributionMessage::LargeStatement(meta)
- )),
- )
- ) => {
- gum::debug!(
- target: LOG_TARGET,
- ?recipients,
- "Recipients received"
- );
- recipients.sort();
- let mut expected = vec![peer_b, peer_c, peer_bad];
- expected.sort();
- assert_eq!(recipients, expected);
- assert_eq!(meta.relay_parent, hash_a);
- assert_eq!(meta.candidate_hash, statement.payload().candidate_hash());
- assert_eq!(meta.signed_by, statement.validator_index());
- assert_eq!(&meta.signature, statement.signature());
- }
- );
-
- // Now that it has the candidate it should answer requests accordingly (even after a
- // failed request):
-
- // Failing request first (wrong relay parent hash):
- let (pending_response, response_rx) = oneshot::channel();
- let inner_req = StatementFetchingRequest {
- relay_parent: hash_b,
- candidate_hash: metadata.candidate_hash,
- };
- let req = sc_network::config::IncomingRequest {
- peer: peer_b,
- payload: inner_req.encode(),
- pending_response,
- };
- req_cfg.inbound_queue.as_mut().unwrap().send(req).await.unwrap();
- assert_matches!(
- response_rx.await.unwrap().result,
- Err(()) => {}
- );
-
- // Another failing request (peer_a never received a statement from us, so it is not
- // allowed to request the data):
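-		// (In general, the data is only served to peers we have previously sent the
-		// statement to, which bounds who may fetch large data from us.)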
- let (pending_response, response_rx) = oneshot::channel();
- let inner_req = StatementFetchingRequest {
- relay_parent: metadata.relay_parent,
- candidate_hash: metadata.candidate_hash,
- };
- let req = sc_network::config::IncomingRequest {
- peer: peer_a,
- payload: inner_req.encode(),
- pending_response,
- };
- req_cfg.inbound_queue.as_mut().unwrap().send(req).await.unwrap();
- assert_matches!(
- response_rx.await.unwrap().result,
- Err(()) => {}
- );
-
-		// And now the succeeding request from peer_b:
- let (pending_response, response_rx) = oneshot::channel();
- let inner_req = StatementFetchingRequest {
- relay_parent: metadata.relay_parent,
- candidate_hash: metadata.candidate_hash,
- };
- let req = sc_network::config::IncomingRequest {
- peer: peer_b,
- payload: inner_req.encode(),
- pending_response,
- };
- req_cfg.inbound_queue.as_mut().unwrap().send(req).await.unwrap();
- let StatementFetchingResponse::Statement(committed) =
- Decode::decode(&mut response_rx.await.unwrap().result.unwrap().as_ref()).unwrap();
- assert_eq!(committed, candidate);
-
- handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
- };
-
- futures::pin_mut!(test_fut);
- futures::pin_mut!(bg);
-
- executor::block_on(future::join(test_fut, bg));
-}
-
-#[test]
-fn share_prioritizes_backing_group() {
- sp_tracing::try_init_simple();
- let hash_a = Hash::repeat_byte(1);
-
- let candidate = {
- let mut c = dummy_committed_candidate_receipt(dummy_hash());
- c.descriptor.relay_parent = hash_a;
- c.descriptor.para_id = 1.into();
- c.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3]));
- c
- };
-
- let peer_a = PeerId::random(); // Alice
- let peer_b = PeerId::random(); // Bob
- let peer_c = PeerId::random(); // Charlie
- let peer_bad = PeerId::random(); // No validator
-	let peer_other_group = PeerId::random(); // Dave
-
- let mut validators = vec![
- Sr25519Keyring::Alice.pair(),
- Sr25519Keyring::Bob.pair(),
- Sr25519Keyring::Charlie.pair(),
- // other group
- Sr25519Keyring::Dave.pair(),
- // We:
- Sr25519Keyring::Ferdie.pair(),
- ];
-
- // Strictly speaking we only need MIN_GOSSIP_PEERS - 3 to make sure only priority peers
- // will be served, but by using a larger value we test for overflow errors:
- let dummy_count = MIN_GOSSIP_PEERS;
-
- // We artificially inflate our group, so there won't be any free slots for other peers. (We
- // want to test that our group is prioritized):
- let dummy_pairs: Vec<_> =
- std::iter::repeat_with(|| Pair::generate().0).take(dummy_count).collect();
- let dummy_peers: Vec<_> =
- std::iter::repeat_with(|| PeerId::random()).take(dummy_count).collect();
-
- validators = validators.into_iter().chain(dummy_pairs.clone()).collect();
-
- let mut first_group = vec![0, 1, 2, 4];
- first_group.append(&mut (0..dummy_count as u32).map(|v| v + 5).collect());
- let session_info = make_session_info(validators, vec![first_group, vec![3]]);
-
- let session_index = 1;
-
- let pool = sp_core::testing::TaskExecutor::new();
- let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
-
- let (statement_req_receiver, mut req_cfg) = IncomingRequest::get_config_receiver();
-
- let bg = async move {
- let s = StatementDistributionSubsystem::new(
- make_ferdie_keystore(),
- statement_req_receiver,
- Default::default(),
- AlwaysZeroRng,
- );
- s.run(ctx).await.unwrap();
- };
-
- let test_fut = async move {
- // register our active heads.
- handle
- .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
- ActiveLeavesUpdate::start_work(ActivatedLeaf {
- hash: hash_a,
- number: 1,
- status: LeafStatus::Fresh,
- span: Arc::new(jaeger::Span::Disabled),
- }),
- )))
- .await;
-
- assert_matches!(
- handle.recv().await,
- AllMessages::RuntimeApi(
- RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionIndexForChild(tx))
- )
- if r == hash_a
- => {
- let _ = tx.send(Ok(session_index));
- }
- );
-
- assert_matches!(
- handle.recv().await,
- AllMessages::RuntimeApi(
- RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionInfo(sess_index, tx))
- )
- if r == hash_a && sess_index == session_index
- => {
- let _ = tx.send(Ok(Some(session_info)));
- }
- );
-
- // notify of dummy peers and view
- for (peer, pair) in dummy_peers.clone().into_iter().zip(dummy_pairs) {
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerConnected(
- peer,
- ObservedRole::Full,
- 1,
- Some(HashSet::from([pair.public().into()])),
- ),
- ),
- })
- .await;
-
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerViewChange(peer, view![hash_a]),
- ),
- })
- .await;
- }
-
- // notify of peers and view
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerConnected(
- peer_a.clone(),
- ObservedRole::Full,
- 1,
- Some(HashSet::from([Sr25519Keyring::Alice.public().into()])),
- ),
- ),
- })
- .await;
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerConnected(
- peer_b.clone(),
- ObservedRole::Full,
- 1,
- Some(HashSet::from([Sr25519Keyring::Bob.public().into()])),
- ),
- ),
- })
- .await;
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerConnected(
- peer_c.clone(),
- ObservedRole::Full,
- 1,
- Some(HashSet::from([Sr25519Keyring::Charlie.public().into()])),
- ),
- ),
- })
- .await;
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerConnected(
- peer_bad.clone(),
- ObservedRole::Full,
- 1,
- None,
- ),
- ),
- })
- .await;
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerConnected(
- peer_other_group.clone(),
- ObservedRole::Full,
- 1,
- Some(HashSet::from([Sr25519Keyring::Dave.public().into()])),
- ),
- ),
- })
- .await;
-
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerViewChange(peer_a.clone(), view![hash_a]),
- ),
- })
- .await;
-
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerViewChange(peer_b.clone(), view![hash_a]),
- ),
- })
- .await;
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerViewChange(peer_c.clone(), view![hash_a]),
- ),
- })
- .await;
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerViewChange(peer_bad.clone(), view![hash_a]),
- ),
- })
- .await;
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerViewChange(peer_other_group.clone(), view![hash_a]),
- ),
- })
- .await;
-
-		// Create a seconded statement signed by us (Ferdie). Sharing it should
-		// propagate it to our backing group, prioritized over all other peers.
- let statement = {
- let signing_context = SigningContext { parent_hash: hash_a, session_index };
-
- let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
- let ferdie_public = CryptoStore::sr25519_generate_new(
- &*keystore,
- ValidatorId::ID,
- Some(&Sr25519Keyring::Ferdie.to_seed()),
- )
- .await
- .unwrap();
-
- SignedFullStatement::sign(
- &keystore,
- Statement::Seconded(candidate.clone()),
- &signing_context,
- ValidatorIndex(4),
- &ferdie_public.into(),
- )
- .await
- .ok()
- .flatten()
- .expect("should be signed")
- };
-
- let metadata = derive_metadata_assuming_seconded(hash_a, statement.clone().into());
-
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::Share(hash_a, statement.clone()),
- })
- .await;
-
- // Messages should go out:
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::SendValidationMessage(
- mut recipients,
- Versioned::V1(protocol_v1::ValidationProtocol::StatementDistribution(
- protocol_v1::StatementDistributionMessage::LargeStatement(meta)
- )),
- )
- ) => {
- gum::debug!(
- target: LOG_TARGET,
- ?recipients,
- "Recipients received"
- );
- recipients.sort();
-				// We expect only our backing group to be the recipients, due to the inflated
-				// test group above:
- let mut expected: Vec<_> = vec![peer_a, peer_b, peer_c].into_iter().chain(dummy_peers).collect();
- expected.sort();
- assert_eq!(recipients.len(), expected.len());
- assert_eq!(recipients, expected);
- assert_eq!(meta.relay_parent, hash_a);
- assert_eq!(meta.candidate_hash, statement.payload().candidate_hash());
- assert_eq!(meta.signed_by, statement.validator_index());
- assert_eq!(&meta.signature, statement.signature());
- }
- );
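-		// Since the backing group was inflated beyond `MIN_GOSSIP_PEERS`, every slot is
-		// taken by a group member: neither `peer_other_group` nor `peer_bad` is selected.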
-
- // Now that it has the candidate it should answer requests accordingly:
-
- let (pending_response, response_rx) = oneshot::channel();
- let inner_req = StatementFetchingRequest {
- relay_parent: metadata.relay_parent,
- candidate_hash: metadata.candidate_hash,
- };
- let req = sc_network::config::IncomingRequest {
- peer: peer_b,
- payload: inner_req.encode(),
- pending_response,
- };
- req_cfg.inbound_queue.as_mut().unwrap().send(req).await.unwrap();
- let StatementFetchingResponse::Statement(committed) =
- Decode::decode(&mut response_rx.await.unwrap().result.unwrap().as_ref()).unwrap();
- assert_eq!(committed, candidate);
-
- handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
- };
-
- futures::pin_mut!(test_fut);
- futures::pin_mut!(bg);
-
- executor::block_on(future::join(test_fut, bg));
-}
-
-#[test]
-fn peer_cant_flood_with_large_statements() {
- sp_tracing::try_init_simple();
- let hash_a = Hash::repeat_byte(1);
-
- let candidate = {
- let mut c = dummy_committed_candidate_receipt(dummy_hash());
- c.descriptor.relay_parent = hash_a;
- c.descriptor.para_id = 1.into();
- c.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3]));
- c
- };
-
- let peer_a = PeerId::random(); // Alice
-
- let validators = vec![
- Sr25519Keyring::Alice.pair(),
- Sr25519Keyring::Bob.pair(),
- Sr25519Keyring::Charlie.pair(),
- // other group
- Sr25519Keyring::Dave.pair(),
- // We:
- Sr25519Keyring::Ferdie.pair(),
- ];
-
- let first_group = vec![0, 1, 2, 4];
- let session_info = make_session_info(validators, vec![first_group, vec![3]]);
-
- let session_index = 1;
-
- let pool = sp_core::testing::TaskExecutor::new();
- let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
-
- let (statement_req_receiver, _) = IncomingRequest::get_config_receiver();
- let bg = async move {
- let s = StatementDistributionSubsystem::new(
- make_ferdie_keystore(),
- statement_req_receiver,
- Default::default(),
- AlwaysZeroRng,
- );
- s.run(ctx).await.unwrap();
- };
-
- let test_fut = async move {
- // register our active heads.
- handle
- .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
- ActiveLeavesUpdate::start_work(ActivatedLeaf {
- hash: hash_a,
- number: 1,
- status: LeafStatus::Fresh,
- span: Arc::new(jaeger::Span::Disabled),
- }),
- )))
- .await;
-
- assert_matches!(
- handle.recv().await,
- AllMessages::RuntimeApi(
- RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionIndexForChild(tx))
- )
- if r == hash_a
- => {
- let _ = tx.send(Ok(session_index));
- }
- );
-
- assert_matches!(
- handle.recv().await,
- AllMessages::RuntimeApi(
- RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionInfo(sess_index, tx))
- )
- if r == hash_a && sess_index == session_index
- => {
- let _ = tx.send(Ok(Some(session_info)));
- }
- );
-
- // notify of peers and view
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerConnected(
- peer_a.clone(),
- ObservedRole::Full,
- 1,
- Some(HashSet::from([Sr25519Keyring::Alice.public().into()])),
- ),
- ),
- })
- .await;
-
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerViewChange(peer_a.clone(), view![hash_a]),
- ),
- })
- .await;
-
- // receive a seconded statement from peer A.
- let statement = {
- let signing_context = SigningContext { parent_hash: hash_a, session_index };
-
- let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
- let alice_public = CryptoStore::sr25519_generate_new(
- &*keystore,
- ValidatorId::ID,
- Some(&Sr25519Keyring::Alice.to_seed()),
- )
- .await
- .unwrap();
-
- SignedFullStatement::sign(
- &keystore,
- Statement::Seconded(candidate.clone()),
- &signing_context,
- ValidatorIndex(0),
- &alice_public.into(),
- )
- .await
- .ok()
- .flatten()
- .expect("should be signed")
- };
-
- let metadata = derive_metadata_assuming_seconded(hash_a, statement.clone().into());
-
- for _ in 0..MAX_LARGE_STATEMENTS_PER_SENDER + 1 {
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerMessage(
- peer_a.clone(),
- Versioned::V1(
- protocol_v1::StatementDistributionMessage::LargeStatement(
- metadata.clone(),
- ),
- ),
- ),
- ),
- })
- .await;
- }
-
- // We should try to fetch the data and punish the peer (but we don't know what comes
- // first):
- let mut requested = false;
- let mut punished = false;
- for _ in 0..2 {
- match handle.recv().await {
- AllMessages::NetworkBridge(NetworkBridgeMessage::SendRequests(
- mut reqs,
- IfDisconnected::ImmediateError,
- )) => {
- let reqs = reqs.pop().unwrap();
- let outgoing = match reqs {
- Requests::StatementFetchingV1(outgoing) => outgoing,
- _ => panic!("Unexpected request"),
- };
- let req = outgoing.payload;
- assert_eq!(req.relay_parent, metadata.relay_parent);
- assert_eq!(req.candidate_hash, metadata.candidate_hash);
- assert_eq!(outgoing.peer, Recipient::Peer(peer_a));
- // Just drop request - should trigger error.
- requested = true;
- },
-
- AllMessages::NetworkBridge(NetworkBridgeMessage::ReportPeer(p, r))
- if p == peer_a && r == COST_APPARENT_FLOOD =>
- {
- punished = true;
- },
-
- m => panic!("Unexpected message: {:?}", m),
- }
- }
- assert!(requested, "large data has not been requested.");
- assert!(punished, "Peer should have been punished for flooding.");
-
- handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
- };
-
- futures::pin_mut!(test_fut);
- futures::pin_mut!(bg);
-
- executor::block_on(future::join(test_fut, bg));
-}
-
-// This test addresses an issue where received knowledge was not updated on
-// subsequent `Seconded` statements.
-// See https://github.com/paritytech/polkadot/pull/5177
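-// In other words: a peer whose `Seconded` statement is valid but already known
-// must still have its knowledge updated, so that its subsequent `Valid`
-// statements are accepted rather than punished.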
-#[test]
-fn handle_multiple_seconded_statements() {
- let relay_parent_hash = Hash::repeat_byte(1);
-
- let candidate = dummy_committed_candidate_receipt(relay_parent_hash);
- let candidate_hash = candidate.hash();
-
-	// We want to ensure that `peer_a` and `peer_b` are not among the randomly
-	// selected "lucky" gossip peers.
-	let mut all_peers: Vec<PeerId> = Vec::with_capacity(MIN_GOSSIP_PEERS + 4);
- let peer_a = PeerId::random();
- let peer_b = PeerId::random();
- assert_ne!(peer_a, peer_b);
-
- for _ in 0..MIN_GOSSIP_PEERS + 2 {
- all_peers.push(PeerId::random());
- }
- all_peers.push(peer_a.clone());
- all_peers.push(peer_b.clone());
-
- let mut lucky_peers = all_peers.clone();
- util::choose_random_subset_with_rng(
- |_| false,
- &mut lucky_peers,
- &mut AlwaysZeroRng,
- MIN_GOSSIP_PEERS,
- );
- lucky_peers.sort();
- assert_eq!(lucky_peers.len(), MIN_GOSSIP_PEERS);
- assert!(!lucky_peers.contains(&peer_a));
- assert!(!lucky_peers.contains(&peer_b));
-
- let validators = vec![
- Sr25519Keyring::Alice.pair(),
- Sr25519Keyring::Bob.pair(),
- Sr25519Keyring::Charlie.pair(),
- ];
-
- let session_info = make_session_info(validators, vec![]);
-
- let session_index = 1;
-
- let pool = sp_core::testing::TaskExecutor::new();
- let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
-
- let (statement_req_receiver, _) = IncomingRequest::get_config_receiver();
-
- let virtual_overseer_fut = async move {
- let s = StatementDistributionSubsystem::new(
- Arc::new(LocalKeystore::in_memory()),
- statement_req_receiver,
- Default::default(),
- AlwaysZeroRng,
- );
- s.run(ctx).await.unwrap();
- };
-
- let test_fut = async move {
- // register our active heads.
- handle
- .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
- ActiveLeavesUpdate::start_work(ActivatedLeaf {
- hash: relay_parent_hash,
- number: 1,
- status: LeafStatus::Fresh,
- span: Arc::new(jaeger::Span::Disabled),
- }),
- )))
- .await;
-
- assert_matches!(
- handle.recv().await,
- AllMessages::RuntimeApi(
- RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionIndexForChild(tx))
- )
- if r == relay_parent_hash
- => {
- let _ = tx.send(Ok(session_index));
- }
- );
-
- assert_matches!(
- handle.recv().await,
- AllMessages::RuntimeApi(
- RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionInfo(sess_index, tx))
- )
- if r == relay_parent_hash && sess_index == session_index
- => {
- let _ = tx.send(Ok(Some(session_info)));
- }
- );
-
- // notify of peers and view
- for peer in all_peers.iter() {
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerConnected(
- peer.clone(),
- ObservedRole::Full,
- 1,
- None,
- ),
- ),
- })
- .await;
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerViewChange(peer.clone(), view![relay_parent_hash]),
- ),
- })
- .await;
- }
-
-		// Explicitly add all `lucky` peers to the gossip peers to ensure that neither
-		// `peer_a` nor `peer_b` receives statements.
- let gossip_topology = {
- let mut t = network_bridge_event::NewGossipTopology {
- session: 1,
- our_neighbors_x: HashMap::new(),
- our_neighbors_y: HashMap::new(),
- };
-
-			// Create a topology which ensures that we don't send messages to `peer_a`/`peer_b`.
- for (i, peer) in lucky_peers.iter().enumerate() {
- let authority_id = AuthorityPair::generate().0.public();
- t.our_neighbors_y.insert(
- authority_id,
- network_bridge_event::TopologyPeerInfo {
- peer_ids: vec![peer.clone()],
- validator_index: (i as u32 + 2_u32).into(),
- },
- );
- }
- t.our_neighbors_x.insert(
- AuthorityPair::generate().0.public(),
- network_bridge_event::TopologyPeerInfo {
- peer_ids: vec![peer_a.clone()],
- validator_index: 0_u32.into(),
- },
- );
- t.our_neighbors_x.insert(
- AuthorityPair::generate().0.public(),
- network_bridge_event::TopologyPeerInfo {
- peer_ids: vec![peer_b.clone()],
- validator_index: 1_u32.into(),
- },
- );
-
- t
- };
-
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::NewGossipTopology(gossip_topology),
- ),
- })
- .await;
-
-		// receive a seconded statement from peer A. It should be propagated to the
-		// lucky peers and to candidate backing, but not to `peer_b`.
- let statement = {
- let signing_context = SigningContext { parent_hash: relay_parent_hash, session_index };
-
- let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
- let alice_public = CryptoStore::sr25519_generate_new(
- &*keystore,
- ValidatorId::ID,
- Some(&Sr25519Keyring::Alice.to_seed()),
- )
- .await
- .unwrap();
-
- SignedFullStatement::sign(
- &keystore,
- Statement::Seconded(candidate.clone()),
- &signing_context,
- ValidatorIndex(0),
- &alice_public.into(),
- )
- .await
- .ok()
- .flatten()
- .expect("should be signed")
- };
-
- // `PeerA` sends a `Seconded` message
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerMessage(
- peer_a.clone(),
- Versioned::V1(protocol_v1::StatementDistributionMessage::Statement(
- relay_parent_hash,
- statement.clone().into(),
- )),
- ),
- ),
- })
- .await;
-
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::ReportPeer(p, r)
- ) => {
- assert_eq!(p, peer_a);
- assert_eq!(r, BENEFIT_VALID_STATEMENT_FIRST);
- }
- );
-
- // After the first valid statement, we expect messages to be circulated
- assert_matches!(
- handle.recv().await,
- AllMessages::CandidateBacking(
- CandidateBackingMessage::Statement(r, s)
- ) => {
- assert_eq!(r, relay_parent_hash);
- assert_eq!(s, statement);
- }
- );
-
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::SendValidationMessage(
- recipients,
- Versioned::V1(protocol_v1::ValidationProtocol::StatementDistribution(
- protocol_v1::StatementDistributionMessage::Statement(r, s)
- )),
- )
- ) => {
- assert!(!recipients.contains(&peer_b));
- assert_eq!(r, relay_parent_hash);
- assert_eq!(s, statement.clone().into());
- }
- );
-
- // `PeerB` sends a `Seconded` message: valid but known
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerMessage(
- peer_b.clone(),
- Versioned::V1(protocol_v1::StatementDistributionMessage::Statement(
- relay_parent_hash,
- statement.clone().into(),
- )),
- ),
- ),
- })
- .await;
-
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::ReportPeer(p, r)
- ) => {
- assert_eq!(p, peer_b);
- assert_eq!(r, BENEFIT_VALID_STATEMENT);
- }
- );
-
- // Create a `Valid` statement
- let statement = {
- let signing_context = SigningContext { parent_hash: relay_parent_hash, session_index };
-
- let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
- let alice_public = CryptoStore::sr25519_generate_new(
- &*keystore,
- ValidatorId::ID,
- Some(&Sr25519Keyring::Alice.to_seed()),
- )
- .await
- .unwrap();
-
- SignedFullStatement::sign(
- &keystore,
- Statement::Valid(candidate_hash),
- &signing_context,
- ValidatorIndex(0),
- &alice_public.into(),
- )
- .await
- .ok()
- .flatten()
- .expect("should be signed")
- };
-
- // `PeerA` sends a `Valid` message
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerMessage(
- peer_a.clone(),
- Versioned::V1(protocol_v1::StatementDistributionMessage::Statement(
- relay_parent_hash,
- statement.clone().into(),
- )),
- ),
- ),
- })
- .await;
-
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::ReportPeer(p, r)
- ) => {
- assert_eq!(p, peer_a);
- assert_eq!(r, BENEFIT_VALID_STATEMENT_FIRST);
- }
- );
-
- assert_matches!(
- handle.recv().await,
- AllMessages::CandidateBacking(
- CandidateBackingMessage::Statement(r, s)
- ) => {
- assert_eq!(r, relay_parent_hash);
- assert_eq!(s, statement);
- }
- );
-
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::SendValidationMessage(
- recipients,
- Versioned::V1(protocol_v1::ValidationProtocol::StatementDistribution(
- protocol_v1::StatementDistributionMessage::Statement(r, s)
- )),
- )
- ) => {
- assert!(!recipients.contains(&peer_b));
- assert_eq!(r, relay_parent_hash);
- assert_eq!(s, statement.clone().into());
- }
- );
-
- // `PeerB` sends a `Valid` message
- handle
- .send(FromOrchestra::Communication {
- msg: StatementDistributionMessage::NetworkBridgeUpdate(
- NetworkBridgeEvent::PeerMessage(
- peer_b.clone(),
- Versioned::V1(protocol_v1::StatementDistributionMessage::Statement(
- relay_parent_hash,
- statement.clone().into(),
- )),
- ),
- ),
- })
- .await;
-
-		// We expect this to still be accepted, even though `peer_b` was not the first
-		// to send us the `Seconded` statement.
- assert_matches!(
- handle.recv().await,
- AllMessages::NetworkBridge(
- NetworkBridgeMessage::ReportPeer(p, r)
- ) => {
- assert_eq!(p, peer_b);
- assert_eq!(r, BENEFIT_VALID_STATEMENT);
- }
- );
-
- handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
- };
-
- futures::pin_mut!(test_fut);
- futures::pin_mut!(virtual_overseer_fut);
-
- executor::block_on(future::join(test_fut, virtual_overseer_fut));
-}
-
-fn make_session_info(validators: Vec<Pair>, groups: Vec<Vec<u32>>) -> SessionInfo {
-	let validator_groups: Vec<Vec<ValidatorIndex>> = groups
- .iter()
- .map(|g| g.into_iter().map(|v| ValidatorIndex(*v)).collect())
- .collect();
-
- SessionInfo {
- discovery_keys: validators.iter().map(|k| k.public().into()).collect(),
- // Not used:
- n_cores: validator_groups.len() as u32,
- validator_groups,
- validators: validators.iter().map(|k| k.public().into()).collect(),
- // Not used values:
- assignment_keys: Vec::new(),
- zeroth_delay_tranche_width: 0,
- relay_vrf_modulo_samples: 0,
- n_delay_tranches: 0,
- no_show_slots: 0,
- needed_approvals: 0,
- active_validator_indices: Vec::new(),
- dispute_period: 6,
- random_seed: [0u8; 32],
- }
-}
-
-fn derive_metadata_assuming_seconded(
- hash: Hash,
- statement: UncheckedSignedFullStatement,
-) -> protocol_v1::StatementMetadata {
- protocol_v1::StatementMetadata {
- relay_parent: hash,
- candidate_hash: statement.unchecked_payload().candidate_hash(),
- signed_by: statement.unchecked_validator_index(),
- signature: statement.unchecked_signature().clone(),
- }
-}
+// TODO [now]: remove when unnecessary
+use sc_network as _;
+use sp_core as _;
+use sp_tracing as _;
+
+// TODO [now]:
+//
+// 1. Update tests to only be subsystem tests.
+// 2. Separate tests into pre-asynchronous-backing and post-asynchronous-backing.
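+//
+// The original tests are kept below, commented out, as a reference for porting.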
+//
+// #[test]
+// fn active_head_accepts_only_2_seconded_per_validator() {
+// let validators = vec![
+// Sr25519Keyring::Alice.public().into(),
+// Sr25519Keyring::Bob.public().into(),
+// Sr25519Keyring::Charlie.public().into(),
+// ];
+// let parent_hash: Hash = [1; 32].into();
+
+// let session_index = 1;
+// let signing_context = SigningContext { parent_hash, session_index };
+
+// let candidate_a = {
+// let mut c = dummy_committed_candidate_receipt(dummy_hash());
+// c.descriptor.relay_parent = parent_hash;
+// c.descriptor.para_id = 1.into();
+// c
+// };
+
+// let candidate_b = {
+// let mut c = dummy_committed_candidate_receipt(dummy_hash());
+// c.descriptor.relay_parent = parent_hash;
+// c.descriptor.para_id = 2.into();
+// c
+// };
+
+// let candidate_c = {
+// let mut c = dummy_committed_candidate_receipt(dummy_hash());
+// c.descriptor.relay_parent = parent_hash;
+// c.descriptor.para_id = 3.into();
+// c
+// };
+
+// let mut head_data = ActiveHeadData::new(
+// validators,
+// session_index,
+// PerLeafSpan::new(Arc::new(jaeger::Span::Disabled), "test"),
+// );
+
+// let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
+// let alice_public = SyncCryptoStore::sr25519_generate_new(
+// &*keystore,
+// ValidatorId::ID,
+// Some(&Sr25519Keyring::Alice.to_seed()),
+// )
+// .unwrap();
+// let bob_public = SyncCryptoStore::sr25519_generate_new(
+// &*keystore,
+// ValidatorId::ID,
+// Some(&Sr25519Keyring::Bob.to_seed()),
+// )
+// .unwrap();
+
+// // note A
+// let a_seconded_val_0 = block_on(SignedFullStatement::sign(
+// &keystore,
+// Statement::Seconded(candidate_a.clone()),
+// &signing_context,
+// ValidatorIndex(0),
+// &alice_public.into(),
+// ))
+// .ok()
+// .flatten()
+// .expect("should be signed");
+// assert!(head_data
+// .check_useful_or_unknown(&a_seconded_val_0.clone().convert_payload().into())
+// .is_ok());
+// let noted = head_data.note_statement(a_seconded_val_0.clone());
+
+// assert_matches!(noted, NotedStatement::Fresh(_));
+
+// // note A (duplicate)
+// assert_eq!(
+// head_data.check_useful_or_unknown(&a_seconded_val_0.clone().convert_payload().into()),
+// Err(DeniedStatement::UsefulButKnown),
+// );
+// let noted = head_data.note_statement(a_seconded_val_0);
+
+// assert_matches!(noted, NotedStatement::UsefulButKnown);
+
+// // note B
+// let statement = block_on(SignedFullStatement::sign(
+// &keystore,
+// Statement::Seconded(candidate_b.clone()),
+// &signing_context,
+// ValidatorIndex(0),
+// &alice_public.into(),
+// ))
+// .ok()
+// .flatten()
+// .expect("should be signed");
+// assert!(head_data
+// .check_useful_or_unknown(&statement.clone().convert_payload().into())
+// .is_ok());
+// let noted = head_data.note_statement(statement);
+// assert_matches!(noted, NotedStatement::Fresh(_));
+
+// // note C (beyond 2 - ignored)
+// let statement = block_on(SignedFullStatement::sign(
+// &keystore,
+// Statement::Seconded(candidate_c.clone()),
+// &signing_context,
+// ValidatorIndex(0),
+// &alice_public.into(),
+// ))
+// .ok()
+// .flatten()
+// .expect("should be signed");
+// assert_eq!(
+// head_data.check_useful_or_unknown(&statement.clone().convert_payload().into()),
+// Err(DeniedStatement::NotUseful),
+// );
+// let noted = head_data.note_statement(statement);
+// assert_matches!(noted, NotedStatement::NotUseful);
+
+// // note B (new validator)
+// let statement = block_on(SignedFullStatement::sign(
+// &keystore,
+// Statement::Seconded(candidate_b.clone()),
+// &signing_context,
+// ValidatorIndex(1),
+// &bob_public.into(),
+// ))
+// .ok()
+// .flatten()
+// .expect("should be signed");
+// assert!(head_data
+// .check_useful_or_unknown(&statement.clone().convert_payload().into())
+// .is_ok());
+// let noted = head_data.note_statement(statement);
+// assert_matches!(noted, NotedStatement::Fresh(_));
+
+// // note C (new validator)
+// let statement = block_on(SignedFullStatement::sign(
+// &keystore,
+// Statement::Seconded(candidate_c.clone()),
+// &signing_context,
+// ValidatorIndex(1),
+// &bob_public.into(),
+// ))
+// .ok()
+// .flatten()
+// .expect("should be signed");
+// assert!(head_data
+// .check_useful_or_unknown(&statement.clone().convert_payload().into())
+// .is_ok());
+// let noted = head_data.note_statement(statement);
+// assert_matches!(noted, NotedStatement::Fresh(_));
+// }
+
+// #[test]
+// fn note_local_works() {
+// let hash_a = CandidateHash([1; 32].into());
+// let hash_b = CandidateHash([2; 32].into());
+
+// let mut per_peer_tracker = VcPerPeerTracker::default();
+// per_peer_tracker.note_local(hash_a.clone());
+// per_peer_tracker.note_local(hash_b.clone());
+
+// assert!(per_peer_tracker.local_observed.contains(&hash_a));
+// assert!(per_peer_tracker.local_observed.contains(&hash_b));
+
+// assert!(!per_peer_tracker.remote_observed.contains(&hash_a));
+// assert!(!per_peer_tracker.remote_observed.contains(&hash_b));
+// }
+
+// #[test]
+// fn note_remote_works() {
+// let hash_a = CandidateHash([1; 32].into());
+// let hash_b = CandidateHash([2; 32].into());
+// let hash_c = CandidateHash([3; 32].into());
+
+// let mut per_peer_tracker = VcPerPeerTracker::default();
+// assert!(per_peer_tracker.note_remote(hash_a.clone()));
+// assert!(per_peer_tracker.note_remote(hash_b.clone()));
+// assert!(!per_peer_tracker.note_remote(hash_c.clone()));
+
+// assert!(per_peer_tracker.remote_observed.contains(&hash_a));
+// assert!(per_peer_tracker.remote_observed.contains(&hash_b));
+// assert!(!per_peer_tracker.remote_observed.contains(&hash_c));
+
+// assert!(!per_peer_tracker.local_observed.contains(&hash_a));
+// assert!(!per_peer_tracker.local_observed.contains(&hash_b));
+// assert!(!per_peer_tracker.local_observed.contains(&hash_c));
+// }
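+// (As exercised above, at most two remote `Seconded` candidates are accepted;
+// a third `note_remote` returns `false` and is not recorded.)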
+
+// #[test]
+// fn per_peer_relay_parent_knowledge_send() {
+// let mut knowledge = PeerRelayParentKnowledge::default();
+
+// let hash_a = CandidateHash([1; 32].into());
+
+// // Sending an un-pinned statement should not work and should have no effect.
+// assert!(!knowledge.can_send(&(CompactStatement::Valid(hash_a), ValidatorIndex(0))));
+// assert!(!knowledge.is_known_candidate(&hash_a));
+// assert!(knowledge.sent_statements.is_empty());
+// assert!(knowledge.received_statements.is_empty());
+// assert!(knowledge.seconded_counts.is_empty());
+// assert!(knowledge.received_message_count.is_empty());
+
+// // Make the peer aware of the candidate.
+// assert_eq!(knowledge.send(&(CompactStatement::Seconded(hash_a), ValidatorIndex(0))), true);
+// assert_eq!(knowledge.send(&(CompactStatement::Seconded(hash_a), ValidatorIndex(1))), false);
+// assert!(knowledge.is_known_candidate(&hash_a));
+// assert_eq!(knowledge.sent_statements.len(), 2);
+// assert!(knowledge.received_statements.is_empty());
+// assert_eq!(knowledge.seconded_counts.len(), 2);
+// assert!(knowledge.received_message_count.get(&hash_a).is_none());
+
+// // And now it should accept the dependent message.
+// assert_eq!(knowledge.send(&(CompactStatement::Valid(hash_a), ValidatorIndex(0))), false);
+// assert!(knowledge.is_known_candidate(&hash_a));
+// assert_eq!(knowledge.sent_statements.len(), 3);
+// assert!(knowledge.received_statements.is_empty());
+// assert_eq!(knowledge.seconded_counts.len(), 2);
+// assert!(knowledge.received_message_count.get(&hash_a).is_none());
+// }
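+// (`send` returns `true` only when the statement makes the peer aware of a new
+// candidate; further statements about a known candidate return `false`.)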
+
+// #[test]
+// fn cant_send_after_receiving() {
+// let mut knowledge = PeerRelayParentKnowledge::default();
+
+// let hash_a = CandidateHash([1; 32].into());
+// assert!(knowledge
+// .check_can_receive(&(CompactStatement::Seconded(hash_a), ValidatorIndex(0)), 3)
+// .is_ok());
+// assert!(knowledge
+// .receive(&(CompactStatement::Seconded(hash_a), ValidatorIndex(0)), 3)
+// .unwrap());
+// assert!(!knowledge.can_send(&(CompactStatement::Seconded(hash_a), ValidatorIndex(0))));
+// }
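+// (Once a statement has been received from a peer, `can_send` reports `false`:
+// there is no point sending a statement back to the peer it came from.)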
+
+// #[test]
+// fn per_peer_relay_parent_knowledge_receive() {
+// let mut knowledge = PeerRelayParentKnowledge::default();
+
+// let hash_a = CandidateHash([1; 32].into());
+
+// assert_eq!(
+// knowledge.check_can_receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(0)), 3),
+// Err(COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE),
+// );
+// assert_eq!(
+// knowledge.receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(0)), 3),
+// Err(COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE),
+// );
+
+// assert!(knowledge
+// .check_can_receive(&(CompactStatement::Seconded(hash_a), ValidatorIndex(0)), 3)
+// .is_ok());
+// assert_eq!(
+// knowledge.receive(&(CompactStatement::Seconded(hash_a), ValidatorIndex(0)), 3),
+// Ok(true),
+// );
+
+// // Push statements up to the flood limit.
+// assert!(knowledge
+// .check_can_receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(1)), 3)
+// .is_ok());
+// assert_eq!(
+// knowledge.receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(1)), 3),
+// Ok(false),
+// );
+
+// assert!(knowledge.is_known_candidate(&hash_a));
+// assert_eq!(*knowledge.received_message_count.get(&hash_a).unwrap(), 2);
+
+// assert!(knowledge
+// .check_can_receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(2)), 3)
+// .is_ok());
+// assert_eq!(
+// knowledge.receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(2)), 3),
+// Ok(false),
+// );
+
+// assert_eq!(*knowledge.received_message_count.get(&hash_a).unwrap(), 3);
+
+// assert_eq!(
+// knowledge.check_can_receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(7)), 3),
+// Err(COST_APPARENT_FLOOD),
+// );
+// assert_eq!(
+// knowledge.receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(7)), 3),
+// Err(COST_APPARENT_FLOOD),
+// );
+
+// assert_eq!(*knowledge.received_message_count.get(&hash_a).unwrap(), 3);
+// assert_eq!(knowledge.received_statements.len(), 3); // number of prior `Ok`s.
+
+// // Now make sure that the seconding limit is respected.
+// let hash_b = CandidateHash([2; 32].into());
+// let hash_c = CandidateHash([3; 32].into());
+
+// assert!(knowledge
+// .check_can_receive(&(CompactStatement::Seconded(hash_b), ValidatorIndex(0)), 3)
+// .is_ok());
+// assert_eq!(
+// knowledge.receive(&(CompactStatement::Seconded(hash_b), ValidatorIndex(0)), 3),
+// Ok(true),
+// );
+
+// assert_eq!(
+// knowledge.check_can_receive(&(CompactStatement::Seconded(hash_c), ValidatorIndex(0)), 3),
+// Err(COST_UNEXPECTED_STATEMENT_REMOTE),
+// );
+// assert_eq!(
+// knowledge.receive(&(CompactStatement::Seconded(hash_c), ValidatorIndex(0)), 3),
+// Err(COST_UNEXPECTED_STATEMENT_REMOTE),
+// );
+
+// // Last, make sure that already-known statements are disregarded.
+// assert_eq!(
+// knowledge.check_can_receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(2)), 3),
+// Err(COST_DUPLICATE_STATEMENT),
+// );
+// assert_eq!(
+// knowledge.receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(2)), 3),
+// Err(COST_DUPLICATE_STATEMENT),
+// );
+
+// assert_eq!(
+// knowledge.check_can_receive(&(CompactStatement::Seconded(hash_b), ValidatorIndex(0)), 3),
+// Err(COST_DUPLICATE_STATEMENT),
+// );
+// assert_eq!(
+// knowledge.receive(&(CompactStatement::Seconded(hash_b), ValidatorIndex(0)), 3),
+// Err(COST_DUPLICATE_STATEMENT),
+// );
+// }
+
+// #[test]
+// fn peer_view_update_sends_messages() {
+// let hash_a = Hash::repeat_byte(1);
+// let hash_b = Hash::repeat_byte(2);
+// let hash_c = Hash::repeat_byte(3);
+
+// let candidate = {
+// let mut c = dummy_committed_candidate_receipt(dummy_hash());
+// c.descriptor.relay_parent = hash_c;
+// c.descriptor.para_id = 1.into();
+// c
+// };
+// let candidate_hash = candidate.hash();
+
+// let old_view = view![hash_a, hash_b];
+// let new_view = view![hash_b, hash_c];
+
+// let mut active_heads = HashMap::new();
+// let validators = vec![
+// Sr25519Keyring::Alice.public().into(),
+// Sr25519Keyring::Bob.public().into(),
+// Sr25519Keyring::Charlie.public().into(),
+// ];
+
+// let session_index = 1;
+// let signing_context = SigningContext { parent_hash: hash_c, session_index };
+
+// let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
+
+// let alice_public = SyncCryptoStore::sr25519_generate_new(
+// &*keystore,
+// ValidatorId::ID,
+// Some(&Sr25519Keyring::Alice.to_seed()),
+// )
+// .unwrap();
+// let bob_public = SyncCryptoStore::sr25519_generate_new(
+// &*keystore,
+// ValidatorId::ID,
+// Some(&Sr25519Keyring::Bob.to_seed()),
+// )
+// .unwrap();
+// let charlie_public = SyncCryptoStore::sr25519_generate_new(
+// &*keystore,
+// ValidatorId::ID,
+// Some(&Sr25519Keyring::Charlie.to_seed()),
+// )
+// .unwrap();
+
+// let new_head_data = {
+// let mut data = ActiveHeadData::new(
+// validators,
+// session_index,
+// PerLeafSpan::new(Arc::new(jaeger::Span::Disabled), "test"),
+// );
+
+// let statement = block_on(SignedFullStatement::sign(
+// &keystore,
+// Statement::Seconded(candidate.clone()),
+// &signing_context,
+// ValidatorIndex(0),
+// &alice_public.into(),
+// ))
+// .ok()
+// .flatten()
+// .expect("should be signed");
+// assert!(data
+// .check_useful_or_unknown(&statement.clone().convert_payload().into())
+// .is_ok());
+// let noted = data.note_statement(statement);
+
+// assert_matches!(noted, NotedStatement::Fresh(_));
+
+// let statement = block_on(SignedFullStatement::sign(
+// &keystore,
+// Statement::Valid(candidate_hash),
+// &signing_context,
+// ValidatorIndex(1),
+// &bob_public.into(),
+// ))
+// .ok()
+// .flatten()
+// .expect("should be signed");
+// assert!(data
+// .check_useful_or_unknown(&statement.clone().convert_payload().into())
+// .is_ok());
+// let noted = data.note_statement(statement);
+
+// assert_matches!(noted, NotedStatement::Fresh(_));
+
+// let statement = block_on(SignedFullStatement::sign(
+// &keystore,
+// Statement::Valid(candidate_hash),
+// &signing_context,
+// ValidatorIndex(2),
+// &charlie_public.into(),
+// ))
+// .ok()
+// .flatten()
+// .expect("should be signed");
+// assert!(data
+// .check_useful_or_unknown(&statement.clone().convert_payload().into())
+// .is_ok());
+// let noted = data.note_statement(statement);
+// assert_matches!(noted, NotedStatement::Fresh(_));
+
+// data
+// };
+
+// active_heads.insert(hash_c, new_head_data);
+
+// let mut peer_data = PeerData {
+// view: old_view,
+// view_knowledge: {
+// let mut k = HashMap::new();
+
+// k.insert(hash_a, Default::default());
+// k.insert(hash_b, Default::default());
+
+// k
+// },
+// maybe_authority: None,
+// };
+
+// let pool = sp_core::testing::TaskExecutor::new();
+// let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::<
+// StatementDistributionMessage,
+// _,
+// >(pool);
+// let peer = PeerId::random();
+
+// executor::block_on(async move {
+// let mut topology: SessionGridTopology = Default::default();
+// topology.peers_x = HashSet::from_iter(vec![peer.clone()].into_iter());
+// update_peer_view_and_maybe_send_unlocked(
+// peer.clone(),
+// &topology,
+// &mut peer_data,
+// &mut ctx,
+// &active_heads,
+// new_view.clone(),
+// &Default::default(),
+// &mut AlwaysZeroRng,
+// )
+// .await;
+
+// assert_eq!(peer_data.view, new_view);
+// assert!(!peer_data.view_knowledge.contains_key(&hash_a));
+// assert!(peer_data.view_knowledge.contains_key(&hash_b));
+
+// let c_knowledge = peer_data.view_knowledge.get(&hash_c).unwrap();
+
+// assert!(c_knowledge.is_known_candidate(&candidate_hash));
+// assert!(c_knowledge
+// .sent_statements
+// .contains(&(CompactStatement::Seconded(candidate_hash), ValidatorIndex(0))));
+// assert!(c_knowledge
+// .sent_statements
+// .contains(&(CompactStatement::Valid(candidate_hash), ValidatorIndex(1))));
+// assert!(c_knowledge
+// .sent_statements
+// .contains(&(CompactStatement::Valid(candidate_hash), ValidatorIndex(2))));
+
+// // now see if we got the 3 messages from the active head data.
+// let active_head = active_heads.get(&hash_c).unwrap();
+
+// // semi-fragile because hashmap iterator ordering is undefined, but in practice
+// // it will not change between runs of the program.
+// for statement in active_head.statements_about(candidate_hash) {
+// let message = handle.recv().await;
+// let expected_to = vec![peer.clone()];
+// let expected_payload =
+// statement_message(hash_c, statement.statement.clone(), &Metrics::default());
+
+// assert_matches!(
+// message,
+// AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage(
+// to,
+// payload,
+// )) => {
+// assert_eq!(to, expected_to);
+// assert_eq!(payload, expected_payload)
+// }
+// )
+// }
+// });
+// }
+
+// #[test]
+// fn circulated_statement_goes_to_all_peers_with_view() {
+// let hash_a = Hash::repeat_byte(1);
+// let hash_b = Hash::repeat_byte(2);
+// let hash_c = Hash::repeat_byte(3);
+
+// let candidate = {
+// let mut c = dummy_committed_candidate_receipt(dummy_hash());
+// c.descriptor.relay_parent = hash_b;
+// c.descriptor.para_id = 1.into();
+// c
+// };
+
+// let peer_a = PeerId::random();
+// let peer_b = PeerId::random();
+// let peer_c = PeerId::random();
+
+// let peer_a_view = view![hash_a];
+// let peer_b_view = view![hash_a, hash_b];
+// let peer_c_view = view![hash_b, hash_c];
+
+// let session_index = 1;
+
+// let peer_data_from_view = |view: View| PeerData {
+// view: view.clone(),
+// view_knowledge: view.iter().map(|v| (v.clone(), Default::default())).collect(),
+// maybe_authority: None,
+// };
+
+// let mut peer_data: HashMap<_, _> = vec![
+// (peer_a.clone(), peer_data_from_view(peer_a_view)),
+// (peer_b.clone(), peer_data_from_view(peer_b_view)),
+// (peer_c.clone(), peer_data_from_view(peer_c_view)),
+// ]
+// .into_iter()
+// .collect();
+
+// let pool = sp_core::testing::TaskExecutor::new();
+// let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::<
+// StatementDistributionMessage,
+// _,
+// >(pool);
+
+// executor::block_on(async move {
+// let signing_context = SigningContext { parent_hash: hash_b, session_index };
+
+// let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
+// let alice_public = CryptoStore::sr25519_generate_new(
+// &*keystore,
+// ValidatorId::ID,
+// Some(&Sr25519Keyring::Alice.to_seed()),
+// )
+// .await
+// .unwrap();
+
+// let statement = SignedFullStatement::sign(
+// &keystore,
+// Statement::Seconded(candidate),
+// &signing_context,
+// ValidatorIndex(0),
+// &alice_public.into(),
+// )
+// .await
+// .ok()
+// .flatten()
+// .expect("should be signed");
+
+// let comparator = StoredStatementComparator {
+// compact: statement.payload().to_compact(),
+// validator_index: ValidatorIndex(0),
+// signature: statement.signature().clone(),
+// };
+// let statement = StoredStatement { comparator: &comparator, statement: &statement };
+
+// let mut topology: SessionGridTopology = Default::default();
+// topology.peers_x =
+// HashSet::from_iter(vec![peer_a.clone(), peer_b.clone(), peer_c.clone()].into_iter());
+// let needs_dependents = circulate_statement(
+// RequiredRouting::GridXY,
+// &topology,
+// &mut peer_data,
+// &mut ctx,
+// hash_b,
+// statement,
+// Vec::new(),
+// &Metrics::default(),
+// &mut AlwaysZeroRng,
+// )
+// .await;
+
+// {
+// assert_eq!(needs_dependents.len(), 2);
+// assert!(needs_dependents.contains(&peer_b));
+// assert!(needs_dependents.contains(&peer_c));
+// }
+
+// let fingerprint = (statement.compact().clone(), ValidatorIndex(0));
+
+// assert!(peer_data
+// .get(&peer_b)
+// .unwrap()
+// .view_knowledge
+// .get(&hash_b)
+// .unwrap()
+// .sent_statements
+// .contains(&fingerprint));
+
+// assert!(peer_data
+// .get(&peer_c)
+// .unwrap()
+// .view_knowledge
+// .get(&hash_b)
+// .unwrap()
+// .sent_statements
+// .contains(&fingerprint));
+
+// let message = handle.recv().await;
+// assert_matches!(
+// message,
+// AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage(
+// to,
+// payload,
+// )) => {
+// assert_eq!(to.len(), 2);
+// assert!(to.contains(&peer_b));
+// assert!(to.contains(&peer_c));
+
+// assert_eq!(
+// payload,
+// statement_message(hash_b, statement.statement.clone(), &Metrics::default()),
+// );
+// }
+// )
+// });
+// }
+
+// #[test]
+// fn receiving_from_one_sends_to_another_and_to_candidate_backing() {
+// let hash_a = Hash::repeat_byte(1);
+
+// let candidate = {
+// let mut c = dummy_committed_candidate_receipt(dummy_hash());
+// c.descriptor.relay_parent = hash_a;
+// c.descriptor.para_id = 1.into();
+// c
+// };
+
+// let peer_a = PeerId::random();
+// let peer_b = PeerId::random();
+
+// let validators = vec![
+// Sr25519Keyring::Alice.pair(),
+// Sr25519Keyring::Bob.pair(),
+// Sr25519Keyring::Charlie.pair(),
+// ];
+
+// let session_info = make_session_info(validators, vec![]);
+
+// let session_index = 1;
+
+// let pool = sp_core::testing::TaskExecutor::new();
+// let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
+
+// let (statement_req_receiver, _) = IncomingRequest::get_config_receiver();
+
+// let bg = async move {
+// let s = StatementDistributionSubsystem::new(
+// Arc::new(LocalKeystore::in_memory()),
+// statement_req_receiver,
+// Default::default(),
+// AlwaysZeroRng,
+// );
+// s.run(ctx).await.unwrap();
+// };
+
+// let test_fut = async move {
+// // register our active heads.
+// handle
+// .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
+// ActiveLeavesUpdate::start_work(ActivatedLeaf {
+// hash: hash_a,
+// number: 1,
+// status: LeafStatus::Fresh,
+// span: Arc::new(jaeger::Span::Disabled),
+// }),
+// )))
+// .await;
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::RuntimeApi(
+// RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionIndexForChild(tx))
+// )
+// if r == hash_a
+// => {
+// let _ = tx.send(Ok(session_index));
+// }
+// );
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::RuntimeApi(
+// RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionInfo(sess_index, tx))
+// )
+// if r == hash_a && sess_index == session_index
+// => {
+// let _ = tx.send(Ok(Some(session_info)));
+// }
+// );
+
+// // notify of peers and view
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerConnected(peer_a.clone(), ObservedRole::Full, 1, None),
+// ),
+// })
+// .await;
+
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerConnected(peer_b.clone(), ObservedRole::Full, 1, None),
+// ),
+// })
+// .await;
+
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerViewChange(peer_a.clone(), view![hash_a]),
+// ),
+// })
+// .await;
+
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerViewChange(peer_b.clone(), view![hash_a]),
+// ),
+// })
+// .await;
+
+// 		// receive a seconded statement from peer A. It should be propagated
+// 		// onwards to peer B and to candidate backing.
+// let statement = {
+// let signing_context = SigningContext { parent_hash: hash_a, session_index };
+
+// let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
+// let alice_public = CryptoStore::sr25519_generate_new(
+// &*keystore,
+// ValidatorId::ID,
+// Some(&Sr25519Keyring::Alice.to_seed()),
+// )
+// .await
+// .unwrap();
+
+// SignedFullStatement::sign(
+// &keystore,
+// Statement::Seconded(candidate),
+// &signing_context,
+// ValidatorIndex(0),
+// &alice_public.into(),
+// )
+// .await
+// .ok()
+// .flatten()
+// .expect("should be signed")
+// };
+
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerMessage(
+// peer_a.clone(),
+// Versioned::V1(protocol_v1::StatementDistributionMessage::Statement(
+// hash_a,
+// statement.clone().into(),
+// )),
+// ),
+// ),
+// })
+// .await;
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::ReportPeer(p, r)
+// ) if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => {}
+// );
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::CandidateBacking(
+// CandidateBackingMessage::Statement(r, s)
+// ) if r == hash_a && s == statement => {}
+// );
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::SendValidationMessage(
+// recipients,
+// Versioned::V1(protocol_v1::ValidationProtocol::StatementDistribution(
+// protocol_v1::StatementDistributionMessage::Statement(r, s)
+// )),
+// )
+// ) => {
+// assert_eq!(recipients, vec![peer_b.clone()]);
+// assert_eq!(r, hash_a);
+// assert_eq!(s, statement.into());
+// }
+// );
+// handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+// };
+
+// futures::pin_mut!(test_fut);
+// futures::pin_mut!(bg);
+
+// executor::block_on(future::join(test_fut, bg));
+// }
+
+// #[test]
+// fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing() {
+// sp_tracing::try_init_simple();
+// let hash_a = Hash::repeat_byte(1);
+// let hash_b = Hash::repeat_byte(2);
+
+// let candidate = {
+// let mut c = dummy_committed_candidate_receipt(dummy_hash());
+// c.descriptor.relay_parent = hash_a;
+// c.descriptor.para_id = 1.into();
+// c.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3]));
+// c
+// };
+
+// let peer_a = PeerId::random(); // Alice
+// let peer_b = PeerId::random(); // Bob
+// let peer_c = PeerId::random(); // Charlie
+// let peer_bad = PeerId::random(); // No validator
+
+// let validators = vec![
+// Sr25519Keyring::Alice.pair(),
+// Sr25519Keyring::Bob.pair(),
+// Sr25519Keyring::Charlie.pair(),
+// // We:
+// Sr25519Keyring::Ferdie.pair(),
+// ];
+
+// let session_info = make_session_info(validators, vec![vec![0, 1, 2, 4], vec![3]]);
+
+// let session_index = 1;
+
+// let pool = sp_core::testing::TaskExecutor::new();
+// let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
+
+// let (statement_req_receiver, mut req_cfg) = IncomingRequest::get_config_receiver();
+
+// let bg = async move {
+// let s = StatementDistributionSubsystem::new(
+// make_ferdie_keystore(),
+// statement_req_receiver,
+// Default::default(),
+// AlwaysZeroRng,
+// );
+// s.run(ctx).await.unwrap();
+// };
+
+// let test_fut = async move {
+// // register our active heads.
+// handle
+// .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
+// ActiveLeavesUpdate::start_work(ActivatedLeaf {
+// hash: hash_a,
+// number: 1,
+// status: LeafStatus::Fresh,
+// span: Arc::new(jaeger::Span::Disabled),
+// }),
+// )))
+// .await;
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::RuntimeApi(
+// RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionIndexForChild(tx))
+// )
+// if r == hash_a
+// => {
+// let _ = tx.send(Ok(session_index));
+// }
+// );
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::RuntimeApi(
+// RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionInfo(sess_index, tx))
+// )
+// if r == hash_a && sess_index == session_index
+// => {
+// let _ = tx.send(Ok(Some(session_info)));
+// }
+// );
+
+// // notify of peers and view
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerConnected(
+// peer_a.clone(),
+// ObservedRole::Full,
+// 1,
+// Some(HashSet::from([Sr25519Keyring::Alice.public().into()])),
+// ),
+// ),
+// })
+// .await;
+
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerConnected(
+// peer_b.clone(),
+// ObservedRole::Full,
+// 1,
+// Some(HashSet::from([Sr25519Keyring::Bob.public().into()])),
+// ),
+// ),
+// })
+// .await;
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerConnected(
+// peer_c.clone(),
+// ObservedRole::Full,
+// 1,
+// Some(HashSet::from([Sr25519Keyring::Charlie.public().into()])),
+// ),
+// ),
+// })
+// .await;
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerConnected(
+// peer_bad.clone(),
+// ObservedRole::Full,
+// 1,
+// None,
+// ),
+// ),
+// })
+// .await;
+
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerViewChange(peer_a.clone(), view![hash_a]),
+// ),
+// })
+// .await;
+
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerViewChange(peer_b.clone(), view![hash_a]),
+// ),
+// })
+// .await;
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerViewChange(peer_c.clone(), view![hash_a]),
+// ),
+// })
+// .await;
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerViewChange(peer_bad.clone(), view![hash_a]),
+// ),
+// })
+// .await;
+
+// // receive a seconded statement from peer A, which does not provide the request data,
+// // then get that data from peer C. It should be propagated onwards to peer B and to
+// // candidate backing.
+// let statement = {
+// let signing_context = SigningContext { parent_hash: hash_a, session_index };
+
+// let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
+// let alice_public = CryptoStore::sr25519_generate_new(
+// &*keystore,
+// ValidatorId::ID,
+// Some(&Sr25519Keyring::Alice.to_seed()),
+// )
+// .await
+// .unwrap();
+
+// SignedFullStatement::sign(
+// &keystore,
+// Statement::Seconded(candidate.clone()),
+// &signing_context,
+// ValidatorIndex(0),
+// &alice_public.into(),
+// )
+// .await
+// .ok()
+// .flatten()
+// .expect("should be signed")
+// };
+
+// let metadata = derive_metadata_assuming_seconded(hash_a, statement.clone().into());
+
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerMessage(
+// peer_a.clone(),
+// Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement(
+// metadata.clone(),
+// )),
+// ),
+// ),
+// })
+// .await;
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::SendRequests(
+// mut reqs, IfDisconnected::ImmediateError
+// )
+// ) => {
+// let reqs = reqs.pop().unwrap();
+// let outgoing = match reqs {
+// Requests::StatementFetchingV1(outgoing) => outgoing,
+// _ => panic!("Unexpected request"),
+// };
+// let req = outgoing.payload;
+// assert_eq!(req.relay_parent, metadata.relay_parent);
+// assert_eq!(req.candidate_hash, metadata.candidate_hash);
+// assert_eq!(outgoing.peer, Recipient::Peer(peer_a));
+// // Just drop request - should trigger error.
+// }
+// );
+
+// // There is a race between request handler asking for more peers and processing of the
+// // coming `PeerMessage`s, we want the request handler to ask first here for better test
+// // coverage:
+// Delay::new(Duration::from_millis(20)).await;
+
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerMessage(
+// peer_c.clone(),
+// Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement(
+// metadata.clone(),
+// )),
+// ),
+// ),
+// })
+// .await;
+
+// // Malicious peer:
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerMessage(
+// peer_bad.clone(),
+// Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement(
+// metadata.clone(),
+// )),
+// ),
+// ),
+// })
+// .await;
+
+// // Let c fail once too:
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::SendRequests(
+// mut reqs, IfDisconnected::ImmediateError
+// )
+// ) => {
+// let reqs = reqs.pop().unwrap();
+// let outgoing = match reqs {
+// Requests::StatementFetchingV1(outgoing) => outgoing,
+// _ => panic!("Unexpected request"),
+// };
+// let req = outgoing.payload;
+// assert_eq!(req.relay_parent, metadata.relay_parent);
+// assert_eq!(req.candidate_hash, metadata.candidate_hash);
+// assert_eq!(outgoing.peer, Recipient::Peer(peer_c));
+// }
+// );
+
+// // a fails again:
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::SendRequests(
+// mut reqs, IfDisconnected::ImmediateError
+// )
+// ) => {
+// let reqs = reqs.pop().unwrap();
+// let outgoing = match reqs {
+// Requests::StatementFetchingV1(outgoing) => outgoing,
+// _ => panic!("Unexpected request"),
+// };
+// let req = outgoing.payload;
+// assert_eq!(req.relay_parent, metadata.relay_parent);
+// assert_eq!(req.candidate_hash, metadata.candidate_hash);
+// // On retry, we should have reverse order:
+// assert_eq!(outgoing.peer, Recipient::Peer(peer_a));
+// }
+// );
+
+// // Send invalid response (all other peers have been tried now):
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::SendRequests(
+// mut reqs, IfDisconnected::ImmediateError
+// )
+// ) => {
+// let reqs = reqs.pop().unwrap();
+// let outgoing = match reqs {
+// Requests::StatementFetchingV1(outgoing) => outgoing,
+// _ => panic!("Unexpected request"),
+// };
+// let req = outgoing.payload;
+// assert_eq!(req.relay_parent, metadata.relay_parent);
+// assert_eq!(req.candidate_hash, metadata.candidate_hash);
+// assert_eq!(outgoing.peer, Recipient::Peer(peer_bad));
+// let bad_candidate = {
+// let mut bad = candidate.clone();
+// bad.descriptor.para_id = 0xeadbeaf.into();
+// bad
+// };
+// let response = StatementFetchingResponse::Statement(bad_candidate);
+// outgoing.pending_response.send(Ok(response.encode())).unwrap();
+// }
+// );
+
+// // Should get punished and never tried again:
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::ReportPeer(p, r)
+// ) if p == peer_bad && r == COST_WRONG_HASH => {}
+// );
+
+// // a is tried again (retried in reverse order):
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::SendRequests(
+// mut reqs, IfDisconnected::ImmediateError
+// )
+// ) => {
+// let reqs = reqs.pop().unwrap();
+// let outgoing = match reqs {
+// Requests::StatementFetchingV1(outgoing) => outgoing,
+// _ => panic!("Unexpected request"),
+// };
+// let req = outgoing.payload;
+// assert_eq!(req.relay_parent, metadata.relay_parent);
+// assert_eq!(req.candidate_hash, metadata.candidate_hash);
+// // On retry, we should have reverse order:
+// assert_eq!(outgoing.peer, Recipient::Peer(peer_a));
+// }
+// );
+
+// // c succeeds now:
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::SendRequests(
+// mut reqs, IfDisconnected::ImmediateError
+// )
+// ) => {
+// let reqs = reqs.pop().unwrap();
+// let outgoing = match reqs {
+// Requests::StatementFetchingV1(outgoing) => outgoing,
+// _ => panic!("Unexpected request"),
+// };
+// let req = outgoing.payload;
+// assert_eq!(req.relay_parent, metadata.relay_parent);
+// assert_eq!(req.candidate_hash, metadata.candidate_hash);
+// // On retry, we should have reverse order:
+// assert_eq!(outgoing.peer, Recipient::Peer(peer_c));
+// let response = StatementFetchingResponse::Statement(candidate.clone());
+// outgoing.pending_response.send(Ok(response.encode())).unwrap();
+// }
+// );
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::ReportPeer(p, r)
+// ) if p == peer_a && r == COST_FETCH_FAIL => {}
+// );
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::ReportPeer(p, r)
+// ) if p == peer_c && r == BENEFIT_VALID_RESPONSE => {}
+// );
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::ReportPeer(p, r)
+// ) if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => {}
+// );
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::CandidateBacking(
+// CandidateBackingMessage::Statement(r, s)
+// ) if r == hash_a && s == statement => {}
+// );
+
+// // Now messages should go out:
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::SendValidationMessage(
+// mut recipients,
+// Versioned::V1(protocol_v1::ValidationProtocol::StatementDistribution(
+// protocol_v1::StatementDistributionMessage::LargeStatement(meta)
+// )),
+// )
+// ) => {
+// gum::debug!(
+// target: LOG_TARGET,
+// ?recipients,
+// "Recipients received"
+// );
+// recipients.sort();
+// let mut expected = vec![peer_b, peer_c, peer_bad];
+// expected.sort();
+// assert_eq!(recipients, expected);
+// assert_eq!(meta.relay_parent, hash_a);
+// assert_eq!(meta.candidate_hash, statement.payload().candidate_hash());
+// assert_eq!(meta.signed_by, statement.validator_index());
+// assert_eq!(&meta.signature, statement.signature());
+// }
+// );
+
+// // Now that it has the candidate it should answer requests accordingly (even after a
+// // failed request):
+
+// // Failing request first (wrong relay parent hash):
+// let (pending_response, response_rx) = oneshot::channel();
+// let inner_req = StatementFetchingRequest {
+// relay_parent: hash_b,
+// candidate_hash: metadata.candidate_hash,
+// };
+// let req = sc_network::config::IncomingRequest {
+// peer: peer_b,
+// payload: inner_req.encode(),
+// pending_response,
+// };
+// req_cfg.inbound_queue.as_mut().unwrap().send(req).await.unwrap();
+// assert_matches!(
+// response_rx.await.unwrap().result,
+// Err(()) => {}
+// );
+
+// // Another failing request (peer_a never received a statement from us, so it is not
+// // allowed to request the data):
+// let (pending_response, response_rx) = oneshot::channel();
+// let inner_req = StatementFetchingRequest {
+// relay_parent: metadata.relay_parent,
+// candidate_hash: metadata.candidate_hash,
+// };
+// let req = sc_network::config::IncomingRequest {
+// peer: peer_a,
+// payload: inner_req.encode(),
+// pending_response,
+// };
+// req_cfg.inbound_queue.as_mut().unwrap().send(req).await.unwrap();
+// assert_matches!(
+// response_rx.await.unwrap().result,
+// Err(()) => {}
+// );
+
+// 		// And now the succeeding request from peer_b:
+// let (pending_response, response_rx) = oneshot::channel();
+// let inner_req = StatementFetchingRequest {
+// relay_parent: metadata.relay_parent,
+// candidate_hash: metadata.candidate_hash,
+// };
+// let req = sc_network::config::IncomingRequest {
+// peer: peer_b,
+// payload: inner_req.encode(),
+// pending_response,
+// };
+// req_cfg.inbound_queue.as_mut().unwrap().send(req).await.unwrap();
+// let StatementFetchingResponse::Statement(committed) =
+// Decode::decode(&mut response_rx.await.unwrap().result.unwrap().as_ref()).unwrap();
+// assert_eq!(committed, candidate);
+
+// handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+// };
+
+// futures::pin_mut!(test_fut);
+// futures::pin_mut!(bg);
+
+// executor::block_on(future::join(test_fut, bg));
+// }
+
+// #[test]
+// fn share_prioritizes_backing_group() {
+// sp_tracing::try_init_simple();
+// let hash_a = Hash::repeat_byte(1);
+
+// let candidate = {
+// let mut c = dummy_committed_candidate_receipt(dummy_hash());
+// c.descriptor.relay_parent = hash_a;
+// c.descriptor.para_id = 1.into();
+// c.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3]));
+// c
+// };
+
+// let peer_a = PeerId::random(); // Alice
+// let peer_b = PeerId::random(); // Bob
+// let peer_c = PeerId::random(); // Charlie
+// let peer_bad = PeerId::random(); // No validator
+// 	let peer_other_group = PeerId::random(); // Dave
+
+// let mut validators = vec![
+// Sr25519Keyring::Alice.pair(),
+// Sr25519Keyring::Bob.pair(),
+// Sr25519Keyring::Charlie.pair(),
+// // other group
+// Sr25519Keyring::Dave.pair(),
+// // We:
+// Sr25519Keyring::Ferdie.pair(),
+// ];
+
+// // Strictly speaking we only need MIN_GOSSIP_PEERS - 3 to make sure only priority peers
+// // will be served, but by using a larger value we test for overflow errors:
+// let dummy_count = MIN_GOSSIP_PEERS;
+
+// // We artificially inflate our group, so there won't be any free slots for other peers. (We
+// // want to test that our group is prioritized):
+// let dummy_pairs: Vec<_> =
+// std::iter::repeat_with(|| Pair::generate().0).take(dummy_count).collect();
+// let dummy_peers: Vec<_> =
+// std::iter::repeat_with(|| PeerId::random()).take(dummy_count).collect();
+
+// validators = validators.into_iter().chain(dummy_pairs.clone()).collect();
+
+// let mut first_group = vec![0, 1, 2, 4];
+// first_group.append(&mut (0..dummy_count as u32).map(|v| v + 5).collect());
+// let session_info = make_session_info(validators, vec![first_group, vec![3]]);
+
+// let session_index = 1;
+
+// let pool = sp_core::testing::TaskExecutor::new();
+// let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
+
+// let (statement_req_receiver, mut req_cfg) = IncomingRequest::get_config_receiver();
+
+// let bg = async move {
+// let s = StatementDistributionSubsystem::new(
+// make_ferdie_keystore(),
+// statement_req_receiver,
+// Default::default(),
+// AlwaysZeroRng,
+// );
+// s.run(ctx).await.unwrap();
+// };
+
+// let test_fut = async move {
+// // register our active heads.
+// handle
+// .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
+// ActiveLeavesUpdate::start_work(ActivatedLeaf {
+// hash: hash_a,
+// number: 1,
+// status: LeafStatus::Fresh,
+// span: Arc::new(jaeger::Span::Disabled),
+// }),
+// )))
+// .await;
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::RuntimeApi(
+// RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionIndexForChild(tx))
+// )
+// if r == hash_a
+// => {
+// let _ = tx.send(Ok(session_index));
+// }
+// );
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::RuntimeApi(
+// RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionInfo(sess_index, tx))
+// )
+// if r == hash_a && sess_index == session_index
+// => {
+// let _ = tx.send(Ok(Some(session_info)));
+// }
+// );
+
+// // notify of dummy peers and view
+// for (peer, pair) in dummy_peers.clone().into_iter().zip(dummy_pairs) {
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerConnected(
+// peer,
+// ObservedRole::Full,
+// 1,
+// Some(HashSet::from([pair.public().into()])),
+// ),
+// ),
+// })
+// .await;
+
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerViewChange(peer, view![hash_a]),
+// ),
+// })
+// .await;
+// }
+
+// // notify of peers and view
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerConnected(
+// peer_a.clone(),
+// ObservedRole::Full,
+// 1,
+// Some(HashSet::from([Sr25519Keyring::Alice.public().into()])),
+// ),
+// ),
+// })
+// .await;
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerConnected(
+// peer_b.clone(),
+// ObservedRole::Full,
+// 1,
+// Some(HashSet::from([Sr25519Keyring::Bob.public().into()])),
+// ),
+// ),
+// })
+// .await;
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerConnected(
+// peer_c.clone(),
+// ObservedRole::Full,
+// 1,
+// Some(HashSet::from([Sr25519Keyring::Charlie.public().into()])),
+// ),
+// ),
+// })
+// .await;
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerConnected(
+// peer_bad.clone(),
+// ObservedRole::Full,
+// 1,
+// None,
+// ),
+// ),
+// })
+// .await;
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerConnected(
+// peer_other_group.clone(),
+// ObservedRole::Full,
+// 1,
+// Some(HashSet::from([Sr25519Keyring::Dave.public().into()])),
+// ),
+// ),
+// })
+// .await;
+
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerViewChange(peer_a.clone(), view![hash_a]),
+// ),
+// })
+// .await;
+
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerViewChange(peer_b.clone(), view![hash_a]),
+// ),
+// })
+// .await;
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerViewChange(peer_c.clone(), view![hash_a]),
+// ),
+// })
+// .await;
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerViewChange(peer_bad.clone(), view![hash_a]),
+// ),
+// })
+// .await;
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerViewChange(peer_other_group.clone(), view![hash_a]),
+// ),
+// })
+// .await;
+
+// 		// Share a `Seconded` statement signed by us (Ferdie). It should be sent
+// 		// out with priority to our own (inflated) backing group.
+// let statement = {
+// let signing_context = SigningContext { parent_hash: hash_a, session_index };
+
+// let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
+// let ferdie_public = CryptoStore::sr25519_generate_new(
+// &*keystore,
+// ValidatorId::ID,
+// Some(&Sr25519Keyring::Ferdie.to_seed()),
+// )
+// .await
+// .unwrap();
+
+// SignedFullStatement::sign(
+// &keystore,
+// Statement::Seconded(candidate.clone()),
+// &signing_context,
+// ValidatorIndex(4),
+// &ferdie_public.into(),
+// )
+// .await
+// .ok()
+// .flatten()
+// .expect("should be signed")
+// };
+
+// let metadata = derive_metadata_assuming_seconded(hash_a, statement.clone().into());
+
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::Share(hash_a, statement.clone()),
+// })
+// .await;
+
+// // Messages should go out:
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::SendValidationMessage(
+// mut recipients,
+// Versioned::V1(protocol_v1::ValidationProtocol::StatementDistribution(
+// protocol_v1::StatementDistributionMessage::LargeStatement(meta)
+// )),
+// )
+// ) => {
+// gum::debug!(
+// target: LOG_TARGET,
+// ?recipients,
+// "Recipients received"
+// );
+// recipients.sort();
+// 				// We expect only our backing group to be the recipients, due to
+// 				// the inflated test group above:
+// let mut expected: Vec<_> = vec![peer_a, peer_b, peer_c].into_iter().chain(dummy_peers).collect();
+// expected.sort();
+// assert_eq!(recipients.len(), expected.len());
+// assert_eq!(recipients, expected);
+// assert_eq!(meta.relay_parent, hash_a);
+// assert_eq!(meta.candidate_hash, statement.payload().candidate_hash());
+// assert_eq!(meta.signed_by, statement.validator_index());
+// assert_eq!(&meta.signature, statement.signature());
+// }
+// );
+
+// // Now that it has the candidate it should answer requests accordingly:
+
+// let (pending_response, response_rx) = oneshot::channel();
+// let inner_req = StatementFetchingRequest {
+// relay_parent: metadata.relay_parent,
+// candidate_hash: metadata.candidate_hash,
+// };
+// let req = sc_network::config::IncomingRequest {
+// peer: peer_b,
+// payload: inner_req.encode(),
+// pending_response,
+// };
+// req_cfg.inbound_queue.as_mut().unwrap().send(req).await.unwrap();
+// let StatementFetchingResponse::Statement(committed) =
+// Decode::decode(&mut response_rx.await.unwrap().result.unwrap().as_ref()).unwrap();
+// assert_eq!(committed, candidate);
+
+// handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+// };
+
+// futures::pin_mut!(test_fut);
+// futures::pin_mut!(bg);
+
+// executor::block_on(future::join(test_fut, bg));
+// }
+
+// #[test]
+// fn peer_cant_flood_with_large_statements() {
+// sp_tracing::try_init_simple();
+// let hash_a = Hash::repeat_byte(1);
+
+// let candidate = {
+// let mut c = dummy_committed_candidate_receipt(dummy_hash());
+// c.descriptor.relay_parent = hash_a;
+// c.descriptor.para_id = 1.into();
+// c.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3]));
+// c
+// };
+
+// let peer_a = PeerId::random(); // Alice
+
+// let validators = vec![
+// Sr25519Keyring::Alice.pair(),
+// Sr25519Keyring::Bob.pair(),
+// Sr25519Keyring::Charlie.pair(),
+// // other group
+// Sr25519Keyring::Dave.pair(),
+// // We:
+// Sr25519Keyring::Ferdie.pair(),
+// ];
+
+// let first_group = vec![0, 1, 2, 4];
+// let session_info = make_session_info(validators, vec![first_group, vec![3]]);
+
+// let session_index = 1;
+
+// let pool = sp_core::testing::TaskExecutor::new();
+// let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
+
+// let (statement_req_receiver, _) = IncomingRequest::get_config_receiver();
+// let bg = async move {
+// let s = StatementDistributionSubsystem::new(
+// make_ferdie_keystore(),
+// statement_req_receiver,
+// Default::default(),
+// AlwaysZeroRng,
+// );
+// s.run(ctx).await.unwrap();
+// };
+
+// let test_fut = async move {
+// // register our active heads.
+// handle
+// .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
+// ActiveLeavesUpdate::start_work(ActivatedLeaf {
+// hash: hash_a,
+// number: 1,
+// status: LeafStatus::Fresh,
+// span: Arc::new(jaeger::Span::Disabled),
+// }),
+// )))
+// .await;
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::RuntimeApi(
+// RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionIndexForChild(tx))
+// )
+// if r == hash_a
+// => {
+// let _ = tx.send(Ok(session_index));
+// }
+// );
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::RuntimeApi(
+// RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionInfo(sess_index, tx))
+// )
+// if r == hash_a && sess_index == session_index
+// => {
+// let _ = tx.send(Ok(Some(session_info)));
+// }
+// );
+
+// // notify of peers and view
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerConnected(
+// peer_a.clone(),
+// ObservedRole::Full,
+// 1,
+// Some(HashSet::from([Sr25519Keyring::Alice.public().into()])),
+// ),
+// ),
+// })
+// .await;
+
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerViewChange(peer_a.clone(), view![hash_a]),
+// ),
+// })
+// .await;
+
+// // receive a seconded statement from peer A.
+// let statement = {
+// let signing_context = SigningContext { parent_hash: hash_a, session_index };
+
+// let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
+// let alice_public = CryptoStore::sr25519_generate_new(
+// &*keystore,
+// ValidatorId::ID,
+// Some(&Sr25519Keyring::Alice.to_seed()),
+// )
+// .await
+// .unwrap();
+
+// SignedFullStatement::sign(
+// &keystore,
+// Statement::Seconded(candidate.clone()),
+// &signing_context,
+// ValidatorIndex(0),
+// &alice_public.into(),
+// )
+// .await
+// .ok()
+// .flatten()
+// .expect("should be signed")
+// };
+
+// let metadata = derive_metadata_assuming_seconded(hash_a, statement.clone().into());
+
+// for _ in 0..MAX_LARGE_STATEMENTS_PER_SENDER + 1 {
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerMessage(
+// peer_a.clone(),
+// Versioned::V1(
+// protocol_v1::StatementDistributionMessage::LargeStatement(
+// metadata.clone(),
+// ),
+// ),
+// ),
+// ),
+// })
+// .await;
+// }
+
+// // We should try to fetch the data and punish the peer (but we don't know what comes
+// // first):
+// let mut requested = false;
+// let mut punished = false;
+// for _ in 0..2 {
+// match handle.recv().await {
+// AllMessages::NetworkBridge(NetworkBridgeMessage::SendRequests(
+// mut reqs,
+// IfDisconnected::ImmediateError,
+// )) => {
+// let reqs = reqs.pop().unwrap();
+// let outgoing = match reqs {
+// Requests::StatementFetchingV1(outgoing) => outgoing,
+// _ => panic!("Unexpected request"),
+// };
+// let req = outgoing.payload;
+// assert_eq!(req.relay_parent, metadata.relay_parent);
+// assert_eq!(req.candidate_hash, metadata.candidate_hash);
+// assert_eq!(outgoing.peer, Recipient::Peer(peer_a));
+// // Just drop request - should trigger error.
+// requested = true;
+// },
+
+// AllMessages::NetworkBridge(NetworkBridgeMessage::ReportPeer(p, r))
+// if p == peer_a && r == COST_APPARENT_FLOOD =>
+// {
+// punished = true;
+// },
+
+// m => panic!("Unexpected message: {:?}", m),
+// }
+// }
+// assert!(requested, "large data has not been requested.");
+// assert!(punished, "Peer should have been punished for flooding.");
+
+// handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+// };
+
+// futures::pin_mut!(test_fut);
+// futures::pin_mut!(bg);
+
+// executor::block_on(future::join(test_fut, bg));
+// }
+
+// // This test addresses an issue where received knowledge was not updated on
+// // subsequent `Seconded` statements.
+// // See https://github.com/paritytech/polkadot/pull/5177
+// #[test]
+// fn handle_multiple_seconded_statements() {
+// let relay_parent_hash = Hash::repeat_byte(1);
+
+// let candidate = dummy_committed_candidate_receipt(relay_parent_hash);
+// let candidate_hash = candidate.hash();
+
+// 	// We want to ensure that `peer_a` and `peer_b` are not among the randomly
+// 	// chosen "lucky" gossip peers.
+// 	let mut all_peers: Vec<PeerId> = Vec::with_capacity(MIN_GOSSIP_PEERS + 4);
+// let peer_a = PeerId::random();
+// let peer_b = PeerId::random();
+// assert_ne!(peer_a, peer_b);
+
+// for _ in 0..MIN_GOSSIP_PEERS + 2 {
+// all_peers.push(PeerId::random());
+// }
+// all_peers.push(peer_a.clone());
+// all_peers.push(peer_b.clone());
+
+// let mut lucky_peers = all_peers.clone();
+// util::choose_random_subset_with_rng(
+// |_| false,
+// &mut lucky_peers,
+// &mut AlwaysZeroRng,
+// MIN_GOSSIP_PEERS,
+// );
+// lucky_peers.sort();
+// assert_eq!(lucky_peers.len(), MIN_GOSSIP_PEERS);
+// assert!(!lucky_peers.contains(&peer_a));
+// assert!(!lucky_peers.contains(&peer_b));
+
+// let validators = vec![
+// Sr25519Keyring::Alice.pair(),
+// Sr25519Keyring::Bob.pair(),
+// Sr25519Keyring::Charlie.pair(),
+// ];
+
+// let session_info = make_session_info(validators, vec![]);
+
+// let session_index = 1;
+
+// let pool = sp_core::testing::TaskExecutor::new();
+// let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
+
+// let (statement_req_receiver, _) = IncomingRequest::get_config_receiver();
+
+// let virtual_overseer_fut = async move {
+// let s = StatementDistributionSubsystem::new(
+// Arc::new(LocalKeystore::in_memory()),
+// statement_req_receiver,
+// Default::default(),
+// AlwaysZeroRng,
+// );
+// s.run(ctx).await.unwrap();
+// };
+
+// let test_fut = async move {
+// // register our active heads.
+// handle
+// .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
+// ActiveLeavesUpdate::start_work(ActivatedLeaf {
+// hash: relay_parent_hash,
+// number: 1,
+// status: LeafStatus::Fresh,
+// span: Arc::new(jaeger::Span::Disabled),
+// }),
+// )))
+// .await;
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::RuntimeApi(
+// RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionIndexForChild(tx))
+// )
+// if r == relay_parent_hash
+// => {
+// let _ = tx.send(Ok(session_index));
+// }
+// );
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::RuntimeApi(
+// RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionInfo(sess_index, tx))
+// )
+// if r == relay_parent_hash && sess_index == session_index
+// => {
+// let _ = tx.send(Ok(Some(session_info)));
+// }
+// );
+
+// // notify of peers and view
+// for peer in all_peers.iter() {
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerConnected(
+// peer.clone(),
+// ObservedRole::Full,
+// 1,
+// None,
+// ),
+// ),
+// })
+// .await;
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerViewChange(peer.clone(), view![relay_parent_hash]),
+// ),
+// })
+// .await;
+// }
+
+// 		// Explicitly add all `lucky` peers to the gossip peers to ensure that
+// 		// neither `peer_a` nor `peer_b` receives statements.
+// let gossip_topology = {
+// let mut t = network_bridge_event::NewGossipTopology {
+// session: 1,
+// our_neighbors_x: HashMap::new(),
+// our_neighbors_y: HashMap::new(),
+// };
+
+// 			// Create a topology to ensure that we don't send messages to `peer_a`/`peer_b`.
+// for (i, peer) in lucky_peers.iter().enumerate() {
+// let authority_id = AuthorityPair::generate().0.public();
+// t.our_neighbors_y.insert(
+// authority_id,
+// network_bridge_event::TopologyPeerInfo {
+// peer_ids: vec![peer.clone()],
+// validator_index: (i as u32 + 2_u32).into(),
+// },
+// );
+// }
+// t.our_neighbors_x.insert(
+// AuthorityPair::generate().0.public(),
+// network_bridge_event::TopologyPeerInfo {
+// peer_ids: vec![peer_a.clone()],
+// validator_index: 0_u32.into(),
+// },
+// );
+// t.our_neighbors_x.insert(
+// AuthorityPair::generate().0.public(),
+// network_bridge_event::TopologyPeerInfo {
+// peer_ids: vec![peer_b.clone()],
+// validator_index: 1_u32.into(),
+// },
+// );
+
+// t
+// };
+
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::NewGossipTopology(gossip_topology),
+// ),
+// })
+// .await;
+
+// 		// receive a seconded statement from peer A. It should be forwarded to
+// 		// candidate backing and circulated to the lucky peers, but not to `peer_b`.
+// let statement = {
+// let signing_context = SigningContext { parent_hash: relay_parent_hash, session_index };
+
+// let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
+// let alice_public = CryptoStore::sr25519_generate_new(
+// &*keystore,
+// ValidatorId::ID,
+// Some(&Sr25519Keyring::Alice.to_seed()),
+// )
+// .await
+// .unwrap();
+
+// SignedFullStatement::sign(
+// &keystore,
+// Statement::Seconded(candidate.clone()),
+// &signing_context,
+// ValidatorIndex(0),
+// &alice_public.into(),
+// )
+// .await
+// .ok()
+// .flatten()
+// .expect("should be signed")
+// };
+
+// // `PeerA` sends a `Seconded` message
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerMessage(
+// peer_a.clone(),
+// Versioned::V1(protocol_v1::StatementDistributionMessage::Statement(
+// relay_parent_hash,
+// statement.clone().into(),
+// )),
+// ),
+// ),
+// })
+// .await;
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::ReportPeer(p, r)
+// ) => {
+// assert_eq!(p, peer_a);
+// assert_eq!(r, BENEFIT_VALID_STATEMENT_FIRST);
+// }
+// );
+
+// // After the first valid statement, we expect messages to be circulated
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::CandidateBacking(
+// CandidateBackingMessage::Statement(r, s)
+// ) => {
+// assert_eq!(r, relay_parent_hash);
+// assert_eq!(s, statement);
+// }
+// );
+
+// assert_matches!(
+// 		// There is a race between the request handler asking for more peers and
+// 		// the processing of incoming `PeerMessage`s; we want the request handler
+// 		// to ask first here for better test coverage:
+// recipients,
+// Versioned::V1(protocol_v1::ValidationProtocol::StatementDistribution(
+// protocol_v1::StatementDistributionMessage::Statement(r, s)
+// )),
+// )
+// ) => {
+// assert!(!recipients.contains(&peer_b));
+// assert_eq!(r, relay_parent_hash);
+// assert_eq!(s, statement.clone().into());
+// }
+// );
+
+// // `PeerB` sends a `Seconded` message: valid but known
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerMessage(
+// peer_b.clone(),
+// Versioned::V1(protocol_v1::StatementDistributionMessage::Statement(
+// relay_parent_hash,
+// statement.clone().into(),
+// )),
+// ),
+// ),
+// })
+// .await;
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::ReportPeer(p, r)
+// ) => {
+// assert_eq!(p, peer_b);
+// assert_eq!(r, BENEFIT_VALID_STATEMENT);
+// }
+// );
+
+// // Create a `Valid` statement
+// let statement = {
+// let signing_context = SigningContext { parent_hash: relay_parent_hash, session_index };
+
+// let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
+// let alice_public = CryptoStore::sr25519_generate_new(
+// &*keystore,
+// ValidatorId::ID,
+// Some(&Sr25519Keyring::Alice.to_seed()),
+// )
+// .await
+// .unwrap();
+
+// SignedFullStatement::sign(
+// &keystore,
+// Statement::Valid(candidate_hash),
+// &signing_context,
+// ValidatorIndex(0),
+// &alice_public.into(),
+// )
+// .await
+// .ok()
+// .flatten()
+// .expect("should be signed")
+// };
+
+// // `PeerA` sends a `Valid` message
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerMessage(
+// peer_a.clone(),
+// Versioned::V1(protocol_v1::StatementDistributionMessage::Statement(
+// relay_parent_hash,
+// statement.clone().into(),
+// )),
+// ),
+// ),
+// })
+// .await;
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::ReportPeer(p, r)
+// ) => {
+// assert_eq!(p, peer_a);
+// assert_eq!(r, BENEFIT_VALID_STATEMENT_FIRST);
+// }
+// );
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::CandidateBacking(
+// CandidateBackingMessage::Statement(r, s)
+// ) => {
+// assert_eq!(r, relay_parent_hash);
+// assert_eq!(s, statement);
+// }
+// );
+
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::SendValidationMessage(
+// recipients,
+// Versioned::V1(protocol_v1::ValidationProtocol::StatementDistribution(
+// protocol_v1::StatementDistributionMessage::Statement(r, s)
+// )),
+// )
+// ) => {
+// assert!(!recipients.contains(&peer_b));
+// assert_eq!(r, relay_parent_hash);
+// assert_eq!(s, statement.clone().into());
+// }
+// );
+
+// // `PeerB` sends a `Valid` message
+// handle
+// .send(FromOrchestra::Communication {
+// msg: StatementDistributionMessage::NetworkBridgeUpdate(
+// NetworkBridgeEvent::PeerMessage(
+// peer_b.clone(),
+// Versioned::V1(protocol_v1::StatementDistributionMessage::Statement(
+// relay_parent_hash,
+// statement.clone().into(),
+// )),
+// ),
+// ),
+// })
+// .await;
+
+// 		// We expect that this is still valid despite the fact that `PeerB` was
+// 		// not the first to send a `Seconded` statement.
+// assert_matches!(
+// handle.recv().await,
+// AllMessages::NetworkBridge(
+// NetworkBridgeMessage::ReportPeer(p, r)
+// ) => {
+// assert_eq!(p, peer_b);
+// assert_eq!(r, BENEFIT_VALID_STATEMENT);
+// }
+// );
+
+// handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+// };
+
+// futures::pin_mut!(test_fut);
+// futures::pin_mut!(virtual_overseer_fut);
+
+// executor::block_on(future::join(test_fut, virtual_overseer_fut));
+// }
+
+// fn make_session_info(validators: Vec<Pair>, groups: Vec<Vec<u32>>) -> SessionInfo {
+// 	let validator_groups: Vec<Vec<ValidatorIndex>> = groups
+// .iter()
+// .map(|g| g.into_iter().map(|v| ValidatorIndex(*v)).collect())
+// .collect();
+
+// SessionInfo {
+// discovery_keys: validators.iter().map(|k| k.public().into()).collect(),
+// // Not used:
+// n_cores: validator_groups.len() as u32,
+// validator_groups,
+// validators: validators.iter().map(|k| k.public().into()).collect(),
+// // Not used values:
+// assignment_keys: Vec::new(),
+// zeroth_delay_tranche_width: 0,
+// relay_vrf_modulo_samples: 0,
+// n_delay_tranches: 0,
+// no_show_slots: 0,
+// needed_approvals: 0,
+// active_validator_indices: Vec::new(),
+// dispute_period: 6,
+// random_seed: [0u8; 32],
+// }
+// }
+
+// fn derive_metadata_assuming_seconded(
+// hash: Hash,
+// statement: UncheckedSignedFullStatement,
+// ) -> protocol_v1::StatementMetadata {
+// protocol_v1::StatementMetadata {
+// relay_parent: hash,
+// candidate_hash: statement.unchecked_payload().candidate_hash(),
+// signed_by: statement.unchecked_validator_index(),
+// signature: statement.unchecked_signature().clone(),
+// }
+// }
diff --git a/node/network/statement-distribution/src/view/mod.rs b/node/network/statement-distribution/src/view/mod.rs
new file mode 100644
index 000000000000..52dea8ae9138
--- /dev/null
+++ b/node/network/statement-distribution/src/view/mod.rs
@@ -0,0 +1,401 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Handles the local node's view of the relay-chain state, allowed relay
+//! parents of active leaves, and implements a message store for all known
+//! statements.
+
+// TODO [now]: remove at end
+#![allow(unused)]
+
+use futures::{
+ channel::{mpsc, oneshot},
+ future::RemoteHandle,
+ prelude::*,
+ stream::FuturesUnordered,
+};
+use indexmap::IndexMap;
+
+use polkadot_node_network_protocol::{self as net_protocol, PeerId, View as ActiveLeavesView};
+use polkadot_node_primitives::SignedFullStatement;
+use polkadot_node_subsystem::{
+ messages::{RuntimeApiMessage, RuntimeApiRequest},
+ overseer, ActivatedLeaf, ActiveLeavesUpdate, Span,
+};
+use polkadot_node_subsystem_util::{
+ backing_implicit_view::{FetchError as ImplicitViewFetchError, View as ImplicitView},
+ runtime::{self, RuntimeInfo},
+};
+use polkadot_primitives::v2::{
+ CandidateHash, CommittedCandidateReceipt, CompactStatement, Hash, Id as ParaId,
+ OccupiedCoreAssumption, PersistedValidationData, UncheckedSignedStatement, ValidatorId,
+ ValidatorIndex, ValidatorSignature,
+};
+
+use std::collections::{HashMap, HashSet};
+
+use crate::{Error, LOG_TARGET, VC_THRESHOLD};
+
+mod with_prospective;
+mod without_prospective;
+
+/// The local node's view of the protocol state and messages.
+pub struct View {
+ // The view for all explicit and implicit view relay-parents which
+ // support prospective parachains. Relay-parents in here are exclusive
+ // with those in `without_prospective`.
+ with_prospective: with_prospective::View,
+ // The view for all explicit view relay-parents which don't support
+ // prospective parachains.
+	// Relay-parents in here are exclusive with those in `with_prospective`.
+ without_prospective: without_prospective::View,
+}
+
+/// A peer's view of the protocol state and messages.
+///
+/// The [`PeerView`] should be synchronized with the [`View`] every time the
+/// [`View`] changes by using [`PeerView::synchronize_with_our_view`].
+///
+/// When the peer updates their active leaves, [`PeerView::handle_view_update`]
+/// should be used to prune and update the per-leaf state.
+pub struct PeerView {
+ active_leaves: ActiveLeavesView,
+ mode: ProspectiveParachainsMode,
+ with_prospective: with_prospective::PeerView,
+ without_prospective: without_prospective::PeerView,
+}
+
+impl PeerView {
+ /// Create a new [`PeerView`]. The mode should be set according to the
+ /// peer's network protocol version at a higher level.
+ ///
+ /// Peers which don't support prospective parachains never will, at least
+ /// until they reconnect.
+ pub fn new(mode: ProspectiveParachainsMode) -> Self {
+ PeerView {
+ active_leaves: Default::default(),
+ mode,
+ with_prospective: Default::default(),
+ without_prospective: Default::default(),
+ }
+ }
+
+ /// Synchronize a peer view with our own. This should be called for each
+ /// peer after our active leaves are updated.
+ ///
+ /// This will prune and update internal state within the peer view and may return a set of
+ /// [`StatementFingerprint`]s from our view which are relevant to the peer. It doesn't automatically
+ /// send the statements to the peer, as higher-level network-topology should determine
+ /// what is actually sent to the peer. Everything returned is guaranteed to pass
+ /// `can_send` checks.
+	pub fn synchronize_with_our_view(&mut self, our_view: &View) -> Vec<StatementFingerprint> {
+ // No synchronization needed when prospective parachains are
+ // disabled for the peer as every leaf is a blank slate.
+ if let ProspectiveParachainsMode::Disabled = self.mode {
+ return Vec::new()
+ }
+
+ // TODO [now]
+ // If mode is prospective, then we prune the peer view to only
+ // contain candidates matching our own needs. If there are leaves in our
+ // view which we previously didn't recognize, then we figure out everything we
+ // can now send and send that.
+ unimplemented!()
+ }
+
+ /// Update a peer's active leaves. This should be called every time the peer
+ /// issues a view update over the network.
+ ///
+ /// This will prune and update internal state within the peer view and may return a set of
+ /// [`StatementFingerprint`]s from our view which are relevant to the peer. It doesn't automatically
+ /// send the statements to the peer, as higher-level network-topology should determine
+ /// what is actually sent to the peer. Everything returned is guaranteed to pass
+ /// `can_send` checks.
+ pub fn handle_view_update(
+ &mut self,
+ our_view: &View,
+ new_active_leaves: ActiveLeavesView,
+	) -> Vec<StatementFingerprint> {
+ // TODO [now]: update the local active-leaves view.
+
+ // TODO [now]: prune & create fresh entries accordingly in the
+ // without-prospective.
+
+ if let ProspectiveParachainsMode::Disabled = self.mode {
+ return Vec::new()
+ }
+
+ // TODO [now]: For with-prospective, we do a few things:
+ // 1. clean up old per-relay-parent / per-active-leaf
+ // 2. create new per-relay-parent state only for leaves we have in
+ // common. initialize the depths of sent/received candidates based on
+ // what we know in our view.
+
+ unimplemented!()
+ }
+}
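+
+// A minimal usage sketch (not wired in anywhere yet): choosing the mode for a
+// freshly connected peer. The `>= 2` protocol-version cutoff is an assumption
+// for illustration only; the real mapping is decided at a higher level.
+fn peer_view_for_protocol_version(protocol_version: u32) -> PeerView {
+	// Peers on an older protocol version are treated as not supporting
+	// prospective parachains for the lifetime of their connection.
+	let mode = if protocol_version >= 2 {
+		ProspectiveParachainsMode::Enabled
+	} else {
+		ProspectiveParachainsMode::Disabled
+	};
+	PeerView::new(mode)
+}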
+
+/// A light fingerprint of a statement.
+pub type StatementFingerprint = (CompactStatement, ValidatorIndex);
+
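+// For illustration: how a fingerprint would typically be derived from a full
+// signed statement, mirroring the `(compact, validator_index)` pairs built by
+// the (currently disabled) tests above. A sketch only; not used yet.
+fn fingerprint_of(statement: &SignedFullStatement) -> StatementFingerprint {
+	(statement.payload().to_compact(), statement.validator_index())
+}
+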
+/// Whether a leaf has prospective parachains enabled.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum ProspectiveParachainsMode {
+ /// Prospective parachains are enabled at the leaf.
+ Enabled,
+ /// Prospective parachains are disabled at the leaf.
+ Disabled,
+}
+
+#[overseer::contextbounds(StatementDistribution, prefix = self::overseer)]
+async fn prospective_parachains_mode(
+ ctx: &mut Context,
+ leaf_hash: Hash,
+) -> Result<ProspectiveParachainsMode, Error> {
+ let (tx, rx) = oneshot::channel();
+ ctx.send_message(RuntimeApiMessage::Request(leaf_hash, RuntimeApiRequest::Version(tx)))
+ .await;
+
+ let version = runtime::recv_runtime(rx).await?;
+
+ // TODO [now]: proper staging API logic.
+ // based on https://github.com/paritytech/substrate/issues/11577#issuecomment-1145347025
+	// this is likely final & correct but we should make these constants.
+ if version == 3 {
+ Ok(ProspectiveParachainsMode::Enabled)
+ } else {
+ if version != 2 {
+ gum::warn!(
+ target: LOG_TARGET,
+ "Runtime API version is {}, expected 2 or 3. Prospective parachains are disabled",
+ version
+ );
+ }
+ Ok(ProspectiveParachainsMode::Disabled)
+ }
+}
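+
+// One possible shape for the constants flagged in the TODO above; the names
+// are illustrative and the version numbers follow the linked discussion.
+const RUNTIME_API_VERSION_WITHOUT_PROSPECTIVE: u32 = 2;
+const RUNTIME_API_VERSION_WITH_PROSPECTIVE: u32 = 3;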
+
+/// Handle an active leaves update and update the view.
+#[overseer::contextbounds(StatementDistribution, prefix = self::overseer)]
+pub async fn handle_view_active_leaves_update(
+ ctx: &mut Context,
+ runtime: &mut RuntimeInfo,
+ view: &mut View,
+ update: ActiveLeavesUpdate,
+) -> Result<(), Error> {
+ enum LeafHasProspectiveParachains {
+		Enabled(Result<Vec<Hash>, ImplicitViewFetchError>),
+ Disabled,
+ }
+
+	// Activate in the implicit view before deactivating; per the docs on
+	// `ImplicitView`, this is more efficient and also preserves more old data
+	// that can be useful for understanding peers' views.
+ let res = if let Some(leaf) = update.activated {
+ // Only activate in implicit view if prospective
+ // parachains are enabled.
+ let mode = prospective_parachains_mode(ctx, leaf.hash).await?;
+ let leaf_hash = leaf.hash;
+ Some((
+ leaf,
+ match mode {
+ ProspectiveParachainsMode::Disabled => LeafHasProspectiveParachains::Disabled,
+ ProspectiveParachainsMode::Enabled => LeafHasProspectiveParachains::Enabled(
+ view.with_prospective
+ .implicit_view_mut()
+ .activate_leaf(ctx.sender(), leaf_hash)
+ .await,
+ ),
+ },
+ ))
+ } else {
+ None
+ };
+
+ for deactivated in update.deactivated {
+ view.without_prospective.deactivate_leaf(&deactivated);
+ // TODO [now] clean up implicit view
+ }
+
+ // Gather the relay parents, explicit or implicit, of the new active leaf.
+ // These might be fresh, but might also be known already.
+ let fresh_relay_parents = match res {
+ None => return Ok(()),
+ Some((leaf, LeafHasProspectiveParachains::Disabled)) => {
+ // Defensive in this case; for the enabled mode, this manifests as an error.
+ if view.without_prospective.contains(&leaf.hash) {
+ return Ok(())
+ }
+
+ vec![(leaf.hash, ProspectiveParachainsMode::Disabled)]
+ },
+ Some((leaf, LeafHasProspectiveParachains::Enabled(Ok(_)))) => {
+ // TODO [now]: discover fresh relay parents, clean up old candidates,
+ // etc.
+ unimplemented!();
+ },
+ Some((leaf, LeafHasProspectiveParachains::Enabled(Err(e)))) =>
+ return Err(Error::ImplicitViewFetchError(leaf.hash, e)),
+ };
+
+ for (relay_parent, mode) in fresh_relay_parents {
+ match mode {
+ ProspectiveParachainsMode::Enabled => {
+ // TODO [now]
+ unimplemented!()
+ },
+ ProspectiveParachainsMode::Disabled => {
+ let relay_parent_info =
+ construct_per_relay_parent_info_without_prospective(ctx, runtime, relay_parent)
+ .await?;
+
+ view.without_prospective.activate_leaf(relay_parent, relay_parent_info);
+ },
+ }
+ }
+ Ok(())
+}
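+// Editorial sketch (assumed wiring, not part of the patch): the subsystem's
+// main loop would feed overseer signals into the handler above, roughly:
+//
+// FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) =>
+//     handle_view_active_leaves_update(ctx, &mut runtime, &mut view, update).await?,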
+
+#[overseer::contextbounds(StatementDistribution, prefix = self::overseer)]
+async fn construct_per_relay_parent_info_without_prospective(
+ ctx: &mut Context,
+ runtime: &mut RuntimeInfo,
+ relay_parent: Hash,
+) -> Result<without_prospective::RelayParentInfo, Error> {
+ let span = Span::new(&relay_parent, "statement-distribution-no-prospective");
+
+ // Retrieve the parachain validators at the child of the head we track.
+ let session_index = runtime.get_session_index_for_child(ctx.sender(), relay_parent).await?;
+ let info = runtime
+ .get_session_info_by_index(ctx.sender(), relay_parent, session_index)
+ .await?;
+ let session_info = &info.session_info;
+
+ let valid_pvds = fetch_allowed_pvds_without_prospective(ctx, relay_parent).await?;
+
+ Ok(without_prospective::RelayParentInfo::new(
+ session_info.validators.clone(),
+ session_index,
+ valid_pvds,
+ span,
+ ))
+}
+
+#[overseer::contextbounds(StatementDistribution, prefix = self::overseer)]
+async fn fetch_allowed_pvds_without_prospective(
+ ctx: &mut Context,
+ relay_parent: Hash,
+) -> Result<HashMap<ParaId, PersistedValidationData>, Error> {
+ // Load availability cores
+ let availability_cores = {
+ let (tx, rx) = oneshot::channel();
+ ctx.send_message(RuntimeApiMessage::Request(
+ relay_parent,
+ RuntimeApiRequest::AvailabilityCores(tx),
+ ))
+ .await;
+
+ runtime::recv_runtime(rx).await?
+ };
+
+ // Determine the persisted validation data for
+ // all parachains at this relay-parent.
+ let mut valid_pvds = HashMap::new();
+ let mut responses = FuturesUnordered::new();
+ for core in availability_cores {
+ let para_id = match core.para_id() {
+ Some(p) => p,
+ None => continue,
+ };
+
+ // Note: for occupied cores this means that we'll reject candidates
+ // building on top of a timed-out assumption, but that rarely happens
+ // in practice. This limits code complexity at the cost of adding an
+ // extra block's wait time for backing after a timeout (same as the
+ // status quo).
+ let assumption = if core.is_occupied() {
+ OccupiedCoreAssumption::Included
+ } else {
+ OccupiedCoreAssumption::Free
+ };
+
+ let (tx, rx) = oneshot::channel();
+
+ ctx.send_message(RuntimeApiMessage::Request(
+ relay_parent,
+ RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx),
+ ))
+ .await;
+
+ responses.push(runtime::recv_runtime(rx).map_ok(move |pvd| (para_id, pvd)));
+ }
+
+ while let Some(res) = responses.next().await {
+ let (para_id, maybe_pvd) = res?;
+ match maybe_pvd {
+ None => gum::warn!(
+ target: LOG_TARGET,
+ ?relay_parent,
+ ?para_id,
+ "Potential runtime issue: ccupied core assumption lead to no PVD from runtime API"
+ ),
+ Some(pvd) => {
+ let _ = valid_pvds.insert(para_id, pvd);
+ },
+ }
+ }
+
+ Ok(valid_pvds)
+}
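+// Editorial sketch (helper name is an assumption, not from the patch): a
+// plausible consumer of the map built above, checking the persisted validation
+// data advertised with an incoming `Seconded` statement against the allowed set:
+fn pvd_is_allowed(
+ valid_pvds: &HashMap<ParaId, PersistedValidationData>,
+ para_id: ParaId,
+ advertised: &PersistedValidationData,
+) -> bool {
+ valid_pvds.get(&para_id).map_or(false, |pvd| pvd == advertised)
+}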
+
+// A value used for comparison of stored statements to each other.
+//
+// The compact version of the statement, the validator index, and the signature of the validator
+// are enough to differentiate between all types of equivocations, as long as the signature is
+// actually checked to be valid. The same statement with 2 signatures and 2 statements with
+// different (or same) signatures will all be correctly judged to be unequal with this comparator.
+#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+struct StoredStatementComparator {
+ compact: CompactStatement,
+ validator_index: ValidatorIndex,
+ signature: ValidatorSignature,
+}
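+// Editorial sketch (not part of the patch): because the comparator includes the
+// signature, a map keyed by `StoredStatementComparator` both deduplicates exact
+// re-sends and keeps separate entries for equivocations. The legacy
+// statement-distribution code built comparators roughly like so:
+//
+// let comparator = StoredStatementComparator {
+//     compact: statement.payload().to_compact(),
+//     validator_index: statement.validator_index(),
+//     signature: statement.signature().clone(),
+// };
+// let is_new = statements.insert(comparator, statement).is_none();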
+
+impl<'a> From<(&'a StoredStatementComparator, &'a SignedFullStatement)> for StoredStatement<'a> {
+ fn from(
+ (comparator, statement): (&'a StoredStatementComparator, &'a SignedFullStatement),
+ ) -> Self {
+ Self { comparator, statement }
+ }
+}
+
+// A statement stored while a relay chain head is active.
+#[derive(Debug, Copy, Clone)]
+struct StoredStatement<'a> {
+ comparator: &'a StoredStatementComparator,
+ statement: &'a SignedFullStatement,
+}
+
+impl<'a> StoredStatement<'a> {
+ fn compact(&self) -> &'a CompactStatement {
+ &self.comparator.compact
+ }
+
+ fn fingerprint(&self) -> (CompactStatement, ValidatorIndex) {
+ (self.comparator.compact.clone(), self.statement.validator_index())
+ }
+}
diff --git a/node/network/statement-distribution/src/view/with_prospective.rs b/node/network/statement-distribution/src/view/with_prospective.rs
new file mode 100644
index 000000000000..8364177be7f2
--- /dev/null
+++ b/node/network/statement-distribution/src/view/with_prospective.rs
@@ -0,0 +1,160 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see .
+
+use futures::{
+ channel::{mpsc, oneshot},
+ future::RemoteHandle,
+ prelude::*,
+};
+use indexmap::IndexMap;
+
+use polkadot_node_network_protocol::{
+ self as net_protocol, PeerId, UnifiedReputationChange as Rep,
+};
+use polkadot_node_primitives::SignedFullStatement;
+use polkadot_node_subsystem::Span;
+use polkadot_node_subsystem_util::backing_implicit_view::View as ImplicitView;
+use polkadot_primitives::v2::{
+ CandidateHash, CommittedCandidateReceipt, CompactStatement, Hash, Id as ParaId,
+ PersistedValidationData, UncheckedSignedStatement, ValidatorId, ValidatorIndex,
+ ValidatorSignature,
+};
+
+use std::collections::{HashMap, HashSet};
+
+use super::{StatementFingerprint, StoredStatement, StoredStatementComparator};
+use crate::{LOG_TARGET, VC_THRESHOLD};
+
+pub(crate) struct View {
+ implicit_view: ImplicitView,
+ per_active_leaf: HashMap<Hash, PerActiveLeaf>,
+ candidate_store: CandidateStore,
+}
+
+impl View {
+ /// Get a mutable handle to the implicit view.
+ pub(crate) fn implicit_view_mut(&mut self) -> &mut ImplicitView {
+ &mut self.implicit_view
+ }
+
+ /// Whether the view contains a given relay-parent.
+ pub(crate) fn contains(&self, leaf_hash: &Hash) -> bool {
+ // TODO [now]
+ unimplemented!()
+ }
+
+ /// Deactivate the given leaf in the view, if it exists, and
+ /// clean up after it.
+ pub(crate) fn deactivate_leaf(&mut self, leaf_hash: &Hash) {
+ self.implicit_view.deactivate_leaf(*leaf_hash);
+ // TODO [now]: clean up un-anchored candidates in the store.
+ }
+
+ /// Activate the given relay-parent in the view. This overwrites
+ /// any existing entry, and should only be called for fresh leaves.
+ pub(crate) fn activate_leaf(&mut self, leaf_hash: Hash) {
+ // TODO [now] unimplemented
+ }
+}
+
+struct CandidateStore {
+ per_candidate: HashMap<CandidateHash, PerCandidate>,
+}
+
+// Data stored per active leaf.
+struct PerActiveLeaf {
+ live_candidates: HashMap<(ValidatorId, usize), Vec<CandidateHash>>,
+
+ // Allowed relay-parents for each para.
+ relay_parents_by_para: HashMap<ParaId, HashSet<Hash>>,
+}
+
+struct CandidateMetadata {
+ para_id: ParaId,
+ candidate_hash: CandidateHash,
+ relay_parent: Hash,
+ persisted_validation_data: PersistedValidationData,
+}
+
+// Data stored per candidate.
+struct PerCandidate {
+ metadata: CandidateMetadata,
+ acceptance_status: AcceptanceStatus,
+
+ // all the statements we've received about the candidate, stored in insertion order
+ // so `Seconded` messages are first.
+ statements: IndexMap<StoredStatementComparator, SignedFullStatement>,
+}
+
+enum AcceptanceStatus {
+ Accepted, // by backing / prospective parachains.
+ PendingAcceptance, // by backing / prospective parachains
+}
+
+/// Per-peer view of the protocol state.
+#[derive(Default)]
+pub(crate) struct PeerView {
+ /// Candidates that the peer is aware of because we sent statements about them to it.
+ /// This indicates that we can send other statements pertaining to those candidates.
+ sent_candidates: HashSet<CandidateHash>,