From 930126320fa515f0d7950953fafef2258d6b696c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 12 Sep 2022 23:19:29 -0500 Subject: [PATCH 001/220] add notification types for v2 statement-distribution --- Cargo.lock | 1 + node/network/protocol/Cargo.toml | 1 + node/network/protocol/src/lib.rs | 37 +++++++++++++++++-- .../network/statement-distribution/src/lib.rs | 2 +- 4 files changed, 37 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 363c7b0c0501..bbe3ce7a2b1f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6763,6 +6763,7 @@ name = "polkadot-node-network-protocol" version = "0.9.29" dependencies = [ "async-trait", + "bitvec 1.0.0", "derive_more", "fatality", "futures", diff --git a/node/network/protocol/Cargo.toml b/node/network/protocol/Cargo.toml index c3ccec1770c8..c799ffb649cb 100644 --- a/node/network/protocol/Cargo.toml +++ b/node/network/protocol/Cargo.toml @@ -21,6 +21,7 @@ fatality = "0.0.6" rand = "0.8" derive_more = "0.99" gum = { package = "tracing-gum", path = "../../gum" } +bitvec = "1" [dev-dependencies] rand_chacha = "0.3.1" diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index a45bca82df49..c57e6d8d110c 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -589,11 +589,12 @@ pub mod v1 { /// vstaging network protocol types. pub mod vstaging { + use bitvec::vec::BitVec; use parity_scale_codec::{Decode, Encode}; use polkadot_primitives::vstaging::{ - CandidateIndex, CollatorId, CollatorSignature, Hash, Id as ParaId, - UncheckedSignedAvailabilityBitfield, + CandidateIndex, CandidateHash, CollatorId, CollatorSignature, Hash, Id as ParaId, + UncheckedSignedAvailabilityBitfield, UncheckedSignedStatement, }; use polkadot_node_primitives::{ @@ -612,7 +613,37 @@ pub mod vstaging { /// Network messages used by the statement distribution subsystem. 
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] pub enum StatementDistributionMessage { - // TODO [now]: notifications for v2 + /// A notification of a signed statement in compact form. + #[codec(index = 0)] + Statement(Hash, UncheckedSignedStatement), + + /// A notification of a backed candidate being known by the + /// sending node, for the purpose of being requested by the receiving node + /// if needed. + #[codec(index = 1)] + BackedCandidateInv { + /// The relay-parent of the candidate. + relay_parent: Hash, + /// The hash of the candidate. + candidate_hash: CandidateHash, + /// The para that the candidate is assigned to. + para_id: ParaId, + /// The head-data corresponding to the candidate. + parent_head_data_hash: Hash, + /// A bitfield which indicates which validators in the para's + /// group at the relay-parent have seconded this candidate. + /// + /// This MUST have the minimum amount of bytes + /// necessary to represent the number of validators in the + /// assigned backing group as-of the relay-parent. + seconded_in_group: BitVec, + }, + + /// A notification of a backed candidate being known by the sending known, + /// for the purpose of informing a receiving node which already has the candidate. + #[codec(index = 2)] + BackedCandidateKnown(Hash, CandidateHash), + /// All messages for V1 for compatibility with the statement distribution /// protocol, for relay-parents that don't support asynchronous backing. 
/// diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 9731818a459a..37f50aeb3417 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -242,7 +242,7 @@ impl StatementDistributionSubsystem { &NetworkBridgeEvent::PeerMessage(_, ref message) => match message { Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility(_)) => true, Versioned::V1(_) => true, - // TODO [now]: _ => false, + Versioned::VStaging(_) => false, }, _ => true, }; From 45e2b6818930817aa314f6aeca632eac631d658a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 13 Sep 2022 18:26:35 -0500 Subject: [PATCH 002/220] improve protocol docs --- node/network/protocol/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index c57e6d8d110c..5f865a55dfb4 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -621,7 +621,7 @@ pub mod vstaging { /// sending node, for the purpose of being requested by the receiving node /// if needed. #[codec(index = 1)] - BackedCandidateInv { + BackedCandidateInventory { /// The relay-parent of the candidate. relay_parent: Hash, /// The hash of the candidate. @@ -639,7 +639,7 @@ pub mod vstaging { seconded_in_group: BitVec, }, - /// A notification of a backed candidate being known by the sending known, + /// A notification of a backed candidate being known by the sending node, /// for the purpose of informing a receiving node which already has the candidate. #[codec(index = 2)] BackedCandidateKnown(Hash, CandidateHash), @@ -650,7 +650,7 @@ pub mod vstaging { /// These are illegal to send to V1 peers, and illegal to send concerning relay-parents /// which support asynchronous backing. 
This backwards compatibility should be /// considered immediately deprecated and can be removed once the node software - /// is not required to support asynchronous backing anymore. + /// is not required to support logic from before asynchronous backing anymore. #[codec(index = 255)] V1Compatibility(crate::v1::StatementDistributionMessage), } From d7fdad8174488118c51c7056740c23d2c0b49363 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 13 Sep 2022 18:30:04 -0500 Subject: [PATCH 003/220] add empty vstaging module --- node/network/statement-distribution/src/lib.rs | 2 ++ .../statement-distribution/src/vstaging/mod.rs | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) create mode 100644 node/network/statement-distribution/src/vstaging/mod.rs diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 37f50aeb3417..36b1ef4a956a 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -53,6 +53,8 @@ use legacy_v1::{ ResponderMessage as V1ResponderMessage, }; +mod vstaging; + const LOG_TARGET: &str = "parachain::statement-distribution"; /// The statement distribution subsystem. diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs new file mode 100644 index 000000000000..85a20e96292f --- /dev/null +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -0,0 +1,18 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Implementation of the v2 statement distribution protocol, +//! designed for asynchronous backing. From 2ab18ff88d5844c970799ab22c8ee6df956e35f6 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 13 Sep 2022 22:12:33 -0500 Subject: [PATCH 004/220] fmt --- .../statement-distribution/src/legacy_v1/mod.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/node/network/statement-distribution/src/legacy_v1/mod.rs b/node/network/statement-distribution/src/legacy_v1/mod.rs index e07f4489dfba..104716213cee 100644 --- a/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -1433,6 +1433,16 @@ async fn handle_incoming_message<'a, Context>( Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility( m, )) => m, + Versioned::VStaging(_) => { + // The higher-level subsystem code is supposed to filter out + // all non v1 messages. 
+ gum::debug!( + target: LOG_TARGET, + "Legacy statement-distribution code received unintended v2 message" + ); + + return None + }, }; let relay_parent = message.get_relay_parent(); From 6ccdfc51415845b92fb70ecd30ebf6a990addd3d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 13 Sep 2022 22:12:40 -0500 Subject: [PATCH 005/220] add backed candidate packet request types --- node/network/protocol/src/lib.rs | 2 +- .../protocol/src/request_response/mod.rs | 84 ++++++++++++++++--- .../protocol/src/request_response/vstaging.rs | 46 ++++++++++ 3 files changed, 121 insertions(+), 11 deletions(-) create mode 100644 node/network/protocol/src/request_response/vstaging.rs diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 5f865a55dfb4..bbda790d7ecc 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -593,7 +593,7 @@ pub mod vstaging { use parity_scale_codec::{Decode, Encode}; use polkadot_primitives::vstaging::{ - CandidateIndex, CandidateHash, CollatorId, CollatorSignature, Hash, Id as ParaId, + CandidateHash, CandidateIndex, CollatorId, CollatorSignature, Hash, Id as ParaId, UncheckedSignedAvailabilityBitfield, UncheckedSignedStatement, }; diff --git a/node/network/protocol/src/request_response/mod.rs b/node/network/protocol/src/request_response/mod.rs index 5f4740279ef6..805ac4dec305 100644 --- a/node/network/protocol/src/request_response/mod.rs +++ b/node/network/protocol/src/request_response/mod.rs @@ -52,9 +52,12 @@ pub use outgoing::{OutgoingRequest, OutgoingResult, Recipient, Requests, Respons ///// Multiplexer for incoming requests. // pub mod multiplexer; -/// Actual versioned requests and responses, that are sent over the wire. +/// Actual versioned requests and responses that are sent over the wire. pub mod v1; +/// Actual versioned requests and responses that are sent over the wire. 
+pub mod vstaging; + /// A protocol per subsystem seems to make the most sense, this way we don't need any dispatching /// within protocols. #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, EnumIter)] @@ -71,6 +74,10 @@ pub enum Protocol { StatementFetchingV1, /// Sending of dispute statements with application level confirmations. DisputeSendingV1, + + /// Protocol for requesting backed candidate packets in statement distribution + /// in v2. + BackedCandidatePacketV2, } /// Minimum bandwidth we expect for validators - 500Mbit/s is the recommendation, so approximately @@ -102,12 +109,30 @@ const POV_REQUEST_TIMEOUT_CONNECTED: Duration = Duration::from_millis(1200); /// fit statement distribution within a block of 6 seconds.) const STATEMENTS_TIMEOUT: Duration = Duration::from_secs(1); +/// We want to time out backed candidate requests to time out relatively fast, +/// because slow requests will bottleneck the backing system. Ideally, we'd have +/// an adaptive timeout based on the candidate size, because there will be a lot of variance +/// in candidate sizes: candidates with no code and no messages vs candidates with code +/// and messages. +/// +/// We supply leniency because there are often large candidates and asynchronous +/// backing allows them to be included over a longer window of time. Exponential back-off +/// up to a maximum of 10 seconds would be ideal, but isn't supported by the +/// infrastructure here yet: see https://github.com/paritytech/polkadot/issues/6009 +const BACKED_CANDIDATE_PACKET_TIMEOUT: Duration = Duration::from_millis(2500); + /// We don't want a slow peer to slow down all the others, at the same time we want to get out the /// data quickly in full to at least some peers (as this will reduce load on us as they then can /// start serving the data). So this value is a tradeoff. 3 seems to be sensible. So we would need /// to have 3 slow nodes connected, to delay transfer for others by `STATEMENTS_TIMEOUT`. 
pub const MAX_PARALLEL_STATEMENT_REQUESTS: u32 = 3; +/// We don't want a slow peer to slow down all the others, at the same time we want to get out the +/// data quickly in full to at least some peers (as this will reduce load on us as they then can +/// start serving the data). So this value is a tradeoff. 3 seems to be sensible. So we would need +/// to have 3 slow nodes connected, to delay transfer for others by `STATEMENTS_TIMEOUT`. +pub const MAX_PARALLEL_BACKED_CANDIDATE_PACKET_REQUESTS: u32 = 5; + /// Response size limit for responses of POV like data. /// /// This is larger than `MAX_POV_SIZE` to account for protocol overhead and for additional data in @@ -121,6 +146,12 @@ const POV_RESPONSE_SIZE: u64 = MAX_POV_SIZE as u64 + 10_000; /// This is `MAX_CODE_SIZE` plus some additional space for protocol overhead. const STATEMENT_RESPONSE_SIZE: u64 = MAX_CODE_SIZE as u64 + 10_000; +/// Maximum response sizes for `BackedCandidatePacketV2`. +/// +/// This is `MAX_CODE_SIZE` plus some additional space for protocol overhead and +/// additional backing statements. +const BACKED_CANDIDATE_PACKET_RESPONSE_SIZE: u64 = MAX_CODE_SIZE as u64 + 100_000; + impl Protocol { /// Get a configuration for a given Request response protocol. /// @@ -199,6 +230,15 @@ impl Protocol { request_timeout: Duration::from_secs(12), inbound_queue: Some(tx), }, + + Protocol::BackedCandidatePacketV2 => RequestResponseConfig { + name, + fallback_names, + max_request_size: 1_000, + max_response_size: BACKED_CANDIDATE_PACKET_RESPONSE_SIZE, + request_timeout: BACKED_CANDIDATE_PACKET_TIMEOUT, + inbound_queue: Some(tx), + }, }; (rx, cfg) } @@ -244,23 +284,45 @@ impl Protocol { // average, so something in the ballpark of 100 should be fine. Nodes will retry on // failure, so having a good value here is mostly about performance tuning. Protocol::DisputeSendingV1 => 100, + + Protocol::BackedCandidatePacketV2 => { + // We assume we can utilize up to 70% of the available bandwidth for statements. 
+ // This is just a guess/estimate, with the following considerations: If we are + // faster than that, queue size will stay low anyway, even if not - requesters will + // get an immediate error, but if we are slower, requesters will run in a timeout - + // wasting precious time. + let available_bandwidth = 7 * MIN_BANDWIDTH_BYTES / 10; + let size = u64::saturating_sub( + BACKED_CANDIDATE_PACKET_TIMEOUT.as_millis() as u64 * available_bandwidth / + (1000 * MAX_CODE_SIZE as u64), + MAX_PARALLEL_BACKED_CANDIDATE_PACKET_REQUESTS as u64, + ); + debug_assert!( + size > 0, + "We should have a channel size greater zero, otherwise we won't accept any requests." + ); + size as usize + }, } } /// Fallback protocol names of this protocol, as understood by substrate networking. fn get_fallback_names(self) -> Vec { - std::iter::once(self.get_legacy_name().into()).collect() + self.get_legacy_name().into_iter().map(Into::into).collect() } - /// Legacy protocol name associated with each peer set. - const fn get_legacy_name(self) -> &'static str { + /// Legacy protocol name associated with each peer set, if any. 
+ const fn get_legacy_name(self) -> Option<&'static str> { match self { - Protocol::ChunkFetchingV1 => "/polkadot/req_chunk/1", - Protocol::CollationFetchingV1 => "/polkadot/req_collation/1", - Protocol::PoVFetchingV1 => "/polkadot/req_pov/1", - Protocol::AvailableDataFetchingV1 => "/polkadot/req_available_data/1", - Protocol::StatementFetchingV1 => "/polkadot/req_statement/1", - Protocol::DisputeSendingV1 => "/polkadot/send_dispute/1", + Protocol::ChunkFetchingV1 => Some("/polkadot/req_chunk/1"), + Protocol::CollationFetchingV1 => Some("/polkadot/req_collation/1"), + Protocol::PoVFetchingV1 => Some("/polkadot/req_pov/1"), + Protocol::AvailableDataFetchingV1 => Some("/polkadot/req_available_data/1"), + Protocol::StatementFetchingV1 => Some("/polkadot/req_statement/1"), + Protocol::DisputeSendingV1 => Some("/polkadot/send_dispute/1"), + + // Introduced after legacy names became legacy. + Protocol::BackedCandidatePacketV2 => None, } } } @@ -316,6 +378,8 @@ impl ReqProtocolNames { Protocol::AvailableDataFetchingV1 => "/req_available_data/1", Protocol::StatementFetchingV1 => "/req_statement/1", Protocol::DisputeSendingV1 => "/send_dispute/1", + + Protocol::BackedCandidatePacketV2 => "/req_backed_candidate_packet/2", }; format!("{}{}", prefix, short_name).into() diff --git a/node/network/protocol/src/request_response/vstaging.rs b/node/network/protocol/src/request_response/vstaging.rs new file mode 100644 index 000000000000..12c1a729d066 --- /dev/null +++ b/node/network/protocol/src/request_response/vstaging.rs @@ -0,0 +1,46 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Requests and responses as sent over the wire for the individual protocols. + +use parity_scale_codec::{Decode, Encode}; + +use polkadot_primitives::vstaging::{ + CandidateHash, CommittedCandidateReceipt, UncheckedSignedStatement, +}; + +use super::{IsRequest, Protocol}; + +/// Request a backed candidate packet. +#[derive(Debug, Copy, Clone, Encode, Decode)] +pub struct BackedCandidatePacketRequest { + /// Hash of the candidate we want to request. + pub candidate_hash: CandidateHash, +} + +/// Response to a backed candidate packet request. +#[derive(Debug, Clone, Encode, Decode)] +pub struct BackedCandidatePacketResponse { + /// The candidate receipt, with commitments. + pub candidate_receipt: CommittedCandidateReceipt, + /// All known statements about the candidate, in compact form. 
+ pub statements: Vec, +} + +impl IsRequest for BackedCandidatePacketRequest { + type Response = BackedCandidatePacketResponse; + const PROTOCOL: Protocol = Protocol::BackedCandidatePacketV2; +} From 159ab6aef2f645f96b4a99c345c1e38c5681998c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 14 Sep 2022 18:01:18 -0500 Subject: [PATCH 006/220] start putting down structure of new logic --- node/network/protocol/src/lib.rs | 38 +++-- .../statement-distribution/src/error.rs | 6 + .../src/vstaging/mod.rs | 157 ++++++++++++++++++ node/overseer/src/lib.rs | 2 + 4 files changed, 186 insertions(+), 17 deletions(-) diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index bbda790d7ecc..c7fc60c45a62 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -610,6 +610,26 @@ pub mod vstaging { Bitfield(Hash, UncheckedSignedAvailabilityBitfield), } + /// An inventory of a backed candidate, which can be requested. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct BackedCandidateInventory { + /// The relay-parent of the candidate. + pub relay_parent: Hash, + /// The hash of the candidate. + pub candidate_hash: CandidateHash, + /// The para that the candidate is assigned to. + pub para_id: ParaId, + /// The head-data corresponding to the candidate. + pub parent_head_data_hash: Hash, + /// A bitfield which indicates which validators in the para's + /// group at the relay-parent have seconded this candidate. + /// + /// This MUST have the minimum amount of bytes + /// necessary to represent the number of validators in the + /// assigned backing group as-of the relay-parent. + pub seconded_in_group: BitVec, + } + /// Network messages used by the statement distribution subsystem. 
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] pub enum StatementDistributionMessage { @@ -621,23 +641,7 @@ pub mod vstaging { /// sending node, for the purpose of being requested by the receiving node /// if needed. #[codec(index = 1)] - BackedCandidateInventory { - /// The relay-parent of the candidate. - relay_parent: Hash, - /// The hash of the candidate. - candidate_hash: CandidateHash, - /// The para that the candidate is assigned to. - para_id: ParaId, - /// The head-data corresponding to the candidate. - parent_head_data_hash: Hash, - /// A bitfield which indicates which validators in the para's - /// group at the relay-parent have seconded this candidate. - /// - /// This MUST have the minimum amount of bytes - /// necessary to represent the number of validators in the - /// assigned backing group as-of the relay-parent. - seconded_in_group: BitVec, - }, + BackedCandidateInventory(BackedCandidateInventory), /// A notification of a backed candidate being known by the sending node, /// for the purpose of informing a receiving node which already has the candidate. diff --git a/node/network/statement-distribution/src/error.rs b/node/network/statement-distribution/src/error.rs index f91b0980c966..d6035681fe92 100644 --- a/node/network/statement-distribution/src/error.rs +++ b/node/network/statement-distribution/src/error.rs @@ -21,6 +21,7 @@ use polkadot_node_network_protocol::PeerId; use polkadot_node_subsystem::{RuntimeApiError, SubsystemError}; use polkadot_node_subsystem_util::runtime; use polkadot_primitives::v2::{CandidateHash, Hash, Id as ParaId}; +use polkadot_node_subsystem_util::backing_implicit_view::{FetchError as ImplicitViewFetchError}; use futures::channel::oneshot; @@ -84,6 +85,11 @@ pub enum Error { // Responder no longer waits for our data. (Should not happen right now.) #[error("Oneshot `GetData` channel closed")] ResponderGetDataCanceled, + + // Failed to activate leaf due to a fetch error. 
+ #[error("Implicit view failure while activating leaf")] + ActivateLeafFailure(ImplicitViewFetchError,) + } /// Utility for eating top level errors and log them. diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 85a20e96292f..67bf4c2cc9b0 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -16,3 +16,160 @@ //! Implementation of the v2 statement distribution protocol, //! designed for asynchronous backing. + +use polkadot_primitives::vstaging::{ + Hash, CandidateHash, CommittedCandidateReceipt, ValidatorId, SignedStatement, UncheckedSignedStatement, + GroupIndex, PersistedValidationData, +}; +use polkadot_node_network_protocol::{ + self as net_protocol, + peer_set::ValidationVersion, + vstaging as protocol_vstaging, View, PeerId, +}; +use polkadot_node_subsystem::{ + jaeger, + messages::{CandidateBackingMessage, NetworkBridgeEvent, NetworkBridgeTxMessage}, + overseer, ActivatedLeaf, PerLeafSpan, StatementDistributionSenderTrait, +}; +use polkadot_node_subsystem_util::backing_implicit_view::{FetchError, View as ImplicitView}; + +use sp_keystore::SyncCryptoStorePtr; + +use std::collections::{HashMap, HashSet}; + +use crate::error::{JfyiError, JfyiErrorResult}; + +struct PerRelayParentState { + max_seconded_count: usize, + seconded_count: HashMap, + candidates: HashMap, + known_by: HashSet, +} + +struct CandidateData { + state: CandidateState, + statements: Vec, +} + +enum CandidateState { + /// The candidate is unconfirmed to exist, as it hasn't yet + /// been fetched. + Unconfirmed, + /// The candidate is confirmed but we don't have the `PersistedValidationData` + /// yet because we are missing some intermediate candidate. + ConfirmedWithoutPVD(CommittedCandidateReceipt), + /// The candidate is confirmed and we have the `PersistedValidationData`. 
+ Confirmed(CommittedCandidateReceipt, PersistedValidationData), +} + +pub(crate) struct State { + /// The utility for managing the implicit and explicit views in a consistent way. + /// + /// We only feed leaves which have prospective parachains enabled to this view. + implicit_view: ImplicitView, + per_relay_parent: HashMap, + peers: HashMap, + keystore: SyncCryptoStorePtr, +} + +struct PeerState { + view: View, +} + +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +pub(crate) async fn handle_network_update( + ctx: &mut Context, + state: &mut State, + update: NetworkBridgeEvent, +) { + match update { + NetworkBridgeEvent::PeerConnected(peer_id, _role, protocol_version, authority_ids) => { + if protocol_version != ValidationVersion::VStaging.into() { + return + } + + state.peers.insert(peer_id, PeerState { + view: View::default(), + }); + + // TODO [now]: update some authorities map. + } + NetworkBridgeEvent::PeerDisconnected(peer_id) => { + state.peers.remove(&peer_id); + } + NetworkBridgeEvent::NewGossipTopology(new_topology) => { + // TODO [now] + } + NetworkBridgeEvent::PeerMessage(peer_id, message) => { + match message { + net_protocol::StatementDistributionMessage::V1(_) => return, + net_protocol::StatementDistributionMessage::VStaging( + protocol_vstaging::StatementDistributionMessage::V1Compatibility(_) + ) => return, + net_protocol::StatementDistributionMessage::VStaging( + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement) + ) => {} // TODO [now] + net_protocol::StatementDistributionMessage::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateInventory(inner) + ) => {} // TODO [now] + net_protocol::StatementDistributionMessage::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(relay_parent, candidate_hash) + ) => {} // TODO [now] + } + } + NetworkBridgeEvent::PeerViewChange(peer_id, view) => { + // TODO [now] + } + 
NetworkBridgeEvent::OurViewChange(_view) => { + // handled by `handle_activated_leaf` + } + } +} + +/// This should only be invoked for leaves that implement prospective parachains. +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +pub(crate) async fn activate_leaf( + ctx: &mut Context, + state: &mut State, + leaf: ActivatedLeaf, +) -> JfyiErrorResult<()> { + + state.implicit_view.activate_leaf(ctx.sender(), leaf.hash) + .await + .map_err(JfyiError::ActivateLeafFailure)?; + + for leaf in state.implicit_view.all_allowed_relay_parents() { + if state.per_relay_parent.contains_key(leaf) { continue } + + // TODO [now]: + // 1. fetch info about validators, groups, and assignments + // 2. initialize PerRelayParentState + // 3. try to find new commonalities with peers and send data to them. + state.per_relay_parent.insert( + *leaf, + PerRelayParentState { + max_seconded_count: unimplemented!(), + seconded_count: HashMap::new(), + candidates: HashMap::new(), + known_by: HashSet::new(), + } + ); + } + + Ok(()) +} + +pub(crate) fn deactivate_leaf( + state: &mut State, + leaf_hash: Hash, +) { + // deactivate the leaf in the implicit view. + state.implicit_view.deactivate_leaf(leaf_hash); + let relay_parents = state.implicit_view.all_allowed_relay_parents().collect::>(); + + // fast exit for no-op. + if relay_parents.len() == state.per_relay_parent.len() { return } + + // clean up per-relay-parent data based on everything removed. 
+ state.per_relay_parent.retain(|r, _| relay_parents.contains(r)); +} diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 1f5087b31091..ee07163b0172 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -476,6 +476,8 @@ pub struct Overseer { NetworkBridgeTxMessage, CandidateBackingMessage, RuntimeApiMessage, + ProspectiveParachainsMessage, + ChainApiMessage, ])] statement_distribution: StatementDistribution, From 58919b4bd3dafa703ce3cd1557c5ed2502211fbe Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 16 Sep 2022 18:14:09 -0500 Subject: [PATCH 007/220] handle activated leaf --- .../statement-distribution/src/error.rs | 9 + .../src/vstaging/mod.rs | 154 ++++++++++++++++-- 2 files changed, 152 insertions(+), 11 deletions(-) diff --git a/node/network/statement-distribution/src/error.rs b/node/network/statement-distribution/src/error.rs index d6035681fe92..298f7e4688aa 100644 --- a/node/network/statement-distribution/src/error.rs +++ b/node/network/statement-distribution/src/error.rs @@ -65,6 +65,15 @@ pub enum Error { #[error("Fetching persisted validation data for para {0:?}, {1:?}")] FetchPersistedValidationData(ParaId, RuntimeApiError), + #[error("Fetching session index failed {0:?}")] + FetchSessionIndex(RuntimeApiError), + + #[error("Fetching session info failed {0:?}")] + FetchSessionInfo(RuntimeApiError), + + #[error("Fetching availability cores failed {0:?}")] + FetchAvailabilityCores(RuntimeApiError), + #[error("Relay parent could not be found in active heads")] NoSuchHead(Hash), diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 67bf4c2cc9b0..d79927add75d 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -19,7 +19,7 @@ use polkadot_primitives::vstaging::{ Hash, CandidateHash, CommittedCandidateReceipt, ValidatorId, SignedStatement, 
UncheckedSignedStatement, - GroupIndex, PersistedValidationData, + GroupIndex, PersistedValidationData, ValidatorIndex, CoreState, Id as ParaId, }; use polkadot_node_network_protocol::{ self as net_protocol, @@ -31,6 +31,7 @@ use polkadot_node_subsystem::{ messages::{CandidateBackingMessage, NetworkBridgeEvent, NetworkBridgeTxMessage}, overseer, ActivatedLeaf, PerLeafSpan, StatementDistributionSenderTrait, }; +use polkadot_node_primitives::SignedFullStatement; use polkadot_node_subsystem_util::backing_implicit_view::{FetchError, View as ImplicitView}; use sp_keystore::SyncCryptoStorePtr; @@ -38,17 +39,26 @@ use sp_keystore::SyncCryptoStorePtr; use std::collections::{HashMap, HashSet}; use crate::error::{JfyiError, JfyiErrorResult}; +use crate::LOG_TARGET; struct PerRelayParentState { - max_seconded_count: usize, - seconded_count: HashMap, + validators: Vec, + groups: Vec>, + validator_state: HashMap, candidates: HashMap, known_by: HashSet, + local_validator: Option, +} + +struct PerRelayParentValidatorState { + seconded_count: usize, + group_id: GroupIndex, } struct CandidateData { state: CandidateState, statements: Vec, + known_by: HashSet, } enum CandidateState { @@ -62,6 +72,19 @@ enum CandidateState { Confirmed(CommittedCandidateReceipt, PersistedValidationData), } +// per-relay-parent local validator state. +struct LocalValidatorState { + // our validator group + group: GroupIndex, + // the assignment of our validator group, if any. + assignment: Option, + // the next group assigned to this para. + next_group: GroupIndex, + // the previous group assigned to this para, stored only + // if they are currently assigned to a para. + prev_group: Option<(GroupIndex, ParaId)>, +} + pub(crate) struct State { /// The utility for managing the implicit and explicit views in a consistent way. /// @@ -128,7 +151,7 @@ pub(crate) async fn handle_network_update( /// This should only be invoked for leaves that implement prospective parachains. 
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -pub(crate) async fn activate_leaf( +pub(crate) async fn handle_activated_leaf( ctx: &mut Context, state: &mut State, leaf: ActivatedLeaf, @@ -141,17 +164,59 @@ pub(crate) async fn activate_leaf( for leaf in state.implicit_view.all_allowed_relay_parents() { if state.per_relay_parent.contains_key(leaf) { continue } - // TODO [now]: - // 1. fetch info about validators, groups, and assignments - // 2. initialize PerRelayParentState - // 3. try to find new commonalities with peers and send data to them. + // New leaf: fetch info from runtime API and initialize + // `per_relay_parent`. + let session_index = polkadot_node_subsystem_util::request_session_index_for_child( + *leaf, + ctx.sender(), + ).await.await + .map_err(JfyiError::RuntimeApiUnavailable)? + .map_err(JfyiError::FetchSessionIndex)?; + + let session_info = polkadot_node_subsystem_util::request_session_info( + *leaf, + session_index, + ctx.sender(), + ).await.await + .map_err(JfyiError::RuntimeApiUnavailable)? + .map_err(JfyiError::FetchSessionInfo)?; + + let session_info = match session_info { + None => { + gum::warn!( + target: LOG_TARGET, + relay_parent = ?leaf, + "No session info available for current session" + ); + + continue; + } + Some(s) => s, + }; + + let availability_cores = polkadot_node_subsystem_util::request_availability_cores( + *leaf, + ctx.sender(), + ).await.await + .map_err(JfyiError::RuntimeApiUnavailable)? 
+ .map_err(JfyiError::FetchAvailabilityCores)?; + + let local_validator = find_local_validator_state( + &session_info.validators, + &state.keystore, + &session_info.validator_groups, + &availability_cores, + ).await; + state.per_relay_parent.insert( *leaf, PerRelayParentState { - max_seconded_count: unimplemented!(), - seconded_count: HashMap::new(), + validators: session_info.validators, + groups: session_info.validator_groups, + validator_state: HashMap::new(), candidates: HashMap::new(), known_by: HashSet::new(), + local_validator, } ); } @@ -159,7 +224,47 @@ pub(crate) async fn activate_leaf( Ok(()) } -pub(crate) fn deactivate_leaf( +async fn find_local_validator_state( + validators: &[ValidatorId], + keystore: &SyncCryptoStorePtr, + groups: &[Vec], + availability_cores: &[CoreState], +) -> Option { + if groups.is_empty() { return None } + + let (validator_id, validator_index) = polkadot_node_subsystem_util::signing_key_and_index( + validators, + keystore, + ).await?; + + let our_group = polkadot_node_subsystem_util::find_validator_group( + groups, + validator_index, + )?; + + // note: this won't work well for parathreads because it only works + // when core assignments to paras are static throughout the session. + + let next_group = GroupIndex((our_group.0 + 1) % groups.len() as u32); + let prev_group = GroupIndex(if our_group.0 == 0 { + our_group.0 - 1 + } else { + groups.len() as u32 - 1 + }); + + let para_for_group = |g: GroupIndex| { + availability_cores.get(g.0 as usize).and_then(|c| c.para_id()) + }; + + Some(LocalValidatorState { + group: our_group, + assignment: para_for_group(our_group), + next_group, + prev_group: para_for_group(prev_group).map(|p| (prev_group, p)), + }) +} + +pub(crate) fn handle_deactivate_leaf( state: &mut State, leaf_hash: Hash, ) { @@ -173,3 +278,30 @@ pub(crate) fn deactivate_leaf( // clean up per-relay-parent data based on everything removed. 
state.per_relay_parent.retain(|r, _| relay_parents.contains(r)); } + +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +pub(crate) async fn share_local_statement( + ctx: &mut Context, + state: &mut State, + relay_parent: Hash, + statement: SignedFullStatement, +) { + let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) { + None => { + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + "Attempted to share statement for unknown relay-parent" + ); + + return; + } + Some(x) => x, + }; + + // TODO [now]: + // 1. check that we are in the required group for the parachain at that block + // 2. insert candidate if unknown + // 3. send to nodes in current group and next-up the statement. If not a `Seconded` statement, + // send a `Seconded` statement as well. +} From 2ff6317545f4508798ba37f5f26d7f4572b1cb2e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 16 Sep 2022 18:27:13 -0500 Subject: [PATCH 008/220] some sanity-checking on outbound statements --- .../statement-distribution/src/error.rs | 3 ++ .../src/vstaging/mod.rs | 50 ++++++++++++++----- 2 files changed, 40 insertions(+), 13 deletions(-) diff --git a/node/network/statement-distribution/src/error.rs b/node/network/statement-distribution/src/error.rs index 298f7e4688aa..bc2778b50161 100644 --- a/node/network/statement-distribution/src/error.rs +++ b/node/network/statement-distribution/src/error.rs @@ -74,6 +74,9 @@ pub enum Error { #[error("Fetching availability cores failed {0:?}")] FetchAvailabilityCores(RuntimeApiError), + #[error("Attempted to share statement when not a validator or not assigned")] + InvalidShare, + #[error("Relay parent could not be found in active heads")] NoSuchHead(Hash), diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index d79927add75d..d00d835712b2 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ 
b/node/network/statement-distribution/src/vstaging/mod.rs @@ -31,7 +31,7 @@ use polkadot_node_subsystem::{ messages::{CandidateBackingMessage, NetworkBridgeEvent, NetworkBridgeTxMessage}, overseer, ActivatedLeaf, PerLeafSpan, StatementDistributionSenderTrait, }; -use polkadot_node_primitives::SignedFullStatement; +use polkadot_node_primitives::{SignedFullStatement, Statement as FullStatement}; use polkadot_node_subsystem_util::backing_implicit_view::{FetchError, View as ImplicitView}; use sp_keystore::SyncCryptoStorePtr; @@ -72,6 +72,16 @@ enum CandidateState { Confirmed(CommittedCandidateReceipt, PersistedValidationData), } +impl CandidateState { + fn receipt(&self) -> Option<&CommittedCandidateReceipt> { + match *self { + CandidateState::Unconfirmed => None, + CandidateState::ConfirmedWithoutPVD(ref c) => Some(c), + CandidateState::Confirmed(ref c, _) => Some(c), + } + } +} + // per-relay-parent local validator state. struct LocalValidatorState { // our validator group @@ -285,23 +295,37 @@ pub(crate) async fn share_local_statement( state: &mut State, relay_parent: Hash, statement: SignedFullStatement, -) { +) -> JfyiErrorResult<()> { let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) { - None => { - gum::warn!( - target: LOG_TARGET, - ?relay_parent, - "Attempted to share statement for unknown relay-parent" - ); - - return; - } + None => return Err(JfyiError::InvalidShare), Some(x) => x, }; + let local_validator = match per_relay_parent.local_validator.as_ref() { + None => return Err(JfyiError::InvalidShare), + Some(l) => l, + }; + + // Two possibilities: either the statement is `Seconded` or we already + // have the candidate. Sanity: check the para-id is valid. 
+ let expected_para = match statement.payload() { + FullStatement::Seconded(ref s) => Some(s.descriptor().para_id), + FullStatement::Valid(hash) => { + per_relay_parent.candidates + .get(&hash) + .and_then(|c| c.state.receipt()) + .map(|c| c.descriptor().para_id) + } + }; + + if expected_para.is_none() || local_validator.assignment != expected_para { + return Err(JfyiError::InvalidShare) + } + // TODO [now]: - // 1. check that we are in the required group for the parachain at that block // 2. insert candidate if unknown - // 3. send to nodes in current group and next-up the statement. If not a `Seconded` statement, + // 3. send the compact version of the statement to nodes in current group and next-up. If not a `Seconded` statement, // send a `Seconded` statement as well. + + Ok(()) } From 7947b44481515b09b6c7bff585439358bb1b74f8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 16 Sep 2022 18:27:18 -0500 Subject: [PATCH 009/220] fmt --- .../statement-distribution/src/error.rs | 8 +- .../src/vstaging/mod.rs | 165 +++++++++--------- 2 files changed, 87 insertions(+), 86 deletions(-) diff --git a/node/network/statement-distribution/src/error.rs b/node/network/statement-distribution/src/error.rs index bc2778b50161..406eccfc7874 100644 --- a/node/network/statement-distribution/src/error.rs +++ b/node/network/statement-distribution/src/error.rs @@ -19,9 +19,10 @@ use polkadot_node_network_protocol::PeerId; use polkadot_node_subsystem::{RuntimeApiError, SubsystemError}; -use polkadot_node_subsystem_util::runtime; +use polkadot_node_subsystem_util::{ + backing_implicit_view::FetchError as ImplicitViewFetchError, runtime, +}; use polkadot_primitives::v2::{CandidateHash, Hash, Id as ParaId}; -use polkadot_node_subsystem_util::backing_implicit_view::{FetchError as ImplicitViewFetchError}; use futures::channel::oneshot; @@ -100,8 +101,7 @@ pub enum Error { // Failed to activate leaf due to a fetch error. 
#[error("Implicit view failure while activating leaf")] - ActivateLeafFailure(ImplicitViewFetchError,) - + ActivateLeafFailure(ImplicitViewFetchError), } /// Utility for eating top level errors and log them. diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index d00d835712b2..74e8da3fffb5 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -17,29 +17,30 @@ //! Implementation of the v2 statement distribution protocol, //! designed for asynchronous backing. -use polkadot_primitives::vstaging::{ - Hash, CandidateHash, CommittedCandidateReceipt, ValidatorId, SignedStatement, UncheckedSignedStatement, - GroupIndex, PersistedValidationData, ValidatorIndex, CoreState, Id as ParaId, -}; use polkadot_node_network_protocol::{ - self as net_protocol, - peer_set::ValidationVersion, - vstaging as protocol_vstaging, View, PeerId, + self as net_protocol, peer_set::ValidationVersion, vstaging as protocol_vstaging, PeerId, View, }; +use polkadot_node_primitives::{SignedFullStatement, Statement as FullStatement}; use polkadot_node_subsystem::{ jaeger, messages::{CandidateBackingMessage, NetworkBridgeEvent, NetworkBridgeTxMessage}, overseer, ActivatedLeaf, PerLeafSpan, StatementDistributionSenderTrait, }; -use polkadot_node_primitives::{SignedFullStatement, Statement as FullStatement}; use polkadot_node_subsystem_util::backing_implicit_view::{FetchError, View as ImplicitView}; +use polkadot_primitives::vstaging::{ + CandidateHash, CommittedCandidateReceipt, CoreState, GroupIndex, Hash, Id as ParaId, + PersistedValidationData, SignedStatement, UncheckedSignedStatement, ValidatorId, + ValidatorIndex, +}; use sp_keystore::SyncCryptoStorePtr; use std::collections::{HashMap, HashSet}; -use crate::error::{JfyiError, JfyiErrorResult}; -use crate::LOG_TARGET; +use crate::{ + error::{JfyiError, JfyiErrorResult}, + LOG_TARGET, +}; 
struct PerRelayParentState { validators: Vec, @@ -121,41 +122,47 @@ pub(crate) async fn handle_network_update( return } - state.peers.insert(peer_id, PeerState { - view: View::default(), - }); + state.peers.insert(peer_id, PeerState { view: View::default() }); // TODO [now]: update some authorities map. - } + }, NetworkBridgeEvent::PeerDisconnected(peer_id) => { state.peers.remove(&peer_id); - } + }, NetworkBridgeEvent::NewGossipTopology(new_topology) => { // TODO [now] - } + }, NetworkBridgeEvent::PeerMessage(peer_id, message) => { match message { net_protocol::StatementDistributionMessage::V1(_) => return, net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(_) + protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), ) => return, net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement) - ) => {} // TODO [now] + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + statement, + ), + ) => {}, // TODO [now] net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateInventory(inner) - ) => {} // TODO [now] + protocol_vstaging::StatementDistributionMessage::BackedCandidateInventory( + inner, + ), + ) => {}, // TODO [now] net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(relay_parent, candidate_hash) - ) => {} // TODO [now] + protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown( + relay_parent, + candidate_hash, + ), + ) => {}, // TODO [now] } - } + }, NetworkBridgeEvent::PeerViewChange(peer_id, view) => { // TODO [now] - } + }, NetworkBridgeEvent::OurViewChange(_view) => { // handled by `handle_activated_leaf` - } + }, } } @@ -166,30 +173,32 @@ pub(crate) async fn handle_activated_leaf( state: &mut State, leaf: ActivatedLeaf, ) -> 
JfyiErrorResult<()> { - - state.implicit_view.activate_leaf(ctx.sender(), leaf.hash) + state + .implicit_view + .activate_leaf(ctx.sender(), leaf.hash) .await .map_err(JfyiError::ActivateLeafFailure)?; for leaf in state.implicit_view.all_allowed_relay_parents() { - if state.per_relay_parent.contains_key(leaf) { continue } + if state.per_relay_parent.contains_key(leaf) { + continue + } // New leaf: fetch info from runtime API and initialize // `per_relay_parent`. - let session_index = polkadot_node_subsystem_util::request_session_index_for_child( - *leaf, - ctx.sender(), - ).await.await - .map_err(JfyiError::RuntimeApiUnavailable)? - .map_err(JfyiError::FetchSessionIndex)?; - - let session_info = polkadot_node_subsystem_util::request_session_info( - *leaf, - session_index, - ctx.sender(), - ).await.await - .map_err(JfyiError::RuntimeApiUnavailable)? - .map_err(JfyiError::FetchSessionInfo)?; + let session_index = + polkadot_node_subsystem_util::request_session_index_for_child(*leaf, ctx.sender()) + .await + .await + .map_err(JfyiError::RuntimeApiUnavailable)? + .map_err(JfyiError::FetchSessionIndex)?; + + let session_info = + polkadot_node_subsystem_util::request_session_info(*leaf, session_index, ctx.sender()) + .await + .await + .map_err(JfyiError::RuntimeApiUnavailable)? + .map_err(JfyiError::FetchSessionInfo)?; let session_info = match session_info { None => { @@ -199,24 +208,25 @@ pub(crate) async fn handle_activated_leaf( "No session info available for current session" ); - continue; - } + continue + }, Some(s) => s, }; - let availability_cores = polkadot_node_subsystem_util::request_availability_cores( - *leaf, - ctx.sender(), - ).await.await - .map_err(JfyiError::RuntimeApiUnavailable)? - .map_err(JfyiError::FetchAvailabilityCores)?; + let availability_cores = + polkadot_node_subsystem_util::request_availability_cores(*leaf, ctx.sender()) + .await + .await + .map_err(JfyiError::RuntimeApiUnavailable)? 
+ .map_err(JfyiError::FetchAvailabilityCores)?; let local_validator = find_local_validator_state( &session_info.validators, &state.keystore, &session_info.validator_groups, &availability_cores, - ).await; + ) + .await; state.per_relay_parent.insert( *leaf, @@ -227,7 +237,7 @@ pub(crate) async fn handle_activated_leaf( candidates: HashMap::new(), known_by: HashSet::new(), local_validator, - } + }, ); } @@ -240,31 +250,24 @@ async fn find_local_validator_state( groups: &[Vec], availability_cores: &[CoreState], ) -> Option { - if groups.is_empty() { return None } + if groups.is_empty() { + return None + } - let (validator_id, validator_index) = polkadot_node_subsystem_util::signing_key_and_index( - validators, - keystore, - ).await?; + let (validator_id, validator_index) = + polkadot_node_subsystem_util::signing_key_and_index(validators, keystore).await?; - let our_group = polkadot_node_subsystem_util::find_validator_group( - groups, - validator_index, - )?; + let our_group = polkadot_node_subsystem_util::find_validator_group(groups, validator_index)?; // note: this won't work well for parathreads because it only works // when core assignments to paras are static throughout the session. 
let next_group = GroupIndex((our_group.0 + 1) % groups.len() as u32); - let prev_group = GroupIndex(if our_group.0 == 0 { - our_group.0 - 1 - } else { - groups.len() as u32 - 1 - }); - - let para_for_group = |g: GroupIndex| { - availability_cores.get(g.0 as usize).and_then(|c| c.para_id()) - }; + let prev_group = + GroupIndex(if our_group.0 == 0 { our_group.0 - 1 } else { groups.len() as u32 - 1 }); + + let para_for_group = + |g: GroupIndex| availability_cores.get(g.0 as usize).and_then(|c| c.para_id()); Some(LocalValidatorState { group: our_group, @@ -274,16 +277,15 @@ async fn find_local_validator_state( }) } -pub(crate) fn handle_deactivate_leaf( - state: &mut State, - leaf_hash: Hash, -) { +pub(crate) fn handle_deactivate_leaf(state: &mut State, leaf_hash: Hash) { // deactivate the leaf in the implicit view. state.implicit_view.deactivate_leaf(leaf_hash); let relay_parents = state.implicit_view.all_allowed_relay_parents().collect::>(); // fast exit for no-op. - if relay_parents.len() == state.per_relay_parent.len() { return } + if relay_parents.len() == state.per_relay_parent.len() { + return + } // clean up per-relay-parent data based on everything removed. state.per_relay_parent.retain(|r, _| relay_parents.contains(r)); @@ -310,12 +312,11 @@ pub(crate) async fn share_local_statement( // have the candidate. Sanity: check the para-id is valid. 
let expected_para = match statement.payload() { FullStatement::Seconded(ref s) => Some(s.descriptor().para_id), - FullStatement::Valid(hash) => { - per_relay_parent.candidates - .get(&hash) - .and_then(|c| c.state.receipt()) - .map(|c| c.descriptor().para_id) - } + FullStatement::Valid(hash) => per_relay_parent + .candidates + .get(&hash) + .and_then(|c| c.state.receipt()) + .map(|c| c.descriptor().para_id), }; if expected_para.is_none() || local_validator.assignment != expected_para { From 162d1cbe496de6f3a133af59a832df25cc1ca763 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 19 Sep 2022 16:47:15 -0500 Subject: [PATCH 010/220] update vstaging share to use statements with PVD --- .../statement-distribution/src/vstaging/mod.rs | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 74e8da3fffb5..9f1a089419d9 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -20,7 +20,10 @@ use polkadot_node_network_protocol::{ self as net_protocol, peer_set::ValidationVersion, vstaging as protocol_vstaging, PeerId, View, }; -use polkadot_node_primitives::{SignedFullStatement, Statement as FullStatement}; +use polkadot_node_primitives::{ + SignedFullStatementWithPVD, + StatementWithPVD as FullStatementWithPVD, +}; use polkadot_node_subsystem::{ jaeger, messages::{CandidateBackingMessage, NetworkBridgeEvent, NetworkBridgeTxMessage}, @@ -66,9 +69,6 @@ enum CandidateState { /// The candidate is unconfirmed to exist, as it hasn't yet /// been fetched. Unconfirmed, - /// The candidate is confirmed but we don't have the `PersistedValidationData` - /// yet because we are missing some intermediate candidate. - ConfirmedWithoutPVD(CommittedCandidateReceipt), /// The candidate is confirmed and we have the `PersistedValidationData`. 
Confirmed(CommittedCandidateReceipt, PersistedValidationData), } @@ -77,7 +77,6 @@ impl CandidateState { fn receipt(&self) -> Option<&CommittedCandidateReceipt> { match *self { CandidateState::Unconfirmed => None, - CandidateState::ConfirmedWithoutPVD(ref c) => Some(c), CandidateState::Confirmed(ref c, _) => Some(c), } } @@ -296,7 +295,7 @@ pub(crate) async fn share_local_statement( ctx: &mut Context, state: &mut State, relay_parent: Hash, - statement: SignedFullStatement, + statement: SignedFullStatementWithPVD, ) -> JfyiErrorResult<()> { let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) { None => return Err(JfyiError::InvalidShare), @@ -311,8 +310,8 @@ pub(crate) async fn share_local_statement( // Two possibilities: either the statement is `Seconded` or we already // have the candidate. Sanity: check the para-id is valid. let expected_para = match statement.payload() { - FullStatement::Seconded(ref s) => Some(s.descriptor().para_id), - FullStatement::Valid(hash) => per_relay_parent + FullStatementWithPVD::Seconded(ref s, _) => Some(s.descriptor().para_id), + FullStatementWithPVD::Valid(hash) => per_relay_parent .candidates .get(&hash) .and_then(|c| c.state.receipt()) From 6d49c58ee48033545250ac0841665f7a43e02370 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 19 Sep 2022 17:49:05 -0500 Subject: [PATCH 011/220] tiny refactor, candidate_hash location --- primitives/src/v2/mod.rs | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/primitives/src/v2/mod.rs b/primitives/src/v2/mod.rs index 4cab787edd23..a86d2ac31397 100644 --- a/primitives/src/v2/mod.rs +++ b/primitives/src/v2/mod.rs @@ -1513,6 +1513,13 @@ impl CompactStatement { pub fn signing_payload(&self, context: &SigningContext) -> Vec { (self, context).encode() } + + /// Get the underlying candidate hash this references. 
+ pub fn candidate_hash(&self) -> &CandidateHash { + match *self { + CompactStatement::Seconded(ref h) | CompactStatement::Valid(ref h) => h, + } + } } // Inner helper for codec on `CompactStatement`. @@ -1561,15 +1568,6 @@ impl parity_scale_codec::Decode for CompactStatement { } } -impl CompactStatement { - /// Get the underlying candidate hash this references. - pub fn candidate_hash(&self) -> &CandidateHash { - match *self { - CompactStatement::Seconded(ref h) | CompactStatement::Valid(ref h) => h, - } - } -} - /// The maximum number of validators `f` which may safely be faulty. /// /// The total number of validators is `n = 3f + e` where `e in { 1, 2, 3 }`. From ed6be7fa197d0906de78b6951114065d8a53f59d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 19 Sep 2022 17:49:13 -0500 Subject: [PATCH 012/220] import local statements --- .../src/vstaging/mod.rs | 60 ++++++++++++++++++- node/primitives/src/lib.rs | 10 +++- 2 files changed, 68 insertions(+), 2 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 9f1a089419d9..6855736ee96f 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -65,6 +65,16 @@ struct CandidateData { known_by: HashSet, } +impl Default for CandidateData { + fn default() -> Self { + CandidateData { + state: CandidateState::Unconfirmed, + statements: Vec::new(), + known_by: HashSet::new(), + } + } +} + enum CandidateState { /// The candidate is unconfirmed to exist, as it hasn't yet /// been fetched. 
@@ -74,6 +84,13 @@ enum CandidateState { } impl CandidateState { + fn is_confirmed(&self) -> bool { + match *self { + CandidateState::Unconfirmed => false, + CandidateState::Confirmed(_, _) => true, + } + } + fn receipt(&self) -> Option<&CommittedCandidateReceipt> { match *self { CandidateState::Unconfirmed => None, @@ -84,6 +101,8 @@ impl CandidateState { // per-relay-parent local validator state. struct LocalValidatorState { + // The index of the validator. + index: ValidatorIndex, // our validator group group: GroupIndex, // the assignment of our validator group, if any. @@ -269,6 +288,7 @@ async fn find_local_validator_state( |g: GroupIndex| availability_cores.get(g.0 as usize).and_then(|c| c.para_id()); Some(LocalValidatorState { + index: validator_index, group: our_group, assignment: para_for_group(our_group), next_group, @@ -310,7 +330,7 @@ pub(crate) async fn share_local_statement( // Two possibilities: either the statement is `Seconded` or we already // have the candidate. Sanity: check the para-id is valid. let expected_para = match statement.payload() { - FullStatementWithPVD::Seconded(ref s, _) => Some(s.descriptor().para_id), + FullStatementWithPVD::Seconded(ref c, _) => Some(c.descriptor().para_id), FullStatementWithPVD::Valid(hash) => per_relay_parent .candidates .get(&hash) @@ -318,10 +338,48 @@ pub(crate) async fn share_local_statement( .map(|c| c.descriptor().para_id), }; + if local_validator.index != statement.validator_index() { + return Err(JfyiError::InvalidShare) + } + + // TODO [now]: ensure seconded_count isn't too high. Needs our definition + // of 'too high' i.e. max_depth, which isn't done yet. 
+ if expected_para.is_none() || local_validator.assignment != expected_para { return Err(JfyiError::InvalidShare) } + let compact_statement = FullStatementWithPVD::signed_to_compact(statement.clone()); + let candidate_hash = CandidateHash(*statement.payload().candidate_hash()); + + let candidate_entry = match statement.payload() { + FullStatementWithPVD::Seconded(ref c, ref pvd) => { + let candidate_entry = per_relay_parent.candidates.entry(candidate_hash).or_default(); + + if let CandidateState::Unconfirmed = candidate_entry.state { + candidate_entry.state = CandidateState::Confirmed(c.clone(), pvd.clone()); + } + + candidate_entry + } + FullStatementWithPVD::Valid(_) => { + match per_relay_parent.candidates.get_mut(&candidate_hash) { + None => { + // Can't share a 'Valid' statement about a candidate we don't know about! + return Err(JfyiError::InvalidShare); + } + Some(ref c) if !c.state.is_confirmed() => { + // Can't share a 'Valid' statement about a candidate we don't know about! + return Err(JfyiError::InvalidShare); + } + Some(c) => c, + } + } + }; + + candidate_entry.statements.push(compact_statement); + candidate_entry.known_by.insert(local_validator.index); + // TODO [now]: // 2. insert candidate if unknown // 3. send the compact version of the statement to nodes in current group and next-up. 
If not a `Seconded` statement, diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index 33b17c4e33b4..908d05bd128b 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -257,7 +257,15 @@ impl StatementWithPVD { pub fn drop_pvd_from_signed(signed: SignedFullStatementWithPVD) -> SignedFullStatement { signed .convert_to_superpayload_with(|s| s.drop_pvd()) - .expect("persisted_validation_data doesn't affect encoded_as; qed") + .expect("persisted_validation_data doesn't affect encode_as; qed") + } + + /// Converts the statement to a compact signed statement by dropping the [`CommittedCandidateReceipt`] + /// and the [`PersistedValidationData`]. + pub fn signed_to_compact(signed: SignedFullStatementWithPVD) -> Signed { + signed + .convert_to_superpayload_with(|s| s.to_compact()) + .expect("doesn't affect encode_as; qed") } } From b2da4f203dc7372100fa5370780102d69890b5e8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 19 Sep 2022 19:04:13 -0500 Subject: [PATCH 013/220] refactor statement import --- .../src/vstaging/mod.rs | 129 +++++++++++++----- 1 file changed, 98 insertions(+), 31 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 6855736ee96f..c6ea991a93ca 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -33,11 +33,13 @@ use polkadot_node_subsystem_util::backing_implicit_view::{FetchError, View as Im use polkadot_primitives::vstaging::{ CandidateHash, CommittedCandidateReceipt, CoreState, GroupIndex, Hash, Id as ParaId, PersistedValidationData, SignedStatement, UncheckedSignedStatement, ValidatorId, - ValidatorIndex, + ValidatorIndex, CompactStatement, }; use sp_keystore::SyncCryptoStorePtr; +use indexmap::IndexMap; + use std::collections::{HashMap, HashSet}; use crate::{ @@ -59,9 +61,15 @@ struct PerRelayParentValidatorState { group_id: 
GroupIndex, } +// stores statements and the candidate receipt/persisted validation data if any. struct CandidateData { state: CandidateState, - statements: Vec, + seconded_statements: Vec, + valid_statements: Vec, + + // validators which have either produced a statement about the + // candidate or which have sent a signed statement or which we have + // sent statements to. known_by: HashSet, } @@ -69,12 +77,45 @@ impl Default for CandidateData { fn default() -> Self { CandidateData { state: CandidateState::Unconfirmed, - statements: Vec::new(), + seconded_statements: Vec::new(), + valid_statements: Vec::new(), known_by: HashSet::new(), } } } +impl CandidateData { + fn has_issued_seconded(&self, validator: ValidatorIndex) -> bool { + self.seconded_statements.iter().find(|s| s.validator_index() == validator).is_some() + } + + fn has_issued_valid(&self, validator: ValidatorIndex) -> bool { + self.valid_statements.iter().find(|s| s.validator_index() == validator).is_some() + } + + // ignores duplicates or equivocations. returns 'false' if those are detected, 'true' otherwise. + fn insert_signed_statement(&mut self, statement: SignedStatement) -> bool { + let validator_index = statement.validator_index(); + + // only accept one statement by the validator. + let has_issued_statement = self.has_issued_seconded(validator_index) || self.has_issued_valid(validator_index); + if has_issued_statement { return false } + + match statement.payload() { + CompactStatement::Seconded(_) => self.seconded_statements.push(statement), + CompactStatement::Valid(_) => self.valid_statements.push(statement), + } + + self.known_by.insert(validator_index); + + true + } + + fn note_known_by(&mut self, validator: ValidatorIndex) { + self.known_by.insert(validator); + } +} + enum CandidateState { /// The candidate is unconfirmed to exist, as it hasn't yet /// been fetched. 
@@ -310,6 +351,7 @@ pub(crate) fn handle_deactivate_leaf(state: &mut State, leaf_hash: Hash) { state.per_relay_parent.retain(|r, _| relay_parents.contains(r)); } +// Imports a locally originating statement and distributes it to peers. #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] pub(crate) async fn share_local_statement( ctx: &mut Context, @@ -322,9 +364,9 @@ pub(crate) async fn share_local_statement( Some(x) => x, }; - let local_validator = match per_relay_parent.local_validator.as_ref() { + let (local_index, local_assignment) = match per_relay_parent.local_validator.as_ref() { None => return Err(JfyiError::InvalidShare), - Some(l) => l, + Some(l) => (l.index, l.assignment) }; // Two possibilities: either the statement is `Seconded` or we already @@ -338,52 +380,77 @@ pub(crate) async fn share_local_statement( .map(|c| c.descriptor().para_id), }; - if local_validator.index != statement.validator_index() { + if local_index != statement.validator_index() { return Err(JfyiError::InvalidShare) } // TODO [now]: ensure seconded_count isn't too high. Needs our definition // of 'too high' i.e. max_depth, which isn't done yet. - if expected_para.is_none() || local_validator.assignment != expected_para { + if expected_para.is_none() || local_assignment != expected_para { return Err(JfyiError::InvalidShare) } - let compact_statement = FullStatementWithPVD::signed_to_compact(statement.clone()); - let candidate_hash = CandidateHash(*statement.payload().candidate_hash()); + // Insert candidate if unknown + more sanity checks. 
+ let (compact_statement, candidate_hash) = { + let compact_statement = FullStatementWithPVD::signed_to_compact(statement.clone()); + let candidate_hash = CandidateHash(*statement.payload().candidate_hash()); - let candidate_entry = match statement.payload() { - FullStatementWithPVD::Seconded(ref c, ref pvd) => { - let candidate_entry = per_relay_parent.candidates.entry(candidate_hash).or_default(); + let candidate_entry = match statement.payload() { + FullStatementWithPVD::Seconded(ref c, ref pvd) => { + let candidate_entry = per_relay_parent.candidates.entry(candidate_hash).or_default(); - if let CandidateState::Unconfirmed = candidate_entry.state { - candidate_entry.state = CandidateState::Confirmed(c.clone(), pvd.clone()); - } - - candidate_entry - } - FullStatementWithPVD::Valid(_) => { - match per_relay_parent.candidates.get_mut(&candidate_hash) { - None => { - // Can't share a 'Valid' statement about a candidate we don't know about! - return Err(JfyiError::InvalidShare); + if let CandidateState::Unconfirmed = candidate_entry.state { + candidate_entry.state = CandidateState::Confirmed(c.clone(), pvd.clone()); } - Some(ref c) if !c.state.is_confirmed() => { - // Can't share a 'Valid' statement about a candidate we don't know about! - return Err(JfyiError::InvalidShare); + + candidate_entry + } + FullStatementWithPVD::Valid(_) => { + match per_relay_parent.candidates.get_mut(&candidate_hash) { + None => { + // Can't share a 'Valid' statement about a candidate we don't know about! + return Err(JfyiError::InvalidShare); + } + Some(ref c) if !c.state.is_confirmed() => { + // Can't share a 'Valid' statement about a candidate we don't know about! 
+ return Err(JfyiError::InvalidShare); + } + Some(c) => c, } - Some(c) => c, } + }; + + if !candidate_entry.insert_signed_statement(compact_statement.clone()) { + gum::warn!( + target: LOG_TARGET, + statement = ?compact_statement.payload(), + "Candidate backing issued redundant statement?", + ); + + return Err(JfyiError::InvalidShare); } - }; - candidate_entry.statements.push(compact_statement); - candidate_entry.known_by.insert(local_validator.index); + (compact_statement, candidate_hash) + }; // TODO [now]: - // 2. insert candidate if unknown // 3. send the compact version of the statement to nodes in current group and next-up. If not a `Seconded` statement, // send a `Seconded` statement as well. + // 4. If the candidate is now backed, trigger 'backed candidate announcement' logic. Ok(()) } + +// Circulates a compact statement to all peers who need it: those in the current group of the +// local validator, those in the next group for the parachain, and grid peers which have already +// indicated that they know the candidate as backed. +// +// If we're not sure whether the peer knows the candidate is `Seconded` already, we also send a `Seconded` +// statement. +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn circulate_statement_possibly_with_seconded( + context: &mut Context, +) { + +} From 59383f2edc93d26771ad2cb77fe3597024e78387 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 19 Sep 2022 22:37:47 -0500 Subject: [PATCH 014/220] first stab at broadcast logic --- .../src/vstaging/mod.rs | 101 +++++++++++++++++- 1 file changed, 96 insertions(+), 5 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index c6ea991a93ca..cc9e17c46287 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -18,7 +18,9 @@ //! designed for asynchronous backing. 
use polkadot_node_network_protocol::{ - self as net_protocol, peer_set::ValidationVersion, vstaging as protocol_vstaging, PeerId, View, + self as net_protocol, peer_set::ValidationVersion, vstaging as protocol_vstaging, + grid_topology::{RequiredRouting, SessionBoundGridTopologyStorage, SessionGridTopology}, + PeerId, View, Versioned, }; use polkadot_node_primitives::{ SignedFullStatementWithPVD, @@ -31,9 +33,9 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util::backing_implicit_view::{FetchError, View as ImplicitView}; use polkadot_primitives::vstaging::{ - CandidateHash, CommittedCandidateReceipt, CoreState, GroupIndex, Hash, Id as ParaId, + AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CoreState, GroupIndex, Hash, Id as ParaId, PersistedValidationData, SignedStatement, UncheckedSignedStatement, ValidatorId, - ValidatorIndex, CompactStatement, + ValidatorIndex, CompactStatement, SessionIndex, }; use sp_keystore::SyncCryptoStorePtr; @@ -49,11 +51,13 @@ use crate::{ struct PerRelayParentState { validators: Vec, + discovery_keys: Vec, groups: Vec>, validator_state: HashMap, candidates: HashMap, known_by: HashSet, local_validator: Option, + session: SessionIndex, } struct PerRelayParentValidatorState { @@ -163,6 +167,7 @@ pub(crate) struct State { per_relay_parent: HashMap, peers: HashMap, keystore: SyncCryptoStorePtr, + topology_storage: SessionBoundGridTopologyStorage, } struct PeerState { @@ -291,11 +296,13 @@ pub(crate) async fn handle_activated_leaf( *leaf, PerRelayParentState { validators: session_info.validators, + discovery_keys: session_info.discovery_keys, groups: session_info.validator_groups, validator_state: HashMap::new(), candidates: HashMap::new(), known_by: HashSet::new(), local_validator, + session: session_index, }, ); } @@ -448,9 +455,93 @@ pub(crate) async fn share_local_statement( // // If we're not sure whether the peer knows the candidate is `Seconded` already, we also send a `Seconded` // statement. 
+// +// preconditions: the candidate entry exists in the state under the relay parent +// and the statement has already been imported into the entry. If this is a `Valid` +// statement, then there must be at least one `Seconded` statement. +// TODO [now]: make this a more general `broadcast_statement` with an `BroadcastBehavior` that +// affects targets: `Local` keeps current behavior while `Forward` only sends onwards via `BackedCandidate` knowers. #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn circulate_statement_possibly_with_seconded( - context: &mut Context, +async fn broadcast_local_statement( + ctx: &mut Context, + state: &mut State, + relay_parent: Hash, + statement: SignedStatement, ) { + let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) { + Some(x) => x, + None => return, + }; + let candidate_hash = statement.payload().candidate_hash().clone(); + let candidate_entry = match per_relay_parent.candidates.get_mut(&candidate_hash) { + Some(x) => x, + None => return, + }; + + let prior_seconded = match statement.payload() { + CompactStatement::Seconded(_) => None, + CompactStatement::Valid(_) => match candidate_entry.seconded_statements.first() { + Some(s) => Some(s.as_unchecked().clone()), + None => return, + } + }; + + let targets = { + let local_validator = match per_relay_parent.local_validator.as_ref() { + Some(v) => v, + None => return, // sanity: should be impossible to reach this. + }; + + let current_group = per_relay_parent.groups[local_validator.group.0 as usize].iter().cloned(); + let next_group = per_relay_parent.groups[local_validator.next_group.0 as usize].iter().cloned(); + + // TODO [now]: extend targets with validators which + // a) we've sent `BackedCandidateInv` for this candidate to + // b) have either requested the candidate _or_ have sent `BackedCandidateKnown` to us. 
+ + current_group.chain(next_group) + .filter_map(|v| per_relay_parent.discovery_keys.get(v.0 as usize).map(|a| (v, a.clone()))) + .collect::>() + }; + + let mut prior_to = Vec::new(); + let mut statement_to = Vec::new(); + for (validator_index, authority_id) in targets { + // TODO [now], also continue if not connected. + let peer_id: PeerId = unimplemented!(); + + // We guarantee that the receiving peer knows the candidate by + // sending them a `Seconded` statement first. + if candidate_entry.known_by.insert(validator_index) { + if let Some(_) = prior_seconded.as_ref() { + prior_to.push(peer_id.clone()); + } + } + + statement_to.push(peer_id); + } + + // ship off the network messages to the network bridge. + + if !prior_to.is_empty() { + let prior_seconded = prior_seconded.expect("prior_to is only non-empty when prior_seconded exists; qed"); + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + prior_to, + Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + prior_seconded.clone(), + )).into() + )).await; + } + + if !statement_to.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + statement_to, + Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + statement.as_unchecked().clone(), + )).into() + )).await; + } } From a66a80514cca47f633ee1a769c30121825b6b89d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 19 Sep 2022 22:37:51 -0500 Subject: [PATCH 015/220] fmt --- .../src/vstaging/mod.rs | 82 ++++++++++++------- 1 file changed, 51 insertions(+), 31 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index cc9e17c46287..766cd93cb28b 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -18,13 +18,13 @@ //! designed for asynchronous backing. 
use polkadot_node_network_protocol::{ - self as net_protocol, peer_set::ValidationVersion, vstaging as protocol_vstaging, + self as net_protocol, grid_topology::{RequiredRouting, SessionBoundGridTopologyStorage, SessionGridTopology}, - PeerId, View, Versioned, + peer_set::ValidationVersion, + vstaging as protocol_vstaging, PeerId, Versioned, View, }; use polkadot_node_primitives::{ - SignedFullStatementWithPVD, - StatementWithPVD as FullStatementWithPVD, + SignedFullStatementWithPVD, StatementWithPVD as FullStatementWithPVD, }; use polkadot_node_subsystem::{ jaeger, @@ -33,9 +33,9 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util::backing_implicit_view::{FetchError, View as ImplicitView}; use polkadot_primitives::vstaging::{ - AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CoreState, GroupIndex, Hash, Id as ParaId, - PersistedValidationData, SignedStatement, UncheckedSignedStatement, ValidatorId, - ValidatorIndex, CompactStatement, SessionIndex, + AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, CoreState, + GroupIndex, Hash, Id as ParaId, PersistedValidationData, SessionIndex, SignedStatement, + UncheckedSignedStatement, ValidatorId, ValidatorIndex, }; use sp_keystore::SyncCryptoStorePtr; @@ -90,11 +90,17 @@ impl Default for CandidateData { impl CandidateData { fn has_issued_seconded(&self, validator: ValidatorIndex) -> bool { - self.seconded_statements.iter().find(|s| s.validator_index() == validator).is_some() + self.seconded_statements + .iter() + .find(|s| s.validator_index() == validator) + .is_some() } fn has_issued_valid(&self, validator: ValidatorIndex) -> bool { - self.valid_statements.iter().find(|s| s.validator_index() == validator).is_some() + self.valid_statements + .iter() + .find(|s| s.validator_index() == validator) + .is_some() } // ignores duplicates or equivocations. returns 'false' if those are detected, 'true' otherwise. 
@@ -102,8 +108,11 @@ impl CandidateData { let validator_index = statement.validator_index(); // only accept one statement by the validator. - let has_issued_statement = self.has_issued_seconded(validator_index) || self.has_issued_valid(validator_index); - if has_issued_statement { return false } + let has_issued_statement = + self.has_issued_seconded(validator_index) || self.has_issued_valid(validator_index); + if has_issued_statement { + return false + } match statement.payload() { CompactStatement::Seconded(_) => self.seconded_statements.push(statement), @@ -373,7 +382,7 @@ pub(crate) async fn share_local_statement( let (local_index, local_assignment) = match per_relay_parent.local_validator.as_ref() { None => return Err(JfyiError::InvalidShare), - Some(l) => (l.index, l.assignment) + Some(l) => (l.index, l.assignment), }; // Two possibilities: either the statement is `Seconded` or we already @@ -405,27 +414,28 @@ pub(crate) async fn share_local_statement( let candidate_entry = match statement.payload() { FullStatementWithPVD::Seconded(ref c, ref pvd) => { - let candidate_entry = per_relay_parent.candidates.entry(candidate_hash).or_default(); + let candidate_entry = + per_relay_parent.candidates.entry(candidate_hash).or_default(); if let CandidateState::Unconfirmed = candidate_entry.state { candidate_entry.state = CandidateState::Confirmed(c.clone(), pvd.clone()); } candidate_entry - } + }, FullStatementWithPVD::Valid(_) => { match per_relay_parent.candidates.get_mut(&candidate_hash) { - None => { + None => { // Can't share a 'Valid' statement about a candidate we don't know about! - return Err(JfyiError::InvalidShare); - } + return Err(JfyiError::InvalidShare) + }, Some(ref c) if !c.state.is_confirmed() => { // Can't share a 'Valid' statement about a candidate we don't know about! 
- return Err(JfyiError::InvalidShare); - } + return Err(JfyiError::InvalidShare) + }, Some(c) => c, } - } + }, }; if !candidate_entry.insert_signed_statement(compact_statement.clone()) { @@ -435,7 +445,7 @@ pub(crate) async fn share_local_statement( "Candidate backing issued redundant statement?", ); - return Err(JfyiError::InvalidShare); + return Err(JfyiError::InvalidShare) } (compact_statement, candidate_hash) @@ -484,7 +494,7 @@ async fn broadcast_local_statement( CompactStatement::Valid(_) => match candidate_entry.seconded_statements.first() { Some(s) => Some(s.as_unchecked().clone()), None => return, - } + }, }; let targets = { @@ -493,15 +503,20 @@ async fn broadcast_local_statement( None => return, // sanity: should be impossible to reach this. }; - let current_group = per_relay_parent.groups[local_validator.group.0 as usize].iter().cloned(); - let next_group = per_relay_parent.groups[local_validator.next_group.0 as usize].iter().cloned(); + let current_group = + per_relay_parent.groups[local_validator.group.0 as usize].iter().cloned(); + let next_group = + per_relay_parent.groups[local_validator.next_group.0 as usize].iter().cloned(); // TODO [now]: extend targets with validators which // a) we've sent `BackedCandidateInv` for this candidate to // b) have either requested the candidate _or_ have sent `BackedCandidateKnown` to us. - current_group.chain(next_group) - .filter_map(|v| per_relay_parent.discovery_keys.get(v.0 as usize).map(|a| (v, a.clone()))) + current_group + .chain(next_group) + .filter_map(|v| { + per_relay_parent.discovery_keys.get(v.0 as usize).map(|a| (v, a.clone())) + }) .collect::>() }; @@ -525,14 +540,17 @@ async fn broadcast_local_statement( // ship off the network messages to the network bridge. 
if !prior_to.is_empty() { - let prior_seconded = prior_seconded.expect("prior_to is only non-empty when prior_seconded exists; qed"); + let prior_seconded = + prior_seconded.expect("prior_to is only non-empty when prior_seconded exists; qed"); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( prior_to, Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( relay_parent, prior_seconded.clone(), - )).into() - )).await; + )) + .into(), + )) + .await; } if !statement_to.is_empty() { @@ -541,7 +559,9 @@ async fn broadcast_local_statement( Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( relay_parent, statement.as_unchecked().clone(), - )).into() - )).await; + )) + .into(), + )) + .await; } } From 5441efb31d2fabf058e7bd81018b865909ff2718 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 23 Sep 2022 15:39:01 -0500 Subject: [PATCH 016/220] fill out some TODOs --- .../src/vstaging/mod.rs | 43 ++++++++++++++++--- 1 file changed, 36 insertions(+), 7 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 766cd93cb28b..7550e94a8c83 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -177,10 +177,12 @@ pub(crate) struct State { peers: HashMap, keystore: SyncCryptoStorePtr, topology_storage: SessionBoundGridTopologyStorage, + authorities: HashMap, } struct PeerState { view: View, + maybe_authority: Option>, } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] @@ -190,20 +192,44 @@ pub(crate) async fn handle_network_update( update: NetworkBridgeEvent, ) { match update { - NetworkBridgeEvent::PeerConnected(peer_id, _role, protocol_version, authority_ids) => { + NetworkBridgeEvent::PeerConnected(peer_id, role, protocol_version, authority_ids) => { + gum::trace!(target: LOG_TARGET, ?peer_id, ?role, 
?protocol_version, "Peer connected"); + if protocol_version != ValidationVersion::VStaging.into() { return } - state.peers.insert(peer_id, PeerState { view: View::default() }); + state.peers.insert( + peer_id, + PeerState { + view: View::default(), + maybe_authority: authority_ids.clone(), + }, + ); - // TODO [now]: update some authorities map. + if let Some(authority_ids) = authority_ids { + authority_ids.into_iter().for_each(|a| { + state.authorities.insert(a, peer_id); + }) + } }, NetworkBridgeEvent::PeerDisconnected(peer_id) => { state.peers.remove(&peer_id); }, - NetworkBridgeEvent::NewGossipTopology(new_topology) => { - // TODO [now] + NetworkBridgeEvent::NewGossipTopology(topology) => { + let new_session_index = topology.session; + let new_topology: SessionGridTopology = topology.into(); + let old_topology = state.topology_storage.get_current_topology(); + let newly_added = new_topology.peers_diff(old_topology); + state.topology_storage.update_topology(new_session_index, new_topology); + for peer in newly_added { + if let Some(data) = state.peers.get_mut(&peer) { + // TODO [now]: send the peer any topology-specific + // messages we need to send them. Like forwarding or sending backed-candidate + // messages. But in principle we shouldn't have accepted any such messages as we don't + // yet have the topology. + } + } }, NetworkBridgeEvent::PeerMessage(peer_id, message) => { match message { @@ -523,8 +549,11 @@ async fn broadcast_local_statement( let mut prior_to = Vec::new(); let mut statement_to = Vec::new(); for (validator_index, authority_id) in targets { - // TODO [now], also continue if not connected. - let peer_id: PeerId = unimplemented!(); + // Find peer ID based on authority ID, and also filter to connected. 
+ let peer_id: PeerId = match state.authorities.get(&authority_id) { + Some(p) if state.peers.contains_key(p) => p.clone(), + None | Some(_) => continue, + }; // We guarantee that the receiving peer knows the candidate by // sending them a `Seconded` statement first. From cd722bba2eb5b28e0feb53b3f1b9c45628cf3271 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 23 Sep 2022 17:06:05 -0500 Subject: [PATCH 017/220] start on handling incoming --- .../src/vstaging/mod.rs | 110 +++++++++++++++++- 1 file changed, 104 insertions(+), 6 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 7550e94a8c83..9a98a9af77b3 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -21,6 +21,7 @@ use polkadot_node_network_protocol::{ self as net_protocol, grid_topology::{RequiredRouting, SessionBoundGridTopologyStorage, SessionGridTopology}, peer_set::ValidationVersion, + UnifiedReputationChange as Rep, vstaging as protocol_vstaging, PeerId, Versioned, View, }; use polkadot_node_primitives::{ @@ -35,7 +36,7 @@ use polkadot_node_subsystem_util::backing_implicit_view::{FetchError, View as Im use polkadot_primitives::vstaging::{ AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, CoreState, GroupIndex, Hash, Id as ParaId, PersistedValidationData, SessionIndex, SignedStatement, - UncheckedSignedStatement, ValidatorId, ValidatorIndex, + UncheckedSignedStatement, ValidatorId, ValidatorIndex, SigningContext, }; use sp_keystore::SyncCryptoStorePtr; @@ -49,6 +50,17 @@ use crate::{ LOG_TARGET, }; +const COST_UNEXPECTED_STATEMENT: Rep = Rep::CostMinor("Unexpected Statement"); +const COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE: Rep = + Rep::CostMinor("Unexpected Statement, missing knowlege for relay parent"); +const COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE: Rep = + 
Rep::CostMinor("Unexpected Statement, unknown candidate"); +const COST_UNEXPECTED_STATEMENT_REMOTE: Rep = + Rep::CostMinor("Unexpected Statement, remote not allowed"); + +const COST_INVALID_SIGNATURE: Rep = Rep::CostMajor("Invalid Statement Signature"); + + struct PerRelayParentState { validators: Vec, discovery_keys: Vec, @@ -185,6 +197,13 @@ struct PeerState { maybe_authority: Option>, } +/// How many votes we need to consider a candidate backed. +/// +/// WARNING: This has to be kept in sync with the runtime check in the inclusion module. +fn minimum_votes(n_validators: usize) -> usize { + std::cmp::min(2, n_validators) +} + #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] pub(crate) async fn handle_network_update( ctx: &mut Context, @@ -477,9 +496,16 @@ pub(crate) async fn share_local_statement( (compact_statement, candidate_hash) }; + // send the compact version of the statement to nodes in current group and next-up. If not a `Seconded` statement, + // send a `Seconded` statement as well. + broadcast_local_statement( + ctx, + state, + relay_parent, + compact_statement, + ).await; + // TODO [now]: - // 3. send the compact version of the statement to nodes in current group and next-up. If not a `Seconded` statement, - // send a `Seconded` statement as well. // 4. If the candidate is now backed, trigger 'backed candidate announcement' logic. Ok(()) @@ -531,15 +557,17 @@ async fn broadcast_local_statement( let current_group = per_relay_parent.groups[local_validator.group.0 as usize].iter().cloned(); - let next_group = - per_relay_parent.groups[local_validator.next_group.0 as usize].iter().cloned(); + + // TODO [now]: extend targets with validators in any current leaf which + // are assigned to the group // TODO [now]: extend targets with validators which // a) we've sent `BackedCandidateInv` for this candidate to // b) have either requested the candidate _or_ have sent `BackedCandidateKnown` to us. 
+ // TODO [now]: dedup + current_group - .chain(next_group) .filter_map(|v| { per_relay_parent.discovery_keys.get(v.0 as usize).map(|a| (v, a.clone())) }) @@ -594,3 +622,73 @@ async fn broadcast_local_statement( .await; } } + +/// Check a statement signature under this parent hash. +fn check_statement_signature( + session_index: SessionIndex, + validators: &[ValidatorId], + relay_parent: Hash, + statement: UncheckedSignedStatement, +) -> std::result::Result { + let signing_context = + SigningContext { session_index, parent_hash: relay_parent }; + + validators + .get(statement.unchecked_validator_index().0 as usize) + .ok_or_else(|| statement.clone()) + .and_then(|v| statement.try_into_checked(&signing_context, v)) +} + +async fn report_peer( + sender: &mut impl overseer::StatementDistributionSenderTrait, + peer: PeerId, + rep: Rep, +) { + sender.send_message(NetworkBridgeTxMessage::ReportPeer(peer, rep)).await +} + + +/// Handle an incoming statement. +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn handle_incoming_statement( + ctx: &mut Context, + state: &mut State, + peer: PeerId, + relay_parent: Hash, + statement: UncheckedSignedStatement, +) { + if !state.peers.contains_key(&peer) { + // sanity: should be impossible. + return; + } + + // Ensure we know the relay parent. + let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) { + None => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE).await; + return + } + Some(p) => p, + }; + + // Ensure the statement is correctly signed. 
+ let checked_statement = match check_statement_signature( + per_relay_parent.session, + &per_relay_parent.validators[..], + relay_parent, + statement, + ) { + Ok(s) => s, + Err(_) => { + report_peer(ctx.sender(), peer, COST_INVALID_SIGNATURE).await; + return; + } + }; + + let candidate_hash = checked_statement.payload().candidate_hash(); + + // Ensure that if the statement is kind 'Valid' that we know the candidate. + if let CompactStatement::Valid(_) = checked_statement.payload() { + // TODO [now] + } +} From c702f05ec0cc3678aabb42e1611e4f4e015457cd Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 23 Sep 2022 17:18:20 -0500 Subject: [PATCH 018/220] split off session info into separate map --- .../src/vstaging/mod.rs | 82 ++++++++++++------- 1 file changed, 52 insertions(+), 30 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 9a98a9af77b3..757575b437eb 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -36,7 +36,7 @@ use polkadot_node_subsystem_util::backing_implicit_view::{FetchError, View as Im use polkadot_primitives::vstaging::{ AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, CoreState, GroupIndex, Hash, Id as ParaId, PersistedValidationData, SessionIndex, SignedStatement, - UncheckedSignedStatement, ValidatorId, ValidatorIndex, SigningContext, + UncheckedSignedStatement, ValidatorId, ValidatorIndex, SigningContext, SessionInfo, }; use sp_keystore::SyncCryptoStorePtr; @@ -62,12 +62,8 @@ const COST_INVALID_SIGNATURE: Rep = Rep::CostMajor("Invalid Statement Signature" struct PerRelayParentState { - validators: Vec, - discovery_keys: Vec, - groups: Vec>, validator_state: HashMap, candidates: HashMap, - known_by: HashSet, local_validator: Option, session: SessionIndex, } @@ -186,6 +182,7 @@ pub(crate) struct State { /// We only feed leaves 
which have prospective parachains enabled to this view. implicit_view: ImplicitView, per_relay_parent: HashMap, + per_session: HashMap, peers: HashMap, keystore: SyncCryptoStorePtr, topology_storage: SessionBoundGridTopologyStorage, @@ -311,32 +308,39 @@ pub(crate) async fn handle_activated_leaf( .map_err(JfyiError::RuntimeApiUnavailable)? .map_err(JfyiError::FetchSessionIndex)?; - let session_info = + let availability_cores = + polkadot_node_subsystem_util::request_availability_cores(*leaf, ctx.sender()) + .await + .await + .map_err(JfyiError::RuntimeApiUnavailable)? + .map_err(JfyiError::FetchAvailabilityCores)?; + + if !state.per_session.contains_key(&session_index) { + let session_info = polkadot_node_subsystem_util::request_session_info(*leaf, session_index, ctx.sender()) .await .await .map_err(JfyiError::RuntimeApiUnavailable)? .map_err(JfyiError::FetchSessionInfo)?; - let session_info = match session_info { - None => { - gum::warn!( - target: LOG_TARGET, - relay_parent = ?leaf, - "No session info available for current session" - ); + let session_info = match session_info { + None => { + gum::warn!( + target: LOG_TARGET, + relay_parent = ?leaf, + "No session info available for current session" + ); - continue - }, - Some(s) => s, - }; + continue + }, + Some(s) => s, + }; - let availability_cores = - polkadot_node_subsystem_util::request_availability_cores(*leaf, ctx.sender()) - .await - .await - .map_err(JfyiError::RuntimeApiUnavailable)? 
- .map_err(JfyiError::FetchAvailabilityCores)?; + state.per_session.insert(session_index, session_info); + } + + let session_info = state.per_session.get(&session_index) + .expect("either existed or just inserted; qed"); let local_validator = find_local_validator_state( &session_info.validators, @@ -349,12 +353,8 @@ pub(crate) async fn handle_activated_leaf( state.per_relay_parent.insert( *leaf, PerRelayParentState { - validators: session_info.validators, - discovery_keys: session_info.discovery_keys, - groups: session_info.validator_groups, validator_state: HashMap::new(), candidates: HashMap::new(), - known_by: HashSet::new(), local_validator, session: session_index, }, @@ -410,6 +410,10 @@ pub(crate) fn handle_deactivate_leaf(state: &mut State, leaf_hash: Hash) { // clean up per-relay-parent data based on everything removed. state.per_relay_parent.retain(|r, _| relay_parents.contains(r)); + + // clean up sessions based on everything remaining. + let sessions: HashSet<_> = state.per_relay_parent.values().map(|r| r.session).collect(); + state.per_session.retain(|s, _| sessions.contains(s)); } // Imports a locally originating statement and distributes it to peers. 
@@ -535,6 +539,11 @@ async fn broadcast_local_statement( None => return, }; + let session_info = match state.per_session.get(&per_relay_parent.session) { + Some(s) => s, + None => return, + }; + let candidate_hash = statement.payload().candidate_hash().clone(); let candidate_entry = match per_relay_parent.candidates.get_mut(&candidate_hash) { Some(x) => x, @@ -556,7 +565,7 @@ async fn broadcast_local_statement( }; let current_group = - per_relay_parent.groups[local_validator.group.0 as usize].iter().cloned(); + session_info.validator_groups[local_validator.group.0 as usize].iter().cloned(); // TODO [now]: extend targets with validators in any current leaf which // are assigned to the group @@ -569,7 +578,7 @@ async fn broadcast_local_statement( current_group .filter_map(|v| { - per_relay_parent.discovery_keys.get(v.0 as usize).map(|a| (v, a.clone())) + session_info.discovery_keys.get(v.0 as usize).map(|a| (v, a.clone())) }) .collect::>() }; @@ -671,10 +680,23 @@ async fn handle_incoming_statement( Some(p) => p, }; + let session_info = match state.per_session.get(&per_relay_parent.session) { + None => { + gum::warn!( + target: LOG_TARGET, + session = ?per_relay_parent.session, + "Missing expected session info.", + ); + + return + } + Some(s) => s, + }; + // Ensure the statement is correctly signed. 
let checked_statement = match check_statement_signature( per_relay_parent.session, - &per_relay_parent.validators[..], + &session_info.validators[..], relay_parent, statement, ) { From 6a7dd7977bdfd619dfaaefc485ba698899335225 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 23 Sep 2022 19:05:15 -0500 Subject: [PATCH 019/220] start in on a knowledge tracker --- .../src/vstaging/direct.rs | 201 ++++++++++++++++++ .../src/vstaging/mod.rs | 13 +- 2 files changed, 203 insertions(+), 11 deletions(-) create mode 100644 node/network/statement-distribution/src/vstaging/direct.rs diff --git a/node/network/statement-distribution/src/vstaging/direct.rs b/node/network/statement-distribution/src/vstaging/direct.rs new file mode 100644 index 000000000000..e7f867a3a4ee --- /dev/null +++ b/node/network/statement-distribution/src/vstaging/direct.rs @@ -0,0 +1,201 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Direct distribution of statements, even those concerning candidates which +//! are not yet backed. +//! +//! Members of a validation group assigned to a para at a given relay-parent +//! always distribute statements directly to each other. +//! +//! Each validator in the group is permitted to send up to some limit of +//! `Seconded` statements per validator in the group. These may differ per-validator, +//! 
if an attacker is exploiting network partitions, so we have to track up to +//! `limit*group_size^2` `Seconded` statements. The limits and group sizes are both +//! relatively small, and this is an absolute worst case. +//! +//! This module exposes a "DirectInGroup" utility which allows us to determine +//! whether to accept or reject messages, and to track which candidates we consider +//! 'legitimate' based on the first `limit` `Seconded` statements we see signed by +//! each validator. +// TODO [now]: decide if we want to also distribute statements to validators +// that are assigned as-of an active leaf i.e. the next group. + +use std::ops::Range; + +use polkadot_primitives::vstaging::{ValidatorIndex, CandidateHash}; + +/// Utility for keeping track of limits on direct statements within a group. +/// +/// See module docs for more details. +pub struct DirectInGroup { + validators: Vec, + our_index: usize, + seconding_limit: usize, + + // a 3D matrix where the dimensions have the following meaning + // X: indicates the sending validator (size: group_size - 1, omitting self) + // Y: indicates the originating validator who issued the statement (size: group_size) + // Z: the candidate hash of the statement (size: seconding_limit) + // + // preallocated to (group size - 1) * group_size * seconding_limit. + incoming: Vec>, + + // a 2D matrix of accepted incoming `Seconded` messages from validators + // in the group. + // X: indicates the originating validator (size: group_size) + // Y: a seconded candidate we've accepted knowledge of locally (size: seconding_limit) + accepted: Vec>, + + // TODO [now]: outgoing sends +} + +impl DirectInGroup { + /// Instantiate a new `DirectInGroup` tracker. Fails if `our_index` is out of bounds + /// or `group_validators` is empty or `our_index` is not in the group. 
+ pub fn new( + group_validators: Vec, + our_index: ValidatorIndex, + seconding_limit: usize, + ) -> Option { + if group_validators.is_empty() { return None } + if our_index.0 as usize >= group_validators.len() { return None } + + let our_index = index_in_group(&group_validators, our_index)?; + + let incoming_size = (group_validators.len() - 1) * group_validators.len() * seconding_limit; + let accepted_size = group_validators.len() * seconding_limit; + + let incoming = vec![None; incoming_size]; + let accepted = vec![None; accepted_size]; + + Some(DirectInGroup { + validators: group_validators, + our_index, + seconding_limit, + incoming, + accepted, + }) + } + + /// Handle an incoming `Seconded` statement from the given validator. + /// If the outcome is `Reject` then no internal state is altered. + pub fn handle_incoming_seconded( + &mut self, + sender: ValidatorIndex, + originator: ValidatorIndex, + candidate_hash: CandidateHash, + ) -> Result { + let sender_index = match self.index_in_group(sender) { + None => return Err(RejectIncoming::NotInGroup), + Some(i) => i, + }; + + let originator_index = match self.index_in_group(sender) { + None => return Err(RejectIncoming::NotInGroup), + Some(i) => i, + }; + + if sender_index == self.our_index || originator_index == self.our_index { + return Err(RejectIncoming::NotInGroup); + } + + let range = self.incoming_range(sender_index, originator_index); + for i in range { + if self.incoming[i] == Some(candidate_hash) { + // duplicates get rejected. + return Err(RejectIncoming::PeerExcess) + } + + // ok, found an empty slot. + if self.incoming[i].is_none() { + self.incoming[i] = Some(candidate_hash); + return self.handle_accepted_incoming( + originator_index, + candidate_hash, + ); + } + } + + Err(RejectIncoming::PeerExcess) + } + + // TODO [now]: some API analogues to can_send / can_receive. 
+ + fn handle_accepted_incoming( + &mut self, + originator: usize, + candidate_hash: CandidateHash, + ) -> Result { + let range = self.accepted_range(originator); + for i in range { + if self.accepted[i] == Some(candidate_hash) { + return Ok(AcceptIncoming::YesKnown); + } + + if self.accepted[i].is_none() { + self.accepted[i] = Some(candidate_hash); + return Ok(AcceptIncoming::YesUnknown); + } + } + + Err(RejectIncoming::OriginatorExcess) + } + + fn index_in_group(&self, validator: ValidatorIndex) -> Option { + index_in_group(&self.validators, validator) + } + + fn adjust_for_skipped_self(&self, index: usize) -> usize { + if index > self.our_index { index - 1 } else { index } + } + + fn incoming_range(&self, sender: usize, originator: usize) -> Range { + // adjust X dimension to account for the fact that our index is skipped. + let sender = self.adjust_for_skipped_self(sender); + let base = (sender * (self.validators.len() - 1)) + originator * self.seconding_limit; + + base .. base + self.seconding_limit + } + + fn accepted_range(&self, originator: usize) -> Range { + let base = originator * self.seconding_limit; + base .. base + self.seconding_limit + } +} + +/// Incoming `Seconded` message was rejected. +pub enum RejectIncoming { + /// Peer sent excessive messages. + PeerExcess, + /// Originator sent excessive messages, peer seems innocent. + OriginatorExcess, + /// Sender or originator is not in the group. + NotInGroup, +} + +/// Incoming `Seconded` message was accepted. +pub enum AcceptIncoming { + /// The `Seconded` statement was within the peer's limits and unknown + /// for the originator. + YesUnknown, + /// The `Seconded` statement was within the peer's limits and already + /// known for the originator. 
+ YesKnown, +} + +fn index_in_group(validators: &[ValidatorIndex], index: ValidatorIndex) -> Option { + validators.iter().position(|v| v == &index) +} diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 757575b437eb..1bd4e0447b18 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -50,6 +50,8 @@ use crate::{ LOG_TARGET, }; +mod direct; + const COST_UNEXPECTED_STATEMENT: Rep = Rep::CostMinor("Unexpected Statement"); const COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE: Rep = Rep::CostMinor("Unexpected Statement, missing knowlege for relay parent"); @@ -169,11 +171,6 @@ struct LocalValidatorState { group: GroupIndex, // the assignment of our validator group, if any. assignment: Option, - // the next group assigned to this para. - next_group: GroupIndex, - // the previous group assigned to this para, stored only - // if they are currently assigned to a para. - prev_group: Option<(GroupIndex, ParaId)>, } pub(crate) struct State { @@ -382,10 +379,6 @@ async fn find_local_validator_state( // note: this won't work well for parathreads because it only works // when core assignments to paras are static throughout the session. 
- let next_group = GroupIndex((our_group.0 + 1) % groups.len() as u32);
- let prev_group =
- GroupIndex(if our_group.0 == 0 { our_group.0 - 1 } else { groups.len() as u32 - 1 });
-
 let para_for_group =
 |g: GroupIndex| availability_cores.get(g.0 as usize).and_then(|c| c.para_id());
@@ -393,8 +386,6 @@ async fn find_local_validator_state(
 index: validator_index,
 group: our_group,
 assignment: para_for_group(our_group),
- next_group,
- prev_group: para_for_group(prev_group).map(|p| (prev_group, p)),
 })
 }

From 9bfda1f6259e1bd6d5cc53cbb64b5d4cb29e5a7f Mon Sep 17 00:00:00 2001
From: Robert Habermeier
Date: Fri, 23 Sep 2022 19:06:56 -0500
Subject: [PATCH 020/220] address some grumbles

---
 node/network/protocol/src/request_response/mod.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/node/network/protocol/src/request_response/mod.rs b/node/network/protocol/src/request_response/mod.rs
index 805ac4dec305..30053183148d 100644
--- a/node/network/protocol/src/request_response/mod.rs
+++ b/node/network/protocol/src/request_response/mod.rs
@@ -109,7 +109,7 @@ const POV_REQUEST_TIMEOUT_CONNECTED: Duration = Duration::from_millis(1200);
 /// fit statement distribution within a block of 6 seconds.)
 const STATEMENTS_TIMEOUT: Duration = Duration::from_secs(1);
 
-/// We want to time out backed candidate requests to time out relatively fast,
+/// We want backed candidate requests to time out relatively fast,
 /// because slow requests will bottleneck the backing system. Ideally, we'd have
 /// an adaptive timeout based on the candidate size, because there will be a lot of variance
 /// in candidate sizes: candidates with no code and no messages vs candidates with code
@@ -130,7 +130,7 @@ pub const MAX_PARALLEL_STATEMENT_REQUESTS: u32 = 3;
 /// We don't want a slow peer to slow down all the others, at the same time we want to get out the
 /// data quickly in full to at least some peers (as this will reduce load on us as they then can
 /// start serving the data).
So this value is a tradeoff. 3 seems to be sensible. So we would need -/// to have 3 slow nodes connected, to delay transfer for others by `STATEMENTS_TIMEOUT`. +/// to have 3 slow nodes connected, to delay transfer for others by `BACKED_CANDIDATE_PACKET_TIMEOUT`. pub const MAX_PARALLEL_BACKED_CANDIDATE_PACKET_REQUESTS: u32 = 5; /// Response size limit for responses of POV like data. From 0f25b3abe73d69f23eb530c897d4650f15a119d4 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 23 Sep 2022 19:07:01 -0500 Subject: [PATCH 021/220] format --- .../src/vstaging/direct.rs | 32 ++++++----- .../src/vstaging/mod.rs | 57 ++++++++----------- 2 files changed, 42 insertions(+), 47 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/direct.rs b/node/network/statement-distribution/src/vstaging/direct.rs index e7f867a3a4ee..24d37a626da0 100644 --- a/node/network/statement-distribution/src/vstaging/direct.rs +++ b/node/network/statement-distribution/src/vstaging/direct.rs @@ -35,7 +35,7 @@ use std::ops::Range; -use polkadot_primitives::vstaging::{ValidatorIndex, CandidateHash}; +use polkadot_primitives::vstaging::{CandidateHash, ValidatorIndex}; /// Utility for keeping track of limits on direct statements within a group. 
/// @@ -58,7 +58,6 @@ pub struct DirectInGroup { // X: indicates the originating validator (size: group_size) // Y: a seconded candidate we've accepted knowledge of locally (size: seconding_limit) accepted: Vec>, - // TODO [now]: outgoing sends } @@ -70,8 +69,12 @@ impl DirectInGroup { our_index: ValidatorIndex, seconding_limit: usize, ) -> Option { - if group_validators.is_empty() { return None } - if our_index.0 as usize >= group_validators.len() { return None } + if group_validators.is_empty() { + return None + } + if our_index.0 as usize >= group_validators.len() { + return None + } let our_index = index_in_group(&group_validators, our_index)?; @@ -109,7 +112,7 @@ impl DirectInGroup { }; if sender_index == self.our_index || originator_index == self.our_index { - return Err(RejectIncoming::NotInGroup); + return Err(RejectIncoming::NotInGroup) } let range = self.incoming_range(sender_index, originator_index); @@ -122,10 +125,7 @@ impl DirectInGroup { // ok, found an empty slot. if self.incoming[i].is_none() { self.incoming[i] = Some(candidate_hash); - return self.handle_accepted_incoming( - originator_index, - candidate_hash, - ); + return self.handle_accepted_incoming(originator_index, candidate_hash) } } @@ -142,12 +142,12 @@ impl DirectInGroup { let range = self.accepted_range(originator); for i in range { if self.accepted[i] == Some(candidate_hash) { - return Ok(AcceptIncoming::YesKnown); + return Ok(AcceptIncoming::YesKnown) } if self.accepted[i].is_none() { self.accepted[i] = Some(candidate_hash); - return Ok(AcceptIncoming::YesUnknown); + return Ok(AcceptIncoming::YesUnknown) } } @@ -159,7 +159,11 @@ impl DirectInGroup { } fn adjust_for_skipped_self(&self, index: usize) -> usize { - if index > self.our_index { index - 1 } else { index } + if index > self.our_index { + index - 1 + } else { + index + } } fn incoming_range(&self, sender: usize, originator: usize) -> Range { @@ -167,12 +171,12 @@ impl DirectInGroup { let sender = 
self.adjust_for_skipped_self(sender); let base = (sender * (self.validators.len() - 1)) + originator * self.seconding_limit; - base .. base + self.seconding_limit + base..base + self.seconding_limit } fn accepted_range(&self, originator: usize) -> Range { let base = originator * self.seconding_limit; - base .. base + self.seconding_limit + base..base + self.seconding_limit } } diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 1bd4e0447b18..3d2ee546eac7 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -21,8 +21,7 @@ use polkadot_node_network_protocol::{ self as net_protocol, grid_topology::{RequiredRouting, SessionBoundGridTopologyStorage, SessionGridTopology}, peer_set::ValidationVersion, - UnifiedReputationChange as Rep, - vstaging as protocol_vstaging, PeerId, Versioned, View, + vstaging as protocol_vstaging, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_primitives::{ SignedFullStatementWithPVD, StatementWithPVD as FullStatementWithPVD, @@ -35,8 +34,8 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::backing_implicit_view::{FetchError, View as ImplicitView}; use polkadot_primitives::vstaging::{ AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, CoreState, - GroupIndex, Hash, Id as ParaId, PersistedValidationData, SessionIndex, SignedStatement, - UncheckedSignedStatement, ValidatorId, ValidatorIndex, SigningContext, SessionInfo, + GroupIndex, Hash, Id as ParaId, PersistedValidationData, SessionIndex, SessionInfo, + SignedStatement, SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex, }; use sp_keystore::SyncCryptoStorePtr; @@ -62,7 +61,6 @@ const COST_UNEXPECTED_STATEMENT_REMOTE: Rep = const COST_INVALID_SIGNATURE: Rep = Rep::CostMajor("Invalid Statement Signature"); - struct PerRelayParentState { 
validator_state: HashMap, candidates: HashMap, @@ -214,10 +212,7 @@ pub(crate) async fn handle_network_update( state.peers.insert( peer_id, - PeerState { - view: View::default(), - maybe_authority: authority_ids.clone(), - }, + PeerState { view: View::default(), maybe_authority: authority_ids.clone() }, ); if let Some(authority_ids) = authority_ids { @@ -313,12 +308,15 @@ pub(crate) async fn handle_activated_leaf( .map_err(JfyiError::FetchAvailabilityCores)?; if !state.per_session.contains_key(&session_index) { - let session_info = - polkadot_node_subsystem_util::request_session_info(*leaf, session_index, ctx.sender()) - .await - .await - .map_err(JfyiError::RuntimeApiUnavailable)? - .map_err(JfyiError::FetchSessionInfo)?; + let session_info = polkadot_node_subsystem_util::request_session_info( + *leaf, + session_index, + ctx.sender(), + ) + .await + .await + .map_err(JfyiError::RuntimeApiUnavailable)? + .map_err(JfyiError::FetchSessionInfo)?; let session_info = match session_info { None => { @@ -336,7 +334,9 @@ pub(crate) async fn handle_activated_leaf( state.per_session.insert(session_index, session_info); } - let session_info = state.per_session.get(&session_index) + let session_info = state + .per_session + .get(&session_index) .expect("either existed or just inserted; qed"); let local_validator = find_local_validator_state( @@ -493,12 +493,7 @@ pub(crate) async fn share_local_statement( // send the compact version of the statement to nodes in current group and next-up. If not a `Seconded` statement, // send a `Seconded` statement as well. - broadcast_local_statement( - ctx, - state, - relay_parent, - compact_statement, - ).await; + broadcast_local_statement(ctx, state, relay_parent, compact_statement).await; // TODO [now]: // 4. If the candidate is now backed, trigger 'backed candidate announcement' logic. 
@@ -568,9 +563,7 @@ async fn broadcast_local_statement( // TODO [now]: dedup current_group - .filter_map(|v| { - session_info.discovery_keys.get(v.0 as usize).map(|a| (v, a.clone())) - }) + .filter_map(|v| session_info.discovery_keys.get(v.0 as usize).map(|a| (v, a.clone()))) .collect::>() }; @@ -630,8 +623,7 @@ fn check_statement_signature( relay_parent: Hash, statement: UncheckedSignedStatement, ) -> std::result::Result { - let signing_context = - SigningContext { session_index, parent_hash: relay_parent }; + let signing_context = SigningContext { session_index, parent_hash: relay_parent }; validators .get(statement.unchecked_validator_index().0 as usize) @@ -647,7 +639,6 @@ async fn report_peer( sender.send_message(NetworkBridgeTxMessage::ReportPeer(peer, rep)).await } - /// Handle an incoming statement. #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn handle_incoming_statement( @@ -659,7 +650,7 @@ async fn handle_incoming_statement( ) { if !state.peers.contains_key(&peer) { // sanity: should be impossible. - return; + return } // Ensure we know the relay parent. 
@@ -667,7 +658,7 @@ async fn handle_incoming_statement( None => { report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE).await; return - } + }, Some(p) => p, }; @@ -680,7 +671,7 @@ async fn handle_incoming_statement( ); return - } + }, Some(s) => s, }; @@ -694,8 +685,8 @@ async fn handle_incoming_statement( Ok(s) => s, Err(_) => { report_peer(ctx.sender(), peer, COST_INVALID_SIGNATURE).await; - return; - } + return + }, }; let candidate_hash = checked_statement.payload().candidate_hash(); From 60bb289af82fe77b0333ca52462a7af7f4adef88 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 23 Sep 2022 19:07:19 -0500 Subject: [PATCH 022/220] missed comment --- node/network/protocol/src/request_response/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/network/protocol/src/request_response/mod.rs b/node/network/protocol/src/request_response/mod.rs index 30053183148d..11229f73d029 100644 --- a/node/network/protocol/src/request_response/mod.rs +++ b/node/network/protocol/src/request_response/mod.rs @@ -130,7 +130,7 @@ pub const MAX_PARALLEL_STATEMENT_REQUESTS: u32 = 3; /// We don't want a slow peer to slow down all the others, at the same time we want to get out the /// data quickly in full to at least some peers (as this will reduce load on us as they then can /// start serving the data). So this value is a tradeoff. 3 seems to be sensible. So we would need -/// to have 3 slow nodes connected, to delay transfer for others by `BACKED_CANDIDATE_PACKET_TIMEOUT`. +/// to have 5 slow nodes connected, to delay transfer for others by `BACKED_CANDIDATE_PACKET_TIMEOUT`. pub const MAX_PARALLEL_BACKED_CANDIDATE_PACKET_REQUESTS: u32 = 5; /// Response size limit for responses of POV like data. 
From 03ff76dd85d386a9e5090b383c9eecb03fb83ccb Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 23 Sep 2022 22:26:25 -0500 Subject: [PATCH 023/220] some docs for direct --- .../src/vstaging/direct.rs | 34 ++++++++++++++----- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/direct.rs b/node/network/statement-distribution/src/vstaging/direct.rs index 24d37a626da0..cd80393425e0 100644 --- a/node/network/statement-distribution/src/vstaging/direct.rs +++ b/node/network/statement-distribution/src/vstaging/direct.rs @@ -20,16 +20,32 @@ //! Members of a validation group assigned to a para at a given relay-parent //! always distribute statements directly to each other. //! -//! Each validator in the group is permitted to send up to some limit of -//! `Seconded` statements per validator in the group. These may differ per-validator, -//! if an attacker is exploiting network partitions, so we have to track up to -//! `limit*group_size^2` `Seconded` statements. The limits and group sizes are both -//! relatively small, and this is an absolute worst case. +//! The main way we limit the amount of candidates that have to be handled by +//! the system is to limit the amount of `Seconded` messages that we allow +//! each validator to issue at each relay-parent. Since the amount of relay-parents +//! that we have to deal with at any time is itself bounded, this lets us bound +//! the memory and work that we have here. Bounding `Seconded` statements is enough +//! because they imply a bounded amount of `Valid` statements about the same candidate +//! which may follow. //! -//! This module exposes a "DirectInGroup" utility which allows us to determine -//! whether to accept or reject messages, and to track which candidates we consider -//! 'legitimate' based on the first `limit` `Seconded` statements we see signed by -//! each validator. +//! 
The motivation for this piece of code is that the statements that each validator +//! sees may differ. i.e. even though a validator is allowed to issue X `Seconded` +//! statements at a relay-parent, they may in fact issue X*2 and issue one set to +//! one partition of the backing group and one set to another. Of course, in practice +//! these types of partitions will not exist, but in the worst case each validator in the +//! group would see an entirely different set of X `Seconded` statements from some validator +//! and each validator is in its own partition. After that partition resolves, we'd have to +//! deal with up to `limit*group_size^2` `Seconded` statements. +//! +//! Given that both our group sizes and our limits per relay-parent are small, this is +//! quite manageable, and the utility here lets us deal with it in only a few kilobytes +//! of memory. +//! +//! More concretely, this module exposes a "DirectInGroup" utility which allows us to determine +//! whether to accept or reject messages from other validators in the same group as we +//! are in, based on _the most charitable possible interpretation of our protocol rules_, +//! and to keep track of what we have sent to other validators in the group and what we may +//! continue to send them. // TODO [now]: decide if we want to also distribute statements to validators // that are assigned as-of an active leaf i.e. the next group. 
From c2918fc9ba69b3fcec786c8ff878104843d5bbdc Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 23 Sep 2022 22:29:50 -0500 Subject: [PATCH 024/220] add note on slashing --- node/network/statement-distribution/src/vstaging/direct.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/node/network/statement-distribution/src/vstaging/direct.rs b/node/network/statement-distribution/src/vstaging/direct.rs index cd80393425e0..897342c5dcb9 100644 --- a/node/network/statement-distribution/src/vstaging/direct.rs +++ b/node/network/statement-distribution/src/vstaging/direct.rs @@ -41,6 +41,12 @@ //! quite manageable, and the utility here lets us deal with it in only a few kilobytes //! of memory. //! +//! It's also worth noting that any case where a validator issues more than the legal limit +//! of `Seconded` statements at a relay parent is trivially slashable on-chain, which means +//! the 'worst case' adversary that this code defends against is effectively lighting money +//! on fire. Nevertheless, we handle the case here to ensure that the behavior of the +//! system is well-defined even if an adversary is willing to be slashed. +//! //! More concretely, this module exposes a "DirectInGroup" utility which allows us to determine //! whether to accept or reject messages from other validators in the same group as we //! 
are in, based on _the most charitable possible interpretation of our protocol rules_, From bddeff1fb631def2128e27f36bd48353cccb5910 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 23 Sep 2022 22:31:04 -0500 Subject: [PATCH 025/220] amend --- node/network/statement-distribution/src/vstaging/direct.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/direct.rs b/node/network/statement-distribution/src/vstaging/direct.rs index 897342c5dcb9..7793449e7922 100644 --- a/node/network/statement-distribution/src/vstaging/direct.rs +++ b/node/network/statement-distribution/src/vstaging/direct.rs @@ -35,7 +35,9 @@ //! these types of partitions will not exist, but in the worst case each validator in the //! group would see an entirely different set of X `Seconded` statements from some validator //! and each validator is in its own partition. After that partition resolves, we'd have to -//! deal with up to `limit*group_size^2` `Seconded` statements. +//! deal with up to `limit*group_size` `Seconded` statements from that validator. And then +//! if every validator in the group does the same thing, we're dealing with something like +//! `limit*group_size^2` `Seconded` statements in total. //! //! Given that both our group sizes and our limits per relay-parent are small, this is //! 
quite manageable, and the utility here lets us deal with it in only a few kilobytes From 6009816ec19f55402de173a3a681aeb86c65b9c4 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 26 Sep 2022 18:36:22 -0500 Subject: [PATCH 026/220] simplify 'direct' code --- .../src/vstaging/direct.rs | 282 +++++++++++------- 1 file changed, 179 insertions(+), 103 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/direct.rs b/node/network/statement-distribution/src/vstaging/direct.rs index 7793449e7922..72f2b5c714ae 100644 --- a/node/network/statement-distribution/src/vstaging/direct.rs +++ b/node/network/statement-distribution/src/vstaging/direct.rs @@ -59,30 +59,46 @@ use std::ops::Range; -use polkadot_primitives::vstaging::{CandidateHash, ValidatorIndex}; +use polkadot_primitives::vstaging::{CandidateHash, CompactStatement, ValidatorIndex}; + +use std::collections::{HashMap, HashSet}; + +#[derive(Hash, PartialEq, Eq)] +struct ValidStatementManifest { + remote: ValidatorIndex, + originator: ValidatorIndex, + candidate_hash: CandidateHash, +} + +// A piece of knowledge about a candidate +#[derive(Hash, Clone, PartialEq, Eq)] +enum Knowledge { + // General knowledge. + General(CandidateHash), + // Specific knowledge of a given statement (with its originator) + Specific(CompactStatement, ValidatorIndex), +} + +// Knowledge paired with its source. +#[derive(Hash, Clone, PartialEq, Eq)] +enum TaggedKnowledge { + // Knowledge we have received from the validator on the p2p layer. + IncomingP2P(Knowledge), + // Knowledge we have sent to the validator on the p2p layer. + OutgoingP2P(Knowledge), + // Knowledge of candidates the validator has seconded. + Seconded(CandidateHash), +} /// Utility for keeping track of limits on direct statements within a group. /// /// See module docs for more details. 
pub struct DirectInGroup { validators: Vec, - our_index: usize, + our_index: ValidatorIndex, seconding_limit: usize, - // a 3D matrix where the dimensions have the following meaning - // X: indicates the sending validator (size: group_size - 1, omitting self) - // Y: indicates the originating validator who issued the statement (size: group_size) - // Z: the candidate hash of the statement (size: seconding_limit) - // - // preallocated to (group size - 1) * group_size * seconding_limit. - incoming: Vec>, - - // a 2D matrix of accepted incoming `Seconded` messages from validators - // in the group. - // X: indicates the originating validator (size: group_size) - // Y: a seconded candidate we've accepted knowledge of locally (size: seconding_limit) - accepted: Vec>, - // TODO [now]: outgoing sends + knowledge: HashMap>, } impl DirectInGroup { @@ -100,128 +116,188 @@ impl DirectInGroup { return None } - let our_index = index_in_group(&group_validators, our_index)?; - - let incoming_size = (group_validators.len() - 1) * group_validators.len() * seconding_limit; - let accepted_size = group_validators.len() * seconding_limit; - - let incoming = vec![None; incoming_size]; - let accepted = vec![None; accepted_size]; + let _ = index_in_group(&group_validators, our_index)?; Some(DirectInGroup { validators: group_validators, our_index, seconding_limit, - incoming, - accepted, + knowledge: HashMap::new(), }) } - /// Handle an incoming `Seconded` statement from the given validator. - /// If the outcome is `Reject` then no internal state is altered. - pub fn handle_incoming_seconded( - &mut self, + /// Query whether we can receive some statement from the given validator. + /// + /// This does no deduplication of `Valid` statements. 
+ pub fn can_receive( + &self, sender: ValidatorIndex, originator: ValidatorIndex, - candidate_hash: CandidateHash, - ) -> Result { - let sender_index = match self.index_in_group(sender) { - None => return Err(RejectIncoming::NotInGroup), - Some(i) => i, - }; - - let originator_index = match self.index_in_group(sender) { - None => return Err(RejectIncoming::NotInGroup), - Some(i) => i, - }; - - if sender_index == self.our_index || originator_index == self.our_index { - return Err(RejectIncoming::NotInGroup) + statement: CompactStatement, + ) -> Result { + if self.they_sent(sender, Knowledge::Specific(statement.clone(), originator)) { + return Err(RejectIncoming::Duplicate) } - let range = self.incoming_range(sender_index, originator_index); - for i in range { - if self.incoming[i] == Some(candidate_hash) { - // duplicates get rejected. - return Err(RejectIncoming::PeerExcess) - } - - // ok, found an empty slot. - if self.incoming[i].is_none() { - self.incoming[i] = Some(candidate_hash); - return self.handle_accepted_incoming(originator_index, candidate_hash) - } + match statement { + CompactStatement::Seconded(candidate_hash) => { + // check whether the sender has not sent too many seconded statements for the originator. + // we know by the duplicate check above that this iterator doesn't include the + // statement itself. + let other_seconded_for_orig_from_remote = self + .knowledge + .get(&sender) + .into_iter() + .flat_map(|v_knowledge| v_knowledge.iter()) + .filter(|k| match k { + TaggedKnowledge::IncomingP2P(Knowledge::Specific( + CompactStatement::Seconded(_), + orig, + )) if orig == &originator => true, + _ => false, + }) + .count(); + + if other_seconded_for_orig_from_remote == self.seconding_limit { + return Err(RejectIncoming::ExcessiveSeconded) + } + + // at this point, it doesn't seem like the remote has done anything wrong. 
+ if self.seconded_already_or_within_limit(originator, candidate_hash) { + Ok(Accept::Ok) + } else { + Ok(Accept::WithPrejudice) + } + }, + CompactStatement::Valid(candidate_hash) => { + if !self.knows_candidate(sender, candidate_hash) { + return Err(RejectIncoming::CandidateUnknown) + } + + Ok(Accept::Ok) + }, } - - Err(RejectIncoming::PeerExcess) } - // TODO [now]: some API analogues to can_send / can_receive. + /// Query whether we can send a statement to a given validator. + pub fn can_send( + &self, + receiver: ValidatorIndex, + originator: ValidatorIndex, + statement: CompactStatement, + ) -> Result { + if self.we_sent(receiver, Knowledge::Specific(statement.clone(), originator)) { + return Err(RejectOutgoing::Known) + } - fn handle_accepted_incoming( - &mut self, - originator: usize, - candidate_hash: CandidateHash, - ) -> Result { - let range = self.accepted_range(originator); - for i in range { - if self.accepted[i] == Some(candidate_hash) { - return Ok(AcceptIncoming::YesKnown) - } - - if self.accepted[i].is_none() { - self.accepted[i] = Some(candidate_hash); - return Ok(AcceptIncoming::YesUnknown) - } + if self.they_sent(receiver, Knowledge::Specific(statement.clone(), originator)) { + return Err(RejectOutgoing::Known) } - Err(RejectIncoming::OriginatorExcess) + match statement { + CompactStatement::Seconded(candidate_hash) => { + // we send the same `Seconded` statements to all our peers, and only the first `k` from + // each originator. 
+ if !self.seconded_already_or_within_limit(originator, candidate_hash) { + return Err(RejectOutgoing::ExcessiveSeconded) + } + + Ok(Accept::Ok) + }, + CompactStatement::Valid(candidate_hash) => { + if !self.knows_candidate(receiver, candidate_hash) { + return Err(RejectOutgoing::CandidateUnknown) + } + + Ok(Accept::Ok) + }, + } } - fn index_in_group(&self, validator: ValidatorIndex) -> Option { - index_in_group(&self.validators, validator) + // returns true if it's legal to accept a new `Seconded` message from this validator. + // This is either + // 1. because we've already accepted it. + // 2. because there's space for more seconding. + fn seconded_already_or_within_limit( + &self, + validator: ValidatorIndex, + candidate_hash: CandidateHash, + ) -> bool { + let seconded_other_candidates = self + .knowledge + .get(&validator) + .into_iter() + .flat_map(|v_knowledge| v_knowledge.iter()) + .filter(|k| match k { + TaggedKnowledge::Seconded(c) if c != &candidate_hash => true, + _ => false, + }) + .count(); + + // This fulfills both properties by under-counting when the validator is at the limit + // but _has_ seconded the candidate already. + seconded_other_candidates < self.seconding_limit } - fn adjust_for_skipped_self(&self, index: usize) -> usize { - if index > self.our_index { - index - 1 - } else { - index - } + fn they_sent(&self, validator: ValidatorIndex, knowledge: Knowledge) -> bool { + self.knowledge + .get(&validator) + .map_or(false, |k| k.contains(&TaggedKnowledge::IncomingP2P(knowledge))) } - fn incoming_range(&self, sender: usize, originator: usize) -> Range { - // adjust X dimension to account for the fact that our index is skipped. 
- let sender = self.adjust_for_skipped_self(sender); - let base = (sender * (self.validators.len() - 1)) + originator * self.seconding_limit; + fn we_sent(&self, validator: ValidatorIndex, knowledge: Knowledge) -> bool { + self.knowledge + .get(&validator) + .map_or(false, |k| k.contains(&TaggedKnowledge::OutgoingP2P(knowledge))) + } + + fn knows_candidate(&self, validator: ValidatorIndex, candidate_hash: CandidateHash) -> bool { + self.we_sent_seconded(validator, candidate_hash) || + self.they_sent_seconded(validator, candidate_hash) + } - base..base + self.seconding_limit + fn we_sent_seconded(&self, validator: ValidatorIndex, candidate_hash: CandidateHash) -> bool { + self.we_sent(validator, Knowledge::General(candidate_hash)) } - fn accepted_range(&self, originator: usize) -> Range { - let base = originator * self.seconding_limit; - base..base + self.seconding_limit + fn they_sent_seconded(&self, validator: ValidatorIndex, candidate_hash: CandidateHash) -> bool { + self.they_sent(validator, Knowledge::General(candidate_hash)) } } -/// Incoming `Seconded` message was rejected. +/// Incoming statement was accepted. +#[derive(Debug, PartialEq)] +pub enum Accept { + /// Neither the peer nor the originator have apparently exceeded limits. + /// Candidate or statement may already be known. + Ok, + /// Accept the message; the peer hasn't exceeded limits but the originator has. + WithPrejudice, +} + +/// Incoming statement was rejected. +#[derive(Debug, PartialEq)] pub enum RejectIncoming { - /// Peer sent excessive messages. - PeerExcess, - /// Originator sent excessive messages, peer seems innocent. - OriginatorExcess, + /// Peer sent excessive `Seconded` statements. + ExcessiveSeconded, /// Sender or originator is not in the group. NotInGroup, + /// Candidate is unknown to us. Only applies to `Valid` statements. + CandidateUnknown, + /// Statement is duplicate. + Duplicate, } -/// Incoming `Seconded` message was accepted. 
-pub enum AcceptIncoming { - /// The `Seconded` statement was within the peer's limits and unknown - /// for the originator. - YesUnknown, - /// The `Seconded` statement was within the peer's limits and already - /// known for the originator. - YesKnown, +/// Outgoing statement was rejected. +#[derive(Debug, PartialEq)] +pub enum RejectOutgoing { + /// Candidate was unknown. ONly applies to `Valid` statements. + CandidateUnknown, + /// We attempted to send excessive `Seconded` statements. + /// indicates a bug on the local node's code. + ExcessiveSeconded, + /// The statement was already known to the peer. + Known, } fn index_in_group(validators: &[ValidatorIndex], index: ValidatorIndex) -> Option { From 54f06c06ca71d526e7ec828743dd23f3d6b8ece7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 26 Sep 2022 18:49:39 -0500 Subject: [PATCH 027/220] finish up the 'direct' logic --- .../src/vstaging/direct.rs | 98 ++++++++++++++----- 1 file changed, 76 insertions(+), 22 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/direct.rs b/node/network/statement-distribution/src/vstaging/direct.rs index 72f2b5c714ae..06d92256434a 100644 --- a/node/network/statement-distribution/src/vstaging/direct.rs +++ b/node/network/statement-distribution/src/vstaging/direct.rs @@ -95,32 +95,22 @@ enum TaggedKnowledge { /// See module docs for more details. pub struct DirectInGroup { validators: Vec, - our_index: ValidatorIndex, seconding_limit: usize, knowledge: HashMap>, } impl DirectInGroup { - /// Instantiate a new `DirectInGroup` tracker. Fails if `our_index` is out of bounds - /// or `group_validators` is empty or `our_index` is not in the group. + /// Instantiate a new `DirectInGroup` tracker. 
Fails if `group_validators` is empty pub fn new( group_validators: Vec, - our_index: ValidatorIndex, seconding_limit: usize, ) -> Option { if group_validators.is_empty() { return None } - if our_index.0 as usize >= group_validators.len() { - return None - } - - let _ = index_in_group(&group_validators, our_index)?; - Some(DirectInGroup { validators: group_validators, - our_index, seconding_limit, knowledge: HashMap::new(), }) @@ -135,6 +125,10 @@ impl DirectInGroup { originator: ValidatorIndex, statement: CompactStatement, ) -> Result { + if !self.is_in_group(sender) || !self.is_in_group(originator) { + return Err(RejectIncoming::NotInGroup) + } + if self.they_sent(sender, Knowledge::Specific(statement.clone(), originator)) { return Err(RejectIncoming::Duplicate) } @@ -179,18 +173,49 @@ impl DirectInGroup { } } + /// Note that we accepted an incoming statement. This updates internal structures. + /// Should only be called after a successful `can_receive` call. + pub fn note_received( + &mut self, + sender: ValidatorIndex, + originator: ValidatorIndex, + statement: CompactStatement, + ) { + { + let mut sender_knowledge = self.knowledge.entry(sender).or_default(); + sender_knowledge.insert(TaggedKnowledge::IncomingP2P( + Knowledge::Specific(statement.clone(), originator) + )); + + if let CompactStatement::Seconded(candidate_hash) = statement.clone() { + sender_knowledge.insert(TaggedKnowledge::IncomingP2P( + Knowledge::General(candidate_hash) + )); + } + } + + if let CompactStatement::Seconded(candidate_hash) = statement { + let mut originator_knowledge = self.knowledge.entry(originator).or_default(); + originator_knowledge.insert(TaggedKnowledge::Seconded(candidate_hash)); + } + } + /// Query whether we can send a statement to a given validator. 
pub fn can_send( &self, - receiver: ValidatorIndex, + target: ValidatorIndex, originator: ValidatorIndex, statement: CompactStatement, - ) -> Result { - if self.we_sent(receiver, Knowledge::Specific(statement.clone(), originator)) { + ) -> Result<(), RejectOutgoing> { + if !self.is_in_group(target) || !self.is_in_group(originator) { + return Err(RejectOutgoing::NotInGroup) + } + + if self.we_sent(target, Knowledge::Specific(statement.clone(), originator)) { return Err(RejectOutgoing::Known) } - if self.they_sent(receiver, Knowledge::Specific(statement.clone(), originator)) { + if self.they_sent(target, Knowledge::Specific(statement.clone(), originator)) { return Err(RejectOutgoing::Known) } @@ -202,18 +227,45 @@ impl DirectInGroup { return Err(RejectOutgoing::ExcessiveSeconded) } - Ok(Accept::Ok) + Ok(()) }, CompactStatement::Valid(candidate_hash) => { - if !self.knows_candidate(receiver, candidate_hash) { + if !self.knows_candidate(target, candidate_hash) { return Err(RejectOutgoing::CandidateUnknown) } - Ok(Accept::Ok) + Ok(()) }, } } + /// Note that we sent an outgoing statement to a peer in the group. + /// This must be preceded by a successful `can_send` call. 
+ pub fn note_sent( + &mut self, + target: ValidatorIndex, + originator: ValidatorIndex, + statement: CompactStatement, + ) { + { + let mut target_knowledge = self.knowledge.entry(target).or_default(); + target_knowledge.insert(TaggedKnowledge::OutgoingP2P( + Knowledge::Specific(statement.clone(), originator) + )); + + if let CompactStatement::Seconded(candidate_hash) = statement.clone() { + target_knowledge.insert(TaggedKnowledge::OutgoingP2P( + Knowledge::General(candidate_hash) + )); + } + } + + if let CompactStatement::Seconded(candidate_hash) = statement { + let mut originator_knowledge = self.knowledge.entry(originator).or_default(); + originator_knowledge.insert(TaggedKnowledge::Seconded(candidate_hash)); + } + } + // returns true if it's legal to accept a new `Seconded` message from this validator. // This is either // 1. because we've already accepted it. @@ -263,6 +315,10 @@ impl DirectInGroup { fn they_sent_seconded(&self, validator: ValidatorIndex, candidate_hash: CandidateHash) -> bool { self.they_sent(validator, Knowledge::General(candidate_hash)) } + + fn is_in_group(&self, validator: ValidatorIndex) -> bool { + self.validators.contains(&validator) + } } /// Incoming statement was accepted. @@ -298,8 +354,6 @@ pub enum RejectOutgoing { ExcessiveSeconded, /// The statement was already known to the peer. Known, -} - -fn index_in_group(validators: &[ValidatorIndex], index: ValidatorIndex) -> Option { - validators.iter().position(|v| v == &index) + /// Target or originator not in the group. 
+ NotInGroup, } From f4effa183cd0a59eeb4155b44030bacb0f050b93 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 26 Sep 2022 19:21:46 -0500 Subject: [PATCH 028/220] add a bunch of tests for the direct-in-group logic --- .../src/vstaging/direct.rs | 413 ++++++++++++++++++ 1 file changed, 413 insertions(+) diff --git a/node/network/statement-distribution/src/vstaging/direct.rs b/node/network/statement-distribution/src/vstaging/direct.rs index 06d92256434a..292ea05bc877 100644 --- a/node/network/statement-distribution/src/vstaging/direct.rs +++ b/node/network/statement-distribution/src/vstaging/direct.rs @@ -357,3 +357,416 @@ pub enum RejectOutgoing { /// Target or originator not in the group. NotInGroup, } + +#[cfg(test)] +mod tests { + use super::*; + use polkadot_primitives::vstaging::Hash; + + #[test] + fn rejects_incoming_outside_of_group() { + let group = vec![ + ValidatorIndex(5), + ValidatorIndex(200), + ValidatorIndex(24), + ValidatorIndex(146), + ]; + + let seconding_limit = 2; + + let tracker = DirectInGroup::new( + group.clone(), + seconding_limit, + ).expect("not empty"); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(100), + ValidatorIndex(5), + CompactStatement::Seconded(CandidateHash(Hash::repeat_byte(1))), + ), + Err(RejectIncoming::NotInGroup), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(100), + CompactStatement::Seconded(CandidateHash(Hash::repeat_byte(1))), + ), + Err(RejectIncoming::NotInGroup), + ); + } + + #[test] + fn begrudgingly_accepts_too_many_seconded_from_multiple_peers() { + let group = vec![ + ValidatorIndex(5), + ValidatorIndex(200), + ValidatorIndex(24), + ValidatorIndex(146), + ]; + + let seconding_limit = 2; + let hash_a = CandidateHash(Hash::repeat_byte(1)); + let hash_b = CandidateHash(Hash::repeat_byte(2)); + let hash_c = CandidateHash(Hash::repeat_byte(3)); + + let mut tracker = DirectInGroup::new( + group.clone(), + seconding_limit, + ).expect("not empty"); + + assert_eq!( 
+ tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_b), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_b), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_c), + ), + Err(RejectIncoming::ExcessiveSeconded), + ); + } + + #[test] + fn rejects_too_many_seconded_from_sender() { + let group = vec![ + ValidatorIndex(5), + ValidatorIndex(200), + ValidatorIndex(24), + ValidatorIndex(146), + ]; + + let seconding_limit = 2; + let hash_a = CandidateHash(Hash::repeat_byte(1)); + let hash_b = CandidateHash(Hash::repeat_byte(2)); + let hash_c = CandidateHash(Hash::repeat_byte(3)); + + let mut tracker = DirectInGroup::new( + group.clone(), + seconding_limit, + ).expect("not empty"); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_b), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_b), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_c), + ), + Ok(Accept::WithPrejudice), + ); + } + + #[test] + fn rejects_duplicates() { + let group = vec![ + ValidatorIndex(5), + ValidatorIndex(200), + ValidatorIndex(24), + ValidatorIndex(146), + ]; + + let seconding_limit = 2; + 
let hash_a = CandidateHash(Hash::repeat_byte(1)); + + let mut tracker = DirectInGroup::new( + group.clone(), + seconding_limit, + ).expect("not empty"); + + tracker.note_received( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + tracker.note_received( + ValidatorIndex(5), + ValidatorIndex(200), + CompactStatement::Valid(hash_a), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Err(RejectIncoming::Duplicate), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(200), + CompactStatement::Valid(hash_a), + ), + Err(RejectIncoming::Duplicate), + ); + } + + #[test] + fn rejects_incoming_valid_without_seconded() { + let group = vec![ + ValidatorIndex(5), + ValidatorIndex(200), + ValidatorIndex(24), + ValidatorIndex(146), + ]; + + let seconding_limit = 2; + + let tracker = DirectInGroup::new( + group.clone(), + seconding_limit, + ).expect("not empty"); + + let hash_a = CandidateHash(Hash::repeat_byte(1)); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Valid(hash_a), + ), + Err(RejectIncoming::CandidateUnknown), + ); + } + + #[test] + fn accepts_incoming_valid_after_receiving_seconded() { + let group = vec![ + ValidatorIndex(5), + ValidatorIndex(200), + ValidatorIndex(24), + ValidatorIndex(146), + ]; + + let seconding_limit = 2; + + let mut tracker = DirectInGroup::new( + group.clone(), + seconding_limit, + ).expect("not empty"); + let hash_a = CandidateHash(Hash::repeat_byte(1)); + + tracker.note_received( + ValidatorIndex(5), + ValidatorIndex(200), + CompactStatement::Seconded(hash_a), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Valid(hash_a), + ), + Ok(Accept::Ok) + ); + } + + #[test] + fn accepts_incoming_valid_after_outgoing_seconded() { + let group = vec![ + ValidatorIndex(5), + ValidatorIndex(200), + 
ValidatorIndex(24), + ValidatorIndex(146), + ]; + + let seconding_limit = 2; + + let mut tracker = DirectInGroup::new( + group.clone(), + seconding_limit, + ).expect("not empty"); + let hash_a = CandidateHash(Hash::repeat_byte(1)); + + tracker.note_sent( + ValidatorIndex(5), + ValidatorIndex(200), + CompactStatement::Seconded(hash_a), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Valid(hash_a), + ), + Ok(Accept::Ok) + ); + } + + #[test] + fn cannot_send_too_many_seconded_even_to_multiple_peers() { + let group = vec![ + ValidatorIndex(5), + ValidatorIndex(200), + ValidatorIndex(24), + ValidatorIndex(146), + ]; + + let seconding_limit = 2; + + let mut tracker = DirectInGroup::new( + group.clone(), + seconding_limit, + ).expect("not empty"); + let hash_a = CandidateHash(Hash::repeat_byte(1)); + let hash_b = CandidateHash(Hash::repeat_byte(2)); + let hash_c = CandidateHash(Hash::repeat_byte(3)); + + tracker.note_sent( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + tracker.note_sent( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_b), + ); + + assert_eq!( + tracker.can_send( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_c), + ), + Err(RejectOutgoing::ExcessiveSeconded), + ); + + assert_eq!( + tracker.can_send( + ValidatorIndex(24), + ValidatorIndex(5), + CompactStatement::Seconded(hash_c), + ), + Err(RejectOutgoing::ExcessiveSeconded), + ); + } + + #[test] + fn cannot_send_duplicate() { + let group = vec![ + ValidatorIndex(5), + ValidatorIndex(200), + ValidatorIndex(24), + ValidatorIndex(146), + ]; + + let seconding_limit = 2; + + let mut tracker = DirectInGroup::new( + group.clone(), + seconding_limit, + ).expect("not empty"); + let hash_a = CandidateHash(Hash::repeat_byte(1)); + + tracker.note_sent( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + assert_eq!( + 
tracker.can_send( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Err(RejectOutgoing::Known), + ); + } + + #[test] + fn cannot_send_what_was_received() { + let group = vec![ + ValidatorIndex(5), + ValidatorIndex(200), + ValidatorIndex(24), + ValidatorIndex(146), + ]; + + let seconding_limit = 2; + + let mut tracker = DirectInGroup::new( + group.clone(), + seconding_limit, + ).expect("not empty"); + let hash_a = CandidateHash(Hash::repeat_byte(1)); + + tracker.note_received( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + assert_eq!( + tracker.can_send( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Err(RejectOutgoing::Known), + ); + } +} From 26c86b74b8ddbdefd46101ddae4713fdb91846fd Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 26 Sep 2022 23:33:36 -0500 Subject: [PATCH 029/220] rename 'direct' to 'cluster', begin a candidate_entry module --- .../src/vstaging/candidate_entry.rs | 95 ++++++++ .../src/vstaging/{direct.rs => cluster.rs} | 209 +++++++---------- .../src/vstaging/mod.rs | 212 ++++++------------ 3 files changed, 247 insertions(+), 269 deletions(-) create mode 100644 node/network/statement-distribution/src/vstaging/candidate_entry.rs rename node/network/statement-distribution/src/vstaging/{direct.rs => cluster.rs} (82%) diff --git a/node/network/statement-distribution/src/vstaging/candidate_entry.rs b/node/network/statement-distribution/src/vstaging/candidate_entry.rs new file mode 100644 index 000000000000..f3f883b460da --- /dev/null +++ b/node/network/statement-distribution/src/vstaging/candidate_entry.rs @@ -0,0 +1,95 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A [`CandidateEntry`] tracks all info concerning a candidate block. +//! +//! This entity doesn't actually store the statements about the candidate, +//! just metadata of which validators have seconded or validated the +//! candidate, and the candidate and [`PersistedValidationData`] itself, +//! if that has already been fetched. +//! +//! Note that it is possible for validators for multiple groups to second +//! a candidate. Given that each candidate's para and relay-parent is +//! determined by the candidate hash, and the current scheduling mechanism +//! of the relay-chain only schedules one group per para per relay-parent, +//! this is certainly in error. Nevertheless, if we receive statements about +//! a candidate _prior_ to fetching the candidate itself, we do not have +//! confirmation of which group is assigned to the para in actuality. + +use polkadot_primitives::vstaging::{ + CandidateHash, CommittedCandidateReceipt, GroupIndex, PersistedValidationData, +}; + +use std::collections::HashMap; + +/// A tracker for all validators which have seconded or validated a particular +/// candidate. See module docs for more details. 
+pub struct CandidateEntry { + candidate_hash: CandidateHash, + state: CandidateState, +} + +impl CandidateEntry { + /// Create a blank [`CandidateEntry`] + pub fn new(candidate_hash: CandidateHash) -> Self { + CandidateEntry { candidate_hash, state: CandidateState::Unconfirmed(HashMap::new()) } + } + + /// Supply the [`CommittedCandidateReceipt`] and [`PersistedValidationData`]. + /// This does not check that the receipt matches the candidate hash nor that the PVD + /// matches the commitment in the candidate's descriptor. Also, supply + /// the intended [`GroupIndex`] assigned to the para at the relay-parent + /// of the candidate-receipt. + /// + /// No-op if already provided. + pub fn confirm( + &mut self, + candidate: CommittedCandidateReceipt, + pvd: PersistedValidationData, + group_index: GroupIndex, + ) { + } + + /// Whether the candidate is confirmed to actually exist. + pub fn is_confirmed(&self) -> bool { + match self.state { + CandidateState::Unconfirmed(_) => false, + CandidateState::Confirmed { .. } => true, + } + } + + /// The receipt of the candidate descriptor. Only exists if confirmed. + pub fn receipt(&self) -> Option<&CommittedCandidateReceipt> { + match self.state { + CandidateState::Confirmed { ref candidate, .. 
} => Some(candidate), + CandidateState::Unconfirmed(_) => None, + } + } +} + +enum CandidateState { + Unconfirmed(HashMap), + Confirmed { + candidate: CommittedCandidateReceipt, + persisted_validation_data: PersistedValidationData, + group_index: GroupIndex, + attestations: GroupAttestationRecord, + }, +} + +struct GroupAttestationRecord { + // TODO [now] +} diff --git a/node/network/statement-distribution/src/vstaging/direct.rs b/node/network/statement-distribution/src/vstaging/cluster.rs similarity index 82% rename from node/network/statement-distribution/src/vstaging/direct.rs rename to node/network/statement-distribution/src/vstaging/cluster.rs index 292ea05bc877..d8f96243fcec 100644 --- a/node/network/statement-distribution/src/vstaging/direct.rs +++ b/node/network/statement-distribution/src/vstaging/cluster.rs @@ -14,8 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Direct distribution of statements, even those concerning candidates which -//! are not yet backed. +//! Direct distribution of statements within a cluster, +//! even those concerning candidates which are not yet backed. //! //! Members of a validation group assigned to a para at a given relay-parent //! always distribute statements directly to each other. @@ -49,7 +49,7 @@ //! on fire. Nevertheless, we handle the case here to ensure that the behavior of the //! system is well-defined even if an adversary is willing to be slashed. //! -//! More concretely, this module exposes a "DirectInGroup" utility which allows us to determine +//! More concretely, this module exposes a "ClusterTracker" utility which allows us to determine //! whether to accept or reject messages from other validators in the same group as we //! are in, based on _the most charitable possible interpretation of our protocol rules_, //! 
and to keep track of what we have sent to other validators in the group and what we may @@ -93,24 +93,21 @@ enum TaggedKnowledge { /// Utility for keeping track of limits on direct statements within a group. /// /// See module docs for more details. -pub struct DirectInGroup { +pub struct ClusterTracker { validators: Vec, seconding_limit: usize, knowledge: HashMap>, } -impl DirectInGroup { - /// Instantiate a new `DirectInGroup` tracker. Fails if `group_validators` is empty - pub fn new( - group_validators: Vec, - seconding_limit: usize, - ) -> Option { - if group_validators.is_empty() { +impl ClusterTracker { + /// Instantiate a new `ClusterTracker` tracker. Fails if `cluster_validators` is empty + pub fn new(cluster_validators: Vec, seconding_limit: usize) -> Option { + if cluster_validators.is_empty() { return None } - Some(DirectInGroup { - validators: group_validators, + Some(ClusterTracker { + validators: cluster_validators, seconding_limit, knowledge: HashMap::new(), }) @@ -183,14 +180,14 @@ impl DirectInGroup { ) { { let mut sender_knowledge = self.knowledge.entry(sender).or_default(); - sender_knowledge.insert(TaggedKnowledge::IncomingP2P( - Knowledge::Specific(statement.clone(), originator) - )); + sender_knowledge.insert(TaggedKnowledge::IncomingP2P(Knowledge::Specific( + statement.clone(), + originator, + ))); if let CompactStatement::Seconded(candidate_hash) = statement.clone() { - sender_knowledge.insert(TaggedKnowledge::IncomingP2P( - Knowledge::General(candidate_hash) - )); + sender_knowledge + .insert(TaggedKnowledge::IncomingP2P(Knowledge::General(candidate_hash))); } } @@ -249,14 +246,14 @@ impl DirectInGroup { ) { { let mut target_knowledge = self.knowledge.entry(target).or_default(); - target_knowledge.insert(TaggedKnowledge::OutgoingP2P( - Knowledge::Specific(statement.clone(), originator) - )); + target_knowledge.insert(TaggedKnowledge::OutgoingP2P(Knowledge::Specific( + statement.clone(), + originator, + ))); if let 
CompactStatement::Seconded(candidate_hash) = statement.clone() { - target_knowledge.insert(TaggedKnowledge::OutgoingP2P( - Knowledge::General(candidate_hash) - )); + target_knowledge + .insert(TaggedKnowledge::OutgoingP2P(Knowledge::General(candidate_hash))); } } @@ -266,6 +263,25 @@ impl DirectInGroup { } } + /// Get all targets as validator-indices. This doesn't attempt to filter + /// out the local validator index. + pub fn targets(&self) -> &[ValidatorIndex] { + &self.validators + } + + /// Whether a validator knows the candidate is `Seconded`. + pub fn knows_candidate( + &self, + validator: ValidatorIndex, + candidate_hash: CandidateHash, + ) -> bool { + // we sent, they sent, or they signed and we received from someone else. + + self.we_sent_seconded(validator, candidate_hash) || + self.they_sent_seconded(validator, candidate_hash) || + self.validator_seconded(validator, candidate_hash) + } + // returns true if it's legal to accept a new `Seconded` message from this validator. // This is either // 1. because we've already accepted it. 
@@ -303,11 +319,6 @@ impl DirectInGroup { .map_or(false, |k| k.contains(&TaggedKnowledge::OutgoingP2P(knowledge))) } - fn knows_candidate(&self, validator: ValidatorIndex, candidate_hash: CandidateHash) -> bool { - self.we_sent_seconded(validator, candidate_hash) || - self.they_sent_seconded(validator, candidate_hash) - } - fn we_sent_seconded(&self, validator: ValidatorIndex, candidate_hash: CandidateHash) -> bool { self.we_sent(validator, Knowledge::General(candidate_hash)) } @@ -316,6 +327,12 @@ impl DirectInGroup { self.they_sent(validator, Knowledge::General(candidate_hash)) } + fn validator_seconded(&self, validator: ValidatorIndex, candidate_hash: CandidateHash) -> bool { + self.knowledge + .get(&validator) + .map_or(false, |k| k.contains(&TaggedKnowledge::Seconded(candidate_hash))) + } + fn is_in_group(&self, validator: ValidatorIndex) -> bool { self.validators.contains(&validator) } @@ -347,7 +364,7 @@ pub enum RejectIncoming { /// Outgoing statement was rejected. #[derive(Debug, PartialEq)] pub enum RejectOutgoing { - /// Candidate was unknown. ONly applies to `Valid` statements. + /// Candidate was unknown. Only applies to `Valid` statements. CandidateUnknown, /// We attempted to send excessive `Seconded` statements. /// indicates a bug on the local node's code. 
@@ -365,19 +382,12 @@ mod tests { #[test] fn rejects_incoming_outside_of_group() { - let group = vec![ - ValidatorIndex(5), - ValidatorIndex(200), - ValidatorIndex(24), - ValidatorIndex(146), - ]; + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; let seconding_limit = 2; - let tracker = DirectInGroup::new( - group.clone(), - seconding_limit, - ).expect("not empty"); + let tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); assert_eq!( tracker.can_receive( @@ -400,22 +410,15 @@ mod tests { #[test] fn begrudgingly_accepts_too_many_seconded_from_multiple_peers() { - let group = vec![ - ValidatorIndex(5), - ValidatorIndex(200), - ValidatorIndex(24), - ValidatorIndex(146), - ]; + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; let seconding_limit = 2; let hash_a = CandidateHash(Hash::repeat_byte(1)); let hash_b = CandidateHash(Hash::repeat_byte(2)); let hash_c = CandidateHash(Hash::repeat_byte(3)); - let mut tracker = DirectInGroup::new( - group.clone(), - seconding_limit, - ).expect("not empty"); + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); assert_eq!( tracker.can_receive( @@ -457,22 +460,15 @@ mod tests { #[test] fn rejects_too_many_seconded_from_sender() { - let group = vec![ - ValidatorIndex(5), - ValidatorIndex(200), - ValidatorIndex(24), - ValidatorIndex(146), - ]; + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; let seconding_limit = 2; let hash_a = CandidateHash(Hash::repeat_byte(1)); let hash_b = CandidateHash(Hash::repeat_byte(2)); let hash_c = CandidateHash(Hash::repeat_byte(3)); - let mut tracker = DirectInGroup::new( - group.clone(), - seconding_limit, - ).expect("not empty"); + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); assert_eq!( tracker.can_receive( @@ -514,20 +510,13 @@ 
mod tests { #[test] fn rejects_duplicates() { - let group = vec![ - ValidatorIndex(5), - ValidatorIndex(200), - ValidatorIndex(24), - ValidatorIndex(146), - ]; + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; let seconding_limit = 2; let hash_a = CandidateHash(Hash::repeat_byte(1)); - let mut tracker = DirectInGroup::new( - group.clone(), - seconding_limit, - ).expect("not empty"); + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); tracker.note_received( ValidatorIndex(5), @@ -562,19 +551,12 @@ mod tests { #[test] fn rejects_incoming_valid_without_seconded() { - let group = vec![ - ValidatorIndex(5), - ValidatorIndex(200), - ValidatorIndex(24), - ValidatorIndex(146), - ]; + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; let seconding_limit = 2; - let tracker = DirectInGroup::new( - group.clone(), - seconding_limit, - ).expect("not empty"); + let tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); let hash_a = CandidateHash(Hash::repeat_byte(1)); @@ -590,19 +572,12 @@ mod tests { #[test] fn accepts_incoming_valid_after_receiving_seconded() { - let group = vec![ - ValidatorIndex(5), - ValidatorIndex(200), - ValidatorIndex(24), - ValidatorIndex(146), - ]; + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; let seconding_limit = 2; - let mut tracker = DirectInGroup::new( - group.clone(), - seconding_limit, - ).expect("not empty"); + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); let hash_a = CandidateHash(Hash::repeat_byte(1)); tracker.note_received( @@ -623,19 +598,12 @@ mod tests { #[test] fn accepts_incoming_valid_after_outgoing_seconded() { - let group = vec![ - ValidatorIndex(5), - ValidatorIndex(200), - ValidatorIndex(24), - ValidatorIndex(146), - ]; + let group = + vec![ValidatorIndex(5), 
ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; let seconding_limit = 2; - let mut tracker = DirectInGroup::new( - group.clone(), - seconding_limit, - ).expect("not empty"); + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); let hash_a = CandidateHash(Hash::repeat_byte(1)); tracker.note_sent( @@ -656,19 +624,12 @@ mod tests { #[test] fn cannot_send_too_many_seconded_even_to_multiple_peers() { - let group = vec![ - ValidatorIndex(5), - ValidatorIndex(200), - ValidatorIndex(24), - ValidatorIndex(146), - ]; + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; let seconding_limit = 2; - let mut tracker = DirectInGroup::new( - group.clone(), - seconding_limit, - ).expect("not empty"); + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); let hash_a = CandidateHash(Hash::repeat_byte(1)); let hash_b = CandidateHash(Hash::repeat_byte(2)); let hash_c = CandidateHash(Hash::repeat_byte(3)); @@ -706,19 +667,12 @@ mod tests { #[test] fn cannot_send_duplicate() { - let group = vec![ - ValidatorIndex(5), - ValidatorIndex(200), - ValidatorIndex(24), - ValidatorIndex(146), - ]; + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; let seconding_limit = 2; - let mut tracker = DirectInGroup::new( - group.clone(), - seconding_limit, - ).expect("not empty"); + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); let hash_a = CandidateHash(Hash::repeat_byte(1)); tracker.note_sent( @@ -739,19 +693,12 @@ mod tests { #[test] fn cannot_send_what_was_received() { - let group = vec![ - ValidatorIndex(5), - ValidatorIndex(200), - ValidatorIndex(24), - ValidatorIndex(146), - ]; + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; let seconding_limit = 2; - let mut tracker = DirectInGroup::new( - group.clone(), - 
seconding_limit, - ).expect("not empty"); + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); let hash_a = CandidateHash(Hash::repeat_byte(1)); tracker.note_received( diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 3d2ee546eac7..5983d0316d71 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -48,8 +48,11 @@ use crate::{ error::{JfyiError, JfyiErrorResult}, LOG_TARGET, }; +use candidate_entry::CandidateEntry; +use cluster::ClusterTracker; -mod direct; +mod candidate_entry; +mod cluster; const COST_UNEXPECTED_STATEMENT: Rep = Rep::CostMinor("Unexpected Statement"); const COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE: Rep = @@ -63,7 +66,7 @@ const COST_INVALID_SIGNATURE: Rep = Rep::CostMajor("Invalid Statement Signature" struct PerRelayParentState { validator_state: HashMap, - candidates: HashMap, + candidates: HashMap, local_validator: Option, session: SessionIndex, } @@ -73,94 +76,6 @@ struct PerRelayParentValidatorState { group_id: GroupIndex, } -// stores statements and the candidate receipt/persisted validation data if any. -struct CandidateData { - state: CandidateState, - seconded_statements: Vec, - valid_statements: Vec, - - // validators which have either produced a statement about the - // candidate or which have sent a signed statement or which we have - // sent statements to. 
- known_by: HashSet, -} - -impl Default for CandidateData { - fn default() -> Self { - CandidateData { - state: CandidateState::Unconfirmed, - seconded_statements: Vec::new(), - valid_statements: Vec::new(), - known_by: HashSet::new(), - } - } -} - -impl CandidateData { - fn has_issued_seconded(&self, validator: ValidatorIndex) -> bool { - self.seconded_statements - .iter() - .find(|s| s.validator_index() == validator) - .is_some() - } - - fn has_issued_valid(&self, validator: ValidatorIndex) -> bool { - self.valid_statements - .iter() - .find(|s| s.validator_index() == validator) - .is_some() - } - - // ignores duplicates or equivocations. returns 'false' if those are detected, 'true' otherwise. - fn insert_signed_statement(&mut self, statement: SignedStatement) -> bool { - let validator_index = statement.validator_index(); - - // only accept one statement by the validator. - let has_issued_statement = - self.has_issued_seconded(validator_index) || self.has_issued_valid(validator_index); - if has_issued_statement { - return false - } - - match statement.payload() { - CompactStatement::Seconded(_) => self.seconded_statements.push(statement), - CompactStatement::Valid(_) => self.valid_statements.push(statement), - } - - self.known_by.insert(validator_index); - - true - } - - fn note_known_by(&mut self, validator: ValidatorIndex) { - self.known_by.insert(validator); - } -} - -enum CandidateState { - /// The candidate is unconfirmed to exist, as it hasn't yet - /// been fetched. - Unconfirmed, - /// The candidate is confirmed and we have the `PersistedValidationData`. 
- Confirmed(CommittedCandidateReceipt, PersistedValidationData), -} - -impl CandidateState { - fn is_confirmed(&self) -> bool { - match *self { - CandidateState::Unconfirmed => false, - CandidateState::Confirmed(_, _) => true, - } - } - - fn receipt(&self) -> Option<&CommittedCandidateReceipt> { - match *self { - CandidateState::Unconfirmed => None, - CandidateState::Confirmed(ref c, _) => Some(c), - } - } -} - // per-relay-parent local validator state. struct LocalValidatorState { // The index of the validator. @@ -169,6 +84,8 @@ struct LocalValidatorState { group: GroupIndex, // the assignment of our validator group, if any. assignment: Option, + // the 'direct-in-group' communication at this relay-parent. + cluster_tracker: ClusterTracker, } pub(crate) struct State { @@ -382,10 +299,16 @@ async fn find_local_validator_state( let para_for_group = |g: GroupIndex| availability_cores.get(g.0 as usize).and_then(|c| c.para_id()); + let group_validators = groups[our_group.0 as usize].clone(); Some(LocalValidatorState { index: validator_index, group: our_group, assignment: para_for_group(our_group), + cluster_tracker: ClusterTracker::new( + group_validators, + todo!(), // TODO [now]: seconding limit? + ) + .expect("group is non-empty because we are in it; qed"), }) } @@ -420,10 +343,11 @@ pub(crate) async fn share_local_statement( Some(x) => x, }; - let (local_index, local_assignment) = match per_relay_parent.local_validator.as_ref() { - None => return Err(JfyiError::InvalidShare), - Some(l) => (l.index, l.assignment), - }; + let (local_index, local_assignment, local_group) = + match per_relay_parent.local_validator.as_ref() { + None => return Err(JfyiError::InvalidShare), + Some(l) => (l.index, l.assignment, l.group), + }; // Two possibilities: either the statement is `Seconded` or we already // have the candidate. Sanity: check the para-id is valid. 
@@ -432,7 +356,7 @@ pub(crate) async fn share_local_statement( FullStatementWithPVD::Valid(hash) => per_relay_parent .candidates .get(&hash) - .and_then(|c| c.state.receipt()) + .and_then(|c| c.receipt()) .map(|c| c.descriptor().para_id), }; @@ -454,11 +378,13 @@ pub(crate) async fn share_local_statement( let candidate_entry = match statement.payload() { FullStatementWithPVD::Seconded(ref c, ref pvd) => { - let candidate_entry = - per_relay_parent.candidates.entry(candidate_hash).or_default(); + let candidate_entry = per_relay_parent + .candidates + .entry(candidate_hash) + .or_insert_with(|| CandidateEntry::new(candidate_hash)); - if let CandidateState::Unconfirmed = candidate_entry.state { - candidate_entry.state = CandidateState::Confirmed(c.clone(), pvd.clone()); + if !candidate_entry.is_confirmed() { + candidate_entry.confirm(c.clone(), pvd.clone(), local_group); } candidate_entry @@ -469,7 +395,7 @@ pub(crate) async fn share_local_statement( // Can't share a 'Valid' statement about a candidate we don't know about! return Err(JfyiError::InvalidShare) }, - Some(ref c) if !c.state.is_confirmed() => { + Some(ref c) if !c.is_confirmed() => { // Can't share a 'Valid' statement about a candidate we don't know about! return Err(JfyiError::InvalidShare) }, @@ -478,22 +404,23 @@ pub(crate) async fn share_local_statement( }, }; - if !candidate_entry.insert_signed_statement(compact_statement.clone()) { - gum::warn!( - target: LOG_TARGET, - statement = ?compact_statement.payload(), - "Candidate backing issued redundant statement?", - ); + // TODO [now]: note seconded. 
+ // if !candidate_entry.insert_signed_statement(compact_statement.clone()) { + // gum::warn!( + // target: LOG_TARGET, + // statement = ?compact_statement.payload(), + // "Candidate backing issued redundant statement?", + // ); - return Err(JfyiError::InvalidShare) - } + // return Err(JfyiError::InvalidShare) + // } (compact_statement, candidate_hash) }; // send the compact version of the statement to nodes in current group and next-up. If not a `Seconded` statement, // send a `Seconded` statement as well. - broadcast_local_statement(ctx, state, relay_parent, compact_statement).await; + send_statement_direct(ctx, state, relay_parent, compact_statement).await; // TODO [now]: // 4. If the candidate is now backed, trigger 'backed candidate announcement' logic. @@ -514,7 +441,7 @@ pub(crate) async fn share_local_statement( // TODO [now]: make this a more general `broadcast_statement` with an `BroadcastBehavior` that // affects targets: `Local` keeps current behavior while `Forward` only sends onwards via `BackedCandidate` knowers. 
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn broadcast_local_statement( +async fn send_statement_direct( ctx: &mut Context, state: &mut State, relay_parent: Hash, @@ -536,55 +463,64 @@ async fn broadcast_local_statement( None => return, }; - let prior_seconded = match statement.payload() { - CompactStatement::Seconded(_) => None, - CompactStatement::Valid(_) => match candidate_entry.seconded_statements.first() { - Some(s) => Some(s.as_unchecked().clone()), - None => return, - }, - }; + // TODO [now]: get a seconding statement from the 'statement store', TBD + let prior_seconded: Option = unimplemented!(); + // TODO [now]: clean up this junk below + // let prior_seconded = match statement.payload() { + // CompactStatement::Seconded(_) => None, + // CompactStatement::Valid(_) => match candidate_entry.seconded_statements.first() { + // Some(s) => Some(s.as_unchecked().clone()), + // None => return, + // }, + // }; + + // two kinds of targets: those in our 'cluster' (currently just those in the same group), + // and those we are propagating to through the grid. + enum TargetKind { + Cluster, + Grid, + } - let targets = { + let targets: Vec<(ValidatorIndex, AuthorityDiscoveryId, TargetKind)> = { let local_validator = match per_relay_parent.local_validator.as_ref() { Some(v) => v, None => return, // sanity: should be impossible to reach this. }; - let current_group = - session_info.validator_groups[local_validator.group.0 as usize].iter().cloned(); - - // TODO [now]: extend targets with validators in any current leaf which - // are assigned to the group - - // TODO [now]: extend targets with validators which - // a) we've sent `BackedCandidateInv` for this candidate to - // b) have either requested the candidate _or_ have sent `BackedCandidateKnown` to us. 
+ let current_group = local_validator + .cluster_tracker + .targets() + .iter() + .filter(|&v| v != &local_validator.index) + .map(|v| (*v, TargetKind::Cluster)); - // TODO [now]: dedup + // TODO [now]: extend with grid targets, dedup current_group - .filter_map(|v| session_info.discovery_keys.get(v.0 as usize).map(|a| (v, a.clone()))) + .filter_map(|(v, k)| { + session_info.discovery_keys.get(v.0 as usize).map(|a| (v, a.clone(), k)) + }) .collect::>() }; let mut prior_to = Vec::new(); let mut statement_to = Vec::new(); - for (validator_index, authority_id) in targets { + for (validator_index, authority_id, kind) in targets { // Find peer ID based on authority ID, and also filter to connected. let peer_id: PeerId = match state.authorities.get(&authority_id) { Some(p) if state.peers.contains_key(p) => p.clone(), None | Some(_) => continue, }; - // We guarantee that the receiving peer knows the candidate by - // sending them a `Seconded` statement first. - if candidate_entry.known_by.insert(validator_index) { - if let Some(_) = prior_seconded.as_ref() { - prior_to.push(peer_id.clone()); - } + match kind { + TargetKind::Cluster => { + // TODO [now]: use cluster mechanics to determine whether to sent + // 'prior_to' and 'statement_to'. + }, + TargetKind::Grid => { + // TODO [now] + }, } - - statement_to.push(peer_id); } // ship off the network messages to the network bridge. 
From 9dd68f16b10358d42a836a0cb4d5a6a19a81a1ee Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 28 Sep 2022 13:46:46 -0500 Subject: [PATCH 030/220] distill candidate_entry --- .../src/vstaging/candidate_entry.rs | 72 +++++++++++-------- .../src/vstaging/mod.rs | 21 +++--- 2 files changed, 49 insertions(+), 44 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/candidate_entry.rs b/node/network/statement-distribution/src/vstaging/candidate_entry.rs index f3f883b460da..e6813e1b60fc 100644 --- a/node/network/statement-distribution/src/vstaging/candidate_entry.rs +++ b/node/network/statement-distribution/src/vstaging/candidate_entry.rs @@ -33,8 +33,6 @@ use polkadot_primitives::vstaging::{ CandidateHash, CommittedCandidateReceipt, GroupIndex, PersistedValidationData, }; -use std::collections::HashMap; - /// A tracker for all validators which have seconded or validated a particular /// candidate. See module docs for more details. pub struct CandidateEntry { @@ -43,53 +41,65 @@ pub struct CandidateEntry { } impl CandidateEntry { - /// Create a blank [`CandidateEntry`] - pub fn new(candidate_hash: CandidateHash) -> Self { - CandidateEntry { candidate_hash, state: CandidateState::Unconfirmed(HashMap::new()) } + /// Create an unconfirmed [`CandidateEntry`] + pub fn unconfirmed(candidate_hash: CandidateHash) -> Self { + CandidateEntry { candidate_hash, state: CandidateState::Unconfirmed } + } + + /// Create a confirmed [`CandidateEntry`] + pub fn confirmed( + candidate_hash: CandidateHash, + receipt: CommittedCandidateReceipt, + persisted_validation_data: PersistedValidationData, + ) -> Self { + CandidateEntry { + candidate_hash, + state: CandidateState::Confirmed(receipt, persisted_validation_data), + } } /// Supply the [`CommittedCandidateReceipt`] and [`PersistedValidationData`]. /// This does not check that the receipt matches the candidate hash nor that the PVD - /// matches the commitment in the candidate's descriptor. 
Also, supply - /// the intended [`GroupIndex`] assigned to the para at the relay-parent - /// of the candidate-receipt. + /// matches the commitment in the candidate's descriptor. /// /// No-op if already provided. - pub fn confirm( - &mut self, - candidate: CommittedCandidateReceipt, - pvd: PersistedValidationData, - group_index: GroupIndex, - ) { + pub fn confirm(&mut self, candidate: CommittedCandidateReceipt, pvd: PersistedValidationData) { + if let CandidateState::Confirmed(_, _) = self.state { + return + } + self.state = CandidateState::Confirmed(candidate, pvd); } /// Whether the candidate is confirmed to actually exist. pub fn is_confirmed(&self) -> bool { match self.state { - CandidateState::Unconfirmed(_) => false, - CandidateState::Confirmed { .. } => true, + CandidateState::Confirmed(_, _) => true, + CandidateState::Unconfirmed => false, } } - /// The receipt of the candidate descriptor. Only exists if confirmed. - pub fn receipt(&self) -> Option<&CommittedCandidateReceipt> { + /// The internals of a confirmed candidate. Exists iff confirmed. + pub fn confirmed_internals( + &self, + ) -> Option<(&CommittedCandidateReceipt, &PersistedValidationData)> { match self.state { - CandidateState::Confirmed { ref candidate, .. } => Some(candidate), - CandidateState::Unconfirmed(_) => None, + CandidateState::Confirmed(ref c, ref pvd) => Some((c, pvd)), + CandidateState::Unconfirmed => None, } } -} -enum CandidateState { - Unconfirmed(HashMap), - Confirmed { - candidate: CommittedCandidateReceipt, - persisted_validation_data: PersistedValidationData, - group_index: GroupIndex, - attestations: GroupAttestationRecord, - }, + /// The receipt of the candidate. Exists iff confirmed. + pub fn receipt(&self) -> Option<&CommittedCandidateReceipt> { + self.confirmed_internals().map(|(c, _)| c) + } + + /// The persisted-validation-data of the candidate. Exists iff confirmed. 
+ pub fn persisted_validation_data(&self) -> Option<&PersistedValidationData> { + self.confirmed_internals().map(|(_, p)| p) + } } -struct GroupAttestationRecord { - // TODO [now] +enum CandidateState { + Unconfirmed, + Confirmed(CommittedCandidateReceipt, PersistedValidationData), } diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 5983d0316d71..601f8f2fa9ab 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -343,11 +343,10 @@ pub(crate) async fn share_local_statement( Some(x) => x, }; - let (local_index, local_assignment, local_group) = - match per_relay_parent.local_validator.as_ref() { - None => return Err(JfyiError::InvalidShare), - Some(l) => (l.index, l.assignment, l.group), - }; + let (local_index, local_assignment) = match per_relay_parent.local_validator.as_ref() { + None => return Err(JfyiError::InvalidShare), + Some(l) => (l.index, l.assignment), + }; // Two possibilities: either the statement is `Seconded` or we already // have the candidate. Sanity: check the para-id is valid. 
@@ -378,14 +377,10 @@ pub(crate) async fn share_local_statement( let candidate_entry = match statement.payload() { FullStatementWithPVD::Seconded(ref c, ref pvd) => { - let candidate_entry = per_relay_parent - .candidates - .entry(candidate_hash) - .or_insert_with(|| CandidateEntry::new(candidate_hash)); - - if !candidate_entry.is_confirmed() { - candidate_entry.confirm(c.clone(), pvd.clone(), local_group); - } + let candidate_entry = + per_relay_parent.candidates.entry(candidate_hash).or_insert_with(|| { + CandidateEntry::confirmed(candidate_hash, c.clone(), pvd.clone()) + }); candidate_entry }, From 9727d8c2920534ae07448ac31b43a52c6a7e7bd3 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 28 Sep 2022 14:42:34 -0500 Subject: [PATCH 031/220] start in on a statement-store module --- Cargo.lock | 1 + .../network/statement-distribution/Cargo.toml | 1 + .../src/vstaging/mod.rs | 1 + .../src/vstaging/statement_store.rs | 141 ++++++++++++++++++ 4 files changed, 144 insertions(+) create mode 100644 node/network/statement-distribution/src/vstaging/statement_store.rs diff --git a/Cargo.lock b/Cargo.lock index bbe3ce7a2b1f..82ba1f23d278 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7357,6 +7357,7 @@ version = "0.9.29" dependencies = [ "arrayvec 0.5.2", "assert_matches", + "bitvec 1.0.0", "fatality", "futures", "futures-timer", diff --git a/node/network/statement-distribution/Cargo.toml b/node/network/statement-distribution/Cargo.toml index a173ee865e20..ddfb02a7243c 100644 --- a/node/network/statement-distribution/Cargo.toml +++ b/node/network/statement-distribution/Cargo.toml @@ -20,6 +20,7 @@ indexmap = "1.9.1" parity-scale-codec = { version = "3.1.5", default-features = false, features = ["derive"] } thiserror = "1.0.31" fatality = "0.0.6" +bitvec = "1" [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/node/network/statement-distribution/src/vstaging/mod.rs 
b/node/network/statement-distribution/src/vstaging/mod.rs index 601f8f2fa9ab..3ceaf2cef034 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -53,6 +53,7 @@ use cluster::ClusterTracker; mod candidate_entry; mod cluster; +mod statement_store; const COST_UNEXPECTED_STATEMENT: Rep = Rep::CostMinor("Unexpected Statement"); const COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE: Rep = diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs new file mode 100644 index 000000000000..c101c28748b6 --- /dev/null +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -0,0 +1,141 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A store of all statements under a given relay-parent. +//! +//! This structure doesn't attempt to do any spam protection, which must +//! be provided at a higher level. +//! +//! This keeps track of statements submitted with a number of different of +//! views into this data: views based on the candidate, views based on the validator +//! groups, and views based on the validators themselves. 
+ +use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; +use polkadot_primitives::vstaging::{ + CandidateHash, CompactStatement, GroupIndex, SignedStatement, ValidatorIndex, +}; +use std::collections::hash_map::{Entry as HEntry, HashMap}; + +/// Storage for statements. Intended to be used for statements signed under +/// the same relay-parent. See module docs for more details. +pub struct StatementStore { + groups: Vec>, + validator_meta: HashMap, + group_statements: HashMap<(GroupIndex, CandidateHash), GroupStatements>, + known_statements: HashMap, +} + +impl StatementStore { + /// Create a new [`StatementStore`] + pub fn new(groups: Vec>) -> Self { + let mut validator_meta = HashMap::new(); + for (g, group) in groups.iter().enumerate() { + for (i, v) in group.iter().enumerate() { + validator_meta.insert( + v, + ValidatorMeta { + seconded_count: 0, + within_group_index: i, + group: GroupIndex(g as _), + }, + ); + } + } + + StatementStore { + groups, + validator_meta: HashMap::new(), + group_statements: HashMap::new(), + known_statements: HashMap::new(), + } + } + + /// Insert a statement. Returns `true` if was not known already, `false` if it was. + /// Ignores statements by unknown validators and returns `false`. + pub fn insert(&mut self, statement: SignedStatement) -> bool { + let validator_index = statement.validator_index(); + + let validator_meta = match self.validator_meta.get_mut(&validator_index) { + None => return false, + Some(m) => m, + }; + + let compact = statement.payload().clone(); + let fingerprint = (validator_index, compact.clone()); + match self.known_statements.entry(fingerprint) { + HEntry::Occupied(_) => return false, + HEntry::Vacant(mut e) => { + e.insert(statement); + }, + } + + let candidate_hash = *compact.candidate_hash(); + let seconded = if let CompactStatement::Seconded(_) = compact { true } else { false }; + + // cross-reference updates. 
+ { + let group_index = validator_meta.group; + let group = self.groups.get(group_index.0 as usize).expect( + "we only have meta info on validators confirmed to be \ + in groups at construction; qed", + ); + + let group_statements = self + .group_statements + .entry((group_index, candidate_hash)) + .or_insert_with(|| GroupStatements::with_group_size(group.len())); + + if seconded { + validator_meta.seconded_count += 1; + group_statements.note_seconded(validator_meta.within_group_index); + } else { + group_statements.note_validated(validator_meta.within_group_index); + } + } + + true + } +} + +type Fingerprint = (ValidatorIndex, CompactStatement); + +struct ValidatorMeta { + group: GroupIndex, + within_group_index: usize, + seconded_count: usize, +} + +struct GroupStatements { + seconded_statements: BitVec, + valid_statements: BitVec, +} + +impl GroupStatements { + fn with_group_size(group_size: usize) -> Self { + GroupStatements { + seconded_statements: BitVec::repeat(false, group_size), + valid_statements: BitVec::repeat(false, group_size), + } + } + + fn note_seconded(&mut self, within_group_index: usize) { + self.seconded_statements.set(within_group_index, true); + } + + fn note_validated(&mut self, within_group_index: usize) { + self.valid_statements.set(within_group_index, true); + } +} From 44c2b37c3ae73f6a6663f557d0c2b34d3be76f29 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 28 Sep 2022 14:57:14 -0500 Subject: [PATCH 032/220] some utilities for the statement store --- .../src/vstaging/statement_store.rs | 45 ++++++++++++++++--- 1 file changed, 38 insertions(+), 7 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index c101c28748b6..33efc7c37967 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -23,7 +23,7 @@ //! 
views into this data: views based on the candidate, views based on the validator //! groups, and views based on the validators themselves. -use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; +use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec, slice::BitSlice}; use polkadot_primitives::vstaging::{ CandidateHash, CompactStatement, GroupIndex, SignedStatement, ValidatorIndex, }; @@ -108,6 +108,37 @@ impl StatementStore { true } + + /// Get a bit-slice of validators in the group which have issued statements of the + /// given form about the candidate. If unavailable, returns `None`. + pub fn group_statement_bitslice( + &self, + group_index: GroupIndex, + statement: CompactStatement, + ) -> Option<&BitSlice> { + let candidate_hash = *statement.candidate_hash(); + self.group_statements + .get(&(group_index, candidate_hash)) + .map(|g| match statement { + CompactStatement::Seconded(_) => &*g.seconded, + CompactStatement::Valid(_) => &*g.valid, + }) + } + + /// Get an iterator over signed statements of the given form by the given group. 
+ pub fn group_statements<'a>( + &'a self, + group_index: GroupIndex, + statement: CompactStatement, + ) -> impl Iterator + 'a { + let bitslice = self.group_statement_bitslice(group_index, statement.clone()); + let group_validators = self.groups.get(group_index.0 as usize); + + bitslice.into_iter() + .flat_map(|v| v.iter_ones()) + .filter_map(move |i| group_validators.as_ref().and_then(|g| g.get(i))) + .filter_map(move |v| self.known_statements.get(&(*v, statement.clone()))) + } } type Fingerprint = (ValidatorIndex, CompactStatement); @@ -119,23 +150,23 @@ struct ValidatorMeta { } struct GroupStatements { - seconded_statements: BitVec, - valid_statements: BitVec, + seconded: BitVec, + valid: BitVec, } impl GroupStatements { fn with_group_size(group_size: usize) -> Self { GroupStatements { - seconded_statements: BitVec::repeat(false, group_size), - valid_statements: BitVec::repeat(false, group_size), + seconded: BitVec::repeat(false, group_size), + valid: BitVec::repeat(false, group_size), } } fn note_seconded(&mut self, within_group_index: usize) { - self.seconded_statements.set(within_group_index, true); + self.seconded.set(within_group_index, true); } fn note_validated(&mut self, within_group_index: usize) { - self.valid_statements.set(within_group_index, true); + self.valid.set(within_group_index, true); } } From 75627c527f1228e38f9642a2d00c341e382be875 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 28 Sep 2022 16:02:03 -0500 Subject: [PATCH 033/220] rewrite 'send_statement_direct' using new tools --- .../src/vstaging/cluster.rs | 24 ++- .../src/vstaging/mod.rs | 142 +++++++++++++----- .../src/vstaging/statement_store.rs | 19 ++- 3 files changed, 145 insertions(+), 40 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/cluster.rs b/node/network/statement-distribution/src/vstaging/cluster.rs index d8f96243fcec..ce78d7bec879 100644 --- a/node/network/statement-distribution/src/vstaging/cluster.rs +++ 
b/node/network/statement-distribution/src/vstaging/cluster.rs @@ -87,6 +87,8 @@ enum TaggedKnowledge { // Knowledge we have sent to the validator on the p2p layer. OutgoingP2P(Knowledge), // Knowledge of candidates the validator has seconded. + // This is limited only to `Seconded` statements we have accepted + // _without prejudice_. Seconded(CandidateHash), } @@ -192,8 +194,12 @@ impl ClusterTracker { } if let CompactStatement::Seconded(candidate_hash) = statement { - let mut originator_knowledge = self.knowledge.entry(originator).or_default(); - originator_knowledge.insert(TaggedKnowledge::Seconded(candidate_hash)); + // since we accept additional `Seconded` statements beyond the limits + // 'with prejudice', we must respect the limit here. + if self.seconded_already_or_within_limit(originator, candidate_hash) { + let mut originator_knowledge = self.knowledge.entry(originator).or_default(); + originator_knowledge.insert(TaggedKnowledge::Seconded(candidate_hash)); + } } } @@ -282,6 +288,18 @@ impl ClusterTracker { self.validator_seconded(validator, candidate_hash) } + /// Returns the validator-index of the producer a `Seconded` statement + /// for the candidate which is legal for us to send to all nodes in the cluster. + pub fn sendable_seconder(&self, candidate_hash: CandidateHash) -> Option { + for (v, k) in &self.knowledge { + if k.contains(&TaggedKnowledge::Seconded(candidate_hash)) { + return Some(*v) + } + } + + None + } + // returns true if it's legal to accept a new `Seconded` message from this validator. // This is either // 1. because we've already accepted it. 
@@ -716,4 +734,6 @@ mod tests { Err(RejectOutgoing::Known), ); } + + // TODO [now] ensure statements received with prejudice don't prevent sending later } diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 3ceaf2cef034..1e79fdebada3 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -50,6 +50,7 @@ use crate::{ }; use candidate_entry::CandidateEntry; use cluster::ClusterTracker; +use statement_store::StatementStore; mod candidate_entry; mod cluster; @@ -69,6 +70,7 @@ struct PerRelayParentState { validator_state: HashMap, candidates: HashMap, local_validator: Option, + statement_store: StatementStore, session: SessionIndex, } @@ -271,6 +273,7 @@ pub(crate) async fn handle_activated_leaf( validator_state: HashMap::new(), candidates: HashMap::new(), local_validator, + statement_store: StatementStore::new(session_info.validator_groups.clone()), session: session_index, }, ); @@ -344,10 +347,11 @@ pub(crate) async fn share_local_statement( Some(x) => x, }; - let (local_index, local_assignment) = match per_relay_parent.local_validator.as_ref() { - None => return Err(JfyiError::InvalidShare), - Some(l) => (l.index, l.assignment), - }; + let (local_index, local_assignment, local_group) = + match per_relay_parent.local_validator.as_ref() { + None => return Err(JfyiError::InvalidShare), + Some(l) => (l.index, l.assignment, l.group), + }; // Two possibilities: either the statement is `Seconded` or we already // have the candidate. Sanity: check the para-id is valid. @@ -400,23 +404,21 @@ pub(crate) async fn share_local_statement( }, }; - // TODO [now]: note seconded. 
- // if !candidate_entry.insert_signed_statement(compact_statement.clone()) { - // gum::warn!( - // target: LOG_TARGET, - // statement = ?compact_statement.payload(), - // "Candidate backing issued redundant statement?", - // ); - - // return Err(JfyiError::InvalidShare) - // } + if !per_relay_parent.statement_store.insert(compact_statement.clone()) { + gum::warn!( + target: LOG_TARGET, + statement = ?compact_statement.payload(), + "Candidate backing issued redundant statement?", + ); + return Err(JfyiError::InvalidShare) + } (compact_statement, candidate_hash) }; // send the compact version of the statement to nodes in current group and next-up. If not a `Seconded` statement, // send a `Seconded` statement as well. - send_statement_direct(ctx, state, relay_parent, compact_statement).await; + send_statement_direct(ctx, state, relay_parent, local_group, compact_statement).await; // TODO [now]: // 4. If the candidate is now backed, trigger 'backed candidate announcement' logic. @@ -431,6 +433,10 @@ pub(crate) async fn share_local_statement( // If we're not sure whether the peer knows the candidate is `Seconded` already, we also send a `Seconded` // statement. // +// The group index which is _canonically assigned_ to this parachain must be +// specified already. This function should not be used when the candidate receipt and +// therefore the canonical group for the parachain is unknown. +// // preconditions: the candidate entry exists in the state under the relay parent // and the statement has already been imported into the entry. If this is a `Valid` // statement, then there must be at least one `Seconded` statement. 
@@ -441,6 +447,7 @@ async fn send_statement_direct( ctx: &mut Context, state: &mut State, relay_parent: Hash, + group_index: GroupIndex, statement: SignedStatement, ) { let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) { @@ -454,21 +461,13 @@ async fn send_statement_direct( }; let candidate_hash = statement.payload().candidate_hash().clone(); - let candidate_entry = match per_relay_parent.candidates.get_mut(&candidate_hash) { - Some(x) => x, - None => return, - }; - // TODO [now]: get a seconding statement from the 'statement store', TBD - let prior_seconded: Option = unimplemented!(); - // TODO [now]: clean up this junk below - // let prior_seconded = match statement.payload() { - // CompactStatement::Seconded(_) => None, - // CompactStatement::Valid(_) => match candidate_entry.seconded_statements.first() { - // Some(s) => Some(s.as_unchecked().clone()), - // None => return, - // }, - // }; + let mut prior_seconded = None; + let compact_statement = statement.payload().clone(); + let is_seconded = match compact_statement { + CompactStatement::Seconded(_) => true, + CompactStatement::Valid(_) => false, + }; // two kinds of targets: those in our 'cluster' (currently just those in the same group), // and those we are propagating to through the grid. @@ -477,8 +476,8 @@ async fn send_statement_direct( Grid, } - let targets: Vec<(ValidatorIndex, AuthorityDiscoveryId, TargetKind)> = { - let local_validator = match per_relay_parent.local_validator.as_ref() { + let (local_validator, targets) = { + let local_validator = match per_relay_parent.local_validator.as_mut() { Some(v) => v, None => return, // sanity: should be impossible to reach this. 
}; @@ -492,16 +491,20 @@ async fn send_statement_direct( // TODO [now]: extend with grid targets, dedup - current_group + let targets = current_group .filter_map(|(v, k)| { session_info.discovery_keys.get(v.0 as usize).map(|a| (v, a.clone(), k)) }) - .collect::>() + .collect::>(); + + (local_validator, targets) }; + let originator = statement.validator_index(); + let mut prior_to = Vec::new(); let mut statement_to = Vec::new(); - for (validator_index, authority_id, kind) in targets { + for (target, authority_id, kind) in targets { // Find peer ID based on authority ID, and also filter to connected. let peer_id: PeerId = match state.authorities.get(&authority_id) { Some(p) if state.peers.contains_key(p) => p.clone(), @@ -510,8 +513,65 @@ async fn send_statement_direct( match kind { TargetKind::Cluster => { - // TODO [now]: use cluster mechanics to determine whether to sent - // 'prior_to' and 'statement_to'. + if !local_validator.cluster_tracker.knows_candidate(target, candidate_hash) && + !is_seconded + { + // lazily initialize this. + let prior_seconded = if let Some(ref p) = prior_seconded.as_ref() { + p + } else { + // This should always succeed because: + // 1. If this is not a `Seconded` statement we must have + // received at least one `Seconded` statement from other validators + // in our cluster. + // 2. We should have deposited all statements we've received into the statement store. + + match cluster_sendable_seconded_statement( + &local_validator.cluster_tracker, + &per_relay_parent.statement_store, + candidate_hash, + ) { + None => { + gum::warn!( + target: LOG_TARGET, + ?candidate_hash, + ?relay_parent, + "degenerate state: we authored a `Valid` statement without \ + knowing any `Seconded` statements." 
+ ); + + return + }, + Some(s) => &*prior_seconded.get_or_insert(s.as_unchecked().clone()), + } + }; + + // One of the properties of the 'cluster sendable seconded statement' + // is that we `can_send` it to all nodes in the cluster which don't have the candidate already. And + // we're already in a branch that's gated off from cluster nodes + // which have knowledge of the candidate. + local_validator.cluster_tracker.note_sent( + target, + prior_seconded.unchecked_validator_index(), + CompactStatement::Seconded(candidate_hash), + ); + prior_to.push(peer_id); + } + + // At this point, all peers in the cluster should 'know' + // the candidate, so we don't expect for this to fail. + if let Ok(()) = local_validator.cluster_tracker.can_send( + target, + originator, + compact_statement.clone(), + ) { + local_validator.cluster_tracker.note_sent( + target, + originator, + compact_statement.clone(), + ); + statement_to.push(peer_id); + } }, TargetKind::Grid => { // TODO [now] @@ -548,6 +608,16 @@ async fn send_statement_direct( } } +fn cluster_sendable_seconded_statement<'a>( + cluster_tracker: &ClusterTracker, + statement_store: &'a StatementStore, + candidate_hash: CandidateHash, +) -> Option<&'a SignedStatement> { + cluster_tracker.sendable_seconder(candidate_hash).and_then(|v| { + statement_store.validator_statement(v, CompactStatement::Seconded(candidate_hash)) + }) +} + /// Check a statement signature under this parent hash. fn check_statement_signature( session_index: SessionIndex, diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index 33efc7c37967..b6336150f8c6 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -23,7 +23,7 @@ //! views into this data: views based on the candidate, views based on the validator //! 
groups, and views based on the validators themselves. -use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec, slice::BitSlice}; +use bitvec::{order::Lsb0 as BitOrderLsb0, slice::BitSlice, vec::BitVec}; use polkadot_primitives::vstaging::{ CandidateHash, CompactStatement, GroupIndex, SignedStatement, ValidatorIndex, }; @@ -34,6 +34,10 @@ use std::collections::hash_map::{Entry as HEntry, HashMap}; pub struct StatementStore { groups: Vec>, validator_meta: HashMap, + + // we keep statements per-group because even though only one group _should_ be + // producing statements about a candidate, until we have the candidate receipt + // itself, we can't tell which group that is. group_statements: HashMap<(GroupIndex, CandidateHash), GroupStatements>, known_statements: HashMap, } @@ -125,6 +129,7 @@ impl StatementStore { }) } + // TODO [now]: this may not be useful. /// Get an iterator over signed statements of the given form by the given group. pub fn group_statements<'a>( &'a self, @@ -134,11 +139,21 @@ impl StatementStore { let bitslice = self.group_statement_bitslice(group_index, statement.clone()); let group_validators = self.groups.get(group_index.0 as usize); - bitslice.into_iter() + bitslice + .into_iter() .flat_map(|v| v.iter_ones()) .filter_map(move |i| group_validators.as_ref().and_then(|g| g.get(i))) .filter_map(move |v| self.known_statements.get(&(*v, statement.clone()))) } + + /// Get the full statement of this kind issued by this validator, if it is known. + pub fn validator_statement( + &self, + validator_index: ValidatorIndex, + statement: CompactStatement, + ) -> Option<&SignedStatement> { + self.known_statements.get(&(validator_index, statement)) + } } type Fingerprint = (ValidatorIndex, CompactStatement); From 336f412daee6c74eac0bbf28faa82dcbf028203f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 29 Sep 2022 20:09:58 -0500 Subject: [PATCH 034/220] filter sending logic on peers which have the relay-parent in their view. 
--- .../src/vstaging/mod.rs | 37 +++++++++++++++++-- 1 file changed, 34 insertions(+), 3 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 1e79fdebada3..10079d270990 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -106,9 +106,21 @@ pub(crate) struct State { struct PeerState { view: View, + implicit_view: HashSet, maybe_authority: Option>, } +impl PeerState { + // Whether we know that a peer knows a relay-parent. + // The peer knows the relay-parent if it is either implicit or explicit + // in their view. However, if it is implicit via an active-leaf we don't + // recognize, we will not accurately be able to recognize them as 'knowing' + // the relay-parent. + fn knows_relay_parent(&self, relay_parent: &Hash) -> bool { + self.implicit_view.contains(relay_parent) + } +} + /// How many votes we need to consider a candidate backed. /// /// WARNING: This has to be kept in sync with the runtime check in the inclusion module. @@ -132,7 +144,11 @@ pub(crate) async fn handle_network_update( state.peers.insert( peer_id, - PeerState { view: View::default(), maybe_authority: authority_ids.clone() }, + PeerState { + view: View::default(), + implicit_view: HashSet::new(), + maybe_authority: authority_ids.clone(), + }, ); if let Some(authority_ids) = authority_ids { @@ -185,7 +201,7 @@ pub(crate) async fn handle_network_update( } }, NetworkBridgeEvent::PeerViewChange(peer_id, view) => { - // TODO [now] + // TODO [now] update explicit and implicit views }, NetworkBridgeEvent::OurViewChange(_view) => { // handled by `handle_activated_leaf` @@ -277,6 +293,9 @@ pub(crate) async fn handle_activated_leaf( session: session_index, }, ); + + // TODO [now]: update peers which have the leaf in their view. + // update their implicit view. send any messages accordingly. 
} Ok(()) @@ -507,7 +526,9 @@ async fn send_statement_direct( for (target, authority_id, kind) in targets { // Find peer ID based on authority ID, and also filter to connected. let peer_id: PeerId = match state.authorities.get(&authority_id) { - Some(p) if state.peers.contains_key(p) => p.clone(), + Some(p) + if state.peers.get(p).map_or(false, |p| p.knows_relay_parent(&relay_parent)) => + p.clone(), None | Some(_) => continue, }; @@ -677,6 +698,16 @@ async fn handle_incoming_statement( Some(s) => s, }; + let local_validator = match per_relay_parent.local_validator { + None => { + // we shouldn't be receiving statements unless we're a validator + // this session. + report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + return + }, + Some(ref l) => l, + }; + // Ensure the statement is correctly signed. let checked_statement = match check_statement_signature( per_relay_parent.session, From 90a91a2dbb4d3d5ed834b8de1f8bfc6ae2dbd1b7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 29 Sep 2022 20:51:48 -0500 Subject: [PATCH 035/220] some more logic for handling incoming statements --- .../src/vstaging/cluster.rs | 14 +++ .../src/vstaging/mod.rs | 119 ++++++++++++++---- 2 files changed, 108 insertions(+), 25 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/cluster.rs b/node/network/statement-distribution/src/vstaging/cluster.rs index ce78d7bec879..ca193608c692 100644 --- a/node/network/statement-distribution/src/vstaging/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/cluster.rs @@ -275,6 +275,20 @@ impl ClusterTracker { &self.validators } + /// Get all possible senders for the given originator. + /// Returns the empty slice in the case that the originator + /// is not part of the cluster. + // note: this API is future-proofing for a case where we may + // extend clusters beyond just the assigned group, for optimization + // purposes. 
+ pub fn senders_for_originator(&self, originator: ValidatorIndex) -> &[ValidatorIndex] { + if self.validators.contains(&originator) { + &self.validators[..] + } else { + &[] + } + } + /// Whether a validator knows the candidate is `Seconded`. pub fn knows_candidate( &self, diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 10079d270990..eae97feefa0a 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -19,7 +19,7 @@ use polkadot_node_network_protocol::{ self as net_protocol, - grid_topology::{RequiredRouting, SessionBoundGridTopologyStorage, SessionGridTopology}, + grid_topology::{RequiredRouting, SessionGridTopologies, SessionGridTopology}, peer_set::ValidationVersion, vstaging as protocol_vstaging, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; @@ -49,7 +49,7 @@ use crate::{ LOG_TARGET, }; use candidate_entry::CandidateEntry; -use cluster::ClusterTracker; +use cluster::{Accept as ClusterAccept, ClusterTracker, RejectIncoming as ClusterRejectIncoming}; use statement_store::StatementStore; mod candidate_entry; @@ -63,6 +63,7 @@ const COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE: Rep = Rep::CostMinor("Unexpected Statement, unknown candidate"); const COST_UNEXPECTED_STATEMENT_REMOTE: Rep = Rep::CostMinor("Unexpected Statement, remote not allowed"); +const COST_EXCESSIVE_SECONDED: Rep = Rep::CostMinor("Sent Excessive `Seconded` Statements"); const COST_INVALID_SIGNATURE: Rep = Rep::CostMajor("Invalid Statement Signature"); @@ -100,7 +101,7 @@ pub(crate) struct State { per_session: HashMap, peers: HashMap, keystore: SyncCryptoStorePtr, - topology_storage: SessionBoundGridTopologyStorage, + topology_storage: SessionGridTopologies, authorities: HashMap, } @@ -119,6 +120,10 @@ impl PeerState { fn knows_relay_parent(&self, relay_parent: &Hash) -> bool { self.implicit_view.contains(relay_parent) } + + 
fn is_authority(&self, authority_id: &AuthorityDiscoveryId) -> bool { + self.maybe_authority.as_ref().map_or(false, |x| x.contains(authority_id)) + } } /// How many votes we need to consider a candidate backed. @@ -163,17 +168,12 @@ pub(crate) async fn handle_network_update( NetworkBridgeEvent::NewGossipTopology(topology) => { let new_session_index = topology.session; let new_topology: SessionGridTopology = topology.into(); - let old_topology = state.topology_storage.get_current_topology(); - let newly_added = new_topology.peers_diff(old_topology); - state.topology_storage.update_topology(new_session_index, new_topology); - for peer in newly_added { - if let Some(data) = state.peers.get_mut(&peer) { - // TODO [now]: send the peer any topology-specific - // messages we need to send them. Like forwarding or sending backed-candidate - // messages. But in principle we shouldn't have accepted any such messages as we don't - // yet have the topology. - } - } + state.topology_storage.insert_topology(new_session_index, new_topology); + + // TODO [now]: can we not update authority IDs for peers? + + // TODO [now] for all relay-parents with this session, send all grid peers + // any `BackedCandidateInv` messages they might need. }, NetworkBridgeEvent::PeerMessage(peer_id, message) => { match message { @@ -294,6 +294,8 @@ pub(crate) async fn handle_activated_leaf( }, ); + state.topology_storage.inc_session_refs(session_index); + // TODO [now]: update peers which have the leaf in their view. // update their implicit view. send any messages accordingly. } @@ -346,7 +348,17 @@ pub(crate) fn handle_deactivate_leaf(state: &mut State, leaf_hash: Hash) { } // clean up per-relay-parent data based on everything removed. - state.per_relay_parent.retain(|r, _| relay_parents.contains(r)); + let topology_storage = &mut state.topology_storage; + state.per_relay_parent.retain(|r, x| { + if relay_parents.contains(r) { + true + } else { + // clean up topology storage. 
+ topology_storage.dec_session_refs(x.session); + + false + } + }); // clean up sessions based on everything remaining. let sessions: HashSet<_> = state.per_relay_parent.values().map(|r| r.session).collect(); @@ -671,10 +683,13 @@ async fn handle_incoming_statement( relay_parent: Hash, statement: UncheckedSignedStatement, ) { - if !state.peers.contains_key(&peer) { - // sanity: should be impossible. - return - } + let peer_state = match state.peers.get(&peer) { + None => { + // sanity: should be impossible. + return + }, + Some(p) => p, + }; // Ensure we know the relay parent. let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) { @@ -698,16 +713,62 @@ async fn handle_incoming_statement( Some(s) => s, }; - let local_validator = match per_relay_parent.local_validator { + let local_validator = match per_relay_parent.local_validator.as_mut() { None => { // we shouldn't be receiving statements unless we're a validator // this session. report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; return }, - Some(ref l) => l, + Some(l) => l, + }; + + let cluster_sender_index = { + let allowed_senders = local_validator + .cluster_tracker + .senders_for_originator(statement.unchecked_validator_index()); + + allowed_senders + .iter() + .filter_map(|i| session_info.discovery_keys.get(i.0 as usize).map(|ad| (*i, ad))) + .filter(|(_, ad)| peer_state.is_authority(ad)) + .map(|(i, _)| i) + .next() + }; + + // TODO [now]: handle direct statements from grid peers + let cluster_sender_index = match cluster_sender_index { + None => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + return + }, + Some(c) => c, }; + // additional cluster checks. 
+ { + match local_validator.cluster_tracker.can_receive( + cluster_sender_index, + statement.unchecked_validator_index(), + statement.unchecked_payload().clone(), + ) { + Ok(ClusterAccept::Ok | ClusterAccept::WithPrejudice) => {}, + Err(ClusterRejectIncoming::ExcessiveSeconded) => { + report_peer(ctx.sender(), peer, COST_EXCESSIVE_SECONDED).await; + return + }, + Err(ClusterRejectIncoming::CandidateUnknown | ClusterRejectIncoming::Duplicate) => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + return + }, + Err(ClusterRejectIncoming::NotInGroup) => { + // sanity: shouldn't be possible; we already filtered this + // out above. + return + }, + } + } + // Ensure the statement is correctly signed. let checked_statement = match check_statement_signature( per_relay_parent.session, @@ -722,10 +783,18 @@ async fn handle_incoming_statement( }, }; - let candidate_hash = checked_statement.payload().candidate_hash(); + local_validator.cluster_tracker.note_received( + cluster_sender_index, + checked_statement.validator_index(), + checked_statement.payload().clone(), + ); - // Ensure that if the statement is kind 'Valid' that we know the candidate. - if let CompactStatement::Valid(_) = checked_statement.payload() { - // TODO [now] + if !per_relay_parent.statement_store.insert(checked_statement) { + return } + + // TODO [now]: + // * add a candidate entry if we need to + // * issue requests for the candidate if we need to + // * import the statement into backing if we can. 
} From 4dc62b9e6d451ab16b030844ab23575eab857e60 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 29 Sep 2022 22:29:25 -0500 Subject: [PATCH 036/220] req/res: BackedCandidatePacket -> AttestedCandidate + tweaks --- .../protocol/src/request_response/mod.rs | 32 +++++++++--------- .../protocol/src/request_response/vstaging.rs | 33 ++++++++++++++----- 2 files changed, 40 insertions(+), 25 deletions(-) diff --git a/node/network/protocol/src/request_response/mod.rs b/node/network/protocol/src/request_response/mod.rs index 11229f73d029..70db3326e936 100644 --- a/node/network/protocol/src/request_response/mod.rs +++ b/node/network/protocol/src/request_response/mod.rs @@ -75,9 +75,9 @@ pub enum Protocol { /// Sending of dispute statements with application level confirmations. DisputeSendingV1, - /// Protocol for requesting backed candidate packets in statement distribution + /// Protocol for requesting candidates with attestations in statement distribution /// in v2. - BackedCandidatePacketV2, + AttestedCandidateV2, } /// Minimum bandwidth we expect for validators - 500Mbit/s is the recommendation, so approximately @@ -109,7 +109,7 @@ const POV_REQUEST_TIMEOUT_CONNECTED: Duration = Duration::from_millis(1200); /// fit statement distribution within a block of 6 seconds.) const STATEMENTS_TIMEOUT: Duration = Duration::from_secs(1); -/// We want to backed candidate requests to time out relatively fast, +/// We want to attested candidate requests to time out relatively fast, /// because slow requests will bottleneck the backing system. Ideally, we'd have /// an adaptive timeout based on the candidate size, because there will be a lot of variance /// in candidate sizes: candidates with no code and no messages vs candidates with code @@ -119,7 +119,7 @@ const STATEMENTS_TIMEOUT: Duration = Duration::from_secs(1); /// backing allows them to be included over a longer window of time. 
Exponential back-off /// up to a maximum of 10 seconds would be ideal, but isn't supported by the /// infrastructure here yet: see https://github.com/paritytech/polkadot/issues/6009 -const BACKED_CANDIDATE_PACKET_TIMEOUT: Duration = Duration::from_millis(2500); +const ATTESTED_CANDIDATE_TIMEOUT: Duration = Duration::from_millis(2500); /// We don't want a slow peer to slow down all the others, at the same time we want to get out the /// data quickly in full to at least some peers (as this will reduce load on us as they then can @@ -130,8 +130,8 @@ pub const MAX_PARALLEL_STATEMENT_REQUESTS: u32 = 3; /// We don't want a slow peer to slow down all the others, at the same time we want to get out the /// data quickly in full to at least some peers (as this will reduce load on us as they then can /// start serving the data). So this value is a tradeoff. 3 seems to be sensible. So we would need -/// to have 5 slow nodes connected, to delay transfer for others by `BACKED_CANDIDATE_PACKET_TIMEOUT`. -pub const MAX_PARALLEL_BACKED_CANDIDATE_PACKET_REQUESTS: u32 = 5; +/// to have 5 slow nodes connected, to delay transfer for others by `ATTESTED_CANDIDATE_TIMEOUT`. +pub const MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS: u32 = 5; /// Response size limit for responses of POV like data. /// @@ -146,11 +146,11 @@ const POV_RESPONSE_SIZE: u64 = MAX_POV_SIZE as u64 + 10_000; /// This is `MAX_CODE_SIZE` plus some additional space for protocol overhead. const STATEMENT_RESPONSE_SIZE: u64 = MAX_CODE_SIZE as u64 + 10_000; -/// Maximum response sizes for `BackedCandidatePacketV2`. +/// Maximum response sizes for `AttestedCandidateV2`. /// /// This is `MAX_CODE_SIZE` plus some additional space for protocol overhead and /// additional backing statements. 
-const BACKED_CANDIDATE_PACKET_RESPONSE_SIZE: u64 = MAX_CODE_SIZE as u64 + 100_000; +const ATTESTED_CANDIDATE_RESPONSE_SIZE: u64 = MAX_CODE_SIZE as u64 + 100_000; impl Protocol { /// Get a configuration for a given Request response protocol. @@ -231,12 +231,12 @@ impl Protocol { inbound_queue: Some(tx), }, - Protocol::BackedCandidatePacketV2 => RequestResponseConfig { + Protocol::AttestedCandidateV2 => RequestResponseConfig { name, fallback_names, max_request_size: 1_000, - max_response_size: BACKED_CANDIDATE_PACKET_RESPONSE_SIZE, - request_timeout: BACKED_CANDIDATE_PACKET_TIMEOUT, + max_response_size: ATTESTED_CANDIDATE_RESPONSE_SIZE, + request_timeout: ATTESTED_CANDIDATE_TIMEOUT, inbound_queue: Some(tx), }, }; @@ -285,7 +285,7 @@ impl Protocol { // failure, so having a good value here is mostly about performance tuning. Protocol::DisputeSendingV1 => 100, - Protocol::BackedCandidatePacketV2 => { + Protocol::AttestedCandidateV2 => { // We assume we can utilize up to 70% of the available bandwidth for statements. // This is just a guess/estimate, with the following considerations: If we are // faster than that, queue size will stay low anyway, even if not - requesters will @@ -293,9 +293,9 @@ impl Protocol { // wasting precious time. let available_bandwidth = 7 * MIN_BANDWIDTH_BYTES / 10; let size = u64::saturating_sub( - BACKED_CANDIDATE_PACKET_TIMEOUT.as_millis() as u64 * available_bandwidth / + ATTESTED_CANDIDATE_TIMEOUT.as_millis() as u64 * available_bandwidth / (1000 * MAX_CODE_SIZE as u64), - MAX_PARALLEL_BACKED_CANDIDATE_PACKET_REQUESTS as u64, + MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as u64, ); debug_assert!( size > 0, @@ -322,7 +322,7 @@ impl Protocol { Protocol::DisputeSendingV1 => Some("/polkadot/send_dispute/1"), // Introduced after legacy names became legacy. 
- Protocol::BackedCandidatePacketV2 => None, + Protocol::AttestedCandidateV2 => None, } } } @@ -379,7 +379,7 @@ impl ReqProtocolNames { Protocol::StatementFetchingV1 => "/req_statement/1", Protocol::DisputeSendingV1 => "/send_dispute/1", - Protocol::BackedCandidatePacketV2 => "/req_backed_candidate_packet/2", + Protocol::AttestedCandidateV2 => "/req_attested_candidate/2", }; format!("{}{}", prefix, short_name).into() diff --git a/node/network/protocol/src/request_response/vstaging.rs b/node/network/protocol/src/request_response/vstaging.rs index 12c1a729d066..f6908d92836c 100644 --- a/node/network/protocol/src/request_response/vstaging.rs +++ b/node/network/protocol/src/request_response/vstaging.rs @@ -20,27 +20,42 @@ use parity_scale_codec::{Decode, Encode}; use polkadot_primitives::vstaging::{ CandidateHash, CommittedCandidateReceipt, UncheckedSignedStatement, + PersistedValidationData, }; +use bitvec::{vec::BitVec, order::Lsb0}; use super::{IsRequest, Protocol}; -/// Request a backed candidate packet. -#[derive(Debug, Copy, Clone, Encode, Decode)] -pub struct BackedCandidatePacketRequest { +/// Request a candidate with statements. +#[derive(Debug, Clone, Encode, Decode)] +pub struct AttestedCandidateRequest { /// Hash of the candidate we want to request. pub candidate_hash: CandidateHash, + /// bitfield with 'AND' semantics, indicating which validators + /// to send `Seconded` statements for. + /// + /// The mask must have exactly the minimum size required to + /// fit all validators from the backing group. + /// + /// The response may not contain any `Seconded` statements outside + /// of this mask. + pub seconded_mask: BitVec, } -/// Response to a backed candidate packet request. +/// Response to an `AttestedCandidateRequest`. #[derive(Debug, Clone, Encode, Decode)] -pub struct BackedCandidatePacketResponse { +pub struct AttestedCandidateResponse { /// The candidate receipt, with commitments. 
pub candidate_receipt: CommittedCandidateReceipt, - /// All known statements about the candidate, in compact form. + /// The [`PersistedValidationData`] corresponding to the candidate. + pub persisted_validation_data: PersistedValidationData, + /// All known statements about the candidate, in compact form, + /// omitting `Seconded` statements which were intended to be masked + /// out. pub statements: Vec, } -impl IsRequest for BackedCandidatePacketRequest { - type Response = BackedCandidatePacketResponse; - const PROTOCOL: Protocol = Protocol::BackedCandidatePacketV2; +impl IsRequest for AttestedCandidateRequest { + type Response = AttestedCandidateResponse; + const PROTOCOL: Protocol = Protocol::AttestedCandidateV2; } From 2ca0e6016f38b3aca36d52b490ed79818fe717de Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 29 Sep 2022 22:31:02 -0500 Subject: [PATCH 037/220] add a `validated_in_group` bitfield to BackedCandidateInventory --- node/network/protocol/src/lib.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index c7fc60c45a62..0d5bdce7b663 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -622,12 +622,21 @@ pub mod vstaging { /// The head-data corresponding to the candidate. pub parent_head_data_hash: Hash, /// A bitfield which indicates which validators in the para's - /// group at the relay-parent have seconded this candidate. + /// group at the relay-parent have validated this candidate + /// and issued `Seconded` statements about it. /// - /// This MUST have the minimum amount of bytes + /// This MUST have exactly the minimum amount of bytes /// necessary to represent the number of validators in the /// assigned backing group as-of the relay-parent. 
pub seconded_in_group: BitVec, + /// A bitfield which indicates which validators in the para's + /// group at the relay-parent have validated this candidate + /// and issued `Valid` statements about it. + /// + /// This MUST have exactly the minimum amount of bytes + /// necessary to represent the number of validators in the + /// assigned backing group as-of the relay-parent. + pub validated_in_group: BitVec, } /// Network messages used by the statement distribution subsystem. From e49528540f8329d59891ae08961f40cf953ef455 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 29 Sep 2022 22:31:46 -0500 Subject: [PATCH 038/220] BackedCandidateInventory -> Manifest --- node/network/protocol/src/lib.rs | 4 ++-- node/network/statement-distribution/src/vstaging/mod.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 0d5bdce7b663..7e245a23841b 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -612,7 +612,7 @@ pub mod vstaging { /// An inventory of a backed candidate, which can be requested. #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] - pub struct BackedCandidateInventory { + pub struct BackedCandidateManifest { /// The relay-parent of the candidate. pub relay_parent: Hash, /// The hash of the candidate. @@ -650,7 +650,7 @@ pub mod vstaging { /// sending node, for the purpose of being requested by the receiving node /// if needed. #[codec(index = 1)] - BackedCandidateInventory(BackedCandidateInventory), + BackedCandidateManifest(BackedCandidateManifest), /// A notification of a backed candidate being known by the sending node, /// for the purpose of informing a receiving node which already has the candidate. 
diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index eae97feefa0a..88ad2aedfc51 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -188,7 +188,7 @@ pub(crate) async fn handle_network_update( ), ) => {}, // TODO [now] net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateInventory( + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( inner, ), ) => {}, // TODO [now] From 585e79b4b855683f7cd656aaf98fb95177cfe002 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 1 Oct 2022 20:52:12 -0500 Subject: [PATCH 039/220] start in on requester module --- .../protocol/src/request_response/vstaging.rs | 5 +- .../src/vstaging/mod.rs | 5 +- .../src/vstaging/requester.rs | 157 ++++++++++++++++++ 3 files changed, 161 insertions(+), 6 deletions(-) create mode 100644 node/network/statement-distribution/src/vstaging/requester.rs diff --git a/node/network/protocol/src/request_response/vstaging.rs b/node/network/protocol/src/request_response/vstaging.rs index f6908d92836c..34da40ca229d 100644 --- a/node/network/protocol/src/request_response/vstaging.rs +++ b/node/network/protocol/src/request_response/vstaging.rs @@ -18,11 +18,10 @@ use parity_scale_codec::{Decode, Encode}; +use bitvec::{order::Lsb0, vec::BitVec}; use polkadot_primitives::vstaging::{ - CandidateHash, CommittedCandidateReceipt, UncheckedSignedStatement, - PersistedValidationData, + CandidateHash, CommittedCandidateReceipt, PersistedValidationData, UncheckedSignedStatement, }; -use bitvec::{vec::BitVec, order::Lsb0}; use super::{IsRequest, Protocol}; diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 88ad2aedfc51..fb9638eb973b 100644 --- 
a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -54,6 +54,7 @@ use statement_store::StatementStore; mod candidate_entry; mod cluster; +mod requester; mod statement_store; const COST_UNEXPECTED_STATEMENT: Rep = Rep::CostMinor("Unexpected Statement"); @@ -188,9 +189,7 @@ pub(crate) async fn handle_network_update( ), ) => {}, // TODO [now] net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( - inner, - ), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(inner), ) => {}, // TODO [now] net_protocol::StatementDistributionMessage::VStaging( protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown( diff --git a/node/network/statement-distribution/src/vstaging/requester.rs b/node/network/statement-distribution/src/vstaging/requester.rs new file mode 100644 index 000000000000..f4723e8fe0b4 --- /dev/null +++ b/node/network/statement-distribution/src/vstaging/requester.rs @@ -0,0 +1,157 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +//! A requester for full information on candidates. +//! +// TODO [now]: some module docs. 
+ +use polkadot_node_network_protocol::{ + request_response::{ + vstaging::{AttestedCandidateRequest, AttestedCandidateResponse}, + MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, + }, + PeerId, +}; +use polkadot_primitives::vstaging::{ + AuthorityDiscoveryId, CandidateHash, GroupIndex, Hash, ParaId, +}; + +use bitvec::{order::Lsb0, vec::BitVec}; +use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered}; + +use std::collections::{ + hash_map::{Entry as HEntry, HashMap, VacantEntry}, + HashSet, +}; + +/// An identifier for a candidate. +/// +/// In this module, we are requesting candidates +/// for which we have no information other than the candidate hash and statements signed +/// by validators. It is possible for validators for multiple groups to abuse this lack of +/// information: until we actually get the preimage of this candidate we cannot confirm +/// anything other than the candidate hash. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct CandidateIdentifier { + relay_parent: Hash, + candidate_hash: CandidateHash, + group_index: GroupIndex, +} + +struct TaggedResponse { + identifier: CandidateIdentifier, + authority_id: AuthorityDiscoveryId, + response: AttestedCandidateResponse, +} + +/// A pending request. +pub struct PendingRequest { + expected_para: ParaId, + known_by: Vec, +} + +/// A vacant pending request entry. +pub struct VacantRequestEntry<'a> { + vacant_request_entry: VacantEntry<'a, CandidateIdentifier, PendingRequest>, + unique_identifiers: &'a mut HashMap>, + identifier: CandidateIdentifier, +} + +/// An entry in the request manager. +pub enum RequestEntry<'a> { + Occupied(&'a mut PendingRequest), + Vacant(VacantRequestEntry<'a>), +} + +impl<'a> RequestEntry<'a> { + /// Yields the existing pending request or inserts it, with the given + /// metadata. 
+ pub fn or_insert_with( + self, + group_assignments: impl FnOnce(GroupIndex) -> ParaId, + ) -> &'a mut PendingRequest { + let mut vacant = match self { + RequestEntry::Occupied(o) => return o, + RequestEntry::Vacant(v) => v, + }; + + vacant + .unique_identifiers + .entry(vacant.identifier.candidate_hash) + .or_insert_with(HashSet::new) + .insert(vacant.identifier.clone()); + + let group_index = vacant.identifier.group_index; + + vacant.vacant_request_entry.insert(PendingRequest { + expected_para: group_assignments(group_index), + known_by: Vec::new(), + }) + } +} + +/// A manager for outgoing requests. +pub struct RequestManager { + pending_responses: FuturesUnordered>>, + requests: HashMap, + // all unique identifiers for the candidate. + unique_identifiers: HashMap>, +} + +impl RequestManager { + /// Create a new [`RequestManager`]. + pub fn new() -> Self { + RequestManager { + pending_responses: FuturesUnordered::new(), + requests: HashMap::new(), + unique_identifiers: HashMap::new(), + } + } + + /// Either yields the pending request data for the given parameters, + /// or yields a [`VacantRequestEntry`] which can be used to instantiate + /// it. + pub fn entry( + &mut self, + relay_parent: Hash, + candidate_hash: CandidateHash, + group_index: GroupIndex, + ) -> RequestEntry { + let identifier = CandidateIdentifier { relay_parent, candidate_hash, group_index }; + + let requests = &mut self.requests; + let unique_identifiers = &mut self.unique_identifiers; + + match requests.entry(identifier.clone()) { + HEntry::Vacant(v) => RequestEntry::Vacant(VacantRequestEntry { + vacant_request_entry: v, + unique_identifiers, + identifier, + }), + HEntry::Occupied(o) => RequestEntry::Occupied(o.into_mut()), + } + } + + /// Remove all pending requests for the given candidate. 
+ pub fn remove_for(&mut self, candidate: CandidateHash) { + if let Some(identifiers) = self.unique_identifiers.remove(&candidate) { + for id in identifiers { + self.requests.remove(&id); + } + } + } + + // TODO [now]: `dispatch_next -> Option` + + // TODO [now]: `await_incoming -> IncomingPendingValidation` +} From 5fddf7da525ce01bde38957cf6b9d5e8d7249540 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 2 Oct 2022 20:20:14 -0500 Subject: [PATCH 040/220] add outgoing request for attested candidate --- node/network/protocol/src/request_response/outgoing.rs | 6 +++++- .../statement-distribution/src/vstaging/requester.rs | 10 ++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/node/network/protocol/src/request_response/outgoing.rs b/node/network/protocol/src/request_response/outgoing.rs index b93c4e93cd31..78011651c852 100644 --- a/node/network/protocol/src/request_response/outgoing.rs +++ b/node/network/protocol/src/request_response/outgoing.rs @@ -23,7 +23,7 @@ use sc_network::PeerId; use polkadot_primitives::v2::AuthorityDiscoveryId; -use super::{v1, IsRequest, Protocol}; +use super::{v1, vstaging, IsRequest, Protocol}; /// All requests that can be sent to the network bridge via `NetworkBridgeTxMessage::SendRequest`. #[derive(Debug)] @@ -40,6 +40,8 @@ pub enum Requests { StatementFetchingV1(OutgoingRequest), /// Requests for notifying about an ongoing dispute. DisputeSendingV1(OutgoingRequest), + /// Request a candidate and attestations. 
+ AttestedCandidateV2(OutgoingRequest), } impl Requests { @@ -52,6 +54,7 @@ impl Requests { Self::AvailableDataFetchingV1(_) => Protocol::AvailableDataFetchingV1, Self::StatementFetchingV1(_) => Protocol::StatementFetchingV1, Self::DisputeSendingV1(_) => Protocol::DisputeSendingV1, + Self::AttestedCandidateV2(_) => Protocol::AttestedCandidateV2, } } @@ -70,6 +73,7 @@ impl Requests { Self::AvailableDataFetchingV1(r) => r.encode_request(), Self::StatementFetchingV1(r) => r.encode_request(), Self::DisputeSendingV1(r) => r.encode_request(), + Self::AttestedCandidateV2(r) => r.encode_request(), } } } diff --git a/node/network/statement-distribution/src/vstaging/requester.rs b/node/network/statement-distribution/src/vstaging/requester.rs index f4723e8fe0b4..45d1053cdded 100644 --- a/node/network/statement-distribution/src/vstaging/requester.rs +++ b/node/network/statement-distribution/src/vstaging/requester.rs @@ -151,6 +151,16 @@ impl RequestManager { } } + /// Yields the next request to dispatch, if there is any. + /// + /// Provide a closure which informs us whether peers are still connected. 
+ pub fn next_request( + &mut self, + peer_connected: impl Fn(&PeerId) -> bool, + ) -> Option { + + } + // TODO [now]: `dispatch_next -> Option` // TODO [now]: `await_incoming -> IncomingPendingValidation` From 7fefe782ce8f7a0150ff05269fb53f41c86768fe Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 3 Oct 2022 17:47:49 -0500 Subject: [PATCH 041/220] add a priority mechanism for requester --- .../src/vstaging/requester.rs | 205 ++++++++++++------ primitives/src/v2/mod.rs | 2 +- 2 files changed, 143 insertions(+), 64 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/requester.rs b/node/network/statement-distribution/src/vstaging/requester.rs index 45d1053cdded..953f013bd663 100644 --- a/node/network/statement-distribution/src/vstaging/requester.rs +++ b/node/network/statement-distribution/src/vstaging/requester.rs @@ -18,20 +18,21 @@ use polkadot_node_network_protocol::{ request_response::{ vstaging::{AttestedCandidateRequest, AttestedCandidateResponse}, - MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, + OutgoingRequest, MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, }, PeerId, }; -use polkadot_primitives::vstaging::{ - AuthorityDiscoveryId, CandidateHash, GroupIndex, Hash, ParaId, -}; +use polkadot_primitives::vstaging::{CandidateHash, GroupIndex, Hash, ParaId}; use bitvec::{order::Lsb0, vec::BitVec}; use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered}; -use std::collections::{ - hash_map::{Entry as HEntry, HashMap, VacantEntry}, - HashSet, +use std::{ + cmp::Reverse, + collections::{ + hash_map::{Entry as HEntry, HashMap, VacantEntry}, + BTreeSet, HashSet, + }, }; /// An identifier for a candidate. @@ -41,69 +42,79 @@ use std::collections::{ /// by validators. It is possible for validators for multiple groups to abuse this lack of /// information: until we actually get the preimage of this candidate we cannot confirm /// anything other than the candidate hash. 
-#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct CandidateIdentifier { - relay_parent: Hash, - candidate_hash: CandidateHash, - group_index: GroupIndex, + /// The relay-parent this candidate is ostensibly under. + pub relay_parent: Hash, + /// The hash of the candidate. + pub candidate_hash: CandidateHash, + /// The index of the group claiming to be assigned to the candidate's + /// para. + pub group_index: GroupIndex, } struct TaggedResponse { identifier: CandidateIdentifier, - authority_id: AuthorityDiscoveryId, + requested_peer: PeerId, response: AttestedCandidateResponse, } /// A pending request. -pub struct PendingRequest { +pub struct RequestedCandidate { + priority: Priority, expected_para: ParaId, - known_by: Vec, + known_by: Vec, + in_flight: bool, } -/// A vacant pending request entry. -pub struct VacantRequestEntry<'a> { - vacant_request_entry: VacantEntry<'a, CandidateIdentifier, PendingRequest>, - unique_identifiers: &'a mut HashMap>, - identifier: CandidateIdentifier, +impl RequestedCandidate { + // TODO [now]: add peer to known set } -/// An entry in the request manager. -pub enum RequestEntry<'a> { - Occupied(&'a mut PendingRequest), - Vacant(VacantRequestEntry<'a>), +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +enum Origin { + Cluster = 0, + Unspecified = 1, } -impl<'a> RequestEntry<'a> { - /// Yields the existing pending request or inserts it, with the given - /// metadata. 
- pub fn or_insert_with( - self, - group_assignments: impl FnOnce(GroupIndex) -> ParaId, - ) -> &'a mut PendingRequest { - let mut vacant = match self { - RequestEntry::Occupied(o) => return o, - RequestEntry::Vacant(v) => v, - }; +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +struct Priority { + origin: Origin, + attempts: usize, +} - vacant - .unique_identifiers - .entry(vacant.identifier.candidate_hash) - .or_insert_with(HashSet::new) - .insert(vacant.identifier.clone()); +/// An entry for manipulating a requested candidate. +pub struct Entry<'a> { + prev_index: usize, + identifier: CandidateIdentifier, + by_priority: &'a mut Vec<(Priority, CandidateIdentifier)>, + requested: &'a mut RequestedCandidate, +} - let group_index = vacant.identifier.group_index; +impl<'a> Entry<'a> { + /// Access the underlying requested candidate. + pub fn get_mut(&mut self) -> &mut RequestedCandidate { + &mut self.requested + } +} - vacant.vacant_request_entry.insert(PendingRequest { - expected_para: group_assignments(group_index), - known_by: Vec::new(), - }) +impl<'a> Drop for Entry<'a> { + fn drop(&mut self) { + insert_or_update_priority( + &mut *self.by_priority, + Some(self.prev_index), + self.identifier.clone(), + self.requested.priority.clone(), + ); } } /// A manager for outgoing requests. pub struct RequestManager { pending_responses: FuturesUnordered>>, - requests: HashMap, + requests: HashMap, + // sorted by priority. + by_priority: Vec<(Priority, CandidateIdentifier)>, // all unique identifiers for the candidate. unique_identifiers: HashMap>, } @@ -114,37 +125,68 @@ impl RequestManager { RequestManager { pending_responses: FuturesUnordered::new(), requests: HashMap::new(), + by_priority: Vec::new(), unique_identifiers: HashMap::new(), } } - /// Either yields the pending request data for the given parameters, - /// or yields a [`VacantRequestEntry`] which can be used to instantiate - /// it. 
- pub fn entry( + /// Gets or inserts the `Entry` required + pub fn get_or_insert( &mut self, relay_parent: Hash, candidate_hash: CandidateHash, group_index: GroupIndex, - ) -> RequestEntry { + group_assignment: ParaId, + ) -> Entry { let identifier = CandidateIdentifier { relay_parent, candidate_hash, group_index }; - let requests = &mut self.requests; - let unique_identifiers = &mut self.unique_identifiers; + let (candidate, fresh) = match self.requests.entry(identifier.clone()) { + HEntry::Occupied(e) => (e.into_mut(), false), + HEntry::Vacant(e) => ( + e.insert(RequestedCandidate { + priority: Priority { attempts: 0, origin: Origin::Unspecified }, + expected_para: group_assignment, + known_by: Vec::new(), + in_flight: false, + }), + true, + ), + }; + + let priority_index = if fresh { + self.unique_identifiers + .entry(candidate_hash) + .or_default() + .insert(identifier.clone()); + + insert_or_update_priority( + &mut self.by_priority, + None, + identifier.clone(), + candidate.priority.clone(), + ) + } else { + match self + .by_priority + .binary_search(&(candidate.priority.clone(), identifier.clone())) + { + Ok(i) => i, + Err(i) => unreachable!("requested candidates always have a priority entry; qed"), + } + }; - match requests.entry(identifier.clone()) { - HEntry::Vacant(v) => RequestEntry::Vacant(VacantRequestEntry { - vacant_request_entry: v, - unique_identifiers, - identifier, - }), - HEntry::Occupied(o) => RequestEntry::Occupied(o.into_mut()), + Entry { + prev_index: priority_index, + identifier, + by_priority: &mut self.by_priority, + requested: candidate, } } /// Remove all pending requests for the given candidate. 
pub fn remove_for(&mut self, candidate: CandidateHash) { if let Some(identifiers) = self.unique_identifiers.remove(&candidate) { + self.by_priority.retain(|(_priority, id)| !identifiers.contains(&id)); for id in identifiers { self.requests.remove(&id); } @@ -153,15 +195,52 @@ impl RequestManager { /// Yields the next request to dispatch, if there is any. /// - /// Provide a closure which informs us whether peers are still connected. + /// This function accepts two closures as an argument. + /// The first closure indicates whether a peer is still connected. + /// The second closure is used to construct a mask for limiting the + /// `Seconded` statements the response is allowed to contain. pub fn next_request( &mut self, peer_connected: impl Fn(&PeerId) -> bool, - ) -> Option { + seconded_mask: impl Fn(&CandidateIdentifier) -> BitVec, + ) -> Option> { + // TODO [now] + None + } + + // TODO [now]: `await_incoming -> IncomingPendingValidation` +} +fn insert_or_update_priority( + priority_sorted: &mut Vec<(Priority, CandidateIdentifier)>, + prev_index: Option, + candidate_identifier: CandidateIdentifier, + new_priority: Priority, +) -> usize { + if let Some(prev_index) = prev_index { + // GIGO: this behaves strangely if prev-index is not for the + // expected identifier. + if priority_sorted[prev_index].0 == new_priority { + // unchanged. + return prev_index + } else { + priority_sorted.remove(prev_index); + } } - // TODO [now]: `dispatch_next -> Option` + let item = (new_priority, candidate_identifier); + match priority_sorted.binary_search(&item) { + Ok(i) => i, // ignore if already present. + Err(i) => { + priority_sorted.insert(i, item); + i + }, + } +} - // TODO [now]: `await_incoming -> IncomingPendingValidation` +#[cfg(test)] +mod tests { + use super::*; + + // TODO [now]: test priority ordering. 
} diff --git a/primitives/src/v2/mod.rs b/primitives/src/v2/mod.rs index a86d2ac31397..2029eaea2075 100644 --- a/primitives/src/v2/mod.rs +++ b/primitives/src/v2/mod.rs @@ -780,7 +780,7 @@ impl From for CoreIndex { } /// The unique (during session) index of a validator group. -#[derive(Encode, Decode, Default, Clone, Copy, Debug, PartialEq, Eq, TypeInfo)] +#[derive(Encode, Decode, Default, Clone, Copy, Debug, PartialEq, Eq, TypeInfo, PartialOrd, Ord)] #[cfg_attr(feature = "std", derive(Hash, MallocSizeOf))] pub struct GroupIndex(pub u32); From b6b2d1335d21f1da6112e7dd8044e81af9a2cb72 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 3 Oct 2022 18:08:50 -0500 Subject: [PATCH 042/220] some request dispatch logic --- .../src/vstaging/requester.rs | 75 +++++++++++++++++-- 1 file changed, 68 insertions(+), 7 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/requester.rs b/node/network/statement-distribution/src/vstaging/requester.rs index 953f013bd663..99bf5f209a14 100644 --- a/node/network/statement-distribution/src/vstaging/requester.rs +++ b/node/network/statement-distribution/src/vstaging/requester.rs @@ -15,10 +15,13 @@ //! // TODO [now]: some module docs. +use crate::LOG_TARGET; + use polkadot_node_network_protocol::{ request_response::{ + outgoing::{Recipient as RequestRecipient, RequestError}, vstaging::{AttestedCandidateRequest, AttestedCandidateResponse}, - OutgoingRequest, MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, + OutgoingRequest, OutgoingResult, MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, }, PeerId, }; @@ -31,7 +34,7 @@ use std::{ cmp::Reverse, collections::{ hash_map::{Entry as HEntry, HashMap, VacantEntry}, - BTreeSet, HashSet, + BTreeSet, HashSet, VecDeque, }, }; @@ -63,7 +66,7 @@ struct TaggedResponse { pub struct RequestedCandidate { priority: Priority, expected_para: ParaId, - known_by: Vec, + known_by: VecDeque, in_flight: bool, } @@ -111,7 +114,7 @@ impl<'a> Drop for Entry<'a> { /// A manager for outgoing requests. 
pub struct RequestManager { - pending_responses: FuturesUnordered>>, + pending_responses: FuturesUnordered>>>, requests: HashMap, // sorted by priority. by_priority: Vec<(Priority, CandidateIdentifier)>, @@ -130,7 +133,8 @@ impl RequestManager { } } - /// Gets or inserts the `Entry` required + /// Gets an [`Entry`] for mutating a request and inserts it if the + /// manager doesn't store this request already. pub fn get_or_insert( &mut self, relay_parent: Hash, @@ -146,7 +150,7 @@ impl RequestManager { e.insert(RequestedCandidate { priority: Priority { attempts: 0, origin: Origin::Unspecified }, expected_para: group_assignment, - known_by: Vec::new(), + known_by: VecDeque::new(), in_flight: false, }), true, @@ -193,6 +197,8 @@ impl RequestManager { } } + // TODO [now]: removal based on relay-parent. + /// Yields the next request to dispatch, if there is any. /// /// This function accepts two closures as an argument. @@ -204,7 +210,62 @@ impl RequestManager { peer_connected: impl Fn(&PeerId) -> bool, seconded_mask: impl Fn(&CandidateIdentifier) -> BitVec, ) -> Option> { - // TODO [now] + if self.pending_responses.len() >= MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize { + return None + } + + // loop over all requests, in order of priority. + // do some active maintenance of the connected peers. + // dispatch the first request which is not in-flight already. + for (_priority, id) in &self.by_priority { + let entry = match self.requests.get_mut(&id) { + None => { + gum::error!( + target: LOG_TARGET, + identifier = ?id, + "Missing entry for priority queue member", + ); + + continue + }, + Some(e) => e, + }; + + if entry.in_flight { + continue + } + + entry.known_by.retain(&peer_connected); + + let recipient = match entry.known_by.pop_front() { + None => continue, // no peers. 
+ Some(r) => r, + }; + + entry.known_by.push_back(recipient.clone()); + + let (request, response_fut) = OutgoingRequest::new( + RequestRecipient::Peer(recipient.clone()), + AttestedCandidateRequest { + candidate_hash: id.candidate_hash, + seconded_mask: seconded_mask(&id), + }, + ); + + let stored_id = id.clone(); + self.pending_responses.push(Box::new(async move { + response_fut.await.map(|response| TaggedResponse { + identifier: stored_id, + requested_peer: recipient, + response, + }) + })); + + entry.in_flight = true; + + return Some(request) + } + None } From d800805f3084b41c42b32728b26ed5378b78060d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 3 Oct 2022 18:13:09 -0500 Subject: [PATCH 043/220] add seconded mask to tagged-request --- .../src/vstaging/requester.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/requester.rs b/node/network/statement-distribution/src/vstaging/requester.rs index 99bf5f209a14..cd820d957f51 100644 --- a/node/network/statement-distribution/src/vstaging/requester.rs +++ b/node/network/statement-distribution/src/vstaging/requester.rs @@ -30,12 +30,9 @@ use polkadot_primitives::vstaging::{CandidateHash, GroupIndex, Hash, ParaId}; use bitvec::{order::Lsb0, vec::BitVec}; use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered}; -use std::{ - cmp::Reverse, - collections::{ - hash_map::{Entry as HEntry, HashMap, VacantEntry}, - BTreeSet, HashSet, VecDeque, - }, +use std::collections::{ + hash_map::{Entry as HEntry, HashMap, VacantEntry}, + BTreeSet, HashSet, VecDeque, }; /// An identifier for a candidate. 
@@ -59,6 +56,7 @@ pub struct CandidateIdentifier { struct TaggedResponse { identifier: CandidateIdentifier, requested_peer: PeerId, + seconded_mask: BitVec, response: AttestedCandidateResponse, } @@ -244,11 +242,12 @@ impl RequestManager { entry.known_by.push_back(recipient.clone()); + let seconded_mask = seconded_mask(&id); let (request, response_fut) = OutgoingRequest::new( RequestRecipient::Peer(recipient.clone()), AttestedCandidateRequest { candidate_hash: id.candidate_hash, - seconded_mask: seconded_mask(&id), + seconded_mask: seconded_mask.clone(), }, ); @@ -257,6 +256,7 @@ impl RequestManager { response_fut.await.map(|response| TaggedResponse { identifier: stored_id, requested_peer: recipient, + seconded_mask, response, }) })); From 443ffabc1ca54dd56cb19964d754444a82903458 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 3 Oct 2022 20:03:31 -0500 Subject: [PATCH 044/220] amend manifest to hold group index --- node/network/protocol/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 7e245a23841b..cc08ee1effa6 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -593,8 +593,8 @@ pub mod vstaging { use parity_scale_codec::{Decode, Encode}; use polkadot_primitives::vstaging::{ - CandidateHash, CandidateIndex, CollatorId, CollatorSignature, Hash, Id as ParaId, - UncheckedSignedAvailabilityBitfield, UncheckedSignedStatement, + CandidateHash, CandidateIndex, CollatorId, CollatorSignature, GroupIndex, Hash, + Id as ParaId, UncheckedSignedAvailabilityBitfield, UncheckedSignedStatement, }; use polkadot_node_primitives::{ @@ -617,8 +617,8 @@ pub mod vstaging { pub relay_parent: Hash, /// The hash of the candidate. pub candidate_hash: CandidateHash, - /// The para that the candidate is assigned to. - pub para_id: ParaId, + /// The group index backing the candidate at the relay-parent. 
+ pub group_index: GroupIndex, /// The head-data corresponding to the candidate. pub parent_head_data_hash: Hash, /// A bitfield which indicates which validators in the para's From e0de216f692d0d0cd9ca8218897c472d4dba341b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 3 Oct 2022 20:09:34 -0500 Subject: [PATCH 045/220] handle errors and set up scaffold for response validation --- .../src/vstaging/mod.rs | 4 + .../src/vstaging/requester.rs | 210 ++++++++++++++++-- 2 files changed, 198 insertions(+), 16 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index fb9638eb973b..4b02cb725784 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -67,6 +67,10 @@ const COST_UNEXPECTED_STATEMENT_REMOTE: Rep = const COST_EXCESSIVE_SECONDED: Rep = Rep::CostMinor("Sent Excessive `Seconded` Statements"); const COST_INVALID_SIGNATURE: Rep = Rep::CostMajor("Invalid Statement Signature"); +const COST_IMPROPERLY_DECODED_RESPONSE: Rep = + Rep::CostMajor("Improperly Encoded Candidate Response"); + +const BENEFIT_VALID_RESPONSE: Rep = Rep::BenefitMajor("Peer Answered Candidate Request"); struct PerRelayParentState { validator_state: HashMap, diff --git a/node/network/statement-distribution/src/vstaging/requester.rs b/node/network/statement-distribution/src/vstaging/requester.rs index cd820d957f51..b9aaf5497282 100644 --- a/node/network/statement-distribution/src/vstaging/requester.rs +++ b/node/network/statement-distribution/src/vstaging/requester.rs @@ -15,6 +15,7 @@ //! // TODO [now]: some module docs. 
+use super::{BENEFIT_VALID_RESPONSE, COST_IMPROPERLY_DECODED_RESPONSE}; use crate::LOG_TARGET; use polkadot_node_network_protocol::{ @@ -23,16 +24,22 @@ use polkadot_node_network_protocol::{ vstaging::{AttestedCandidateRequest, AttestedCandidateResponse}, OutgoingRequest, OutgoingResult, MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, }, - PeerId, + PeerId, UnifiedReputationChange as Rep, +}; +use polkadot_primitives::vstaging::{ + CandidateHash, CommittedCandidateReceipt, GroupIndex, Hash, ParaId, PersistedValidationData, + SignedStatement, ValidatorId, ValidatorIndex, }; -use polkadot_primitives::vstaging::{CandidateHash, GroupIndex, Hash, ParaId}; use bitvec::{order::Lsb0, vec::BitVec}; -use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered}; +use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered}; -use std::collections::{ - hash_map::{Entry as HEntry, HashMap, VacantEntry}, - BTreeSet, HashSet, VecDeque, +use std::{ + collections::{ + hash_map::{Entry as HEntry, HashMap, VacantEntry}, + BTreeSet, HashSet, VecDeque, + }, + pin::Pin, }; /// An identifier for a candidate. @@ -57,13 +64,12 @@ struct TaggedResponse { identifier: CandidateIdentifier, requested_peer: PeerId, seconded_mask: BitVec, - response: AttestedCandidateResponse, + response: OutgoingResult, } /// A pending request. pub struct RequestedCandidate { priority: Priority, - expected_para: ParaId, known_by: VecDeque, in_flight: bool, } @@ -112,7 +118,7 @@ impl<'a> Drop for Entry<'a> { /// A manager for outgoing requests. pub struct RequestManager { - pending_responses: FuturesUnordered>>>, + pending_responses: FuturesUnordered>, requests: HashMap, // sorted by priority. 
by_priority: Vec<(Priority, CandidateIdentifier)>, @@ -138,7 +144,6 @@ impl RequestManager { relay_parent: Hash, candidate_hash: CandidateHash, group_index: GroupIndex, - group_assignment: ParaId, ) -> Entry { let identifier = CandidateIdentifier { relay_parent, candidate_hash, group_index }; @@ -147,7 +152,6 @@ impl RequestManager { HEntry::Vacant(e) => ( e.insert(RequestedCandidate { priority: Priority { attempts: 0, origin: Origin::Unspecified }, - expected_para: group_assignment, known_by: VecDeque::new(), in_flight: false, }), @@ -252,13 +256,13 @@ impl RequestManager { ); let stored_id = id.clone(); - self.pending_responses.push(Box::new(async move { - response_fut.await.map(|response| TaggedResponse { + self.pending_responses.push(Box::pin(async move { + TaggedResponse { identifier: stored_id, requested_peer: recipient, seconded_mask, - response, - }) + response: response_fut.await, + } })); entry.in_flight = true; @@ -269,7 +273,181 @@ impl RequestManager { None } - // TODO [now]: `await_incoming -> IncomingPendingValidation` + /// Await the next incoming response to a sent request, or immediately + /// return `None` if there are no pending responses. + pub async fn await_incoming(&mut self) -> Option { + match self.pending_responses.next().await { + None => None, + Some(response) => Some(UnhandledResponse { manager: self, response }), + } + } +} + +/// A response to a request, which has not yet been handled. +pub struct UnhandledResponse<'a> { + manager: &'a mut RequestManager, + response: TaggedResponse, +} + +impl<'a> UnhandledResponse<'a> { + /// Get the candidate identifier which the corresponding request + /// was classified under. + pub fn candidate_identifier(&self) -> &CandidateIdentifier { + &self.response.identifier + } + + /// Validate the response. If the response is valid, this will yield the + /// candidate, the [`PersistedValidationData`] of the candidate, and requested + /// checked statements. 
+ /// + /// This will also produce a record of misbehaviors by peers: + /// * If the response is partially valid, misbehavior by the responding peer. + /// * If there are other peers which have advertised the same candidate for different + /// relay-parents or para-ids, misbehavior reports for those peers will also + /// be generated. + /// + /// Finally, in the case that the response is either valid or partially valid, + /// this will clean up all remaining requests for the candidate in the manager. + /// + /// As parameters, the user should supply the canonical group array as well + /// as a mapping from validator index to validator ID. Additionally, the user should + /// provide a lookup used to check whether the para is allowed for the group at that relay-parent. + /// The validator pubkey mapping will not be queried except for validator indices in the group. + /// The allowed-para mapping will be queried with the para committed to by the candidate + /// receipt. + pub fn validate_response( + self, + group: &[ValidatorIndex], + validator_key_lookup: impl Fn(ValidatorIndex) -> ValidatorId, + allowed_para_lookup: impl Fn(ParaId) -> bool, + ) -> ResponseValidationOutput { + let UnhandledResponse { + manager, + response: TaggedResponse { identifier, requested_peer, response, seconded_mask }, + } = self; + + // handle races if the candidate is no longer known. + // this could happen if we requested the candidate under two + // different identifiers at the same time, and received a valid + // response on the other. 
+ let entry = match manager.requests.get_mut(&identifier) { + None => + return ResponseValidationOutput { + reputation_changes: Vec::new(), + request_status: CandidateRequestStatus::Outdated, + }, + Some(e) => e, + }; + + let priority_index = match manager + .by_priority + .binary_search(&(entry.priority.clone(), identifier.clone())) + { + Ok(i) => i, + Err(i) => unreachable!("requested candidates always have a priority entry; qed"), + }; + + entry.in_flight = false; + entry.priority.attempts += 1; + + // update the location in the priority queue. + insert_or_update_priority( + &mut manager.by_priority, + Some(priority_index), + identifier.clone(), + entry.priority.clone(), + ); + + let complete_response = match response { + Err(RequestError::InvalidResponse(e)) => { + gum::trace!( + target: LOG_TARGET, + err = ?e, + peer = ?requested_peer, + "Improperly encoded response" + ); + + return ResponseValidationOutput { + reputation_changes: vec![(requested_peer, COST_IMPROPERLY_DECODED_RESPONSE)], + request_status: CandidateRequestStatus::Incomplete, + } + }, + Err(RequestError::NetworkError(_) | RequestError::Canceled(_)) => + return ResponseValidationOutput { + reputation_changes: vec![], + request_status: CandidateRequestStatus::Incomplete, + }, + Ok(response) => response, + }; + + let mut output = validate_complete_response( + &identifier, + complete_response, + requested_peer, + seconded_mask, + group, + validator_key_lookup, + allowed_para_lookup, + ); + + if let CandidateRequestStatus::Complete { .. } = output.request_status { + // TODO [now]: clean up everything else to do with the candidate. + // add reputation punishments for all peers advertising the candidate under + // different identifiers. 
+ } + + output + } +} + +fn validate_complete_response( + identifier: &CandidateIdentifier, + response: AttestedCandidateResponse, + requested_peer: PeerId, + mut sent_seconded_bitmask: BitVec, + group: &[ValidatorIndex], + validator_key_lookup: impl Fn(ValidatorIndex) -> ValidatorId, + allowed_para_lookup: impl Fn(ParaId) -> bool, +) -> ResponseValidationOutput { + // TODO [now]: sanity check bitmask size. + // TODO [now]: filter out stuff outside of group or bitmask. + // TODO [now]: filter out duplicates. + // TODO [now]: check statement signatures. + + // sanity-check candidate response. + // note: roughly ascending cost of operations + // TODO [now]: check relay-parent + // TODO [now]: check expected para-id + // TODO [now]: check PVD against hash. + // TODO [now]: check candidate hash. + + unimplemented!() +} + +/// The status of the candidate request after the handling of a response. +pub enum CandidateRequestStatus { + /// The request was outdated at the point of receiving the response. + Outdated, + /// The response either did not arrive or was invalid. + Incomplete, + /// The response completed the request. Statements sent beyond the + /// mask have been ignored. More statements which may have been + /// expected may not be present, and higher-level code should + /// evaluate whether the candidate is still worth storing and whether + /// the sender should be punished. + Complete { + candidate: CommittedCandidateReceipt, + persisted_validation_data: PersistedValidationData, + statements: Vec, + }, +} + +/// Output of the response validation. +pub struct ResponseValidationOutput { + /// The status of the request. + pub request_status: CandidateRequestStatus, + /// Any reputation changes as a result of validating the response. 
+ pub reputation_changes: Vec<(PeerId, Rep)>, } fn insert_or_update_priority( From c493bca2edb6f7af0c6ec56c09d005997729dd19 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 3 Oct 2022 20:49:09 -0500 Subject: [PATCH 046/220] validate attested candidate responses --- .../src/vstaging/mod.rs | 4 + .../src/vstaging/requester.rs | 162 ++++++++++++++++-- 2 files changed, 147 insertions(+), 19 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 4b02cb725784..8f6be7fdccc8 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -69,8 +69,12 @@ const COST_EXCESSIVE_SECONDED: Rep = Rep::CostMinor("Sent Excessive `Seconded` S const COST_INVALID_SIGNATURE: Rep = Rep::CostMajor("Invalid Statement Signature"); const COST_IMPROPERLY_DECODED_RESPONSE: Rep = Rep::CostMajor("Improperly Encoded Candidate Response"); +const COST_INVALID_RESPONSE: Rep = Rep::CostMajor("Invalid Candidate Response"); +const COST_UNREQUESTED_RESPONSE_STATEMENT: Rep = + Rep::CostMajor("Un-requested Statement In Response"); const BENEFIT_VALID_RESPONSE: Rep = Rep::BenefitMajor("Peer Answered Candidate Request"); +const BENEFIT_VALID_STATEMENT: Rep = Rep::BenefitMajor("Peer provided a valid statement"); struct PerRelayParentState { validator_state: HashMap, diff --git a/node/network/statement-distribution/src/vstaging/requester.rs b/node/network/statement-distribution/src/vstaging/requester.rs index b9aaf5497282..f4675aa96b15 100644 --- a/node/network/statement-distribution/src/vstaging/requester.rs +++ b/node/network/statement-distribution/src/vstaging/requester.rs @@ -15,7 +15,10 @@ //! // TODO [now]: some module docs. 
-use super::{BENEFIT_VALID_RESPONSE, COST_IMPROPERLY_DECODED_RESPONSE}; +use super::{ + BENEFIT_VALID_RESPONSE, BENEFIT_VALID_STATEMENT, COST_IMPROPERLY_DECODED_RESPONSE, + COST_INVALID_RESPONSE, COST_INVALID_SIGNATURE, COST_UNREQUESTED_RESPONSE_STATEMENT, +}; use crate::LOG_TARGET; use polkadot_node_network_protocol::{ @@ -27,8 +30,9 @@ use polkadot_node_network_protocol::{ PeerId, UnifiedReputationChange as Rep, }; use polkadot_primitives::vstaging::{ - CandidateHash, CommittedCandidateReceipt, GroupIndex, Hash, ParaId, PersistedValidationData, - SignedStatement, ValidatorId, ValidatorIndex, + CandidateHash, CommittedCandidateReceipt, CompactStatement, GroupIndex, Hash, ParaId, + PersistedValidationData, SessionIndex, SignedStatement, SigningContext, ValidatorId, + ValidatorIndex, }; use bitvec::{order::Lsb0, vec::BitVec}; @@ -310,14 +314,12 @@ impl<'a> UnhandledResponse<'a> { /// this will clean up all remaining requests for the candidate in the manager. /// /// As parameters, the user should supply the canonical group array as well - /// as a mapping from validator index to validator ID. Additionally, the user should - /// provide a lookup used to check whether the para is allowed for the group at that relay-parent. - /// The validator pubkey mapping will not be queried except for validator indices in the group. - /// The allowed-para mapping will be queried with the para committed to by the candidate - /// receipt. + /// as a mapping from validator index to validator ID. The validator pubkey mapping + /// will not be queried except for validator indices in the group. 
pub fn validate_response( self, group: &[ValidatorIndex], + session: SessionIndex, validator_key_lookup: impl Fn(ValidatorIndex) -> ValidatorId, allowed_para_lookup: impl Fn(ParaId) -> bool, ) -> ResponseValidationOutput { @@ -386,8 +388,8 @@ impl<'a> UnhandledResponse<'a> { requested_peer, seconded_mask, group, + session, validator_key_lookup, - allowed_para_lookup, ); if let CandidateRequestStatus::Complete { .. } = output.request_status { @@ -406,22 +408,140 @@ fn validate_complete_response( requested_peer: PeerId, mut sent_seconded_bitmask: BitVec, group: &[ValidatorIndex], + session: SessionIndex, validator_key_lookup: impl Fn(ValidatorIndex) -> ValidatorId, - allowed_para_lookup: impl Fn(ParaId) -> bool, ) -> ResponseValidationOutput { - // TODO [now]: sanity check bitmask size. - // TODO [now]: filter out stuff outside of group or bitmask. - // TODO [now]: filter out duplicates. - // TODO [now]: check statement signatures. + // sanity check bitmask size. this is based entirely on + // local logic here. + if sent_seconded_bitmask.len() != group.len() { + gum::error!( + target: LOG_TARGET, + group_len = group.len(), + sent_bitmask_len = sent_seconded_bitmask.len(), + "Logic bug: group size != sent bitmask len" + ); + + // resize and attempt to continue. + sent_seconded_bitmask.resize(group.len(), true); + } + + let invalid_candidate_output = || ResponseValidationOutput { + request_status: CandidateRequestStatus::Incomplete, + reputation_changes: vec![(requested_peer.clone(), COST_INVALID_RESPONSE)], + }; // sanity-check candidate response. // note: roughly ascending cost of operations - // TODO [now]: check relay-parent - // TODO [now]: check expected para-id - // TODO [now]: check PVD against hash. - // TODO [now]: check candidate hash. 
+ { + if response.candidate_receipt.descriptor.relay_parent != identifier.relay_parent { + return invalid_candidate_output() + } + + if response.candidate_receipt.descriptor.persisted_validation_data_hash != + response.persisted_validation_data.hash() + { + return invalid_candidate_output() + } + + if response.candidate_receipt.hash() != identifier.candidate_hash { + return invalid_candidate_output() + } + } + + // statement checks. + let mut rep_changes = Vec::new(); + let statements = { + let mut statements = + Vec::with_capacity(std::cmp::min(response.statements.len(), group.len() * 2)); + + let mut received_seconded = BitVec::::repeat(false, group.len()); + let mut received_valid = BitVec::::repeat(false, group.len()); + + let index_in_group = |v: ValidatorIndex| group.iter().position(|x| &v == x); - unimplemented!() + let signing_context = + SigningContext { parent_hash: identifier.relay_parent, session_index: session }; + + for unchecked_statement in response.statements.into_iter().take(group.len() * 2) { + // ensure statement is from a validator in the group. + let i = match index_in_group(unchecked_statement.unchecked_validator_index()) { + Some(i) => i, + None => { + rep_changes.push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); + continue + }, + }; + + // ensure statement is on the correct candidate hash. + if unchecked_statement.unchecked_payload().candidate_hash() != + &identifier.candidate_hash + { + rep_changes.push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); + continue + } + + // filter out duplicates or statements outside the mask. + // note on indexing: we have ensured that the bitmask and the + // duplicate trackers have the correct size for the group. 
+ match unchecked_statement.unchecked_payload() { + CompactStatement::Seconded(_) => { + if !sent_seconded_bitmask[i] { + rep_changes + .push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); + continue + } + + if received_seconded[i] { + rep_changes + .push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); + continue + } + }, + CompactStatement::Valid(_) => + if received_valid[i] { + rep_changes + .push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); + continue + }, + } + + let validator_public = + validator_key_lookup(unchecked_statement.unchecked_validator_index()); + let checked_statement = + match unchecked_statement.try_into_checked(&signing_context, &validator_public) { + Err(_) => { + rep_changes.push((requested_peer.clone(), COST_INVALID_SIGNATURE)); + continue + }, + Ok(checked) => checked, + }; + + match checked_statement.payload() { + CompactStatement::Seconded(_) => { + received_seconded.set(i, true); + }, + CompactStatement::Valid(_) => { + received_valid.set(i, true); + }, + } + + statements.push(checked_statement); + rep_changes.push((requested_peer.clone(), BENEFIT_VALID_STATEMENT)); + } + + statements + }; + + rep_changes.push((requested_peer.clone(), BENEFIT_VALID_RESPONSE)); + + ResponseValidationOutput { + request_status: CandidateRequestStatus::Complete { + candidate: response.candidate_receipt, + persisted_validation_data: response.persisted_validation_data, + statements, + }, + reputation_changes: rep_changes, + } } /// The status of the candidate request after the handling of a response. @@ -435,6 +555,10 @@ pub enum CandidateRequestStatus { /// expected may not be present, and higher-level code should /// evaluate whether the candidate is still worth storing and whether /// the sender should be punished. + /// + /// This also does not indicate that the para has actually been checked + /// to be one that the group is assigned under. 
Higher-level code should + /// verify that this is the case and ignore the candidate accordingly if so. Complete { candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, From 930383696b3e4f6d5f81ab07510963f174a81555 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 7 Oct 2022 14:48:25 -0500 Subject: [PATCH 047/220] requester -> requests --- node/network/statement-distribution/src/vstaging/mod.rs | 2 +- .../src/vstaging/{requester.rs => requests.rs} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename node/network/statement-distribution/src/vstaging/{requester.rs => requests.rs} (100%) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 8f6be7fdccc8..f1ee709e4996 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -54,7 +54,7 @@ use statement_store::StatementStore; mod candidate_entry; mod cluster; -mod requester; +mod requests; mod statement_store; const COST_UNEXPECTED_STATEMENT: Rep = Rep::CostMinor("Unexpected Statement"); diff --git a/node/network/statement-distribution/src/vstaging/requester.rs b/node/network/statement-distribution/src/vstaging/requests.rs similarity index 100% rename from node/network/statement-distribution/src/vstaging/requester.rs rename to node/network/statement-distribution/src/vstaging/requests.rs From 41a43d6e67d7a3e7c3c6766da0acf20c2e9e9925 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 7 Oct 2022 14:51:18 -0500 Subject: [PATCH 048/220] add some utilities for manipulating requests --- .../statement-distribution/src/vstaging/requests.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index f4675aa96b15..07e899a459f5 100644 --- 
a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -79,7 +79,17 @@ pub struct RequestedCandidate { } impl RequestedCandidate { - // TODO [now]: add peer to known set + /// Add a peer to the set of known peers. + pub fn add_peer(&mut self, peer: PeerId) { + if !self.known_by.contains(&peer) { + self.known_by.push_back(peer); + } + } + + /// Note that the candidate is required for the cluster. + pub fn note_required_by_cluster(&mut self) { + self.priority.origin = Origin::Cluster; + } } #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] From e9bd5865d3ad155fa33693a4b0d808f5c923f935 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 7 Oct 2022 15:17:09 -0500 Subject: [PATCH 049/220] begin integrating requester --- .../src/vstaging/mod.rs | 69 +++++++++++++++---- .../src/vstaging/requests.rs | 2 +- .../src/vstaging/statement_store.rs | 19 +++-- 3 files changed, 69 insertions(+), 21 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index f1ee709e4996..b51f4b2ddf41 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -50,6 +50,7 @@ use crate::{ }; use candidate_entry::CandidateEntry; use cluster::{Accept as ClusterAccept, ClusterTracker, RejectIncoming as ClusterRejectIncoming}; +use requests::RequestManager; use statement_store::StatementStore; mod candidate_entry; @@ -112,6 +113,7 @@ pub(crate) struct State { keystore: SyncCryptoStorePtr, topology_storage: SessionGridTopologies, authorities: HashMap, + request_manager: RequestManager, } struct PeerState { @@ -367,6 +369,8 @@ pub(crate) fn handle_deactivate_leaf(state: &mut State, leaf_hash: Hash) { } }); + // TODO [now]: clean up requests + // clean up sessions based on everything remaining. 
let sessions: HashSet<_> = state.per_relay_parent.values().map(|r| r.session).collect(); state.per_session.retain(|s, _| sessions.contains(s)); @@ -442,16 +446,17 @@ pub(crate) async fn share_local_statement( }, }; - if !per_relay_parent.statement_store.insert(compact_statement.clone()) { - gum::warn!( - target: LOG_TARGET, - statement = ?compact_statement.payload(), - "Candidate backing issued redundant statement?", - ); - return Err(JfyiError::InvalidShare) + match per_relay_parent.statement_store.insert(compact_statement.clone()) { + Ok(false) | Err(_) => { + gum::warn!( + target: LOG_TARGET, + statement = ?compact_statement.payload(), + "Candidate backing issued redundant statement?", + ); + return Err(JfyiError::InvalidShare) + }, + Ok(true) => (compact_statement, candidate_hash), } - - (compact_statement, candidate_hash) }; // send the compact version of the statement to nodes in current group and next-up. If not a `Seconded` statement, @@ -796,12 +801,46 @@ async fn handle_incoming_statement( checked_statement.payload().clone(), ); - if !per_relay_parent.statement_store.insert(checked_statement) { - return + let sender_index = checked_statement.validator_index(); + let candidate_hash = *checked_statement.payload().candidate_hash(); + let was_fresh = match per_relay_parent.statement_store.insert(checked_statement) { + Err(_) => { + // sanity: should never happen. + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + validator_index = ?sender_index, + "Error -Cluster accepted message from unknown validator." 
+ ); + + return + }, + Ok(known) => known, + }; + + let sender_group_index = per_relay_parent + .statement_store + .validator_group_index(sender_index) + .expect("validator confirmed to be known by statement_store.insert; qed"); + + // Insert an unconfirmed candidate entry if needed + let candidate_entry = per_relay_parent + .candidates + .entry(candidate_hash) + .or_insert_with(|| CandidateEntry::unconfirmed(candidate_hash)); + + // If the candidate is not confirmed, note that we should attempt + // to request it from the given peer. + if !candidate_entry.is_confirmed() { + let mut request_entry = + state + .request_manager + .get_or_insert(relay_parent, candidate_hash, sender_group_index); + request_entry.get_mut().add_peer(peer); + request_entry.get_mut().set_cluster_priority(); } - // TODO [now]: - // * add a candidate entry if we need to - // * issue requests for the candidate if we need to - // * import the statement into backing if we can. + if was_fresh { + // TODO [now]: import to backing if pre-reqs are in place. + } } diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index 07e899a459f5..2214c9ba4b4a 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -87,7 +87,7 @@ impl RequestedCandidate { } /// Note that the candidate is required for the cluster. 
- pub fn note_required_by_cluster(&mut self) { + pub fn set_cluster_priority(&mut self) { self.priority.origin = Origin::Cluster; } } diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index b6336150f8c6..1d18d109696f 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -67,20 +67,26 @@ impl StatementStore { } } + /// Get the group index of a validator by index. If any statements by the validator + /// have been imported successfully, this is guaranteed to succeed. + pub fn validator_group_index(&self, validator: ValidatorIndex) -> Option { + self.validator_meta.get(&validator).map(|g| g.group) + } + /// Insert a statement. Returns `true` if was not known already, `false` if it was. - /// Ignores statements by unknown validators and returns `false`. - pub fn insert(&mut self, statement: SignedStatement) -> bool { + /// Ignores statements by unknown validators and returns an error. + pub fn insert(&mut self, statement: SignedStatement) -> Result { let validator_index = statement.validator_index(); let validator_meta = match self.validator_meta.get_mut(&validator_index) { - None => return false, + None => return Err(ValidatorUnknown), Some(m) => m, }; let compact = statement.payload().clone(); let fingerprint = (validator_index, compact.clone()); match self.known_statements.entry(fingerprint) { - HEntry::Occupied(_) => return false, + HEntry::Occupied(_) => return Ok(false), HEntry::Vacant(mut e) => { e.insert(statement); }, @@ -110,7 +116,7 @@ impl StatementStore { } } - true + Ok(true) } /// Get a bit-slice of validators in the group which have issued statements of the @@ -156,6 +162,9 @@ impl StatementStore { } } +/// Error indicating that the validator was unknown. 
+pub struct ValidatorUnknown; + type Fingerprint = (ValidatorIndex, CompactStatement); struct ValidatorMeta { From a23792458b024c33bcb9c756f007825590340eff Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 7 Oct 2022 16:55:30 -0500 Subject: [PATCH 050/220] start grid module --- .../src/vstaging/grid.rs | 44 +++++++++++++++++++ .../src/vstaging/mod.rs | 8 ++-- 2 files changed, 49 insertions(+), 3 deletions(-) create mode 100644 node/network/statement-distribution/src/vstaging/grid.rs diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs new file mode 100644 index 000000000000..af6973d43160 --- /dev/null +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -0,0 +1,44 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Utilities for handling distribution of backed candidates along +//! the grid. + +use polkadot_primitives::vstaging::{AuthorityDiscoveryId, GroupIndex}; + +use std::collections::{HashMap, HashSet}; + +/// Our local view of a subset of the grid topology organized around a specific group. +/// +/// This tracks which authorities we expect to communicate with concerning +/// candidates from the group. This includes both the authorities we are +/// expected to send to as well as the authorities we expect to receive from. 
+/// +/// In the case that this group is the group that we are locally assigned to, +/// the 'receiving' side will be empty. +struct SubTopologyGroupLocalView { + sending: HashSet, + receiving: HashSet, +} + +/// A tracker of knowledge from authorities within the grid for a +/// specific relay-parent. +struct PerRelayParentGridTracker { + by_authority: HashMap<(AuthorityDiscoveryId, GroupIndex), Knowledge> +} + +struct Knowledge { +} diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index b51f4b2ddf41..59823eab5428 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -55,6 +55,7 @@ use statement_store::StatementStore; mod candidate_entry; mod cluster; +mod grid; mod requests; mod statement_store; @@ -463,8 +464,7 @@ pub(crate) async fn share_local_statement( // send a `Seconded` statement as well. send_statement_direct(ctx, state, relay_parent, local_group, compact_statement).await; - // TODO [now]: - // 4. If the candidate is now backed, trigger 'backed candidate announcement' logic. + // TODO [now]: send along grid if backed, send statement to backing if we can Ok(()) } @@ -801,6 +801,7 @@ async fn handle_incoming_statement( checked_statement.payload().clone(), ); + let statement = checked_statement.payload().clone(); let sender_index = checked_statement.validator_index(); let candidate_hash = *checked_statement.payload().candidate_hash(); let was_fresh = match per_relay_parent.statement_store.insert(checked_statement) { @@ -841,6 +842,7 @@ async fn handle_incoming_statement( } if was_fresh { - // TODO [now]: import to backing if pre-reqs are in place. + // both of the below probably in some shared function. 
+ // TODO [now]: send along grid if backed, send statement to backing if we can } } From cfd25b9f6acccfd6b7caea3924951228f000e623 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 11 Oct 2022 16:27:35 -0500 Subject: [PATCH 051/220] tiny --- .../src/vstaging/grid.rs | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index af6973d43160..28df9b2d0500 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -34,6 +34,22 @@ struct SubTopologyGroupLocalView { receiving: HashSet, } +/// Our local view of the topology for a session, as it pertains to backed +/// candidate distribution. +struct TopologyView { + group_views: HashMap +} + +/// TODO [now]: build topology for the session. +/// For groups that we are part of: we receive from nobody and send to our X/Y peers. +/// For groups that we are not part of: we receive from any validator in the group we share a slice with. +/// and send to the corresponding X/Y slice. +/// For any validators we don't share a slice with, we receive from the nodes +/// which share a slice with them. +fn build_session_topology() -> TopologyView { + unimplemented!() +} + /// A tracker of knowledge from authorities within the grid for a /// specific relay-parent. struct PerRelayParentGridTracker { @@ -41,4 +57,11 @@ struct PerRelayParentGridTracker { } struct Knowledge { + // TODO [now] + // keep track of all the seconded statements they either have _claimed_ or + // have sent us. + // + // we need to do some spam protection here. similar to cluster - we will need + // to begrudgingly accept some overflow but we will need to ignore manifests + // which don't contain a `Seconded` statement from a validator under the limit. 
} From 55474044f9b95d696858f231e2a2b9710aaddf7e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 11 Oct 2022 21:45:26 -0500 Subject: [PATCH 052/220] refactor grid topology to expose more info to subsystems --- Cargo.lock | 1 + node/network/approval-distribution/src/lib.rs | 43 ++- .../approval-distribution/src/tests.rs | 95 +++++-- node/network/bitfield-distribution/Cargo.toml | 1 + node/network/bitfield-distribution/src/lib.rs | 33 ++- .../bitfield-distribution/src/tests.rs | 75 +++--- node/network/bridge/src/rx/mod.rs | 70 +++-- node/network/gossip-support/src/lib.rs | 78 ++---- node/network/gossip-support/src/tests.rs | 63 +++-- node/network/protocol/src/grid_topology.rs | 244 ++++++++++++++++-- .../src/legacy_v1/mod.rs | 36 ++- .../src/legacy_v1/tests.rs | 106 +++++--- .../src/vstaging/grid.rs | 4 +- .../src/vstaging/mod.rs | 8 +- node/subsystem-types/src/messages.rs | 19 +- .../src/messages/network_bridge_event.rs | 39 +-- .../implementers-guide/src/types/network.md | 20 +- .../src/types/overseer-protocol.md | 17 +- 18 files changed, 619 insertions(+), 333 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 82ba1f23d278..015e6730fc3f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6090,6 +6090,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "sp-application-crypto", + "sp-authority-discovery", "sp-core", "sp-keyring", "sp-keystore", diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index ca6212701f3e..35627c56767d 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -370,9 +370,13 @@ impl State { }) }, NetworkBridgeEvent::NewGossipTopology(topology) => { - let session = topology.session; - self.handle_new_session_topology(ctx, session, SessionGridTopology::from(topology)) - .await; + self.handle_new_session_topology( + ctx, + topology.session, + topology.topology, + topology.local_index, + ) + .await; }, 
NetworkBridgeEvent::PeerViewChange(peer_id, view) => { self.handle_peer_view_change(ctx, metrics, peer_id, view, rng).await; @@ -529,8 +533,14 @@ impl State { ctx: &mut Context, session: SessionIndex, topology: SessionGridTopology, + local_index: Option, ) { - self.topologies.insert_topology(session, topology); + if local_index.is_none() { + // this subsystem only matters to validators. + return + } + + self.topologies.insert_topology(session, topology, local_index); let topology = self.topologies.get_topology(session).expect("just inserted above; qed"); adjust_required_routing_and_propagate( @@ -541,7 +551,9 @@ impl State { |block_entry| block_entry.session == session, |required_routing, local, validator_index| { if *required_routing == RequiredRouting::PendingTopology { - *required_routing = topology.required_routing_by_index(*validator_index, local); + *required_routing = topology + .local_grid_neighbors() + .required_routing_by_index(*validator_index, local); } }, ) @@ -903,7 +915,7 @@ impl State { let local = source == MessageSource::Local; let required_routing = topology.map_or(RequiredRouting::PendingTopology, |t| { - t.required_routing_by_index(validator_index, local) + t.local_grid_neighbors().required_routing_by_index(validator_index, local) }); let message_state = match entry.candidates.get_mut(claimed_candidate_index as usize) { @@ -944,7 +956,10 @@ impl State { return false } - if let Some(true) = topology.as_ref().map(|t| t.route_to_peer(required_routing, peer)) { + if let Some(true) = topology + .as_ref() + .map(|t| t.local_grid_neighbors().route_to_peer(required_routing, peer)) + { return true } @@ -1233,7 +1248,8 @@ impl State { // the assignment to all aware peers in the required routing _except_ the original // source of the assignment. Hence the `in_topology_check`. // 3. Any randomly selected peers have been sent the assignment already. 
- let in_topology = topology.map_or(false, |t| t.route_to_peer(required_routing, peer)); + let in_topology = topology + .map_or(false, |t| t.local_grid_neighbors().route_to_peer(required_routing, peer)); in_topology || knowledge.sent.contains(message_subject, MessageKind::Assignment) }; @@ -1383,9 +1399,9 @@ impl State { let required_routing = message_state.required_routing; let rng = &mut *rng; let mut peer_filter = move |peer_id| { - let in_topology = topology - .as_ref() - .map_or(false, |t| t.route_to_peer(required_routing, peer_id)); + let in_topology = topology.as_ref().map_or(false, |t| { + t.local_grid_neighbors().route_to_peer(required_routing, peer_id) + }); in_topology || { let route_random = random_routing.sample(total_peers, rng); if route_random { @@ -1645,7 +1661,10 @@ async fn adjust_required_routing_and_propagate network_bridge_event::NewGossipTopology { - let mut t = network_bridge_event::NewGossipTopology { - session, - our_neighbors_x: HashMap::new(), - our_neighbors_y: HashMap::new(), + // This builds a grid topology which is a square matrix. + // The local validator occupies the top left-hand corner. + // The X peers occupy the same row and the Y peers occupy + // the same column. + + let local_index = 1; + + assert_eq!( + neighbors_x.len(), + neighbors_y.len(), + "mocking grid topology only implemented for squares", + ); + + let d = neighbors_x.len() + 1; + + let grid_size = d * d; + assert!(grid_size > 0); + assert!(all_peers.len() >= grid_size); + + let peer_info = |i: usize| TopologyPeerInfo { + peer_ids: vec![all_peers[i].0.clone()], + validator_index: ValidatorIndex::from(i as u32), + discovery_id: all_peers[i].1.clone(), }; - for &i in neighbors_x { - t.our_neighbors_x.insert( - all_peers[i].1.clone(), - network_bridge_event::TopologyPeerInfo { - peer_ids: vec![all_peers[i].0.clone()], - validator_index: ValidatorIndex::from(i as u32), - }, - ); + let mut canonical_shuffling: Vec<_> = (0..) 
+ .filter(|i| local_index != *i) + .filter(|i| !neighbors_x.contains(i)) + .filter(|i| !neighbors_y.contains(i)) + .take(grid_size) + .map(peer_info) + .collect(); + + // filled with junk except for own. + let mut shuffled_indices = vec![d + 1; grid_size]; + shuffled_indices[local_index] = 0; + canonical_shuffling[0] = peer_info(local_index); + + for (x_pos, v) in neighbors_x.iter().enumerate() { + let pos = 1 + x_pos; + canonical_shuffling[pos] = peer_info(*v); } - for &i in neighbors_y { - t.our_neighbors_y.insert( - all_peers[i].1.clone(), - network_bridge_event::TopologyPeerInfo { - peer_ids: vec![all_peers[i].0.clone()], - validator_index: ValidatorIndex::from(i as u32), - }, - ); + for (y_pos, v) in neighbors_y.iter().enumerate() { + let pos = d * (1 + y_pos); + canonical_shuffling[pos] = peer_info(*v); + } + + let topology = SessionGridTopology::new(shuffled_indices, canonical_shuffling); + + // sanity check. + { + let g_n = topology + .compute_grid_neighbors_for(ValidatorIndex(local_index as _)) + .expect("topology just constructed with this validator index"); + + assert_eq!(g_n.validator_indices_x.len(), neighbors_x.len()); + assert_eq!(g_n.validator_indices_y.len(), neighbors_y.len()); + + for i in neighbors_x { + assert!(g_n.validator_indices_x.contains(&ValidatorIndex(*i as _))); + } + + for i in neighbors_y { + assert!(g_n.validator_indices_y.contains(&ValidatorIndex(*i as _))); + } } - t + network_bridge_event::NewGossipTopology { + session, + topology, + local_index: Some(ValidatorIndex(local_index as _)), + } } async fn setup_gossip_topology( diff --git a/node/network/bitfield-distribution/Cargo.toml b/node/network/bitfield-distribution/Cargo.toml index a77180c246b6..9f655cecf659 100644 --- a/node/network/bitfield-distribution/Cargo.toml +++ b/node/network/bitfield-distribution/Cargo.toml @@ -19,6 +19,7 @@ polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } bitvec = { version = "1.0.0", default-features = false, features 
= ["alloc"] } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } maplit = "1.0.2" diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 56fe2727497a..8c34feed8eb1 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -28,7 +28,7 @@ use futures::{channel::oneshot, FutureExt}; use polkadot_node_network_protocol::{ self as net_protocol, grid_topology::{ - RandomRouting, RequiredRouting, SessionBoundGridTopologyStorage, SessionGridTopology, + GridNeighbors, RandomRouting, RequiredRouting, SessionBoundGridTopologyStorage, }, peer_set::{ProtocolVersion, ValidationVersion}, v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, @@ -368,7 +368,7 @@ async fn handle_bitfield_distribution( }; let msg = BitfieldGossipMessage { relay_parent, signed_availability }; - let topology = state.topologies.get_topology_or_fallback(session_idx); + let topology = state.topologies.get_topology_or_fallback(session_idx).local_grid_neighbors(); let required_routing = topology.required_routing_by_index(validator_index, true); relay_message( @@ -393,7 +393,7 @@ async fn handle_bitfield_distribution( async fn relay_message( ctx: &mut Context, job_data: &mut PerRelayParentData, - topology: &SessionGridTopology, + topology_neighbors: &GridNeighbors, peers: &mut HashMap, validator: ValidatorId, message: BitfieldGossipMessage, @@ -425,7 +425,7 @@ async fn relay_message( let message_needed = 
job_data.message_from_validator_needed_by_peer(&peer, &validator); if message_needed { - let in_topology = topology.route_to_peer(required_routing, &peer); + let in_topology = topology_neighbors.route_to_peer(required_routing, &peer); let need_routing = in_topology || { let route_random = random_routing.sample(total_peers, rng); if route_random { @@ -608,7 +608,8 @@ async fn process_incoming_peer_message( let topology = state .topologies - .get_topology_or_fallback(job_data.signing_context.session_index); + .get_topology_or_fallback(job_data.signing_context.session_index) + .local_grid_neighbors(); let required_routing = topology.required_routing_by_index(validator_index, false); metrics.on_bitfield_received(); @@ -657,14 +658,24 @@ async fn handle_network_msg( }, NetworkBridgeEvent::NewGossipTopology(gossip_topology) => { let session_index = gossip_topology.session; - let new_topology = SessionGridTopology::from(gossip_topology); - let newly_added = new_topology.peers_diff(&new_topology); - state.topologies.update_topology(session_index, new_topology); + let new_topology = gossip_topology.topology; + let prev_neighbors = + state.topologies.get_current_topology().local_grid_neighbors().clone(); + + state.topologies.update_topology( + session_index, + new_topology, + gossip_topology.local_index, + ); + let current_topology = state.topologies.get_current_topology(); + + let newly_added = current_topology.local_grid_neighbors().peers_diff(&prev_neighbors); + gum::debug!( target: LOG_TARGET, ?session_index, - "New gossip topology received {} unseen peers", - newly_added.len() + newly_added_peers = ?newly_added.len(), + "New gossip topology received", ); for new_peer in newly_added { @@ -748,7 +759,7 @@ async fn handle_peer_view_change( let added = peer_data.view.replace_difference(view).cloned().collect::>(); - let topology = state.topologies.get_current_topology(); + let topology = state.topologies.get_current_topology().local_grid_neighbors(); let is_gossip_peer = 
topology.route_to_peer(RequiredRouting::GridXY, &origin); let lucky = is_gossip_peer || util::gen_ratio_rng( diff --git a/node/network/bitfield-distribution/src/tests.rs b/node/network/bitfield-distribution/src/tests.rs index bf58d9363a64..f8341dc9f5ed 100644 --- a/node/network/bitfield-distribution/src/tests.rs +++ b/node/network/bitfield-distribution/src/tests.rs @@ -20,8 +20,10 @@ use bitvec::bitvec; use futures::executor; use maplit::hashmap; use polkadot_node_network_protocol::{ - grid_topology::SessionBoundGridTopologyStorage, our_view, peer_set::ValidationVersion, view, - ObservedRole, + grid_topology::{SessionBoundGridTopologyStorage, SessionGridTopology, TopologyPeerInfo}, + our_view, + peer_set::ValidationVersion, + view, ObservedRole, }; use polkadot_node_subsystem::{ jaeger, @@ -32,6 +34,7 @@ use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::v2::{AvailabilityBitfield, Signed, ValidatorIndex}; use rand_chacha::ChaCha12Rng; use sp_application_crypto::AppKey; +use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; use sp_core::Pair as PairT; use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::KeyStore, SyncCryptoStore, SyncCryptoStorePtr}; @@ -65,10 +68,11 @@ fn prewarmed_state( peers: Vec, ) -> ProtocolState { let relay_parent = known_message.relay_parent.clone(); - let mut topology: SessionGridTopology = Default::default(); - topology.peers_x = peers.iter().cloned().collect(); let mut topologies = SessionBoundGridTopologyStorage::default(); - topologies.update_topology(0_u32, topology); + topologies.update_topology(0_u32, SessionGridTopology::new(Vec::new(), Vec::new()), None); + topologies.get_current_topology_mut().local_grid_neighbors_mut().peers_x = + peers.iter().cloned().collect(); + ProtocolState { per_relay_parent: hashmap! 
{ relay_parent.clone() => @@ -482,10 +486,9 @@ fn do_not_relay_message_twice() { let mut rng = dummy_rng(); executor::block_on(async move { - let gossip_peers = SessionGridTopology { - peers_x: HashSet::from_iter(vec![peer_a.clone(), peer_b.clone()].into_iter()), - ..Default::default() - }; + let mut gossip_peers = GridNeighbors::empty(); + gossip_peers.peers_x = HashSet::from_iter(vec![peer_a.clone(), peer_b.clone()].into_iter()); + relay_message( &mut ctx, state.per_relay_parent.get_mut(&hash).unwrap(), @@ -821,33 +824,43 @@ fn topology_test() { .try_init(); let hash: Hash = [0; 32].into(); - let peers_x = (0..25).map(|_| PeerId::random()).collect::>(); - let peers_y = (0..25).map(|_| PeerId::random()).collect::>(); - - // ensure all unique - assert_eq!( - peers_x.iter().chain(peers_y.iter()).collect::>().len(), - peers_x.len() + peers_y.len() - ); // validator 0 key pair let (mut state, signing_context, keystore, validator) = state_with_view(our_view![hash], hash); - // Create a simple grid - let mut topology: SessionGridTopology = Default::default(); - topology.peers_x = peers_x.iter().cloned().collect::>(); - topology.validator_indices_x = peers_x + // Create a simple grid without any shuffling. We occupy position 1. 
+ let topology_peer_info: Vec<_> = (0..49) + .map(|i| TopologyPeerInfo { + peer_ids: vec![PeerId::random()], + validator_index: ValidatorIndex(i as _), + discovery_id: AuthorityDiscoveryPair::generate().0.public(), + }) + .collect(); + + let topology = SessionGridTopology::new((0usize..49).collect(), topology_peer_info.clone()); + state.topologies.update_topology(0_u32, topology, Some(ValidatorIndex(1))); + + let peers_x: Vec<_> = [0, 2, 3, 4, 5, 6] .iter() - .enumerate() - .map(|(idx, _)| ValidatorIndex(idx as u32)) - .collect::>(); - topology.peers_y = peers_y.iter().cloned().collect::>(); - topology.validator_indices_y = peers_y + .cloned() + .map(|i| topology_peer_info[i].peer_ids[0].clone()) + .collect(); + + let peers_y: Vec<_> = [8, 15, 22, 29, 36, 43] .iter() - .enumerate() - .map(|(idx, _)| ValidatorIndex((idx + peers_x.len()) as u32)) - .collect::>(); - state.topologies.update_topology(0_u32, topology); + .cloned() + .map(|i| topology_peer_info[i].peer_ids[0].clone()) + .collect(); + + { + let t = state.topologies.get_current_topology().local_grid_neighbors(); + for p_x in &peers_x { + assert!(t.peers_x.contains(p_x)); + } + for p_y in &peers_y { + assert!(t.peers_y.contains(p_y)); + } + } // create a signed message by validator 0 let payload = AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); @@ -904,7 +917,7 @@ fn topology_test() { AllMessages::NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage(peers, send_msg), ) => { - let topology = state.topologies.get_current_topology(); + let topology = state.topologies.get_current_topology().local_grid_neighbors(); // It should send message to all peers in y direction and to 4 random peers in x direction assert_eq!(peers_y.len() + 4, peers.len()); assert!(topology.peers_y.iter().all(|peer| peers.contains(&peer))); diff --git a/node/network/bridge/src/rx/mod.rs b/node/network/bridge/src/rx/mod.rs index 1a5bf97e73d0..c462d8087368 100644 --- a/node/network/bridge/src/rx/mod.rs +++ 
b/node/network/bridge/src/rx/mod.rs @@ -27,6 +27,7 @@ use sp_consensus::SyncOracle; use polkadot_node_network_protocol::{ self as net_protocol, + grid_topology::{SessionGridTopology, TopologyPeerInfo}, peer_set::{ CollationVersion, PeerSet, PeerSetProtocolNames, PerPeerSet, ProtocolVersion, ValidationVersion, @@ -38,10 +39,9 @@ use polkadot_node_network_protocol::{ use polkadot_node_subsystem::{ errors::SubsystemError, messages::{ - network_bridge_event::{NewGossipTopology, TopologyPeerInfo}, - ApprovalDistributionMessage, BitfieldDistributionMessage, CollatorProtocolMessage, - GossipSupportMessage, NetworkBridgeEvent, NetworkBridgeRxMessage, - StatementDistributionMessage, + network_bridge_event::NewGossipTopology, ApprovalDistributionMessage, + BitfieldDistributionMessage, CollatorProtocolMessage, GossipSupportMessage, + NetworkBridgeEvent, NetworkBridgeRxMessage, StatementDistributionMessage, }, overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, }; @@ -130,28 +130,6 @@ where } } -async fn update_gossip_peers_1d( - ads: &mut AD, - neighbors: N, -) -> HashMap -where - AD: validator_discovery::AuthorityDiscovery, - N: IntoIterator, - N::IntoIter: std::iter::ExactSizeIterator, -{ - let neighbors = neighbors.into_iter(); - let mut peers = HashMap::with_capacity(neighbors.len()); - for (authority, validator_index) in neighbors { - let addr = get_peer_id_by_authority_id(ads, authority.clone()).await; - - if let Some(peer_id) = addr { - peers.insert(authority, TopologyPeerInfo { peer_ids: vec![peer_id], validator_index }); - } - } - - peers -} - async fn handle_network_messages( mut sender: impl overseer::NetworkBridgeRxSenderTrait, mut network_service: impl Network, @@ -560,6 +538,25 @@ where } } +async fn flesh_out_topology_peers(ads: &mut AD, neighbors: N) -> Vec +where + AD: validator_discovery::AuthorityDiscovery, + N: IntoIterator, + N::IntoIter: std::iter::ExactSizeIterator, +{ + let neighbors = neighbors.into_iter(); + 
let mut peers = Vec::with_capacity(neighbors.len()); + for (discovery_id, validator_index) in neighbors { + let addr = get_peer_id_by_authority_id(ads, discovery_id.clone()).await; + + if let Some(peer_id) = addr { + peers.push(TopologyPeerInfo { peer_ids: vec![peer_id], validator_index, discovery_id }); + } + } + + peers +} + #[overseer::contextbounds(NetworkBridgeRx, prefix = self::overseer)] async fn run_incoming_orchestra_signals( mut ctx: Context, @@ -585,29 +582,28 @@ where msg: NetworkBridgeRxMessage::NewGossipTopology { session, - our_neighbors_x, - our_neighbors_y, + local_index, + canonical_shuffling, + shuffled_indices, }, } => { gum::debug!( target: LOG_TARGET, action = "NewGossipTopology", - neighbors_x = our_neighbors_x.len(), - neighbors_y = our_neighbors_y.len(), + ?session, + ?local_index, "Gossip topology has changed", ); - let gossip_peers_x = - update_gossip_peers_1d(&mut authority_discovery_service, our_neighbors_x).await; - - let gossip_peers_y = - update_gossip_peers_1d(&mut authority_discovery_service, our_neighbors_y).await; + let topology_peers = + flesh_out_topology_peers(&mut authority_discovery_service, canonical_shuffling) + .await; dispatch_validation_event_to_all_unbounded( NetworkBridgeEvent::NewGossipTopology(NewGossipTopology { session, - our_neighbors_x: gossip_peers_x, - our_neighbors_y: gossip_peers_y, + topology: SessionGridTopology::new(shuffled_indices, topology_peers), + local_index, }), ctx.sender(), ); diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs index 28c4abaf16ee..073bea620d0b 100644 --- a/node/network/gossip-support/src/lib.rs +++ b/node/network/gossip-support/src/lib.rs @@ -529,73 +529,37 @@ async fn update_gossip_topology( sp_core::blake2_256(&subject) }; - // shuffle the indices - let mut rng: ChaCha20Rng = SeedableRng::from_seed(random_seed); - let len = authorities.len(); - let mut indices: Vec = (0..len).collect(); - indices.shuffle(&mut rng); - let 
our_shuffled_position = indices - .iter() - .position(|i| *i == our_index) - .expect("our_index < len; indices contains it; qed"); - - let neighbors = matrix_neighbors(our_shuffled_position, len); - let row_neighbors = neighbors - .row_neighbors - .map(|i| indices[i]) - .map(|i| (authorities[i].clone(), ValidatorIndex::from(i as u32))) - .collect(); - - let column_neighbors = neighbors - .column_neighbors - .map(|i| indices[i]) - .map(|i| (authorities[i].clone(), ValidatorIndex::from(i as u32))) - .collect(); + // shuffle the validators and create the index mapping + let (shuffled_indices, canonical_shuffling) = { + let mut rng: ChaCha20Rng = SeedableRng::from_seed(random_seed); + let len = authorities.len(); + let mut shuffled_indices = vec![0; len]; + let mut canonical_shuffling: Vec<_> = authorities + .iter() + .enumerate() + .map(|(i, a)| (a.clone(), ValidatorIndex(i as _))) + .collect(); + + canonical_shuffling.shuffle(&mut rng); + for (i, (_, validator_index)) in canonical_shuffling.iter().enumerate() { + shuffled_indices[validator_index.0 as usize] = i; + } + + (shuffled_indices, canonical_shuffling) + }; sender .send_message(NetworkBridgeRxMessage::NewGossipTopology { session: session_index, - our_neighbors_x: row_neighbors, - our_neighbors_y: column_neighbors, + local_index: Some(ValidatorIndex(our_index as _)), + canonical_shuffling, + shuffled_indices, }) .await; Ok(()) } -struct MatrixNeighbors { - row_neighbors: R, - column_neighbors: C, -} - -/// Compute our row and column neighbors in a matrix -fn matrix_neighbors( - our_index: usize, - len: usize, -) -> MatrixNeighbors, impl Iterator> { - assert!(our_index < len, "our_index is computed using `enumerate`; qed"); - - // e.g. 
for size 11 the matrix would be - // - // 0 1 2 - // 3 4 5 - // 6 7 8 - // 9 10 - // - // and for index 10, the neighbors would be 1, 4, 7, 9 - - let sqrt = (len as f64).sqrt() as usize; - let our_row = our_index / sqrt; - let our_column = our_index % sqrt; - let row_neighbors = our_row * sqrt..std::cmp::min(our_row * sqrt + sqrt, len); - let column_neighbors = (our_column..len).step_by(sqrt); - - MatrixNeighbors { - row_neighbors: row_neighbors.filter(move |i| *i != our_index), - column_neighbors: column_neighbors.filter(move |i| *i != our_index), - } -} - #[overseer::subsystem(GossipSupport, error = SubsystemError, prefix = self::overseer)] impl GossipSupport where diff --git a/node/network/gossip-support/src/tests.rs b/node/network/gossip-support/src/tests.rs index cde47e2ba977..79f2a9a6db42 100644 --- a/node/network/gossip-support/src/tests.rs +++ b/node/network/gossip-support/src/tests.rs @@ -29,6 +29,7 @@ use sp_consensus_babe::{AllowedSlots, BabeEpochConfiguration, Epoch as BabeEpoch use sp_core::crypto::Pair as PairT; use sp_keyring::Sr25519Keyring; +use polkadot_node_network_protocol::grid_topology::{SessionGridTopology, TopologyPeerInfo}; use polkadot_node_subsystem::{ jaeger, messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest}, @@ -73,13 +74,15 @@ lazy_static! 
{ // [1 3] // [0 ] - static ref ROW_NEIGHBORS: Vec<(AuthorityDiscoveryId, ValidatorIndex)> = vec![ - (Sr25519Keyring::Charlie.public().into(), ValidatorIndex::from(2)), + static ref EXPECTED_SHUFFLING: Vec = vec![6, 4, 0, 5, 2, 3, 1]; + + static ref ROW_NEIGHBORS: Vec = vec![ + ValidatorIndex::from(2), ]; - static ref COLUMN_NEIGHBORS: Vec<(AuthorityDiscoveryId, ValidatorIndex)> = vec![ - (Sr25519Keyring::Two.public().into(), ValidatorIndex::from(5)), - (Sr25519Keyring::Eve.public().into(), ValidatorIndex::from(3)), + static ref COLUMN_NEIGHBORS: Vec = vec![ + ValidatorIndex::from(3), + ValidatorIndex::from(5), ]; } @@ -257,12 +260,31 @@ async fn test_neighbors(overseer: &mut VirtualOverseer, expected_session: Sessio overseer_recv(overseer).await, AllMessages::NetworkBridgeRx(NetworkBridgeRxMessage::NewGossipTopology { session: got_session, - our_neighbors_x, - our_neighbors_y, + local_index, + canonical_shuffling, + shuffled_indices, }) => { assert_eq!(expected_session, got_session); - let mut got_row: Vec<_> = our_neighbors_x.into_iter().collect(); - let mut got_column: Vec<_> = our_neighbors_y.into_iter().collect(); + assert_eq!(local_index, Some(ValidatorIndex(6))); + assert_eq!(shuffled_indices, EXPECTED_SHUFFLING.clone()); + + let grid_topology = SessionGridTopology::new( + shuffled_indices, + canonical_shuffling.into_iter() + .map(|(a, v)| TopologyPeerInfo { + validator_index: v, + discovery_id: a, + peer_ids: Vec::new(), + }) + .collect(), + ); + + let grid_neighbors = grid_topology + .compute_grid_neighbors_for(local_index.unwrap()) + .unwrap(); + + let mut got_row: Vec<_> = grid_neighbors.validator_indices_x.into_iter().collect(); + let mut got_column: Vec<_> = grid_neighbors.validator_indices_y.into_iter().collect(); got_row.sort(); got_column.sort(); assert_eq!(got_row, ROW_NEIGHBORS.clone()); @@ -694,26 +716,3 @@ fn issues_a_connection_request_when_last_request_was_mostly_unresolved() { assert_eq!(state.last_session_index, Some(1)); 
assert!(state.last_failure.is_none()); } - -#[test] -fn test_matrix_neighbors() { - for (our_index, len, expected_row, expected_column) in vec![ - (0usize, 1usize, vec![], vec![]), - (1, 2, vec![], vec![0usize]), - (0, 9, vec![1, 2], vec![3, 6]), - (9, 10, vec![], vec![0, 3, 6]), - (10, 11, vec![9], vec![1, 4, 7]), - (7, 11, vec![6, 8], vec![1, 4, 10]), - ] - .into_iter() - { - let matrix = matrix_neighbors(our_index, len); - let mut row_result: Vec<_> = matrix.row_neighbors.collect(); - let mut column_result: Vec<_> = matrix.column_neighbors.collect(); - row_result.sort(); - column_result.sort(); - - assert_eq!(row_result, expected_row); - assert_eq!(column_result, expected_column); - } -} diff --git a/node/network/protocol/src/grid_topology.rs b/node/network/protocol/src/grid_topology.rs index 73de9cfc25b1..609d5e2ba630 100644 --- a/node/network/protocol/src/grid_topology.rs +++ b/node/network/protocol/src/grid_topology.rs @@ -30,7 +30,7 @@ //! use crate::PeerId; -use polkadot_primitives::v2::{SessionIndex, ValidatorIndex}; +use polkadot_primitives::v2::{AuthorityDiscoveryId, SessionIndex, ValidatorIndex}; use rand::{CryptoRng, Rng}; use std::{ collections::{hash_map, HashMap, HashSet}, @@ -48,9 +48,102 @@ pub const DEFAULT_RANDOM_SAMPLE_RATE: usize = crate::MIN_GOSSIP_PEERS; /// The number of peers to randomly propagate messages to. pub const DEFAULT_RANDOM_CIRCULATION: usize = 4; -/// Topology representation -#[derive(Default, Clone, Debug)] +/// Information about a peer in the gossip topology for a session. +#[derive(Debug, Clone, PartialEq)] +pub struct TopologyPeerInfo { + /// The validator's known peer IDs. + pub peer_ids: Vec, + /// The index of the validator in the discovery keys of the corresponding + /// `SessionInfo`. This can extend _beyond_ the set of active parachain validators. + pub validator_index: ValidatorIndex, + /// The authority discovery public key of the validator in the corresponding + /// `SessionInfo`. 
+ pub discovery_id: AuthorityDiscoveryId, +} + +/// Topology representation for a session. +#[derive(Default, Clone, Debug, PartialEq)] pub struct SessionGridTopology { + /// An array mapping validator indices to their indices in the + /// shuffling itself. This has the same size as the number of validators + /// in the session. + shuffled_indices: Vec, + /// The canonical shuffling of validators for the session. + canonical_shuffling: Vec, +} + +impl SessionGridTopology { + /// Create a new session grid topology. + pub fn new(shuffled_indices: Vec, canonical_shuffling: Vec) -> Self { + SessionGridTopology { shuffled_indices, canonical_shuffling } + } + + /// Produces the outgoing routing logic for a particular peer. + /// + /// This fails if the validator index is out of bounds. + pub fn compute_grid_neighbors_for(&self, v: ValidatorIndex) -> Option { + let shuffled_val_index = *self.shuffled_indices.get(v.0 as usize)?; + let neighbors = matrix_neighbors(shuffled_val_index, self.shuffled_indices.len())?; + + let mut grid_subset = GridNeighbors::empty(); + for r_n in neighbors.row_neighbors { + let n = &self.canonical_shuffling[r_n]; + grid_subset.validator_indices_x.insert(n.validator_index); + for p in &n.peer_ids { + grid_subset.peers_x.insert(p.clone()); + } + } + + for c_n in neighbors.column_neighbors { + let n = &self.canonical_shuffling[c_n]; + grid_subset.validator_indices_y.insert(n.validator_index); + for p in &n.peer_ids { + grid_subset.peers_y.insert(p.clone()); + } + } + + Some(grid_subset) + } +} + +struct MatrixNeighbors { + row_neighbors: R, + column_neighbors: C, +} + +/// Compute our row and column neighbors in a matrix +fn matrix_neighbors( + val_index: usize, + len: usize, +) -> Option, impl Iterator>> { + if val_index >= len { + return None + } + + // e.g. 
for size 11 the matrix would be + // + // 0 1 2 + // 3 4 5 + // 6 7 8 + // 9 10 + // + // and for index 10, the neighbors would be 1, 4, 7, 9 + + let sqrt = (len as f64).sqrt() as usize; + let our_row = val_index / sqrt; + let our_column = val_index % sqrt; + let row_neighbors = our_row * sqrt..std::cmp::min(our_row * sqrt + sqrt, len); + let column_neighbors = (our_column..len).step_by(sqrt); + + Some(MatrixNeighbors { + row_neighbors: row_neighbors.filter(move |i| *i != val_index), + column_neighbors: column_neighbors.filter(move |i| *i != val_index), + }) +} + +/// Information about the grid neighbors for a particular node in the topology. +#[derive(Debug, Clone, PartialEq)] +pub struct GridNeighbors { /// Represent peers in the X axis pub peers_x: HashSet, /// Represent validators in the X axis @@ -61,7 +154,18 @@ pub struct SessionGridTopology { pub validator_indices_y: HashSet, } -impl SessionGridTopology { +impl GridNeighbors { + /// Utility function for creating an empty set of grid neighbors. + /// Useful for testing. + pub fn empty() -> Self { + GridNeighbors { + peers_x: HashSet::new(), + validator_indices_x: HashSet::new(), + peers_y: HashSet::new(), + validator_indices_y: HashSet::new(), + } + } + /// Given the originator of a message as a validator index, indicates the part of the topology /// we're meant to send the message to. pub fn required_routing_by_index( @@ -123,7 +227,7 @@ impl SessionGridTopology { } /// Returns the difference between this and the `other` topology as a vector of peers - pub fn peers_diff(&self, other: &SessionGridTopology) -> Vec { + pub fn peers_diff(&self, other: &Self) -> Vec { self.peers_x .iter() .chain(self.peers_y.iter()) @@ -138,15 +242,39 @@ impl SessionGridTopology { } } +/// An entry tracking a session grid topology and some memoized local neighbors. 
+#[derive(Debug)] +pub struct SessionGridTopologyEntry { + topology: SessionGridTopology, + local_neighbors: GridNeighbors, +} + +impl SessionGridTopologyEntry { + /// Access the local grid neighbors. + pub fn local_grid_neighbors(&self) -> &GridNeighbors { + &self.local_neighbors + } + + /// Access the local grid neighbors mutably. + pub fn local_grid_neighbors_mut(&mut self) -> &mut GridNeighbors { + &mut self.local_neighbors + } + + /// Access the underlying topology. + pub fn get(&self) -> &SessionGridTopology { + &self.topology + } +} + /// A set of topologies indexed by session #[derive(Default)] pub struct SessionGridTopologies { - inner: HashMap, usize)>, + inner: HashMap, usize)>, } impl SessionGridTopologies { /// Returns a topology for the specific session index - pub fn get_topology(&self, session: SessionIndex) -> Option<&SessionGridTopology> { + pub fn get_topology(&self, session: SessionIndex) -> Option<&SessionGridTopologyEntry> { self.inner.get(&session).and_then(|val| val.0.as_ref()) } @@ -166,63 +294,112 @@ impl SessionGridTopologies { } /// Insert a new topology, no-op if already present. 
- pub fn insert_topology(&mut self, session: SessionIndex, topology: SessionGridTopology) { + pub fn insert_topology( + &mut self, + session: SessionIndex, + topology: SessionGridTopology, + local_index: Option, + ) { let entry = self.inner.entry(session).or_insert((None, 0)); if entry.0.is_none() { - entry.0 = Some(topology); + let local_neighbors = local_index + .and_then(|l| topology.compute_grid_neighbors_for(l)) + .unwrap_or_else(GridNeighbors::empty); + + entry.0 = Some(SessionGridTopologyEntry { topology, local_neighbors }); } } } /// A simple storage for a topology and the corresponding session index -#[derive(Default, Debug)] -pub struct GridTopologySessionBound { - topology: SessionGridTopology, +#[derive(Debug)] +struct GridTopologySessionBound { + entry: SessionGridTopologyEntry, session_index: SessionIndex, } /// A storage for the current and maybe previous topology -#[derive(Default, Debug)] +#[derive(Debug)] pub struct SessionBoundGridTopologyStorage { current_topology: GridTopologySessionBound, prev_topology: Option, } +impl Default for SessionBoundGridTopologyStorage { + fn default() -> Self { + // having this struct be `Default` is objectively stupid + // but used in a few places + SessionBoundGridTopologyStorage { + current_topology: GridTopologySessionBound { + // session 0 is valid so we should use the upper bound + // as the default instead of the lower bound. + session_index: SessionIndex::max_value(), + entry: SessionGridTopologyEntry { + topology: SessionGridTopology { + shuffled_indices: Vec::new(), + canonical_shuffling: Vec::new(), + }, + local_neighbors: GridNeighbors::empty(), + }, + }, + prev_topology: None, + } + } +} + impl SessionBoundGridTopologyStorage { /// Return a grid topology based on the session index: /// If we need a previous session and it is registered in the storage, then return that session. 
/// Otherwise, return a current session to have some grid topology in any case - pub fn get_topology_or_fallback(&self, idx: SessionIndex) -> &SessionGridTopology { - self.get_topology(idx).unwrap_or(&self.current_topology.topology) + pub fn get_topology_or_fallback(&self, idx: SessionIndex) -> &SessionGridTopologyEntry { + self.get_topology(idx).unwrap_or(&self.current_topology.entry) } /// Return the grid topology for the specific session index, if no such a session is stored /// returns `None`. - pub fn get_topology(&self, idx: SessionIndex) -> Option<&SessionGridTopology> { + pub fn get_topology(&self, idx: SessionIndex) -> Option<&SessionGridTopologyEntry> { if let Some(prev_topology) = &self.prev_topology { if idx == prev_topology.session_index { - return Some(&prev_topology.topology) + return Some(&prev_topology.entry) } } if self.current_topology.session_index == idx { - return Some(&self.current_topology.topology) + return Some(&self.current_topology.entry) } None } /// Update the current topology preserving the previous one - pub fn update_topology(&mut self, session_index: SessionIndex, topology: SessionGridTopology) { + pub fn update_topology( + &mut self, + session_index: SessionIndex, + topology: SessionGridTopology, + local_index: Option, + ) { + let local_neighbors = local_index + .and_then(|l| topology.compute_grid_neighbors_for(l)) + .unwrap_or_else(GridNeighbors::empty); + let old_current = std::mem::replace( &mut self.current_topology, - GridTopologySessionBound { topology, session_index }, + GridTopologySessionBound { + entry: SessionGridTopologyEntry { topology, local_neighbors }, + session_index, + }, ); self.prev_topology.replace(old_current); } /// Returns a current grid topology - pub fn get_current_topology(&self) -> &SessionGridTopology { - &self.current_topology.topology + pub fn get_current_topology(&self) -> &SessionGridTopologyEntry { + &self.current_topology.entry + } + + /// Access the current grid topology mutably. 
Dangerous and intended + /// to be used in tests. + pub fn get_current_topology_mut(&mut self) -> &mut SessionGridTopologyEntry { + &mut self.current_topology.entry } } @@ -365,4 +542,27 @@ mod tests { let mut random_routing = RandomRouting { target: 10, sent: 0, sample_rate: 10 }; assert_eq!(run_random_routing(&mut random_routing, &mut rng, 10, 100), 10); } + + #[test] + fn test_matrix_neighbors() { + for (our_index, len, expected_row, expected_column) in vec![ + (0usize, 1usize, vec![], vec![]), + (1, 2, vec![], vec![0usize]), + (0, 9, vec![1, 2], vec![3, 6]), + (9, 10, vec![], vec![0, 3, 6]), + (10, 11, vec![9], vec![1, 4, 7]), + (7, 11, vec![6, 8], vec![1, 4, 10]), + ] + .into_iter() + { + let matrix = matrix_neighbors(our_index, len); + let mut row_result: Vec<_> = matrix.row_neighbors.collect(); + let mut column_result: Vec<_> = matrix.column_neighbors.collect(); + row_result.sort(); + column_result.sort(); + + assert_eq!(row_result, expected_row); + assert_eq!(column_result, expected_column); + } + } } diff --git a/node/network/statement-distribution/src/legacy_v1/mod.rs b/node/network/statement-distribution/src/legacy_v1/mod.rs index 104716213cee..a502a62a4eb2 100644 --- a/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -18,7 +18,9 @@ use parity_scale_codec::Encode; use polkadot_node_network_protocol::{ self as net_protocol, - grid_topology::{RequiredRouting, SessionBoundGridTopologyStorage, SessionGridTopology}, + grid_topology::{ + GridNeighbors, RequiredRouting, SessionBoundGridTopologyStorage, SessionGridTopology, + }, peer_set::{IsAuthority, PeerSet, ValidationVersion}, v1::{self as protocol_v1, StatementMetadata}, vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep, @@ -904,7 +906,9 @@ async fn circulate_statement_and_dependents( .with_candidate(statement.payload().candidate_hash()) .with_stage(jaeger::Stage::StatementDistribution); - let 
topology = topology_store.get_topology_or_fallback(active_head.session_index); + let topology = topology_store + .get_topology_or_fallback(active_head.session_index) + .local_grid_neighbors(); // First circulate the statement directly to all peers needing it. // The borrow of `active_head` needs to encompass only this (Rust) statement. let outputs: Option<(CandidateHash, Vec)> = { @@ -1001,7 +1005,7 @@ fn is_statement_large(statement: &SignedFullStatement) -> (bool, Option) #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn circulate_statement<'a, Context>( required_routing: RequiredRouting, - topology: &SessionGridTopology, + topology: &GridNeighbors, peers: &mut HashMap, ctx: &mut Context, relay_parent: Hash, @@ -1037,6 +1041,7 @@ async fn circulate_statement<'a, Context>( rng, MIN_GOSSIP_PEERS, ); + // We don't want to use less peers, than we would without any priority peers: let min_size = std::cmp::max(peers_to_send.len(), MIN_GOSSIP_PEERS); // Make set full: @@ -1380,7 +1385,8 @@ async fn handle_incoming_message_and_circulate<'a, Context, R>( let session_index = runtime.get_session_index_for_child(ctx.sender(), relay_parent).await; let topology = match session_index { - Ok(session_index) => topology_storage.get_topology_or_fallback(session_index), + Ok(session_index) => + topology_storage.get_topology_or_fallback(session_index).local_grid_neighbors(), Err(e) => { gum::debug!( target: LOG_TARGET, @@ -1389,7 +1395,7 @@ async fn handle_incoming_message_and_circulate<'a, Context, R>( e ); - topology_storage.get_current_topology() + topology_storage.get_current_topology().local_grid_neighbors() }, }; let required_routing = @@ -1675,7 +1681,7 @@ async fn handle_incoming_message<'a, Context>( #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn update_peer_view_and_maybe_send_unlocked( peer: PeerId, - topology: &SessionGridTopology, + topology: &GridNeighbors, peer_data: &mut PeerData, ctx: &mut Context, 
active_heads: &HashMap, @@ -1778,16 +1784,22 @@ pub(crate) async fn handle_network_update( let _ = metrics.time_network_bridge_update("new_gossip_topology"); let new_session_index = topology.session; - let new_topology: SessionGridTopology = topology.into(); - let old_topology = topology_storage.get_current_topology(); - let newly_added = new_topology.peers_diff(old_topology); - topology_storage.update_topology(new_session_index, new_topology); + let new_topology = topology.topology; + let old_topology = + topology_storage.get_current_topology().local_grid_neighbors().clone(); + topology_storage.update_topology(new_session_index, new_topology, topology.local_index); + + let newly_added = topology_storage + .get_current_topology() + .local_grid_neighbors() + .peers_diff(&old_topology); + for peer in newly_added { if let Some(data) = peers.get_mut(&peer) { let view = std::mem::take(&mut data.view); update_peer_view_and_maybe_send_unlocked( peer, - topology_storage.get_current_topology(), + topology_storage.get_current_topology().local_grid_neighbors(), data, ctx, &*active_heads, @@ -1822,7 +1834,7 @@ pub(crate) async fn handle_network_update( Some(data) => update_peer_view_and_maybe_send_unlocked( peer, - topology_storage.get_current_topology(), + topology_storage.get_current_topology().local_grid_neighbors(), data, ctx, &*active_heads, diff --git a/node/network/statement-distribution/src/legacy_v1/tests.rs b/node/network/statement-distribution/src/legacy_v1/tests.rs index f3db07757e06..66888d57b549 100644 --- a/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -22,6 +22,7 @@ use futures::executor::{self, block_on}; use futures_timer::Delay; use parity_scale_codec::{Decode, Encode}; use polkadot_node_network_protocol::{ + grid_topology::TopologyPeerInfo, peer_set::ValidationVersion, request_response::{ v1::{StatementFetchingRequest, StatementFetchingResponse}, @@ -534,7 +535,7 @@ fn 
peer_view_update_sends_messages() { let peer = PeerId::random(); executor::block_on(async move { - let mut topology: SessionGridTopology = Default::default(); + let mut topology = GridNeighbors::empty(); topology.peers_x = HashSet::from_iter(vec![peer.clone()].into_iter()); update_peer_view_and_maybe_send_unlocked( peer.clone(), @@ -666,7 +667,7 @@ fn circulated_statement_goes_to_all_peers_with_view() { }; let statement = StoredStatement { comparator: &comparator, statement: &statement }; - let mut topology: SessionGridTopology = Default::default(); + let mut topology = GridNeighbors::empty(); topology.peers_x = HashSet::from_iter(vec![peer_a.clone(), peer_b.clone(), peer_c.clone()].into_iter()); let needs_dependents = circulate_statement( @@ -2089,42 +2090,77 @@ fn handle_multiple_seconded_statements() { .await; } - // Explicitly add all `lucky` peers to the gossip peers to ensure that neither `peerA` not `peerB` - // receive statements + // Set up a topology which puts peers a & b in a column together. let gossip_topology = { - let mut t = network_bridge_event::NewGossipTopology { - session: 1, - our_neighbors_x: HashMap::new(), - our_neighbors_y: HashMap::new(), - }; - - // Create a topology to ensure that we send messages not to `peer_a`/`peer_b` - for (i, peer) in lucky_peers.iter().enumerate() { - let authority_id = AuthorityPair::generate().0.public(); - t.our_neighbors_y.insert( - authority_id, - network_bridge_event::TopologyPeerInfo { - peer_ids: vec![peer.clone()], - validator_index: (i as u32 + 2_u32).into(), - }, - ); + // create a lucky_peers+1 * lucky_peers+1 grid topology where we are at index 2, sharing + // a row with peer_a (0) and peer_b (1) and a column with all the lucky peers. + // the rest is filled with junk. + // This is an absolute garbage hack depending on quirks of the implementation + // and not on sound architecture. 
+ + let n_lucky = lucky_peers.len(); + let dim = n_lucky + 1; + let grid_size = dim * dim; + let topology_peer_info: Vec<_> = (0..grid_size) + .map(|i| { + if i == 0 { + TopologyPeerInfo { + peer_ids: vec![peer_a.clone()], + validator_index: ValidatorIndex(0), + discovery_id: AuthorityPair::generate().0.public(), + } + } else if i == 1 { + TopologyPeerInfo { + peer_ids: vec![peer_b.clone()], + validator_index: ValidatorIndex(1), + discovery_id: AuthorityPair::generate().0.public(), + } + } else if i == 2 { + TopologyPeerInfo { + peer_ids: vec![], + validator_index: ValidatorIndex(2), + discovery_id: AuthorityPair::generate().0.public(), + } + } else if (i - 2) % dim == 0 { + let lucky_index = ((i - 2) / dim) - 1; + TopologyPeerInfo { + peer_ids: vec![lucky_peers[lucky_index].clone()], + validator_index: ValidatorIndex(i as _), + discovery_id: AuthorityPair::generate().0.public(), + } + } else { + TopologyPeerInfo { + peer_ids: vec![PeerId::random()], + validator_index: ValidatorIndex(i as _), + discovery_id: AuthorityPair::generate().0.public(), + } + } + }) + .collect(); + + // also a hack: this is only required to be accurate for + // the validator indices we compute grid neighbors for. + let mut shuffled_indices = vec![0; grid_size]; + shuffled_indices[2] = 2; + + // Some sanity checking to make sure this hack is set up correctly. 
+ let topology = SessionGridTopology::new(shuffled_indices, topology_peer_info); + let grid_neighbors = topology.compute_grid_neighbors_for(ValidatorIndex(2)).unwrap(); + assert_eq!(grid_neighbors.peers_x.len(), 25); + assert!(grid_neighbors.peers_x.contains(&peer_a)); + assert!(grid_neighbors.peers_x.contains(&peer_b)); + assert!(!grid_neighbors.peers_y.contains(&peer_b)); + assert!(!grid_neighbors.route_to_peer(RequiredRouting::GridY, &peer_b)); + assert_eq!(grid_neighbors.peers_y.len(), lucky_peers.len()); + for lucky in &lucky_peers { + assert!(grid_neighbors.peers_y.contains(lucky)); } - t.our_neighbors_x.insert( - AuthorityPair::generate().0.public(), - network_bridge_event::TopologyPeerInfo { - peer_ids: vec![peer_a.clone()], - validator_index: 0_u32.into(), - }, - ); - t.our_neighbors_x.insert( - AuthorityPair::generate().0.public(), - network_bridge_event::TopologyPeerInfo { - peer_ids: vec![peer_b.clone()], - validator_index: 1_u32.into(), - }, - ); - t + network_bridge_event::NewGossipTopology { + session: 1, + topology, + local_index: Some(ValidatorIndex(2)), + } }; handle diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 28df9b2d0500..aa53219268d5 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -37,7 +37,7 @@ struct SubTopologyGroupLocalView { /// Our local view of the topology for a session, as it pertains to backed /// candidate distribution. struct TopologyView { - group_views: HashMap + group_views: HashMap, } /// TODO [now]: build topology for the session. @@ -53,7 +53,7 @@ fn build_session_topology() -> TopologyView { /// A tracker of knowledge from authorities within the grid for a /// specific relay-parent. 
struct PerRelayParentGridTracker { - by_authority: HashMap<(AuthorityDiscoveryId, GroupIndex), Knowledge> + by_authority: HashMap<(AuthorityDiscoveryId, GroupIndex), Knowledge>, } struct Knowledge { diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 59823eab5428..5d4b89bb1367 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -179,8 +179,12 @@ pub(crate) async fn handle_network_update( }, NetworkBridgeEvent::NewGossipTopology(topology) => { let new_session_index = topology.session; - let new_topology: SessionGridTopology = topology.into(); - state.topology_storage.insert_topology(new_session_index, new_topology); + let new_topology = topology.topology; + state.topology_storage.insert_topology( + new_session_index, + new_topology, + topology.local_index, + ); // TODO [now]: can we not update authority IDs for peers? diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index faf6daa09357..f74fa1e6f493 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -331,18 +331,13 @@ pub enum NetworkBridgeRxMessage { NewGossipTopology { /// The session info this gossip topology is concerned with. session: SessionIndex, - /// Ids of our neighbors in the X dimensions of the new gossip topology, - /// along with their validator indices within the session. - /// - /// We're not necessarily connected to all of them, but we should - /// try to be. - our_neighbors_x: HashMap, - /// Ids of our neighbors in the X dimensions of the new gossip topology, - /// along with their validator indices within the session. - /// - /// We're not necessarily connected to all of them, but we should - /// try to be. - our_neighbors_y: HashMap, + /// Our validator index in the session, if any. 
+ local_index: Option, + /// The canonical shuffling of validators for the session. + canonical_shuffling: Vec<(AuthorityDiscoveryId, ValidatorIndex)>, + /// The reverse mapping of `canonical_shuffling`: from validator index + /// to the index in `canonical_shuffling` + shuffled_indices: Vec, }, } diff --git a/node/subsystem-types/src/messages/network_bridge_event.rs b/node/subsystem-types/src/messages/network_bridge_event.rs index cd0bb9894b6b..5abad8a3c22c 100644 --- a/node/subsystem-types/src/messages/network_bridge_event.rs +++ b/node/subsystem-types/src/messages/network_bridge_event.rs @@ -14,10 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use std::{ - collections::{HashMap, HashSet}, - convert::TryFrom, -}; +use std::{collections::HashSet, convert::TryFrom}; pub use sc_network::{PeerId, ReputationChange}; @@ -27,25 +24,15 @@ use polkadot_node_network_protocol::{ }; use polkadot_primitives::v2::{AuthorityDiscoveryId, SessionIndex, ValidatorIndex}; -/// Information about a peer in the gossip topology for a session. -#[derive(Debug, Clone, PartialEq)] -pub struct TopologyPeerInfo { - /// The validator's known peer IDs. - pub peer_ids: Vec, - /// The index of the validator in the discovery keys of the corresponding - /// `SessionInfo`. This can extend _beyond_ the set of active parachain validators. - pub validator_index: ValidatorIndex, -} - /// A struct indicating new gossip topology. #[derive(Debug, Clone, PartialEq)] pub struct NewGossipTopology { /// The session index this topology corresponds to. pub session: SessionIndex, - /// Neighbors in the 'X' dimension of the grid. - pub our_neighbors_x: HashMap, - /// Neighbors in the 'Y' dimension of the grid. - pub our_neighbors_y: HashMap, + /// The topology itself. + pub topology: SessionGridTopology, + /// The local validator index, if any. + pub local_index: Option, } /// Events from network. 
@@ -122,19 +109,3 @@ impl NetworkBridgeEvent { }) } } - -impl From for SessionGridTopology { - fn from(topology: NewGossipTopology) -> Self { - let peers_x = - topology.our_neighbors_x.values().flat_map(|p| &p.peer_ids).cloned().collect(); - let peers_y = - topology.our_neighbors_y.values().flat_map(|p| &p.peer_ids).cloned().collect(); - - let validator_indices_x = - topology.our_neighbors_x.values().map(|p| p.validator_index.clone()).collect(); - let validator_indices_y = - topology.our_neighbors_y.values().map(|p| p.validator_index.clone()).collect(); - - SessionGridTopology { peers_x, peers_y, validator_indices_x, validator_indices_y } - } -} diff --git a/roadmap/implementers-guide/src/types/network.md b/roadmap/implementers-guide/src/types/network.md index 0d09a682cff2..b698ca2075bf 100644 --- a/roadmap/implementers-guide/src/types/network.md +++ b/roadmap/implementers-guide/src/types/network.md @@ -145,10 +145,19 @@ These updates are posted from the [Network Bridge Subsystem](../node/utility/net struct NewGossipTopology { /// The session index this topology corresponds to. session: SessionIndex, - /// Neighbors in the 'X' dimension of the grid. - our_neighbors_x: HashMap, - /// Neighbors in the 'Y' dimension of the grid. - our_neighbors_y: HashMap, + /// The topology itself. + topology: SessionGridTopology, + /// The local validator index, if any. + local_index: Option, +} + +struct SessionGridTopology { + /// An array mapping validator indices to their indices in the + /// shuffling itself. This has the same size as the number of validators + /// in the session. + shuffled_indices: Vec, + /// The canonical shuffling of validators for the session. + canonical_shuffling: Vec, } struct TopologyPeerInfo { @@ -157,6 +166,9 @@ struct TopologyPeerInfo { /// The index of the validator in the discovery keys of the corresponding /// `SessionInfo`. This can extend _beyond_ the set of active parachain validators. 
validator_index: ValidatorIndex, + /// The authority discovery public key of the validator in the corresponding + /// `SessionInfo`. + discovery_id: AuthorityDiscoveryId, } enum NetworkBridgeEvent { diff --git a/roadmap/implementers-guide/src/types/overseer-protocol.md b/roadmap/implementers-guide/src/types/overseer-protocol.md index f47fefe23097..06635bd15127 100644 --- a/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -555,14 +555,15 @@ enum NetworkBridgeMessage { /// Inform the distribution subsystems about the new /// gossip network topology formed. NewGossipTopology { - /// The session this topology corresponds to. - session: SessionIndex, - /// Ids of our neighbors in the X dimension of the new gossip topology. - /// We're not necessarily connected to all of them, but we should try to be. - our_neighbors_x: HashSet, - /// Ids of our neighbors in the Y dimension of the new gossip topology. - /// We're not necessarily connected to all of them, but we should try to be. - our_neighbors_y: HashSet, + /// The session info this gossip topology is concerned with. + session: SessionIndex, + /// Our validator index in the session, if any. + local_index: Option, + /// The canonical shuffling of validators for the session. 
+ canonical_shuffling: Vec<(AuthorityDiscoveryId, ValidatorIndex)>, + /// The reverse mapping of `canonical_shuffling`: from validator index + /// to the index in `canonical_shuffling` + shuffled_indices: Vec, } } ``` From 9df8063c95c8997a0025cc167db6b57e40ea09b4 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 11 Oct 2022 23:52:51 -0500 Subject: [PATCH 053/220] fix grid_topology test --- node/network/protocol/src/grid_topology.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/network/protocol/src/grid_topology.rs b/node/network/protocol/src/grid_topology.rs index 609d5e2ba630..9f1d01921b33 100644 --- a/node/network/protocol/src/grid_topology.rs +++ b/node/network/protocol/src/grid_topology.rs @@ -555,7 +555,7 @@ mod tests { ] .into_iter() { - let matrix = matrix_neighbors(our_index, len); + let matrix = matrix_neighbors(our_index, len).unwrap(); let mut row_result: Vec<_> = matrix.row_neighbors.collect(); let mut column_result: Vec<_> = matrix.column_neighbors.collect(); row_result.sort(); From 2e27afae406a5bfd4071dc867f743a372aae7a6b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 12 Oct 2022 01:24:05 -0500 Subject: [PATCH 054/220] fix overseer test --- node/overseer/src/tests.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/node/overseer/src/tests.rs b/node/overseer/src/tests.rs index 59a7c3a57e98..2f4fdd1f1e69 100644 --- a/node/overseer/src/tests.rs +++ b/node/overseer/src/tests.rs @@ -873,8 +873,9 @@ fn test_network_bridge_tx_msg() -> NetworkBridgeTxMessage { fn test_network_bridge_rx_msg() -> NetworkBridgeRxMessage { NetworkBridgeRxMessage::NewGossipTopology { session: SessionIndex::from(0_u32), - our_neighbors_x: HashMap::new(), - our_neighbors_y: HashMap::new(), + local_index: None, + canonical_shuffling: Vec::new(), + shuffled_indices: Vec::new(), } } From 3b6d5dab6aa04ecfd82b339b07aa8af4682e07ec Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 13 Oct 2022 01:25:56 -0500 
Subject: [PATCH 055/220] implement topology group-based view construction logic --- .../src/vstaging/grid.rs | 113 ++++++++++++++++-- 1 file changed, 103 insertions(+), 10 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index aa53219268d5..e4775e9ae16c 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -17,11 +17,18 @@ //! Utilities for handling distribution of backed candidates along //! the grid. -use polkadot_primitives::vstaging::{AuthorityDiscoveryId, GroupIndex}; +use polkadot_primitives::vstaging::{AuthorityDiscoveryId, GroupIndex, ValidatorIndex}; +use polkadot_node_network_protocol::{ + PeerId, + grid_topology::SessionGridTopology, +}; use std::collections::{HashMap, HashSet}; -/// Our local view of a subset of the grid topology organized around a specific group. +use super::LOG_TARGET; + +/// Our local view of a subset of the grid topology organized around a specific validator +/// group. /// /// This tracks which authorities we expect to communicate with concerning /// candidates from the group. This includes both the authorities we are @@ -29,25 +36,111 @@ use std::collections::{HashMap, HashSet}; /// /// In the case that this group is the group that we are locally assigned to, /// the 'receiving' side will be empty. -struct SubTopologyGroupLocalView { - sending: HashSet, - receiving: HashSet, +struct GroupSubView { + sending: HashSet, + receiving: HashSet, } /// Our local view of the topology for a session, as it pertains to backed /// candidate distribution. -struct TopologyView { - group_views: HashMap, +struct SessionTopologyView { + group_views: HashMap, } -/// TODO [now]: build topology for the session. +/// Build a view of the topology for the session. /// For groups that we are part of: we receive from nobody and send to our X/Y peers. 
/// For groups that we are not part of: we receive from any validator in the group we share a slice with. /// and send to the corresponding X/Y slice. /// For any validators we don't share a slice with, we receive from the nodes /// which share a slice with them. -fn build_session_topology() -> TopologyView { - unimplemented!() +fn build_session_topology( + groups: &[Vec], + topology: &SessionGridTopology, + our_index: ValidatorIndex, +) -> SessionTopologyView { + let mut view = SessionTopologyView { + group_views: HashMap::new(), + }; + + let our_neighbors = match topology.compute_grid_neighbors_for(our_index) { + None => { + gum::warn!( + target: LOG_TARGET, + ?our_index, + "our index unrecognized in topology?" + ); + + return view; + }, + Some(n) => n, + }; + + for (i, group) in groups.into_iter().enumerate() { + let mut sub_view = GroupSubView { + sending: HashSet::new(), + receiving: HashSet::new(), + }; + + if group.contains(&our_index) { + sub_view.sending.extend(our_neighbors.validator_indices_x.iter().cloned()); + sub_view.sending.extend(our_neighbors.validator_indices_y.iter().cloned()); + } else { + for &group_val in group { + // If the validator shares a slice with us, we expect to + // receive from them and send to our neighbors in the other + // dimension. + + if our_neighbors.validator_indices_x.contains(&group_val) { + sub_view.receiving.insert(group_val); + sub_view.sending.extend(our_neighbors.validator_indices_y.iter().cloned()); + + continue + } + + if our_neighbors.validator_indices_y.contains(&group_val) { + sub_view.receiving.insert(group_val); + sub_view.sending.extend(our_neighbors.validator_indices_x.iter().cloned()); + + continue + } + + // If they don't share a slice with us, we don't send to anybody + // but receive from any peers sharing a dimension with both of us. 
+ let their_neighbors = match topology.compute_grid_neighbors_for(group_val) { + None => { + gum::warn!( + target: LOG_TARGET, + index = ?group_val, + "validator index unrecognized in topology?" + ); + + continue; + } + Some(n) => n, + }; + + // their X, our Y + for potential_link in &their_neighbors.validator_indices_x { + if our_neighbors.validator_indices_y.contains(potential_link) { + sub_view.receiving.insert(*potential_link); + break; // one max + } + } + + // their Y, our X + for potential_link in &their_neighbors.validator_indices_y { + if our_neighbors.validator_indices_x.contains(potential_link) { + sub_view.receiving.insert(*potential_link); + break; // one max + } + } + } + } + + view.group_views.insert(GroupIndex(i as _), sub_view); + } + + view } /// A tracker of knowledge from authorities within the grid for a From 6cf0187ae106f0d91277a0ac0a4ac5736ec37f4e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 14 Oct 2022 19:09:23 -0500 Subject: [PATCH 056/220] fmt --- node/core/backing/src/lib.rs | 5 ++--- node/network/protocol/src/request_response/vstaging.rs | 4 ++-- node/network/statement-distribution/src/legacy_v1/mod.rs | 4 +--- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index a743c529165a..ef63178c8f26 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -651,7 +651,7 @@ async fn validate_and_make_available( let pov = match pov { PoVData::Ready(pov) => pov, - PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } => { + PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } => match request_pov( &mut sender, relay_parent, @@ -674,8 +674,7 @@ async fn validate_and_make_available( }, Err(err) => return Err(err), Ok(pov) => pov, - } - } + }, }; let v = { diff --git a/node/network/protocol/src/request_response/vstaging.rs b/node/network/protocol/src/request_response/vstaging.rs index 
f76cb32bbd55..819bc9881af4 100644 --- a/node/network/protocol/src/request_response/vstaging.rs +++ b/node/network/protocol/src/request_response/vstaging.rs @@ -20,8 +20,8 @@ use parity_scale_codec::{Decode, Encode}; use bitvec::{order::Lsb0, vec::BitVec}; use polkadot_primitives::vstaging::{ - CandidateHash, CommittedCandidateReceipt, PersistedValidationData, UncheckedSignedStatement, - Id as ParaId, Hash, + CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, PersistedValidationData, + UncheckedSignedStatement, }; use super::{IsRequest, Protocol}; diff --git a/node/network/statement-distribution/src/legacy_v1/mod.rs b/node/network/statement-distribution/src/legacy_v1/mod.rs index 506ae5a6ff39..e24547461042 100644 --- a/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -18,9 +18,7 @@ use parity_scale_codec::Encode; use polkadot_node_network_protocol::{ self as net_protocol, - grid_topology::{ - GridNeighbors, RequiredRouting, SessionBoundGridTopologyStorage, - }, + grid_topology::{GridNeighbors, RequiredRouting, SessionBoundGridTopologyStorage}, peer_set::{IsAuthority, PeerSet, ValidationVersion}, v1::{self as protocol_v1, StatementMetadata}, vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep, From 2d628dbc81b9da43c2591adefed1e08d001f2818 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 14 Oct 2022 19:09:30 -0500 Subject: [PATCH 057/220] flesh out grid slightly more --- .../src/vstaging/grid.rs | 60 ++++++++++--------- 1 file changed, 31 insertions(+), 29 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index e4775e9ae16c..4405618c5bb8 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -17,14 +17,15 @@ //! 
Utilities for handling distribution of backed candidates along //! the grid. -use polkadot_primitives::vstaging::{AuthorityDiscoveryId, GroupIndex, ValidatorIndex}; -use polkadot_node_network_protocol::{ - PeerId, - grid_topology::SessionGridTopology, +use polkadot_node_network_protocol::{grid_topology::SessionGridTopology, PeerId}; +use polkadot_primitives::vstaging::{ + AuthorityDiscoveryId, CandidateHash, GroupIndex, Hash, ValidatorIndex, }; use std::collections::{HashMap, HashSet}; +use bitvec::vec::BitVec; + use super::LOG_TARGET; /// Our local view of a subset of the grid topology organized around a specific validator @@ -58,28 +59,19 @@ fn build_session_topology( topology: &SessionGridTopology, our_index: ValidatorIndex, ) -> SessionTopologyView { - let mut view = SessionTopologyView { - group_views: HashMap::new(), - }; + let mut view = SessionTopologyView { group_views: HashMap::new() }; let our_neighbors = match topology.compute_grid_neighbors_for(our_index) { None => { - gum::warn!( - target: LOG_TARGET, - ?our_index, - "our index unrecognized in topology?" - ); + gum::warn!(target: LOG_TARGET, ?our_index, "our index unrecognized in topology?"); - return view; + return view }, Some(n) => n, }; for (i, group) in groups.into_iter().enumerate() { - let mut sub_view = GroupSubView { - sending: HashSet::new(), - receiving: HashSet::new(), - }; + let mut sub_view = GroupSubView { sending: HashSet::new(), receiving: HashSet::new() }; if group.contains(&our_index) { sub_view.sending.extend(our_neighbors.validator_indices_x.iter().cloned()); @@ -114,8 +106,8 @@ fn build_session_topology( "validator index unrecognized in topology?" 
); - continue; - } + continue + }, Some(n) => n, }; @@ -123,7 +115,7 @@ fn build_session_topology( for potential_link in &their_neighbors.validator_indices_x { if our_neighbors.validator_indices_y.contains(potential_link) { sub_view.receiving.insert(*potential_link); - break; // one max + break // one max } } @@ -131,7 +123,7 @@ fn build_session_topology( for potential_link in &their_neighbors.validator_indices_y { if our_neighbors.validator_indices_x.contains(potential_link) { sub_view.receiving.insert(*potential_link); - break; // one max + break // one max } } } @@ -146,15 +138,25 @@ fn build_session_topology( /// A tracker of knowledge from authorities within the grid for a /// specific relay-parent. struct PerRelayParentGridTracker { - by_authority: HashMap<(AuthorityDiscoveryId, GroupIndex), Knowledge>, + by_validator: HashMap<(ValidatorIndex, GroupIndex), Knowledge>, +} + +struct ManifestSummary { + claimed_parent_hash: Hash, + seconded_in_group: BitVec, + validated_in_group: BitVec, } struct Knowledge { - // TODO [now] - // keep track of all the seconded statements they either have _claimed_ or - // have sent us. - // - // we need to do some spam protection here. similar to cluster - we will need - // to begrudgingly accept some overflow but we will need to ignore manifests - // which don't contain a `Seconded` statement from a validator under the limit. + manifests: HashMap, + seconded_counts: Vec, +} + +impl Knowledge {} + +#[cfg(tests)] +mod tests { + use super::*; + + // TODO [now]: test that grid topology views are set up correctly. 
} From 01133411dbeea0a81f773ebeb0ca76c0aaa48bd5 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 14 Oct 2022 19:23:08 -0500 Subject: [PATCH 058/220] add indexed groups utility --- .../src/vstaging/groups.rs | 67 +++++++++++++++++++ .../src/vstaging/mod.rs | 1 + 2 files changed, 68 insertions(+) create mode 100644 node/network/statement-distribution/src/vstaging/groups.rs diff --git a/node/network/statement-distribution/src/vstaging/groups.rs b/node/network/statement-distribution/src/vstaging/groups.rs new file mode 100644 index 000000000000..bd8f390e87da --- /dev/null +++ b/node/network/statement-distribution/src/vstaging/groups.rs @@ -0,0 +1,67 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A utility for tracking groups and their members within a session. + +use polkadot_primitives::vstaging::{AuthorityDiscoveryId, GroupIndex, ValidatorIndex}; + +use std::collections::HashMap; + +/// Validator groups within a session, plus some helpful indexing for +/// looking up groups by validator indices or authority discovery ID. +#[derive(Debug, Clone)] +pub struct Groups { + groups: Vec>, + by_validator_index: HashMap, + by_discovery_key: HashMap, +} + +impl Groups { + /// Create a new [`Groups`] tracker with the groups and discovery keys + /// from the session. 
+ pub fn new(groups: Vec>, discovery_keys: &[AuthorityDiscoveryId]) -> Self { + let mut by_validator_index = HashMap::new(); + let mut by_discovery_key = HashMap::new(); + + for (i, group) in groups.iter().enumerate() { + let index = GroupIndex(i as _); + for v in group { + by_validator_index.insert(*v, index); + if let Some(discovery_key) = discovery_keys.get(v.0 as usize) { + // GIGO: malformed session data leads to incomplete index. + by_discovery_key.insert(discovery_key.clone(), index); + } + } + } + + Groups { groups, by_validator_index, by_discovery_key } + } + + /// Get the underlying group validators by group index. + pub fn get(&self, group_index: GroupIndex) -> Option<&[ValidatorIndex]> { + self.groups.get(group_index.0 as usize).map(|x| &x[..]) + } + + /// Get the group index for a validator by index. + pub fn by_validator_index(&self, validator_index: ValidatorIndex) -> Option { + self.by_validator_index.get(&validator_index).map(|x| *x) + } + + /// Get the group index for a validator by its discovery key. 
+ pub fn by_discovery_key(&self, discovery_key: AuthorityDiscoveryId) -> Option { + self.by_discovery_key.get(&discovery_key).map(|x| *x) + } +} diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 5d4b89bb1367..6dec118c4d41 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -56,6 +56,7 @@ use statement_store::StatementStore; mod candidate_entry; mod cluster; mod grid; +mod groups; mod requests; mod statement_store; From 45366464a9c2fee72f1ac9331cbe7cff861e4a04 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 14 Oct 2022 19:31:28 -0500 Subject: [PATCH 059/220] integrate Groups into per-session info --- .../src/vstaging/mod.rs | 43 ++++++++++++------- 1 file changed, 28 insertions(+), 15 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 6dec118c4d41..67b176267a44 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -50,6 +50,7 @@ use crate::{ }; use candidate_entry::CandidateEntry; use cluster::{Accept as ClusterAccept, ClusterTracker, RejectIncoming as ClusterRejectIncoming}; +use groups::Groups; use requests::RequestManager; use statement_store::StatementStore; @@ -104,13 +105,18 @@ struct LocalValidatorState { cluster_tracker: ClusterTracker, } +struct PerSessionState { + session_info: SessionInfo, + groups: Groups, +} + pub(crate) struct State { /// The utility for managing the implicit and explicit views in a consistent way. /// /// We only feed leaves which have prospective parachains enabled to this view. 
implicit_view: ImplicitView, per_relay_parent: HashMap, - per_session: HashMap, + per_session: HashMap, peers: HashMap, keystore: SyncCryptoStorePtr, topology_storage: SessionGridTopologies, @@ -282,18 +288,27 @@ pub(crate) async fn handle_activated_leaf( Some(s) => s, }; - state.per_session.insert(session_index, session_info); + let groups = Groups::new( + session_info.validator_groups.clone(), + &session_info.discovery_keys, + ); + + state.per_session.insert(session_index, PerSessionState { + session_info, + groups, + }); } - let session_info = state + let per_session = state .per_session .get(&session_index) .expect("either existed or just inserted; qed"); + let session_info = &per_session.session_info; let local_validator = find_local_validator_state( &session_info.validators, &state.keystore, - &session_info.validator_groups, + &per_session.groups, &availability_cores, ) .await; @@ -321,17 +336,13 @@ pub(crate) async fn handle_activated_leaf( async fn find_local_validator_state( validators: &[ValidatorId], keystore: &SyncCryptoStorePtr, - groups: &[Vec], + groups: &Groups, availability_cores: &[CoreState], ) -> Option { - if groups.is_empty() { - return None - } - let (validator_id, validator_index) = polkadot_node_subsystem_util::signing_key_and_index(validators, keystore).await?; - let our_group = polkadot_node_subsystem_util::find_validator_group(groups, validator_index)?; + let our_group = groups.by_validator_index(validator_index)?; // note: this won't work well for parathreads because it only works // when core assignments to paras are static throughout the session. 
@@ -339,7 +350,7 @@ async fn find_local_validator_state( let para_for_group = |g: GroupIndex| availability_cores.get(g.0 as usize).and_then(|c| c.para_id()); - let group_validators = groups[our_group.0 as usize].clone(); + let group_validators = groups.get(our_group)?.to_owned(); Some(LocalValidatorState { index: validator_index, group: our_group, @@ -503,10 +514,11 @@ async fn send_statement_direct( None => return, }; - let session_info = match state.per_session.get(&per_relay_parent.session) { + let per_session = match state.per_session.get(&per_relay_parent.session) { Some(s) => s, None => return, }; + let session_info = &per_session.session_info; let candidate_hash = statement.payload().candidate_hash().clone(); @@ -530,7 +542,7 @@ async fn send_statement_direct( None => return, // sanity: should be impossible to reach this. }; - let current_group = local_validator + let cluster_targets = local_validator .cluster_tracker .targets() .iter() @@ -539,7 +551,7 @@ async fn send_statement_direct( // TODO [now]: extend with grid targets, dedup - let targets = current_group + let targets = cluster_targets .filter_map(|(v, k)| { session_info.discovery_keys.get(v.0 as usize).map(|a| (v, a.clone(), k)) }) @@ -717,7 +729,7 @@ async fn handle_incoming_statement( Some(p) => p, }; - let session_info = match state.per_session.get(&per_relay_parent.session) { + let per_session = match state.per_session.get(&per_relay_parent.session) { None => { gum::warn!( target: LOG_TARGET, @@ -729,6 +741,7 @@ async fn handle_incoming_statement( }, Some(s) => s, }; + let session_info = &per_session.session_info; let local_validator = match per_relay_parent.local_validator.as_mut() { None => { From 950ae304e57ae4b3f13ed972f3cc1801d61261c8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 14 Oct 2022 19:38:55 -0500 Subject: [PATCH 060/220] refactor statement store to borrow Groups --- .../src/vstaging/groups.rs | 5 ++ .../src/vstaging/mod.rs | 59 +++++++++++-------- 
.../src/vstaging/statement_store.rs | 33 +++++++---- 3 files changed, 62 insertions(+), 35 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/groups.rs b/node/network/statement-distribution/src/vstaging/groups.rs index bd8f390e87da..ebfb6421e429 100644 --- a/node/network/statement-distribution/src/vstaging/groups.rs +++ b/node/network/statement-distribution/src/vstaging/groups.rs @@ -50,6 +50,11 @@ impl Groups { Groups { groups, by_validator_index, by_discovery_key } } + /// Access all the underlying groups. + pub fn all(&self) -> &[Vec] { + &self.groups + } + /// Get the underlying group validators by group index. pub fn get(&self, group_index: GroupIndex) -> Option<&[ValidatorIndex]> { self.groups.get(group_index.0 as usize).map(|x| &x[..]) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 67b176267a44..7111df9c54df 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -288,25 +288,21 @@ pub(crate) async fn handle_activated_leaf( Some(s) => s, }; - let groups = Groups::new( - session_info.validator_groups.clone(), - &session_info.discovery_keys, - ); + let groups = + Groups::new(session_info.validator_groups.clone(), &session_info.discovery_keys); - state.per_session.insert(session_index, PerSessionState { - session_info, - groups, - }); + state + .per_session + .insert(session_index, PerSessionState { session_info, groups }); } let per_session = state .per_session .get(&session_index) .expect("either existed or just inserted; qed"); - let session_info = &per_session.session_info; let local_validator = find_local_validator_state( - &session_info.validators, + &per_session.session_info.validators, &state.keystore, &per_session.groups, &availability_cores, @@ -319,7 +315,7 @@ pub(crate) async fn handle_activated_leaf( validator_state: HashMap::new(), candidates: HashMap::new(), 
local_validator, - statement_store: StatementStore::new(session_info.validator_groups.clone()), + statement_store: StatementStore::new(&per_session.groups), session: session_index, }, ); @@ -339,6 +335,10 @@ async fn find_local_validator_state( groups: &Groups, availability_cores: &[CoreState], ) -> Option { + if groups.all().is_empty() { + return None + } + let (validator_id, validator_index) = polkadot_node_subsystem_util::signing_key_and_index(validators, keystore).await?; @@ -406,6 +406,11 @@ pub(crate) async fn share_local_statement( Some(x) => x, }; + let per_session = match state.per_session.get(&per_relay_parent.session) { + Some(s) => s, + None => return Ok(()), + }; + let (local_index, local_assignment, local_group) = match per_relay_parent.local_validator.as_ref() { None => return Err(JfyiError::InvalidShare), @@ -463,7 +468,10 @@ pub(crate) async fn share_local_statement( }, }; - match per_relay_parent.statement_store.insert(compact_statement.clone()) { + match per_relay_parent + .statement_store + .insert(&per_session.groups, compact_statement.clone()) + { Ok(false) | Err(_) => { gum::warn!( target: LOG_TARGET, @@ -822,20 +830,21 @@ async fn handle_incoming_statement( let statement = checked_statement.payload().clone(); let sender_index = checked_statement.validator_index(); let candidate_hash = *checked_statement.payload().candidate_hash(); - let was_fresh = match per_relay_parent.statement_store.insert(checked_statement) { - Err(_) => { - // sanity: should never happen. - gum::warn!( - target: LOG_TARGET, - ?relay_parent, - validator_index = ?sender_index, - "Error -Cluster accepted message from unknown validator." - ); + let was_fresh = + match per_relay_parent.statement_store.insert(&per_session.groups, checked_statement) { + Err(_) => { + // sanity: should never happen. + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + validator_index = ?sender_index, + "Error -Cluster accepted message from unknown validator." 
+ ); - return - }, - Ok(known) => known, - }; + return + }, + Ok(known) => known, + }; let sender_group_index = per_relay_parent .statement_store diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index 1d18d109696f..85b5af442bb6 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -29,10 +29,11 @@ use polkadot_primitives::vstaging::{ }; use std::collections::hash_map::{Entry as HEntry, HashMap}; +use super::groups::Groups; + /// Storage for statements. Intended to be used for statements signed under /// the same relay-parent. See module docs for more details. pub struct StatementStore { - groups: Vec>, validator_meta: HashMap, // we keep statements per-group because even though only one group _should_ be @@ -44,9 +45,9 @@ pub struct StatementStore { impl StatementStore { /// Create a new [`StatementStore`] - pub fn new(groups: Vec>) -> Self { + pub fn new(groups: &Groups) -> Self { let mut validator_meta = HashMap::new(); - for (g, group) in groups.iter().enumerate() { + for (g, group) in groups.all().iter().enumerate() { for (i, v) in group.iter().enumerate() { validator_meta.insert( v, @@ -60,7 +61,6 @@ impl StatementStore { } StatementStore { - groups, validator_meta: HashMap::new(), group_statements: HashMap::new(), known_statements: HashMap::new(), @@ -75,7 +75,11 @@ impl StatementStore { /// Insert a statement. Returns `true` if was not known already, `false` if it was. /// Ignores statements by unknown validators and returns an error. 
- pub fn insert(&mut self, statement: SignedStatement) -> Result { + pub fn insert( + &mut self, + groups: &Groups, + statement: SignedStatement, + ) -> Result { let validator_index = statement.validator_index(); let validator_meta = match self.validator_meta.get_mut(&validator_index) { @@ -98,10 +102,18 @@ impl StatementStore { // cross-reference updates. { let group_index = validator_meta.group; - let group = self.groups.get(group_index.0 as usize).expect( - "we only have meta info on validators confirmed to be \ - in groups at construction; qed", - ); + let group = match groups.get(group_index) { + Some(g) => g, + None => { + gum::error!( + target: crate::LOG_TARGET, + ?group_index, + "groups passed into `insert` differ from those used at store creation" + ); + + return Err(ValidatorUnknown) + }, + }; let group_statements = self .group_statements @@ -139,11 +151,12 @@ impl StatementStore { /// Get an iterator over signed statements of the given form by the given group. pub fn group_statements<'a>( &'a self, + groups: &'a Groups, group_index: GroupIndex, statement: CompactStatement, ) -> impl Iterator + 'a { let bitslice = self.group_statement_bitslice(group_index, statement.clone()); - let group_validators = self.groups.get(group_index.0 as usize); + let group_validators = groups.get(group_index); bitslice .into_iter() From bc6db743630f60ff757fb8bcf9c4c7fcea6bdb57 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 14 Oct 2022 20:24:51 -0500 Subject: [PATCH 061/220] implement manifest knowledge utility --- .../src/vstaging/grid.rs | 157 ++++++++++++++++-- 1 file changed, 147 insertions(+), 10 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 4405618c5bb8..7f9f1ddae851 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -22,9 +22,9 @@ use polkadot_primitives::vstaging::{ 
AuthorityDiscoveryId, CandidateHash, GroupIndex, Hash, ValidatorIndex, }; -use std::collections::{HashMap, HashSet}; +use std::collections::{hash_map::{Entry, HashMap}, HashSet}; -use bitvec::vec::BitVec; +use bitvec::{vec::BitVec, order::Lsb0, slice::BitSlice}; use super::LOG_TARGET; @@ -97,7 +97,7 @@ fn build_session_topology( } // If they don't share a slice with us, we don't send to anybody - // but receive from any peers sharing a dimension with both of us. + // but receive from any peers sharing a dimension with both of us let their_neighbors = match topology.compute_grid_neighbors_for(group_val) { None => { gum::warn!( @@ -138,25 +138,162 @@ fn build_session_topology( /// A tracker of knowledge from authorities within the grid for a /// specific relay-parent. struct PerRelayParentGridTracker { - by_validator: HashMap<(ValidatorIndex, GroupIndex), Knowledge>, + by_validator: HashMap, } struct ManifestSummary { claimed_parent_hash: Hash, - seconded_in_group: BitVec, - validated_in_group: BitVec, + claimed_group_index: GroupIndex, + seconded_in_group: BitVec, + validated_in_group: BitVec, } -struct Knowledge { - manifests: HashMap, - seconded_counts: Vec, +#[derive(Debug, Clone)] +enum ManifestImportError { + // The manifest conflicts with another, previously sent manifest. + Conflicting, + // The manifest has overflowed beyond the limits of what the + // counterparty was allowed to send us. + Overflow, } -impl Knowledge {} +/// The knowledge we are awawre of counterparties having of manifests. +#[derive(Default)] +struct CounterPartyManifestKnowledge { + received: HashMap, + seconded_counts: HashMap>, +} + +impl CounterPartyManifestKnowledge { + fn new(group_size: usize) -> Self { + CounterPartyManifestKnowledge { + received: HashMap::new(), + seconded_counts: HashMap::new(), + } + } + + /// Attempt to import a received manifest from a counterparty. 
+ /// + /// This will reject manifests which are either duplicate, conflicting, + /// or imply an irrational amount of `Seconded` statements. + /// + /// This assumes that the manifest has already been checked for + /// validity - i.e. that the bitvecs match the claimed group in size + /// and that that the manifest includes at least one `Seconded` + /// attestation and includes enough attestations for the candidate + /// to be backed. + /// + /// This also should only be invoked when we are intended to track + /// the knowledge of this peer as determined by the [`SessionTopology`]. + fn import_received( + &mut self, + group_size: usize, + seconding_limit: usize, + candidate_hash: CandidateHash, + manifest_summary: ManifestSummary, + ) -> Result<(), ManifestImportError> { + match self.received.entry(candidate_hash) { + Entry::Occupied(mut e) => { + // occupied entry. + + // filter out clearly conflicting data. + { + let prev = e.get(); + if prev.claimed_group_index != manifest_summary.claimed_group_index { + return Err(ManifestImportError::Conflicting); + } + + if prev.claimed_parent_hash != manifest_summary.claimed_parent_hash { + return Err(ManifestImportError::Conflicting); + } + + if !manifest_summary.seconded_in_group.contains(&prev.seconded_in_group) { + return Err(ManifestImportError::Conflicting); + } + + if !manifest_summary.validated_in_group.contains(&prev.validated_in_group) { + return Err(ManifestImportError::Conflicting); + } + + let mut fresh_seconded = manifest_summary.seconded_in_group.clone(); + fresh_seconded |= &prev.seconded_in_group; + + let mut fresh_validated = manifest_summary.validated_in_group.clone(); + fresh_validated |= &prev.validated_in_group; + + let within_limits = updating_ensure_within_seconding_limit( + &mut self.seconded_counts, + manifest_summary.claimed_group_index, + group_size, + seconding_limit, + &*fresh_seconded, + ); + + if !within_limits { + return Err(ManifestImportError::Overflow); + } + } + + // All checks passed. 
Overwrite: guaranteed to be + // superset. + *e.get_mut() = manifest_summary; + Ok(()) + } + Entry::Vacant(e) => { + let within_limits = updating_ensure_within_seconding_limit( + &mut self.seconded_counts, + manifest_summary.claimed_group_index, + group_size, + seconding_limit, + &*manifest_summary.seconded_in_group, + ); + + if within_limits { + e.insert(manifest_summary); + Ok(()) + } else { + Err(ManifestImportError::Overflow) + } + } + } + } +} + +// updates validator-seconded records but only if the new statements +// are OK. returns `true` if alright and `false` otherwise. +fn updating_ensure_within_seconding_limit( + seconded_counts: &mut HashMap>, + group_index: GroupIndex, + group_size: usize, + seconding_limit: usize, + new_seconded: &BitSlice, +) -> bool { + if seconding_limit == 0 { return false } + + // due to the check above, if this was non-existent this function will + // always return `true`. + let counts = seconded_counts + .entry(group_index) + .or_insert_with(|| vec![0; group_size]); + + for i in new_seconded.iter_ones() { + if counts[i] == seconding_limit { return false } + } + + for i in new_seconded.iter_ones() { + counts[i] += 1; + } + + true +} #[cfg(tests)] mod tests { use super::*; // TODO [now]: test that grid topology views are set up correctly. + + // TODO [now]: tests that conflicting manifests are rejected. + + // TODO [now]: test that overflowing manifests are rejected. 
} From d3cfa768d57428a056012c670b61dcf6045bcd2f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 18 Oct 2022 19:51:12 -0500 Subject: [PATCH 062/220] add a test for topology setup --- .../src/vstaging/grid.rs | 111 +++++++++++++++++- 1 file changed, 108 insertions(+), 3 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 7f9f1ddae851..e09ebd52fddd 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -37,6 +37,7 @@ use super::LOG_TARGET; /// /// In the case that this group is the group that we are locally assigned to, /// the 'receiving' side will be empty. +#[derive(PartialEq)] struct GroupSubView { sending: HashSet, receiving: HashSet, @@ -57,10 +58,15 @@ struct SessionTopologyView { fn build_session_topology( groups: &[Vec], topology: &SessionGridTopology, - our_index: ValidatorIndex, + our_index: Option, ) -> SessionTopologyView { let mut view = SessionTopologyView { group_views: HashMap::new() }; + let our_index = match our_index { + None => return view, + Some(i) => i, + }; + let our_neighbors = match topology.compute_grid_neighbors_for(our_index) { None => { gum::warn!(target: LOG_TARGET, ?our_index, "our index unrecognized in topology?"); @@ -287,11 +293,110 @@ fn updating_ensure_within_seconding_limit( true } -#[cfg(tests)] +#[cfg(test)] mod tests { use super::*; + use polkadot_node_network_protocol::grid_topology::TopologyPeerInfo; + use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; + use sp_core::crypto::Pair as PairT; + + #[test] + fn topology_empty_for_no_index() { + let base_topology = SessionGridTopology::new( + vec![0, 1, 2], + vec![ + TopologyPeerInfo { + peer_ids: Vec::new(), + validator_index: ValidatorIndex(0), + discovery_id: AuthorityDiscoveryPair::generate().0.public(), + }, + TopologyPeerInfo { + peer_ids: Vec::new(), + validator_index: 
ValidatorIndex(1), + discovery_id: AuthorityDiscoveryPair::generate().0.public(), + }, + TopologyPeerInfo { + peer_ids: Vec::new(), + validator_index: ValidatorIndex(2), + discovery_id: AuthorityDiscoveryPair::generate().0.public(), + }, + ], + ); + + let t = build_session_topology( + &[ + vec![ValidatorIndex(0)], + vec![ValidatorIndex(1)], + vec![ValidatorIndex(2)], + ], + &base_topology, + None, + ); + + assert!(t.group_views.is_empty()); + } - // TODO [now]: test that grid topology views are set up correctly. + #[test] + fn topology_setup() { + let base_topology = SessionGridTopology::new( + (0..9).collect(), + (0..9) + .map(|i| TopologyPeerInfo { + peer_ids: Vec::new(), + validator_index: ValidatorIndex(i), + discovery_id: AuthorityDiscoveryPair::generate().0.public(), + }) + .collect(), + ); + + let t = build_session_topology( + &[ + vec![ValidatorIndex(0), ValidatorIndex(3), ValidatorIndex(6)], + vec![ValidatorIndex(4), ValidatorIndex(2), ValidatorIndex(7)], + vec![ValidatorIndex(8), ValidatorIndex(5), ValidatorIndex(1)], + ], + &base_topology, + Some(ValidatorIndex(0)), + ); + + assert_eq!(t.group_views.len(), 3); + + // 0 1 2 + // 3 4 5 + // 6 7 8 + + // our group: we send to all row/column neighbors and receive nothing + assert_eq!( + t.group_views.get(&GroupIndex(0)).unwrap().sending, + vec![1, 2, 3, 6].into_iter().map(ValidatorIndex).collect::>(), + ); + assert_eq!( + t.group_views.get(&GroupIndex(0)).unwrap().receiving, + HashSet::new(), + ); + + // we share a row with '2' and have indirect connections to '4' and '7'. + + assert_eq!( + t.group_views.get(&GroupIndex(1)).unwrap().sending, + vec![3, 6].into_iter().map(ValidatorIndex).collect::>(), + ); + assert_eq!( + t.group_views.get(&GroupIndex(1)).unwrap().receiving, + vec![1, 2, 3, 6].into_iter().map(ValidatorIndex).collect::>(), + ); + + // we share a row with '1' and have indirect connections to '5' and '8'. 
+ + assert_eq!( + t.group_views.get(&GroupIndex(2)).unwrap().sending, + vec![3, 6].into_iter().map(ValidatorIndex).collect::>(), + ); + assert_eq!( + t.group_views.get(&GroupIndex(2)).unwrap().receiving, + vec![1, 2, 3, 6].into_iter().map(ValidatorIndex).collect::>(), + ); + } // TODO [now]: tests that conflicting manifests are rejected. From dc2e1418fdaa70fc5559ed7ff4cbc99d6e82bb48 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 18 Oct 2022 19:53:47 -0500 Subject: [PATCH 063/220] don't send to group members --- .../statement-distribution/src/vstaging/grid.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index e09ebd52fddd..b44a0234d205 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -90,14 +90,26 @@ fn build_session_topology( if our_neighbors.validator_indices_x.contains(&group_val) { sub_view.receiving.insert(group_val); - sub_view.sending.extend(our_neighbors.validator_indices_y.iter().cloned()); + sub_view.sending.extend( + our_neighbors + .validator_indices_y + .iter() + .filter(|v| !group.contains(v)) + .cloned(), + ); continue } if our_neighbors.validator_indices_y.contains(&group_val) { sub_view.receiving.insert(group_val); - sub_view.sending.extend(our_neighbors.validator_indices_x.iter().cloned()); + sub_view.sending.extend( + our_neighbors + .validator_indices_x + .iter() + .filter(|v| !group.contains(v)) + .cloned() + ); continue } From 5fd17ef2cbd4b40d114eafa636a4e8606c20ca37 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 18 Oct 2022 20:01:47 -0500 Subject: [PATCH 064/220] test for conflicting manifests --- .../src/vstaging/grid.rs | 87 ++++++++++++++++++- 1 file changed, 85 insertions(+), 2 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs 
b/node/network/statement-distribution/src/vstaging/grid.rs index b44a0234d205..a58c859d2640 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -183,7 +183,7 @@ struct CounterPartyManifestKnowledge { } impl CounterPartyManifestKnowledge { - fn new(group_size: usize) -> Self { + fn new() -> Self { CounterPartyManifestKnowledge { received: HashMap::new(), seconded_counts: HashMap::new(), @@ -308,6 +308,7 @@ fn updating_ensure_within_seconding_limit( #[cfg(test)] mod tests { use super::*; + use assert_matches::assert_matches; use polkadot_node_network_protocol::grid_topology::TopologyPeerInfo; use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; use sp_core::crypto::Pair as PairT; @@ -410,7 +411,89 @@ mod tests { ); } - // TODO [now]: tests that conflicting manifests are rejected. + #[test] + fn knowledge_rejects_conflicting_manifest() { + let mut knowledge = CounterPartyManifestKnowledge::new(); + knowledge.import_received( + 3, + 2, + CandidateHash(Hash::repeat_byte(1)), + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(2), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, + ).unwrap(); + + // conflicting group + + assert_matches!( + knowledge.import_received( + 3, + 2, + CandidateHash(Hash::repeat_byte(1)), + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(2), + claimed_group_index: GroupIndex(1), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, + ), + Err(ManifestImportError::Conflicting) + ); + + // conflicting parent hash + + assert_matches!( + knowledge.import_received( + 3, + 2, + CandidateHash(Hash::repeat_byte(1)), + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(3), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 
1, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, + ), + Err(ManifestImportError::Conflicting) + ); + + // conflicting seconded statements bitfield + + assert_matches!( + knowledge.import_received( + 3, + 2, + CandidateHash(Hash::repeat_byte(1)), + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(2), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, + ), + Err(ManifestImportError::Conflicting) + ); + + // conflicting valid statements bitfield + + assert_matches!( + knowledge.import_received( + 3, + 2, + CandidateHash(Hash::repeat_byte(1)), + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(2), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + }, + ), + Err(ManifestImportError::Conflicting) + ); + } // TODO [now]: test that overflowing manifests are rejected. } From 57c027fe135ddea7fb2525ca25bf557cb1cfac03 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 18 Oct 2022 20:04:09 -0500 Subject: [PATCH 065/220] manifest knowledge tests --- .../src/vstaging/grid.rs | 55 ++++++++++++++++++- 1 file changed, 54 insertions(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index a58c859d2640..2de1977267a4 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -495,5 +495,58 @@ mod tests { ); } - // TODO [now]: test that overflowing manifests are rejected. 
+ #[test] + fn reject_overflowing_manifests() { + let mut knowledge = CounterPartyManifestKnowledge::new(); + knowledge.import_received( + 3, + 2, + CandidateHash(Hash::repeat_byte(1)), + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0xA), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, + ).unwrap(); + + knowledge.import_received( + 3, + 2, + CandidateHash(Hash::repeat_byte(2)), + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0xB), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, + ).unwrap(); + + assert_matches!( + knowledge.import_received( + 3, + 2, + CandidateHash(Hash::repeat_byte(3)), + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0xC), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, + ), + Err(ManifestImportError::Overflow) + ); + + knowledge.import_received( + 3, + 2, + CandidateHash(Hash::repeat_byte(3)), + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0xC), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, + ).unwrap(); + } } From f9cf743dccee8642a4132b10244cf5eb3af34e83 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 18 Oct 2022 20:04:26 -0500 Subject: [PATCH 066/220] fmt --- .../src/vstaging/grid.rs | 155 +++++++++--------- 1 file changed, 79 insertions(+), 76 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 2de1977267a4..cf451c580c30 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ 
b/node/network/statement-distribution/src/vstaging/grid.rs @@ -22,9 +22,12 @@ use polkadot_primitives::vstaging::{ AuthorityDiscoveryId, CandidateHash, GroupIndex, Hash, ValidatorIndex, }; -use std::collections::{hash_map::{Entry, HashMap}, HashSet}; +use std::collections::{ + hash_map::{Entry, HashMap}, + HashSet, +}; -use bitvec::{vec::BitVec, order::Lsb0, slice::BitSlice}; +use bitvec::{order::Lsb0, slice::BitSlice, vec::BitVec}; use super::LOG_TARGET; @@ -105,10 +108,10 @@ fn build_session_topology( sub_view.receiving.insert(group_val); sub_view.sending.extend( our_neighbors - .validator_indices_x - .iter() - .filter(|v| !group.contains(v)) - .cloned() + .validator_indices_x + .iter() + .filter(|v| !group.contains(v)) + .cloned(), ); continue @@ -184,10 +187,7 @@ struct CounterPartyManifestKnowledge { impl CounterPartyManifestKnowledge { fn new() -> Self { - CounterPartyManifestKnowledge { - received: HashMap::new(), - seconded_counts: HashMap::new(), - } + CounterPartyManifestKnowledge { received: HashMap::new(), seconded_counts: HashMap::new() } } /// Attempt to import a received manifest from a counterparty. 
@@ -218,19 +218,19 @@ impl CounterPartyManifestKnowledge { { let prev = e.get(); if prev.claimed_group_index != manifest_summary.claimed_group_index { - return Err(ManifestImportError::Conflicting); + return Err(ManifestImportError::Conflicting) } if prev.claimed_parent_hash != manifest_summary.claimed_parent_hash { - return Err(ManifestImportError::Conflicting); + return Err(ManifestImportError::Conflicting) } if !manifest_summary.seconded_in_group.contains(&prev.seconded_in_group) { - return Err(ManifestImportError::Conflicting); + return Err(ManifestImportError::Conflicting) } if !manifest_summary.validated_in_group.contains(&prev.validated_in_group) { - return Err(ManifestImportError::Conflicting); + return Err(ManifestImportError::Conflicting) } let mut fresh_seconded = manifest_summary.seconded_in_group.clone(); @@ -248,7 +248,7 @@ impl CounterPartyManifestKnowledge { ); if !within_limits { - return Err(ManifestImportError::Overflow); + return Err(ManifestImportError::Overflow) } } @@ -256,7 +256,7 @@ impl CounterPartyManifestKnowledge { // superset. *e.get_mut() = manifest_summary; Ok(()) - } + }, Entry::Vacant(e) => { let within_limits = updating_ensure_within_seconding_limit( &mut self.seconded_counts, @@ -272,7 +272,7 @@ impl CounterPartyManifestKnowledge { } else { Err(ManifestImportError::Overflow) } - } + }, } } } @@ -286,16 +286,18 @@ fn updating_ensure_within_seconding_limit( seconding_limit: usize, new_seconded: &BitSlice, ) -> bool { - if seconding_limit == 0 { return false } + if seconding_limit == 0 { + return false + } // due to the check above, if this was non-existent this function will // always return `true`. 
- let counts = seconded_counts - .entry(group_index) - .or_insert_with(|| vec![0; group_size]); + let counts = seconded_counts.entry(group_index).or_insert_with(|| vec![0; group_size]); for i in new_seconded.iter_ones() { - if counts[i] == seconding_limit { return false } + if counts[i] == seconding_limit { + return false + } } for i in new_seconded.iter_ones() { @@ -337,11 +339,7 @@ mod tests { ); let t = build_session_topology( - &[ - vec![ValidatorIndex(0)], - vec![ValidatorIndex(1)], - vec![ValidatorIndex(2)], - ], + &[vec![ValidatorIndex(0)], vec![ValidatorIndex(1)], vec![ValidatorIndex(2)]], &base_topology, None, ); @@ -383,10 +381,7 @@ mod tests { t.group_views.get(&GroupIndex(0)).unwrap().sending, vec![1, 2, 3, 6].into_iter().map(ValidatorIndex).collect::>(), ); - assert_eq!( - t.group_views.get(&GroupIndex(0)).unwrap().receiving, - HashSet::new(), - ); + assert_eq!(t.group_views.get(&GroupIndex(0)).unwrap().receiving, HashSet::new(),); // we share a row with '2' and have indirect connections to '4' and '7'. 
@@ -414,17 +409,19 @@ mod tests { #[test] fn knowledge_rejects_conflicting_manifest() { let mut knowledge = CounterPartyManifestKnowledge::new(); - knowledge.import_received( - 3, - 2, - CandidateHash(Hash::repeat_byte(1)), - ManifestSummary { - claimed_parent_hash: Hash::repeat_byte(2), - claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], - }, - ).unwrap(); + knowledge + .import_received( + 3, + 2, + CandidateHash(Hash::repeat_byte(1)), + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(2), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, + ) + .unwrap(); // conflicting group @@ -498,29 +495,33 @@ mod tests { #[test] fn reject_overflowing_manifests() { let mut knowledge = CounterPartyManifestKnowledge::new(); - knowledge.import_received( - 3, - 2, - CandidateHash(Hash::repeat_byte(1)), - ManifestSummary { - claimed_parent_hash: Hash::repeat_byte(0xA), - claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], - }, - ).unwrap(); - - knowledge.import_received( - 3, - 2, - CandidateHash(Hash::repeat_byte(2)), - ManifestSummary { - claimed_parent_hash: Hash::repeat_byte(0xB), - claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], - }, - ).unwrap(); + knowledge + .import_received( + 3, + 2, + CandidateHash(Hash::repeat_byte(1)), + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0xA), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, + ) + .unwrap(); + + knowledge + .import_received( + 3, + 2, + CandidateHash(Hash::repeat_byte(2)), + ManifestSummary 
{ + claimed_parent_hash: Hash::repeat_byte(0xB), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, + ) + .unwrap(); assert_matches!( knowledge.import_received( @@ -537,16 +538,18 @@ mod tests { Err(ManifestImportError::Overflow) ); - knowledge.import_received( - 3, - 2, - CandidateHash(Hash::repeat_byte(3)), - ManifestSummary { - claimed_parent_hash: Hash::repeat_byte(0xC), - claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], - }, - ).unwrap(); + knowledge + .import_received( + 3, + 2, + CandidateHash(Hash::repeat_byte(3)), + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0xC), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, + ) + .unwrap(); } } From fffae31ff5b4f27d2f9b04267fb2a83575a04d75 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 19 Oct 2022 16:09:19 -0500 Subject: [PATCH 067/220] rename field --- node/network/statement-distribution/src/vstaging/grid.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index cf451c580c30..cdd51147fe6b 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -159,7 +159,7 @@ fn build_session_topology( /// A tracker of knowledge from authorities within the grid for a /// specific relay-parent. 
struct PerRelayParentGridTracker { - by_validator: HashMap, + received: HashMap, } struct ManifestSummary { From b607890769361e9e01e764b12a4c96096989d721 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 19 Oct 2022 20:31:49 -0500 Subject: [PATCH 068/220] garbage collection for grid tracker --- .../src/vstaging/grid.rs | 60 ++++++++++++++++--- 1 file changed, 52 insertions(+), 8 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index cdd51147fe6b..bc111333ae0c 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -48,7 +48,7 @@ struct GroupSubView { /// Our local view of the topology for a session, as it pertains to backed /// candidate distribution. -struct SessionTopologyView { +pub struct SessionTopologyView { group_views: HashMap, } @@ -58,7 +58,7 @@ struct SessionTopologyView { /// and send to the corresponding X/Y slice. /// For any validators we don't share a slice with, we receive from the nodes /// which share a slice with them. -fn build_session_topology( +pub fn build_session_topology( groups: &[Vec], topology: &SessionGridTopology, our_index: Option, @@ -156,10 +156,31 @@ fn build_session_topology( view } -/// A tracker of knowledge from authorities within the grid for a -/// specific relay-parent. -struct PerRelayParentGridTracker { +/// A tracker of knowledge from authorities within the grid for the +/// entire session. This stores only data on manifests sent within a bounded +/// set of relay-parents. +#[derive(Default)] +pub struct PerSessionGridTracker { received: HashMap, + pending_unknown: HashMap>, +} + +impl PerSessionGridTracker { + /// Collect all garbage, provided a function that informs us on the state + /// of the view. 
+ pub fn collect_garbage(&mut self, view_contains: impl Fn(&Hash) -> bool) { + for (v, knowledge) in &mut self.received { + let pruned = knowledge.collect_garbage(&view_contains); + for pruned_candidate in pruned { + if let Entry::Occupied(mut e) = self.pending_unknown.entry(pruned_candidate) { + e.get_mut().remove(v); + if e.get().is_empty() { + e.remove(); + } + } + } + } + } } struct ManifestSummary { @@ -182,7 +203,8 @@ enum ManifestImportError { #[derive(Default)] struct CounterPartyManifestKnowledge { received: HashMap, - seconded_counts: HashMap>, + // (group, relay parent) -> seconded counts. + seconded_counts: HashMap<(GroupIndex, Hash), Vec>, } impl CounterPartyManifestKnowledge { @@ -190,6 +212,23 @@ impl CounterPartyManifestKnowledge { CounterPartyManifestKnowledge { received: HashMap::new(), seconded_counts: HashMap::new() } } + /// Collect all garbage: anything outside the provided view. + /// Returns a set of all pruned candidate hashes. + fn collect_garbage(&mut self, view_contains: impl Fn(&Hash) -> bool) -> Vec { + let mut v = Vec::new(); + self.seconded_counts.retain(|&(_, ref r_p), _| view_contains(r_p)); + self.received.retain(|c_hash, summary| { + if !view_contains(&summary.claimed_parent_hash) { + v.push(*c_hash); + false + } else { + true + } + }); + + v + } + /// Attempt to import a received manifest from a counterparty. 
/// /// This will reject manifests which are either duplicate, conflicting, @@ -242,6 +281,7 @@ impl CounterPartyManifestKnowledge { let within_limits = updating_ensure_within_seconding_limit( &mut self.seconded_counts, manifest_summary.claimed_group_index, + manifest_summary.claimed_parent_hash, group_size, seconding_limit, &*fresh_seconded, @@ -261,6 +301,7 @@ impl CounterPartyManifestKnowledge { let within_limits = updating_ensure_within_seconding_limit( &mut self.seconded_counts, manifest_summary.claimed_group_index, + manifest_summary.claimed_parent_hash, group_size, seconding_limit, &*manifest_summary.seconded_in_group, @@ -280,8 +321,9 @@ impl CounterPartyManifestKnowledge { // updates validator-seconded records but only if the new statements // are OK. returns `true` if alright and `false` otherwise. fn updating_ensure_within_seconding_limit( - seconded_counts: &mut HashMap>, + seconded_counts: &mut HashMap<(GroupIndex, Hash), Vec>, group_index: GroupIndex, + relay_parent: Hash, group_size: usize, seconding_limit: usize, new_seconded: &BitSlice, @@ -292,7 +334,9 @@ fn updating_ensure_within_seconding_limit( // due to the check above, if this was non-existent this function will // always return `true`. 
- let counts = seconded_counts.entry(group_index).or_insert_with(|| vec![0; group_size]); + let counts = seconded_counts + .entry((group_index, relay_parent)) + .or_insert_with(|| vec![0; group_size]); for i in new_seconded.iter_ones() { if counts[i] == seconding_limit { From 6ffbd391b3266e178ebe7efe902d5475394d1194 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 19 Oct 2022 20:48:36 -0500 Subject: [PATCH 069/220] routines for finding correct/incorrect advertisers --- .../src/vstaging/grid.rs | 163 ++++++++++++------ 1 file changed, 109 insertions(+), 54 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index bc111333ae0c..c0af00154da5 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -156,13 +156,23 @@ pub fn build_session_topology( view } +/// Advertisers of a candidate which became known. +#[derive(Default)] +pub struct CandidateAdvertisers { + /// Advertisers who got the details correct. + pub correct: Vec, + /// Advertisers who got the details incorrect. + pub incorrect: Vec, +} + /// A tracker of knowledge from authorities within the grid for the /// entire session. This stores only data on manifests sent within a bounded /// set of relay-parents. #[derive(Default)] pub struct PerSessionGridTracker { received: HashMap, - pending_unknown: HashMap>, + // advertisers of currently unknown candidates. 
+ unknown_advertisers: HashMap>, } impl PerSessionGridTracker { @@ -172,7 +182,7 @@ impl PerSessionGridTracker { for (v, knowledge) in &mut self.received { let pruned = knowledge.collect_garbage(&view_contains); for pruned_candidate in pruned { - if let Entry::Occupied(mut e) = self.pending_unknown.entry(pruned_candidate) { + if let Entry::Occupied(mut e) = self.unknown_advertisers.entry(pruned_candidate) { e.get_mut().remove(v); if e.get().is_empty() { e.remove(); @@ -181,9 +191,48 @@ impl PerSessionGridTracker { } } } + + /// Note that a candidate's full receipt has been acquired, with its actual group index, + /// relay-parent, and parent head data. + /// + /// Returns validator indices which advertised the candidate both + /// correctly and incorrectly. + pub fn note_acquired_candidate( + &mut self, + candidate_hash: CandidateHash, + relay_parent: Hash, + group_index: GroupIndex, + parent_hash: Hash, + ) -> CandidateAdvertisers { + let mut advertisers = CandidateAdvertisers::default(); + for v in self.unknown_advertisers.remove(&candidate_hash).into_iter().flat_map(|x| x) { + let knowledge = match self.received.get(&v) { + None => continue, + Some(k) => k, + }; + + let m = match knowledge.received.get(&candidate_hash) { + None => continue, + Some(m) => m, + }; + + if m.claimed_relay_parent != relay_parent || + m.claimed_group_index != group_index || + m.claimed_parent_hash != parent_hash + { + advertisers.incorrect.push(v); + } else { + advertisers.correct.push(v) + } + } + + advertisers + } } +#[derive(Clone)] struct ManifestSummary { + claimed_relay_parent: Hash, claimed_parent_hash: Hash, claimed_group_index: GroupIndex, seconded_in_group: BitVec, @@ -256,6 +305,10 @@ impl CounterPartyManifestKnowledge { // filter out clearly conflicting data. 
{ let prev = e.get(); + if prev.claimed_relay_parent != manifest_summary.claimed_relay_parent { + return Err(ManifestImportError::Conflicting) + } + if prev.claimed_group_index != manifest_summary.claimed_group_index { return Err(ManifestImportError::Conflicting) } @@ -281,7 +334,7 @@ impl CounterPartyManifestKnowledge { let within_limits = updating_ensure_within_seconding_limit( &mut self.seconded_counts, manifest_summary.claimed_group_index, - manifest_summary.claimed_parent_hash, + manifest_summary.claimed_relay_parent, group_size, seconding_limit, &*fresh_seconded, @@ -301,7 +354,7 @@ impl CounterPartyManifestKnowledge { let within_limits = updating_ensure_within_seconding_limit( &mut self.seconded_counts, manifest_summary.claimed_group_index, - manifest_summary.claimed_parent_hash, + manifest_summary.claimed_relay_parent, group_size, seconding_limit, &*manifest_summary.seconded_in_group, @@ -453,85 +506,66 @@ mod tests { #[test] fn knowledge_rejects_conflicting_manifest() { let mut knowledge = CounterPartyManifestKnowledge::new(); + + let expected_manifest_summary = ManifestSummary { + claimed_relay_parent: Hash::repeat_byte(0), + claimed_parent_hash: Hash::repeat_byte(2), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }; + knowledge .import_received( 3, 2, CandidateHash(Hash::repeat_byte(1)), - ManifestSummary { - claimed_parent_hash: Hash::repeat_byte(2), - claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], - }, + expected_manifest_summary.clone(), ) .unwrap(); + // conflicting relay-parent + + let mut s = expected_manifest_summary.clone(); + s.claimed_relay_parent = Hash::repeat_byte(1); + assert_matches!( + knowledge.import_received(3, 2, CandidateHash(Hash::repeat_byte(1)), s,), + Err(ManifestImportError::Conflicting) + ); + // conflicting group + let 
mut s = expected_manifest_summary.clone(); + s.claimed_group_index = GroupIndex(1); assert_matches!( - knowledge.import_received( - 3, - 2, - CandidateHash(Hash::repeat_byte(1)), - ManifestSummary { - claimed_parent_hash: Hash::repeat_byte(2), - claimed_group_index: GroupIndex(1), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], - }, - ), + knowledge.import_received(3, 2, CandidateHash(Hash::repeat_byte(1)), s,), Err(ManifestImportError::Conflicting) ); // conflicting parent hash + let mut s = expected_manifest_summary.clone(); + s.claimed_parent_hash = Hash::repeat_byte(3); assert_matches!( - knowledge.import_received( - 3, - 2, - CandidateHash(Hash::repeat_byte(1)), - ManifestSummary { - claimed_parent_hash: Hash::repeat_byte(3), - claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], - }, - ), + knowledge.import_received(3, 2, CandidateHash(Hash::repeat_byte(1)), s,), Err(ManifestImportError::Conflicting) ); // conflicting seconded statements bitfield + let mut s = expected_manifest_summary.clone(); + s.seconded_in_group = bitvec::bitvec![u8, Lsb0; 0, 1, 0]; assert_matches!( - knowledge.import_received( - 3, - 2, - CandidateHash(Hash::repeat_byte(1)), - ManifestSummary { - claimed_parent_hash: Hash::repeat_byte(2), - claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], - }, - ), + knowledge.import_received(3, 2, CandidateHash(Hash::repeat_byte(1)), s,), Err(ManifestImportError::Conflicting) ); // conflicting valid statements bitfield + let mut s = expected_manifest_summary.clone(); + s.validated_in_group = bitvec::bitvec![u8, Lsb0; 0, 1, 0]; assert_matches!( - knowledge.import_received( - 3, - 2, - CandidateHash(Hash::repeat_byte(1)), - ManifestSummary { - claimed_parent_hash: Hash::repeat_byte(2), - 
claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], - }, - ), + knowledge.import_received(3, 2, CandidateHash(Hash::repeat_byte(1)), s,), Err(ManifestImportError::Conflicting) ); } @@ -545,6 +579,7 @@ mod tests { 2, CandidateHash(Hash::repeat_byte(1)), ManifestSummary { + claimed_relay_parent: Hash::repeat_byte(0), claimed_parent_hash: Hash::repeat_byte(0xA), claimed_group_index: GroupIndex(0), seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], @@ -559,6 +594,7 @@ mod tests { 2, CandidateHash(Hash::repeat_byte(2)), ManifestSummary { + claimed_relay_parent: Hash::repeat_byte(0), claimed_parent_hash: Hash::repeat_byte(0xB), claimed_group_index: GroupIndex(0), seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], @@ -573,6 +609,7 @@ mod tests { 2, CandidateHash(Hash::repeat_byte(3)), ManifestSummary { + claimed_relay_parent: Hash::repeat_byte(0), claimed_parent_hash: Hash::repeat_byte(0xC), claimed_group_index: GroupIndex(0), seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], @@ -588,6 +625,7 @@ mod tests { 2, CandidateHash(Hash::repeat_byte(3)), ManifestSummary { + claimed_relay_parent: Hash::repeat_byte(0), claimed_parent_hash: Hash::repeat_byte(0xC), claimed_group_index: GroupIndex(0), seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], @@ -595,5 +633,22 @@ mod tests { }, ) .unwrap(); + + // different relay-parent: different counters. 
+ assert_matches!( + knowledge.import_received( + 3, + 2, + CandidateHash(Hash::repeat_byte(4)), + ManifestSummary { + claimed_relay_parent: Hash::repeat_byte(1), + claimed_parent_hash: Hash::repeat_byte(0xC), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, + ), + Ok(()) + ); } } From f5de6774a05d57fabe946f298ea2b567611d7bae Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 19 Oct 2022 21:22:58 -0500 Subject: [PATCH 070/220] add manifest import logic --- node/core/backing/src/lib.rs | 3 +- .../src/vstaging/grid.rs | 202 +++++++----------- .../src/vstaging/groups.rs | 17 ++ .../src/vstaging/mod.rs | 3 + 4 files changed, 105 insertions(+), 120 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index ef63178c8f26..7d8671cef236 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -387,7 +387,8 @@ struct AttestingData { /// How many votes we need to consider a candidate backed. /// -/// WARNING: This has to be kept in sync with the runtime check in the inclusion module. +/// WARNING: This has to be kept in sync with the runtime check in the inclusion module and +/// statement distribution. fn minimum_votes(n_validators: usize) -> usize { std::cmp::min(2, n_validators) } diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index c0af00154da5..c2d9387090f0 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -29,7 +29,7 @@ use std::collections::{ use bitvec::{order::Lsb0, slice::BitSlice, vec::BitVec}; -use super::LOG_TARGET; +use super::{groups::Groups, LOG_TARGET}; /// Our local view of a subset of the grid topology organized around a specific validator /// group. 
@@ -169,91 +169,112 @@ pub struct CandidateAdvertisers { /// entire session. This stores only data on manifests sent within a bounded /// set of relay-parents. #[derive(Default)] -pub struct PerSessionGridTracker { +pub struct PerRelayParentGridTracker { received: HashMap, - // advertisers of currently unknown candidates. - unknown_advertisers: HashMap>, } -impl PerSessionGridTracker { - /// Collect all garbage, provided a function that informs us on the state - /// of the view. - pub fn collect_garbage(&mut self, view_contains: impl Fn(&Hash) -> bool) { - for (v, knowledge) in &mut self.received { - let pruned = knowledge.collect_garbage(&view_contains); - for pruned_candidate in pruned { - if let Entry::Occupied(mut e) = self.unknown_advertisers.entry(pruned_candidate) { - e.get_mut().remove(v); - if e.get().is_empty() { - e.remove(); - } - } - } - } - } - - /// Note that a candidate's full receipt has been acquired, with its actual group index, - /// relay-parent, and parent head data. +impl PerRelayParentGridTracker { + /// Attempt to import a manifest. /// - /// Returns validator indices which advertised the candidate both - /// correctly and incorrectly. - pub fn note_acquired_candidate( + /// This checks whether the peer is allowed to send us manifests + /// about this group at this relay-parent. This also does sanity + /// checks on the format of the manifest and the amount of votes + /// it contains. It has effects on the stored state only when successful. 
+ pub fn import_manifest( &mut self, + session_topology: &SessionTopologyView, + groups: &Groups, candidate_hash: CandidateHash, - relay_parent: Hash, - group_index: GroupIndex, - parent_hash: Hash, - ) -> CandidateAdvertisers { - let mut advertisers = CandidateAdvertisers::default(); - for v in self.unknown_advertisers.remove(&candidate_hash).into_iter().flat_map(|x| x) { - let knowledge = match self.received.get(&v) { - None => continue, - Some(k) => k, - }; + seconding_limit: usize, + manifest: ManifestSummary, + sender: ValidatorIndex, + ) -> Result<(), ManifestImportError> { + let claimed_group_index = manifest.claimed_group_index; + + if session_topology + .group_views + .get(&manifest.claimed_group_index) + .map_or(false, |g| g.receiving.contains(&sender)) + { + return Err(ManifestImportError::Disallowed) + } - let m = match knowledge.received.get(&candidate_hash) { - None => continue, - Some(m) => m, + let (group_size, backing_threshold) = + match groups.get_size_and_backing_threshold(manifest.claimed_group_index) { + Some(x) => x, + None => return Err(ManifestImportError::Malformed), }; - if m.claimed_relay_parent != relay_parent || - m.claimed_group_index != group_index || - m.claimed_parent_hash != parent_hash - { - advertisers.incorrect.push(v); - } else { - advertisers.correct.push(v) - } + if manifest.seconded_in_group.len() != group_size || + manifest.validated_in_group.len() != group_size + { + return Err(ManifestImportError::Malformed) } - advertisers + if manifest.seconded_in_group.count_ones() == 0 { + return Err(ManifestImportError::Malformed) + } + + // ensure votes are sufficient to back. 
+ let votes = manifest + .seconded_in_group + .iter() + .by_vals() + .zip(manifest.validated_in_group.iter().by_vals()) + .map(|(s, v)| s || v) + .count(); + + if votes < backing_threshold { + return Err(ManifestImportError::Malformed) + } + + self.received.entry(sender).or_default().import_received( + group_size, + seconding_limit, + candidate_hash, + manifest, + ) } } +/// A summary of a manifest being sent by a counterparty. #[derive(Clone)] -struct ManifestSummary { - claimed_relay_parent: Hash, - claimed_parent_hash: Hash, - claimed_group_index: GroupIndex, - seconded_in_group: BitVec, - validated_in_group: BitVec, +pub struct ManifestSummary { + /// The claimed parent head data hash of the candidate. + pub claimed_parent_hash: Hash, + /// The claimed group index assigned to the candidate. + pub claimed_group_index: GroupIndex, + /// A bitfield of validators in the group which seconded the + /// candidate. + pub seconded_in_group: BitVec, + /// A bitfield of validators in the group which validated the + /// candidate. + pub validated_in_group: BitVec, } +/// Errors in importing a manifest. #[derive(Debug, Clone)] -enum ManifestImportError { - // The manifest conflicts with another, previously sent manifest. +pub enum ManifestImportError { + /// The manifest conflicts with another, previously sent manifest. Conflicting, - // The manifest has overflowed beyond the limits of what the - // counterparty was allowed to send us. + /// The manifest has overflowed beyond the limits of what the + /// counterparty was allowed to send us. Overflow, + /// The manifest claims insufficient attestations to achieve the backing + /// threshold. + Insufficient, + /// The manifest is malformed. + Malformed, + /// The manifest was not allowed to be sent. + Disallowed, } /// The knowledge we are awawre of counterparties having of manifests. #[derive(Default)] struct CounterPartyManifestKnowledge { received: HashMap, - // (group, relay parent) -> seconded counts. 
- seconded_counts: HashMap<(GroupIndex, Hash), Vec>, + // group -> seconded counts. + seconded_counts: HashMap>, } impl CounterPartyManifestKnowledge { @@ -261,23 +282,6 @@ impl CounterPartyManifestKnowledge { CounterPartyManifestKnowledge { received: HashMap::new(), seconded_counts: HashMap::new() } } - /// Collect all garbage: anything outside the provided view. - /// Returns a set of all pruned candidate hashes. - fn collect_garbage(&mut self, view_contains: impl Fn(&Hash) -> bool) -> Vec { - let mut v = Vec::new(); - self.seconded_counts.retain(|&(_, ref r_p), _| view_contains(r_p)); - self.received.retain(|c_hash, summary| { - if !view_contains(&summary.claimed_parent_hash) { - v.push(*c_hash); - false - } else { - true - } - }); - - v - } - /// Attempt to import a received manifest from a counterparty. /// /// This will reject manifests which are either duplicate, conflicting, @@ -305,10 +309,6 @@ impl CounterPartyManifestKnowledge { // filter out clearly conflicting data. { let prev = e.get(); - if prev.claimed_relay_parent != manifest_summary.claimed_relay_parent { - return Err(ManifestImportError::Conflicting) - } - if prev.claimed_group_index != manifest_summary.claimed_group_index { return Err(ManifestImportError::Conflicting) } @@ -334,7 +334,6 @@ impl CounterPartyManifestKnowledge { let within_limits = updating_ensure_within_seconding_limit( &mut self.seconded_counts, manifest_summary.claimed_group_index, - manifest_summary.claimed_relay_parent, group_size, seconding_limit, &*fresh_seconded, @@ -354,7 +353,6 @@ impl CounterPartyManifestKnowledge { let within_limits = updating_ensure_within_seconding_limit( &mut self.seconded_counts, manifest_summary.claimed_group_index, - manifest_summary.claimed_relay_parent, group_size, seconding_limit, &*manifest_summary.seconded_in_group, @@ -374,9 +372,8 @@ impl CounterPartyManifestKnowledge { // updates validator-seconded records but only if the new statements // are OK. 
returns `true` if alright and `false` otherwise. fn updating_ensure_within_seconding_limit( - seconded_counts: &mut HashMap<(GroupIndex, Hash), Vec>, + seconded_counts: &mut HashMap>, group_index: GroupIndex, - relay_parent: Hash, group_size: usize, seconding_limit: usize, new_seconded: &BitSlice, @@ -387,9 +384,7 @@ fn updating_ensure_within_seconding_limit( // due to the check above, if this was non-existent this function will // always return `true`. - let counts = seconded_counts - .entry((group_index, relay_parent)) - .or_insert_with(|| vec![0; group_size]); + let counts = seconded_counts.entry(group_index).or_insert_with(|| vec![0; group_size]); for i in new_seconded.iter_ones() { if counts[i] == seconding_limit { @@ -508,7 +503,6 @@ mod tests { let mut knowledge = CounterPartyManifestKnowledge::new(); let expected_manifest_summary = ManifestSummary { - claimed_relay_parent: Hash::repeat_byte(0), claimed_parent_hash: Hash::repeat_byte(2), claimed_group_index: GroupIndex(0), seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], @@ -524,15 +518,6 @@ mod tests { ) .unwrap(); - // conflicting relay-parent - - let mut s = expected_manifest_summary.clone(); - s.claimed_relay_parent = Hash::repeat_byte(1); - assert_matches!( - knowledge.import_received(3, 2, CandidateHash(Hash::repeat_byte(1)), s,), - Err(ManifestImportError::Conflicting) - ); - // conflicting group let mut s = expected_manifest_summary.clone(); @@ -579,7 +564,6 @@ mod tests { 2, CandidateHash(Hash::repeat_byte(1)), ManifestSummary { - claimed_relay_parent: Hash::repeat_byte(0), claimed_parent_hash: Hash::repeat_byte(0xA), claimed_group_index: GroupIndex(0), seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], @@ -594,7 +578,6 @@ mod tests { 2, CandidateHash(Hash::repeat_byte(2)), ManifestSummary { - claimed_relay_parent: Hash::repeat_byte(0), claimed_parent_hash: Hash::repeat_byte(0xB), claimed_group_index: GroupIndex(0), seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], @@ -609,7 +592,6 @@ 
mod tests { 2, CandidateHash(Hash::repeat_byte(3)), ManifestSummary { - claimed_relay_parent: Hash::repeat_byte(0), claimed_parent_hash: Hash::repeat_byte(0xC), claimed_group_index: GroupIndex(0), seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], @@ -625,7 +607,6 @@ mod tests { 2, CandidateHash(Hash::repeat_byte(3)), ManifestSummary { - claimed_relay_parent: Hash::repeat_byte(0), claimed_parent_hash: Hash::repeat_byte(0xC), claimed_group_index: GroupIndex(0), seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], @@ -633,22 +614,5 @@ mod tests { }, ) .unwrap(); - - // different relay-parent: different counters. - assert_matches!( - knowledge.import_received( - 3, - 2, - CandidateHash(Hash::repeat_byte(4)), - ManifestSummary { - claimed_relay_parent: Hash::repeat_byte(1), - claimed_parent_hash: Hash::repeat_byte(0xC), - claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], - }, - ), - Ok(()) - ); } } diff --git a/node/network/statement-distribution/src/vstaging/groups.rs b/node/network/statement-distribution/src/vstaging/groups.rs index ebfb6421e429..5f3618cd7946 100644 --- a/node/network/statement-distribution/src/vstaging/groups.rs +++ b/node/network/statement-distribution/src/vstaging/groups.rs @@ -60,6 +60,14 @@ impl Groups { self.groups.get(group_index.0 as usize).map(|x| &x[..]) } + /// Get the backing group size and backing threshold. + pub fn get_size_and_backing_threshold( + &self, + group_index: GroupIndex, + ) -> Option<(usize, usize)> { + self.get(group_index).map(|g| (g.len(), minimum_votes(g.len()))) + } + /// Get the group index for a validator by index. pub fn by_validator_index(&self, validator_index: ValidatorIndex) -> Option { self.by_validator_index.get(&validator_index).map(|x| *x) @@ -70,3 +78,12 @@ impl Groups { self.by_discovery_key.get(&discovery_key).map(|x| *x) } } + +/// How many votes we need to consider a candidate backed. 
+/// +/// WARNING: This has to be kept in sync with the runtime check in the inclusion module and +/// the backing subsystem. +// TODO [now]: extract to shared primitives. +fn minimum_votes(n_validators: usize) -> usize { + std::cmp::min(2, n_validators) +} diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 7111df9c54df..ce5677c96faf 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -82,6 +82,9 @@ const BENEFIT_VALID_STATEMENT: Rep = Rep::BenefitMajor("Peer provided a valid st struct PerRelayParentState { validator_state: HashMap, + // TODO [now]: this should be a global view which tracks + // advertisers' claimed relay-parent, group, para-head for unconfirmed + // candidates. that will be used to report them when confirming. candidates: HashMap, local_validator: Option, statement_store: StatementStore, From e969ed04bf684cb5b91a66d9cb370ab2b77046fa Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 22 Oct 2022 16:34:20 -0500 Subject: [PATCH 071/220] tweak naming --- .../statement-distribution/src/vstaging/grid.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index c2d9387090f0..024056e3f173 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -170,7 +170,7 @@ pub struct CandidateAdvertisers { /// set of relay-parents. #[derive(Default)] pub struct PerRelayParentGridTracker { - received: HashMap, + received: HashMap, } impl PerRelayParentGridTracker { @@ -271,15 +271,15 @@ pub enum ManifestImportError { /// The knowledge we are awawre of counterparties having of manifests. 
#[derive(Default)] -struct CounterPartyManifestKnowledge { +struct ManifestKnowledge { received: HashMap, // group -> seconded counts. seconded_counts: HashMap>, } -impl CounterPartyManifestKnowledge { +impl ManifestKnowledge { fn new() -> Self { - CounterPartyManifestKnowledge { received: HashMap::new(), seconded_counts: HashMap::new() } + ManifestKnowledge { received: HashMap::new(), seconded_counts: HashMap::new() } } /// Attempt to import a received manifest from a counterparty. @@ -328,9 +328,6 @@ impl CounterPartyManifestKnowledge { let mut fresh_seconded = manifest_summary.seconded_in_group.clone(); fresh_seconded |= &prev.seconded_in_group; - let mut fresh_validated = manifest_summary.validated_in_group.clone(); - fresh_validated |= &prev.validated_in_group; - let within_limits = updating_ensure_within_seconding_limit( &mut self.seconded_counts, manifest_summary.claimed_group_index, @@ -500,7 +497,7 @@ mod tests { #[test] fn knowledge_rejects_conflicting_manifest() { - let mut knowledge = CounterPartyManifestKnowledge::new(); + let mut knowledge = ManifestKnowledge::default(); let expected_manifest_summary = ManifestSummary { claimed_parent_hash: Hash::repeat_byte(2), @@ -557,7 +554,7 @@ mod tests { #[test] fn reject_overflowing_manifests() { - let mut knowledge = CounterPartyManifestKnowledge::new(); + let mut knowledge = ManifestKnowledge::default(); knowledge .import_received( 3, @@ -615,4 +612,6 @@ mod tests { ) .unwrap(); } + + // TODO [now]: tests for malformed or disallowed manifests. 
} From ab152c62ab196761462ba772d3c56c10622d13d2 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 23 Oct 2022 18:17:17 -0500 Subject: [PATCH 072/220] more tests for manifest import --- .../src/vstaging/grid.rs | 275 +++++++++++++++++- 1 file changed, 263 insertions(+), 12 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 024056e3f173..4be90ae7b8d8 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -156,15 +156,6 @@ pub fn build_session_topology( view } -/// Advertisers of a candidate which became known. -#[derive(Default)] -pub struct CandidateAdvertisers { - /// Advertisers who got the details correct. - pub correct: Vec, - /// Advertisers who got the details incorrect. - pub incorrect: Vec, -} - /// A tracker of knowledge from authorities within the grid for the /// entire session. This stores only data on manifests sent within a bounded /// set of relay-parents. @@ -194,7 +185,7 @@ impl PerRelayParentGridTracker { if session_topology .group_views .get(&manifest.claimed_group_index) - .map_or(false, |g| g.receiving.contains(&sender)) + .map_or(true, |g| !g.receiving.contains(&sender)) { return Err(ManifestImportError::Disallowed) } @@ -221,7 +212,7 @@ impl PerRelayParentGridTracker { .iter() .by_vals() .zip(manifest.validated_in_group.iter().by_vals()) - .map(|(s, v)| s || v) + .filter(|&(s, v)| s || v) .count(); if votes < backing_threshold { @@ -613,5 +604,265 @@ mod tests { .unwrap(); } - // TODO [now]: tests for malformed or disallowed manifests. 
+ #[test] + fn reject_disallowed_manifest() { + let mut tracker = PerRelayParentGridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![ + (GroupIndex(0), GroupSubView { + sending: HashSet::new(), + receiving: vec![ValidatorIndex(0)].into_iter().collect(), + }) + ].into_iter().collect(), + }; + + let groups = Groups::new( + vec![vec![ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]], + &[ + AuthorityDiscoveryPair::generate().0.public(), + AuthorityDiscoveryPair::generate().0.public(), + AuthorityDiscoveryPair::generate().0.public(), + ], + ); + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + assert_eq!( + groups.get_size_and_backing_threshold(GroupIndex(0)), + Some((3, 2)), + ); + + // Known group, disallowed receiving validator. + + assert_matches!( + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + ValidatorIndex(1), + ), + Err(ManifestImportError::Disallowed) + ); + + // Unknown group + + assert_matches!( + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: GroupIndex(1), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + ValidatorIndex(0), + ), + Err(ManifestImportError::Disallowed) + ); + } + + #[test] + fn reject_malformed_wrong_group_size() { + let mut tracker = PerRelayParentGridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![ + (GroupIndex(0), GroupSubView { + sending: HashSet::new(), + receiving: vec![ValidatorIndex(0)].into_iter().collect(), + }) + ].into_iter().collect(), + }; + + let groups = Groups::new( + 
vec![vec![ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]], + &[ + AuthorityDiscoveryPair::generate().0.public(), + AuthorityDiscoveryPair::generate().0.public(), + AuthorityDiscoveryPair::generate().0.public(), + ], + ); + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + assert_eq!( + groups.get_size_and_backing_threshold(GroupIndex(0)), + Some((3, 2)), + ); + + assert_matches!( + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + ValidatorIndex(0), + ), + Err(ManifestImportError::Malformed) + ); + + assert_matches!( + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1, 0], + }, + ValidatorIndex(0), + ), + Err(ManifestImportError::Malformed) + ); + } + + #[test] + fn reject_malformed_no_seconders() { + let mut tracker = PerRelayParentGridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![ + (GroupIndex(0), GroupSubView { + sending: HashSet::new(), + receiving: vec![ValidatorIndex(0)].into_iter().collect(), + }) + ].into_iter().collect(), + }; + + let groups = Groups::new( + vec![vec![ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]], + &[ + AuthorityDiscoveryPair::generate().0.public(), + AuthorityDiscoveryPair::generate().0.public(), + AuthorityDiscoveryPair::generate().0.public(), + ], + ); + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + assert_eq!( + groups.get_size_and_backing_threshold(GroupIndex(0)), + Some((3, 2)), + ); + + assert_matches!( + tracker.import_manifest( + 
&session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + }, + ValidatorIndex(0), + ), + Err(ManifestImportError::Malformed) + ); + } + + #[test] + fn reject_malformed_below_threshold() { + let mut tracker = PerRelayParentGridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![ + (GroupIndex(0), GroupSubView { + sending: HashSet::new(), + receiving: vec![ValidatorIndex(0)].into_iter().collect(), + }) + ].into_iter().collect(), + }; + + let groups = Groups::new( + vec![vec![ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]], + &[ + AuthorityDiscoveryPair::generate().0.public(), + AuthorityDiscoveryPair::generate().0.public(), + AuthorityDiscoveryPair::generate().0.public(), + ], + ); + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + assert_eq!( + groups.get_size_and_backing_threshold(GroupIndex(0)), + Some((3, 2)), + ); + + // only one vote + + assert_matches!( + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + ValidatorIndex(0), + ), + Err(ManifestImportError::Malformed) + ); + + // seconding + validating still not enough to reach '2' threshold + + assert_matches!( + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + }, + ValidatorIndex(0), + ), + Err(ManifestImportError::Malformed) + ); + + // 
finally good. + + assert_matches!( + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: GroupIndex(0), + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + }, + ValidatorIndex(0), + ), + Ok(()) + ); + } } From 5762b9f502e4f4c323b6e7441773cfd6f552af78 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 23 Oct 2022 18:17:42 -0500 Subject: [PATCH 073/220] add comment --- node/network/statement-distribution/src/vstaging/grid.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 4be90ae7b8d8..c67c85320819 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -212,7 +212,7 @@ impl PerRelayParentGridTracker { .iter() .by_vals() .zip(manifest.validated_in_group.iter().by_vals()) - .filter(|&(s, v)| s || v) + .filter(|&(s, v)| s || v) // no double-counting .count(); if votes < backing_threshold { From 8a6e8f517ae76d234861c30e08ee3c6a4188e609 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 24 Oct 2022 11:36:11 -0500 Subject: [PATCH 074/220] rework candidates into a view-wide tracker --- .../src/vstaging/candidate_entry.rs | 105 ------- .../src/vstaging/candidates.rs | 263 ++++++++++++++++++ .../src/vstaging/mod.rs | 86 +++--- 3 files changed, 299 insertions(+), 155 deletions(-) delete mode 100644 node/network/statement-distribution/src/vstaging/candidate_entry.rs create mode 100644 node/network/statement-distribution/src/vstaging/candidates.rs diff --git a/node/network/statement-distribution/src/vstaging/candidate_entry.rs b/node/network/statement-distribution/src/vstaging/candidate_entry.rs deleted file mode 100644 index e6813e1b60fc..000000000000 --- 
a/node/network/statement-distribution/src/vstaging/candidate_entry.rs +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2022 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! A [`CandidateEntry`] tracks all info concerning a candidate block. -//! -//! This entity doesn't actually store the statements about the candidate, -//! just metadata of which validators have seconded or validated the -//! candidate, and the candidate and [`PersistedValidationData`] itself, -//! if that has already been fetched. -//! -//! Note that it is possible for validators for multiple groups to second -//! a candidate. Given that each candidate's para and relay-parent is -//! determined by the candidate hash, and the current scheduling mechanism -//! of the relay-chain only schedules one group per para per relay-parent, -//! this is certainly in error. Nevertheless, if we receive statements about -//! a candidate _prior_ to fetching the candidate itself, we do not have -//! confirmation of which group is assigned to the para in actuality. - -use polkadot_primitives::vstaging::{ - CandidateHash, CommittedCandidateReceipt, GroupIndex, PersistedValidationData, -}; - -/// A tracker for all validators which have seconded or validated a particular -/// candidate. See module docs for more details. 
-pub struct CandidateEntry { - candidate_hash: CandidateHash, - state: CandidateState, -} - -impl CandidateEntry { - /// Create an unconfirmed [`CandidateEntry`] - pub fn unconfirmed(candidate_hash: CandidateHash) -> Self { - CandidateEntry { candidate_hash, state: CandidateState::Unconfirmed } - } - - /// Create a confirmed [`CandidateEntry`] - pub fn confirmed( - candidate_hash: CandidateHash, - receipt: CommittedCandidateReceipt, - persisted_validation_data: PersistedValidationData, - ) -> Self { - CandidateEntry { - candidate_hash, - state: CandidateState::Confirmed(receipt, persisted_validation_data), - } - } - - /// Supply the [`CommittedCandidateReceipt`] and [`PersistedValidationData`]. - /// This does not check that the receipt matches the candidate hash nor that the PVD - /// matches the commitment in the candidate's descriptor. - /// - /// No-op if already provided. - pub fn confirm(&mut self, candidate: CommittedCandidateReceipt, pvd: PersistedValidationData) { - if let CandidateState::Confirmed(_, _) = self.state { - return - } - self.state = CandidateState::Confirmed(candidate, pvd); - } - - /// Whether the candidate is confirmed to actually exist. - pub fn is_confirmed(&self) -> bool { - match self.state { - CandidateState::Confirmed(_, _) => true, - CandidateState::Unconfirmed => false, - } - } - - /// The internals of a confirmed candidate. Exists iff confirmed. - pub fn confirmed_internals( - &self, - ) -> Option<(&CommittedCandidateReceipt, &PersistedValidationData)> { - match self.state { - CandidateState::Confirmed(ref c, ref pvd) => Some((c, pvd)), - CandidateState::Unconfirmed => None, - } - } - - /// The receipt of the candidate. Exists iff confirmed. - pub fn receipt(&self) -> Option<&CommittedCandidateReceipt> { - self.confirmed_internals().map(|(c, _)| c) - } - - /// The persisted-validation-data of the candidate. Exists iff confirmed. 
- pub fn persisted_validation_data(&self) -> Option<&PersistedValidationData> { - self.confirmed_internals().map(|(_, p)| p) - } -} - -enum CandidateState { - Unconfirmed, - Confirmed(CommittedCandidateReceipt, PersistedValidationData), -} diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs new file mode 100644 index 000000000000..ea0ffa159f88 --- /dev/null +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -0,0 +1,263 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The [`Candidates`] store tracks information about advertised candidates +//! as well as which peers have advertised them. +//! +//! Due to the request-oriented nature of this protocol, we often learn +//! about candidates just as a hash, alongside claimed properties that the +//! receipt would commit to. However, it is only later on that we can +//! confirm those claimed properties. This store lets us keep track of the +//! all candidates which are currently 'relevant' after spam-protection, and +//! gives us the ability to detect mis-advertisements after the fact +//! and punish them accordingly. 
+ +use polkadot_node_network_protocol::PeerId; +use polkadot_primitives::vstaging::{ + CandidateHash, CommittedCandidateReceipt, GroupIndex, Hash, Id as ParaId, + PersistedValidationData, +}; + +use std::collections::{ + hash_map::{Entry, HashMap}, + HashSet, +}; + +/// A tracker for all known candidates in the view. +/// +/// See module docs for more info. +#[derive(Default)] +pub struct Candidates { + candidates: HashMap, +} + +impl Candidates { + /// Insert an advertisement. + /// + /// This should be invoked only after performing + /// spam protection and only for advertisements that + /// are valid within the current view. [`Candidates`] never prunes + /// candidate by peer ID, to avoid peers skirting misbehavior + /// reports by disconnecting intermittently. Therefore, this presumes + /// that spam protection limits the peers which can send advertisements + /// about unconfirmed candidates. + /// + /// It returns either `Ok(())` or an immediate error in the + /// case that the candidate is already known and reality conflicts + /// with the advertisement. 
+ pub fn insert_unconfirmed( + &mut self, + peer: PeerId, + candidate_hash: CandidateHash, + claimed_relay_parent: Hash, + claimed_group_index: GroupIndex, + claimed_parent_hash: Option, + ) -> Result<(), BadAdvertisement> { + let entry = self.candidates.entry(candidate_hash).or_insert_with(|| { + CandidateState::Unconfirmed(UnconfirmedCandidate { claims: Vec::new() }) + }); + + match entry { + CandidateState::Confirmed(ref c) => { + if c.relay_parent() != claimed_relay_parent { + return Err(BadAdvertisement) + } + + if c.group_index() != claimed_group_index { + return Err(BadAdvertisement) + } + + if let Some(claimed_parent_hash) = claimed_parent_hash { + if c.parent_hash() != claimed_parent_hash { + return Err(BadAdvertisement) + } + } + }, + CandidateState::Unconfirmed(ref mut c) => { + c.add_claims( + peer, + CandidateClaims { + relay_parent: claimed_relay_parent, + group_index: claimed_group_index, + parent_hash: claimed_parent_hash, + }, + ); + }, + } + + Ok(()) + } + + /// Note that a candidate has been confirmed, + /// yielding lists of peers which advertised it + /// both correctly and incorrectly. + /// + /// This does no sanity-checking of input data. 
+ pub fn confirm_candidate( + &mut self, + candidate_hash: CandidateHash, + candidate_receipt: CommittedCandidateReceipt, + persisted_validation_data: PersistedValidationData, + assigned_group: GroupIndex, + ) -> Option { + let parent_hash = persisted_validation_data.parent_head.hash(); + let relay_parent = candidate_receipt.descriptor().relay_parent; + + let prev_state = self.candidates.insert( + candidate_hash, + CandidateState::Confirmed(ConfirmedCandidate { + receipt: candidate_receipt, + persisted_validation_data, + assigned_group, + parent_hash, + }), + ); + + match prev_state { + None => None, + Some(CandidateState::Confirmed(_)) => None, + Some(CandidateState::Unconfirmed(u)) => Some({ + let mut reckoning = PostConfirmationReckoning { + correct: HashSet::new(), + incorrect: HashSet::new(), + }; + + for (peer, claims) in u.claims { + if claims.check(relay_parent, assigned_group, parent_hash) { + reckoning.correct.insert(peer); + } else { + reckoning.incorrect.insert(peer); + } + } + + reckoning + }), + } + } + + /// Whether a candidate is confirmed. + pub fn is_confirmed(&self, candidate_hash: &CandidateHash) -> bool { + match self.candidates.get(candidate_hash) { + Some(CandidateState::Confirmed(_)) => true, + _ => false, + } + } + + /// Get a reference to the candidate, if it's known and confirmed. + pub fn get_confirmed(&self, candidate_hash: &CandidateHash) -> Option<&ConfirmedCandidate> { + match self.candidates.get(candidate_hash) { + Some(CandidateState::Confirmed(ref c)) => Some(c), + _ => None, + } + } + + /// Prune all candidates according to the relay-parent predicate + /// provided. 
+ pub fn collect_garbage(&mut self, relay_parent_live: impl Fn(Hash) -> bool) { + self.candidates.retain(|_, state| match state { + CandidateState::Confirmed(ref c) => relay_parent_live(c.relay_parent()), + CandidateState::Unconfirmed(ref mut c) => { + c.claims.retain(|c| relay_parent_live(c.1.relay_parent)); + + !c.claims.is_empty() + }, + }) + } +} + +/// This encapsulates the correct and incorrect advertisers +/// post-confirmation of a candidate. +pub struct PostConfirmationReckoning { + /// Peers which advertised correctly. + pub correct: HashSet, + /// Peers which advertised the candidate incorrectly. + pub incorrect: HashSet, +} + +/// A bad advertisement was recognized. +#[derive(Debug)] +pub struct BadAdvertisement; + +enum CandidateState { + Unconfirmed(UnconfirmedCandidate), + Confirmed(ConfirmedCandidate), +} + +/// Claims made alongside the advertisement of a candidate. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +struct CandidateClaims { + /// The relay-parent committed to by the candidate. + relay_parent: Hash, + /// The group index assigned to this candidate. + group_index: GroupIndex, + /// The hash of the parent head-data. This is optional, + /// as only some types of advertisements include this data. + parent_hash: Option, +} + +impl CandidateClaims { + fn check(&self, relay_parent: Hash, group_index: GroupIndex, parent_hash: Hash) -> bool { + self.relay_parent == relay_parent && + self.group_index == group_index && + self.parent_hash.map_or(true, |p| p == parent_hash) + } +} + +// An unconfirmed candidate may have have been advertised under +// multiple identifiers. We track here, on the basis of unique identifier, +// the peers which advertised each candidate in a specific way. 
+struct UnconfirmedCandidate { + claims: Vec<(PeerId, CandidateClaims)>, +} + +impl UnconfirmedCandidate { + fn add_claims(&mut self, peer: PeerId, claims: CandidateClaims) { + // This does no deduplication, but this is only called after + // spam prevention is already done. In practice we expect that + // each peer will be able to announce the same candidate about 1 time per live relay-parent, + // but in doing so it limits the amount of other candidates it can advertise. on balance, + // memory consumption is bounded in the same way. + self.claims.push((peer, claims)); + } +} + +/// A confirmed candidate. +pub struct ConfirmedCandidate { + receipt: CommittedCandidateReceipt, + persisted_validation_data: PersistedValidationData, + assigned_group: GroupIndex, + parent_hash: Hash, +} + +impl ConfirmedCandidate { + /// Get the relay-parent of the candidate. + pub fn relay_parent(&self) -> Hash { + self.receipt.descriptor().relay_parent + } + + /// Get the para-id of the candidate. + pub fn para_id(&self) -> ParaId { + self.receipt.descriptor().para_id + } + + fn group_index(&self) -> GroupIndex { + self.assigned_group + } + + fn parent_hash(&self) -> Hash { + self.parent_hash + } +} diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index ce5677c96faf..abe40b1241d9 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -48,13 +48,13 @@ use crate::{ error::{JfyiError, JfyiErrorResult}, LOG_TARGET, }; -use candidate_entry::CandidateEntry; +use candidates::{BadAdvertisement, Candidates}; use cluster::{Accept as ClusterAccept, ClusterTracker, RejectIncoming as ClusterRejectIncoming}; use groups::Groups; use requests::RequestManager; use statement_store::StatementStore; -mod candidate_entry; +mod candidates; mod cluster; mod grid; mod groups; @@ -82,10 +82,6 @@ const BENEFIT_VALID_STATEMENT: Rep = 
Rep::BenefitMajor("Peer provided a valid st struct PerRelayParentState { validator_state: HashMap, - // TODO [now]: this should be a global view which tracks - // advertisers' claimed relay-parent, group, para-head for unconfirmed - // candidates. that will be used to report them when confirming. - candidates: HashMap, local_validator: Option, statement_store: StatementStore, session: SessionIndex, @@ -118,6 +114,7 @@ pub(crate) struct State { /// /// We only feed leaves which have prospective parachains enabled to this view. implicit_view: ImplicitView, + candidates: Candidates, per_relay_parent: HashMap, per_session: HashMap, peers: HashMap, @@ -316,7 +313,6 @@ pub(crate) async fn handle_activated_leaf( *leaf, PerRelayParentState { validator_state: HashMap::new(), - candidates: HashMap::new(), local_validator, statement_store: StatementStore::new(&per_session.groups), session: session_index, @@ -422,13 +418,16 @@ pub(crate) async fn share_local_statement( // Two possibilities: either the statement is `Seconded` or we already // have the candidate. Sanity: check the para-id is valid. - let expected_para = match statement.payload() { - FullStatementWithPVD::Seconded(ref c, _) => Some(c.descriptor().para_id), - FullStatementWithPVD::Valid(hash) => per_relay_parent - .candidates - .get(&hash) - .and_then(|c| c.receipt()) - .map(|c| c.descriptor().para_id), + let expected = match statement.payload() { + FullStatementWithPVD::Seconded(ref c, _) => + Some((c.descriptor().para_id, c.descriptor().relay_parent)), + FullStatementWithPVD::Valid(hash) => + state.candidates.get_confirmed(&hash).map(|c| (c.para_id(), c.relay_parent())), + }; + + let (expected_para, expected_relay_parent) = match expected { + None => return Err(JfyiError::InvalidShare), + Some(x) => x, }; if local_index != statement.validator_index() { @@ -438,7 +437,7 @@ pub(crate) async fn share_local_statement( // TODO [now]: ensure seconded_count isn't too high. Needs our definition // of 'too high' i.e. 
max_depth, which isn't done yet. - if expected_para.is_none() || local_assignment != expected_para { + if local_assignment != Some(expected_para) || relay_parent != expected_relay_parent { return Err(JfyiError::InvalidShare) } @@ -447,28 +446,8 @@ pub(crate) async fn share_local_statement( let compact_statement = FullStatementWithPVD::signed_to_compact(statement.clone()); let candidate_hash = CandidateHash(*statement.payload().candidate_hash()); - let candidate_entry = match statement.payload() { - FullStatementWithPVD::Seconded(ref c, ref pvd) => { - let candidate_entry = - per_relay_parent.candidates.entry(candidate_hash).or_insert_with(|| { - CandidateEntry::confirmed(candidate_hash, c.clone(), pvd.clone()) - }); - - candidate_entry - }, - FullStatementWithPVD::Valid(_) => { - match per_relay_parent.candidates.get_mut(&candidate_hash) { - None => { - // Can't share a 'Valid' statement about a candidate we don't know about! - return Err(JfyiError::InvalidShare) - }, - Some(ref c) if !c.is_confirmed() => { - // Can't share a 'Valid' statement about a candidate we don't know about! - return Err(JfyiError::InvalidShare) - }, - Some(c) => c, - } - }, + if let FullStatementWithPVD::Seconded(ref c, ref pvd) = statement.payload() { + // TODO [now]: insert confirmed candidate and report peers. }; match per_relay_parent @@ -831,7 +810,7 @@ async fn handle_incoming_statement( ); let statement = checked_statement.payload().clone(); - let sender_index = checked_statement.validator_index(); + let originator_index = checked_statement.validator_index(); let candidate_hash = *checked_statement.payload().candidate_hash(); let was_fresh = match per_relay_parent.statement_store.insert(&per_session.groups, checked_statement) { @@ -840,7 +819,7 @@ async fn handle_incoming_statement( gum::warn!( target: LOG_TARGET, ?relay_parent, - validator_index = ?sender_index, + validator_index = ?originator_index, "Error -Cluster accepted message from unknown validator." 
); @@ -849,24 +828,31 @@ async fn handle_incoming_statement( Ok(known) => known, }; - let sender_group_index = per_relay_parent + let originator_group = per_relay_parent .statement_store - .validator_group_index(sender_index) + .validator_group_index(originator_index) .expect("validator confirmed to be known by statement_store.insert; qed"); // Insert an unconfirmed candidate entry if needed - let candidate_entry = per_relay_parent - .candidates - .entry(candidate_hash) - .or_insert_with(|| CandidateEntry::unconfirmed(candidate_hash)); - - // If the candidate is not confirmed, note that we should attempt - // to request it from the given peer. - if !candidate_entry.is_confirmed() { + let res = state.candidates.insert_unconfirmed( + peer.clone(), + candidate_hash, + relay_parent, + originator_group, + None, + ); + + if let Err(BadAdvertisement) = res { + // TODO [now]: punish the peer. + // TODO [now]: return? + } else if !state.candidates.is_confirmed(&candidate_hash) { + // If the candidate is not confirmed, note that we should attempt + // to request it from the given peer. 
let mut request_entry = state .request_manager - .get_or_insert(relay_parent, candidate_hash, sender_group_index); + .get_or_insert(relay_parent, candidate_hash, originator_group); + request_entry.get_mut().add_peer(peer); request_entry.get_mut().set_cluster_priority(); } From f395301979623b32733d537b7a1d88b5ff634192 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 24 Oct 2022 11:36:14 -0500 Subject: [PATCH 075/220] fmt --- .../src/vstaging/grid.rs | 64 +++++++++---------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index c67c85320819..1626f6a99261 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -608,12 +608,15 @@ mod tests { fn reject_disallowed_manifest() { let mut tracker = PerRelayParentGridTracker::default(); let session_topology = SessionTopologyView { - group_views: vec![ - (GroupIndex(0), GroupSubView { + group_views: vec![( + GroupIndex(0), + GroupSubView { sending: HashSet::new(), receiving: vec![ValidatorIndex(0)].into_iter().collect(), - }) - ].into_iter().collect(), + }, + )] + .into_iter() + .collect(), }; let groups = Groups::new( @@ -627,10 +630,7 @@ mod tests { let candidate_hash = CandidateHash(Hash::repeat_byte(42)); - assert_eq!( - groups.get_size_and_backing_threshold(GroupIndex(0)), - Some((3, 2)), - ); + assert_eq!(groups.get_size_and_backing_threshold(GroupIndex(0)), Some((3, 2)),); // Known group, disallowed receiving validator. 
@@ -675,12 +675,15 @@ mod tests { fn reject_malformed_wrong_group_size() { let mut tracker = PerRelayParentGridTracker::default(); let session_topology = SessionTopologyView { - group_views: vec![ - (GroupIndex(0), GroupSubView { + group_views: vec![( + GroupIndex(0), + GroupSubView { sending: HashSet::new(), receiving: vec![ValidatorIndex(0)].into_iter().collect(), - }) - ].into_iter().collect(), + }, + )] + .into_iter() + .collect(), }; let groups = Groups::new( @@ -694,10 +697,7 @@ mod tests { let candidate_hash = CandidateHash(Hash::repeat_byte(42)); - assert_eq!( - groups.get_size_and_backing_threshold(GroupIndex(0)), - Some((3, 2)), - ); + assert_eq!(groups.get_size_and_backing_threshold(GroupIndex(0)), Some((3, 2)),); assert_matches!( tracker.import_manifest( @@ -738,12 +738,15 @@ mod tests { fn reject_malformed_no_seconders() { let mut tracker = PerRelayParentGridTracker::default(); let session_topology = SessionTopologyView { - group_views: vec![ - (GroupIndex(0), GroupSubView { + group_views: vec![( + GroupIndex(0), + GroupSubView { sending: HashSet::new(), receiving: vec![ValidatorIndex(0)].into_iter().collect(), - }) - ].into_iter().collect(), + }, + )] + .into_iter() + .collect(), }; let groups = Groups::new( @@ -757,10 +760,7 @@ mod tests { let candidate_hash = CandidateHash(Hash::repeat_byte(42)); - assert_eq!( - groups.get_size_and_backing_threshold(GroupIndex(0)), - Some((3, 2)), - ); + assert_eq!(groups.get_size_and_backing_threshold(GroupIndex(0)), Some((3, 2)),); assert_matches!( tracker.import_manifest( @@ -784,12 +784,15 @@ mod tests { fn reject_malformed_below_threshold() { let mut tracker = PerRelayParentGridTracker::default(); let session_topology = SessionTopologyView { - group_views: vec![ - (GroupIndex(0), GroupSubView { + group_views: vec![( + GroupIndex(0), + GroupSubView { sending: HashSet::new(), receiving: vec![ValidatorIndex(0)].into_iter().collect(), - }) - ].into_iter().collect(), + }, + )] + .into_iter() + .collect(), }; let 
groups = Groups::new( @@ -803,10 +806,7 @@ mod tests { let candidate_hash = CandidateHash(Hash::repeat_byte(42)); - assert_eq!( - groups.get_size_and_backing_threshold(GroupIndex(0)), - Some((3, 2)), - ); + assert_eq!(groups.get_size_and_backing_threshold(GroupIndex(0)), Some((3, 2)),); // only one vote From cd5fac36dfdcf8a9faf62915e125a896e64f2fd3 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 24 Oct 2022 23:10:21 -0500 Subject: [PATCH 076/220] start writing boilerplate for grid sending --- .../src/vstaging/grid.rs | 362 +++++++++++++++++- .../src/vstaging/statement_store.rs | 1 + 2 files changed, 357 insertions(+), 6 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 1626f6a99261..2eff856896a7 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -156,12 +156,19 @@ pub fn build_session_topology( view } +/// Actions that can be taken once affirming that a candidate is backed. +pub enum PostBackingAction { + Acknowledge, + Advertise, +} + /// A tracker of knowledge from authorities within the grid for the /// entire session. This stores only data on manifests sent within a bounded /// set of relay-parents. #[derive(Default)] pub struct PerRelayParentGridTracker { - received: HashMap, + received: HashMap, + known: HashMap, } impl PerRelayParentGridTracker { @@ -219,12 +226,109 @@ impl PerRelayParentGridTracker { return Err(ManifestImportError::Malformed) } + // TODO [now]: disallow manifests when we've advertised to them + // or have already acknowledged their advertisement. can be racy, + // misbehavior should be minor. + self.received.entry(sender).or_default().import_received( group_size, seconding_limit, candidate_hash, manifest, ) + + // TODO [now]: update remote mutual knowledge for candidate. + } + + /// Add a new backed candidate to the tracker. 
This yields + /// an iterator of validators which we should either advertise to + /// or signal that we know the candidate. + pub fn add_backed_candidate( + &mut self, + session_topology: &SessionTopologyView, + candidate_hash: CandidateHash, + group_index: GroupIndex, + group_size: usize, + ) -> Vec<(ValidatorIndex, PostBackingAction)> { + let c = self.known.entry(candidate_hash).or_insert_with(|| KnownBackedCandidate { + confirmed_backed: false, + group_index, + mutual_knowledge: HashMap::new(), + }); + + c.note_confirmed_backed(); + + let mut actions = Vec::new(); + let group_topology = match session_topology.group_views.get(&group_index) { + None => return actions, + Some(g) => g, + }; + + // advertise onwards and accept received advertisements + + for &v in &group_topology.sending { + if c.should_advertise(v) { + actions.push((v, PostBackingAction::Advertise)) + } + } + + for &v in &group_topology.receiving { + if c.can_local_acknowledge(v) { + actions.push((v, PostBackingAction::Acknowledge)) + } + } + + actions + } + + /// Note that a backed candidate has been advertised to a + /// given validator. + pub fn note_advertised_to( + &mut self, + validator_index: ValidatorIndex, + candidate_hash: CandidateHash, + ) { + unimplemented!() + } + + /// Provided a validator index, gives an iterator of candidate + /// hashes which may be advertised to the validator and have not yet + /// been. + pub fn advertisements( + &self, + session_topology: &SessionTopologyView, + validator_index: ValidatorIndex, + ) -> Vec { // TODO [now]: impl iterator + unimplemented!() + } + + /// Whether the given validator is allowed to acknowledge an advertisement + /// via request. + pub fn can_remote_acknowledge( + &self, + validator_index: ValidatorIndex, + candidate_hash: CandidateHash, + ) -> bool { + unimplemented!() + } + + /// note that a validator peer we advertised a backed candidate to + /// now knows the candidate without a doubt.
+ pub fn note_known( + &mut self, + validator_index: ValidatorIndex, + candidate_hash: CandidateHash, + ) { + unimplemented!() + } + + /// Whether a validator peer knows the underlying candidate. + pub fn known_by( + &self, + validator_index: ValidatorIndex, + candidate_hash: CandidateHash, + ) { + } } @@ -262,15 +366,15 @@ pub enum ManifestImportError { /// The knowledge we are awawre of counterparties having of manifests. #[derive(Default)] -struct ManifestKnowledge { +struct ReceivedManifests { received: HashMap, // group -> seconded counts. seconded_counts: HashMap>, } -impl ManifestKnowledge { +impl ReceivedManifests { fn new() -> Self { - ManifestKnowledge { received: HashMap::new(), seconded_counts: HashMap::new() } + ReceivedManifests { received: HashMap::new(), seconded_counts: HashMap::new() } } /// Attempt to import a received manifest from a counterparty. @@ -387,6 +491,252 @@ fn updating_ensure_within_seconding_limit( true } +// The direction of advertisement about the candidate. +enum AdvertisementDirection { + // We advertised to the remote. + Outgoing, + // They advertised to us. + Incoming, +} + +impl AdvertisementDirection { + fn is_outgoing(&self) -> bool { + match *self { + AdvertisementDirection::Outgoing => true, + AdvertisementDirection::Incoming => false, + } + } + + fn is_incoming(&self) -> bool { + match *self { + AdvertisementDirection::Outgoing => false, + AdvertisementDirection::Incoming => true, + } + } +} + +#[derive(Clone, Copy)] +enum StatementKind { + Seconded, + Valid, +} + +/// Bitfields indicating the statements that are known or undesired +/// about a candidate. +pub struct StatementFilter { + /// Seconded statements. '1' is known or undesired. + pub seconded_in_group: BitVec, + /// Valid statements. '1' is known or undesired. + pub validated_in_group: BitVec, +} + +impl StatementFilter { + /// Create a new filter with the given group size. 
+ pub fn new(group_size: usize) -> Self { + StatementFilter { + seconded_in_group: BitVec::repeat(false, group_size), + validated_in_group: BitVec::repeat(false, group_size), + } + } + + fn contains(&self, index: usize, statement_kind: StatementKind) -> bool { + match statement_kind { + StatementKind::Seconded => self.seconded_in_group.get(index).map_or(false, |x| *x), + StatementKind::Valid => self.validated_in_group.get(index).map_or(false, |x| *x), + } + } + + fn set(&mut self, index: usize, statement_kind: StatementKind) { + let b = match statement_kind { + StatementKind::Seconded => self.seconded_in_group.get_mut(index), + StatementKind::Valid => self.validated_in_group.get_mut(index), + }; + + if let Some(mut b) = b { + *b = true; + } + } +} + +struct MutualKnowledge { + direction: AdvertisementDirection, + // semantically, meaning varies according to advertisement direction + // indicates that the receiver of the advertisement requested or acknowledged + // the candidate. + accepted: bool, + remote_knowledge: StatementFilter, + // specifically, what we have indicated to them - may be subset of what + // we actually know. + local_knowledge: StatementFilter, +} + +// A utility struct for keeping track of metadata about candidates +// we have confirmed as having been backed. +struct KnownBackedCandidate { + confirmed_backed: bool, + group_index: GroupIndex, + mutual_knowledge: HashMap, +} + +impl KnownBackedCandidate { + fn note_confirmed_backed(&mut self) { + self.confirmed_backed = true; + } + + fn known_by(&self, validator: ValidatorIndex) -> bool { + match self.mutual_knowledge.get(&validator) { + None => false, + Some(k) => match k.direction { + AdvertisementDirection::Incoming => true, + AdvertisementDirection::Outgoing => k.accepted, + } + } + } + + // should only be invoked for validators which are known + // to be valid recipients of advertisement. 
+ fn should_advertise(&self, validator: ValidatorIndex) -> bool { + self.confirmed_backed && !self.mutual_knowledge.contains_key(&validator) + } + + // is a no-op when either they or we have advertised. + fn note_advertised_to( + &mut self, + validator: ValidatorIndex, + remote_knowledge: StatementFilter, + ) { + self.mutual_knowledge + .entry(validator) + .or_insert_with(|| MutualKnowledge { + direction: AdvertisementDirection::Outgoing, + accepted: false, + local_knowledge: StatementFilter::new( + remote_knowledge.validated_in_group.len(), + ), + remote_knowledge, + }); + } + + // whether we are allowed to acknowledge/request a remote validator's + // advertisement. + fn can_local_acknowledge( + &self, + validator: ValidatorIndex, + ) -> bool { + match self.mutual_knowledge.get(&validator) { + None => false, + Some(k) => !k.accepted || k.direction.is_incoming(), + } + } + + // whether a remote is allowed to acknowledge/request our local + // advertisement. + fn can_remote_acknowledge( + &self, + validator: ValidatorIndex, + ) -> bool { + match self.mutual_knowledge.get(&validator) { + None => false, + Some(k) => !k.accepted || k.direction.is_outgoing(), + } + } + + fn can_send_direct_statement_to( + &self, + validator: ValidatorIndex, + statement_index_in_group: usize, + statement_kind: StatementKind, + ) -> bool { + self.confirmed_backed && match self.mutual_knowledge.get(&validator) { + None => false, + Some(k) => k.accepted && !k.remote_knowledge.contains( + statement_index_in_group, + statement_kind, + ), + } + } + + fn can_receive_direct_statement_from( + &self, + validator: ValidatorIndex, + statement_index_in_group: usize, + statement_kind: StatementKind, + ) -> bool { + self.confirmed_backed && match self.mutual_knowledge.get(&validator) { + None => false, + Some(k) => k.accepted && !k.local_knowledge.contains( + statement_index_in_group, + statement_kind, + ), + } + } + + fn note_sent_direct_statement_to( + &mut self, + validator: ValidatorIndex, + 
statement_index_in_group: usize, + statement_kind: StatementKind, + ) { + if let Some(k) = self.mutual_knowledge.get_mut(&validator) { + if k.accepted { + k.remote_knowledge.set( + statement_index_in_group, + statement_kind, + ) + } + } + } + + fn note_received_direct_statement_from( + &mut self, + validator: ValidatorIndex, + statement_index_in_group: usize, + statement_kind: StatementKind, + ) { + if let Some(k) = self.mutual_knowledge.get_mut(&validator) { + if k.accepted { + k.local_knowledge.set( + statement_index_in_group, + statement_kind, + ); + + k.remote_knowledge.set( + statement_index_in_group, + statement_kind, + ) + } + } + } + + // no-op if we haven't sent an outgoing advertisement. + fn note_remote_acknowledged( + &mut self, + validator: ValidatorIndex, + remote_knowledge: StatementFilter, + ) { + if let Some(ref mut k) = self.mutual_knowledge.get_mut(&validator) { + if let AdvertisementDirection::Outgoing = k.direction { + k.accepted = true; + k.remote_knowledge = remote_knowledge; + } + } + } + + // no-op if we haven't received an incoming advertisement. 
+ fn note_local_acknowledged( + &mut self, + validator: ValidatorIndex, + local_knowledge: StatementFilter, + ) { + if let Some(ref mut k) = self.mutual_knowledge.get_mut(&validator) { + if let AdvertisementDirection::Incoming = k.direction { + k.accepted = true; + k.local_knowledge = local_knowledge + } + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -488,7 +838,7 @@ mod tests { #[test] fn knowledge_rejects_conflicting_manifest() { - let mut knowledge = ManifestKnowledge::default(); + let mut knowledge = ReceivedManifests::default(); let expected_manifest_summary = ManifestSummary { claimed_parent_hash: Hash::repeat_byte(2), @@ -545,7 +895,7 @@ mod tests { #[test] fn reject_overflowing_manifests() { - let mut knowledge = ManifestKnowledge::default(); + let mut knowledge = ReceivedManifests::default(); knowledge .import_received( 3, diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index 85b5af442bb6..d2d980480b8a 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -75,6 +75,7 @@ impl StatementStore { /// Insert a statement. Returns `true` if was not known already, `false` if it was. /// Ignores statements by unknown validators and returns an error. + // TODO [now]: perhaps reject over-seconded statements. 
pub fn insert( &mut self, groups: &Groups, From 4c0343f5bd39cd0e615fee1e4f2fbd0b8f4cb126 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 24 Oct 2022 23:10:35 -0500 Subject: [PATCH 077/220] fmt --- .../src/vstaging/grid.rs | 94 ++++++------------- 1 file changed, 31 insertions(+), 63 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 2eff856896a7..1e65f20c24fb 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -298,7 +298,8 @@ impl PerRelayParentGridTracker { &self, session_topology: &SessionTopologyView, validator_index: ValidatorIndex, - ) -> Vec { // TODO [now]: impl iterator + ) -> Vec { + // TODO [now]: impl iterator unimplemented!() } @@ -314,22 +315,12 @@ impl PerRelayParentGridTracker { /// note that a validator peer we advertised a backed candidate to /// now knows the candidate without a doubt. - pub fn note_known( - &mut self, - validator_index: ValidatorIndex, - candidate_hash: CandidateHash, - ) { + pub fn note_known(&mut self, validator_index: ValidatorIndex, candidate_hash: CandidateHash) { unimplemented!() } /// Whether a validator peer knows the underlying candidate. - pub fn known_by( - &self, - validator_index: ValidatorIndex, - candidate_hash: CandidateHash, - ) { - - } + pub fn known_by(&self, validator_index: ValidatorIndex, candidate_hash: CandidateHash) {} } /// A summary of a manifest being sent by a counterparty. @@ -589,7 +580,7 @@ impl KnownBackedCandidate { Some(k) => match k.direction { AdvertisementDirection::Incoming => true, AdvertisementDirection::Outgoing => k.accepted, - } + }, } } @@ -600,29 +591,18 @@ impl KnownBackedCandidate { } // is a no-op when either they or we have advertised. 
- fn note_advertised_to( - &mut self, - validator: ValidatorIndex, - remote_knowledge: StatementFilter, - ) { - self.mutual_knowledge - .entry(validator) - .or_insert_with(|| MutualKnowledge { - direction: AdvertisementDirection::Outgoing, - accepted: false, - local_knowledge: StatementFilter::new( - remote_knowledge.validated_in_group.len(), - ), - remote_knowledge, - }); + fn note_advertised_to(&mut self, validator: ValidatorIndex, remote_knowledge: StatementFilter) { + self.mutual_knowledge.entry(validator).or_insert_with(|| MutualKnowledge { + direction: AdvertisementDirection::Outgoing, + accepted: false, + local_knowledge: StatementFilter::new(remote_knowledge.validated_in_group.len()), + remote_knowledge, + }); } // whether we are allowed to acknowledge/request a remote validator's // advertisement. - fn can_local_acknowledge( - &self, - validator: ValidatorIndex, - ) -> bool { + fn can_local_acknowledge(&self, validator: ValidatorIndex) -> bool { match self.mutual_knowledge.get(&validator) { None => false, Some(k) => !k.accepted || k.direction.is_incoming(), @@ -631,10 +611,7 @@ impl KnownBackedCandidate { // whether a remote is allowed to acknowledge/request our local // advertisement. 
- fn can_remote_acknowledge( - &self, - validator: ValidatorIndex, - ) -> bool { + fn can_remote_acknowledge(&self, validator: ValidatorIndex) -> bool { match self.mutual_knowledge.get(&validator) { None => false, Some(k) => !k.accepted || k.direction.is_outgoing(), @@ -647,13 +624,13 @@ impl KnownBackedCandidate { statement_index_in_group: usize, statement_kind: StatementKind, ) -> bool { - self.confirmed_backed && match self.mutual_knowledge.get(&validator) { - None => false, - Some(k) => k.accepted && !k.remote_knowledge.contains( - statement_index_in_group, - statement_kind, - ), - } + self.confirmed_backed && + match self.mutual_knowledge.get(&validator) { + None => false, + Some(k) => + k.accepted && + !k.remote_knowledge.contains(statement_index_in_group, statement_kind), + } } fn can_receive_direct_statement_from( @@ -662,13 +639,13 @@ impl KnownBackedCandidate { statement_index_in_group: usize, statement_kind: StatementKind, ) -> bool { - self.confirmed_backed && match self.mutual_knowledge.get(&validator) { - None => false, - Some(k) => k.accepted && !k.local_knowledge.contains( - statement_index_in_group, - statement_kind, - ), - } + self.confirmed_backed && + match self.mutual_knowledge.get(&validator) { + None => false, + Some(k) => + k.accepted && + !k.local_knowledge.contains(statement_index_in_group, statement_kind), + } } fn note_sent_direct_statement_to( @@ -679,10 +656,7 @@ impl KnownBackedCandidate { ) { if let Some(k) = self.mutual_knowledge.get_mut(&validator) { if k.accepted { - k.remote_knowledge.set( - statement_index_in_group, - statement_kind, - ) + k.remote_knowledge.set(statement_index_in_group, statement_kind) } } } @@ -695,15 +669,9 @@ impl KnownBackedCandidate { ) { if let Some(k) = self.mutual_knowledge.get_mut(&validator) { if k.accepted { - k.local_knowledge.set( - statement_index_in_group, - statement_kind, - ); + k.local_knowledge.set(statement_index_in_group, statement_kind); - k.remote_knowledge.set( - 
statement_index_in_group, - statement_kind, - ) + k.remote_knowledge.set(statement_index_in_group, statement_kind) } } } From 421dd9e2bbd9c291cdb8b41174fe0af44bfc0574 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 26 Oct 2022 16:27:03 -0500 Subject: [PATCH 078/220] some more group boilerplate --- .../src/vstaging/grid.rs | 320 ++++++++++-------- 1 file changed, 179 insertions(+), 141 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 1e65f20c24fb..a2a49f4774c2 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -168,11 +168,12 @@ pub enum PostBackingAction { #[derive(Default)] pub struct PerRelayParentGridTracker { received: HashMap, - known: HashMap, + confirmed_backed: HashMap, + unconfirmed: HashMap>, } impl PerRelayParentGridTracker { - /// Attempt to import a manifest. + /// Attempt to import a manifest advertised by a remote peer. /// /// This checks whether the peer is allowed to send us manifests /// about this group at this relay-parent. This also does sanity @@ -226,18 +227,29 @@ impl PerRelayParentGridTracker { return Err(ManifestImportError::Malformed) } - // TODO [now]: disallow manifests when we've advertised to them - // or have already acknowledged their advertisement. can be racy, - // misbehavior should be minor. + let remote_knowledge = StatementFilter { + seconded_in_group: manifest.seconded_in_group.clone(), + validated_in_group: manifest.validated_in_group.clone(), + }; self.received.entry(sender).or_default().import_received( group_size, seconding_limit, candidate_hash, manifest, - ) + )?; + + if let Some(confirmed) = self.confirmed_backed.get_mut(&candidate_hash) { + // TODO [now]: send statements they need? 
+ confirmed.note_remote_advertised(sender, remote_knowledge); + } else { + // received prevents conflicting manifests so this is max 1 per validator. + self.unconfirmed.entry(candidate_hash) + .or_default() + .push((sender, claimed_group_index)) + } - // TODO [now]: update remote mutual knowledge for candidate. + Ok(()) } /// Add a new backed candidate to the tracker. This yields @@ -250,15 +262,32 @@ impl PerRelayParentGridTracker { group_index: GroupIndex, group_size: usize, ) -> Vec<(ValidatorIndex, PostBackingAction)> { - let c = self.known.entry(candidate_hash).or_insert_with(|| KnownBackedCandidate { - confirmed_backed: false, - group_index, - mutual_knowledge: HashMap::new(), - }); + let mut actions = Vec::new(); + let c = match self.confirmed_backed.entry(candidate_hash) { + Entry::Occupied(_) => return actions, + Entry::Vacant(v) => v.insert(KnownBackedCandidate { + group_index, + mutual_knowledge: HashMap::new(), + }), + }; - c.note_confirmed_backed(); + // Populate the entry with previously unconfirmed manifests. 
+ for (v, claimed_group_index) in self.unconfirmed.remove(&candidate_hash) + .into_iter() + .flat_map(|x| x) + { + if claimed_group_index != group_index { + // This is misbehavior, but is handled more comprehensively elsewhere + continue; + } + + let statement_filter = self.received.get(&v) + .and_then(|r| r.candidate_statement_filter(&candidate_hash)) + .expect("unconfirmed is only populated by validators who have sent manifest; qed"); + + c.note_remote_advertised(v, statement_filter); + } - let mut actions = Vec::new(); let group_topology = match session_topology.group_views.get(&group_index) { None => return actions, Some(g) => g, @@ -287,20 +316,32 @@ impl PerRelayParentGridTracker { &mut self, validator_index: ValidatorIndex, candidate_hash: CandidateHash, + local_knowledge: StatementFilter, ) { - unimplemented!() + if let Some(c) = self.confirmed_backed.get_mut(&candidate_hash) { + c.note_advertised_to(validator_index, local_knowledge); + } } /// Provided a validator index, gives an iterator of candidate /// hashes which may be advertised to the validator and have not yet /// been. 
- pub fn advertisements( - &self, + pub fn advertisements<'a>( + &'a self, session_topology: &SessionTopologyView, validator_index: ValidatorIndex, - ) -> Vec { - // TODO [now]: impl iterator - unimplemented!() + ) -> impl IntoIterator + 'a { + let allowed_groups: HashSet<_> = session_topology + .group_views + .iter() + .filter(|(_, x)| x.sending.contains(&validator_index)) + .map(|(g, x)| *g) + .collect(); + + self.confirmed_backed.iter() + .filter(move |(_, c)| allowed_groups.contains(c.group_index())) + .filter(move |(_, c)| c.should_advertise(validator_index)) + .map(|(c_h, _)| *c_h) } /// Whether the given validator is allowed to acknowledge an advertisement @@ -310,17 +351,44 @@ impl PerRelayParentGridTracker { validator_index: ValidatorIndex, candidate_hash: CandidateHash, ) -> bool { - unimplemented!() + self.confirmed_backed.get(&candidate_hash) + .map_or(false, |c| c.can_remote_acknowledge(validator_index)) + } + + /// Note that a validator peer we advertised a backed candidate to + /// has acknowledged the candidate directly or by requesting it. + pub fn note_remote_acknowledged( + &mut self, + validator_index: ValidatorIndex, + candidate_hash: CandidateHash, + remote_knowledge: StatementFilter, + ) { + if let Some(c) = self.confirmed_backed.get_mut(&candidate_hash) { + c.note_remote_acknowledged(validator_index, remote_knowledge); + } } - /// note that a validator peer we advertised a backed candidate to - /// now knows the candidate without a doubt. - pub fn note_known(&mut self, validator_index: ValidatorIndex, candidate_hash: CandidateHash) { - unimplemented!() + /// Whether we can acknowledge a remote's advertisement. + pub fn can_local_acknowledge( + &self, + validator_index: ValidatorIndex, + candidate_hash: CandidateHash, + ) -> bool { + self.confirmed_backed.get(&candidate_hash) + .map_or(false, |c| c.can_local_acknowledge(validator_index)) } - /// Whether a validator peer knows the underlying candidate. 
- pub fn known_by(&self, validator_index: ValidatorIndex, candidate_hash: CandidateHash) {} + /// Indicate that we've acknowledged a remote's advertisement. + pub fn note_local_acknowledged( + &mut self, + validator_index: ValidatorIndex, + candidate_hash: CandidateHash, + local_knowledge: StatementFilter, + ) { + if let Some(c) = self.confirmed_backed.get_mut(&candidate_hash) { + c.note_local_acknowledged(validator_index, local_knowledge); + } + } } /// A summary of a manifest being sent by a counterparty. @@ -368,6 +436,15 @@ impl ReceivedManifests { ReceivedManifests { received: HashMap::new(), seconded_counts: HashMap::new() } } + fn candidate_statement_filter(&self, candidate_hash: &CandidateHash) + -> Option + { + self.received.get(candidate_hash).map(|m| StatementFilter { + seconded_in_group: m.seconded_in_group.clone(), + validated_in_group: m.validated_in_group.clone(), + }) + } + /// Attempt to import a received manifest from a counterparty. /// /// This will reject manifests which are either duplicate, conflicting, @@ -482,30 +559,6 @@ fn updating_ensure_within_seconding_limit( true } -// The direction of advertisement about the candidate. -enum AdvertisementDirection { - // We advertised to the remote. - Outgoing, - // They advertised to us. - Incoming, -} - -impl AdvertisementDirection { - fn is_outgoing(&self) -> bool { - match *self { - AdvertisementDirection::Outgoing => true, - AdvertisementDirection::Incoming => false, - } - } - - fn is_incoming(&self) -> bool { - match *self { - AdvertisementDirection::Outgoing => false, - AdvertisementDirection::Incoming => true, - } - } -} - #[derive(Clone, Copy)] enum StatementKind { Seconded, @@ -550,71 +603,100 @@ impl StatementFilter { } struct MutualKnowledge { - direction: AdvertisementDirection, - // semantically, meaning varies according to advertisement direction - // indicates that the receiver of the advertisement requested or acknowledged - // the candidate. 
- accepted: bool, - remote_knowledge: StatementFilter, - // specifically, what we have indicated to them - may be subset of what - // we actually know. - local_knowledge: StatementFilter, + // Knowledge they have about the candidate. `Some` only if they + // have advertised or requested the candidate. + remote_knowledge: Option, + // Knowledge we have indicated to them about the candidate. + // `Some` only if we have advertised or requested the candidate + // from them. + local_knowledge: Option, } // A utility struct for keeping track of metadata about candidates // we have confirmed as having been backed. struct KnownBackedCandidate { - confirmed_backed: bool, group_index: GroupIndex, mutual_knowledge: HashMap, } impl KnownBackedCandidate { - fn note_confirmed_backed(&mut self) { - self.confirmed_backed = true; - } - fn known_by(&self, validator: ValidatorIndex) -> bool { match self.mutual_knowledge.get(&validator) { None => false, - Some(k) => match k.direction { - AdvertisementDirection::Incoming => true, - AdvertisementDirection::Outgoing => k.accepted, - }, + Some(k) => k.remote_knowledge.is_some(), } } + fn group_index(&self) -> &GroupIndex { + &self.group_index + } + // should only be invoked for validators which are known // to be valid recipients of advertisement. fn should_advertise(&self, validator: ValidatorIndex) -> bool { - self.confirmed_backed && !self.mutual_knowledge.contains_key(&validator) + self.mutual_knowledge.get(&validator) + .map_or(true, |k| k.local_knowledge.is_none()) } // is a no-op when either they or we have advertised. 
- fn note_advertised_to(&mut self, validator: ValidatorIndex, remote_knowledge: StatementFilter) { - self.mutual_knowledge.entry(validator).or_insert_with(|| MutualKnowledge { - direction: AdvertisementDirection::Outgoing, - accepted: false, - local_knowledge: StatementFilter::new(remote_knowledge.validated_in_group.len()), - remote_knowledge, + fn note_advertised_to(&mut self, validator: ValidatorIndex, local_knowledge: StatementFilter) { + let k = self.mutual_knowledge.entry(validator) + .or_insert_with(|| MutualKnowledge { + remote_knowledge: None, + local_knowledge: None, + }); + + k.local_knowledge = Some(local_knowledge); + } + + fn note_remote_advertised( + &mut self, + validator: ValidatorIndex, + remote_knowledge: StatementFilter, + ) { + let k = self.mutual_knowledge.entry(validator).or_insert_with(|| MutualKnowledge { + remote_knowledge: None, + local_knowledge: None, }); + + k.remote_knowledge = Some(remote_knowledge); } - // whether we are allowed to acknowledge/request a remote validator's - // advertisement. + // whether we are allowed to acknowledge or request a candidate from a remote validator. fn can_local_acknowledge(&self, validator: ValidatorIndex) -> bool { match self.mutual_knowledge.get(&validator) { None => false, - Some(k) => !k.accepted || k.direction.is_incoming(), + Some(k) => k.remote_knowledge.is_some() && k.local_knowledge.is_none(), } } - // whether a remote is allowed to acknowledge/request our local - // advertisement. 
+ // whether a remote is allowed to acknowledge or request a candidate from us fn can_remote_acknowledge(&self, validator: ValidatorIndex) -> bool { match self.mutual_knowledge.get(&validator) { None => false, - Some(k) => !k.accepted || k.direction.is_outgoing(), + Some(k) => k.remote_knowledge.is_none() && k.local_knowledge.is_some(), + } + } + + fn note_local_acknowledged( + &mut self, + validator: ValidatorIndex, + local_knowledge: StatementFilter, + ) { + if let Some(ref mut k) = self.mutual_knowledge.get_mut(&validator) { + k.local_knowledge = Some(local_knowledge); + // TODO [now]: return something for sending statements they need. + } + } + + fn note_remote_acknowledged( + &mut self, + validator: ValidatorIndex, + remote_knowledge: StatementFilter, + ) { + if let Some(ref mut k) = self.mutual_knowledge.get_mut(&validator) { + k.remote_knowledge = Some(remote_knowledge); + // TODO [now]: return something for sending statements they need. } } @@ -624,13 +706,12 @@ impl KnownBackedCandidate { statement_index_in_group: usize, statement_kind: StatementKind, ) -> bool { - self.confirmed_backed && - match self.mutual_knowledge.get(&validator) { - None => false, - Some(k) => - k.accepted && - !k.remote_knowledge.contains(statement_index_in_group, statement_kind), - } + match self.mutual_knowledge.get(&validator) { + Some(MutualKnowledge { remote_knowledge: Some(r), local_knowledge: Some(_) }) => { + !r.contains(statement_index_in_group, statement_kind) + }, + _ => false, + } } fn can_receive_direct_statement_from( @@ -639,67 +720,24 @@ impl KnownBackedCandidate { statement_index_in_group: usize, statement_kind: StatementKind, ) -> bool { - self.confirmed_backed && - match self.mutual_knowledge.get(&validator) { - None => false, - Some(k) => - k.accepted && - !k.local_knowledge.contains(statement_index_in_group, statement_kind), - } - } - - fn note_sent_direct_statement_to( - &mut self, - validator: ValidatorIndex, - statement_index_in_group: usize, - 
statement_kind: StatementKind, - ) { - if let Some(k) = self.mutual_knowledge.get_mut(&validator) { - if k.accepted { - k.remote_knowledge.set(statement_index_in_group, statement_kind) - } + match self.mutual_knowledge.get(&validator) { + Some(MutualKnowledge { remote_knowledge: Some(_), local_knowledge: Some(l) }) => { + !l.contains(statement_index_in_group, statement_kind) + }, + _ => false, } } - fn note_received_direct_statement_from( + fn note_sent_or_received_direct_statement( &mut self, validator: ValidatorIndex, statement_index_in_group: usize, statement_kind: StatementKind, ) { if let Some(k) = self.mutual_knowledge.get_mut(&validator) { - if k.accepted { - k.local_knowledge.set(statement_index_in_group, statement_kind); - - k.remote_knowledge.set(statement_index_in_group, statement_kind) - } - } - } - - // no-op if we haven't sent an outgoing advertisement. - fn note_remote_acknowledged( - &mut self, - validator: ValidatorIndex, - remote_knowledge: StatementFilter, - ) { - if let Some(ref mut k) = self.mutual_knowledge.get_mut(&validator) { - if let AdvertisementDirection::Outgoing = k.direction { - k.accepted = true; - k.remote_knowledge = remote_knowledge; - } - } - } - - // no-op if we haven't received an incoming advertisement. 
- fn note_local_acknowledged( - &mut self, - validator: ValidatorIndex, - local_knowledge: StatementFilter, - ) { - if let Some(ref mut k) = self.mutual_knowledge.get_mut(&validator) { - if let AdvertisementDirection::Incoming = k.direction { - k.accepted = true; - k.local_knowledge = local_knowledge + if let (Some(r), Some(l)) = (k.remote_knowledge.as_mut(), k.local_knowledge.as_mut()) { + r.set(statement_index_in_group, statement_kind); + l.set(statement_index_in_group, statement_kind); } } } From db59d335c9ba2063cbc72d966b9cefdc85535793 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 26 Oct 2022 19:27:15 -0500 Subject: [PATCH 079/220] refactor handling of topology and authority IDs --- .../src/vstaging/mod.rs | 162 +++++++++++++----- 1 file changed, 116 insertions(+), 46 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index abe40b1241d9..33f497ca77b4 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -19,7 +19,7 @@ use polkadot_node_network_protocol::{ self as net_protocol, - grid_topology::{RequiredRouting, SessionGridTopologies, SessionGridTopology}, + grid_topology::{RequiredRouting, SessionGridTopology}, peer_set::ValidationVersion, vstaging as protocol_vstaging, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; @@ -42,7 +42,7 @@ use sp_keystore::SyncCryptoStorePtr; use indexmap::IndexMap; -use std::collections::{HashMap, HashSet}; +use std::collections::{hash_map::{HashMap, Entry}, HashSet}; use crate::{ error::{JfyiError, JfyiErrorResult}, @@ -107,6 +107,54 @@ struct LocalValidatorState { struct PerSessionState { session_info: SessionInfo, groups: Groups, + authority_lookup: HashMap, + // is only `None` in the time between seeing a session and + // getting the topology from the gossip-support subsystem + grid_view: Option, + local_validator: Option, +} + +impl 
PerSessionState { + async fn new( + session_info: SessionInfo, + keystore: &SyncCryptoStorePtr, + ) -> Self { + let groups = Groups::new(session_info.validator_groups.clone(), &session_info.discovery_keys); + let mut authority_lookup = HashMap::new(); + for (i, ad) in session_info.discovery_keys.iter().cloned().enumerate() { + authority_lookup.insert(ad, ValidatorIndex(i as _)); + } + + let local_validator = polkadot_node_subsystem_util::signing_key_and_index( + &session_info.validators, + keystore, + ).await; + + PerSessionState { + session_info, + groups, + authority_lookup, + grid_view: None, + local_validator: local_validator.map(|(_key, index)| index), + } + } + + fn supply_topology( + &mut self, + topology: &SessionGridTopology, + ) { + let grid_view = grid::build_session_topology( + &self.session_info.validator_groups[..], + topology, + self.local_validator, + ); + + self.grid_view = Some(grid_view); + } + + fn authority_index_in_session(&self, discovery_key: &AuthorityDiscoveryId) -> Option { + self.authority_lookup.get(discovery_key).map(|x| *x) + } } pub(crate) struct State { @@ -119,15 +167,26 @@ pub(crate) struct State { per_session: HashMap, peers: HashMap, keystore: SyncCryptoStorePtr, - topology_storage: SessionGridTopologies, authorities: HashMap, request_manager: RequestManager, } +// For the provided validator index, if there is a connected peer +// controlling the given authority ID, +fn connected_validator_peer( + authorities: &HashMap, + per_session: &PerSessionState, + validator_index: ValidatorIndex, +) -> Option { + per_session.session_info.discovery_keys.get(validator_index.0 as usize) + .and_then(|k| authorities.get(k)) + .map(|p| p.clone()) +} + struct PeerState { view: View, implicit_view: HashSet, - maybe_authority: Option>, + discovery_ids: Option>, } impl PeerState { @@ -141,7 +200,7 @@ impl PeerState { } fn is_authority(&self, authority_id: &AuthorityDiscoveryId) -> bool { - self.maybe_authority.as_ref().map_or(false, |x| 
x.contains(authority_id)) + self.discovery_ids.as_ref().map_or(false, |x| x.contains(authority_id)) } } @@ -159,41 +218,58 @@ pub(crate) async fn handle_network_update( update: NetworkBridgeEvent, ) { match update { - NetworkBridgeEvent::PeerConnected(peer_id, role, protocol_version, authority_ids) => { + NetworkBridgeEvent::PeerConnected(peer_id, role, protocol_version, mut authority_ids) => { gum::trace!(target: LOG_TARGET, ?peer_id, ?role, ?protocol_version, "Peer connected"); if protocol_version != ValidationVersion::VStaging.into() { return } + if let Some(ref mut authority_ids) = authority_ids { + authority_ids.retain(|a| { + match state.authorities.entry(a.clone()) { + Entry::Vacant(e) => { + e.insert(peer_id); + true + } + Entry::Occupied(e) => { + gum::trace!( + target: LOG_TARGET, + authority_id = ?a, + existing_peer = ?e.get(), + new_peer = ?peer_id, + "Ignoring new peer with duplicate authority ID as a bearer of that identity" + ); + + false + } + } + }); + } + state.peers.insert( peer_id, PeerState { view: View::default(), implicit_view: HashSet::new(), - maybe_authority: authority_ids.clone(), + discovery_ids: authority_ids, }, ); - - if let Some(authority_ids) = authority_ids { - authority_ids.into_iter().for_each(|a| { - state.authorities.insert(a, peer_id); - }) - } }, NetworkBridgeEvent::PeerDisconnected(peer_id) => { - state.peers.remove(&peer_id); + if let Some(p) = state.peers.remove(&peer_id) { + for discovery_key in p.discovery_ids.into_iter().flat_map(|x| x) { + state.authorities.remove(&discovery_key); + } + } }, NetworkBridgeEvent::NewGossipTopology(topology) => { let new_session_index = topology.session; let new_topology = topology.topology; - state.topology_storage.insert_topology( - new_session_index, - new_topology, - topology.local_index, - ); - // TODO [now]: can we not update authority IDs for peers? 
+ if let Some(per_session) = state.per_session.get_mut(&new_session_index) { + per_session.supply_topology(&new_topology); + } // TODO [now] for all relay-parents with this session, send all grid peers // any `BackedCandidateInv` messages they might need. @@ -288,12 +364,12 @@ pub(crate) async fn handle_activated_leaf( Some(s) => s, }; - let groups = - Groups::new(session_info.validator_groups.clone(), &session_info.discovery_keys); - state .per_session - .insert(session_index, PerSessionState { session_info, groups }); + .insert( + session_index, + PerSessionState::new(session_info, &state.keystore).await, + ); } let per_session = state @@ -301,13 +377,12 @@ pub(crate) async fn handle_activated_leaf( .get(&session_index) .expect("either existed or just inserted; qed"); - let local_validator = find_local_validator_state( + let local_validator = per_session.local_validator.and_then(|v| find_local_validator_state( + v, &per_session.session_info.validators, - &state.keystore, &per_session.groups, &availability_cores, - ) - .await; + )); state.per_relay_parent.insert( *leaf, @@ -319,8 +394,6 @@ pub(crate) async fn handle_activated_leaf( }, ); - state.topology_storage.inc_session_refs(session_index); - // TODO [now]: update peers which have the leaf in their view. // update their implicit view. send any messages accordingly. 
} @@ -328,9 +401,9 @@ pub(crate) async fn handle_activated_leaf( Ok(()) } -async fn find_local_validator_state( +fn find_local_validator_state( + validator_index: ValidatorIndex, validators: &[ValidatorId], - keystore: &SyncCryptoStorePtr, groups: &Groups, availability_cores: &[CoreState], ) -> Option { @@ -338,8 +411,7 @@ async fn find_local_validator_state( return None } - let (validator_id, validator_index) = - polkadot_node_subsystem_util::signing_key_and_index(validators, keystore).await?; + let validator_id = validators.get(validator_index.0 as usize)?.clone(); let our_group = groups.by_validator_index(validator_index)?; @@ -373,19 +445,10 @@ pub(crate) fn handle_deactivate_leaf(state: &mut State, leaf_hash: Hash) { } // clean up per-relay-parent data based on everything removed. - let topology_storage = &mut state.topology_storage; - state.per_relay_parent.retain(|r, x| { - if relay_parents.contains(r) { - true - } else { - // clean up topology storage. - topology_storage.dec_session_refs(x.session); - - false - } - }); + state.per_relay_parent.retain(|r, x| relay_parents.contains(r)); // TODO [now]: clean up requests + // TODO [now]: clean up candidates // clean up sessions based on everything remaining. let sessions: HashSet<_> = state.per_relay_parent.values().map(|r| r.session).collect(); @@ -447,7 +510,14 @@ pub(crate) async fn share_local_statement( let candidate_hash = CandidateHash(*statement.payload().candidate_hash()); if let FullStatementWithPVD::Seconded(ref c, ref pvd) = statement.payload() { - // TODO [now]: insert confirmed candidate and report peers. + if let Some(reckoning) = state.candidates.confirm_candidate( + candidate_hash, + c.clone(), + pvd.clone(), + local_group, + ) { + // TODO [now] apply the reckoning. 
+ } }; match per_relay_parent From 978335dcb564a7da2ff0bfa531dd52542c214c63 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 26 Oct 2022 19:27:24 -0500 Subject: [PATCH 080/220] fmt --- .../src/vstaging/grid.rs | 66 +++++++------- .../src/vstaging/mod.rs | 88 +++++++++---------- 2 files changed, 78 insertions(+), 76 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index a2a49f4774c2..10eab817cf34 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -244,7 +244,8 @@ impl PerRelayParentGridTracker { confirmed.note_remote_advertised(sender, remote_knowledge); } else { // received prevents conflicting manifests so this is max 1 per validator. - self.unconfirmed.entry(candidate_hash) + self.unconfirmed + .entry(candidate_hash) .or_default() .push((sender, claimed_group_index)) } @@ -265,23 +266,22 @@ impl PerRelayParentGridTracker { let mut actions = Vec::new(); let c = match self.confirmed_backed.entry(candidate_hash) { Entry::Occupied(_) => return actions, - Entry::Vacant(v) => v.insert(KnownBackedCandidate { - group_index, - mutual_knowledge: HashMap::new(), - }), + Entry::Vacant(v) => + v.insert(KnownBackedCandidate { group_index, mutual_knowledge: HashMap::new() }), }; // Populate the entry with previously unconfirmed manifests. 
- for (v, claimed_group_index) in self.unconfirmed.remove(&candidate_hash) - .into_iter() - .flat_map(|x| x) + for (v, claimed_group_index) in + self.unconfirmed.remove(&candidate_hash).into_iter().flat_map(|x| x) { if claimed_group_index != group_index { // This is misbehavior, but is handled more comprehensively elsewhere - continue; + continue } - let statement_filter = self.received.get(&v) + let statement_filter = self + .received + .get(&v) .and_then(|r| r.candidate_statement_filter(&candidate_hash)) .expect("unconfirmed is only populated by validators who have sent manifest; qed"); @@ -338,7 +338,8 @@ impl PerRelayParentGridTracker { .map(|(g, x)| *g) .collect(); - self.confirmed_backed.iter() + self.confirmed_backed + .iter() .filter(move |(_, c)| allowed_groups.contains(c.group_index())) .filter(move |(_, c)| c.should_advertise(validator_index)) .map(|(c_h, _)| *c_h) @@ -351,7 +352,8 @@ impl PerRelayParentGridTracker { validator_index: ValidatorIndex, candidate_hash: CandidateHash, ) -> bool { - self.confirmed_backed.get(&candidate_hash) + self.confirmed_backed + .get(&candidate_hash) .map_or(false, |c| c.can_remote_acknowledge(validator_index)) } @@ -374,7 +376,8 @@ impl PerRelayParentGridTracker { validator_index: ValidatorIndex, candidate_hash: CandidateHash, ) -> bool { - self.confirmed_backed.get(&candidate_hash) + self.confirmed_backed + .get(&candidate_hash) .map_or(false, |c| c.can_local_acknowledge(validator_index)) } @@ -436,9 +439,10 @@ impl ReceivedManifests { ReceivedManifests { received: HashMap::new(), seconded_counts: HashMap::new() } } - fn candidate_statement_filter(&self, candidate_hash: &CandidateHash) - -> Option - { + fn candidate_statement_filter( + &self, + candidate_hash: &CandidateHash, + ) -> Option { self.received.get(candidate_hash).map(|m| StatementFilter { seconded_in_group: m.seconded_in_group.clone(), validated_in_group: m.validated_in_group.clone(), @@ -634,17 +638,17 @@ impl KnownBackedCandidate { // should only be 
invoked for validators which are known // to be valid recipients of advertisement. fn should_advertise(&self, validator: ValidatorIndex) -> bool { - self.mutual_knowledge.get(&validator) + self.mutual_knowledge + .get(&validator) .map_or(true, |k| k.local_knowledge.is_none()) } // is a no-op when either they or we have advertised. fn note_advertised_to(&mut self, validator: ValidatorIndex, local_knowledge: StatementFilter) { - let k = self.mutual_knowledge.entry(validator) - .or_insert_with(|| MutualKnowledge { - remote_knowledge: None, - local_knowledge: None, - }); + let k = self + .mutual_knowledge + .entry(validator) + .or_insert_with(|| MutualKnowledge { remote_knowledge: None, local_knowledge: None }); k.local_knowledge = Some(local_knowledge); } @@ -654,10 +658,10 @@ impl KnownBackedCandidate { validator: ValidatorIndex, remote_knowledge: StatementFilter, ) { - let k = self.mutual_knowledge.entry(validator).or_insert_with(|| MutualKnowledge { - remote_knowledge: None, - local_knowledge: None, - }); + let k = self + .mutual_knowledge + .entry(validator) + .or_insert_with(|| MutualKnowledge { remote_knowledge: None, local_knowledge: None }); k.remote_knowledge = Some(remote_knowledge); } @@ -707,9 +711,8 @@ impl KnownBackedCandidate { statement_kind: StatementKind, ) -> bool { match self.mutual_knowledge.get(&validator) { - Some(MutualKnowledge { remote_knowledge: Some(r), local_knowledge: Some(_) }) => { - !r.contains(statement_index_in_group, statement_kind) - }, + Some(MutualKnowledge { remote_knowledge: Some(r), local_knowledge: Some(_) }) => + !r.contains(statement_index_in_group, statement_kind), _ => false, } } @@ -721,9 +724,8 @@ impl KnownBackedCandidate { statement_kind: StatementKind, ) -> bool { match self.mutual_knowledge.get(&validator) { - Some(MutualKnowledge { remote_knowledge: Some(_), local_knowledge: Some(l) }) => { - !l.contains(statement_index_in_group, statement_kind) - }, + Some(MutualKnowledge { remote_knowledge: Some(_), 
local_knowledge: Some(l) }) => + !l.contains(statement_index_in_group, statement_kind), _ => false, } } diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 33f497ca77b4..1148728776f6 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -42,7 +42,10 @@ use sp_keystore::SyncCryptoStorePtr; use indexmap::IndexMap; -use std::collections::{hash_map::{HashMap, Entry}, HashSet}; +use std::collections::{ + hash_map::{Entry, HashMap}, + HashSet, +}; use crate::{ error::{JfyiError, JfyiErrorResult}, @@ -115,20 +118,17 @@ struct PerSessionState { } impl PerSessionState { - async fn new( - session_info: SessionInfo, - keystore: &SyncCryptoStorePtr, - ) -> Self { - let groups = Groups::new(session_info.validator_groups.clone(), &session_info.discovery_keys); + async fn new(session_info: SessionInfo, keystore: &SyncCryptoStorePtr) -> Self { + let groups = + Groups::new(session_info.validator_groups.clone(), &session_info.discovery_keys); let mut authority_lookup = HashMap::new(); for (i, ad) in session_info.discovery_keys.iter().cloned().enumerate() { authority_lookup.insert(ad, ValidatorIndex(i as _)); } - let local_validator = polkadot_node_subsystem_util::signing_key_and_index( - &session_info.validators, - keystore, - ).await; + let local_validator = + polkadot_node_subsystem_util::signing_key_and_index(&session_info.validators, keystore) + .await; PerSessionState { session_info, @@ -139,10 +139,7 @@ impl PerSessionState { } } - fn supply_topology( - &mut self, - topology: &SessionGridTopology, - ) { + fn supply_topology(&mut self, topology: &SessionGridTopology) { let grid_view = grid::build_session_topology( &self.session_info.validator_groups[..], topology, @@ -152,7 +149,10 @@ impl PerSessionState { self.grid_view = Some(grid_view); } - fn authority_index_in_session(&self, discovery_key: &AuthorityDiscoveryId) 
-> Option { + fn authority_index_in_session( + &self, + discovery_key: &AuthorityDiscoveryId, + ) -> Option { self.authority_lookup.get(discovery_key).map(|x| *x) } } @@ -178,7 +178,10 @@ fn connected_validator_peer( per_session: &PerSessionState, validator_index: ValidatorIndex, ) -> Option { - per_session.session_info.discovery_keys.get(validator_index.0 as usize) + per_session + .session_info + .discovery_keys + .get(validator_index.0 as usize) .and_then(|k| authorities.get(k)) .map(|p| p.clone()) } @@ -226,24 +229,22 @@ pub(crate) async fn handle_network_update( } if let Some(ref mut authority_ids) = authority_ids { - authority_ids.retain(|a| { - match state.authorities.entry(a.clone()) { - Entry::Vacant(e) => { - e.insert(peer_id); - true - } - Entry::Occupied(e) => { - gum::trace!( - target: LOG_TARGET, - authority_id = ?a, - existing_peer = ?e.get(), - new_peer = ?peer_id, - "Ignoring new peer with duplicate authority ID as a bearer of that identity" - ); - - false - } - } + authority_ids.retain(|a| match state.authorities.entry(a.clone()) { + Entry::Vacant(e) => { + e.insert(peer_id); + true + }, + Entry::Occupied(e) => { + gum::trace!( + target: LOG_TARGET, + authority_id = ?a, + existing_peer = ?e.get(), + new_peer = ?peer_id, + "Ignoring new peer with duplicate authority ID as a bearer of that identity" + ); + + false + }, }); } @@ -366,10 +367,7 @@ pub(crate) async fn handle_activated_leaf( state .per_session - .insert( - session_index, - PerSessionState::new(session_info, &state.keystore).await, - ); + .insert(session_index, PerSessionState::new(session_info, &state.keystore).await); } let per_session = state @@ -377,12 +375,14 @@ pub(crate) async fn handle_activated_leaf( .get(&session_index) .expect("either existed or just inserted; qed"); - let local_validator = per_session.local_validator.and_then(|v| find_local_validator_state( - v, - &per_session.session_info.validators, - &per_session.groups, - &availability_cores, - )); + let local_validator = 
per_session.local_validator.and_then(|v| { + find_local_validator_state( + v, + &per_session.session_info.validators, + &per_session.groups, + &availability_cores, + ) + }); state.per_relay_parent.insert( *leaf, From 671d801e3ee46ef06c6febfc14d884dbdb4b1c29 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 26 Oct 2022 20:08:24 -0500 Subject: [PATCH 081/220] send statements directly to grid peers where possible --- .../src/vstaging/candidates.rs | 6 +- .../src/vstaging/grid.rs | 99 +++++++++++++++++-- .../src/vstaging/mod.rs | 25 ++++- 3 files changed, 112 insertions(+), 18 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index ea0ffa159f88..7c20e58daa6b 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -166,11 +166,11 @@ impl Candidates { /// Prune all candidates according to the relay-parent predicate /// provided. 
- pub fn collect_garbage(&mut self, relay_parent_live: impl Fn(Hash) -> bool) { + pub fn collect_garbage(&mut self, relay_parent_live: impl Fn(&Hash) -> bool) { self.candidates.retain(|_, state| match state { - CandidateState::Confirmed(ref c) => relay_parent_live(c.relay_parent()), + CandidateState::Confirmed(ref c) => relay_parent_live(&c.relay_parent()), CandidateState::Unconfirmed(ref mut c) => { - c.claims.retain(|c| relay_parent_live(c.1.relay_parent)); + c.claims.retain(|c| relay_parent_live(&c.1.relay_parent)); !c.claims.is_empty() }, diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 10eab817cf34..b4c812798d50 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -19,7 +19,7 @@ use polkadot_node_network_protocol::{grid_topology::SessionGridTopology, PeerId}; use polkadot_primitives::vstaging::{ - AuthorityDiscoveryId, CandidateHash, GroupIndex, Hash, ValidatorIndex, + AuthorityDiscoveryId, CandidateHash, CompactStatement, GroupIndex, Hash, ValidatorIndex, }; use std::collections::{ @@ -162,17 +162,16 @@ pub enum PostBackingAction { Advertise, } -/// A tracker of knowledge from authorities within the grid for the -/// entire session. This stores only data on manifests sent within a bounded -/// set of relay-parents. +/// A tracker of knowledge from authorities within the grid for a particular +/// relay-parent. #[derive(Default)] -pub struct PerRelayParentGridTracker { +pub struct GridTracker { received: HashMap, confirmed_backed: HashMap, unconfirmed: HashMap>, } -impl PerRelayParentGridTracker { +impl GridTracker { /// Attempt to import a manifest advertised by a remote peer. 
/// /// This checks whether the peer is allowed to send us manifests @@ -392,6 +391,64 @@ impl PerRelayParentGridTracker { c.note_local_acknowledged(validator_index, local_knowledge); } } + + /// Determine the validators which can receive a statement by direct + /// broadcast. + pub fn direct_statement_recipients( + &self, + groups: &Groups, + originator: ValidatorIndex, + statement: &CompactStatement, + ) -> Vec { + let (g, c_h, kind, in_group) = + match extract_statement_and_group_info(groups, originator, statement) { + None => return Vec::new(), + Some(x) => x, + }; + + self.confirmed_backed + .get(&c_h) + .map(|k| k.direct_statement_recipients(g, in_group, kind)) + .unwrap_or_default() + } + + /// Note that a direct statement about a given candidate was sent to or + /// received from the given validator. + pub fn note_sent_or_received_direct_statement( + &mut self, + groups: &Groups, + originator: ValidatorIndex, + counterparty: ValidatorIndex, + statement: &CompactStatement, + ) { + if let Some((g, c_h, kind, in_group)) = + extract_statement_and_group_info(groups, originator, statement) + { + if let Some(known) = self.confirmed_backed.get_mut(&c_h) { + known.note_sent_or_received_direct_statement(counterparty, in_group, kind); + } + } + } +} + +fn extract_statement_and_group_info( + groups: &Groups, + originator: ValidatorIndex, + statement: &CompactStatement, +) -> Option<(GroupIndex, CandidateHash, StatementKind, usize)> { + let (statement_kind, candidate_hash) = match statement { + CompactStatement::Seconded(h) => (StatementKind::Seconded, h), + CompactStatement::Valid(h) => (StatementKind::Valid, h), + }; + + let group = match groups.by_validator_index(originator) { + None => return None, + Some(g) => g, + }; + + let index_in_group = groups.get(group)?.iter().position(|v| v == &originator)?; + + Some((group, *candidate_hash, statement_kind, index_in_group)) } /// A summary of a manifest being sent by a counterparty. 
@@ -717,6 +774,28 @@ impl KnownBackedCandidate { } } + fn direct_statement_recipients( + &self, + group_index: GroupIndex, + originator_index_in_group: usize, + statement_kind: StatementKind, + ) -> Vec { + if group_index != self.group_index { + return Vec::new() + } + + self.mutual_knowledge + .iter() + .filter(|(_, k)| k.local_knowledge.is_some()) + .filter(|(_, k)| { + k.remote_knowledge + .as_ref() + .map_or(false, |r| !r.contains(originator_index_in_group, statement_kind)) + }) + .map(|(v, _)| *v) + .collect() + } + fn can_receive_direct_statement_from( &self, validator: ValidatorIndex, @@ -964,7 +1043,7 @@ mod tests { #[test] fn reject_disallowed_manifest() { - let mut tracker = PerRelayParentGridTracker::default(); + let mut tracker = GridTracker::default(); let session_topology = SessionTopologyView { group_views: vec![( GroupIndex(0), @@ -1031,7 +1110,7 @@ mod tests { #[test] fn reject_malformed_wrong_group_size() { - let mut tracker = PerRelayParentGridTracker::default(); + let mut tracker = GridTracker::default(); let session_topology = SessionTopologyView { group_views: vec![( GroupIndex(0), @@ -1094,7 +1173,7 @@ mod tests { #[test] fn reject_malformed_no_seconders() { - let mut tracker = PerRelayParentGridTracker::default(); + let mut tracker = GridTracker::default(); let session_topology = SessionTopologyView { group_views: vec![( GroupIndex(0), @@ -1140,7 +1219,7 @@ mod tests { #[test] fn reject_malformed_below_threshold() { - let mut tracker = PerRelayParentGridTracker::default(); + let mut tracker = GridTracker::default(); let session_topology = SessionTopologyView { group_views: vec![( GroupIndex(0), diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 1148728776f6..37e2c5cd9113 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -53,6 +53,7 @@ use crate::{ }; use 
candidates::{BadAdvertisement, Candidates}; use cluster::{Accept as ClusterAccept, ClusterTracker, RejectIncoming as ClusterRejectIncoming}; +use grid::{GridTracker, ManifestSummary, StatementFilter}; use groups::Groups; use requests::RequestManager; use statement_store::StatementStore; @@ -105,6 +106,8 @@ struct LocalValidatorState { assignment: Option, // the 'direct-in-group' communication at this relay-parent. cluster_tracker: ClusterTracker, + // the grid-level communication at this relay-parent. + grid_tracker: GridTracker, } struct PerSessionState { @@ -431,6 +434,7 @@ fn find_local_validator_state( todo!(), // TODO [now]: seconding limit? ) .expect("group is non-empty because we are in it; qed"), + grid_tracker: GridTracker::default(), }) } @@ -448,7 +452,7 @@ pub(crate) fn handle_deactivate_leaf(state: &mut State, leaf_hash: Hash) { state.per_relay_parent.retain(|r, x| relay_parents.contains(r)); // TODO [now]: clean up requests - // TODO [now]: clean up candidates + state.candidates.collect_garbage(|h| relay_parents.contains(h)); // clean up sessions based on everything remaining. 
let sessions: HashSet<_> = state.per_relay_parent.values().map(|r| r.session).collect(); @@ -596,6 +600,7 @@ async fn send_statement_direct( Grid, } + let originator = statement.validator_index(); let (local_validator, targets) = { let local_validator = match per_relay_parent.local_validator.as_mut() { Some(v) => v, @@ -609,9 +614,15 @@ async fn send_statement_direct( .filter(|&v| v != &local_validator.index) .map(|v| (*v, TargetKind::Cluster)); - // TODO [now]: extend with grid targets, dedup + let grid_targets = local_validator + .grid_tracker + .direct_statement_recipients(&per_session.groups, originator, &compact_statement) + .into_iter() + .filter(|v| !local_validator.cluster_tracker.targets().contains(v)) + .map(|v| (v, TargetKind::Grid)); let targets = cluster_targets + .chain(grid_targets) .filter_map(|(v, k)| { session_info.discovery_keys.get(v.0 as usize).map(|a| (v, a.clone(), k)) }) @@ -620,8 +631,6 @@ async fn send_statement_direct( (local_validator, targets) }; - let originator = statement.validator_index(); - let mut prior_to = Vec::new(); let mut statement_to = Vec::new(); for (target, authority_id, kind) in targets { @@ -696,7 +705,13 @@ async fn send_statement_direct( } }, TargetKind::Grid => { - // TODO [now] + statement_to.push(peer_id); + local_validator.grid_tracker.note_sent_or_received_direct_statement( + &per_session.groups, + originator, + target, + &compact_statement, + ); }, } } From 958765aa75d1ff5af9def141c386f36d3d658a6b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 26 Oct 2022 21:03:55 -0500 Subject: [PATCH 082/220] send to cluster only if statement belongs to cluster --- .../src/vstaging/cluster.rs | 1 - .../src/vstaging/mod.rs | 43 +++++++++++-------- 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/cluster.rs b/node/network/statement-distribution/src/vstaging/cluster.rs index ca193608c692..9c17e3e17e14 100644 --- 
a/node/network/statement-distribution/src/vstaging/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/cluster.rs @@ -98,7 +98,6 @@ enum TaggedKnowledge { pub struct ClusterTracker { validators: Vec, seconding_limit: usize, - knowledge: HashMap>, } diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 37e2c5cd9113..e144c406f34b 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -549,6 +549,13 @@ pub(crate) async fn share_local_statement( Ok(()) } +// two kinds of targets: those in our 'cluster' (currently just those in the same group), +// and those we are propagating to through the grid. +enum DirectTargetKind { + Cluster, + Grid, +} + // Circulates a compact statement to all peers who need it: those in the current group of the // local validator, those in the next group for the parachain, and grid peers which have already // indicated that they know the candidate as backed. @@ -593,13 +600,6 @@ async fn send_statement_direct( CompactStatement::Valid(_) => false, }; - // two kinds of targets: those in our 'cluster' (currently just those in the same group), - // and those we are propagating to through the grid. - enum TargetKind { - Cluster, - Grid, - } - let originator = statement.validator_index(); let (local_validator, targets) = { let local_validator = match per_relay_parent.local_validator.as_mut() { @@ -607,21 +607,30 @@ async fn send_statement_direct( None => return, // sanity: should be impossible to reach this. 
}; - let cluster_targets = local_validator - .cluster_tracker - .targets() - .iter() - .filter(|&v| v != &local_validator.index) - .map(|v| (*v, TargetKind::Cluster)); + let statement_group = per_session.groups.by_validator_index(originator); + + let cluster_relevant = Some(local_validator.group) == statement_group; + let cluster_targets = if cluster_relevant { + Some(local_validator + .cluster_tracker + .targets() + .iter() + .filter(|&v| v != &local_validator.index) + .map(|v| (*v, DirectTargetKind::Cluster))) + } else { + None + }; let grid_targets = local_validator .grid_tracker .direct_statement_recipients(&per_session.groups, originator, &compact_statement) .into_iter() - .filter(|v| !local_validator.cluster_tracker.targets().contains(v)) - .map(|v| (v, TargetKind::Grid)); + .filter(|v| !cluster_relevant || !local_validator.cluster_tracker.targets().contains(v)) + .map(|v| (v, DirectTargetKind::Grid)); let targets = cluster_targets + .into_iter() + .flat_map(|c| c) .chain(grid_targets) .filter_map(|(v, k)| { session_info.discovery_keys.get(v.0 as usize).map(|a| (v, a.clone(), k)) @@ -643,7 +652,7 @@ async fn send_statement_direct( }; match kind { - TargetKind::Cluster => { + DirectTargetKind::Cluster => { if !local_validator.cluster_tracker.knows_candidate(target, candidate_hash) && !is_seconded { @@ -704,7 +713,7 @@ async fn send_statement_direct( statement_to.push(peer_id); } }, - TargetKind::Grid => { + DirectTargetKind::Grid => { statement_to.push(peer_id); local_validator.grid_tracker.note_sent_or_received_direct_statement( &per_session.groups, From 0bc269f5d69dc09286ecbd839175510ac55e6471 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 27 Oct 2022 21:47:36 -0500 Subject: [PATCH 083/220] improve handling of cluster statements --- .../src/vstaging/mod.rs | 227 +++++++++++------- 1 file changed, 140 insertions(+), 87 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs 
b/node/network/statement-distribution/src/vstaging/mod.rs index e144c406f34b..78173f59159c 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -80,9 +80,13 @@ const COST_IMPROPERLY_DECODED_RESPONSE: Rep = const COST_INVALID_RESPONSE: Rep = Rep::CostMajor("Invalid Candidate Response"); const COST_UNREQUESTED_RESPONSE_STATEMENT: Rep = Rep::CostMajor("Un-requested Statement In Response"); +const COST_INACCURATE_ADVERTISEMENT: Rep = + Rep::CostMajor("Peer advertised a candidate inaccurately"); const BENEFIT_VALID_RESPONSE: Rep = Rep::BenefitMajor("Peer Answered Candidate Request"); const BENEFIT_VALID_STATEMENT: Rep = Rep::BenefitMajor("Peer provided a valid statement"); +const BENEFIT_VALID_STATEMENT_FIRST: Rep = + Rep::BenefitMajorFirst("Peer was the first to provide a valid statement"); struct PerRelayParentState { validator_state: HashMap, @@ -540,11 +544,8 @@ pub(crate) async fn share_local_statement( } }; - // send the compact version of the statement to nodes in current group and next-up. If not a `Seconded` statement, - // send a `Seconded` statement as well. - send_statement_direct(ctx, state, relay_parent, local_group, compact_statement).await; - - // TODO [now]: send along grid if backed, send statement to backing if we can + // send the compact version of the statement to any peers which need it. + circulate_statement(ctx, state, relay_parent, local_group, compact_statement).await; Ok(()) } @@ -570,10 +571,8 @@ enum DirectTargetKind { // preconditions: the candidate entry exists in the state under the relay parent // and the statement has already been imported into the entry. If this is a `Valid` // statement, then there must be at least one `Seconded` statement. -// TODO [now]: make this a more general `broadcast_statement` with an `BroadcastBehavior` that -// affects targets: `Local` keeps current behavior while `Forward` only sends onwards via `BackedCandidate` knowers. 
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn send_statement_direct( +async fn circulate_statement( ctx: &mut Context, state: &mut State, relay_parent: Hash, @@ -611,12 +610,20 @@ async fn send_statement_direct( let cluster_relevant = Some(local_validator.group) == statement_group; let cluster_targets = if cluster_relevant { - Some(local_validator - .cluster_tracker - .targets() - .iter() - .filter(|&v| v != &local_validator.index) - .map(|v| (*v, DirectTargetKind::Cluster))) + Some( + local_validator + .cluster_tracker + .targets() + .iter() + .filter(|&&v| { + local_validator + .cluster_tracker + .can_send(v, originator, compact_statement.clone()) + .is_ok() + }) + .filter(|&v| v != &local_validator.index) + .map(|v| (*v, DirectTargetKind::Cluster)), + ) } else { None }; @@ -850,77 +857,35 @@ async fn handle_incoming_statement( .next() }; - // TODO [now]: handle direct statements from grid peers - let cluster_sender_index = match cluster_sender_index { - None => { - report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; - return - }, - Some(c) => c, - }; + let was_backed = false; // TODO [now] - // additional cluster checks. 
- { - match local_validator.cluster_tracker.can_receive( + let is_cluster = cluster_sender_index.is_some(); + let checked_statement = if let Some(cluster_sender_index) = cluster_sender_index { + match handle_cluster_statement( + relay_parent, + local_validator, + per_relay_parent.session, + &per_session.session_info, + statement, cluster_sender_index, - statement.unchecked_validator_index(), - statement.unchecked_payload().clone(), ) { - Ok(ClusterAccept::Ok | ClusterAccept::WithPrejudice) => {}, - Err(ClusterRejectIncoming::ExcessiveSeconded) => { - report_peer(ctx.sender(), peer, COST_EXCESSIVE_SECONDED).await; - return - }, - Err(ClusterRejectIncoming::CandidateUnknown | ClusterRejectIncoming::Duplicate) => { - report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; - return - }, - Err(ClusterRejectIncoming::NotInGroup) => { - // sanity: shouldn't be possible; we already filtered this - // out above. + Ok(Some(s)) => s, + Ok(None) => return, + Err(rep) => { + report_peer(ctx.sender(), peer, rep).await; return }, } - } - - // Ensure the statement is correctly signed. 
- let checked_statement = match check_statement_signature( - per_relay_parent.session, - &session_info.validators[..], - relay_parent, - statement, - ) { - Ok(s) => s, - Err(_) => { - report_peer(ctx.sender(), peer, COST_INVALID_SIGNATURE).await; - return - }, + } else { + // TODO [now]: handle direct statements from grid peers + // TODO [now]: if not a grid peer + report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + return }; - local_validator.cluster_tracker.note_received( - cluster_sender_index, - checked_statement.validator_index(), - checked_statement.payload().clone(), - ); - let statement = checked_statement.payload().clone(); let originator_index = checked_statement.validator_index(); let candidate_hash = *checked_statement.payload().candidate_hash(); - let was_fresh = - match per_relay_parent.statement_store.insert(&per_session.groups, checked_statement) { - Err(_) => { - // sanity: should never happen. - gum::warn!( - target: LOG_TARGET, - ?relay_parent, - validator_index = ?originator_index, - "Error -Cluster accepted message from unknown validator." - ); - - return - }, - Ok(known) => known, - }; let originator_group = per_relay_parent .statement_store @@ -928,18 +893,22 @@ async fn handle_incoming_statement( .expect("validator confirmed to be known by statement_store.insert; qed"); // Insert an unconfirmed candidate entry if needed - let res = state.candidates.insert_unconfirmed( - peer.clone(), - candidate_hash, - relay_parent, - originator_group, - None, - ); + { + let res = state.candidates.insert_unconfirmed( + peer.clone(), + candidate_hash, + relay_parent, + originator_group, + None, + ); + + if let Err(BadAdvertisement) = res { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + return + } + } - if let Err(BadAdvertisement) = res { - // TODO [now]: punish the peer. - // TODO [now]: return? 
- } else if !state.candidates.is_confirmed(&candidate_hash) { + if !state.candidates.is_confirmed(&candidate_hash) { // If the candidate is not confirmed, note that we should attempt // to request it from the given peer. let mut request_entry = @@ -948,11 +917,95 @@ async fn handle_incoming_statement( .get_or_insert(relay_parent, candidate_hash, originator_group); request_entry.get_mut().add_peer(peer); + + // We only successfully accept statements from the grid on confirmed + // candidates, therefore this check only passes if the statement is from the cluster request_entry.get_mut().set_cluster_priority(); } + let was_fresh = + match per_relay_parent.statement_store.insert(&per_session.groups, checked_statement) { + Err(_) => { + // sanity: should never happen. + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + validator_index = ?originator_index, + "Error - Cluster accepted message from unknown validator." + ); + + return + }, + Ok(known) => known, + }; + + let is_backed = false; // TODO [now] + if was_fresh { - // both of the below probably in some shared function. - // TODO [now]: send along grid if backed, send statement to backing if we can + report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT_FIRST).await; + + // both of the below probably in some shared function. + // TODO [now]: circulate the statement + // TODO [now]: import the statement into backing if we can. + } else { + report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT).await; + } + + if is_backed && !was_backed { + // TODO [now]: handle a candidate being completely backed now. } } + +/// Checks whether a statement is allowed, whether the signature is accurate, +/// inserting an unconfirmed candidate entry and importing into the cluster tracker +/// if successful. +/// +/// if successful, this returns a checked signed statement if it should be imported +/// or otherwise an error indicating a reputational fault. 
+fn handle_cluster_statement( + relay_parent: Hash, + local_validator: &mut LocalValidatorState, + session: SessionIndex, + session_info: &SessionInfo, + statement: UncheckedSignedStatement, + cluster_sender_index: ValidatorIndex, +) -> Result, Rep> { + // additional cluster checks. + let should_import = { + match local_validator.cluster_tracker.can_receive( + cluster_sender_index, + statement.unchecked_validator_index(), + statement.unchecked_payload().clone(), + ) { + Ok(ClusterAccept::Ok) => true, + Ok(ClusterAccept::WithPrejudice) => false, + Err(ClusterRejectIncoming::ExcessiveSeconded) => return Err(COST_EXCESSIVE_SECONDED), + Err(ClusterRejectIncoming::CandidateUnknown | ClusterRejectIncoming::Duplicate) => + return Err(COST_UNEXPECTED_STATEMENT), + Err(ClusterRejectIncoming::NotInGroup) => { + // sanity: shouldn't be possible; we already filtered this + // out above. + return Err(COST_UNEXPECTED_STATEMENT) + }, + } + }; + + // Ensure the statement is correctly signed. + let checked_statement = match check_statement_signature( + session, + &session_info.validators[..], + relay_parent, + statement, + ) { + Ok(s) => s, + Err(_) => return Err(COST_INVALID_SIGNATURE), + }; + + local_validator.cluster_tracker.note_received( + cluster_sender_index, + checked_statement.validator_index(), + checked_statement.payload().clone(), + ); + + Ok(if should_import { Some(checked_statement) } else { None }) +} From c2724265388c76028e34d002cc969b626f7769c6 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 27 Oct 2022 22:05:25 -0500 Subject: [PATCH 084/220] handle incoming statements along the grid --- .../src/vstaging/grid.rs | 41 +++++++++ .../src/vstaging/mod.rs | 92 ++++++++++++++++--- 2 files changed, 122 insertions(+), 11 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index b4c812798d50..1ae402963686 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs 
+++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -392,6 +392,25 @@ impl GridTracker { } } + /// Determine the validators which can send a statement by direct broadcast. + pub fn direct_statement_senders( + &self, + groups: &Groups, + originator: ValidatorIndex, + statement: &CompactStatement, + ) -> Vec { + let (g, c_h, kind, in_group) = + match extract_statement_and_group_info(groups, originator, statement) { + None => return Vec::new(), + Some(x) => x, + }; + + self.confirmed_backed + .get(&c_h) + .map(|k| k.direct_statement_senders(g, in_group, kind)) + .unwrap_or_default() + } + /// Determine the validators which can receive a statement by direct /// broadcast. pub fn direct_statement_recipients( @@ -774,6 +793,28 @@ impl KnownBackedCandidate { } } + fn direct_statement_senders( + &self, + group_index: GroupIndex, + originator_index_in_group: usize, + statement_kind: StatementKind, + ) -> Vec { + if group_index != self.group_index { + return Vec::new() + } + + self.mutual_knowledge + .iter() + .filter(|(_, k)| k.remote_knowledge.is_some()) + .filter(|(_, k)| { + k.local_knowledge + .as_ref() + .map_or(false, |r| !r.contains(originator_index_in_group, statement_kind)) + }) + .map(|(v, _)| *v) + .collect() + } + fn direct_statement_recipients( &self, group_index: GroupIndex, diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 78173f59159c..4d5bc863d017 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -844,6 +844,16 @@ async fn handle_incoming_statement( Some(l) => l, }; + let originator_group = match per_session.groups + .by_validator_index(statement.unchecked_validator_index()) + { + Some(g) => g, + None => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + return + } + }; + let cluster_sender_index = { let allowed_senders = local_validator .cluster_tracker @@ 
-859,11 +869,10 @@ async fn handle_incoming_statement( let was_backed = false; // TODO [now] - let is_cluster = cluster_sender_index.is_some(); let checked_statement = if let Some(cluster_sender_index) = cluster_sender_index { match handle_cluster_statement( relay_parent, - local_validator, + &mut local_validator.cluster_tracker, per_relay_parent.session, &per_session.session_info, statement, @@ -877,10 +886,38 @@ async fn handle_incoming_statement( }, } } else { - // TODO [now]: handle direct statements from grid peers - // TODO [now]: if not a grid peer - report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; - return + let grid_sender_index = local_validator.grid_tracker + .direct_statement_senders( + &per_session.groups, + statement.unchecked_validator_index(), + statement.unchecked_payload(), + ) + .into_iter() + .filter_map(|i| session_info.discovery_keys.get(i.0 as usize).map(|ad| (i, ad))) + .filter(|(_, ad)| peer_state.is_authority(ad)) + .map(|(i, _)| i) + .next(); + + if let Some(grid_sender_index) = grid_sender_index { + match handle_grid_statement( + relay_parent, + &mut local_validator.grid_tracker, + per_relay_parent.session, + &per_session, + statement, + grid_sender_index, + ) { + Ok(s) => s, + Err(rep) => { + report_peer(ctx.sender(), peer, rep).await; + return + } + } + } else { + // Not a cluster or grid peer. + report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + return + } }; let statement = checked_statement.payload().clone(); @@ -957,14 +994,13 @@ async fn handle_incoming_statement( } /// Checks whether a statement is allowed, whether the signature is accurate, -/// inserting an unconfirmed candidate entry and importing into the cluster tracker -/// if successful. +/// and importing into the cluster tracker if successful. /// /// if successful, this returns a checked signed statement if it should be imported /// or otherwise an error indicating a reputational fault. 
fn handle_cluster_statement( relay_parent: Hash, - local_validator: &mut LocalValidatorState, + cluster_tracker: &mut ClusterTracker, session: SessionIndex, session_info: &SessionInfo, statement: UncheckedSignedStatement, @@ -972,7 +1008,7 @@ fn handle_cluster_statement( ) -> Result, Rep> { // additional cluster checks. let should_import = { - match local_validator.cluster_tracker.can_receive( + match cluster_tracker.can_receive( cluster_sender_index, statement.unchecked_validator_index(), statement.unchecked_payload().clone(), @@ -1001,7 +1037,7 @@ fn handle_cluster_statement( Err(_) => return Err(COST_INVALID_SIGNATURE), }; - local_validator.cluster_tracker.note_received( + cluster_tracker.note_received( cluster_sender_index, checked_statement.validator_index(), checked_statement.payload().clone(), @@ -1009,3 +1045,37 @@ fn handle_cluster_statement( Ok(if should_import { Some(checked_statement) } else { None }) } + +/// Checks whether the signature is accurate, +/// importing into the grid tracker if successful. +/// +/// if successful, this returns a checked signed statement if it should be imported +/// or otherwise an error indicating a reputational fault. +fn handle_grid_statement( + relay_parent: Hash, + grid_tracker: &mut GridTracker, + session: SessionIndex, + per_session: &PerSessionState, + statement: UncheckedSignedStatement, + grid_sender_index: ValidatorIndex, +) -> Result { + // Ensure the statement is correctly signed. 
+ let checked_statement = match check_statement_signature( + session, + &per_session.session_info.validators[..], + relay_parent, + statement, + ) { + Ok(s) => s, + Err(_) => return Err(COST_INVALID_SIGNATURE), + }; + + grid_tracker.note_sent_or_received_direct_statement( + &per_session.groups, + checked_statement.validator_index(), + grid_sender_index, + &checked_statement.payload(), + ); + + Ok(checked_statement) +} From b72b4ac7a4af155fb3d64e9c19e7ee32c3deea59 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 2 Nov 2022 00:07:40 -0500 Subject: [PATCH 085/220] API for introduction of candidates into the tree --- .../src/fragment_tree.rs | 32 +++++- node/core/prospective-parachains/src/lib.rs | 98 +++++++++++++++---- node/subsystem-types/src/messages.rs | 41 ++++++-- 3 files changed, 139 insertions(+), 32 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index ab9d678f77b0..f1455d1fc52d 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -54,7 +54,7 @@ //! bounded and in practice will not exceed a few thousand at any time. This naive implementation //! will still perform fairly well under these conditions, despite being somewhat wasteful of memory. -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::{hash_map::{Entry, HashMap}, BTreeMap, HashSet}; use super::LOG_TARGET; use bitvec::prelude::*; @@ -90,8 +90,7 @@ impl CandidateStorage { CandidateStorage { by_parent_head: HashMap::new(), by_candidate_hash: HashMap::new() } } - /// Introduce a new candidate. The candidate passed to this function - /// should have been seconded before introduction. + /// Introduce a new candidate. 
pub fn add_candidate( &mut self, candidate: CommittedCandidateReceipt, @@ -112,7 +111,7 @@ impl CandidateStorage { let entry = CandidateEntry { candidate_hash, relay_parent: candidate.descriptor.relay_parent, - state: CandidateState::Seconded, + state: CandidateState::Introduced, candidate: ProspectiveCandidate { commitments: candidate.commitments, collator: candidate.descriptor.collator, @@ -130,6 +129,28 @@ impl CandidateStorage { Ok(candidate_hash) } + /// Remove a candidate from the store. + pub fn remove_candidate(&mut self, candidate_hash: &CandidateHash) { + if let Some(entry) = self.by_candidate_hash.remove(candidate_hash) { + let parent_head_hash = entry.candidate.persisted_validation_data.parent_head.hash(); + if let Entry::Occupied(mut e) = self.by_parent_head.entry(parent_head_hash) { + e.get_mut().remove(&candidate_hash); + if e.get().is_empty() { + e.remove(); + } + } + } + } + + /// Note that an existing candidate has been seconded. + pub fn mark_seconded(&mut self, candidate_hash: &CandidateHash) { + if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) { + if entry.state != CandidateState::Backed { + entry.state = CandidateState::Seconded; + } + } + } + /// Note that an existing candidate has been backed. pub fn mark_backed(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) { @@ -191,6 +212,9 @@ impl CandidateStorage { /// Candidates aren't even considered until they've at least been seconded. #[derive(Debug, PartialEq)] enum CandidateState { + /// The candidate has been introduced in a spam-protected way but + /// is not necessarily backed. + Introduced, /// The candidate has been seconded. Seconded, /// The candidate has been completely backed by the group. 
diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 6d203e902cdd..690075868ab8 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -35,13 +35,13 @@ use polkadot_node_subsystem::{ messages::{ ChainApiMessage, FragmentTreeMembership, HypotheticalDepthRequest, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage, - RuntimeApiRequest, + RuntimeApiRequest, IntroduceCandidateRequest, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}; use polkadot_primitives::vstaging::{ - BlockNumber, CandidateHash, CommittedCandidateReceipt, CoreState, Hash, Id as ParaId, + BlockNumber, CandidateHash, CoreState, Hash, Id as ParaId, PersistedValidationData, }; @@ -124,8 +124,10 @@ async fn run_iteration(ctx: &mut Context, view: &mut View) -> Result<() }, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOrchestra::Communication { msg } => match msg { - ProspectiveParachainsMessage::CandidateSeconded(para, candidate, pvd, tx) => - handle_candidate_seconded(&mut *ctx, view, para, candidate, pvd, tx).await?, + ProspectiveParachainsMessage::IntroduceCandidate(request, tx) => + handle_candidate_introduced(&mut *ctx, view, request, tx).await?, + ProspectiveParachainsMessage::CandidateSeconded(para, candidate_hash) => + handle_candidate_seconded(view, para, candidate_hash), ProspectiveParachainsMessage::CandidateBacked(para, candidate_hash) => handle_candidate_backed(&mut *ctx, view, para, candidate_hash).await?, ProspectiveParachainsMessage::GetBackableCandidate( @@ -261,14 +263,19 @@ fn prune_view_candidate_storage(view: &mut View) { } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] -async fn handle_candidate_seconded( +async fn handle_candidate_introduced( 
_ctx: &mut Context, view: &mut View, - para: ParaId, - candidate: CommittedCandidateReceipt, - pvd: PersistedValidationData, + request: IntroduceCandidateRequest, tx: oneshot::Sender, ) -> JfyiErrorResult<()> { + let IntroduceCandidateRequest { + candidate_para: para, + candidate_receipt: candidate, + persisted_validation_data: pvd, + keep_if_unneeded, + } = request; + // Add the candidate to storage. // Then attempt to add it to all trees. let storage = match view.candidate_storage.get_mut(¶) { @@ -288,8 +295,13 @@ async fn handle_candidate_seconded( let candidate_hash = match storage.add_candidate(candidate, pvd) { Ok(c) => c, - Err(crate::fragment_tree::CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => { - let _ = tx.send(Vec::new()); + Err(crate::fragment_tree::CandidateStorageInsertionError::CandidateAlreadyKnown(c)) => { + // Candidate known - return existing fragment tree membership. + let _ = tx.send(fragment_tree_membership( + &view.active_leaves, + para, + c, + )); return Ok(()) }, Err( @@ -319,11 +331,49 @@ async fn handle_candidate_seconded( } } } + + if keep_if_unneeded && membership.is_empty() { + storage.remove_candidate(&candidate_hash); + } + let _ = tx.send(membership); Ok(()) } +fn handle_candidate_seconded( + view: &mut View, + para: ParaId, + candidate_hash: CandidateHash, +) { + let storage = match view.candidate_storage.get_mut(¶) { + None => { + gum::warn!( + target: LOG_TARGET, + para_id = ?para, + ?candidate_hash, + "Received instruction to second unknown candidate", + ); + + return + }, + Some(storage) => storage, + }; + + if !storage.contains(&candidate_hash) { + gum::warn!( + target: LOG_TARGET, + para_id = ?para, + ?candidate_hash, + "Received instruction to second unknown candidate", + ); + + return + } + + storage.mark_seconded(&candidate_hash); +} + #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] async fn handle_candidate_backed( _ctx: &mut Context, @@ -337,7 +387,7 @@ async fn 
handle_candidate_backed( target: LOG_TARGET, para_id = ?para, ?candidate_hash, - "Received instruction to back candidate", + "Received instruction to back unknown candidate", ); return Ok(()) @@ -350,7 +400,7 @@ async fn handle_candidate_backed( target: LOG_TARGET, para_id = ?para, ?candidate_hash, - "Received instruction to mark unknown candidate as backed.", + "Received instruction to back unknown candidate", ); return Ok(()) @@ -450,21 +500,33 @@ fn answer_hypothetical_depths_request( } } -fn answer_tree_membership_request( - view: &View, +fn fragment_tree_membership( + active_leaves: &HashMap, para: ParaId, candidate: CandidateHash, - tx: oneshot::Sender, -) { +) -> FragmentTreeMembership { let mut membership = Vec::new(); - for (relay_parent, view_data) in &view.active_leaves { + for (relay_parent, view_data) in active_leaves { if let Some(tree) = view_data.fragment_trees.get(¶) { if let Some(depths) = tree.candidate(&candidate) { membership.push((*relay_parent, depths)); } } } - let _ = tx.send(membership); + membership +} + +fn answer_tree_membership_request( + view: &View, + para: ParaId, + candidate: CandidateHash, + tx: oneshot::Sender, +) { + let _ = tx.send(fragment_tree_membership( + &view.active_leaves, + para, + candidate, + )); } fn answer_minimum_relay_parents_request( diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 84dfcbed8af2..3968e935ab9d 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -976,8 +976,27 @@ pub enum GossipSupportMessage { #[derive(Debug)] pub enum PvfCheckerMessage {} +/// Request introduction of a candidate into the prospective parachains subsystem. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct IntroduceCandidateRequest { + /// The para-id of the candidate. + pub candidate_para: ParaId, + /// The candidate receipt itself. + pub candidate_receipt: CommittedCandidateReceipt, + /// The persisted validation data of the candidate. 
+ pub persisted_validation_data: PersistedValidationData, + /// Whether to keep the candidate in storage if unable + /// to introduce into any fragment tree. + /// + /// This should be set to `false` in any scenario where the + /// candidate has not already been vetted for spam-prevention. + pub keep_if_unneeded: bool, +} + /// A request for the depths a hypothetical candidate would occupy within -/// some fragment tree. +/// some fragment tree. Note that this is not an absolute indication of whether +/// a candidate can be added to a fragment tree, as the commitments are not +/// considered in this request. #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub struct HypotheticalDepthRequest { /// The hash of the potential candidate. @@ -1013,17 +1032,19 @@ pub type FragmentTreeMembership = Vec<(Hash, Vec)>; pub enum ProspectiveParachainsMessage { /// Inform the Prospective Parachains Subsystem of a new candidate. /// - /// The response sender accepts the candidate membership, which is empty - /// if the candidate was already known. - CandidateSeconded( - ParaId, - CommittedCandidateReceipt, - PersistedValidationData, + /// The response sender accepts the candidate membership, which is the existing + /// membership of the candidate if it was already known. + IntroduceCandidate( + IntroduceCandidateRequest, oneshot::Sender, ), - /// Inform the Prospective Parachains Subsystem that a previously seconded candidate - /// has been backed. This requires that `CandidateSeconded` was sent for the candidate - /// some time in the past. + /// Inform the Prospective Parachains Subsystem that a previously introduced candidate + /// has been seconded. This requires that the candidate was successfully introduced in + /// the past. + CandidateSeconded(ParaId, CandidateHash), + /// Inform the Prospective Parachains Subsystem that a previously introduced candidate + /// has been backed. This requires that the candidate was successfully introduced in + /// the past. 
CandidateBacked(ParaId, CandidateHash), /// Get a backable candidate hash for the given parachain, under the given relay-parent hash, /// which is a descendant of the given candidate hashes. Returns `None` on the channel From cb4f2ef09f32c9265b1c0a3d9a5085bf558753c2 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 2 Nov 2022 00:20:55 -0500 Subject: [PATCH 086/220] backing: use new prospective parachains API --- node/core/backing/src/lib.rs | 19 ++- .../src/tests/prospective_parachains.rs | 131 +++++++++++++----- 2 files changed, 109 insertions(+), 41 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 7d8671cef236..3fad11ea7ffb 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -88,7 +88,7 @@ use polkadot_node_subsystem::{ AvailabilityDistributionMessage, AvailabilityStoreMessage, CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage, HypotheticalDepthRequest, ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, RuntimeApiMessage, - RuntimeApiRequest, StatementDistributionMessage, + RuntimeApiRequest, StatementDistributionMessage, IntroduceCandidateRequest, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; @@ -1500,10 +1500,14 @@ async fn import_statement( if !per_candidate.contains_key(&candidate_hash) { if rp_state.prospective_parachains_mode.is_enabled() { let (tx, rx) = oneshot::channel(); - ctx.send_message(ProspectiveParachainsMessage::CandidateSeconded( - candidate.descriptor().para_id, - candidate.clone(), - pvd.clone(), + ctx.send_message(ProspectiveParachainsMessage::IntroduceCandidate( + IntroduceCandidateRequest { + candidate_para: candidate.descriptor().para_id, + candidate_receipt: candidate.clone(), + persisted_validation_data: pvd.clone(), + // Since this is used during seconding + keep_if_unneeded: false, + }, tx, )) .await; @@ -1522,6 +1526,11 @@ async fn import_statement( return 
Err(Error::RejectedByProspectiveParachains) }, } + + ctx.send_message(ProspectiveParachainsMessage::CandidateSeconded( + candidate.descriptor().para_id, + candidate_hash, + )).await; } // Only save the candidate if it was approved by prospective parachains. diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 59db7f62b722..b5754ae6c1b2 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -394,18 +394,27 @@ fn seconding_sanity_check_allowed() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded( - candidate_para, - candidate_receipt, - _pvd, + ProspectiveParachainsMessage::IntroduceCandidate( + req, tx, ), - ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data + && !req.keep_if_unneeded => { // Any non-empty response will do. 
tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap(); } ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded(_, _) + ) + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -523,18 +532,27 @@ fn seconding_sanity_check_disallowed() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded( - candidate_para, - candidate_receipt, - _pvd, + ProspectiveParachainsMessage::IntroduceCandidate( + req, tx, ), - ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data + && !req.keep_if_unneeded => { // Any non-empty response will do. tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap(); } ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded(_, _) + ) + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -706,13 +724,15 @@ fn prospective_parachains_reject_candidate() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded( - candidate_para, - candidate_receipt, - _pvd, + ProspectiveParachainsMessage::IntroduceCandidate( + req, tx, ), - ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data + && !req.keep_if_unneeded => { // Reject it. 
tx.send(Vec::new()).unwrap(); } @@ -757,18 +777,27 @@ fn prospective_parachains_reject_candidate() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded( - candidate_para, - candidate_receipt, - _pvd, + ProspectiveParachainsMessage::IntroduceCandidate( + req, tx, ), - ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data + && !req.keep_if_unneeded => { // Any non-empty response will do. tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap(); } ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded(_, _) + ) + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -880,18 +909,27 @@ fn second_multiple_candidates_per_relay_parent() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded( - candidate_para, - candidate_receipt, - _pvd, + ProspectiveParachainsMessage::IntroduceCandidate( + req, tx, ), - ) if &candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + ) if + &req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data + && !req.keep_if_unneeded => { // Any non-empty response will do. 
tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); } ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded(_, _) + ) + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1010,18 +1048,27 @@ fn backing_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded( - candidate_para, - candidate_receipt, - _pvd, + ProspectiveParachainsMessage::IntroduceCandidate( + req, tx, ), - ) if candidate_receipt == candidate_a && candidate_para == para_id && pvd == _pvd => { + ) if + req.candidate_receipt == candidate_a + && req.candidate_para == para_id + && pvd == req.persisted_validation_data + && !req.keep_if_unneeded => { // Any non-empty response will do. tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); } ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded(_, _) + ) + ); + assert_validate_seconded_candidate( &mut virtual_overseer, candidate_a.descriptor().relay_parent, @@ -1212,10 +1259,13 @@ fn concurrent_dependent_candidates() { // Order is not guaranteed since we have 2 statements being handled concurrently. 
match msg { AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded(.., tx), + ProspectiveParachainsMessage::IntroduceCandidate(_, tx), ) => { tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); }, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded(_, _) + ) => {}, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, RuntimeApiRequest::ValidationCodeByHash(_, tx), @@ -1399,18 +1449,27 @@ fn seconding_sanity_check_occupy_same_depth() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded( - candidate_para, - candidate_receipt, - _pvd, + ProspectiveParachainsMessage::IntroduceCandidate( + req, tx, ), - ) if &candidate_receipt == candidate && candidate_para == *para_id && pvd == _pvd => { + ) if + &req.candidate_receipt == candidate + && &req.candidate_para == para_id + && pvd == req.persisted_validation_data + && !req.keep_if_unneeded => { // Any non-empty response will do. 
tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); } ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded(_, _) + ) + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( From 83e0bab94974dbb5270ac63195244f645f089374 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 2 Nov 2022 00:21:21 -0500 Subject: [PATCH 087/220] fmt prospective parachains changes --- node/core/backing/src/lib.rs | 7 +++-- .../src/tests/prospective_parachains.rs | 30 +++++++++++-------- .../src/fragment_tree.rs | 5 +++- node/core/prospective-parachains/src/lib.rs | 25 ++++------------ node/subsystem-types/src/messages.rs | 5 +--- 5 files changed, 32 insertions(+), 40 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 3fad11ea7ffb..0c1a3d885fa1 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -87,8 +87,8 @@ use polkadot_node_subsystem::{ messages::{ AvailabilityDistributionMessage, AvailabilityStoreMessage, CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage, HypotheticalDepthRequest, - ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, RuntimeApiMessage, - RuntimeApiRequest, StatementDistributionMessage, IntroduceCandidateRequest, + IntroduceCandidateRequest, ProspectiveParachainsMessage, ProvisionableData, + ProvisionerMessage, RuntimeApiMessage, RuntimeApiRequest, StatementDistributionMessage, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; @@ -1530,7 +1530,8 @@ async fn import_statement( ctx.send_message(ProspectiveParachainsMessage::CandidateSeconded( candidate.descriptor().para_id, candidate_hash, - )).await; + )) + .await; } // Only save the candidate if it was approved by prospective parachains. 
diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index b5754ae6c1b2..0deaf50647d1 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -410,9 +410,10 @@ fn seconding_sanity_check_allowed() { assert_matches!( virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded(_, _) - ) + AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( + _, + _ + )) ); assert_matches!( @@ -548,9 +549,10 @@ fn seconding_sanity_check_disallowed() { assert_matches!( virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded(_, _) - ) + AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( + _, + _ + )) ); assert_matches!( @@ -793,9 +795,10 @@ fn prospective_parachains_reject_candidate() { assert_matches!( virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded(_, _) - ) + AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( + _, + _ + )) ); assert_matches!( @@ -1064,9 +1067,10 @@ fn backing_works() { assert_matches!( virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded(_, _) - ) + AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( + _, + _ + )) ); assert_validate_seconded_candidate( @@ -1264,7 +1268,7 @@ fn concurrent_dependent_candidates() { tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); }, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded(_, _) + ProspectiveParachainsMessage::CandidateSeconded(_, _), ) => {}, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, diff --git a/node/core/prospective-parachains/src/fragment_tree.rs 
b/node/core/prospective-parachains/src/fragment_tree.rs index f1455d1fc52d..a1f1f4b7758d 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -54,7 +54,10 @@ //! bounded and in practice will not exceed a few thousand at any time. This naive implementation //! will still perform fairly well under these conditions, despite being somewhat wasteful of memory. -use std::collections::{hash_map::{Entry, HashMap}, BTreeMap, HashSet}; +use std::collections::{ + hash_map::{Entry, HashMap}, + BTreeMap, HashSet, +}; use super::LOG_TARGET; use bitvec::prelude::*; diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 690075868ab8..8adc22eaf33e 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -34,15 +34,14 @@ use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::{ ChainApiMessage, FragmentTreeMembership, HypotheticalDepthRequest, - ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage, - RuntimeApiRequest, IntroduceCandidateRequest, + IntroduceCandidateRequest, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, + RuntimeApiMessage, RuntimeApiRequest, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}; use polkadot_primitives::vstaging::{ - BlockNumber, CandidateHash, CoreState, Hash, Id as ParaId, - PersistedValidationData, + BlockNumber, CandidateHash, CoreState, Hash, Id as ParaId, PersistedValidationData, }; use crate::{ @@ -297,11 +296,7 @@ async fn handle_candidate_introduced( Ok(c) => c, Err(crate::fragment_tree::CandidateStorageInsertionError::CandidateAlreadyKnown(c)) => { // Candidate known - return existing fragment tree membership. 
- let _ = tx.send(fragment_tree_membership( - &view.active_leaves, - para, - c, - )); + let _ = tx.send(fragment_tree_membership(&view.active_leaves, para, c)); return Ok(()) }, Err( @@ -341,11 +336,7 @@ async fn handle_candidate_introduced( Ok(()) } -fn handle_candidate_seconded( - view: &mut View, - para: ParaId, - candidate_hash: CandidateHash, -) { +fn handle_candidate_seconded(view: &mut View, para: ParaId, candidate_hash: CandidateHash) { let storage = match view.candidate_storage.get_mut(¶) { None => { gum::warn!( @@ -522,11 +513,7 @@ fn answer_tree_membership_request( candidate: CandidateHash, tx: oneshot::Sender, ) { - let _ = tx.send(fragment_tree_membership( - &view.active_leaves, - para, - candidate, - )); + let _ = tx.send(fragment_tree_membership(&view.active_leaves, para, candidate)); } fn answer_minimum_relay_parents_request( diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 3968e935ab9d..59b7c73df600 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -1034,10 +1034,7 @@ pub enum ProspectiveParachainsMessage { /// /// The response sender accepts the candidate membership, which is the existing /// membership of the candidate if it was already known. - IntroduceCandidate( - IntroduceCandidateRequest, - oneshot::Sender, - ), + IntroduceCandidate(IntroduceCandidateRequest, oneshot::Sender), /// Inform the Prospective Parachains Subsystem that a previously introduced candidate /// has been seconded. This requires that the candidate was successfully introduced in /// the past. 
From 4640b1a6e0bc3a15c02c6bf59b767d7250a1fdcd Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 2 Nov 2022 00:21:35 -0500 Subject: [PATCH 088/220] fmt statement-dist --- .../src/vstaging/mod.rs | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 4d5bc863d017..70144313477a 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -844,15 +844,14 @@ async fn handle_incoming_statement( Some(l) => l, }; - let originator_group = match per_session.groups - .by_validator_index(statement.unchecked_validator_index()) - { - Some(g) => g, - None => { - report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; - return - } - }; + let originator_group = + match per_session.groups.by_validator_index(statement.unchecked_validator_index()) { + Some(g) => g, + None => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + return + }, + }; let cluster_sender_index = { let allowed_senders = local_validator @@ -886,7 +885,8 @@ async fn handle_incoming_statement( }, } } else { - let grid_sender_index = local_validator.grid_tracker + let grid_sender_index = local_validator + .grid_tracker .direct_statement_senders( &per_session.groups, statement.unchecked_validator_index(), @@ -911,7 +911,7 @@ async fn handle_incoming_statement( Err(rep) => { report_peer(ctx.sender(), peer, rep).await; return - } + }, } } else { // Not a cluster or grid peer. 
From b46177b8e616b8143f61ba9537d7ec6ca4085bf7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 2 Nov 2022 18:54:28 -0500 Subject: [PATCH 089/220] fix condition --- node/core/prospective-parachains/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 8adc22eaf33e..b7e79e2feffd 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -327,7 +327,7 @@ async fn handle_candidate_introduced( } } - if keep_if_unneeded && membership.is_empty() { + if !keep_if_unneeded && membership.is_empty() { storage.remove_candidate(&candidate_hash); } From 6b8370f2f8f77f9036e43791e9881aff7ef17794 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 2 Nov 2022 20:30:53 -0500 Subject: [PATCH 090/220] get ready for tracking importable candidates --- .../network/statement-distribution/src/lib.rs | 22 ++++--- .../src/vstaging/candidates.rs | 42 +++++++++++++- .../src/vstaging/mod.rs | 57 ++++++++++++------- node/subsystem-types/src/messages.rs | 7 +++ 4 files changed, 98 insertions(+), 30 deletions(-) diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 36b1ef4a956a..23438b434988 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -205,6 +205,8 @@ impl StatementDistributionSubsystem { })) => { let _timer = metrics.time_active_leaves_update(); + // TODO [now]: vstaging should handle activated first + // because of implicit view. for deactivated in deactivated { crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, deactivated); } @@ -241,13 +243,13 @@ impl StatementDistributionSubsystem { // pass to legacy, but not if the message isn't // v1. 
let legacy = match &event { - &NetworkBridgeEvent::PeerMessage(_, ref message) => match message { - Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility(_)) => true, - Versioned::V1(_) => true, - Versioned::VStaging(_) => false, - }, - _ => true, - }; + &NetworkBridgeEvent::PeerMessage(_, ref message) => match message { + Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility(_)) => true, + Versioned::V1(_) => true, + Versioned::VStaging(_) => false, + }, + _ => true, + }; if legacy { crate::legacy_v1::handle_network_update( @@ -264,6 +266,12 @@ impl StatementDistributionSubsystem { // TODO [now]: pass to vstaging, but not if the message is // v1 or the connecting peer is v1. }, + StatementDistributionMessage::Backed { + para_id, + para_head, + } => { + // TODO [now]: pass to vstaging + } }, } Ok(false) diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index 7c20e58daa6b..4e885725da19 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -42,6 +42,7 @@ use std::collections::{ #[derive(Default)] pub struct Candidates { candidates: HashMap, + by_parent_hash: HashMap>, } impl Candidates { @@ -123,9 +124,14 @@ impl Candidates { persisted_validation_data, assigned_group, parent_hash, + importable_under: HashSet::new(), }), ); + self.by_parent_hash.entry(parent_hash) + .or_default() + .insert(candidate_hash); + match prev_state { None => None, Some(CandidateState::Confirmed(_)) => None, @@ -166,9 +172,29 @@ impl Candidates { /// Prune all candidates according to the relay-parent predicate /// provided. 
- pub fn collect_garbage(&mut self, relay_parent_live: impl Fn(&Hash) -> bool) { - self.candidates.retain(|_, state| match state { - CandidateState::Confirmed(ref c) => relay_parent_live(&c.relay_parent()), + pub fn on_deactivate_leaves( + &mut self, + leaves: &[Hash], + relay_parent_live: impl Fn(&Hash) -> bool, + ) { + let by_parent_hash = &mut self.by_parent_hash; + self.candidates.retain(|c_hash, state| match state { + CandidateState::Confirmed(ref mut c) => { + if !relay_parent_live(&c.relay_parent()) { + if let Entry::Occupied(mut e) = by_parent_hash.entry(c.parent_hash) { + e.get_mut().remove(c_hash); + if e.get().is_empty() { + e.remove(); + } + } + false + } else { + for leaf_hash in leaves { + c.importable_under.remove(leaf_hash); + } + true + } + } CandidateState::Unconfirmed(ref mut c) => { c.claims.retain(|c| relay_parent_live(&c.1.relay_parent)); @@ -240,6 +266,8 @@ pub struct ConfirmedCandidate { persisted_validation_data: PersistedValidationData, assigned_group: GroupIndex, parent_hash: Hash, + // active leaves statements about this candidate are importable under. + importable_under: HashSet, } impl ConfirmedCandidate { @@ -253,6 +281,14 @@ impl ConfirmedCandidate { self.receipt.descriptor().para_id } + /// Whether the candidate is importable. 
+ pub fn is_importable<'a>(&self, under_active_leaf: impl Into>) -> bool { + match under_active_leaf.into() { + Some(h) => self.importable_under.contains(h), + None => !self.importable_under.is_empty(), + } + } + fn group_index(&self) -> GroupIndex { self.assigned_group } diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 70144313477a..89b66bcc7282 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -29,7 +29,7 @@ use polkadot_node_primitives::{ use polkadot_node_subsystem::{ jaeger, messages::{CandidateBackingMessage, NetworkBridgeEvent, NetworkBridgeTxMessage}, - overseer, ActivatedLeaf, PerLeafSpan, StatementDistributionSenderTrait, + overseer, ActiveLeavesUpdate, ActivatedLeaf, PerLeafSpan, StatementDistributionSenderTrait, }; use polkadot_node_subsystem_util::backing_implicit_view::{FetchError, View as ImplicitView}; use polkadot_primitives::vstaging::{ @@ -314,35 +314,46 @@ pub(crate) async fn handle_network_update( } } -/// This should only be invoked for leaves that implement prospective parachains. +/// If there is a new leaf, this should only be called for leaves which support +/// prospective parachains. 
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -pub(crate) async fn handle_activated_leaf( +pub(crate) async fn handle_active_leaves_update( ctx: &mut Context, state: &mut State, - leaf: ActivatedLeaf, + update: ActiveLeavesUpdate, ) -> JfyiErrorResult<()> { - state - .implicit_view - .activate_leaf(ctx.sender(), leaf.hash) - .await - .map_err(JfyiError::ActivateLeafFailure)?; - - for leaf in state.implicit_view.all_allowed_relay_parents() { - if state.per_relay_parent.contains_key(leaf) { + + if let Some(ref leaf) = update.activated { + state + .implicit_view + .activate_leaf(ctx.sender(), leaf.hash) + .await + .map_err(JfyiError::ActivateLeafFailure)?; + } + + handle_deactivate_leaves(state, &update.deactivated[..]); + + if let Some(ref leaf) = update.activated { + // TODO [now]: determine which candidates are importable under the given + // active leaf + } + + for new_relay_parent in state.implicit_view.all_allowed_relay_parents() { + if state.per_relay_parent.contains_key(new_relay_parent) { continue } // New leaf: fetch info from runtime API and initialize // `per_relay_parent`. let session_index = - polkadot_node_subsystem_util::request_session_index_for_child(*leaf, ctx.sender()) + polkadot_node_subsystem_util::request_session_index_for_child(*new_relay_parent, ctx.sender()) .await .await .map_err(JfyiError::RuntimeApiUnavailable)? .map_err(JfyiError::FetchSessionIndex)?; let availability_cores = - polkadot_node_subsystem_util::request_availability_cores(*leaf, ctx.sender()) + polkadot_node_subsystem_util::request_availability_cores(*new_relay_parent, ctx.sender()) .await .await .map_err(JfyiError::RuntimeApiUnavailable)? 
@@ -350,7 +361,7 @@ pub(crate) async fn handle_activated_leaf( if !state.per_session.contains_key(&session_index) { let session_info = polkadot_node_subsystem_util::request_session_info( - *leaf, + *new_relay_parent, session_index, ctx.sender(), ) @@ -363,7 +374,7 @@ pub(crate) async fn handle_activated_leaf( None => { gum::warn!( target: LOG_TARGET, - relay_parent = ?leaf, + relay_parent = ?new_relay_parent, "No session info available for current session" ); @@ -392,7 +403,7 @@ pub(crate) async fn handle_activated_leaf( }); state.per_relay_parent.insert( - *leaf, + *new_relay_parent, PerRelayParentState { validator_state: HashMap::new(), local_validator, @@ -442,9 +453,12 @@ fn find_local_validator_state( }) } -pub(crate) fn handle_deactivate_leaf(state: &mut State, leaf_hash: Hash) { +fn handle_deactivate_leaves(state: &mut State, leaves: &[Hash]) { // deactivate the leaf in the implicit view. - state.implicit_view.deactivate_leaf(leaf_hash); + for leaf in leaves { + state.implicit_view.deactivate_leaf(*leaf); + } + let relay_parents = state.implicit_view.all_allowed_relay_parents().collect::>(); // fast exit for no-op. @@ -456,7 +470,10 @@ pub(crate) fn handle_deactivate_leaf(state: &mut State, leaf_hash: Hash) { state.per_relay_parent.retain(|r, x| relay_parents.contains(r)); // TODO [now]: clean up requests - state.candidates.collect_garbage(|h| relay_parents.contains(h)); + state.candidates.on_deactivate_leaves( + &leaves, + |h| relay_parents.contains(h), + ); // clean up sessions based on everything remaining. 
let sessions: HashSet<_> = state.per_relay_parent.values().map(|r| r.session).collect(); diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 59b7c73df600..b05efa0385b2 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -748,6 +748,13 @@ pub enum StatementDistributionMessage { /// We have originated a signed statement in the context of /// given relay-parent hash and it should be distributed to other validators. Share(Hash, SignedFullStatementWithPVD), + /// The candidate received enough validity votes from the backing group. + Backed { + /// Candidate's para id. + para_id: ParaId, + /// Hash of the para head generated by candidate. + para_head: Hash, + }, /// Event from the network bridge. #[from] NetworkBridgeUpdate(NetworkBridgeEvent), From 63499a200929bd633127d52b961510a420f97ddf Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 2 Nov 2022 21:01:35 -0500 Subject: [PATCH 091/220] prospective parachains: add Cow logic --- .../src/fragment_tree.rs | 11 +-- .../src/inclusion_emulator/staging.rs | 68 +++++++++++++++---- 2 files changed, 59 insertions(+), 20 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index a1f1f4b7758d..c921df27ff45 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -54,6 +54,7 @@ //! bounded and in practice will not exceed a few thousand at any time. This naive implementation //! will still perform fairly well under these conditions, despite being somewhat wasteful of memory. 
+use std::borrow::Cow; use std::collections::{ hash_map::{Entry, HashMap}, BTreeMap, HashSet, @@ -116,7 +117,7 @@ impl CandidateStorage { relay_parent: candidate.descriptor.relay_parent, state: CandidateState::Introduced, candidate: ProspectiveCandidate { - commitments: candidate.commitments, + commitments: Cow::Owned(candidate.commitments), collator: candidate.descriptor.collator, collator_signature: candidate.descriptor.signature, persisted_validation_data, @@ -227,7 +228,7 @@ enum CandidateState { struct CandidateEntry { candidate_hash: CandidateHash, relay_parent: Hash, - candidate: ProspectiveCandidate, + candidate: ProspectiveCandidate<'static>, state: CandidateState, } @@ -650,11 +651,11 @@ impl FragmentTree { let f = Fragment::new( relay_parent.clone(), child_constraints.clone(), - candidate.candidate.clone(), + candidate.candidate.partial_clone(), ); match f { - Ok(f) => f, + Ok(f) => f.into_owned(), Err(e) => { gum::debug!( target: LOG_TARGET, @@ -695,7 +696,7 @@ impl FragmentTree { struct FragmentNode { // A pointer to the parent node. parent: NodePointer, - fragment: Fragment, + fragment: Fragment<'static>, candidate_hash: CandidateHash, depth: usize, cumulative_modifications: ConstraintModifications, diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 60eecb9b5180..6192d8d60060 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -118,6 +118,7 @@ use polkadot_primitives::vstaging::{ Constraints as PrimitiveConstraints, Hash, HeadData, Id as ParaId, PersistedValidationData, UpgradeRestriction, ValidationCodeHash, }; +use std::borrow::{Borrow, Cow}; use std::collections::HashMap; /// Constraints on inbound HRMP channels. @@ -523,9 +524,9 @@ impl ConstraintModifications { /// here. But the erasure-root is not. This means that prospective candidates /// are not correlated to any session in particular. 
#[derive(Debug, Clone, PartialEq)] -pub struct ProspectiveCandidate { +pub struct ProspectiveCandidate<'a> { /// The commitments to the output of the execution. - pub commitments: CandidateCommitments, + pub commitments: Cow<'a, CandidateCommitments>, /// The collator that created the candidate. pub collator: CollatorId, /// The signature of the collator on the payload. @@ -538,6 +539,35 @@ pub struct ProspectiveCandidate { pub validation_code_hash: ValidationCodeHash, } +impl<'a> ProspectiveCandidate<'a> { + fn into_owned(self) -> ProspectiveCandidate<'static> { + ProspectiveCandidate { + commitments: Cow::Owned(self.commitments.into_owned()), + ..self + } + } + + /// Partially clone the prospective candidate, but borrow the + /// parts which are potentially heavy. + pub fn partial_clone<'b>(&'b self) -> ProspectiveCandidate<'b> { + ProspectiveCandidate { + commitments: Cow::Borrowed(self.commitments.borrow()), + collator: self.collator.clone(), + collator_signature: self.collator_signature.clone(), + persisted_validation_data: self.persisted_validation_data.clone(), + pov_hash: self.pov_hash.clone(), + validation_code_hash: self.validation_code_hash.clone(), + } + } +} + +#[cfg(test)] +impl ProspectiveCandidate<'static> { + fn commitments_mut(&mut self) -> &mut CandidateCommitments { + self.commitments.to_mut() + } +} + /// Kinds of errors with the validity of a fragment. #[derive(Debug, Clone, PartialEq)] pub enum FragmentValidityError { @@ -582,19 +612,19 @@ pub enum FragmentValidityError { /// This is a type which guarantees that the candidate is valid under the /// operating constraints. #[derive(Debug, Clone, PartialEq)] -pub struct Fragment { +pub struct Fragment<'a> { /// The new relay-parent. relay_parent: RelayChainBlockInfo, /// The constraints this fragment is operating under. operating_constraints: Constraints, /// The core information about the prospective candidate. 
- candidate: ProspectiveCandidate, + candidate: ProspectiveCandidate<'a>, /// Modifications to the constraints based on the outputs of /// the candidate. modifications: ConstraintModifications, } -impl Fragment { +impl<'a> Fragment<'a> { /// Create a new fragment. /// /// This fails if the fragment isn't in line with the operating @@ -606,7 +636,7 @@ impl Fragment { pub fn new( relay_parent: RelayChainBlockInfo, operating_constraints: Constraints, - candidate: ProspectiveCandidate, + candidate: ProspectiveCandidate<'a>, ) -> Result { let modifications = { let commitments = &candidate.commitments; @@ -671,7 +701,7 @@ impl Fragment { } /// Access the underlying prospective candidate. - pub fn candidate(&self) -> &ProspectiveCandidate { + pub fn candidate(&self) -> &ProspectiveCandidate<'a> { &self.candidate } @@ -680,6 +710,14 @@ impl Fragment { &self.modifications } + /// Convert the fragment into an owned variant. + pub fn into_owned(self) -> Fragment<'static> { + Fragment { + candidate: self.candidate.into_owned(), + ..self + } + } + /// Validate this fragment against some set of constraints /// instead of the operating constraints. 
pub fn validate_against_constraints( @@ -1111,21 +1149,21 @@ mod tests { fn make_candidate( constraints: &Constraints, relay_parent: &RelayChainBlockInfo, - ) -> ProspectiveCandidate { + ) -> ProspectiveCandidate<'static> { let collator_pair = CollatorPair::generate().0; let collator = collator_pair.public(); let sig = collator_pair.sign(b"blabla".as_slice()); ProspectiveCandidate { - commitments: CandidateCommitments { + commitments: Cow::Owned(CandidateCommitments { upward_messages: Vec::new(), horizontal_messages: Vec::new(), new_validation_code: None, head_data: HeadData::from(vec![1, 2, 3, 4, 5]), processed_downward_messages: 0, hrmp_watermark: relay_parent.number, - }, + }), collator, collator_signature: sig, persisted_validation_data: PersistedValidationData { @@ -1205,7 +1243,7 @@ mod tests { let mut candidate = make_candidate(&constraints, &relay_parent); let max_code_size = constraints.max_code_size; - candidate.commitments.new_validation_code = Some(vec![0; max_code_size + 1].into()); + candidate.commitments_mut().new_validation_code = Some(vec![0; max_code_size + 1].into()); assert_eq!( Fragment::new(relay_parent, constraints, candidate), @@ -1243,7 +1281,7 @@ mod tests { let max_hrmp = constraints.max_hrmp_num_per_candidate; - candidate.commitments.horizontal_messages.extend((0..max_hrmp + 1).map(|i| { + candidate.commitments_mut().horizontal_messages.extend((0..max_hrmp + 1).map(|i| { OutboundHrmpMessage { recipient: ParaId::from(i as u32), data: vec![1, 2, 3] } })); @@ -1268,7 +1306,7 @@ mod tests { let mut candidate = make_candidate(&constraints, &relay_parent); constraints.upgrade_restriction = Some(UpgradeRestriction::Present); - candidate.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3])); + candidate.commitments_mut().new_validation_code = Some(ValidationCode(vec![1, 2, 3])); assert_eq!( Fragment::new(relay_parent, constraints, candidate), @@ -1287,7 +1325,7 @@ mod tests { let constraints = make_constraints(); let mut 
candidate = make_candidate(&constraints, &relay_parent); - candidate.commitments.horizontal_messages = vec![ + candidate.commitments_mut().horizontal_messages = vec![ OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![1, 2, 3] }, OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, ]; @@ -1297,7 +1335,7 @@ mod tests { Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), ); - candidate.commitments.horizontal_messages = vec![ + candidate.commitments_mut().horizontal_messages = vec![ OutboundHrmpMessage { recipient: ParaId::from(1 as u32), data: vec![1, 2, 3] }, OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, ]; From 19cca1ab433b40edcffa68e2b1f33e1dce08784f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 2 Nov 2022 21:37:57 -0500 Subject: [PATCH 092/220] incomplete and complete hypothetical candidates --- .../src/fragment_tree.rs | 210 +++++++++++++++--- node/core/prospective-parachains/src/lib.rs | 6 +- 2 files changed, 187 insertions(+), 29 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index c921df27ff45..97bde9d41a66 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -333,6 +333,47 @@ enum NodePointer { Storage(usize), } +/// A hypothetical candidate, which may or may not exist in +/// the fragment tree already. +pub(crate) enum HypotheticalCandidate<'a> { + Complete { + receipt: Cow<'a, CommittedCandidateReceipt>, + persisted_validation_data: PersistedValidationData, + }, + Incomplete { + relay_parent: Hash, + parent_head_data_hash: Hash, + }, +} + +impl<'a> HypotheticalCandidate<'a> { + fn parent_head_data_hash(&self) -> Hash { + match *self { + HypotheticalCandidate::Complete { + ref persisted_validation_data, + .. 
+ } => persisted_validation_data.parent_head.hash(), + HypotheticalCandidate::Incomplete { + ref parent_head_data_hash, + .. + } => *parent_head_data_hash, + } + } + + fn relay_parent(&self) -> Hash { + match *self { + HypotheticalCandidate::Complete { + ref receipt, + .. + } => receipt.descriptor().relay_parent, + HypotheticalCandidate::Incomplete { + ref relay_parent, + .. + } => *relay_parent, + } + } +} + /// This is a tree of candidates based on some underlying storage of candidates /// and a scope. pub(crate) struct FragmentTree { @@ -477,11 +518,10 @@ impl FragmentTree { /// /// If the candidate is already known, this returns the actual depths where this /// candidate is part of the tree. - pub(crate) fn hypothetical_depths( + pub(crate) fn hypothetical_depths<'a>( &self, hash: CandidateHash, - parent_head_data_hash: Hash, - candidate_relay_parent: Hash, + candidate: HypotheticalCandidate<'a>, ) -> Vec { // if known. if let Some(depths) = self.candidates.get(&hash) { @@ -489,11 +529,12 @@ impl FragmentTree { } // if out of scope. - let candidate_relay_parent_number = + let candidate_relay_parent = candidate.relay_parent(); + let candidate_relay_parent = if self.scope.relay_parent.hash == candidate_relay_parent { - self.scope.relay_parent.number + self.scope.relay_parent.clone() } else if let Some(info) = self.scope.ancestors_by_hash.get(&candidate_relay_parent) { - info.number + info.clone() } else { return Vec::new() }; @@ -503,21 +544,72 @@ impl FragmentTree { // iterate over all nodes < max_depth where parent head-data matches, // relay-parent number is <= candidate, and depth < max_depth. 
- for node in &self.nodes { - if node.depth == max_depth { + let node_pointers = (0..self.nodes.len()).map(NodePointer::Storage); + for parent_pointer in std::iter::once(NodePointer::Root).chain(node_pointers) { + let (modifications, child_depth, earliest_rp) = match parent_pointer { + NodePointer::Root => + (ConstraintModifications::identity(), 0, self.scope.earliest_relay_parent()), + NodePointer::Storage(ptr) => { + let node = &self.nodes[ptr]; + let parent_rp = self + .scope + .ancestor_by_hash(&node.relay_parent()) + .expect("nodes in tree can only contain ancestors within scope; qed"); + + (node.cumulative_modifications.clone(), node.depth + 1, parent_rp) + }, + }; + + if child_depth > max_depth { continue } - if node.fragment.relay_parent().number > candidate_relay_parent_number { + + if earliest_rp.number > candidate_relay_parent.number { continue } - if node.head_data_hash == parent_head_data_hash { - depths.set(node.depth + 1, true); - } - } - // compare against root as well. - if self.scope.base_constraints.required_parent.hash() == parent_head_data_hash { - depths.set(0, true); + let child_constraints = + match self.scope.base_constraints.apply_modifications(&modifications) { + Err(e) => { + gum::debug!( + target: LOG_TARGET, + new_parent_head = ?modifications.required_parent, + err = ?e, + "Failed to apply modifications", + ); + + continue + }, + Ok(c) => c, + }; + + let parent_head_hash = candidate.parent_head_data_hash(); + if parent_head_hash == child_constraints.required_parent.hash() { + // We do additional checks for complete candidates. 
+ if let HypotheticalCandidate::Complete { + ref receipt, + ref persisted_validation_data, + } = candidate { + let prospective_candidate = ProspectiveCandidate { + commitments: Cow::Borrowed(&receipt.commitments), + collator: receipt.descriptor().collator.clone(), + collator_signature: receipt.descriptor().signature.clone(), + persisted_validation_data: persisted_validation_data.clone(), + pov_hash: receipt.descriptor().pov_hash, + validation_code_hash: receipt.descriptor().validation_code_hash, + }; + + if Fragment::new( + candidate_relay_parent.clone(), + child_constraints, + prospective_candidate, + ).is_err() { + continue + } + } + + depths.set(child_depth, true); + } } depths.iter_ones().collect() @@ -673,7 +765,6 @@ impl FragmentTree { let mut cumulative_modifications = modifications.clone(); cumulative_modifications.stack(fragment.constraint_modifications()); - let head_data_hash = fragment.candidate().commitments.head_data.hash(); let node = FragmentNode { parent: parent_pointer, fragment, @@ -681,7 +772,6 @@ impl FragmentTree { depth: child_depth, cumulative_modifications, children: Vec::new(), - head_data_hash, }; self.insert_node(node); @@ -700,7 +790,6 @@ struct FragmentNode { candidate_hash: CandidateHash, depth: usize, cumulative_modifications: ConstraintModifications, - head_data_hash: Hash, children: Vec<(NodePointer, CandidateHash)>, } @@ -1321,8 +1410,10 @@ mod tests { assert_eq!( tree.hypothetical_depths( candidate_a_hash, - HeadData::from(vec![0x0a]).hash(), - relay_parent_a, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + relay_parent: relay_parent_a, + }, ), vec![0, 2, 4], ); @@ -1330,8 +1421,10 @@ mod tests { assert_eq!( tree.hypothetical_depths( candidate_b_hash, - HeadData::from(vec![0x0b]).hash(), - relay_parent_a, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), + relay_parent: relay_parent_a, + }, ), vec![1, 3], ); @@ -1339,8 +1432,10 @@ 
mod tests { assert_eq!( tree.hypothetical_depths( CandidateHash(Hash::repeat_byte(21)), - HeadData::from(vec![0x0a]).hash(), - relay_parent_a, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + relay_parent: relay_parent_a, + }, ), vec![0, 2, 4], ); @@ -1348,10 +1443,71 @@ mod tests { assert_eq!( tree.hypothetical_depths( CandidateHash(Hash::repeat_byte(22)), - HeadData::from(vec![0x0b]).hash(), - relay_parent_a, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), + relay_parent: relay_parent_a, + }, ), vec![1, 3] ); } + + #[test] + fn hypothetical_depths_stricter_on_complete() { + let storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 1000, // watermark is illegal + ); + + let candidate_a_hash = candidate_a.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + assert_eq!( + tree.hypothetical_depths( + candidate_a_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + relay_parent: relay_parent_a, + }, + ), + vec![0], + ); + + assert!( + tree.hypothetical_depths( + candidate_a_hash, + HypotheticalCandidate::Complete { + receipt: Cow::Owned(candidate_a), + persisted_validation_data: pvd_a, + }, + ).is_empty() + ); + } } diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs 
index b7e79e2feffd..812813f900a1 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -480,8 +480,10 @@ fn answer_hypothetical_depths_request( Some(fragment_tree) => { let depths = fragment_tree.hypothetical_depths( request.candidate_hash, - request.parent_head_data_hash, - request.candidate_relay_parent, + crate::fragment_tree::HypotheticalCandidate::Incomplete { + relay_parent: request.candidate_relay_parent, + parent_head_data_hash: request.parent_head_data_hash, + } ); let _ = tx.send(depths); }, From 7960c803d131027ec927c1bc86d1087bf2b9c44a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 2 Nov 2022 21:49:11 -0500 Subject: [PATCH 093/220] remove keep_if_unneeded --- node/core/backing/src/lib.rs | 2 -- .../src/tests/prospective_parachains.rs | 19 +++++++------------ node/core/prospective-parachains/src/lib.rs | 3 +-- node/subsystem-types/src/messages.rs | 6 ------ 4 files changed, 8 insertions(+), 22 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 0c1a3d885fa1..1688732ac0d6 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -1505,8 +1505,6 @@ async fn import_statement( candidate_para: candidate.descriptor().para_id, candidate_receipt: candidate.clone(), persisted_validation_data: pvd.clone(), - // Since this is used during seconding - keep_if_unneeded: false, }, tx, )) diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 0deaf50647d1..cab8f5f9bc9a 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -401,8 +401,7 @@ fn seconding_sanity_check_allowed() { ) if req.candidate_receipt == candidate && req.candidate_para == para_id - && pvd == req.persisted_validation_data - && !req.keep_if_unneeded => { + && pvd == req.persisted_validation_data => { // Any non-empty response 
will do. tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap(); } @@ -540,8 +539,7 @@ fn seconding_sanity_check_disallowed() { ) if req.candidate_receipt == candidate && req.candidate_para == para_id - && pvd == req.persisted_validation_data - && !req.keep_if_unneeded => { + && pvd == req.persisted_validation_data => { // Any non-empty response will do. tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap(); } @@ -733,8 +731,7 @@ fn prospective_parachains_reject_candidate() { ) if req.candidate_receipt == candidate && req.candidate_para == para_id - && pvd == req.persisted_validation_data - && !req.keep_if_unneeded => { + && pvd == req.persisted_validation_data => { // Reject it. tx.send(Vec::new()).unwrap(); } @@ -786,8 +783,7 @@ fn prospective_parachains_reject_candidate() { ) if req.candidate_receipt == candidate && req.candidate_para == para_id - && pvd == req.persisted_validation_data - && !req.keep_if_unneeded => { + && pvd == req.persisted_validation_data => { // Any non-empty response will do. tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap(); } @@ -920,7 +916,7 @@ fn second_multiple_candidates_per_relay_parent() { &req.candidate_receipt == candidate && req.candidate_para == para_id && pvd == req.persisted_validation_data - && !req.keep_if_unneeded => { + => { // Any non-empty response will do. tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); } @@ -1058,8 +1054,7 @@ fn backing_works() { ) if req.candidate_receipt == candidate_a && req.candidate_para == para_id - && pvd == req.persisted_validation_data - && !req.keep_if_unneeded => { + && pvd == req.persisted_validation_data => { // Any non-empty response will do. tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); } @@ -1461,7 +1456,7 @@ fn seconding_sanity_check_occupy_same_depth() { &req.candidate_receipt == candidate && &req.candidate_para == para_id && pvd == req.persisted_validation_data - && !req.keep_if_unneeded => { + => { // Any non-empty response will do. 
tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); } diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 812813f900a1..090201174b09 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -272,7 +272,6 @@ async fn handle_candidate_introduced( candidate_para: para, candidate_receipt: candidate, persisted_validation_data: pvd, - keep_if_unneeded, } = request; // Add the candidate to storage. @@ -327,7 +326,7 @@ async fn handle_candidate_introduced( } } - if !keep_if_unneeded && membership.is_empty() { + if membership.is_empty() { storage.remove_candidate(&candidate_hash); } diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index b05efa0385b2..51fea06aef99 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -992,12 +992,6 @@ pub struct IntroduceCandidateRequest { pub candidate_receipt: CommittedCandidateReceipt, /// The persisted validation data of the candidate. pub persisted_validation_data: PersistedValidationData, - /// Whether to keep the candidate in storage if unable - /// to introduce into any fragment tree. - /// - /// This should be set to `false` in any scenario where the - /// candidate has not already been vetted for spam-prevention. 
- pub keep_if_unneeded: bool, } /// A request for the depths a hypothetical candidate would occupy within From 29d68e0bce8601767244d611c3a63807a73cf5fb Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 2 Nov 2022 21:49:28 -0500 Subject: [PATCH 094/220] fmt --- .../src/tests/prospective_parachains.rs | 60 +++++++++--------- .../src/fragment_tree.rs | 63 +++++++++---------- node/core/prospective-parachains/src/lib.rs | 2 +- .../network/statement-distribution/src/lib.rs | 7 +-- .../src/vstaging/candidates.rs | 9 +-- .../src/vstaging/mod.rs | 38 +++++------ .../src/inclusion_emulator/staging.rs | 26 ++++---- 7 files changed, 97 insertions(+), 108 deletions(-) diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index cab8f5f9bc9a..90a7bf16730d 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -906,21 +906,21 @@ fn second_multiple_candidates_per_relay_parent() { // Prospective parachains are notified. assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( - req, - tx, - ), - ) if - &req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data - => { - // Any non-empty response will do. - tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); - } - ); + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceCandidate( + req, + tx, + ), + ) if + &req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data + => { + // Any non-empty response will do. + tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); + } + ); assert_matches!( virtual_overseer.recv().await, @@ -1446,21 +1446,21 @@ fn seconding_sanity_check_occupy_same_depth() { // Prospective parachains are notified. 
assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( - req, - tx, - ), - ) if - &req.candidate_receipt == candidate - && &req.candidate_para == para_id - && pvd == req.persisted_validation_data - => { - // Any non-empty response will do. - tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); - } - ); + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceCandidate( + req, + tx, + ), + ) if + &req.candidate_receipt == candidate + && &req.candidate_para == para_id + && pvd == req.persisted_validation_data + => { + // Any non-empty response will do. + tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); + } + ); assert_matches!( virtual_overseer.recv().await, diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 97bde9d41a66..3cf3a2eb5a30 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -54,10 +54,12 @@ //! bounded and in practice will not exceed a few thousand at any time. This naive implementation //! will still perform fairly well under these conditions, despite being somewhat wasteful of memory. -use std::borrow::Cow; -use std::collections::{ - hash_map::{Entry, HashMap}, - BTreeMap, HashSet, +use std::{ + borrow::Cow, + collections::{ + hash_map::{Entry, HashMap}, + BTreeMap, HashSet, + }, }; use super::LOG_TARGET; @@ -349,27 +351,18 @@ pub(crate) enum HypotheticalCandidate<'a> { impl<'a> HypotheticalCandidate<'a> { fn parent_head_data_hash(&self) -> Hash { match *self { - HypotheticalCandidate::Complete { - ref persisted_validation_data, - .. - } => persisted_validation_data.parent_head.hash(), - HypotheticalCandidate::Incomplete { - ref parent_head_data_hash, - .. - } => *parent_head_data_hash, + HypotheticalCandidate::Complete { ref persisted_validation_data, .. 
} => + persisted_validation_data.parent_head.hash(), + HypotheticalCandidate::Incomplete { ref parent_head_data_hash, .. } => + *parent_head_data_hash, } } fn relay_parent(&self) -> Hash { match *self { - HypotheticalCandidate::Complete { - ref receipt, - .. - } => receipt.descriptor().relay_parent, - HypotheticalCandidate::Incomplete { - ref relay_parent, - .. - } => *relay_parent, + HypotheticalCandidate::Complete { ref receipt, .. } => + receipt.descriptor().relay_parent, + HypotheticalCandidate::Incomplete { ref relay_parent, .. } => *relay_parent, } } } @@ -530,14 +523,13 @@ impl FragmentTree { // if out of scope. let candidate_relay_parent = candidate.relay_parent(); - let candidate_relay_parent = - if self.scope.relay_parent.hash == candidate_relay_parent { - self.scope.relay_parent.clone() - } else if let Some(info) = self.scope.ancestors_by_hash.get(&candidate_relay_parent) { - info.clone() - } else { - return Vec::new() - }; + let candidate_relay_parent = if self.scope.relay_parent.hash == candidate_relay_parent { + self.scope.relay_parent.clone() + } else if let Some(info) = self.scope.ancestors_by_hash.get(&candidate_relay_parent) { + info.clone() + } else { + return Vec::new() + }; let max_depth = self.scope.max_depth; let mut depths = bitvec![u16, Msb0; 0; max_depth + 1]; @@ -589,7 +581,8 @@ impl FragmentTree { if let HypotheticalCandidate::Complete { ref receipt, ref persisted_validation_data, - } = candidate { + } = candidate + { let prospective_candidate = ProspectiveCandidate { commitments: Cow::Borrowed(&receipt.commitments), collator: receipt.descriptor().collator.clone(), @@ -603,7 +596,9 @@ impl FragmentTree { candidate_relay_parent.clone(), child_constraints, prospective_candidate, - ).is_err() { + ) + .is_err() + { continue } } @@ -1500,14 +1495,14 @@ mod tests { vec![0], ); - assert!( - tree.hypothetical_depths( + assert!(tree + .hypothetical_depths( candidate_a_hash, HypotheticalCandidate::Complete { receipt: Cow::Owned(candidate_a), 
persisted_validation_data: pvd_a, }, - ).is_empty() - ); + ) + .is_empty()); } } diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 090201174b09..9ce22e1d561b 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -482,7 +482,7 @@ fn answer_hypothetical_depths_request( crate::fragment_tree::HypotheticalCandidate::Incomplete { relay_parent: request.candidate_relay_parent, parent_head_data_hash: request.parent_head_data_hash, - } + }, ); let _ = tx.send(depths); }, diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 23438b434988..c44da56adc83 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -266,12 +266,9 @@ impl StatementDistributionSubsystem { // TODO [now]: pass to vstaging, but not if the message is // v1 or the connecting peer is v1. }, - StatementDistributionMessage::Backed { - para_id, - para_head, - } => { + StatementDistributionMessage::Backed { para_id, para_head } => { // TODO [now]: pass to vstaging - } + }, }, } Ok(false) diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index 4e885725da19..29233967f697 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -128,9 +128,7 @@ impl Candidates { }), ); - self.by_parent_hash.entry(parent_hash) - .or_default() - .insert(candidate_hash); + self.by_parent_hash.entry(parent_hash).or_default().insert(candidate_hash); match prev_state { None => None, @@ -179,7 +177,7 @@ impl Candidates { ) { let by_parent_hash = &mut self.by_parent_hash; self.candidates.retain(|c_hash, state| match state { - CandidateState::Confirmed(ref mut c) => { + CandidateState::Confirmed(ref mut c) => if 
!relay_parent_live(&c.relay_parent()) { if let Entry::Occupied(mut e) = by_parent_hash.entry(c.parent_hash) { e.get_mut().remove(c_hash); @@ -193,8 +191,7 @@ impl Candidates { c.importable_under.remove(leaf_hash); } true - } - } + }, CandidateState::Unconfirmed(ref mut c) => { c.claims.retain(|c| relay_parent_live(&c.1.relay_parent)); diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 89b66bcc7282..90ffc9b00aab 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -29,7 +29,7 @@ use polkadot_node_primitives::{ use polkadot_node_subsystem::{ jaeger, messages::{CandidateBackingMessage, NetworkBridgeEvent, NetworkBridgeTxMessage}, - overseer, ActiveLeavesUpdate, ActivatedLeaf, PerLeafSpan, StatementDistributionSenderTrait, + overseer, ActivatedLeaf, ActiveLeavesUpdate, PerLeafSpan, StatementDistributionSenderTrait, }; use polkadot_node_subsystem_util::backing_implicit_view::{FetchError, View as ImplicitView}; use polkadot_primitives::vstaging::{ @@ -322,7 +322,6 @@ pub(crate) async fn handle_active_leaves_update( state: &mut State, update: ActiveLeavesUpdate, ) -> JfyiErrorResult<()> { - if let Some(ref leaf) = update.activated { state .implicit_view @@ -345,19 +344,23 @@ pub(crate) async fn handle_active_leaves_update( // New leaf: fetch info from runtime API and initialize // `per_relay_parent`. - let session_index = - polkadot_node_subsystem_util::request_session_index_for_child(*new_relay_parent, ctx.sender()) - .await - .await - .map_err(JfyiError::RuntimeApiUnavailable)? - .map_err(JfyiError::FetchSessionIndex)?; - - let availability_cores = - polkadot_node_subsystem_util::request_availability_cores(*new_relay_parent, ctx.sender()) - .await - .await - .map_err(JfyiError::RuntimeApiUnavailable)? 
- .map_err(JfyiError::FetchAvailabilityCores)?; + let session_index = polkadot_node_subsystem_util::request_session_index_for_child( + *new_relay_parent, + ctx.sender(), + ) + .await + .await + .map_err(JfyiError::RuntimeApiUnavailable)? + .map_err(JfyiError::FetchSessionIndex)?; + + let availability_cores = polkadot_node_subsystem_util::request_availability_cores( + *new_relay_parent, + ctx.sender(), + ) + .await + .await + .map_err(JfyiError::RuntimeApiUnavailable)? + .map_err(JfyiError::FetchAvailabilityCores)?; if !state.per_session.contains_key(&session_index) { let session_info = polkadot_node_subsystem_util::request_session_info( @@ -470,10 +473,7 @@ fn handle_deactivate_leaves(state: &mut State, leaves: &[Hash]) { state.per_relay_parent.retain(|r, x| relay_parents.contains(r)); // TODO [now]: clean up requests - state.candidates.on_deactivate_leaves( - &leaves, - |h| relay_parents.contains(h), - ); + state.candidates.on_deactivate_leaves(&leaves, |h| relay_parents.contains(h)); // clean up sessions based on everything remaining. let sessions: HashSet<_> = state.per_relay_parent.values().map(|r| r.session).collect(); diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 6192d8d60060..ac41768f6bca 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -118,8 +118,10 @@ use polkadot_primitives::vstaging::{ Constraints as PrimitiveConstraints, Hash, HeadData, Id as ParaId, PersistedValidationData, UpgradeRestriction, ValidationCodeHash, }; -use std::borrow::{Borrow, Cow}; -use std::collections::HashMap; +use std::{ + borrow::{Borrow, Cow}, + collections::HashMap, +}; /// Constraints on inbound HRMP channels. 
#[derive(Debug, Clone, PartialEq)] @@ -541,10 +543,7 @@ pub struct ProspectiveCandidate<'a> { impl<'a> ProspectiveCandidate<'a> { fn into_owned(self) -> ProspectiveCandidate<'static> { - ProspectiveCandidate { - commitments: Cow::Owned(self.commitments.into_owned()), - ..self - } + ProspectiveCandidate { commitments: Cow::Owned(self.commitments.into_owned()), ..self } } /// Partially clone the prospective candidate, but borrow the @@ -712,10 +711,7 @@ impl<'a> Fragment<'a> { /// Convert the fragment into an owned variant. pub fn into_owned(self) -> Fragment<'static> { - Fragment { - candidate: self.candidate.into_owned(), - ..self - } + Fragment { candidate: self.candidate.into_owned(), ..self } } /// Validate this fragment against some set of constraints @@ -1281,9 +1277,13 @@ mod tests { let max_hrmp = constraints.max_hrmp_num_per_candidate; - candidate.commitments_mut().horizontal_messages.extend((0..max_hrmp + 1).map(|i| { - OutboundHrmpMessage { recipient: ParaId::from(i as u32), data: vec![1, 2, 3] } - })); + candidate + .commitments_mut() + .horizontal_messages + .extend((0..max_hrmp + 1).map(|i| OutboundHrmpMessage { + recipient: ParaId::from(i as u32), + data: vec![1, 2, 3], + })); assert_eq!( Fragment::new(relay_parent, constraints, candidate), From 6fa7b802d406d217243a8d5009419d5b7df31b4f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 3 Nov 2022 18:26:48 -0500 Subject: [PATCH 095/220] implement more general HypotheticalFrontier --- .../src/fragment_tree.rs | 8 +-- node/core/prospective-parachains/src/lib.rs | 68 +++++++++++++++++- node/subsystem-types/src/messages.rs | 71 +++++++++++++++++++ 3 files changed, 142 insertions(+), 5 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 3cf3a2eb5a30..0ba5d7daa554 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -340,7 +340,7 @@ 
enum NodePointer { pub(crate) enum HypotheticalCandidate<'a> { Complete { receipt: Cow<'a, CommittedCandidateReceipt>, - persisted_validation_data: PersistedValidationData, + persisted_validation_data: Cow<'a, PersistedValidationData>, }, Incomplete { relay_parent: Hash, @@ -352,7 +352,7 @@ impl<'a> HypotheticalCandidate<'a> { fn parent_head_data_hash(&self) -> Hash { match *self { HypotheticalCandidate::Complete { ref persisted_validation_data, .. } => - persisted_validation_data.parent_head.hash(), + persisted_validation_data.as_ref().parent_head.hash(), HypotheticalCandidate::Incomplete { ref parent_head_data_hash, .. } => *parent_head_data_hash, } @@ -587,7 +587,7 @@ impl FragmentTree { commitments: Cow::Borrowed(&receipt.commitments), collator: receipt.descriptor().collator.clone(), collator_signature: receipt.descriptor().signature.clone(), - persisted_validation_data: persisted_validation_data.clone(), + persisted_validation_data: persisted_validation_data.as_ref().clone(), pov_hash: receipt.descriptor().pov_hash, validation_code_hash: receipt.descriptor().validation_code_hash, }; @@ -1500,7 +1500,7 @@ mod tests { candidate_a_hash, HypotheticalCandidate::Complete { receipt: Cow::Owned(candidate_a), - persisted_validation_data: pvd_a, + persisted_validation_data: Cow::Owned(pvd_a), }, ) .is_empty()); diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 9ce22e1d561b..9cb6aae120be 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -27,13 +27,15 @@ //! This also handles concerns such as the relay-chain being forkful, //! session changes, predicting validator group assignments. 
+use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::{ - ChainApiMessage, FragmentTreeMembership, HypotheticalDepthRequest, + ChainApiMessage, FragmentTreeMembership, HypotheticalDepthRequest, HypotheticalFrontierRequest, + HypotheticalCandidate, IntroduceCandidateRequest, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage, RuntimeApiRequest, }, @@ -137,6 +139,8 @@ async fn run_iteration(ctx: &mut Context, view: &mut View) -> Result<() ) => answer_get_backable_candidate(&view, relay_parent, para, required_path, tx), ProspectiveParachainsMessage::GetHypotheticalDepth(request, tx) => answer_hypothetical_depths_request(&view, request, tx), + ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx) => + answer_hypothetical_frontier_request(&view, request, tx), ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) => answer_tree_membership_request(&view, para, candidate, tx), ProspectiveParachainsMessage::GetMinimumRelayParents(relay_parent, tx) => @@ -492,6 +496,68 @@ fn answer_hypothetical_depths_request( } } +fn answer_hypothetical_frontier_request( + view: &View, + request: HypotheticalFrontierRequest, + tx: oneshot::Sender>, +) { + let mut response = Vec::with_capacity(request.candidates.len()); + for candidate in request.candidates { + response.push((candidate, Vec::new())); + } + + let required_active_leaf = request.fragment_tree_relay_parent; + for (active_leaf, leaf_view) in view + .active_leaves + .iter() + .filter(|(h, _)| required_active_leaf.as_ref().map_or(true, |x| h == &x)) + { + for &mut (ref c, ref mut membership) in &mut response { + let fragment_tree = match leaf_view.fragment_trees.get(&c.candidate_para()) { + None => continue, + Some(f) => f, + }; + + let (c_hash, hypothetical) = match c { + HypotheticalCandidate::Complete { + candidate_hash, + receipt, + persisted_validation_data, + } => ( + 
*candidate_hash, + fragment_tree::HypotheticalCandidate::Complete { + receipt: Cow::Borrowed(&*receipt), + persisted_validation_data: Cow::Borrowed(&*persisted_validation_data), + }, + ), + HypotheticalCandidate::Incomplete { + candidate_hash, + parent_head_data_hash, + candidate_relay_parent, + .. + } => ( + *candidate_hash, + fragment_tree::HypotheticalCandidate::Incomplete { + relay_parent: *candidate_relay_parent, + parent_head_data_hash: *parent_head_data_hash, + }, + ), + }; + + let depths = fragment_tree.hypothetical_depths( + c_hash, + hypothetical, + ); + + if !depths.is_empty() { + membership.push((*active_leaf, depths)); + } + } + } + + let _ = tx.send(response); +} + fn fragment_tree_membership( active_leaves: &HashMap, para: ParaId, diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 51fea06aef99..fc53699a5ec4 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -994,10 +994,72 @@ pub struct IntroduceCandidateRequest { pub persisted_validation_data: PersistedValidationData, } +/// A hypothetical candidate to be evaluated for frontier membership +/// in the prospective parachains subsystem. +/// +/// Hypothetical candidates are either complete or incomplete. +/// Complete candidates have already had their (potentially heavy) +/// candidate receipt fetched, while incomplete candidates are simply +/// claims about properties that a fetched candidate would have. +/// +/// Complete candidates can be evaluated more strictly than incomplete candidates. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum HypotheticalCandidate { + /// A complete candidate. + Complete { + /// The hash of the candidate. + candidate_hash: CandidateHash, + /// The receipt of the candidate. + receipt: Arc, + /// The persisted validation data of the candidate. + persisted_validation_data: PersistedValidationData, + }, + /// An incomplete candidate. 
+ Incomplete { + /// The claimed hash of the candidate. + candidate_hash: CandidateHash, + /// The claimed para-ID of the candidate. + candidate_para: ParaId, + /// The claimed head-data hash of the candidate. + parent_head_data_hash: Hash, + /// The claimed relay parent of the candidate. + candidate_relay_parent: Hash, + }, +} + +impl HypotheticalCandidate { + /// Get the `ParaId` of the hypothetical candidate. + pub fn candidate_para(&self) -> ParaId { + match *self { + HypotheticalCandidate::Complete { + ref receipt, + .. + } => receipt.descriptor().para_id, + HypotheticalCandidate::Incomplete { + candidate_para, + .. + } => candidate_para, + } + } +} + +/// Request specifying which candidates are either already included +/// or might be included in the hypothetical frontier of fragment trees +/// under a given active leaf. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct HypotheticalFrontierRequest { + /// Candidates, in arbitrary order, which should be checked for + /// possible membership in fragment trees. + pub candidates: Vec, + /// Either a specific fragment tree to check, otherwise all. + pub fragment_tree_relay_parent: Option, +} + /// A request for the depths a hypothetical candidate would occupy within /// some fragment tree. Note that this is not an absolute indication of whether /// a candidate can be added to a fragment tree, as the commitments are not /// considered in this request. +// TODO [now]: file issue making this obsolete in favor of `HypotheticalFrontierRequest` #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub struct HypotheticalDepthRequest { /// The hash of the potential candidate. @@ -1057,6 +1119,15 @@ pub enum ProspectiveParachainsMessage { /// Returns an empty vector either if there is no such depth or the fragment tree relay-parent /// is unknown. 
GetHypotheticalDepth(HypotheticalDepthRequest, oneshot::Sender>), + /// Get the hypothetical frontier membership of candidates with the given properties + /// under the specified active leaves' fragment trees. + /// + /// For any candidate which is already known, this returns the depths the candidate + /// occupies. + GetHypotheticalFrontier( + HypotheticalFrontierRequest, + oneshot::Sender>, + ), /// Get the membership of the candidate in all fragment trees. GetTreeMembership(ParaId, CandidateHash, oneshot::Sender), /// Get the minimum accepted relay-parent number for each para in the fragment tree From 73ce578b22831ffc8c7cc8418f527e8b27476185 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 4 Nov 2022 17:14:36 -0500 Subject: [PATCH 096/220] fmt, cleanup --- node/core/prospective-parachains/src/lib.rs | 18 ++++++++---------- node/network/protocol/src/lib.rs | 4 ++++ node/subsystem-types/src/messages.rs | 10 ++-------- 3 files changed, 14 insertions(+), 18 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 9cb6aae120be..fec69ba4b9b4 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -27,17 +27,18 @@ //! This also handles concerns such as the relay-chain being forkful, //! session changes, predicting validator group assignments. 
-use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet}, +}; use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::{ - ChainApiMessage, FragmentTreeMembership, HypotheticalDepthRequest, HypotheticalFrontierRequest, - HypotheticalCandidate, - IntroduceCandidateRequest, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, - RuntimeApiMessage, RuntimeApiRequest, + ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, HypotheticalDepthRequest, + HypotheticalFrontierRequest, IntroduceCandidateRequest, ProspectiveParachainsMessage, + ProspectiveValidationDataRequest, RuntimeApiMessage, RuntimeApiRequest, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; @@ -544,10 +545,7 @@ fn answer_hypothetical_frontier_request( ), }; - let depths = fragment_tree.hypothetical_depths( - c_hash, - hypothetical, - ); + let depths = fragment_tree.hypothetical_depths(c_hash, hypothetical); if !depths.is_empty() { membership.push((*active_leaf, depths)); diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 7ac9ffde6693..0c28080879d6 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -619,6 +619,10 @@ pub mod vstaging { pub candidate_hash: CandidateHash, /// The group index backing the candidate at the relay-parent. pub group_index: GroupIndex, + /// The para ID of the candidate. It is illegal for this to + /// be a para ID which is not assigned to the group indicated + /// in this manifest. + pub para_id: ParaId, /// The head-data corresponding to the candidate. 
pub parent_head_data_hash: Hash, /// A bitfield which indicates which validators in the para's diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index fc53699a5ec4..1d61cbe84801 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -1031,14 +1031,8 @@ impl HypotheticalCandidate { /// Get the `ParaId` of the hypothetical candidate. pub fn candidate_para(&self) -> ParaId { match *self { - HypotheticalCandidate::Complete { - ref receipt, - .. - } => receipt.descriptor().para_id, - HypotheticalCandidate::Incomplete { - candidate_para, - .. - } => candidate_para, + HypotheticalCandidate::Complete { ref receipt, .. } => receipt.descriptor().para_id, + HypotheticalCandidate::Incomplete { candidate_para, .. } => candidate_para, } } } From 99dad29eb04ad61918d23f55a22d4c0aebee9513 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 4 Nov 2022 17:14:46 -0500 Subject: [PATCH 097/220] add a by_parent_hash index to candidate tracker --- .../src/vstaging/candidates.rs | 149 +++++++++++++++--- 1 file changed, 128 insertions(+), 21 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index 29233967f697..9b66cbf6904c 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -42,7 +42,7 @@ use std::collections::{ #[derive(Default)] pub struct Candidates { candidates: HashMap, - by_parent_hash: HashMap>, + by_parent: HashMap<(Hash, ParaId), HashSet>, } impl Candidates { @@ -65,10 +65,14 @@ impl Candidates { candidate_hash: CandidateHash, claimed_relay_parent: Hash, claimed_group_index: GroupIndex, - claimed_parent_hash: Option, + claimed_parent_hash_and_id: Option<(Hash, ParaId)>, ) -> Result<(), BadAdvertisement> { let entry = self.candidates.entry(candidate_hash).or_insert_with(|| { - 
CandidateState::Unconfirmed(UnconfirmedCandidate { claims: Vec::new() }) + CandidateState::Unconfirmed(UnconfirmedCandidate { + claims: Vec::new(), + parent_claims: HashMap::new(), + unconfirmed_importable_under: HashSet::new(), + }) }); match entry { @@ -81,10 +85,14 @@ impl Candidates { return Err(BadAdvertisement) } - if let Some(claimed_parent_hash) = claimed_parent_hash { + if let Some((claimed_parent_hash, claimed_id)) = claimed_parent_hash_and_id { if c.parent_hash() != claimed_parent_hash { return Err(BadAdvertisement) } + + if c.para_id() != claimed_id { + return Err(BadAdvertisement) + } } }, CandidateState::Unconfirmed(ref mut c) => { @@ -93,9 +101,13 @@ impl Candidates { CandidateClaims { relay_parent: claimed_relay_parent, group_index: claimed_group_index, - parent_hash: claimed_parent_hash, + parent_hash_and_id: claimed_parent_hash_and_id, }, ); + + if let Some(parent_claims) = claimed_parent_hash_and_id { + self.by_parent.entry(parent_claims).or_default().insert(candidate_hash); + } }, } @@ -116,6 +128,7 @@ impl Candidates { ) -> Option { let parent_hash = persisted_validation_data.parent_head.hash(); let relay_parent = candidate_receipt.descriptor().relay_parent; + let para_id = candidate_receipt.descriptor().para_id; let prev_state = self.candidates.insert( candidate_hash, @@ -128,7 +141,7 @@ impl Candidates { }), ); - self.by_parent_hash.entry(parent_hash).or_default().insert(candidate_hash); + self.by_parent.entry((parent_hash, para_id)).or_default().insert(candidate_hash); match prev_state { None => None, @@ -140,7 +153,22 @@ impl Candidates { }; for (peer, claims) in u.claims { - if claims.check(relay_parent, assigned_group, parent_hash) { + // Update the by-parent-hash index not to store any outdated + // claims. 
+ if let Some((claimed_parent_hash, claimed_id)) = claims.parent_hash_and_id { + if claimed_parent_hash != parent_hash || claimed_id != para_id { + if let Entry::Occupied(mut e) = + self.by_parent.entry((claimed_parent_hash, claimed_id)) + { + e.get_mut().remove(&candidate_hash); + if e.get().is_empty() { + e.remove(); + } + } + } + } + + if claims.check(relay_parent, assigned_group, parent_hash, para_id) { reckoning.correct.insert(peer); } else { reckoning.incorrect.insert(peer); @@ -175,16 +203,19 @@ impl Candidates { leaves: &[Hash], relay_parent_live: impl Fn(&Hash) -> bool, ) { - let by_parent_hash = &mut self.by_parent_hash; + let by_parent = &mut self.by_parent; + let mut remove_parent_claims = |c_hash, parent_hash, id| { + if let Entry::Occupied(mut e) = by_parent.entry((parent_hash, id)) { + e.get_mut().remove(&c_hash); + if e.get().is_empty() { + e.remove(); + } + } + }; self.candidates.retain(|c_hash, state| match state { CandidateState::Confirmed(ref mut c) => if !relay_parent_live(&c.relay_parent()) { - if let Entry::Occupied(mut e) = by_parent_hash.entry(c.parent_hash) { - e.get_mut().remove(c_hash); - if e.get().is_empty() { - e.remove(); - } - } + remove_parent_claims(*c_hash, c.parent_hash(), c.para_id()); false } else { for leaf_hash in leaves { @@ -193,9 +224,12 @@ impl Candidates { true }, CandidateState::Unconfirmed(ref mut c) => { - c.claims.retain(|c| relay_parent_live(&c.1.relay_parent)); - - !c.claims.is_empty() + c.on_deactivate_leaves( + leaves, + |parent_hash, id| remove_parent_claims(*c_hash, parent_hash, id), + &relay_parent_live, + ); + c.has_claims() }, }) } @@ -226,24 +260,41 @@ struct CandidateClaims { relay_parent: Hash, /// The group index assigned to this candidate. group_index: GroupIndex, - /// The hash of the parent head-data. This is optional, + /// The hash of the parent head-data and the ParaId. This is optional, /// as only some types of advertisements include this data. 
- parent_hash: Option, + parent_hash_and_id: Option<(Hash, ParaId)>, } impl CandidateClaims { - fn check(&self, relay_parent: Hash, group_index: GroupIndex, parent_hash: Hash) -> bool { + fn check( + &self, + relay_parent: Hash, + group_index: GroupIndex, + parent_hash: Hash, + para_id: ParaId, + ) -> bool { self.relay_parent == relay_parent && self.group_index == group_index && - self.parent_hash.map_or(true, |p| p == parent_hash) + self.parent_hash_and_id.map_or(true, |p| p == (parent_hash, para_id)) } } +// properties of an unconfirmed but hypothetically importable candidate. +#[derive(Hash, PartialEq, Eq)] +struct UnconfirmedImportable { + relay_parent: Hash, + parent_hash: Hash, + para_id: ParaId, +} + // An unconfirmed candidate may have have been advertised under // multiple identifiers. We track here, on the basis of unique identifier, // the peers which advertised each candidate in a specific way. struct UnconfirmedCandidate { claims: Vec<(PeerId, CandidateClaims)>, + // ref-counted + parent_claims: HashMap<(Hash, ParaId), usize>, + unconfirmed_importable_under: HashSet<(Hash, UnconfirmedImportable)>, } impl UnconfirmedCandidate { @@ -253,8 +304,51 @@ impl UnconfirmedCandidate { // each peer will be able to announce the same candidate about 1 time per live relay-parent, // but in doing so it limits the amount of other candidates it can advertise. on balance, // memory consumption is bounded in the same way. 
+ if let Some(parent_claims) = claims.parent_hash_and_id { + *self.parent_claims.entry(parent_claims).or_default() += 1; + } self.claims.push((peer, claims)); } + + fn note_maybe_importable_under( + &mut self, + active_leaf: Hash, + unconfirmed_importable: UnconfirmedImportable, + ) { + self.unconfirmed_importable_under.insert((active_leaf, unconfirmed_importable)); + } + + fn on_deactivate_leaves( + &mut self, + leaves: &[Hash], + mut remove_parent_index: impl FnMut(Hash, ParaId), + relay_parent_live: impl Fn(&Hash) -> bool, + ) { + self.claims.retain(|c| { + if relay_parent_live(&c.1.relay_parent) { + true + } else { + if let Some(parent_claims) = c.1.parent_hash_and_id { + if let Entry::Occupied(mut e) = self.parent_claims.entry(parent_claims) { + *e.get_mut() -= 1; + if *e.get() == 0 { + remove_parent_index(parent_claims.0, parent_claims.1); + e.remove(); + } + } + } + + false + } + }); + + self.unconfirmed_importable_under + .retain(|(l, props)| leaves.contains(l) && relay_parent_live(&props.relay_parent)); + } + + fn has_claims(&self) -> bool { + !self.claims.is_empty() + } } /// A confirmed candidate. @@ -294,3 +388,16 @@ impl ConfirmedCandidate { self.parent_hash } } + +#[cfg(test)] +mod tests { + use super::*; + + // TODO [now]: test that inserting unconfirmed rejects if claims are + // incomptable. 
+ + // TODO [now]: test that confirming correctly maintains the parent hash index + + // TODO [now]: test that pruning unconfirmed claims correctly maintains the parent hash + // index +} From e04caf4e6bbd3cf5adeeb43f845f167a430bd7c9 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 7 Nov 2022 15:26:06 -0600 Subject: [PATCH 098/220] more framework for future code --- .../src/vstaging/mod.rs | 91 +++++++++++++++++-- .../src/vstaging/statement_store.rs | 2 + 2 files changed, 83 insertions(+), 10 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 90ffc9b00aab..5f71559fd733 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -332,10 +332,10 @@ pub(crate) async fn handle_active_leaves_update( handle_deactivate_leaves(state, &update.deactivated[..]); - if let Some(ref leaf) = update.activated { - // TODO [now]: determine which candidates are importable under the given - // active leaf - } + let leaf = match update.activated { + Some(l) => l, + None => return Ok(()), + }; for new_relay_parent in state.implicit_view.all_allowed_relay_parents() { if state.per_relay_parent.contains_key(new_relay_parent) { @@ -414,11 +414,18 @@ pub(crate) async fn handle_active_leaves_update( session: session_index, }, ); - - // TODO [now]: update peers which have the leaf in their view. - // update their implicit view. send any messages accordingly. } + // TODO [now]: determine which candidates are importable under the given + // active leaf + new_leaf_fragment_tree_updates( + ctx, + leaf.hash, + ).await; + + // TODO [now]: update peers which have the leaf in their view. + // update their implicit view. send any messages accordingly. 
+ Ok(()) } @@ -998,9 +1005,15 @@ async fn handle_incoming_statement( if was_fresh { report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT_FIRST).await; - // both of the below probably in some shared function. - // TODO [now]: circulate the statement - // TODO [now]: import the statement into backing if we can. + // both of the below probably in some shared function. + // TODO [now]: circulate the statement + // TODO [now]: import the statement into backing if we can. + // If the candidate is confirmed and statements are importable, + // we send the statement to backing either if + // a) it is a candidate from the cluster + // b) it is a candidate from the grid and it is backed + // + // We always circulate statements at this point. } else { report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT).await; } @@ -1096,3 +1109,61 @@ fn handle_grid_statement( Ok(checked_statement) } + +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn new_leaf_fragment_tree_updates( + ctx: &mut Context, + leaf_hash: Hash, +) { + // TODO [now] + // 1. get hypothetical candidates + // 2. find out which are in the frontier + // 3. note that they are + // 4. for unconfirmed candidates, send requests to all given peers. + // note that all unconfirmed hypothetical candidates are from the grid + // 5. for confirmed candidates, if the candidate has enough statements to + // back, send all statements which are new to backing. Also, send + // backed candidate manifests to peers with the relay parent in their view +} + +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn new_backed_fragment_tree_updates( + ctx: &mut Context, + para_id: ParaId, + head_data_hash: Hash, +) { + // TODO [now] + // 1. get hypothetical candidates + // 2. find out which are in the frontier + // 3. note that they are + // 4. schedule requests/import statements accordingly. 
+} + +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn handle_incoming_manifest( + ctx: &mut Context, + state: &mut State, + peer: PeerId, + manifest: net_protocol::vstaging::BackedCandidateManifest, +) { + // TODO [now]: + // 1. sanity checks: relay-parent in state, para ID matches group index, + // 2. sanity checks: peer is validator, bitvec size, import into grid tracker + // 3. if accepted by grid, insert as unconfirmed. + // 4. if already confirmed, acknowledge candidate + // 5. if already unconfirmed, add request entry + // 6. if fresh unconfirmed, determine whether it's in the hypothetical + // frontier, update candidates wrapper, add request entry if so. +} + +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn handle_cluster_newly_backed( + ctx: &mut Context, + state: &mut State, + relay_parent: Hash, + candidate_hash: CandidateHash, +) { + // TODO [now] + // 1. for confirmed & importable candidates only + // 2. send advertisements along the grid +} diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index d2d980480b8a..e4d1344df70d 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -76,6 +76,8 @@ impl StatementStore { /// Insert a statement. Returns `true` if was not known already, `false` if it was. /// Ignores statements by unknown validators and returns an error. // TODO [now]: perhaps reject over-seconded statements. + // TODO [now]: accept a flag indicating whether the statement comes + // from backing. 
pub fn insert( &mut self, groups: &Groups, From 7944c31f2aff6a187bfd09b5814a8548ee92d0d6 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 7 Nov 2022 15:26:21 -0600 Subject: [PATCH 099/220] utilities for getting all hypothetical candidates for frontier --- .../src/vstaging/candidates.rs | 128 +++++++++++++++++- 1 file changed, 122 insertions(+), 6 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index 9b66cbf6904c..8de2dac89b83 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -26,6 +26,7 @@ //! and punish them accordingly. use polkadot_node_network_protocol::PeerId; +use polkadot_node_subsystem::messages::HypotheticalCandidate; use polkadot_primitives::vstaging::{ CandidateHash, CommittedCandidateReceipt, GroupIndex, Hash, Id as ParaId, PersistedValidationData, @@ -35,6 +36,7 @@ use std::collections::{ hash_map::{Entry, HashMap}, HashSet, }; +use std::sync::Arc; /// A tracker for all known candidates in the view. /// @@ -133,7 +135,7 @@ impl Candidates { let prev_state = self.candidates.insert( candidate_hash, CandidateState::Confirmed(ConfirmedCandidate { - receipt: candidate_receipt, + receipt: Arc::new(candidate_receipt), persisted_validation_data, assigned_group, parent_hash, @@ -196,6 +198,64 @@ impl Candidates { } } + /// Whether statements from a candidate are importable. + /// + /// This is only true when the candidate is known, confirmed, + /// and is importable in a fragment tree. + pub fn is_importable(&self, candidate_hash: &CandidateHash) -> bool { + self.get_confirmed(candidate_hash) + .map_or(false, |c| c.is_importable(None)) + } + + /// Get all hypothetical candidates which should be tested + /// for inclusion in the frontier. 
+ /// + /// Provide optional parent parablock information to filter hypotheticals to only + /// potential children of that parent. + pub fn frontier_hypotheticals( + &self, + parent: Option<(Hash, ParaId)>, + ) -> Vec { + fn extend_hypotheticals<'a>( + v: &mut Vec, + i: impl IntoIterator, + maybe_required_parent: Option<(Hash, ParaId)>, + ) { + for (c_hash, candidate) in i { + match candidate { + CandidateState::Unconfirmed(u) => u.extend_hypotheticals( + *c_hash, + v, + maybe_required_parent, + ), + CandidateState::Confirmed(c) => v.push(c.to_hypothetical(*c_hash)), + } + } + } + + let mut v = Vec::new(); + if let Some(parent) = parent { + let maybe_children = self.by_parent.get(&parent); + let i = maybe_children + .into_iter() + .flat_map(|c| c) + .filter_map(|c_hash| self.candidates.get_key_value(c_hash)); + + extend_hypotheticals( + &mut v, + i, + Some(parent), + ); + } else { + extend_hypotheticals( + &mut v, + self.candidates.iter(), + None, + ); + } + v + } + /// Prune all candidates according to the relay-parent predicate /// provided. pub fn on_deactivate_leaves( @@ -293,7 +353,7 @@ struct UnconfirmedImportable { struct UnconfirmedCandidate { claims: Vec<(PeerId, CandidateClaims)>, // ref-counted - parent_claims: HashMap<(Hash, ParaId), usize>, + parent_claims: HashMap<(Hash, ParaId), Vec<(Hash, usize)>>, unconfirmed_importable_under: HashSet<(Hash, UnconfirmedImportable)>, } @@ -305,7 +365,11 @@ impl UnconfirmedCandidate { // but in doing so it limits the amount of other candidates it can advertise. on balance, // memory consumption is bounded in the same way. 
if let Some(parent_claims) = claims.parent_hash_and_id { - *self.parent_claims.entry(parent_claims).or_default() += 1; + let sub_claims = self.parent_claims.entry(parent_claims).or_default(); + match sub_claims.iter().position(|x| x.0 == claims.relay_parent) { + Some(p) => sub_claims[p].1 += 1, + None => sub_claims.push((claims.relay_parent, 1)), + } } self.claims.push((peer, claims)); } @@ -330,8 +394,15 @@ impl UnconfirmedCandidate { } else { if let Some(parent_claims) = c.1.parent_hash_and_id { if let Entry::Occupied(mut e) = self.parent_claims.entry(parent_claims) { - *e.get_mut() -= 1; - if *e.get() == 0 { + if let Some(p) = e.get().iter().position(|x| x.0 == c.1.relay_parent) { + let mut sub_claims = e.get_mut(); + sub_claims[p].1 -= 1; + if sub_claims[p].1 == 0 { + sub_claims.remove(p); + } + }; + + if e.get().is_empty() { remove_parent_index(parent_claims.0, parent_claims.1); e.remove(); } @@ -346,6 +417,43 @@ impl UnconfirmedCandidate { .retain(|(l, props)| leaves.contains(l) && relay_parent_live(&props.relay_parent)); } + fn extend_hypotheticals( + &self, + candidate_hash: CandidateHash, + v: &mut Vec, + required_parent: Option<(Hash, ParaId)> + ) { + fn extend_hypotheticals_inner<'a>( + candidate_hash: CandidateHash, + v: &mut Vec, + i: impl IntoIterator)>, + ) { + for ((parent_head_hash, para_id), possible_relay_parents) in i { + for (relay_parent, _rc) in possible_relay_parents { + v.push(HypotheticalCandidate::Incomplete { + candidate_hash, + candidate_para: *para_id, + parent_head_data_hash: *parent_head_hash, + candidate_relay_parent: *relay_parent, + }); + } + } + } + + match required_parent { + Some(parent) => extend_hypotheticals_inner( + candidate_hash, + v, + self.parent_claims.get_key_value(&parent), + ), + None => extend_hypotheticals_inner( + candidate_hash, + v, + self.parent_claims.iter(), + ), + } + } + fn has_claims(&self) -> bool { !self.claims.is_empty() } @@ -353,7 +461,7 @@ impl UnconfirmedCandidate { /// A confirmed candidate. 
pub struct ConfirmedCandidate { - receipt: CommittedCandidateReceipt, + receipt: Arc, persisted_validation_data: PersistedValidationData, assigned_group: GroupIndex, parent_hash: Hash, @@ -387,6 +495,14 @@ impl ConfirmedCandidate { fn parent_hash(&self) -> Hash { self.parent_hash } + + fn to_hypothetical(&self, candidate_hash: CandidateHash) -> HypotheticalCandidate { + HypotheticalCandidate::Complete { + candidate_hash, + receipt: self.receipt.clone(), + persisted_validation_data: self.persisted_validation_data.clone() + } + } } #[cfg(test)] From 7e99ff29ac0a5ceabac460878ae806ea5bd6c004 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 7 Nov 2022 17:23:13 -0600 Subject: [PATCH 100/220] track origin in statement store --- .../src/vstaging/mod.rs | 14 ++++-- .../src/vstaging/statement_store.rs | 45 ++++++++++++++++--- 2 files changed, 50 insertions(+), 9 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 5f71559fd733..1e9ca045711f 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -56,7 +56,7 @@ use cluster::{Accept as ClusterAccept, ClusterTracker, RejectIncoming as Cluster use grid::{GridTracker, ManifestSummary, StatementFilter}; use groups::Groups; use requests::RequestManager; -use statement_store::StatementStore; +use statement_store::{StatementStore, StatementOrigin}; mod candidates; mod cluster; @@ -554,7 +554,11 @@ pub(crate) async fn share_local_statement( match per_relay_parent .statement_store - .insert(&per_session.groups, compact_statement.clone()) + .insert( + &per_session.groups, + compact_statement.clone(), + StatementOrigin::Local, + ) { Ok(false) | Err(_) => { gum::warn!( @@ -985,7 +989,11 @@ async fn handle_incoming_statement( } let was_fresh = - match per_relay_parent.statement_store.insert(&per_session.groups, checked_statement) { + match 
per_relay_parent.statement_store.insert( + &per_session.groups, + checked_statement, + StatementOrigin::Remote, + ) { Err(_) => { // sanity: should never happen. gum::warn!( diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index e4d1344df70d..a3fcd6faffc8 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -31,6 +31,28 @@ use std::collections::hash_map::{Entry as HEntry, HashMap}; use super::groups::Groups; +/// Possible origins of a statement. +pub enum StatementOrigin { + /// The statement originated locally. + Local, + /// The statement originated from a remote peer. + Remote, +} + +impl StatementOrigin { + fn is_local(&self) -> bool { + match *self { + StatementOrigin::Local => true, + StatementOrigin::Remote => false, + } + } +} + +struct StoredStatement { + statement: SignedStatement, + known_by_backing: bool, +} + /// Storage for statements. Intended to be used for statements signed under /// the same relay-parent. See module docs for more details. pub struct StatementStore { @@ -40,7 +62,7 @@ pub struct StatementStore { // producing statements about a candidate, until we have the candidate receipt // itself, we can't tell which group that is. group_statements: HashMap<(GroupIndex, CandidateHash), GroupStatements>, - known_statements: HashMap, + known_statements: HashMap, } impl StatementStore { @@ -76,12 +98,11 @@ impl StatementStore { /// Insert a statement. Returns `true` if was not known already, `false` if it was. /// Ignores statements by unknown validators and returns an error. // TODO [now]: perhaps reject over-seconded statements. - // TODO [now]: accept a flag indicating whether the statement comes - // from backing. 
pub fn insert( &mut self, groups: &Groups, statement: SignedStatement, + origin: StatementOrigin, ) -> Result { let validator_index = statement.validator_index(); @@ -93,9 +114,18 @@ impl StatementStore { let compact = statement.payload().clone(); let fingerprint = (validator_index, compact.clone()); match self.known_statements.entry(fingerprint) { - HEntry::Occupied(_) => return Ok(false), + HEntry::Occupied(mut e) => { + if let StatementOrigin::Local = origin { + e.get_mut().known_by_backing = true; + } + + return Ok(false); + } HEntry::Vacant(mut e) => { - e.insert(statement); + e.insert(StoredStatement { + statement, + known_by_backing: origin.is_local(), + }); }, } @@ -166,6 +196,7 @@ impl StatementStore { .flat_map(|v| v.iter_ones()) .filter_map(move |i| group_validators.as_ref().and_then(|g| g.get(i))) .filter_map(move |v| self.known_statements.get(&(*v, statement.clone()))) + .map(|s| &s.statement) } /// Get the full statement of this kind issued by this validator, if it is known. 
@@ -174,7 +205,9 @@ impl StatementStore { validator_index: ValidatorIndex, statement: CompactStatement, ) -> Option<&SignedStatement> { - self.known_statements.get(&(validator_index, statement)) + self.known_statements + .get(&(validator_index, statement)) + .map(|s| &s.statement) } } From 238f70bf92a0010f8402a49f50dffcd10c637dd5 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 7 Nov 2022 17:23:25 -0600 Subject: [PATCH 101/220] fmt --- .../src/vstaging/candidates.rs | 42 ++++------ .../src/vstaging/mod.rs | 78 ++++++++----------- .../src/vstaging/statement_store.rs | 13 +--- 3 files changed, 52 insertions(+), 81 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index 8de2dac89b83..5855e7e6e8b7 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -32,11 +32,13 @@ use polkadot_primitives::vstaging::{ PersistedValidationData, }; -use std::collections::{ - hash_map::{Entry, HashMap}, - HashSet, +use std::{ + collections::{ + hash_map::{Entry, HashMap}, + HashSet, + }, + sync::Arc, }; -use std::sync::Arc; /// A tracker for all known candidates in the view. /// @@ -203,8 +205,7 @@ impl Candidates { /// This is only true when the candidate is known, confirmed, /// and is importable in a fragment tree. 
pub fn is_importable(&self, candidate_hash: &CandidateHash) -> bool { - self.get_confirmed(candidate_hash) - .map_or(false, |c| c.is_importable(None)) + self.get_confirmed(candidate_hash).map_or(false, |c| c.is_importable(None)) } /// Get all hypothetical candidates which should be tested @@ -223,11 +224,8 @@ impl Candidates { ) { for (c_hash, candidate) in i { match candidate { - CandidateState::Unconfirmed(u) => u.extend_hypotheticals( - *c_hash, - v, - maybe_required_parent, - ), + CandidateState::Unconfirmed(u) => + u.extend_hypotheticals(*c_hash, v, maybe_required_parent), CandidateState::Confirmed(c) => v.push(c.to_hypothetical(*c_hash)), } } @@ -241,17 +239,9 @@ impl Candidates { .flat_map(|c| c) .filter_map(|c_hash| self.candidates.get_key_value(c_hash)); - extend_hypotheticals( - &mut v, - i, - Some(parent), - ); + extend_hypotheticals(&mut v, i, Some(parent)); } else { - extend_hypotheticals( - &mut v, - self.candidates.iter(), - None, - ); + extend_hypotheticals(&mut v, self.candidates.iter(), None); } v } @@ -421,7 +411,7 @@ impl UnconfirmedCandidate { &self, candidate_hash: CandidateHash, v: &mut Vec, - required_parent: Option<(Hash, ParaId)> + required_parent: Option<(Hash, ParaId)>, ) { fn extend_hypotheticals_inner<'a>( candidate_hash: CandidateHash, @@ -446,11 +436,7 @@ impl UnconfirmedCandidate { v, self.parent_claims.get_key_value(&parent), ), - None => extend_hypotheticals_inner( - candidate_hash, - v, - self.parent_claims.iter(), - ), + None => extend_hypotheticals_inner(candidate_hash, v, self.parent_claims.iter()), } } @@ -500,7 +486,7 @@ impl ConfirmedCandidate { HypotheticalCandidate::Complete { candidate_hash, receipt: self.receipt.clone(), - persisted_validation_data: self.persisted_validation_data.clone() + persisted_validation_data: self.persisted_validation_data.clone(), } } } diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 1e9ca045711f..ac1304788936 
100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -56,7 +56,7 @@ use cluster::{Accept as ClusterAccept, ClusterTracker, RejectIncoming as Cluster use grid::{GridTracker, ManifestSummary, StatementFilter}; use groups::Groups; use requests::RequestManager; -use statement_store::{StatementStore, StatementOrigin}; +use statement_store::{StatementOrigin, StatementStore}; mod candidates; mod cluster; @@ -418,10 +418,7 @@ pub(crate) async fn handle_active_leaves_update( // TODO [now]: determine which candidates are importable under the given // active leaf - new_leaf_fragment_tree_updates( - ctx, - leaf.hash, - ).await; + new_leaf_fragment_tree_updates(ctx, leaf.hash).await; // TODO [now]: update peers which have the leaf in their view. // update their implicit view. send any messages accordingly. @@ -552,14 +549,11 @@ pub(crate) async fn share_local_statement( } }; - match per_relay_parent - .statement_store - .insert( - &per_session.groups, - compact_statement.clone(), - StatementOrigin::Local, - ) - { + match per_relay_parent.statement_store.insert( + &per_session.groups, + compact_statement.clone(), + StatementOrigin::Local, + ) { Ok(false) | Err(_) => { gum::warn!( target: LOG_TARGET, @@ -988,40 +982,39 @@ async fn handle_incoming_statement( request_entry.get_mut().set_cluster_priority(); } - let was_fresh = - match per_relay_parent.statement_store.insert( - &per_session.groups, - checked_statement, - StatementOrigin::Remote, - ) { - Err(_) => { - // sanity: should never happen. - gum::warn!( - target: LOG_TARGET, - ?relay_parent, - validator_index = ?originator_index, - "Error - Cluster accepted message from unknown validator." - ); + let was_fresh = match per_relay_parent.statement_store.insert( + &per_session.groups, + checked_statement, + StatementOrigin::Remote, + ) { + Err(_) => { + // sanity: should never happen. 
+ gum::warn!( + target: LOG_TARGET, + ?relay_parent, + validator_index = ?originator_index, + "Error - Cluster accepted message from unknown validator." + ); - return - }, - Ok(known) => known, - }; + return + }, + Ok(known) => known, + }; let is_backed = false; // TODO [now] if was_fresh { report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT_FIRST).await; - // both of the below probably in some shared function. - // TODO [now]: circulate the statement - // TODO [now]: import the statement into backing if we can. - // If the candidate is confirmed and statements are importable, - // we send the statement to backing either if - // a) it is a candidate from the cluster - // b) it is a candidate from the grid and it is backed - // - // We always circulate statements at this point. + // both of the below probably in some shared function. + // TODO [now]: circulate the statement + // TODO [now]: import the statement into backing if we can. + // If the candidate is confirmed and statements are importable, + // we send the statement to backing either if + // a) it is a candidate from the cluster + // b) it is a candidate from the grid and it is backed + // + // We always circulate statements at this point. } else { report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT).await; } @@ -1119,10 +1112,7 @@ fn handle_grid_statement( } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn new_leaf_fragment_tree_updates( - ctx: &mut Context, - leaf_hash: Hash, -) { +async fn new_leaf_fragment_tree_updates(ctx: &mut Context, leaf_hash: Hash) { // TODO [now] // 1. get hypothetical candidates // 2. 
find out which are in the frontier diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index a3fcd6faffc8..969d6e0b7c9c 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -119,13 +119,10 @@ impl StatementStore { e.get_mut().known_by_backing = true; } - return Ok(false); - } + return Ok(false) + }, HEntry::Vacant(mut e) => { - e.insert(StoredStatement { - statement, - known_by_backing: origin.is_local(), - }); + e.insert(StoredStatement { statement, known_by_backing: origin.is_local() }); }, } @@ -205,9 +202,7 @@ impl StatementStore { validator_index: ValidatorIndex, statement: CompactStatement, ) -> Option<&SignedStatement> { - self.known_statements - .get(&(validator_index, statement)) - .map(|s| &s.statement) + self.known_statements.get(&(validator_index, statement)).map(|s| &s.statement) } } From dbb2a3ca76671ffce8a7ee7bee42d5ebb08fea28 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 7 Nov 2022 17:29:00 -0600 Subject: [PATCH 102/220] requests should return peer --- .../statement-distribution/src/vstaging/requests.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index 2214c9ba4b4a..096b78380200 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -345,6 +345,7 @@ impl<'a> UnhandledResponse<'a> { let entry = match manager.requests.get_mut(&identifier) { None => return ResponseValidationOutput { + requested_peer, reputation_changes: Vec::new(), request_status: CandidateRequestStatus::Outdated, }, @@ -380,12 +381,14 @@ impl<'a> UnhandledResponse<'a> { ); return ResponseValidationOutput { + requested_peer: requested_peer.clone(), 
reputation_changes: vec![(requested_peer, COST_IMPROPERLY_DECODED_RESPONSE)], request_status: CandidateRequestStatus::Incomplete, } }, Err(RequestError::NetworkError(_) | RequestError::Canceled(_)) => return ResponseValidationOutput { + requested_peer, reputation_changes: vec![], request_status: CandidateRequestStatus::Incomplete, }, @@ -438,6 +441,7 @@ fn validate_complete_response( let invalid_candidate_output = || ResponseValidationOutput { request_status: CandidateRequestStatus::Incomplete, reputation_changes: vec![(requested_peer.clone(), COST_INVALID_RESPONSE)], + requested_peer, }; // sanity-check candidate response. @@ -545,6 +549,7 @@ fn validate_complete_response( rep_changes.push((requested_peer.clone(), BENEFIT_VALID_RESPONSE)); ResponseValidationOutput { + requested_peer, request_status: CandidateRequestStatus::Complete { candidate: response.candidate_receipt, persisted_validation_data: response.persisted_validation_data, @@ -578,6 +583,8 @@ pub enum CandidateRequestStatus { /// Output of the response validation. pub struct ResponseValidationOutput { + /// The peer we requested from. + pub requested_peer: PeerId, /// The status of the request. pub request_status: CandidateRequestStatus, /// Any reputation changes as a result of validating the response. 
From 43a2add7ccd928c8cfb9ebabcc708269b2481c9e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 7 Nov 2022 17:32:05 -0600 Subject: [PATCH 103/220] apply post-confirmation reckoning --- .../statement-distribution/src/vstaging/mod.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index ac1304788936..70f2f6a180bb 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -545,7 +545,7 @@ pub(crate) async fn share_local_statement( pvd.clone(), local_group, ) { - // TODO [now] apply the reckoning. + apply_post_confirmation_reckoning(ctx, reckoning).await; } }; @@ -1165,3 +1165,13 @@ async fn handle_cluster_newly_backed( // 1. for confirmed & importable candidates only // 2. send advertisements along the grid } + +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn apply_post_confirmation_reckoning( + ctx: &mut Context, + reckoning: candidates::PostConfirmationReckoning, +) { + for peer in reckoning.incorrect { + report_peer(ctx.sender(), peer, COST_INACCURATE_ADVERTISEMENT).await; + } +} From 164a09b81d89fab9a0faa603b4ee6b4929c15161 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 8 Nov 2022 11:36:33 -0600 Subject: [PATCH 104/220] flesh out import/announce/circulate logic on new statements --- .../src/vstaging/groups.rs | 11 +- .../src/vstaging/mod.rs | 148 ++++++++++++++++-- .../src/vstaging/statement_store.rs | 21 +++ 3 files changed, 155 insertions(+), 25 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/groups.rs b/node/network/statement-distribution/src/vstaging/groups.rs index 5f3618cd7946..0c7b79dc1e59 100644 --- a/node/network/statement-distribution/src/vstaging/groups.rs +++ b/node/network/statement-distribution/src/vstaging/groups.rs @@ -65,7 +65,7 @@ impl Groups { 
&self, group_index: GroupIndex, ) -> Option<(usize, usize)> { - self.get(group_index).map(|g| (g.len(), minimum_votes(g.len()))) + self.get(group_index).map(|g| (g.len(), super::minimum_votes(g.len()))) } /// Get the group index for a validator by index. @@ -78,12 +78,3 @@ impl Groups { self.by_discovery_key.get(&discovery_key).map(|x| *x) } } - -/// How many votes we need to consider a candidate backed. -/// -/// WARNING: This has to be kept in sync with the runtime check in the inclusion module and -/// the backing subsystem. -// TODO [now]: extract to shared primitives. -fn minimum_votes(n_validators: usize) -> usize { - std::cmp::min(2, n_validators) -} diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 70f2f6a180bb..9f246841485f 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -195,6 +195,8 @@ fn connected_validator_peer( struct PeerState { view: View, + // TODO [now]: actually keep track of remote implicit views + // in a smooth manner implicit_view: HashSet, discovery_ids: Option>, } @@ -217,6 +219,7 @@ impl PeerState { /// How many votes we need to consider a candidate backed. /// /// WARNING: This has to be kept in sync with the runtime check in the inclusion module. +// TODO [now]: extract to shared primitives fn minimum_votes(n_validators: usize) -> usize { std::cmp::min(2, n_validators) } @@ -817,6 +820,20 @@ async fn report_peer( } /// Handle an incoming statement. +/// +/// This checks whether the sender is allowed to send the statement, +/// either via the cluster or the grid. +/// +/// This also checks the signature of the statement. 
+/// If the statement is fresh, this function guarantees that after completion
+/// - The statement is re-circulated to all relevant peers in both the cluster
+///   and the grid
+/// - If the candidate is out-of-cluster and is backable and importable,
+///   all statements about the candidate have been sent to backing
+/// - If the candidate is in-cluster and is importable,
+///   the statement has been sent to backing
+/// - If the candidate just became backable, appropriate announcements
+///   and acknowledgement along the topology have been made.
 #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
 async fn handle_incoming_statement(
 	ctx: &mut Context,
@@ -888,8 +905,6 @@ async fn handle_incoming_statement(
 		.next()
 	};
 
-	let was_backed = false; // TODO [now]
-
 	let checked_statement = if let Some(cluster_sender_index) = cluster_sender_index {
 		match handle_cluster_statement(
 			relay_parent,
@@ -967,7 +982,8 @@ async fn handle_incoming_statement(
 		}
 	}
 
-	if !state.candidates.is_confirmed(&candidate_hash) {
+	let is_confirmed = state.candidates.is_confirmed(&candidate_hash);
+	if !is_confirmed {
 		// If the candidate is not confirmed, note that we should attempt
 		// to request it from the given peer.
 		let mut request_entry =
@@ -977,23 +993,26 @@ async fn handle_incoming_statement(
 
 		request_entry.get_mut().add_peer(peer);
 
 		// We only successfully accept statements from the grid on confirmed
 		// candidates, therefore this check only passes if the statement is from the cluster
 		request_entry.get_mut().set_cluster_priority();
 	}
 
+	let was_backable =
+		per_relay_parent.statement_store.is_backable(originator_group, candidate_hash);
+
 	let was_fresh = match per_relay_parent.statement_store.insert(
 		&per_session.groups,
-		checked_statement,
+		checked_statement.clone(),
 		StatementOrigin::Remote,
 	) {
-		Err(_) => {
+		Err(statement_store::ValidatorUnknown) => {
 			// sanity: should never happen.
gum::warn!( target: LOG_TARGET, ?relay_parent, validator_index = ?originator_index, - "Error - Cluster accepted message from unknown validator." + "Error - accepted message from unknown validator." ); return @@ -1001,11 +1020,33 @@ async fn handle_incoming_statement( Ok(known) => known, }; - let is_backed = false; // TODO [now] - if was_fresh { report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT_FIRST).await; + let is_backable = + per_relay_parent.statement_store.is_backable(originator_group, candidate_hash); + let is_importable = state.candidates.is_importable(&candidate_hash); + if !was_backable && is_backable && is_importable { + handle_backable_and_importable_candidate( + ctx, + candidate_hash, + originator_group, + &relay_parent, + &mut *per_relay_parent, + &*per_session, + &state.authorities, + &state.peers, + ) + .await; + } else if (is_backable || !is_confirmed) && is_importable { + // TODO [now]: this is either a grid statement on a candidate + // that was already backed _or_ a cluster statement. + // import statement into backing. + } + + // We always circulate statements at this point. + circulate_statement(ctx, state, relay_parent, originator_group, checked_statement).await; + // both of the below probably in some shared function. // TODO [now]: circulate the statement // TODO [now]: import the statement into backing if we can. @@ -1014,14 +1055,9 @@ async fn handle_incoming_statement( // a) it is a candidate from the cluster // b) it is a candidate from the grid and it is backed // - // We always circulate statements at this point. } else { report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT).await; } - - if is_backed && !was_backed { - // TODO [now]: handle a candidate being completely backed now. 
- } } /// Checks whether a statement is allowed, whether the signature is accurate, @@ -1111,6 +1147,88 @@ fn handle_grid_statement( Ok(checked_statement) } +// For a candidate that is both backable and importable, this +// 1) imports all known statements into the backing subsystem +// 2) dispatches backable candidate announcements or acknowledgements +// via the grid topology. If the session topology is not yet +// available, this will be a no-op +// +// It is expected +async fn handle_backable_and_importable_candidate( + ctx: &mut Context, + candidate_hash: CandidateHash, + group_index: GroupIndex, + relay_parent: &Hash, + relay_parent_state: &mut PerRelayParentState, + per_session: &PerSessionState, + authorities: &HashMap, + peers: &HashMap, +) { + let local_validator = match relay_parent_state.local_validator { + Some(ref mut v) => v, + None => return, + }; + + // TODO [now]: dispatch all unknown statements to backing + + let grid_view = match per_session.grid_view { + Some(ref t) => t, + None => { + gum::trace!( + target: LOG_TARGET, + session = relay_parent_state.session, + "Cannot handle backable candidate due to lack of topology", + ); + + return + }, + }; + + let group_size = match per_session.groups.get(group_index) { + None => { + gum::warn!( + target: LOG_TARGET, + ?candidate_hash, + ?relay_parent, + ?group_index, + session = relay_parent_state.session, + "Handled backed candidate with unknown group?", + ); + + return + }, + Some(g) => g.len(), + }; + + let actions = local_validator.grid_tracker.add_backed_candidate( + grid_view, + candidate_hash, + group_index, + group_size, + ); + + for (v, action) in actions { + let p = match connected_validator_peer(authorities, per_session, v) { + None => continue, + Some(p) => + if peers.get(&p).map_or(false, |d| d.knows_relay_parent(relay_parent)) { + p + } else { + continue + }, + }; + + match action { + grid::PostBackingAction::Advertise => { + // TODO [now]: send inventory message and `note_advertised_to`. 
+ }, + grid::PostBackingAction::Acknowledge => { + // TODO [now]: send acknowledgement message and `note_local_acknowledged`. + }, + } + } +} + #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn new_leaf_fragment_tree_updates(ctx: &mut Context, leaf_hash: Hash) { // TODO [now] @@ -1125,7 +1243,7 @@ async fn new_leaf_fragment_tree_updates(ctx: &mut Context, leaf_hash: H } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn new_backed_fragment_tree_updates( +async fn prospective_backed_notification_fragment_tree_updates( ctx: &mut Context, para_id: ParaId, head_data_hash: Hash, diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index 969d6e0b7c9c..e6a93a913f02 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -204,6 +204,13 @@ impl StatementStore { ) -> Option<&SignedStatement> { self.known_statements.get(&(validator_index, statement)).map(|s| &s.statement) } + + /// Whether a candidate has enough statements to be backed. + pub fn is_backable(&self, group_index: GroupIndex, candidate_hash: CandidateHash) -> bool { + self.group_statements + .get(&(group_index, candidate_hash)) + .map_or(false, |s| s.is_backable()) + } } /// Error indicating that the validator was unknown. 
@@ -230,6 +237,20 @@ impl GroupStatements { } } + fn is_backable(&self) -> bool { + let votes = self + .seconded + .iter() + .by_vals() + .zip(self.valid.iter().by_vals()) + .filter(|&(s, v)| s || v) // no double-counting + .count(); + + let threshold = super::minimum_votes(self.valid.len()); + + votes >= threshold + } + fn note_seconded(&mut self, within_group_index: usize) { self.seconded.set(within_group_index, true); } From 9d9fbe36f2649cc465bb613a64833578b5159e64 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 1 Dec 2022 07:14:14 -0600 Subject: [PATCH 105/220] adjust --- .../statement-distribution/src/vstaging/mod.rs | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 9f246841485f..5b2a02c06b36 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -1046,15 +1046,6 @@ async fn handle_incoming_statement( // We always circulate statements at this point. circulate_statement(ctx, state, relay_parent, originator_group, checked_statement).await; - - // both of the below probably in some shared function. - // TODO [now]: circulate the statement - // TODO [now]: import the statement into backing if we can. - // If the candidate is confirmed and statements are importable, - // we send the statement to backing either if - // a) it is a candidate from the cluster - // b) it is a candidate from the grid and it is backed - // } else { report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT).await; } @@ -1152,8 +1143,6 @@ fn handle_grid_statement( // 2) dispatches backable candidate announcements or acknowledgements // via the grid topology. 
If the session topology is not yet // available, this will be a no-op -// -// It is expected async fn handle_backable_and_importable_candidate( ctx: &mut Context, candidate_hash: CandidateHash, From af0a76bcc42d086ad3d51cb291b961bfdf68c08a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 18 Jan 2023 10:25:05 -0600 Subject: [PATCH 106/220] adjust TODO comment --- node/network/statement-distribution/src/vstaging/mod.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 5b2a02c06b36..90bca8650a6e 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -282,8 +282,13 @@ pub(crate) async fn handle_network_update( per_session.supply_topology(&new_topology); } - // TODO [now] for all relay-parents with this session, send all grid peers + // TODO [https://github.com/paritytech/polkadot/issues/6194] + // technically, we should account for the fact that the session topology might + // come late, and for all relay-parents with this session, send all grid peers // any `BackedCandidateInv` messages they might need. + // + // in practice, this is a small issue & the API of receiving topologies could + // be altered to fix it altogether. 
}, NetworkBridgeEvent::PeerMessage(peer_id, message) => { match message { From 447fa5ca6aa51a6bc5e5eaee175117adbce9ce09 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 18 Jan 2023 11:02:21 -0600 Subject: [PATCH 107/220] fix backing tests --- .../src/tests/prospective_parachains.rs | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 827ac3751fbe..031d843d93f7 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -1586,18 +1586,27 @@ fn occupied_core_assignment() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded( - candidate_para, - candidate_receipt, - _pvd, + ProspectiveParachainsMessage::IntroduceCandidate( + req, tx, ), - ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data + => { // Any non-empty response will do. 
tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap(); } ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded(_, _) + ) + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( From 1b4b6d4c6ca45e5d992270916f385b9adcc96fcd Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 18 Jan 2023 11:31:27 -0600 Subject: [PATCH 108/220] update statement-distribution to use new indexedvec --- .../statement-distribution/src/vstaging/grid.rs | 4 ++-- .../src/vstaging/groups.rs | 10 +++++----- .../statement-distribution/src/vstaging/mod.rs | 17 +++++++++-------- node/subsystem-util/src/lib.rs | 6 +++--- 4 files changed, 19 insertions(+), 18 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 1ae402963686..f25335d03948 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -58,8 +58,8 @@ pub struct SessionTopologyView { /// and send to the corresponding X/Y slice. /// For any validators we don't share a slice with, we receive from the nodes /// which share a slice with them. -pub fn build_session_topology( - groups: &[Vec], +pub fn build_session_topology<'a>( + groups: impl IntoIterator>, topology: &SessionGridTopology, our_index: Option, ) -> SessionTopologyView { diff --git a/node/network/statement-distribution/src/vstaging/groups.rs b/node/network/statement-distribution/src/vstaging/groups.rs index 0c7b79dc1e59..01db093a875e 100644 --- a/node/network/statement-distribution/src/vstaging/groups.rs +++ b/node/network/statement-distribution/src/vstaging/groups.rs @@ -16,7 +16,7 @@ //! A utility for tracking groups and their members within a session. 
-use polkadot_primitives::vstaging::{AuthorityDiscoveryId, GroupIndex, ValidatorIndex}; +use polkadot_primitives::vstaging::{AuthorityDiscoveryId, GroupIndex, IndexedVec, ValidatorIndex}; use std::collections::HashMap; @@ -24,7 +24,7 @@ use std::collections::HashMap; /// looking up groups by validator indices or authority discovery ID. #[derive(Debug, Clone)] pub struct Groups { - groups: Vec>, + groups: IndexedVec>, by_validator_index: HashMap, by_discovery_key: HashMap, } @@ -32,7 +32,7 @@ pub struct Groups { impl Groups { /// Create a new [`Groups`] tracker with the groups and discovery keys /// from the session. - pub fn new(groups: Vec>, discovery_keys: &[AuthorityDiscoveryId]) -> Self { + pub fn new(groups: IndexedVec>, discovery_keys: &[AuthorityDiscoveryId]) -> Self { let mut by_validator_index = HashMap::new(); let mut by_discovery_key = HashMap::new(); @@ -51,13 +51,13 @@ impl Groups { } /// Access all the underlying groups. - pub fn all(&self) -> &[Vec] { + pub fn all(&self) -> &IndexedVec> { &self.groups } /// Get the underlying group validators by group index. pub fn get(&self, group_index: GroupIndex) -> Option<&[ValidatorIndex]> { - self.groups.get(group_index.0 as usize).map(|x| &x[..]) + self.groups.get(group_index).map(|x| &x[..]) } /// Get the backing group size and backing threshold. 
diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 90bca8650a6e..75c39d0b56a5 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -36,6 +36,7 @@ use polkadot_primitives::vstaging::{ AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, CoreState, GroupIndex, Hash, Id as ParaId, PersistedValidationData, SessionIndex, SessionInfo, SignedStatement, SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex, + IndexedVec, }; use sp_keystore::SyncCryptoStorePtr; @@ -134,7 +135,7 @@ impl PerSessionState { } let local_validator = - polkadot_node_subsystem_util::signing_key_and_index(&session_info.validators, keystore) + polkadot_node_subsystem_util::signing_key_and_index(session_info.validators.iter(), keystore) .await; PerSessionState { @@ -148,7 +149,7 @@ impl PerSessionState { fn supply_topology(&mut self, topology: &SessionGridTopology) { let grid_view = grid::build_session_topology( - &self.session_info.validator_groups[..], + self.session_info.validator_groups.iter(), topology, self.local_validator, ); @@ -436,7 +437,7 @@ pub(crate) async fn handle_active_leaves_update( fn find_local_validator_state( validator_index: ValidatorIndex, - validators: &[ValidatorId], + validators: &IndexedVec, groups: &Groups, availability_cores: &[CoreState], ) -> Option { @@ -444,7 +445,7 @@ fn find_local_validator_state( return None } - let validator_id = validators.get(validator_index.0 as usize)?.clone(); + let validator_id = validators.get(validator_index)?.clone(); let our_group = groups.by_validator_index(validator_index)?; @@ -804,14 +805,14 @@ fn cluster_sendable_seconded_statement<'a>( /// Check a statement signature under this parent hash. 
fn check_statement_signature( session_index: SessionIndex, - validators: &[ValidatorId], + validators: &IndexedVec, relay_parent: Hash, statement: UncheckedSignedStatement, ) -> std::result::Result { let signing_context = SigningContext { session_index, parent_hash: relay_parent }; validators - .get(statement.unchecked_validator_index().0 as usize) + .get(statement.unchecked_validator_index()) .ok_or_else(|| statement.clone()) .and_then(|v| statement.try_into_checked(&signing_context, v)) } @@ -1092,7 +1093,7 @@ fn handle_cluster_statement( // Ensure the statement is correctly signed. let checked_statement = match check_statement_signature( session, - &session_info.validators[..], + &session_info.validators, relay_parent, statement, ) { @@ -1125,7 +1126,7 @@ fn handle_grid_statement( // Ensure the statement is correctly signed. let checked_statement = match check_statement_signature( session, - &per_session.session_info.validators[..], + &per_session.session_info.validators, relay_parent, statement, ) { diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index ede44c283d29..76c701e3202f 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -222,7 +222,7 @@ specialize_requests! { /// From the given set of validators, find the first key we can sign with, if any. pub async fn signing_key( - validators: &[ValidatorId], + validators: impl IntoIterator, keystore: &SyncCryptoStorePtr, ) -> Option { signing_key_and_index(validators, keystore).await.map(|(k, _)| k) @@ -231,10 +231,10 @@ pub async fn signing_key( /// From the given set of validators, find the first key we can sign with, if any, and return it /// along with the validator index. 
pub async fn signing_key_and_index( - validators: &[ValidatorId], + validators: impl IntoIterator, keystore: &SyncCryptoStorePtr, ) -> Option<(ValidatorId, ValidatorIndex)> { - for (i, v) in validators.iter().enumerate() { + for (i, v) in validators.into_iter().enumerate() { if CryptoStore::has_keys(&**keystore, &[(v.to_raw_vec(), ValidatorId::ID)]).await { return Some((v.clone(), ValidatorIndex(i as _))) } From 45ef1dedab9adcbbae3c793c1ecf44d8022a5646 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 18 Jan 2023 11:31:36 -0600 Subject: [PATCH 109/220] fmt --- .../src/tests/prospective_parachains.rs | 7 ++--- .../src/vstaging/groups.rs | 5 +++- .../src/vstaging/mod.rs | 26 +++++++++---------- 3 files changed, 20 insertions(+), 18 deletions(-) diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 031d843d93f7..84a840c62bbe 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -1602,9 +1602,10 @@ fn occupied_core_assignment() { assert_matches!( virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded(_, _) - ) + AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( + _, + _ + )) ); assert_matches!( diff --git a/node/network/statement-distribution/src/vstaging/groups.rs b/node/network/statement-distribution/src/vstaging/groups.rs index 01db093a875e..94f6c1e219b0 100644 --- a/node/network/statement-distribution/src/vstaging/groups.rs +++ b/node/network/statement-distribution/src/vstaging/groups.rs @@ -32,7 +32,10 @@ pub struct Groups { impl Groups { /// Create a new [`Groups`] tracker with the groups and discovery keys /// from the session. 
- pub fn new(groups: IndexedVec>, discovery_keys: &[AuthorityDiscoveryId]) -> Self { + pub fn new( + groups: IndexedVec>, + discovery_keys: &[AuthorityDiscoveryId], + ) -> Self { let mut by_validator_index = HashMap::new(); let mut by_discovery_key = HashMap::new(); diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 75c39d0b56a5..d5cce530237e 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -34,9 +34,8 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::backing_implicit_view::{FetchError, View as ImplicitView}; use polkadot_primitives::vstaging::{ AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, CoreState, - GroupIndex, Hash, Id as ParaId, PersistedValidationData, SessionIndex, SessionInfo, + GroupIndex, Hash, Id as ParaId, IndexedVec, PersistedValidationData, SessionIndex, SessionInfo, SignedStatement, SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex, - IndexedVec, }; use sp_keystore::SyncCryptoStorePtr; @@ -134,9 +133,11 @@ impl PerSessionState { authority_lookup.insert(ad, ValidatorIndex(i as _)); } - let local_validator = - polkadot_node_subsystem_util::signing_key_and_index(session_info.validators.iter(), keystore) - .await; + let local_validator = polkadot_node_subsystem_util::signing_key_and_index( + session_info.validators.iter(), + keystore, + ) + .await; PerSessionState { session_info, @@ -1091,15 +1092,12 @@ fn handle_cluster_statement( }; // Ensure the statement is correctly signed. 
- let checked_statement = match check_statement_signature( - session, - &session_info.validators, - relay_parent, - statement, - ) { - Ok(s) => s, - Err(_) => return Err(COST_INVALID_SIGNATURE), - }; + let checked_statement = + match check_statement_signature(session, &session_info.validators, relay_parent, statement) + { + Ok(s) => s, + Err(_) => return Err(COST_INVALID_SIGNATURE), + }; cluster_tracker.note_received( cluster_sender_index, From cd3ecf8fe66a43d33a8b2e7b6c7f9b26dd0eb52b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 18 Jan 2023 13:57:01 -0600 Subject: [PATCH 110/220] query hypothetical candidates --- .../statement-distribution/src/error.rs | 3 + .../src/vstaging/candidates.rs | 11 ++ .../src/vstaging/grid.rs | 19 +++ .../src/vstaging/mod.rs | 120 +++++++++++++++--- node/subsystem-types/src/messages.rs | 7 + 5 files changed, 142 insertions(+), 18 deletions(-) diff --git a/node/network/statement-distribution/src/error.rs b/node/network/statement-distribution/src/error.rs index 406eccfc7874..c49b208adbfb 100644 --- a/node/network/statement-distribution/src/error.rs +++ b/node/network/statement-distribution/src/error.rs @@ -75,6 +75,9 @@ pub enum Error { #[error("Fetching availability cores failed {0:?}")] FetchAvailabilityCores(RuntimeApiError), + #[error("Fetching validator groups failed {0:?}")] + FetchValidatorGroups(RuntimeApiError), + #[error("Attempted to share statement when not a validator or not assigned")] InvalidShare, diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index 5855e7e6e8b7..6af2b8abb5f1 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -208,6 +208,17 @@ impl Candidates { self.get_confirmed(candidate_hash).map_or(false, |c| c.is_importable(None)) } + /// Note that a candidate is importable in a fragment tree indicated by the given 
+ /// leaf hash. + pub fn note_importable_under( + &mut self, + candidate: &HypotheticalCandidate, + leaf_hash: Hash, + ) { + // TODO [now] + unimplemented!() + } + /// Get all hypothetical candidates which should be tested /// for inclusion in the frontier. /// diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index f25335d03948..84c310f15bf6 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -369,6 +369,25 @@ impl GridTracker { } } + /// Which validators we should request the fully attested candidates from. + /// If we already have successfully requested, then this will return an empty + /// set. + pub fn validators_to_request( + &self, + candidate_hash: CandidateHash, + group_index: GroupIndex, + ) -> Vec { + let mut validators = Vec::new(); + if let Some(unconfirmed) = self.unconfirmed.get(&candidate_hash) { + for (v, g) in unconfirmed { + if g == &group_index { + validators.push(*v); + } + } + } + validators + } + /// Whether we can acknowledge a remote's advertisement. 
pub fn can_local_acknowledge( &self, diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index d5cce530237e..52dfab60302e 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -28,18 +28,19 @@ use polkadot_node_primitives::{ }; use polkadot_node_subsystem::{ jaeger, - messages::{CandidateBackingMessage, NetworkBridgeEvent, NetworkBridgeTxMessage}, + messages::{CandidateBackingMessage, HypotheticalCandidate, NetworkBridgeEvent, NetworkBridgeTxMessage, ProspectiveParachainsMessage, HypotheticalFrontierRequest}, overseer, ActivatedLeaf, ActiveLeavesUpdate, PerLeafSpan, StatementDistributionSenderTrait, }; use polkadot_node_subsystem_util::backing_implicit_view::{FetchError, View as ImplicitView}; use polkadot_primitives::vstaging::{ AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, CoreState, GroupIndex, Hash, Id as ParaId, IndexedVec, PersistedValidationData, SessionIndex, SessionInfo, - SignedStatement, SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex, + SignedStatement, SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex, GroupRotationInfo, CoreIndex, }; use sp_keystore::SyncCryptoStorePtr; +use futures::channel::oneshot; use indexmap::IndexMap; use std::collections::{ @@ -92,6 +93,8 @@ struct PerRelayParentState { validator_state: HashMap, local_validator: Option, statement_store: StatementStore, + availability_cores: Vec, + group_rotation_info: GroupRotationInfo, session: SessionIndex, } @@ -372,6 +375,17 @@ pub(crate) async fn handle_active_leaves_update( .map_err(JfyiError::RuntimeApiUnavailable)? .map_err(JfyiError::FetchAvailabilityCores)?; + let group_rotation_info = polkadot_node_subsystem_util::request_validator_groups( + *new_relay_parent, + ctx.sender(), + ) + .await + .await + .map_err(JfyiError::RuntimeApiUnavailable)? 
+ .map_err(JfyiError::FetchValidatorGroups)? + .1; + + if !state.per_session.contains_key(&session_index) { let session_info = polkadot_node_subsystem_util::request_session_info( *new_relay_parent, @@ -412,6 +426,7 @@ pub(crate) async fn handle_active_leaves_update( &per_session.session_info.validators, &per_session.groups, &availability_cores, + &group_rotation_info, ) }); @@ -421,18 +436,20 @@ pub(crate) async fn handle_active_leaves_update( validator_state: HashMap::new(), local_validator, statement_store: StatementStore::new(&per_session.groups), + availability_cores, + group_rotation_info, session: session_index, }, ); } - // TODO [now]: determine which candidates are importable under the given - // active leaf - new_leaf_fragment_tree_updates(ctx, leaf.hash).await; - // TODO [now]: update peers which have the leaf in their view. // update their implicit view. send any messages accordingly. + // TODO [now]: determine which candidates are importable under the given + // active leaf + new_leaf_fragment_tree_updates(ctx, state, leaf.hash).await; + Ok(()) } @@ -441,6 +458,7 @@ fn find_local_validator_state( validators: &IndexedVec, groups: &Groups, availability_cores: &[CoreState], + group_rotation_info: &GroupRotationInfo, ) -> Option { if groups.all().is_empty() { return None @@ -453,14 +471,14 @@ fn find_local_validator_state( // note: this won't work well for parathreads because it only works // when core assignments to paras are static throughout the session. 
- let para_for_group = - |g: GroupIndex| availability_cores.get(g.0 as usize).and_then(|c| c.para_id()); - + let core = group_rotation_info.core_for_group(our_group, availability_cores.len()); + let para = availability_cores.get(core.0 as usize).and_then(|c| c.para_id()); let group_validators = groups.get(our_group)?.to_owned(); + Some(LocalValidatorState { index: validator_index, group: our_group, - assignment: para_for_group(our_group), + assignment: para, cluster_tracker: ClusterTracker::new( group_validators, todo!(), // TODO [now]: seconding limit? @@ -1223,16 +1241,82 @@ async fn handle_backable_and_importable_candidate( } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn new_leaf_fragment_tree_updates(ctx: &mut Context, leaf_hash: Hash) { +async fn new_leaf_fragment_tree_updates( + ctx: &mut Context, + state: &mut State, + leaf_hash: Hash, +) { // TODO [now] // 1. get hypothetical candidates + let hypotheticals = state.candidates.frontier_hypotheticals(None); // 2. find out which are in the frontier - // 3. note that they are - // 4. for unconfirmed candidates, send requests to all given peers. - // note that all unconfirmed hypothetical candidates are from the grid - // 5. for confirmed candidates, if the candidate has enough statements to - // back, send all statements which are new to backing. Also, send - // backed candidate manifests to peers with the relay parent in their view + let frontier = { + let (tx, rx) = oneshot::channel(); + ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalFrontier( + HypotheticalFrontierRequest { + candidates: hypotheticals, + fragment_tree_relay_parent: Some(leaf_hash), + }, + tx, + )).await; + + match rx.await { + Ok(frontier) => frontier, + Err(oneshot::Canceled) => return, + } + }; + // 3. note that they are importable under a given leaf hash. 
+ for (hypo, membership) in frontier { + // skip parablocks outside of the frontier + if membership.is_empty() { continue } + + for (leaf_hash, depths) in membership { + state.candidates.note_importable_under(&hypo, leaf_hash); + } + + match hypo { + HypotheticalCandidate::Complete { candidate_hash, receipt, persisted_validation_data } => { + // 4a. for confirmed candidates, if the candidate has enough statements to + // back, send all statements which are new to backing. + if let Some(prs) = state.per_relay_parent.get(&receipt.descriptor().relay_parent) { + let core_index = prs.availability_cores + .iter() + .position(|c| c.para_id() == Some(receipt.descriptor().para_id)); + + let group_index = core_index.map(|c| prs.group_rotation_info.group_for_core( + CoreIndex(c as _), + prs.availability_cores.len(), + )); + if let Some(true) = group_index.map(|g| prs.statement_store.is_backable(g, candidate_hash)) { + // TODO [now]: send statements to backing + } + } + } + HypotheticalCandidate::Incomplete { + candidate_hash, candidate_para, parent_head_data_hash, candidate_relay_parent, + } => { + // 4b. for unconfirmed candidates, send requests to all advertising peers. + // note that all unconfirmed hypothetical candidates are from the grid + if let Some(prs) = state.per_relay_parent.get(&candidate_relay_parent) { + let core_index = prs.availability_cores + .iter() + .position(|c| c.para_id() == Some(candidate_para)); + + let group_index = core_index.map(|c| prs.group_rotation_info.group_for_core( + CoreIndex(c as _), + prs.availability_cores.len(), + )); + + let request_from = prs.local_validator.as_ref() + .zip(group_index) + .map(|(vs, group_index)| vs.grid_tracker.validators_to_request(candidate_hash, group_index)) + .unwrap_or_default(); + + // TODO [now]: issue requests + } + } + } + } } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] @@ -1260,7 +1344,7 @@ async fn handle_incoming_manifest( // 2. 
sanity checks: peer is validator, bitvec size, import into grid tracker // 3. if accepted by grid, insert as unconfirmed. // 4. if already confirmed, acknowledge candidate - // 5. if already unconfirmed, add request entry + // 5. if already unconfirmed, add request entry (if importable) // 6. if fresh unconfirmed, determine whether it's in the hypothetical // frontier, update candidates wrapper, add request entry if so. } diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index cad19c7b39dc..1b3465a8217d 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -1012,6 +1012,13 @@ pub enum ProspectiveParachainsMessage { /// Get the hypothetical frontier membership of candidates with the given properties /// under the specified active leaves' fragment trees. /// + /// Specifically, when provided with a set of hypothetical candidates, which are either + /// complete or incomplete, this will return fragment tree memberships for each candidate. + /// + /// These memberships are computed in relation only to what is already within the fragment + /// trees and not with respect to all candidates in the set. Therefore, the membership sets + /// may be incomplete. + /// /// For any candidate which is already known, this returns the depths the candidate /// occupies. 
GetHypotheticalFrontier( From bcf5f35e38e1dec2e183bdc8dd3433ddd4bea0ae Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 18 Jan 2023 14:15:43 -0600 Subject: [PATCH 111/220] implement `note_importable_under` --- .../src/vstaging/candidates.rs | 45 +++++++++++++++++-- node/subsystem-types/src/messages.rs | 8 ++++ 2 files changed, 50 insertions(+), 3 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index 6af2b8abb5f1..1244551ebb4e 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -122,7 +122,8 @@ impl Candidates { /// yielding lists of peers which advertised it /// both correctly and incorrectly. /// - /// This does no sanity-checking of input data. + /// This does no sanity-checking of input data, and will overwrite + /// already-confirmed candidates. pub fn confirm_candidate( &mut self, candidate_hash: CandidateHash, @@ -144,6 +145,10 @@ impl Candidates { importable_under: HashSet::new(), }), ); + let new_confirmed = match self.candidates.get_mut(&candidate_hash).expect("just inserted; qed") { + CandidateState::Confirmed(x) => x, + _ => panic!("just inserted as confirmed; qed"), + }; self.by_parent.entry((parent_hash, para_id)).or_default().insert(candidate_hash); @@ -156,6 +161,12 @@ impl Candidates { incorrect: HashSet::new(), }; + for (leaf_hash, x) in u.unconfirmed_importable_under { + if x.relay_parent == relay_parent && x.parent_hash == parent_hash && x.para_id == para_id { + new_confirmed.importable_under.insert(leaf_hash); + } + } + for (peer, claims) in u.claims { // Update the by-parent-hash index not to store any outdated // claims. 
@@ -215,8 +226,36 @@ impl Candidates { candidate: &HypotheticalCandidate, leaf_hash: Hash, ) { - // TODO [now] - unimplemented!() + match candidate { + HypotheticalCandidate::Incomplete { + candidate_hash, + candidate_para, + parent_head_data_hash, + candidate_relay_parent, + } => { + let u = UnconfirmedImportable { + relay_parent: *candidate_relay_parent, + parent_hash: *parent_head_data_hash, + para_id: *candidate_para, + }; + + if let Some(&mut CandidateState::Unconfirmed(ref mut c)) + = self.candidates.get_mut(&candidate_hash) + { + c.note_maybe_importable_under(leaf_hash, u); + } + } + HypotheticalCandidate::Complete { + candidate_hash, + .. + } => { + if let Some(&mut CandidateState::Confirmed(ref mut c)) + = self.candidates.get_mut(&candidate_hash) + { + c.importable_under.insert(leaf_hash); + } + } + } } /// Get all hypothetical candidates which should be tested diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 1b3465a8217d..9a933eb008b1 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -924,6 +924,14 @@ pub enum HypotheticalCandidate { } impl HypotheticalCandidate { + /// Get the hash of the candidate. + pub fn candidate_hash(&self) -> CandidateHash { + match *self { + HypotheticalCandidate::Complete { ref candidate_hash, .. } => *candidate_hash, + HypotheticalCandidate::Incomplete { ref candidate_hash, .. } => *candidate_hash, + } + } + /// Get the `ParaId` of the hypothetical candidate. 
pub fn candidate_para(&self) -> ParaId { match *self { From 8f739882413ae0e440fcbcd1bc7993c99b286d14 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 18 Jan 2023 14:20:49 -0600 Subject: [PATCH 112/220] extract common utility of fragment tree updates --- .../src/vstaging/mod.rs | 38 +++++++++++++------ 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 52dfab60302e..9e95c98a880e 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -1241,21 +1241,21 @@ async fn handle_backable_and_importable_candidate( } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn new_leaf_fragment_tree_updates( +async fn fragment_tree_update_inner( ctx: &mut Context, state: &mut State, - leaf_hash: Hash, + active_leaf_hash: Option, + required_parent_info: Option<(Hash, ParaId)>, ) { - // TODO [now] // 1. get hypothetical candidates - let hypotheticals = state.candidates.frontier_hypotheticals(None); + let hypotheticals = state.candidates.frontier_hypotheticals(required_parent_info); // 2. 
find out which are in the frontier let frontier = { let (tx, rx) = oneshot::channel(); ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalFrontier( HypotheticalFrontierRequest { candidates: hypotheticals, - fragment_tree_relay_parent: Some(leaf_hash), + fragment_tree_relay_parent: active_leaf_hash, }, tx, )).await; @@ -1319,17 +1319,33 @@ async fn new_leaf_fragment_tree_updates( } } +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn new_leaf_fragment_tree_updates( + ctx: &mut Context, + state: &mut State, + leaf_hash: Hash, +) { + fragment_tree_update_inner( + ctx, + state, + Some(leaf_hash), + None, + ).await +} + #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn prospective_backed_notification_fragment_tree_updates( ctx: &mut Context, + state: &mut State, para_id: ParaId, - head_data_hash: Hash, + para_head: Hash, ) { - // TODO [now] - // 1. get hypothetical candidates - // 2. find out which are in the frontier - // 3. note that they are - // 4. schedule requests/import statements accordingly. 
+ fragment_tree_update_inner( + ctx, + state, + None, + Some((para_head, para_id)), + ).await } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] From a150fb4ee81ece45bc723c847f4b418d27a15fa7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 18 Jan 2023 14:56:59 -0600 Subject: [PATCH 113/220] add a helper function for getting statements unknown by backing --- .../src/vstaging/statement_store.rs | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index e6a93a913f02..ee0f25e64362 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -211,6 +211,35 @@ impl StatementStore { .get(&(group_index, candidate_hash)) .map_or(false, |s| s.is_backable()) } + + /// Get an iterator over all statements marked as being unknown by the backing subsystem. + pub fn fresh_statements_for_backing<'a>( + &'a self, + validators: &'a [ValidatorIndex], + candidate_hash: CandidateHash, + ) + -> impl IntoIterator + 'a + { + let s_st = CompactStatement::Seconded(candidate_hash); + let v_st = CompactStatement::Valid(candidate_hash); + + validators.iter() + .flat_map(move |v| { + let a = self.known_statements.get(&(*v, s_st.clone())); + let b = self.known_statements.get(&(*v, v_st.clone())); + + a.into_iter().chain(b) + }) + .filter(|stored| !stored.known_by_backing) + .map(|stored| &stored.statement) + } + + /// Note that a statement is known by the backing subsystem. + pub fn note_known_by_backing(&mut self, validator_index: ValidatorIndex, statement: CompactStatement) { + if let Some(stored) = self.known_statements.get_mut(&(validator_index, statement)) { + stored.known_by_backing = true; + } + } } /// Error indicating that the validator was unknown. 
From 80a051c1ee5dd9ebe0e3705400574a4db9f43f50 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 19 Jan 2023 12:27:00 -0600 Subject: [PATCH 114/220] import fresh statements to backing --- .../src/vstaging/candidates.rs | 47 +++-- .../src/vstaging/mod.rs | 178 ++++++++++++------ .../src/vstaging/statement_store.rs | 13 +- 3 files changed, 154 insertions(+), 84 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index 1244551ebb4e..f5725ab48752 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -145,10 +145,11 @@ impl Candidates { importable_under: HashSet::new(), }), ); - let new_confirmed = match self.candidates.get_mut(&candidate_hash).expect("just inserted; qed") { - CandidateState::Confirmed(x) => x, - _ => panic!("just inserted as confirmed; qed"), - }; + let new_confirmed = + match self.candidates.get_mut(&candidate_hash).expect("just inserted; qed") { + CandidateState::Confirmed(x) => x, + _ => panic!("just inserted as confirmed; qed"), + }; self.by_parent.entry((parent_hash, para_id)).or_default().insert(candidate_hash); @@ -162,7 +163,10 @@ impl Candidates { }; for (leaf_hash, x) in u.unconfirmed_importable_under { - if x.relay_parent == relay_parent && x.parent_hash == parent_hash && x.para_id == para_id { + if x.relay_parent == relay_parent && + x.parent_hash == parent_hash && + x.para_id == para_id + { new_confirmed.importable_under.insert(leaf_hash); } } @@ -221,11 +225,7 @@ impl Candidates { /// Note that a candidate is importable in a fragment tree indicated by the given /// leaf hash. 
- pub fn note_importable_under( - &mut self, - candidate: &HypotheticalCandidate, - leaf_hash: Hash, - ) { + pub fn note_importable_under(&mut self, candidate: &HypotheticalCandidate, leaf_hash: Hash) { match candidate { HypotheticalCandidate::Incomplete { candidate_hash, @@ -239,22 +239,19 @@ impl Candidates { para_id: *candidate_para, }; - if let Some(&mut CandidateState::Unconfirmed(ref mut c)) - = self.candidates.get_mut(&candidate_hash) + if let Some(&mut CandidateState::Unconfirmed(ref mut c)) = + self.candidates.get_mut(&candidate_hash) { c.note_maybe_importable_under(leaf_hash, u); } - } - HypotheticalCandidate::Complete { - candidate_hash, - .. - } => { - if let Some(&mut CandidateState::Confirmed(ref mut c)) - = self.candidates.get_mut(&candidate_hash) + }, + HypotheticalCandidate::Complete { candidate_hash, .. } => { + if let Some(&mut CandidateState::Confirmed(ref mut c)) = + self.candidates.get_mut(&candidate_hash) { c.importable_under.insert(leaf_hash); } - } + }, } } @@ -516,6 +513,16 @@ impl ConfirmedCandidate { self.receipt.descriptor().para_id } + /// Get the underlying candidate receipt. + pub fn candidate_receipt(&self) -> &Arc { + &self.receipt + } + + /// Get the persisted validation data. + pub fn persisted_validation_data(&self) -> &PersistedValidationData { + &self.persisted_validation_data + } + /// Whether the candidate is importable. 
pub fn is_importable<'a>(&self, under_active_leaf: impl Into>) -> bool { match under_active_leaf.into() { diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 9e95c98a880e..fe45342e3073 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -28,14 +28,18 @@ use polkadot_node_primitives::{ }; use polkadot_node_subsystem::{ jaeger, - messages::{CandidateBackingMessage, HypotheticalCandidate, NetworkBridgeEvent, NetworkBridgeTxMessage, ProspectiveParachainsMessage, HypotheticalFrontierRequest}, + messages::{ + CandidateBackingMessage, HypotheticalCandidate, HypotheticalFrontierRequest, + NetworkBridgeEvent, NetworkBridgeTxMessage, ProspectiveParachainsMessage, + }, overseer, ActivatedLeaf, ActiveLeavesUpdate, PerLeafSpan, StatementDistributionSenderTrait, }; use polkadot_node_subsystem_util::backing_implicit_view::{FetchError, View as ImplicitView}; use polkadot_primitives::vstaging::{ - AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, CoreState, - GroupIndex, Hash, Id as ParaId, IndexedVec, PersistedValidationData, SessionIndex, SessionInfo, - SignedStatement, SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex, GroupRotationInfo, CoreIndex, + AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, CoreIndex, + CoreState, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, IndexedVec, + PersistedValidationData, SessionIndex, SessionInfo, SignedStatement, SigningContext, + UncheckedSignedStatement, ValidatorId, ValidatorIndex, }; use sp_keystore::SyncCryptoStorePtr; @@ -375,16 +379,13 @@ pub(crate) async fn handle_active_leaves_update( .map_err(JfyiError::RuntimeApiUnavailable)? 
.map_err(JfyiError::FetchAvailabilityCores)?; - let group_rotation_info = polkadot_node_subsystem_util::request_validator_groups( - *new_relay_parent, - ctx.sender(), - ) - .await - .await - .map_err(JfyiError::RuntimeApiUnavailable)? - .map_err(JfyiError::FetchValidatorGroups)? - .1; - + let group_rotation_info = + polkadot_node_subsystem_util::request_validator_groups(*new_relay_parent, ctx.sender()) + .await + .await + .map_err(JfyiError::RuntimeApiUnavailable)? + .map_err(JfyiError::FetchValidatorGroups)? + .1; if !state.per_session.contains_key(&session_index) { let session_info = polkadot_node_subsystem_util::request_session_info( @@ -991,7 +992,9 @@ async fn handle_incoming_statement( .validator_group_index(originator_index) .expect("validator confirmed to be known by statement_store.insert; qed"); - // Insert an unconfirmed candidate entry if needed + // Insert an unconfirmed candidate entry if needed. Note that if the candidate is already confirmed, + // this ensures that the assigned group of the originator matches the expected group of the + // parachain. 
{ let res = state.candidates.insert_unconfirmed( peer.clone(), @@ -1007,6 +1010,7 @@ async fn handle_incoming_statement( } } + let confirmed = state.candidates.get_confirmed(&candidate_hash); let is_confirmed = state.candidates.is_confirmed(&candidate_hash); if !is_confirmed { // If the candidate is not confirmed, note that we should attempt @@ -1051,8 +1055,22 @@ async fn handle_incoming_statement( let is_backable = per_relay_parent.statement_store.is_backable(originator_group, candidate_hash); let is_importable = state.candidates.is_importable(&candidate_hash); + + if let (true, &Some(confirmed)) = (is_importable, &confirmed) { + send_backing_fresh_statements( + ctx, + candidate_hash, + originator_group, + &relay_parent, + &mut *per_relay_parent, + confirmed, + &*per_session, + ) + .await; + } + if !was_backable && is_backable && is_importable { - handle_backable_and_importable_candidate( + dispatch_announcements_and_acknowledgements( ctx, candidate_hash, originator_group, @@ -1063,10 +1081,6 @@ async fn handle_incoming_statement( &state.peers, ) .await; - } else if (is_backable || !is_confirmed) && is_importable { - // TODO [now]: this is either a grid statement on a candidate - // that was already backed _or_ a cluster statement. - // import statement into backing. } // We always circulate statements at this point. @@ -1160,12 +1174,52 @@ fn handle_grid_statement( Ok(checked_statement) } -// For a candidate that is both backable and importable, this -// 1) imports all known statements into the backing subsystem -// 2) dispatches backable candidate announcements or acknowledgements -// via the grid topology. 
If the session topology is not yet -// available, this will be a no-op -async fn handle_backable_and_importable_candidate( +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn send_backing_fresh_statements( + ctx: &mut Context, + candidate_hash: CandidateHash, + group_index: GroupIndex, + relay_parent: &Hash, + relay_parent_state: &mut PerRelayParentState, + confirmed: &candidates::ConfirmedCandidate, + per_session: &PerSessionState, +) { + let group_validators = per_session.groups.get(group_index).unwrap_or(&[]); + let mut imported = Vec::new(); + + for statement in relay_parent_state + .statement_store + .fresh_statements_for_backing(group_validators, candidate_hash) + { + let v = statement.validator_index(); + let compact = statement.payload().clone(); + imported.push((v, compact)); + let carrying_pvd = statement + .clone() + .convert_to_superpayload_with(|statement| match statement { + CompactStatement::Seconded(c_hash) => FullStatementWithPVD::Seconded( + (&**confirmed.candidate_receipt()).clone(), + confirmed.persisted_validation_data().clone(), + ), + CompactStatement::Valid(c_hash) => FullStatementWithPVD::Valid(c_hash), + }) + .expect("statements refer to same candidate; qed"); + + ctx.send_message(CandidateBackingMessage::Statement(*relay_parent, carrying_pvd)) + .await; + } + + for (v, s) in imported { + relay_parent_state.statement_store.note_known_by_backing(v, s); + } +} + +// For a candidate that has just become both backable and importable, this +// dispatches backable candidate announcements or acknowledgements +// via the grid topology. If the session topology is not yet +// available, this will be a no-op. 
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn dispatch_announcements_and_acknowledgements( ctx: &mut Context, candidate_hash: CandidateHash, group_index: GroupIndex, @@ -1180,8 +1234,6 @@ async fn handle_backable_and_importable_candidate( None => return, }; - // TODO [now]: dispatch all unknown statements to backing - let grid_view = match per_session.grid_view { Some(ref t) => t, None => { @@ -1258,7 +1310,8 @@ async fn fragment_tree_update_inner( fragment_tree_relay_parent: active_leaf_hash, }, tx, - )).await; + )) + .await; match rx.await { Ok(frontier) => frontier, @@ -1268,53 +1321,70 @@ async fn fragment_tree_update_inner( // 3. note that they are importable under a given leaf hash. for (hypo, membership) in frontier { // skip parablocks outside of the frontier - if membership.is_empty() { continue } + if membership.is_empty() { + continue + } for (leaf_hash, depths) in membership { state.candidates.note_importable_under(&hypo, leaf_hash); } match hypo { - HypotheticalCandidate::Complete { candidate_hash, receipt, persisted_validation_data } => { + HypotheticalCandidate::Complete { + candidate_hash, + receipt, + persisted_validation_data, + } => { // 4a. for confirmed candidates, if the candidate has enough statements to // back, send all statements which are new to backing. 
if let Some(prs) = state.per_relay_parent.get(&receipt.descriptor().relay_parent) { - let core_index = prs.availability_cores + let core_index = prs + .availability_cores .iter() .position(|c| c.para_id() == Some(receipt.descriptor().para_id)); - let group_index = core_index.map(|c| prs.group_rotation_info.group_for_core( - CoreIndex(c as _), - prs.availability_cores.len(), - )); - if let Some(true) = group_index.map(|g| prs.statement_store.is_backable(g, candidate_hash)) { + let group_index = core_index.map(|c| { + prs.group_rotation_info + .group_for_core(CoreIndex(c as _), prs.availability_cores.len()) + }); + if let Some(true) = + group_index.map(|g| prs.statement_store.is_backable(g, candidate_hash)) + { // TODO [now]: send statements to backing } } - } + }, HypotheticalCandidate::Incomplete { - candidate_hash, candidate_para, parent_head_data_hash, candidate_relay_parent, + candidate_hash, + candidate_para, + parent_head_data_hash, + candidate_relay_parent, } => { // 4b. for unconfirmed candidates, send requests to all advertising peers. 
// note that all unconfirmed hypothetical candidates are from the grid if let Some(prs) = state.per_relay_parent.get(&candidate_relay_parent) { - let core_index = prs.availability_cores + let core_index = prs + .availability_cores .iter() .position(|c| c.para_id() == Some(candidate_para)); - let group_index = core_index.map(|c| prs.group_rotation_info.group_for_core( - CoreIndex(c as _), - prs.availability_cores.len(), - )); + let group_index = core_index.map(|c| { + prs.group_rotation_info + .group_for_core(CoreIndex(c as _), prs.availability_cores.len()) + }); - let request_from = prs.local_validator.as_ref() + let request_from = prs + .local_validator + .as_ref() .zip(group_index) - .map(|(vs, group_index)| vs.grid_tracker.validators_to_request(candidate_hash, group_index)) + .map(|(vs, group_index)| { + vs.grid_tracker.validators_to_request(candidate_hash, group_index) + }) .unwrap_or_default(); // TODO [now]: issue requests } - } + }, } } } @@ -1325,12 +1395,7 @@ async fn new_leaf_fragment_tree_updates( state: &mut State, leaf_hash: Hash, ) { - fragment_tree_update_inner( - ctx, - state, - Some(leaf_hash), - None, - ).await + fragment_tree_update_inner(ctx, state, Some(leaf_hash), None).await } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] @@ -1340,12 +1405,7 @@ async fn prospective_backed_notification_fragment_tree_updates( para_id: ParaId, para_head: Hash, ) { - fragment_tree_update_inner( - ctx, - state, - None, - Some((para_head, para_id)), - ).await + fragment_tree_update_inner(ctx, state, None, Some((para_head, para_id))).await } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index ee0f25e64362..e944568450e1 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ 
b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -217,13 +217,12 @@ impl StatementStore { &'a self, validators: &'a [ValidatorIndex], candidate_hash: CandidateHash, - ) - -> impl IntoIterator + 'a - { + ) -> impl IntoIterator + 'a { let s_st = CompactStatement::Seconded(candidate_hash); let v_st = CompactStatement::Valid(candidate_hash); - validators.iter() + validators + .iter() .flat_map(move |v| { let a = self.known_statements.get(&(*v, s_st.clone())); let b = self.known_statements.get(&(*v, v_st.clone())); @@ -235,7 +234,11 @@ impl StatementStore { } /// Note that a statement is known by the backing subsystem. - pub fn note_known_by_backing(&mut self, validator_index: ValidatorIndex, statement: CompactStatement) { + pub fn note_known_by_backing( + &mut self, + validator_index: ValidatorIndex, + statement: CompactStatement, + ) { if let Some(stored) = self.known_statements.get_mut(&(validator_index, statement)) { stored.known_by_backing = true; } From 2d2e9743853cd7229c63fa20bd852de099d629f3 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 19 Jan 2023 13:18:52 -0600 Subject: [PATCH 115/220] send announcements and acknowledgements over grid --- node/network/protocol/src/lib.rs | 26 +++++- .../src/vstaging/candidates.rs | 13 +-- .../src/vstaging/grid.rs | 1 + .../src/vstaging/mod.rs | 86 +++++++++++++------ .../src/vstaging/statement_store.rs | 16 +++- 5 files changed, 108 insertions(+), 34 deletions(-) diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index e790282a150f..7a2df17fd5a2 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -643,6 +643,30 @@ pub mod vstaging { pub validated_in_group: BitVec, } + /// An acknowledgement of a backed candidate being known. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct BackedCandidateAcknowledgement { + /// The relay-parent of the candidate. + pub relay_parent: Hash, + /// The hash of the candidate. 
+ pub candidate_hash: CandidateHash, + /// A bitfield which indicates which validators in the para's + /// group at the relay-parent have validated this candidate + /// and issued `Seconded` statements about it. + /// + /// This MUST have exactly the minimum amount of bytes + /// necessary to represent the number of validators in the + /// assigned backing group as-of the relay-parent. + pub seconded_in_group: BitVec, + /// A bitfield which indicates which validators in the para's + /// group at the relay-parent have validated this candidate + /// and issued `Valid` statements about it. + /// + /// This MUST have exactly the minimum amount of bytes + /// necessary to represent the number of validators in the + /// assigned backing group as-of the relay-parent. + pub validated_in_group: BitVec, + } /// Network messages used by the statement distribution subsystem. #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] pub enum StatementDistributionMessage { @@ -659,7 +683,7 @@ pub mod vstaging { /// A notification of a backed candidate being known by the sending node, /// for the purpose of informing a receiving node which already has the candidate. #[codec(index = 2)] - BackedCandidateKnown(Hash, CandidateHash), + BackedCandidateKnown(BackedCandidateAcknowledgement), /// All messages for V1 for compatibility with the statement distribution /// protocol, for relay-parents that don't support asynchronous backing. 
diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index f5725ab48752..0222024c2846 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -90,7 +90,7 @@ impl Candidates { } if let Some((claimed_parent_hash, claimed_id)) = claimed_parent_hash_and_id { - if c.parent_hash() != claimed_parent_hash { + if c.parent_head_data_hash() != claimed_parent_hash { return Err(BadAdvertisement) } @@ -312,7 +312,7 @@ impl Candidates { self.candidates.retain(|c_hash, state| match state { CandidateState::Confirmed(ref mut c) => if !relay_parent_live(&c.relay_parent()) { - remove_parent_claims(*c_hash, c.parent_hash(), c.para_id()); + remove_parent_claims(*c_hash, c.parent_head_data_hash(), c.para_id()); false } else { for leaf_hash in leaves { @@ -531,12 +531,13 @@ impl ConfirmedCandidate { } } - fn group_index(&self) -> GroupIndex { - self.assigned_group + /// Get the parent head data hash. + pub fn parent_head_data_hash(&self) -> Hash { + self.parent_hash } - fn parent_hash(&self) -> Hash { - self.parent_hash + fn group_index(&self) -> GroupIndex { + self.assigned_group } fn to_hypothetical(&self, candidate_hash: CandidateHash) -> HypotheticalCandidate { diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 84c310f15bf6..c6dc0044c994 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -666,6 +666,7 @@ enum StatementKind { /// Bitfields indicating the statements that are known or undesired /// about a candidate. +#[derive(Clone)] pub struct StatementFilter { /// Seconded statements. '1' is known or undesired. 
pub seconded_in_group: BitVec, diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index fe45342e3073..8297dcf9e317 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -315,10 +315,7 @@ pub(crate) async fn handle_network_update( protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(inner), ) => {}, // TODO [now] net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown( - relay_parent, - candidate_hash, - ), + protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(inner), ) => {}, // TODO [now] } }, @@ -858,8 +855,6 @@ async fn report_peer( /// all statements about the candidate have been sent to backing /// - If the candidate is in-cluster and is importable, /// the statement has been sent to backing -/// - If the candidate just became backable, appropriate announcements -/// and acknowledgement along the topology have been made. #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn handle_incoming_statement( ctx: &mut Context, @@ -1069,20 +1064,6 @@ async fn handle_incoming_statement( .await; } - if !was_backable && is_backable && is_importable { - dispatch_announcements_and_acknowledgements( - ctx, - candidate_hash, - originator_group, - &relay_parent, - &mut *per_relay_parent, - &*per_session, - &state.authorities, - &state.peers, - ) - .await; - } - // We always circulate statements at this point. circulate_statement(ctx, state, relay_parent, originator_group, checked_statement).await; } else { @@ -1214,17 +1195,17 @@ async fn send_backing_fresh_statements( } } -// For a candidate that has just become both backable and importable, this -// dispatches backable candidate announcements or acknowledgements -// via the grid topology. 
If the session topology is not yet +// This provides a backable candidate to the grid and dispatches backable candidate announcements +// and acknowledgements via the grid topology. If the session topology is not yet // available, this will be a no-op. #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn dispatch_announcements_and_acknowledgements( +async fn provide_candidate_to_grid( ctx: &mut Context, candidate_hash: CandidateHash, group_index: GroupIndex, relay_parent: &Hash, relay_parent_state: &mut PerRelayParentState, + confirmed_candidate: &candidates::ConfirmedCandidate, per_session: &PerSessionState, authorities: &HashMap, peers: &HashMap, @@ -1270,6 +1251,41 @@ async fn dispatch_announcements_and_acknowledgements( group_size, ); + let filter = { + let mut f = StatementFilter::new(group_size); + relay_parent_state.statement_store.fill_statement_filter( + group_index, + candidate_hash, + &mut f, + ); + f + }; + + let manifest = protocol_vstaging::BackedCandidateManifest { + relay_parent: *relay_parent, + candidate_hash, + group_index, + para_id: confirmed_candidate.para_id(), + parent_head_data_hash: confirmed_candidate.parent_head_data_hash(), + seconded_in_group: filter.seconded_in_group.clone(), + validated_in_group: filter.validated_in_group.clone(), + }; + let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { + relay_parent: *relay_parent, + candidate_hash, + seconded_in_group: filter.seconded_in_group.clone(), + validated_in_group: filter.validated_in_group.clone(), + }; + + let inventory_message = Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + ); + let ack_message = Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), + ); + + let mut inventory_peers = Vec::new(); + let mut ack_peers = Vec::new(); for (v, action) in actions { let p = match connected_validator_peer(authorities, 
per_session, v) { None => continue, @@ -1283,13 +1299,31 @@ async fn dispatch_announcements_and_acknowledgements( match action { grid::PostBackingAction::Advertise => { - // TODO [now]: send inventory message and `note_advertised_to`. + inventory_peers.push(p); + local_validator + .grid_tracker + .note_advertised_to(v, candidate_hash, filter.clone()); }, grid::PostBackingAction::Acknowledge => { - // TODO [now]: send acknowledgement message and `note_local_acknowledged`. + ack_peers.push(p); + // TODO [now]: send follow-up statements as well + local_validator.grid_tracker.note_local_acknowledged( + v, + candidate_hash, + filter.clone(), + ); }, } } + + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + inventory_peers, + inventory_message.into(), + )) + .await; + + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(ack_peers, ack_message.into())) + .await; } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index e944568450e1..3bf380533889 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -29,7 +29,7 @@ use polkadot_primitives::vstaging::{ }; use std::collections::hash_map::{Entry as HEntry, HashMap}; -use super::groups::Groups; +use super::{grid::StatementFilter, groups::Groups}; /// Possible origins of a statement. pub enum StatementOrigin { @@ -177,6 +177,20 @@ impl StatementStore { }) } + /// Fill a `StatementFilter` to be used in the grid topology with all statements + /// we are already aware of. 
+ pub fn fill_statement_filter( + &self, + group_index: GroupIndex, + candidate_hash: CandidateHash, + statement_filter: &mut StatementFilter, + ) { + if let Some(statements) = self.group_statements.get(&(group_index, candidate_hash)) { + statement_filter.seconded_in_group |= statements.seconded.as_bitslice(); + statement_filter.validated_in_group |= statements.valid.as_bitslice(); + } + } + // TODO [now]: this may not be useful. /// Get an iterator over signed statements of the given form by the given group. pub fn group_statements<'a>( From 50fb10eabf6a921dbadb00ad4fdcb8e99feb89ca Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 19 Jan 2023 21:01:53 -0600 Subject: [PATCH 116/220] provide freshly importable statements also avoid tracking backed candidates in statement distribution --- node/core/backing/src/lib.rs | 4 + .../network/statement-distribution/src/lib.rs | 2 +- .../src/vstaging/candidates.rs | 19 +++++ .../src/vstaging/mod.rs | 78 +++++++++++-------- .../src/vstaging/statement_store.rs | 21 ----- node/subsystem-types/src/messages.rs | 7 ++ 6 files changed, 77 insertions(+), 54 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index db84a03ff6dc..5473ba128914 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -1578,6 +1578,10 @@ async fn import_statement( para_head: backed.candidate.descriptor.para_head, }) .await; + + // TODO [now]: notify statement distribution of backed + // candidate. alter control flow so "Share" is always sent + // first. } else { // The provisioner waits on candidate-backing, which means // that we need to send unbounded messages to avoid cycles. 
diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 09a0183067bb..adcf81c4977e 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -266,7 +266,7 @@ impl StatementDistributionSubsystem { // TODO [now]: pass to vstaging, but not if the message is // v1 or the connecting peer is v1. }, - StatementDistributionMessage::Backed { para_id, para_head } => { + StatementDistributionMessage::Backed { para_id, candidate_hash, para_head } => { // TODO [now]: pass to vstaging }, }, diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index 0222024c2846..81e2407ba034 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -143,6 +143,7 @@ impl Candidates { assigned_group, parent_hash, importable_under: HashSet::new(), + backed: false, }), ); let new_confirmed = @@ -223,6 +224,11 @@ impl Candidates { self.get_confirmed(candidate_hash).map_or(false, |c| c.is_importable(None)) } + /// Whether the candidate is marked as backed. + pub fn is_backed(&self, candidate_hash: &CandidateHash) -> bool { + self.get_confirmed(candidate_hash).map_or(false, |c| c.is_backed()) + } + /// Note that a candidate is importable in a fragment tree indicated by the given /// leaf hash. pub fn note_importable_under(&mut self, candidate: &HypotheticalCandidate, leaf_hash: Hash) { @@ -255,6 +261,13 @@ impl Candidates { } } + /// Note that a candidate is backed. No-op if the candidate is not confirmed. + pub fn note_backed(&mut self, candidate_hash: &CandidateHash) { + if let Some(&mut CandidateState::Confirmed(ref mut c)) = self.candidates.get_mut(candidate_hash) { + c.backed = true; + } + } + /// Get all hypothetical candidates which should be tested /// for inclusion in the frontier. 
/// @@ -500,6 +513,7 @@ pub struct ConfirmedCandidate { parent_hash: Hash, // active leaves statements about this candidate are importable under. importable_under: HashSet, + backed: bool, } impl ConfirmedCandidate { @@ -531,6 +545,11 @@ impl ConfirmedCandidate { } } + /// Whether the candidate is marked as being backed. + pub fn is_backed(&self) -> bool { + self.backed + } + /// Get the parent head data hash. pub fn parent_head_data_hash(&self) -> Hash { self.parent_hash diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 8297dcf9e317..c2217f6e6023 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -1022,9 +1022,6 @@ async fn handle_incoming_statement( request_entry.get_mut().set_cluster_priority(); } - let was_backable = - per_relay_parent.statement_store.is_backable(originator_group, candidate_hash); - let was_fresh = match per_relay_parent.statement_store.insert( &per_session.groups, checked_statement.clone(), @@ -1046,9 +1043,6 @@ async fn handle_incoming_statement( if was_fresh { report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT_FIRST).await; - - let is_backable = - per_relay_parent.statement_store.is_backable(originator_group, candidate_hash); let is_importable = state.candidates.is_importable(&candidate_hash); if let (true, &Some(confirmed)) = (is_importable, &confirmed) { @@ -1155,6 +1149,8 @@ fn handle_grid_statement( Ok(checked_statement) } +/// Send backing fresh statements. This should only be performed on importable & confirmed +/// candidates. #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn send_backing_fresh_statements( ctx: &mut Context, @@ -1297,6 +1293,8 @@ async fn provide_candidate_to_grid( }, }; + // TODO [now]: only send if peer has relay parent in implicit view? 
+ match action { grid::PostBackingAction::Advertise => { inventory_peers.push(p); @@ -1326,6 +1324,22 @@ async fn provide_candidate_to_grid( .await; } +fn group_for_para( + availability_cores: &[CoreState], + group_rotation_info: &GroupRotationInfo, + para_id: ParaId, +) -> Option { + // Note: this won't work well for parathreads as it assumes that core assignments are fixed + // across blocks. + let core_index = availability_cores + .iter() + .position(|c| c.para_id() == Some(para_id)); + + core_index.map(|c| { + group_rotation_info.group_for_core(CoreIndex(c as _), availability_cores.len()) + }) +} + #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn fragment_tree_update_inner( ctx: &mut Context, @@ -1369,22 +1383,27 @@ async fn fragment_tree_update_inner( receipt, persisted_validation_data, } => { - // 4a. for confirmed candidates, if the candidate has enough statements to - // back, send all statements which are new to backing. - if let Some(prs) = state.per_relay_parent.get(&receipt.descriptor().relay_parent) { - let core_index = prs - .availability_cores - .iter() - .position(|c| c.para_id() == Some(receipt.descriptor().para_id)); - - let group_index = core_index.map(|c| { - prs.group_rotation_info - .group_for_core(CoreIndex(c as _), prs.availability_cores.len()) - }); - if let Some(true) = - group_index.map(|g| prs.statement_store.is_backable(g, candidate_hash)) - { - // TODO [now]: send statements to backing + // 4a. for confirmed candidates, send all statements which are new to backing. 
+ let confirmed_candidate = state.candidates.get_confirmed(&candidate_hash); + let prs = state.per_relay_parent.get_mut(&receipt.descriptor().relay_parent); + if let (Some(confirmed), Some(prs)) = (confirmed_candidate, prs) { + let group_index = group_for_para( + &prs.availability_cores, + &prs.group_rotation_info, + receipt.descriptor().para_id, + ); + + let per_session = state.per_session.get(&prs.session); + if let (Some(per_session), Some(group_index)) = (per_session, group_index) { + send_backing_fresh_statements( + ctx, + candidate_hash, + group_index, + &receipt.descriptor().relay_parent, + prs, + confirmed, + per_session, + ).await; } } }, @@ -1395,17 +1414,12 @@ async fn fragment_tree_update_inner( candidate_relay_parent, } => { // 4b. for unconfirmed candidates, send requests to all advertising peers. - // note that all unconfirmed hypothetical candidates are from the grid if let Some(prs) = state.per_relay_parent.get(&candidate_relay_parent) { - let core_index = prs - .availability_cores - .iter() - .position(|c| c.para_id() == Some(candidate_para)); - - let group_index = core_index.map(|c| { - prs.group_rotation_info - .group_for_core(CoreIndex(c as _), prs.availability_cores.len()) - }); + let group_index = group_for_para( + &prs.availability_cores, + &prs.group_rotation_info, + candidate_para, + ); let request_from = prs .local_validator diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index 3bf380533889..8909614741cf 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -219,13 +219,6 @@ impl StatementStore { self.known_statements.get(&(validator_index, statement)).map(|s| &s.statement) } - /// Whether a candidate has enough statements to be backed. 
- pub fn is_backable(&self, group_index: GroupIndex, candidate_hash: CandidateHash) -> bool { - self.group_statements - .get(&(group_index, candidate_hash)) - .map_or(false, |s| s.is_backable()) - } - /// Get an iterator over all statements marked as being unknown by the backing subsystem. pub fn fresh_statements_for_backing<'a>( &'a self, @@ -283,20 +276,6 @@ impl GroupStatements { } } - fn is_backable(&self) -> bool { - let votes = self - .seconded - .iter() - .by_vals() - .zip(self.valid.iter().by_vals()) - .filter(|&(s, v)| s || v) // no double-counting - .count(); - - let threshold = super::minimum_votes(self.valid.len()); - - votes >= threshold - } - fn note_seconded(&mut self, within_group_index: usize) { self.seconded.set(within_group_index, true); } diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 2ea55ec2de50..d12660b6a67f 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -670,9 +670,16 @@ pub enum StatementDistributionMessage { /// given relay-parent hash and it should be distributed to other validators. Share(Hash, SignedFullStatementWithPVD), /// The candidate received enough validity votes from the backing group. + /// + /// If the candidate is backed as a result of a local statement, this message MUST + /// be preceded by a `Share` message for that statement. This ensures that Statement Distribution + /// is always aware of full candidates prior to receiving the `Backed` notification, even + /// when the group size is 1 and the candidate is seconded locally. Backed { /// Candidate's para id. para_id: ParaId, + /// The candidate's hash. + candidate_hash: CandidateHash, /// Hash of the para head generated by candidate. 
para_head: Hash, }, From 4c5b6017f4824b753cf3d68b8c1f12cb80d65c6f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 19 Jan 2023 21:07:29 -0600 Subject: [PATCH 117/220] do not issue requests on newly importable candidates --- .../src/vstaging/mod.rs | 80 ++++++------------- 1 file changed, 26 insertions(+), 54 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index c2217f6e6023..ba882d000a85 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -1377,62 +1377,34 @@ async fn fragment_tree_update_inner( state.candidates.note_importable_under(&hypo, leaf_hash); } - match hypo { - HypotheticalCandidate::Complete { - candidate_hash, - receipt, - persisted_validation_data, - } => { - // 4a. for confirmed candidates, send all statements which are new to backing. - let confirmed_candidate = state.candidates.get_confirmed(&candidate_hash); - let prs = state.per_relay_parent.get_mut(&receipt.descriptor().relay_parent); - if let (Some(confirmed), Some(prs)) = (confirmed_candidate, prs) { - let group_index = group_for_para( - &prs.availability_cores, - &prs.group_rotation_info, - receipt.descriptor().para_id, - ); - - let per_session = state.per_session.get(&prs.session); - if let (Some(per_session), Some(group_index)) = (per_session, group_index) { - send_backing_fresh_statements( - ctx, - candidate_hash, - group_index, - &receipt.descriptor().relay_parent, - prs, - confirmed, - per_session, - ).await; - } - } - }, - HypotheticalCandidate::Incomplete { - candidate_hash, - candidate_para, - parent_head_data_hash, - candidate_relay_parent, - } => { - // 4b. for unconfirmed candidates, send requests to all advertising peers. 
- if let Some(prs) = state.per_relay_parent.get(&candidate_relay_parent) { - let group_index = group_for_para( - &prs.availability_cores, - &prs.group_rotation_info, - candidate_para, - ); - - let request_from = prs - .local_validator - .as_ref() - .zip(group_index) - .map(|(vs, group_index)| { - vs.grid_tracker.validators_to_request(candidate_hash, group_index) - }) - .unwrap_or_default(); + // 4. for confirmed candidates, send all statements which are new to backing. + if let HypotheticalCandidate::Complete { + candidate_hash, + receipt, + persisted_validation_data, + } = hypo { + let confirmed_candidate = state.candidates.get_confirmed(&candidate_hash); + let prs = state.per_relay_parent.get_mut(&receipt.descriptor().relay_parent); + if let (Some(confirmed), Some(prs)) = (confirmed_candidate, prs) { + let group_index = group_for_para( + &prs.availability_cores, + &prs.group_rotation_info, + receipt.descriptor().para_id, + ); - // TODO [now]: issue requests + let per_session = state.per_session.get(&prs.session); + if let (Some(per_session), Some(group_index)) = (per_session, group_index) { + send_backing_fresh_statements( + ctx, + candidate_hash, + group_index, + &receipt.descriptor().relay_parent, + prs, + confirmed, + per_session, + ).await; } - }, + } } } } From aaa41cca4493354637585302b0530686c03812ff Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 19 Jan 2023 21:09:59 -0600 Subject: [PATCH 118/220] add TODO for later when confirming candidate --- node/network/statement-distribution/src/vstaging/mod.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index ba882d000a85..b9883ee559db 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -590,6 +590,11 @@ pub(crate) async fn share_local_statement( }, Ok(true) => (compact_statement, candidate_hash), } + 
+ // TODO [now]: the candidate is confirmed, so + // a) cancel requests + // b) import previously-received statements. + // Probably: create a helper function that does all this + applies post-confirmation reckoning. }; // send the compact version of the statement to any peers which need it. From 51a891cf060ed637b2b5634381cd80cf0f6062da Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 19 Jan 2023 21:29:45 -0600 Subject: [PATCH 119/220] write a routine for handling backed candidate notifications --- .../network/statement-distribution/src/lib.rs | 8 ++- .../src/vstaging/candidates.rs | 4 +- .../src/vstaging/mod.rs | 58 +++++++++++++++---- node/subsystem-types/src/messages.rs | 9 +-- 4 files changed, 56 insertions(+), 23 deletions(-) diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index adcf81c4977e..e910bc410830 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -266,8 +266,12 @@ impl StatementDistributionSubsystem { // TODO [now]: pass to vstaging, but not if the message is // v1 or the connecting peer is v1. }, - StatementDistributionMessage::Backed { para_id, candidate_hash, para_head } => { - // TODO [now]: pass to vstaging + StatementDistributionMessage::Backed(candidate_hash) => { + crate::vstaging::handle_backed_candidate_message( + ctx, + unimplemented!(), // TODO [now] state + candidate_hash, + ).await; }, }, } diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index 81e2407ba034..37d41868a071 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -555,7 +555,9 @@ impl ConfirmedCandidate { self.parent_hash } - fn group_index(&self) -> GroupIndex { + /// Get the group index of the assigned group. 
Note that this is in the context + /// of the state of the chain at the candidate's relay parent and its para-id. + pub fn group_index(&self) -> GroupIndex { self.assigned_group } diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index b9883ee559db..80eccd9cc367 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -17,6 +17,9 @@ //! Implementation of the v2 statement distribution protocol, //! designed for asynchronous backing. +// TODO [now]: remove before merging & fix warnings +#![allow(unused)] + use polkadot_node_network_protocol::{ self as net_protocol, grid_topology::{RequiredRouting, SessionGridTopology}, @@ -1203,8 +1206,6 @@ async fn send_backing_fresh_statements( async fn provide_candidate_to_grid( ctx: &mut Context, candidate_hash: CandidateHash, - group_index: GroupIndex, - relay_parent: &Hash, relay_parent_state: &mut PerRelayParentState, confirmed_candidate: &candidates::ConfirmedCandidate, per_session: &PerSessionState, @@ -1216,6 +1217,9 @@ async fn provide_candidate_to_grid( None => return, }; + let relay_parent = confirmed_candidate.relay_parent(); + let group_index = confirmed_candidate.group_index(); + let grid_view = match per_session.grid_view { Some(ref t) => t, None => { @@ -1263,7 +1267,7 @@ async fn provide_candidate_to_grid( }; let manifest = protocol_vstaging::BackedCandidateManifest { - relay_parent: *relay_parent, + relay_parent, candidate_hash, group_index, para_id: confirmed_candidate.para_id(), @@ -1272,7 +1276,7 @@ async fn provide_candidate_to_grid( validated_in_group: filter.validated_in_group.clone(), }; let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { - relay_parent: *relay_parent, + relay_parent, candidate_hash, seconded_in_group: filter.seconded_in_group.clone(), validated_in_group: filter.validated_in_group.clone(), @@ -1291,15 +1295,13 @@ 
async fn provide_candidate_to_grid( let p = match connected_validator_peer(authorities, per_session, v) { None => continue, Some(p) => - if peers.get(&p).map_or(false, |d| d.knows_relay_parent(relay_parent)) { + if peers.get(&p).map_or(false, |d| d.knows_relay_parent(&relay_parent)) { p } else { continue }, }; - // TODO [now]: only send if peer has relay parent in implicit view? - match action { grid::PostBackingAction::Advertise => { inventory_peers.push(p); @@ -1450,16 +1452,48 @@ async fn handle_incoming_manifest( // frontier, update candidates wrapper, add request entry if so. } +/// Handle a notification of a candidate being backed. #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn handle_cluster_newly_backed( +pub(crate) async fn handle_backed_candidate_message( ctx: &mut Context, state: &mut State, - relay_parent: Hash, candidate_hash: CandidateHash, ) { - // TODO [now] - // 1. for confirmed & importable candidates only - // 2. send advertisements along the grid + // If the candidate is unknown or unconfirmed, it's a race (pruned before receiving message) + // or a bug. 
Ignore if so + state.candidates.note_backed(&candidate_hash); + let confirmed = match state.candidates.get_confirmed(&candidate_hash) { + None => { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + "Received backed candidate notification for unknown or unconfirmed", + ); + + return + } + Some(c) => c, + }; + + let relay_parent_state = match state.per_relay_parent.get_mut(&confirmed.relay_parent()) { + None => return, + Some(s) => s, + }; + + let per_session = match state.per_session.get(&relay_parent_state.session) { + None => return, + Some(s) => s, + }; + + provide_candidate_to_grid( + ctx, + candidate_hash, + relay_parent_state, + confirmed, + per_session, + &state.authorities, + &state.peers, + ).await; } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index d12660b6a67f..6f01423f43f9 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -675,14 +675,7 @@ pub enum StatementDistributionMessage { /// be preceded by a `Share` message for that statement. This ensures that Statement Distribution /// is always aware of full candidates prior to receiving the `Backed` notification, even /// when the group size is 1 and the candidate is seconded locally. - Backed { - /// Candidate's para id. - para_id: ParaId, - /// The candidate's hash. - candidate_hash: CandidateHash, - /// Hash of the para head generated by candidate. - para_head: Hash, - }, + Backed(CandidateHash), /// Event from the network bridge. 
#[from] NetworkBridgeUpdate(NetworkBridgeEvent), From dcdce4688eb739d42bedf6bfa40be77331f818a0 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 20 Jan 2023 18:54:45 -0600 Subject: [PATCH 120/220] simplify grid substantially --- node/network/protocol/src/lib.rs | 3 +- .../src/vstaging/grid.rs | 290 +++++++----------- .../src/vstaging/mod.rs | 25 +- 3 files changed, 127 insertions(+), 191 deletions(-) diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 7a2df17fd5a2..44100d4b1faf 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -646,8 +646,6 @@ pub mod vstaging { /// An acknowledgement of a backed candidate being known. #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] pub struct BackedCandidateAcknowledgement { - /// The relay-parent of the candidate. - pub relay_parent: Hash, /// The hash of the candidate. pub candidate_hash: CandidateHash, /// A bitfield which indicates which validators in the para's @@ -667,6 +665,7 @@ pub mod vstaging { /// assigned backing group as-of the relay-parent. pub validated_in_group: BitVec, } + /// Network messages used by the statement distribution subsystem. #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] pub enum StatementDistributionMessage { diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index c6dc0044c994..cebeb079f1a0 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -156,10 +156,16 @@ pub fn build_session_topology<'a>( view } -/// Actions that can be taken once affirming that a candidate is backed. -pub enum PostBackingAction { - Acknowledge, - Advertise, +/// The kind of backed candidate manifest we should send to a remote peer. 
+#[derive(Debug, Clone, Copy, PartialEq)] +pub enum ManifestKind { + /// Full manifests contain information about the candidate and should be sent + /// to peers which aren't guaranteed to have the candidate already. + Full, + /// Acknowledgement manifests omit information which is implicit in the candidate + /// itself, and should be sent to peers which are guaranteed to have the candidate + /// already. + Acknowledgement, } /// A tracker of knowledge from authorities within the grid for a particular @@ -169,6 +175,7 @@ pub struct GridTracker { received: HashMap, confirmed_backed: HashMap, unconfirmed: HashMap>, + pending_communication: HashMap>, } impl GridTracker { @@ -178,6 +185,9 @@ impl GridTracker { /// about this group at this relay-parent. This also does sanity /// checks on the format of the manifest and the amount of votes /// it contains. It has effects on the stored state only when successful. + /// + /// This returns an optional `ManifestKind` indicating the type of manifest + /// to be sent in response to the received manifest. 
pub fn import_manifest( &mut self, session_topology: &SessionTopologyView, @@ -185,15 +195,30 @@ impl GridTracker { candidate_hash: CandidateHash, seconding_limit: usize, manifest: ManifestSummary, + kind: ManifestKind, sender: ValidatorIndex, - ) -> Result<(), ManifestImportError> { + ) -> Result, ManifestImportError> { let claimed_group_index = manifest.claimed_group_index; - if session_topology - .group_views - .get(&manifest.claimed_group_index) - .map_or(true, |g| !g.receiving.contains(&sender)) - { + let group_topology = match session_topology.group_views.get(&manifest.claimed_group_index) { + None => return Err(ManifestImportError::Disallowed), + Some(g) => g, + }; + + let is_receiver = group_topology.receiving.contains(&sender); + let is_sender = group_topology.sending.contains(&sender); + let manifest_allowed = match kind { + // Peers can send manifests _if_: + // * They are in the receiving set for the group AND the manifest is full OR + // * They are in the sending set for the group AND we have sent them + // a manifest AND the received manifest is partial. + ManifestKind::Full => is_receiver, + ManifestKind::Acknowledgement => is_sender && self.confirmed_backed + .get(&candidate_hash) + .map_or(false, |c| c.has_sent_manifest_to(sender)), + }; + + if !manifest_allowed { return Err(ManifestImportError::Disallowed) } @@ -238,9 +263,17 @@ impl GridTracker { manifest, )?; + let mut to_send = None; if let Some(confirmed) = self.confirmed_backed.get_mut(&candidate_hash) { - // TODO [now]: send statements they need? 
- confirmed.note_remote_advertised(sender, remote_knowledge); + // TODO [now]: send statements they need if this is an ack + if is_receiver && !confirmed.has_sent_manifest_to(sender) { + self.pending_communication.entry(sender) + .or_default() + .insert(candidate_hash, ManifestKind::Acknowledgement); + + to_send = Some(ManifestKind::Acknowledgement); + } + confirmed.manifest_received_from(sender, remote_knowledge); } else { // received prevents conflicting manifests so this is max 1 per validator. self.unconfirmed @@ -249,22 +282,22 @@ impl GridTracker { .push((sender, claimed_group_index)) } - Ok(()) + Ok(to_send) } /// Add a new backed candidate to the tracker. This yields /// an iterator of validators which we should either advertise to - /// or signal that we know the candidate. + /// or signal that we know the candidate, along with the corresponding + /// type of manifest we should send. pub fn add_backed_candidate( &mut self, session_topology: &SessionTopologyView, candidate_hash: CandidateHash, group_index: GroupIndex, group_size: usize, - ) -> Vec<(ValidatorIndex, PostBackingAction)> { - let mut actions = Vec::new(); + ) -> Vec<(ValidatorIndex, ManifestKind)> { let c = match self.confirmed_backed.entry(candidate_hash) { - Entry::Occupied(_) => return actions, + Entry::Occupied(_) => return Vec::new(), Entry::Vacant(v) => v.insert(KnownBackedCandidate { group_index, mutual_knowledge: HashMap::new() }), }; @@ -284,93 +317,90 @@ impl GridTracker { .and_then(|r| r.candidate_statement_filter(&candidate_hash)) .expect("unconfirmed is only populated by validators who have sent manifest; qed"); - c.note_remote_advertised(v, statement_filter); + + // No need to send direct statements, because our local knowledge is `None` + c.manifest_received_from(v, statement_filter); } let group_topology = match session_topology.group_views.get(&group_index) { - None => return actions, + None => return Vec::new(), Some(g) => g, }; // advertise onwards ad accept received 
advertisements - for &v in &group_topology.sending { - if c.should_advertise(v) { - actions.push((v, PostBackingAction::Advertise)) - } - } + let sending_group_manifests = group_topology + .sending + .iter() + .map(|v| (*v, ManifestKind::Full)); - for &v in &group_topology.receiving { - if c.can_local_acknowledge(v) { - actions.push((v, PostBackingAction::Acknowledge)) - } + let receiving_group_manifests = group_topology + .receiving + .iter() + .filter_map(|v| if c.has_received_manifest_from(*v) { + Some((*v, ManifestKind::Acknowledgement)) + } else { + None + }); + + // Note that order is important: if a validator is part of both the sending + // and receiving groups, we may overwrite a `Full` manifest with a `Acknowledgement` + // one. + for (v, manifest_mode) in sending_group_manifests.chain(receiving_group_manifests) { + self.pending_communication.entry(v).or_default().insert(candidate_hash, manifest_mode); } - actions + self.pending_communication + .iter() + .filter_map(|(v, x)| x.get(&candidate_hash).map(|k| (*v, *k))) + .collect() } /// Note that a backed candidate has been advertised to a /// given validator. - pub fn note_advertised_to( + pub fn manifest_sent_to( &mut self, validator_index: ValidatorIndex, candidate_hash: CandidateHash, local_knowledge: StatementFilter, ) { if let Some(c) = self.confirmed_backed.get_mut(&candidate_hash) { - c.note_advertised_to(validator_index, local_knowledge); + c.manifest_sent_to(validator_index, local_knowledge); } - } - /// Provided a validator index, gives an iterator of candidate - /// hashes which may be advertised to the validator and have not yet - /// been. 
- pub fn advertisements<'a>( - &'a self, - session_topology: &SessionTopologyView, - validator_index: ValidatorIndex, - ) -> impl IntoIterator + 'a { - let allowed_groups: HashSet<_> = session_topology - .group_views - .iter() - .filter(|(_, x)| x.sending.contains(&validator_index)) - .map(|(g, x)| *g) - .collect(); - - self.confirmed_backed - .iter() - .filter(move |(_, c)| allowed_groups.contains(c.group_index())) - .filter(move |(_, c)| c.should_advertise(validator_index)) - .map(|(c_h, _)| *c_h) + if let Some(x) = self.pending_communication.get_mut(&validator_index) { + x.remove(&candidate_hash); + } } - /// Whether the given validator is allowed to acknowledge an advertisement - /// via request. - pub fn can_remote_acknowledge( + /// Whether we should send a manifest about a specific candidate to a validator, + /// and which kind of manifest. + pub fn is_manifest_pending_for( &self, validator_index: ValidatorIndex, - candidate_hash: CandidateHash, - ) -> bool { - self.confirmed_backed - .get(&candidate_hash) - .map_or(false, |c| c.can_remote_acknowledge(validator_index)) + candidate_hash: &CandidateHash, + ) -> Option { + self.pending_communication + .get(&validator_index) + .and_then(|x| x.get(&candidate_hash)) + .map(|x| *x) } - /// Note that a validator peer we advertised a backed candidate to - /// has acknowledged the candidate directly or by requesting it. - pub fn note_remote_acknowledged( - &mut self, + /// Returns a vector of all candidates pending manifests for the specific validator, and + /// the type of manifest we should send. 
+ pub fn pending_manifests_for( + &self, validator_index: ValidatorIndex, - candidate_hash: CandidateHash, - remote_knowledge: StatementFilter, - ) { - if let Some(c) = self.confirmed_backed.get_mut(&candidate_hash) { - c.note_remote_acknowledged(validator_index, remote_knowledge); - } + ) -> Vec<(CandidateHash, ManifestKind)> { + self.pending_communication + .get(&validator_index) + .into_iter() + .flat_map(|pending| pending.iter().map(|(c, m)| (*c, *m))) + .collect() } - /// Which validators we should request the fully attested candidates from. - /// If we already have successfully requested, then this will return an empty + /// Which validators we could request the fully attested candidates from. + /// If the candidate is already confirmed, then this will return an empty /// set. pub fn validators_to_request( &self, @@ -388,30 +418,7 @@ impl GridTracker { validators } - /// Whether we can acknowledge a remote's advertisement. - pub fn can_local_acknowledge( - &self, - validator_index: ValidatorIndex, - candidate_hash: CandidateHash, - ) -> bool { - self.confirmed_backed - .get(&candidate_hash) - .map_or(false, |c| c.can_local_acknowledge(validator_index)) - } - - /// Indicate that we've acknowledged a remote's advertisement. - pub fn note_local_acknowledged( - &mut self, - validator_index: ValidatorIndex, - candidate_hash: CandidateHash, - local_knowledge: StatementFilter, - ) { - if let Some(c) = self.confirmed_backed.get_mut(&candidate_hash) { - c.note_local_acknowledged(validator_index, local_knowledge); - } - } - - /// Determine the validators which can send a statement by direct broadcast. + /// Determine the validators which can send a statement to us by direct broadcast. pub fn direct_statement_senders( &self, groups: &Groups, @@ -430,7 +437,7 @@ impl GridTracker { .unwrap_or_default() } - /// Determine the validators which can receive a statement by direct + /// Determine the validators which can receive a statement from us by direct /// broadcast. 
pub fn direct_statement_recipients( &self, @@ -452,7 +459,7 @@ impl GridTracker { /// Note that a direct statement about a given candidate was sent to or /// received from the given validator. - pub fn note_sent_or_received_direct_statement( + pub fn sent_or_received_direct_statement( &mut self, groups: &Groups, originator: ValidatorIndex, @@ -463,7 +470,7 @@ impl GridTracker { extract_statement_and_group_info(groups, originator, statement) { if let Some(known) = self.confirmed_backed.get_mut(&c_h) { - known.note_sent_or_received_direct_statement(counterparty, in_group, kind); + known.sent_or_received_direct_statement(counterparty, in_group, kind); } } } @@ -731,16 +738,19 @@ impl KnownBackedCandidate { &self.group_index } - // should only be invoked for validators which are known - // to be valid recipients of advertisement. - fn should_advertise(&self, validator: ValidatorIndex) -> bool { + fn has_received_manifest_from(&self, validator: ValidatorIndex) -> bool { self.mutual_knowledge .get(&validator) - .map_or(true, |k| k.local_knowledge.is_none()) + .map_or(false, |k| k.remote_knowledge.is_some()) } - // is a no-op when either they or we have advertised. - fn note_advertised_to(&mut self, validator: ValidatorIndex, local_knowledge: StatementFilter) { + fn has_sent_manifest_to(&self, validator: ValidatorIndex) -> bool { + self.mutual_knowledge + .get(&validator) + .map_or(false, |k| k.local_knowledge.is_some()) + } + + fn manifest_sent_to(&mut self, validator: ValidatorIndex, local_knowledge: StatementFilter) { let k = self .mutual_knowledge .entry(validator) @@ -749,7 +759,7 @@ impl KnownBackedCandidate { k.local_knowledge = Some(local_knowledge); } - fn note_remote_advertised( + fn manifest_received_from( &mut self, validator: ValidatorIndex, remote_knowledge: StatementFilter, @@ -762,57 +772,6 @@ impl KnownBackedCandidate { k.remote_knowledge = Some(remote_knowledge); } - // whether we are allowed to acknowledge or request a candidate from a remote validator. 
- fn can_local_acknowledge(&self, validator: ValidatorIndex) -> bool { - match self.mutual_knowledge.get(&validator) { - None => false, - Some(k) => k.remote_knowledge.is_some() && k.local_knowledge.is_none(), - } - } - - // whether a remote is allowed to acknowledge or request a candidate from us - fn can_remote_acknowledge(&self, validator: ValidatorIndex) -> bool { - match self.mutual_knowledge.get(&validator) { - None => false, - Some(k) => k.remote_knowledge.is_none() && k.local_knowledge.is_some(), - } - } - - fn note_local_acknowledged( - &mut self, - validator: ValidatorIndex, - local_knowledge: StatementFilter, - ) { - if let Some(ref mut k) = self.mutual_knowledge.get_mut(&validator) { - k.local_knowledge = Some(local_knowledge); - // TODO [now]: return something for sending statements they need. - } - } - - fn note_remote_acknowledged( - &mut self, - validator: ValidatorIndex, - remote_knowledge: StatementFilter, - ) { - if let Some(ref mut k) = self.mutual_knowledge.get_mut(&validator) { - k.remote_knowledge = Some(remote_knowledge); - // TODO [now]: return something for sending statements they need. 
- } - } - - fn can_send_direct_statement_to( - &self, - validator: ValidatorIndex, - statement_index_in_group: usize, - statement_kind: StatementKind, - ) -> bool { - match self.mutual_knowledge.get(&validator) { - Some(MutualKnowledge { remote_knowledge: Some(r), local_knowledge: Some(_) }) => - !r.contains(statement_index_in_group, statement_kind), - _ => false, - } - } - fn direct_statement_senders( &self, group_index: GroupIndex, @@ -857,20 +816,7 @@ impl KnownBackedCandidate { .collect() } - fn can_receive_direct_statement_from( - &self, - validator: ValidatorIndex, - statement_index_in_group: usize, - statement_kind: StatementKind, - ) -> bool { - match self.mutual_knowledge.get(&validator) { - Some(MutualKnowledge { remote_knowledge: Some(_), local_knowledge: Some(l) }) => - !l.contains(statement_index_in_group, statement_kind), - _ => false, - } - } - - fn note_sent_or_received_direct_statement( + fn sent_or_received_direct_statement( &mut self, validator: ValidatorIndex, statement_index_in_group: usize, @@ -1363,4 +1309,6 @@ mod tests { Ok(()) ); } + + // TODO [now]: check that pending communication is set and cleared correctly. 
} diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 80eccd9cc367..acdee49c3c96 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -778,7 +778,7 @@ async fn circulate_statement( }, DirectTargetKind::Grid => { statement_to.push(peer_id); - local_validator.grid_tracker.note_sent_or_received_direct_statement( + local_validator.grid_tracker.sent_or_received_direct_statement( &per_session.groups, originator, target, @@ -1147,7 +1147,7 @@ fn handle_grid_statement( Err(_) => return Err(COST_INVALID_SIGNATURE), }; - grid_tracker.note_sent_or_received_direct_statement( + grid_tracker.sent_or_received_direct_statement( &per_session.groups, checked_statement.validator_index(), grid_sender_index, @@ -1276,7 +1276,6 @@ async fn provide_candidate_to_grid( validated_in_group: filter.validated_in_group.clone(), }; let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { - relay_parent, candidate_hash, seconded_in_group: filter.seconded_in_group.clone(), validated_in_group: filter.validated_in_group.clone(), @@ -1303,22 +1302,12 @@ async fn provide_candidate_to_grid( }; match action { - grid::PostBackingAction::Advertise => { - inventory_peers.push(p); - local_validator - .grid_tracker - .note_advertised_to(v, candidate_hash, filter.clone()); - }, - grid::PostBackingAction::Acknowledge => { - ack_peers.push(p); - // TODO [now]: send follow-up statements as well - local_validator.grid_tracker.note_local_acknowledged( - v, - candidate_hash, - filter.clone(), - ); - }, + grid::ManifestKind::Full => inventory_peers.push(p), + grid::ManifestKind::Acknowledgement => ack_peers.push(p), } + + // TODO [now]: send follow-up statements as well + local_validator.grid_tracker.manifest_sent_to(v, candidate_hash, filter.clone()); } ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( From 
a7ff9f95fb7af9cf23ea767bc01d7606b81df2f9 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 20 Jan 2023 19:02:06 -0600 Subject: [PATCH 121/220] add some test TODOs --- node/network/statement-distribution/src/vstaging/grid.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index cebeb079f1a0..d776b3b5b5fe 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -1310,5 +1310,10 @@ mod tests { ); } - // TODO [now]: check that pending communication is set and cleared correctly. + // TODO [now]: test that senders can provide manifests in acknowledgement + + // TODO [now]: check that pending communication is set correctly when receiving a manifest on a confirmed candidate + // It should also overwrite any existing `Full` ManifestKind + + // TODO [now]: check that pending communication is cleared correctly in `manifest_sent_to` } From c0196d11e18dd13d597f65985b6ddb3b14f475b8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 23 Jan 2023 19:03:28 -0600 Subject: [PATCH 122/220] handle confirmed candidates & grid announcements --- .../src/vstaging/candidates.rs | 51 ++++-- .../src/vstaging/grid.rs | 2 +- .../src/vstaging/mod.rs | 164 ++++++++++++++++-- 3 files changed, 180 insertions(+), 37 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index 37d41868a071..353eb9ff115a 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -40,6 +40,26 @@ use std::{ sync::Arc, }; +/// This encapsulates the correct and incorrect advertisers +/// post-confirmation of a candidate. 
+#[derive(Default)] +pub struct PostConfirmationReckoning { + /// Peers which advertised correctly. + pub correct: HashSet, + /// Peers which advertised the candidate incorrectly. + pub incorrect: HashSet, +} + +/// Outputs generated by initial confirmation of a candidate. +pub struct PostConfirmation { + /// The hypothetical candidate used to determine importability and membership + /// in the hypothetical frontier. + pub hypothetical: HypotheticalCandidate, + /// A "reckoning" of peers who have advertised the candidate previously, + /// either accurately or inaccurately. + pub reckoning: PostConfirmationReckoning, +} + /// A tracker for all known candidates in the view. /// /// See module docs for more info. @@ -118,9 +138,8 @@ impl Candidates { Ok(()) } - /// Note that a candidate has been confirmed, - /// yielding lists of peers which advertised it - /// both correctly and incorrectly. + /// Note that a candidate has been confirmed. If the candidate has just been + /// confirmed, then this returns a `PostConfirmation` to be applied by the caller. /// /// This does no sanity-checking of input data, and will overwrite /// already-confirmed canidates. 
@@ -130,7 +149,7 @@ impl Candidates { candidate_receipt: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, assigned_group: GroupIndex, - ) -> Option { + ) -> Option { let parent_hash = persisted_validation_data.parent_head.hash(); let relay_parent = candidate_receipt.descriptor().relay_parent; let para_id = candidate_receipt.descriptor().para_id; @@ -155,13 +174,13 @@ impl Candidates { self.by_parent.entry((parent_hash, para_id)).or_default().insert(candidate_hash); match prev_state { - None => None, + None => Some(PostConfirmation { + reckoning: Default::default(), + hypothetical: new_confirmed.to_hypothetical(candidate_hash), + }), Some(CandidateState::Confirmed(_)) => None, Some(CandidateState::Unconfirmed(u)) => Some({ - let mut reckoning = PostConfirmationReckoning { - correct: HashSet::new(), - incorrect: HashSet::new(), - }; + let mut reckoning = PostConfirmationReckoning::default(); for (leaf_hash, x) in u.unconfirmed_importable_under { if x.relay_parent == relay_parent && @@ -195,7 +214,10 @@ impl Candidates { } } - reckoning + PostConfirmation { + reckoning, + hypothetical: new_confirmed.to_hypothetical(candidate_hash), + } }), } } @@ -345,15 +367,6 @@ impl Candidates { } } -/// This encapsulates the correct and incorrect advertisers -/// post-confirmation of a candidate. -pub struct PostConfirmationReckoning { - /// Peers which advertised correctly. - pub correct: HashSet, - /// Peers which advertised the candidate incorrectly. - pub incorrect: HashSet, -} - /// A bad advertisement was recognized. 
#[derive(Debug)] pub struct BadAdvertisement; diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index d776b3b5b5fe..8403d73a3fbc 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -391,7 +391,7 @@ impl GridTracker { pub fn pending_manifests_for( &self, validator_index: ValidatorIndex, - ) -> Vec<(CandidateHash, ManifestKind)> { + ) -> Vec<(CandidateHash, ManifestKind)> { self.pending_communication .get(&validator_index) .into_iter() diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index acdee49c3c96..9aa4c59363d5 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -59,7 +59,7 @@ use crate::{ error::{JfyiError, JfyiErrorResult}, LOG_TARGET, }; -use candidates::{BadAdvertisement, Candidates}; +use candidates::{BadAdvertisement, Candidates, PostConfirmation}; use cluster::{Accept as ClusterAccept, ClusterTracker, RejectIncoming as ClusterRejectIncoming}; use grid::{GridTracker, ManifestSummary, StatementFilter}; use groups::Groups; @@ -82,6 +82,19 @@ const COST_UNEXPECTED_STATEMENT_REMOTE: Rep = Rep::CostMinor("Unexpected Statement, remote not allowed"); const COST_EXCESSIVE_SECONDED: Rep = Rep::CostMinor("Sent Excessive `Seconded` Statements"); +const COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE: Rep = + Rep::CostMinor("Unexpected Manifest, missing knowlege for relay parent"); +const COST_UNEXPECTED_MANIFEST_DISALLOWED: Rep = + Rep::CostMinor("Unexpected Manifest, Peer Disallowed"); +const COST_CONFLICTING_MANIFEST: Rep = + Rep::CostMajor("Manifest conflicts with previous"); +const COST_INSUFFICIENT_MANIFEST: Rep = + Rep::CostMajor("Manifest statements insufficient to back candidate"); +const COST_MALFORMED_MANIFEST: Rep = + Rep::CostMajor("Manifest is 
malformed"); +const COST_DUPLICATE_MANIFEST: Rep = + Rep::CostMinor("Duplicate Manifest"); + const COST_INVALID_SIGNATURE: Rep = Rep::CostMajor("Invalid Statement Signature"); const COST_IMPROPERLY_DECODED_RESPONSE: Rep = Rep::CostMajor("Improperly Encoded Candidate Response"); @@ -562,20 +575,20 @@ pub(crate) async fn share_local_statement( return Err(JfyiError::InvalidShare) } + let mut post_confirmation = None; + // Insert candidate if unknown + more sanity checks. let (compact_statement, candidate_hash) = { let compact_statement = FullStatementWithPVD::signed_to_compact(statement.clone()); let candidate_hash = CandidateHash(*statement.payload().candidate_hash()); if let FullStatementWithPVD::Seconded(ref c, ref pvd) = statement.payload() { - if let Some(reckoning) = state.candidates.confirm_candidate( + post_confirmation = state.candidates.confirm_candidate( candidate_hash, c.clone(), pvd.clone(), local_group, - ) { - apply_post_confirmation_reckoning(ctx, reckoning).await; - } + ); }; match per_relay_parent.statement_store.insert( @@ -594,12 +607,12 @@ pub(crate) async fn share_local_statement( Ok(true) => (compact_statement, candidate_hash), } - // TODO [now]: the candidate is confirmed, so - // a) cancel requests - // b) import previously-received statements. - // Probably: create a helper function that does all this + applies post-confirmation reckoning. }; + if let Some(post_confirmation) = post_confirmation { + apply_post_confirmation(ctx, state, post_confirmation); + } + // send the compact version of the statement to any peers which need it. 
circulate_statement(ctx, state, relay_parent, local_group, compact_statement).await; @@ -1025,7 +1038,7 @@ async fn handle_incoming_statement( request_entry.get_mut().add_peer(peer); - // We only successfully accept statements from the grid on unconfirmed + // We only successfully accept statements from the grid on confirmed // candidates, therefore this check only passes if the statement is from the cluster request_entry.get_mut().set_cluster_priority(); } @@ -1342,9 +1355,14 @@ async fn fragment_tree_update_inner( state: &mut State, active_leaf_hash: Option, required_parent_info: Option<(Hash, ParaId)>, + known_hypotheticals: Option>, ) { // 1. get hypothetical candidates - let hypotheticals = state.candidates.frontier_hypotheticals(required_parent_info); + let hypotheticals = match known_hypotheticals { + None => state.candidates.frontier_hypotheticals(required_parent_info), + Some(h) => h, + }; + // 2. find out which are in the frontier let frontier = { let (tx, rx) = oneshot::channel(); @@ -1411,7 +1429,7 @@ async fn new_leaf_fragment_tree_updates( state: &mut State, leaf_hash: Hash, ) { - fragment_tree_update_inner(ctx, state, Some(leaf_hash), None).await + fragment_tree_update_inner(ctx, state, Some(leaf_hash), None, None).await } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] @@ -1421,7 +1439,16 @@ async fn prospective_backed_notification_fragment_tree_updates( para_id: ParaId, para_head: Hash, ) { - fragment_tree_update_inner(ctx, state, None, Some((para_head, para_id))).await + fragment_tree_update_inner(ctx, state, None, Some((para_head, para_id)), None).await +} + +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn new_confirmed_candidate_fragment_tree_updates( + ctx: &mut Context, + state: &mut State, + candidate: HypotheticalCandidate, +) { + fragment_tree_update_inner(ctx, state, None, None, Some(vec![candidate])).await } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] 
@@ -1430,13 +1457,105 @@ async fn handle_incoming_manifest( state: &mut State, peer: PeerId, manifest: net_protocol::vstaging::BackedCandidateManifest, +) { + // 1. sanity checks: relay-parent in state, para ID matches group index, + let relay_parent_state = match state.per_relay_parent.get_mut(&manifest.relay_parent) { + None => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE).await; + return; + }, + Some(s) => s, + }; + + let per_session = match state.per_session.get(&relay_parent_state.session) { + None => return, + Some(s) => s, + }; + + let local_validator = match relay_parent_state.local_validator.as_mut() { + None => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE).await; + return; + }, + Some(x) => x, + + }; + + let grid_topology = match per_session.grid_view.as_ref() { + None => return, + Some(x) => x, + }; + + // TODO [now]: validate that para Id matches group index. + + // 2. sanity checks: peer is validator, bitvec size, import into grid tracker + let maybe_respond = match local_validator.grid_tracker.import_manifest( + grid_topology, + &per_session.groups, + manifest.candidate_hash, + todo!(), // TODO [now]: seconding limit + grid::ManifestSummary { + claimed_parent_hash: manifest.parent_head_data_hash, + claimed_group_index: manifest.group_index, + seconded_in_group: manifest.seconded_in_group, + validated_in_group: manifest.validated_in_group, + }, + grid::ManifestKind::Full, + todo!(), // TODO [now]: validator index + ) { + Ok(x) => x, + Err(grid::ManifestImportError::Conflicting) => { + report_peer(ctx.sender(), peer, COST_CONFLICTING_MANIFEST).await; + return; + } + Err(grid::ManifestImportError::Overflow) => { + report_peer(ctx.sender(), peer, COST_EXCESSIVE_SECONDED).await; + return; + } + Err(grid::ManifestImportError::Insufficient) => { + report_peer(ctx.sender(), peer, COST_INSUFFICIENT_MANIFEST).await; + return; + } + Err(grid::ManifestImportError::Malformed) => { + 
report_peer(ctx.sender(), peer, COST_MALFORMED_MANIFEST).await; + return; + } + Err(grid::ManifestImportError::Disallowed) => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_DISALLOWED).await; + return; + } + }; + + // 3. if accepted by grid, insert as unconfirmed. + if let Err(BadAdvertisement) = state.candidates.insert_unconfirmed( + peer.clone(), + manifest.candidate_hash, + manifest.relay_parent, + manifest.group_index, + Some((manifest.parent_head_data_hash, manifest.para_id)), + ) { + report_peer(ctx.sender(), peer, COST_INACCURATE_ADVERTISEMENT).await; + return; + } + + // TODO [now] + // 4. if already confirmed & known within grid, acknowledge candidate + // 5. if unconfirmed, add request entry +} + +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn handle_incoming_acknowledgement( + ctx: &mut Context, + state: &mut State, + peer: PeerId, + manifest: net_protocol::vstaging::BackedCandidateAcknowledgement, ) { // TODO [now]: // 1. sanity checks: relay-parent in state, para ID matches group index, // 2. sanity checks: peer is validator, bitvec size, import into grid tracker // 3. if accepted by grid, insert as unconfirmed. // 4. if already confirmed, acknowledge candidate - // 5. if already unconfirmed, add request entry (if importable) + // 5. if unconfirmed, add request entry (if importable) // 6. if fresh unconfirmed, determine whether it's in the hypothetical // frontier, update candidates wrapper, add request entry if so. } @@ -1485,12 +1604,23 @@ pub(crate) async fn handle_backed_candidate_message( ).await; } +/// Applies state & p2p updates as a result of a newly confirmed candidate. +/// +/// This punishes who advertised the candidate incorrectly, as well as +/// doing an importability analysis of the confirmed candidate and providing +/// statements to the backing subsystem if importable. It also cleans up +/// any pending requests for the candidate. 
 #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
-async fn apply_post_confirmation_reckoning<Context>(
+async fn apply_post_confirmation<Context>(
 	ctx: &mut Context,
-	reckoning: candidates::PostConfirmationReckoning,
+	state: &mut State,
+	post_confirmation: PostConfirmation,
 ) {
-	for peer in reckoning.incorrect {
+	for peer in post_confirmation.reckoning.incorrect {
 		report_peer(ctx.sender(), peer, COST_INACCURATE_ADVERTISEMENT).await;
 	}
+
+	let candidate_hash = post_confirmation.hypothetical.candidate_hash();
+	state.request_manager.remove_for(candidate_hash);
+	new_confirmed_candidate_fragment_tree_updates(ctx, state, post_confirmation.hypothetical).await;
 }

From 00939fdf7ba0bb220807ad018f768dd2d2fcbbdd Mon Sep 17 00:00:00 2001
From: Robert Habermeier
Date: Mon, 23 Jan 2023 23:51:56 -0600
Subject: [PATCH 123/220] finish implementing manifest handling, including
 follow up statements

---
 .../src/vstaging/grid.rs            |  62 ++++++--
 .../src/vstaging/mod.rs             | 132 ++++++++++++++++--
 .../src/vstaging/statement_store.rs |  45 +++---
 3 files changed, 192 insertions(+), 47 deletions(-)

diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs
index 8403d73a3fbc..0831f707a6df 100644
--- a/node/network/statement-distribution/src/vstaging/grid.rs
+++ b/node/network/statement-distribution/src/vstaging/grid.rs
@@ -52,6 +52,14 @@ pub struct SessionTopologyView {
 	group_views: HashMap<GroupIndex, GroupSubView>,
 }
 
+impl SessionTopologyView {
+	/// Returns an iterator over all validator indices from the group who are allowed to
+	/// send us manifests.
+	pub fn iter_group_senders<'a>(&'a self, group: GroupIndex) -> impl Iterator<Item = ValidatorIndex> + 'a {
+		self.group_views.get(&group).into_iter().flat_map(|sub| sub.sending.iter().cloned())
+	}
+}
+
 /// Build a view of the topology for the session.
 /// For groups that we are part of: we receive from nobody and send to our X/Y peers.
/// For groups that we are not part of: we receive from any validator in the group we share a slice with. @@ -186,8 +194,9 @@ impl GridTracker { /// checks on the format of the manifest and the amount of votes /// it contains. It has effects on the stored state only when successful. /// - /// This returns an optional `ManifestKind` indicating the type of manifest - /// to be sent in response to the received manifest. + /// This returns a `bool` on success, which if true indicates that an acknowledgement is + /// to be sent in response to the received manifest. This only occurs when the + /// candidate is already known to be confirmed and backed. pub fn import_manifest( &mut self, session_topology: &SessionTopologyView, @@ -197,7 +206,7 @@ impl GridTracker { manifest: ManifestSummary, kind: ManifestKind, sender: ValidatorIndex, - ) -> Result, ManifestImportError> { + ) -> Result { let claimed_group_index = manifest.claimed_group_index; let group_topology = match session_topology.group_views.get(&manifest.claimed_group_index) { @@ -205,15 +214,15 @@ impl GridTracker { Some(g) => g, }; - let is_receiver = group_topology.receiving.contains(&sender); - let is_sender = group_topology.sending.contains(&sender); + let receiving_from = group_topology.receiving.contains(&sender); + let sending_to = group_topology.sending.contains(&sender); let manifest_allowed = match kind { // Peers can send manifests _if_: // * They are in the receiving set for the group AND the manifest is full OR // * They are in the sending set for the group AND we have sent them // a manifest AND the received manifest is partial. 
- ManifestKind::Full => is_receiver, - ManifestKind::Acknowledgement => is_sender && self.confirmed_backed + ManifestKind::Full => receiving_from, + ManifestKind::Acknowledgement => sending_to && self.confirmed_backed .get(&candidate_hash) .map_or(false, |c| c.has_sent_manifest_to(sender)), }; @@ -263,15 +272,15 @@ impl GridTracker { manifest, )?; - let mut to_send = None; + let mut ack = false; if let Some(confirmed) = self.confirmed_backed.get_mut(&candidate_hash) { - // TODO [now]: send statements they need if this is an ack - if is_receiver && !confirmed.has_sent_manifest_to(sender) { + if receiving_from && !confirmed.has_sent_manifest_to(sender) { + // due to checks above, the manifest `kind` is guaranteed to be `Full` self.pending_communication.entry(sender) .or_default() .insert(candidate_hash, ManifestKind::Acknowledgement); - to_send = Some(ManifestKind::Acknowledgement); + ack = true; } confirmed.manifest_received_from(sender, remote_knowledge); } else { @@ -282,7 +291,7 @@ impl GridTracker { .push((sender, claimed_group_index)) } - Ok(to_send) + Ok(ack) } /// Add a new backed candidate to the tracker. This yields @@ -399,6 +408,19 @@ impl GridTracker { .collect() } + /// Returns a statement filter indicating statements that a given peer + /// is awaiting concerning the given candidate, constrained by the statements + /// we have ourselves. + pub fn pending_statements_for( + &self, + validator_index: ValidatorIndex, + candidate_hash: CandidateHash, + ) -> Option { + self.confirmed_backed + .get(&candidate_hash) + .and_then(|x| x.pending_statements(validator_index)) + } + /// Which validators we could request the fully attested candidates from. /// If the candidate is already confirmed, then this will return an empty /// set. @@ -829,6 +851,22 @@ impl KnownBackedCandidate { } } } + + fn pending_statements( + &self, + validator: ValidatorIndex, + ) -> Option { + // existence of both remote & local knowledge indicate we have exchanged + // manifests. 
+ // then, everything that is not in the remote knowledge is pending, and we + // further limit this by what is in the local knowledge itself. + self.mutual_knowledge.get(&validator) + .and_then(|k| k.local_knowledge.as_ref().zip(k.remote_knowledge.as_ref())) + .map(|(local, remote)| StatementFilter { + seconded_in_group: local.seconded_in_group.clone() & !remote.seconded_in_group.clone(), + validated_in_group: local.validated_in_group.clone() & !remote.validated_in_group.clone(), + }) + } } #[cfg(test)] diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 9aa4c59363d5..ee56bcbc899a 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -1458,7 +1458,13 @@ async fn handle_incoming_manifest( peer: PeerId, manifest: net_protocol::vstaging::BackedCandidateManifest, ) { - // 1. sanity checks: relay-parent in state, para ID matches group index, + // 1. sanity checks: peer is connected, relay-parent in state, para ID matches group index. + + let peer_state = match state.peers.get(&peer) { + None => return, + Some(p) => p, + }; + let relay_parent_state = match state.per_relay_parent.get_mut(&manifest.relay_parent) { None => { report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE).await; @@ -1478,18 +1484,41 @@ async fn handle_incoming_manifest( return; }, Some(x) => x, - }; + let expected_group = group_for_para( + &relay_parent_state.availability_cores, + &relay_parent_state.group_rotation_info, + manifest.para_id, + ); + + if expected_group != Some(manifest.group_index) { + report_peer(ctx.sender(), peer, COST_MALFORMED_MANIFEST).await; + return; + } + let grid_topology = match per_session.grid_view.as_ref() { None => return, Some(x) => x, }; - // TODO [now]: validate that para Id matches group index. 
+ let sender_index = grid_topology + .iter_group_senders(manifest.group_index) + .filter_map(|i| per_session.session_info.discovery_keys.get(i.0 as usize).map(|ad| (i, ad))) + .filter(|(_, ad)| peer_state.is_authority(ad)) + .map(|(i, _)| i) + .next(); + + let sender_index = match sender_index { + None => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_DISALLOWED).await; + return; + } + Some(s) => s, + }; // 2. sanity checks: peer is validator, bitvec size, import into grid tracker - let maybe_respond = match local_validator.grid_tracker.import_manifest( + let acknowledge = match local_validator.grid_tracker.import_manifest( grid_topology, &per_session.groups, manifest.candidate_hash, @@ -1501,7 +1530,7 @@ async fn handle_incoming_manifest( validated_in_group: manifest.validated_in_group, }, grid::ManifestKind::Full, - todo!(), // TODO [now]: validator index + sender_index, ) { Ok(x) => x, Err(grid::ManifestImportError::Conflicting) => { @@ -1538,9 +1567,96 @@ async fn handle_incoming_manifest( return; } - // TODO [now] - // 4. if already confirmed & known within grid, acknowledge candidate - // 5. if unconfirmed, add request entry + if acknowledge { + // 4. 
if already confirmed & known within grid, acknowledge candidate + let local_knowledge = { + let group_size = match per_session.groups.get(manifest.group_index) { + None => return, // sanity + Some(x) => x.len(), + }; + + let mut f = StatementFilter::new(group_size); + relay_parent_state.statement_store.fill_statement_filter( + manifest.group_index, + manifest.candidate_hash, + &mut f, + ); + f + }; + let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { + candidate_hash: manifest.candidate_hash, + seconded_in_group: local_knowledge.seconded_in_group.clone(), + validated_in_group: local_knowledge.validated_in_group.clone(), + }; + + let msg = Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + ); + + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(vec![peer.clone()], msg.into())).await; + local_validator.grid_tracker.manifest_sent_to(sender_index, manifest.candidate_hash, local_knowledge); + + post_acknowledgement_send_statements( + ctx, + sender_index, + peer, + manifest.relay_parent, + &mut local_validator.grid_tracker, + &relay_parent_state.statement_store, + &per_session.groups, + manifest.group_index, + manifest.candidate_hash, + ).await; + } else if !state.candidates.is_confirmed(&manifest.candidate_hash) { + // 5. if unconfirmed, add request entry + state.request_manager + .get_or_insert( + manifest.relay_parent, + manifest.candidate_hash, + manifest.group_index + ) + .get_mut() + .add_peer(peer); + } +} + +/// Send any follow-up direct statements to a peer, following acknowledgement of a manifest. 
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn post_acknowledgement_send_statements( + ctx: &mut Context, + recipient: ValidatorIndex, + recipient_peer_id: PeerId, + relay_parent: Hash, + grid_tracker: &mut GridTracker, + statement_store: &StatementStore, + groups: &Groups, + group_index: GroupIndex, + candidate_hash: CandidateHash, +) { + let sending_filter = match grid_tracker + .pending_statements_for(recipient, candidate_hash) + { + None => return, + Some(f) => f, + }; + + let mut messages = Vec::new(); + for statement in statement_store.group_statements(groups, group_index, candidate_hash, &sending_filter) { + grid_tracker.sent_or_received_direct_statement( + groups, + statement.validator_index(), + recipient, + statement.payload(), + ); + + let msg = Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + statement.as_unchecked().clone(), + ).into()); + messages.push((vec![recipient_peer_id.clone()], msg)); + } + + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(messages)).await; } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index 8909614741cf..f22c8371a194 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -161,22 +161,6 @@ impl StatementStore { Ok(true) } - /// Get a bit-slice of validators in the group which have issued statements of the - /// given form about the candidate. If unavailable, returns `None`. 
- pub fn group_statement_bitslice( - &self, - group_index: GroupIndex, - statement: CompactStatement, - ) -> Option<&BitSlice> { - let candidate_hash = *statement.candidate_hash(); - self.group_statements - .get(&(group_index, candidate_hash)) - .map(|g| match statement { - CompactStatement::Seconded(_) => &*g.seconded, - CompactStatement::Valid(_) => &*g.valid, - }) - } - /// Fill a `StatementFilter` to be used in the grid topology with all statements /// we are already aware of. pub fn fill_statement_filter( @@ -191,23 +175,30 @@ impl StatementStore { } } - // TODO [now]: this may not be useful. - /// Get an iterator over signed statements of the given form by the given group. + /// Get an iterator over stored signed statements by the group conforming to the + /// given filter. pub fn group_statements<'a>( &'a self, groups: &'a Groups, group_index: GroupIndex, - statement: CompactStatement, + candidate_hash: CandidateHash, + filter: &'a StatementFilter, ) -> impl Iterator + 'a { - let bitslice = self.group_statement_bitslice(group_index, statement.clone()); let group_validators = groups.get(group_index); - bitslice - .into_iter() - .flat_map(|v| v.iter_ones()) + let seconded_statements = filter.seconded_in_group + .iter_ones() .filter_map(move |i| group_validators.as_ref().and_then(|g| g.get(i))) - .filter_map(move |v| self.known_statements.get(&(*v, statement.clone()))) - .map(|s| &s.statement) + .filter_map(move |v| self.known_statements.get(&(*v, CompactStatement::Seconded(candidate_hash)))) + .map(|s| &s.statement); + + let valid_statements = filter.validated_in_group + .iter_ones() + .filter_map(move |i| group_validators.as_ref().and_then(|g| g.get(i))) + .filter_map(move |v| self.known_statements.get(&(*v, CompactStatement::Valid(candidate_hash)))) + .map(|s| &s.statement); + + seconded_statements.chain(valid_statements) } /// Get the full statement of this kind issued by this validator, if it is known. 
@@ -264,8 +255,8 @@ struct ValidatorMeta { } struct GroupStatements { - seconded: BitVec, - valid: BitVec, + seconded: BitVec, + valid: BitVec, } impl GroupStatements { From 3c976fa2a050fe541786dc268c43dfbd3d0fa33c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 24 Jan 2023 00:06:42 -0600 Subject: [PATCH 124/220] send follow-up statements when acknowledging freshly backed --- .../src/vstaging/grid.rs | 16 ++-- .../src/vstaging/mod.rs | 76 ++++++++++++------- 2 files changed, 57 insertions(+), 35 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 0831f707a6df..67f46b21dc9c 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -415,10 +415,11 @@ impl GridTracker { &self, validator_index: ValidatorIndex, candidate_hash: CandidateHash, + full_local_knowledge: &StatementFilter, ) -> Option { self.confirmed_backed .get(&candidate_hash) - .and_then(|x| x.pending_statements(validator_index)) + .and_then(|x| x.pending_statements(validator_index, full_local_knowledge)) } /// Which validators we could request the fully attested candidates from. @@ -855,16 +856,19 @@ impl KnownBackedCandidate { fn pending_statements( &self, validator: ValidatorIndex, + full_local: &StatementFilter, ) -> Option { // existence of both remote & local knowledge indicate we have exchanged // manifests. // then, everything that is not in the remote knowledge is pending, and we - // further limit this by what is in the local knowledge itself. + // further limit this by what is in the local knowledge itself. we use the + // full local knowledge, as the local knowledge stored here may be outdated. 
self.mutual_knowledge.get(&validator) - .and_then(|k| k.local_knowledge.as_ref().zip(k.remote_knowledge.as_ref())) - .map(|(local, remote)| StatementFilter { - seconded_in_group: local.seconded_in_group.clone() & !remote.seconded_in_group.clone(), - validated_in_group: local.validated_in_group.clone() & !remote.validated_in_group.clone(), + .filter(|k| k.local_knowledge.is_some()) + .and_then(|k| k.remote_knowledge.as_ref()) + .map(|remote| StatementFilter { + seconded_in_group: full_local.seconded_in_group.clone() & !remote.seconded_in_group.clone(), + validated_in_group: full_local.validated_in_group.clone() & !remote.validated_in_group.clone(), }) } } diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index ee56bcbc899a..2e457477b9b2 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -1303,6 +1303,8 @@ async fn provide_candidate_to_grid( let mut inventory_peers = Vec::new(); let mut ack_peers = Vec::new(); + + let mut post_statements = Vec::new(); for (v, action) in actions { let p = match connected_validator_peer(authorities, per_session, v) { None => continue, @@ -1319,18 +1321,35 @@ async fn provide_candidate_to_grid( grid::ManifestKind::Acknowledgement => ack_peers.push(p), } - // TODO [now]: send follow-up statements as well local_validator.grid_tracker.manifest_sent_to(v, candidate_hash, filter.clone()); + post_statements.extend(post_acknowledgement_statement_messages( + v, + relay_parent, + &mut local_validator.grid_tracker, + &relay_parent_state.statement_store, + &per_session.groups, + group_index, + candidate_hash, + &filter, + ).into_iter().map(|m| (vec![p], m))); } - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - inventory_peers, - inventory_message.into(), - )) - .await; - - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(ack_peers, ack_message.into())) + if 
!inventory_peers.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + inventory_peers, + inventory_message.into(), + )) .await; + } + + if !ack_peers.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(ack_peers, ack_message.into())) + .await; + } + + if !post_statements.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(post_statements)).await; + } } fn group_for_para( @@ -1596,17 +1615,22 @@ async fn handle_incoming_manifest( ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(vec![peer.clone()], msg.into())).await; local_validator.grid_tracker.manifest_sent_to(sender_index, manifest.candidate_hash, local_knowledge); - post_acknowledgement_send_statements( - ctx, + let messages = post_acknowledgement_statement_messages( sender_index, - peer, manifest.relay_parent, &mut local_validator.grid_tracker, &relay_parent_state.statement_store, &per_session.groups, manifest.group_index, manifest.candidate_hash, - ).await; + &local_knowledge, + ); + + if !messages.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages( + messages.into_iter().map(|m| (vec![peer.clone()], m)).collect() + )).await; + } } else if !state.candidates.is_confirmed(&manifest.candidate_hash) { // 5. if unconfirmed, add request entry state.request_manager @@ -1620,23 +1644,22 @@ async fn handle_incoming_manifest( } } -/// Send any follow-up direct statements to a peer, following acknowledgement of a manifest. -#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn post_acknowledgement_send_statements( - ctx: &mut Context, +/// Produce a list of network messages to send to a peer, following acknowledgement of a manifest. +/// This notes the messages as sent within the grid state. 
+fn post_acknowledgement_statement_messages( recipient: ValidatorIndex, - recipient_peer_id: PeerId, relay_parent: Hash, grid_tracker: &mut GridTracker, statement_store: &StatementStore, groups: &Groups, group_index: GroupIndex, candidate_hash: CandidateHash, -) { + local_knowledge: &StatementFilter, +) -> Vec { let sending_filter = match grid_tracker - .pending_statements_for(recipient, candidate_hash) + .pending_statements_for(recipient, candidate_hash, local_knowledge) { - None => return, + None => return Vec::new(), Some(f) => f, }; @@ -1649,14 +1672,13 @@ async fn post_acknowledgement_send_statements( statement.payload(), ); - let msg = Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( + messages.push(Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( relay_parent, statement.as_unchecked().clone(), - ).into()); - messages.push((vec![recipient_peer_id.clone()], msg)); + ).into())); } - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(messages)).await; + messages } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] @@ -1669,11 +1691,7 @@ async fn handle_incoming_acknowledgement( // TODO [now]: // 1. sanity checks: relay-parent in state, para ID matches group index, // 2. sanity checks: peer is validator, bitvec size, import into grid tracker - // 3. if accepted by grid, insert as unconfirmed. - // 4. if already confirmed, acknowledge candidate - // 5. if unconfirmed, add request entry (if importable) - // 6. if fresh unconfirmed, determine whether it's in the hypothetical - // frontier, update candidates wrapper, add request entry if so. + // 3. if accepted by grid, send follow-up statements. } /// Handle a notification of a candidate being backed. 
From f4983e7dd639b5197fa3c56ef919f4023859d5d3 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 24 Jan 2023 00:07:08 -0600 Subject: [PATCH 125/220] fmt --- node/core/backing/src/lib.rs | 6 +- .../network/statement-distribution/src/lib.rs | 3 +- .../src/vstaging/candidates.rs | 4 +- .../src/vstaging/grid.rs | 53 ++++--- .../src/vstaging/mod.rs | 148 ++++++++++-------- .../src/vstaging/statement_store.rs | 14 +- 6 files changed, 131 insertions(+), 97 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 5473ba128914..e6be86f2b9a4 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -1579,9 +1579,9 @@ async fn import_statement( }) .await; - // TODO [now]: notify statement distribution of backed - // candidate. alter control flow so "Share" is always sent - // first. + // TODO [now]: notify statement distribution of backed + // candidate. alter control flow so "Share" is always sent + // first. } else { // The provisioner waits on candidate-backing, which means // that we need to send unbounded messages to avoid cycles. diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index e910bc410830..829f3a8554a3 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -271,7 +271,8 @@ impl StatementDistributionSubsystem { ctx, unimplemented!(), // TODO [now] state candidate_hash, - ).await; + ) + .await; }, }, } diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index 353eb9ff115a..25eb1037d868 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -285,7 +285,9 @@ impl Candidates { /// Note that a candidate is backed. No-op if the candidate is not confirmed. 
pub fn note_backed(&mut self, candidate_hash: &CandidateHash) { - if let Some(&mut CandidateState::Confirmed(ref mut c)) = self.candidates.get_mut(candidate_hash) { + if let Some(&mut CandidateState::Confirmed(ref mut c)) = + self.candidates.get_mut(candidate_hash) + { c.backed = true; } } diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 67f46b21dc9c..a316d2b2ea51 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -55,8 +55,14 @@ pub struct SessionTopologyView { impl SessionTopologyView { /// Returns an iterator over all validator indices from the group who are allowed to /// send us manifests. - pub fn iter_group_senders<'a>(&'a self, group: GroupIndex) -> impl Iterator + 'a { - self.group_views.get(&group).into_iter().flat_map(|sub| sub.sending.iter().cloned()) + pub fn iter_group_senders<'a>( + &'a self, + group: GroupIndex, + ) -> impl Iterator + 'a { + self.group_views + .get(&group) + .into_iter() + .flat_map(|sub| sub.sending.iter().cloned()) } } @@ -222,9 +228,11 @@ impl GridTracker { // * They are in the sending set for the group AND we have sent them // a manifest AND the received manifest is partial. 
ManifestKind::Full => receiving_from, - ManifestKind::Acknowledgement => sending_to && self.confirmed_backed - .get(&candidate_hash) - .map_or(false, |c| c.has_sent_manifest_to(sender)), + ManifestKind::Acknowledgement => + sending_to && + self.confirmed_backed + .get(&candidate_hash) + .map_or(false, |c| c.has_sent_manifest_to(sender)), }; if !manifest_allowed { @@ -276,7 +284,8 @@ impl GridTracker { if let Some(confirmed) = self.confirmed_backed.get_mut(&candidate_hash) { if receiving_from && !confirmed.has_sent_manifest_to(sender) { // due to checks above, the manifest `kind` is guaranteed to be `Full` - self.pending_communication.entry(sender) + self.pending_communication + .entry(sender) .or_default() .insert(candidate_hash, ManifestKind::Acknowledgement); @@ -326,7 +335,6 @@ impl GridTracker { .and_then(|r| r.candidate_statement_filter(&candidate_hash)) .expect("unconfirmed is only populated by validators who have sent manifest; qed"); - // No need to send direct statements, because our local knowledge is `None` c.manifest_received_from(v, statement_filter); } @@ -338,25 +346,25 @@ impl GridTracker { // advertise onwards ad accept received advertisements - let sending_group_manifests = group_topology - .sending - .iter() - .map(|v| (*v, ManifestKind::Full)); + let sending_group_manifests = + group_topology.sending.iter().map(|v| (*v, ManifestKind::Full)); - let receiving_group_manifests = group_topology - .receiving - .iter() - .filter_map(|v| if c.has_received_manifest_from(*v) { + let receiving_group_manifests = group_topology.receiving.iter().filter_map(|v| { + if c.has_received_manifest_from(*v) { Some((*v, ManifestKind::Acknowledgement)) } else { None - }); + } + }); // Note that order is important: if a validator is part of both the sending // and receiving groups, we may overwrite a `Full` manifest with a `Acknowledgement` // one. 
for (v, manifest_mode) in sending_group_manifests.chain(receiving_group_manifests) { - self.pending_communication.entry(v).or_default().insert(candidate_hash, manifest_mode); + self.pending_communication + .entry(v) + .or_default() + .insert(candidate_hash, manifest_mode); } self.pending_communication @@ -400,7 +408,7 @@ impl GridTracker { pub fn pending_manifests_for( &self, validator_index: ValidatorIndex, -) -> Vec<(CandidateHash, ManifestKind)> { + ) -> Vec<(CandidateHash, ManifestKind)> { self.pending_communication .get(&validator_index) .into_iter() @@ -863,12 +871,15 @@ impl KnownBackedCandidate { // then, everything that is not in the remote knowledge is pending, and we // further limit this by what is in the local knowledge itself. we use the // full local knowledge, as the local knowledge stored here may be outdated. - self.mutual_knowledge.get(&validator) + self.mutual_knowledge + .get(&validator) .filter(|k| k.local_knowledge.is_some()) .and_then(|k| k.remote_knowledge.as_ref()) .map(|remote| StatementFilter { - seconded_in_group: full_local.seconded_in_group.clone() & !remote.seconded_in_group.clone(), - validated_in_group: full_local.validated_in_group.clone() & !remote.validated_in_group.clone(), + seconded_in_group: full_local.seconded_in_group.clone() & + !remote.seconded_in_group.clone(), + validated_in_group: full_local.validated_in_group.clone() & + !remote.validated_in_group.clone(), }) } } diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 2e457477b9b2..0c6a88710738 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -86,14 +86,11 @@ const COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE: Rep = Rep::CostMinor("Unexpected Manifest, missing knowlege for relay parent"); const COST_UNEXPECTED_MANIFEST_DISALLOWED: Rep = Rep::CostMinor("Unexpected Manifest, Peer Disallowed"); -const 
COST_CONFLICTING_MANIFEST: Rep = - Rep::CostMajor("Manifest conflicts with previous"); +const COST_CONFLICTING_MANIFEST: Rep = Rep::CostMajor("Manifest conflicts with previous"); const COST_INSUFFICIENT_MANIFEST: Rep = Rep::CostMajor("Manifest statements insufficient to back candidate"); -const COST_MALFORMED_MANIFEST: Rep = - Rep::CostMajor("Manifest is malformed"); -const COST_DUPLICATE_MANIFEST: Rep = - Rep::CostMinor("Duplicate Manifest"); +const COST_MALFORMED_MANIFEST: Rep = Rep::CostMajor("Manifest is malformed"); +const COST_DUPLICATE_MANIFEST: Rep = Rep::CostMinor("Duplicate Manifest"); const COST_INVALID_SIGNATURE: Rep = Rep::CostMajor("Invalid Statement Signature"); const COST_IMPROPERLY_DECODED_RESPONSE: Rep = @@ -606,7 +603,6 @@ pub(crate) async fn share_local_statement( }, Ok(true) => (compact_statement, candidate_hash), } - }; if let Some(post_confirmation) = post_confirmation { @@ -1322,16 +1318,20 @@ async fn provide_candidate_to_grid( } local_validator.grid_tracker.manifest_sent_to(v, candidate_hash, filter.clone()); - post_statements.extend(post_acknowledgement_statement_messages( - v, - relay_parent, - &mut local_validator.grid_tracker, - &relay_parent_state.statement_store, - &per_session.groups, - group_index, - candidate_hash, - &filter, - ).into_iter().map(|m| (vec![p], m))); + post_statements.extend( + post_acknowledgement_statement_messages( + v, + relay_parent, + &mut local_validator.grid_tracker, + &relay_parent_state.statement_store, + &per_session.groups, + group_index, + candidate_hash, + &filter, + ) + .into_iter() + .map(|m| (vec![p], m)), + ); } if !inventory_peers.is_empty() { @@ -1343,12 +1343,16 @@ async fn provide_candidate_to_grid( } if !ack_peers.is_empty() { - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(ack_peers, ack_message.into())) - .await; + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + ack_peers, + ack_message.into(), + )) + .await; } if !post_statements.is_empty() { - 
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(post_statements)).await; + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(post_statements)) + .await; } } @@ -1359,13 +1363,10 @@ fn group_for_para( ) -> Option { // Note: this won't work well for parathreads as it assumes that core assignments are fixed // across blocks. - let core_index = availability_cores - .iter() - .position(|c| c.para_id() == Some(para_id)); + let core_index = availability_cores.iter().position(|c| c.para_id() == Some(para_id)); - core_index.map(|c| { - group_rotation_info.group_for_core(CoreIndex(c as _), availability_cores.len()) - }) + core_index + .map(|c| group_rotation_info.group_for_core(CoreIndex(c as _), availability_cores.len())) } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] @@ -1415,7 +1416,8 @@ async fn fragment_tree_update_inner( candidate_hash, receipt, persisted_validation_data, - } = hypo { + } = hypo + { let confirmed_candidate = state.candidates.get_confirmed(&candidate_hash); let prs = state.per_relay_parent.get_mut(&receipt.descriptor().relay_parent); if let (Some(confirmed), Some(prs)) = (confirmed_candidate, prs) { @@ -1435,7 +1437,8 @@ async fn fragment_tree_update_inner( prs, confirmed, per_session, - ).await; + ) + .await; } } } @@ -1487,7 +1490,7 @@ async fn handle_incoming_manifest( let relay_parent_state = match state.per_relay_parent.get_mut(&manifest.relay_parent) { None => { report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE).await; - return; + return }, Some(s) => s, }; @@ -1500,7 +1503,7 @@ async fn handle_incoming_manifest( let local_validator = match relay_parent_state.local_validator.as_mut() { None => { report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE).await; - return; + return }, Some(x) => x, }; @@ -1513,7 +1516,7 @@ async fn handle_incoming_manifest( if expected_group != Some(manifest.group_index) { report_peer(ctx.sender(), peer, 
COST_MALFORMED_MANIFEST).await; - return; + return } let grid_topology = match per_session.grid_view.as_ref() { @@ -1531,8 +1534,8 @@ async fn handle_incoming_manifest( let sender_index = match sender_index { None => { report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_DISALLOWED).await; - return; - } + return + }, Some(s) => s, }; @@ -1554,24 +1557,24 @@ async fn handle_incoming_manifest( Ok(x) => x, Err(grid::ManifestImportError::Conflicting) => { report_peer(ctx.sender(), peer, COST_CONFLICTING_MANIFEST).await; - return; - } + return + }, Err(grid::ManifestImportError::Overflow) => { report_peer(ctx.sender(), peer, COST_EXCESSIVE_SECONDED).await; - return; - } + return + }, Err(grid::ManifestImportError::Insufficient) => { report_peer(ctx.sender(), peer, COST_INSUFFICIENT_MANIFEST).await; - return; - } + return + }, Err(grid::ManifestImportError::Malformed) => { report_peer(ctx.sender(), peer, COST_MALFORMED_MANIFEST).await; - return; - } + return + }, Err(grid::ManifestImportError::Disallowed) => { report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_DISALLOWED).await; - return; - } + return + }, }; // 3. if accepted by grid, insert as unconfirmed. 
@@ -1583,7 +1586,7 @@ async fn handle_incoming_manifest( Some((manifest.parent_head_data_hash, manifest.para_id)), ) { report_peer(ctx.sender(), peer, COST_INACCURATE_ADVERTISEMENT).await; - return; + return } if acknowledge { @@ -1612,8 +1615,16 @@ async fn handle_incoming_manifest( protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), ); - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(vec![peer.clone()], msg.into())).await; - local_validator.grid_tracker.manifest_sent_to(sender_index, manifest.candidate_hash, local_knowledge); + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + vec![peer.clone()], + msg.into(), + )) + .await; + local_validator.grid_tracker.manifest_sent_to( + sender_index, + manifest.candidate_hash, + local_knowledge, + ); let messages = post_acknowledgement_statement_messages( sender_index, @@ -1628,17 +1639,15 @@ async fn handle_incoming_manifest( if !messages.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages( - messages.into_iter().map(|m| (vec![peer.clone()], m)).collect() - )).await; + messages.into_iter().map(|m| (vec![peer.clone()], m)).collect(), + )) + .await; } } else if !state.candidates.is_confirmed(&manifest.candidate_hash) { // 5. 
if unconfirmed, add request entry - state.request_manager - .get_or_insert( - manifest.relay_parent, - manifest.candidate_hash, - manifest.group_index - ) + state + .request_manager + .get_or_insert(manifest.relay_parent, manifest.candidate_hash, manifest.group_index) .get_mut() .add_peer(peer); } @@ -1656,15 +1665,16 @@ fn post_acknowledgement_statement_messages( candidate_hash: CandidateHash, local_knowledge: &StatementFilter, ) -> Vec { - let sending_filter = match grid_tracker - .pending_statements_for(recipient, candidate_hash, local_knowledge) - { - None => return Vec::new(), - Some(f) => f, - }; + let sending_filter = + match grid_tracker.pending_statements_for(recipient, candidate_hash, local_knowledge) { + None => return Vec::new(), + Some(f) => f, + }; let mut messages = Vec::new(); - for statement in statement_store.group_statements(groups, group_index, candidate_hash, &sending_filter) { + for statement in + statement_store.group_statements(groups, group_index, candidate_hash, &sending_filter) + { grid_tracker.sent_or_received_direct_statement( groups, statement.validator_index(), @@ -1672,10 +1682,13 @@ fn post_acknowledgement_statement_messages( statement.payload(), ); - messages.push(Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - statement.as_unchecked().clone(), - ).into())); + messages.push(Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + statement.as_unchecked().clone(), + ) + .into(), + )); } messages @@ -1713,7 +1726,7 @@ pub(crate) async fn handle_backed_candidate_message( ); return - } + }, Some(c) => c, }; @@ -1735,7 +1748,8 @@ pub(crate) async fn handle_backed_candidate_message( per_session, &state.authorities, &state.peers, - ).await; + ) + .await; } /// Applies state & p2p updates as a result of a newly confirmed candidate. 
diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index f22c8371a194..6b9e9f216c01 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -186,16 +186,22 @@ impl StatementStore { ) -> impl Iterator + 'a { let group_validators = groups.get(group_index); - let seconded_statements = filter.seconded_in_group + let seconded_statements = filter + .seconded_in_group .iter_ones() .filter_map(move |i| group_validators.as_ref().and_then(|g| g.get(i))) - .filter_map(move |v| self.known_statements.get(&(*v, CompactStatement::Seconded(candidate_hash)))) + .filter_map(move |v| { + self.known_statements.get(&(*v, CompactStatement::Seconded(candidate_hash))) + }) .map(|s| &s.statement); - let valid_statements = filter.validated_in_group + let valid_statements = filter + .validated_in_group .iter_ones() .filter_map(move |i| group_validators.as_ref().and_then(|g| g.get(i))) - .filter_map(move |v| self.known_statements.get(&(*v, CompactStatement::Valid(candidate_hash)))) + .filter_map(move |v| { + self.known_statements.get(&(*v, CompactStatement::Valid(candidate_hash))) + }) .map(|s| &s.statement); seconded_statements.chain(valid_statements) From 15f08cc87fb70784769de6c6be3001b744b98993 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 24 Jan 2023 00:37:21 -0600 Subject: [PATCH 126/220] handle incoming acknowledgements --- .../src/vstaging/mod.rs | 307 +++++++++++++----- 1 file changed, 222 insertions(+), 85 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 0c6a88710738..dd37ffe9c34c 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -91,6 +91,8 @@ const COST_INSUFFICIENT_MANIFEST: Rep = 
Rep::CostMajor("Manifest statements insufficient to back candidate"); const COST_MALFORMED_MANIFEST: Rep = Rep::CostMajor("Manifest is malformed"); const COST_DUPLICATE_MANIFEST: Rep = Rep::CostMinor("Duplicate Manifest"); +const COST_UNEXPECTED_ACKNOWLEDGEMENT_UNKNOWN_CANDIDATE: Rep = + Rep::CostMinor("Unexpected acknowledgement, unknown candidate"); const COST_INVALID_SIGNATURE: Rep = Rep::CostMajor("Invalid Statement Signature"); const COST_IMPROPERLY_DECODED_RESPONSE: Rep = @@ -1473,37 +1475,54 @@ async fn new_confirmed_candidate_fragment_tree_updates( fragment_tree_update_inner(ctx, state, None, None, Some(vec![candidate])).await } +struct ManifestImportSuccess<'a> { + relay_parent_state: &'a mut PerRelayParentState, + per_session: &'a PerSessionState, + acknowledge: bool, + sender_index: ValidatorIndex, +} + +/// Handles the common part of incoming manifests of both types (full & acknowledgement) +/// +/// Basic sanity checks around data, importing the manifest into the grid tracker, finding the +/// sending peer's validator index, reporting the peer for any misbehavior, etc. #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn handle_incoming_manifest( +async fn handle_incoming_manifest_common<'a, Context>( ctx: &mut Context, - state: &mut State, peer: PeerId, - manifest: net_protocol::vstaging::BackedCandidateManifest, -) { + peers: &HashMap, + per_relay_parent: &'a mut HashMap, + per_session: &'a HashMap, + candidates: &mut Candidates, + candidate_hash: CandidateHash, + relay_parent: Hash, + para_id: ParaId, + manifest_summary: grid::ManifestSummary, + manifest_kind: grid::ManifestKind, +) -> Option> { // 1. sanity checks: peer is connected, relay-parent in state, para ID matches group index. 
- - let peer_state = match state.peers.get(&peer) { - None => return, + let peer_state = match peers.get(&peer) { + None => return None, Some(p) => p, }; - let relay_parent_state = match state.per_relay_parent.get_mut(&manifest.relay_parent) { + let relay_parent_state = match per_relay_parent.get_mut(&relay_parent) { None => { report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE).await; - return + return None; }, Some(s) => s, }; - let per_session = match state.per_session.get(&relay_parent_state.session) { - None => return, + let per_session = match per_session.get(&relay_parent_state.session) { + None => return None, Some(s) => s, }; let local_validator = match relay_parent_state.local_validator.as_mut() { None => { report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE).await; - return + return None }, Some(x) => x, }; @@ -1511,21 +1530,21 @@ async fn handle_incoming_manifest( let expected_group = group_for_para( &relay_parent_state.availability_cores, &relay_parent_state.group_rotation_info, - manifest.para_id, + para_id, ); - if expected_group != Some(manifest.group_index) { + if expected_group != Some(manifest_summary.claimed_group_index) { report_peer(ctx.sender(), peer, COST_MALFORMED_MANIFEST).await; - return + return None; } let grid_topology = match per_session.grid_view.as_ref() { - None => return, + None => return None, Some(x) => x, }; let sender_index = grid_topology - .iter_group_senders(manifest.group_index) + .iter_group_senders(manifest_summary.claimed_group_index) .filter_map(|i| per_session.session_info.discovery_keys.get(i.0 as usize).map(|ad| (i, ad))) .filter(|(_, ad)| peer_state.is_authority(ad)) .map(|(i, _)| i) @@ -1534,7 +1553,7 @@ async fn handle_incoming_manifest( let sender_index = match sender_index { None => { report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_DISALLOWED).await; - return + return None; }, Some(s) => s, }; @@ -1543,52 +1562,132 @@ async fn handle_incoming_manifest( let 
acknowledge = match local_validator.grid_tracker.import_manifest( grid_topology, &per_session.groups, - manifest.candidate_hash, + candidate_hash, todo!(), // TODO [now]: seconding limit - grid::ManifestSummary { - claimed_parent_hash: manifest.parent_head_data_hash, - claimed_group_index: manifest.group_index, - seconded_in_group: manifest.seconded_in_group, - validated_in_group: manifest.validated_in_group, - }, - grid::ManifestKind::Full, + manifest_summary, + manifest_kind, sender_index, ) { Ok(x) => x, Err(grid::ManifestImportError::Conflicting) => { report_peer(ctx.sender(), peer, COST_CONFLICTING_MANIFEST).await; - return + return None }, Err(grid::ManifestImportError::Overflow) => { report_peer(ctx.sender(), peer, COST_EXCESSIVE_SECONDED).await; - return + return None }, Err(grid::ManifestImportError::Insufficient) => { report_peer(ctx.sender(), peer, COST_INSUFFICIENT_MANIFEST).await; - return + return None }, Err(grid::ManifestImportError::Malformed) => { report_peer(ctx.sender(), peer, COST_MALFORMED_MANIFEST).await; - return + return None }, Err(grid::ManifestImportError::Disallowed) => { report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_DISALLOWED).await; - return + return None }, }; // 3. if accepted by grid, insert as unconfirmed. 
- if let Err(BadAdvertisement) = state.candidates.insert_unconfirmed( + if let Err(BadAdvertisement) = candidates.insert_unconfirmed( peer.clone(), - manifest.candidate_hash, - manifest.relay_parent, - manifest.group_index, - Some((manifest.parent_head_data_hash, manifest.para_id)), + candidate_hash, + relay_parent, + manifest_summary.claimed_group_index, + Some((manifest_summary.claimed_parent_hash, para_id)), ) { report_peer(ctx.sender(), peer, COST_INACCURATE_ADVERTISEMENT).await; - return + return None; + } + + Some(ManifestImportSuccess { + relay_parent_state, + per_session, + acknowledge, + sender_index, + }) +} + +/// Produce a list of network messages to send to a peer, following acknowledgement of a manifest. +/// This notes the messages as sent within the grid state. +fn post_acknowledgement_statement_messages( + recipient: ValidatorIndex, + relay_parent: Hash, + grid_tracker: &mut GridTracker, + statement_store: &StatementStore, + groups: &Groups, + group_index: GroupIndex, + candidate_hash: CandidateHash, + local_knowledge: &StatementFilter, +) -> Vec { + let sending_filter = + match grid_tracker.pending_statements_for(recipient, candidate_hash, local_knowledge) { + None => return Vec::new(), + Some(f) => f, + }; + + let mut messages = Vec::new(); + for statement in + statement_store.group_statements(groups, group_index, candidate_hash, &sending_filter) + { + grid_tracker.sent_or_received_direct_statement( + groups, + statement.validator_index(), + recipient, + statement.payload(), + ); + + messages.push(Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + statement.as_unchecked().clone(), + ) + .into(), + )); } + messages +} + +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn handle_incoming_manifest( + ctx: &mut Context, + state: &mut State, + peer: PeerId, + manifest: net_protocol::vstaging::BackedCandidateManifest, +) { + let x = match handle_incoming_manifest_common( 
+ ctx, + peer.clone(), + &state.peers, + &mut state.per_relay_parent, + &state.per_session, + &mut state.candidates, + manifest.candidate_hash, + manifest.relay_parent, + manifest.para_id, + grid::ManifestSummary { + claimed_parent_hash: manifest.parent_head_data_hash, + claimed_group_index: manifest.group_index, + seconded_in_group: manifest.seconded_in_group, + validated_in_group: manifest.validated_in_group, + }, + grid::ManifestKind::Full, + ).await { + Some(x) => x, + None => return, + }; + + let ManifestImportSuccess { relay_parent_state, per_session, acknowledge, sender_index } = x; + + let local_validator = match relay_parent_state.local_validator.as_mut() { + None => return, + Some(l) => l, + }; + if acknowledge { // 4. if already confirmed & known within grid, acknowledge candidate let local_knowledge = { @@ -1612,7 +1711,7 @@ async fn handle_incoming_manifest( }; let msg = Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( @@ -1623,7 +1722,7 @@ async fn handle_incoming_manifest( local_validator.grid_tracker.manifest_sent_to( sender_index, manifest.candidate_hash, - local_knowledge, + local_knowledge.clone(), ); let messages = post_acknowledgement_statement_messages( @@ -1653,58 +1752,96 @@ async fn handle_incoming_manifest( } } -/// Produce a list of network messages to send to a peer, following acknowledgement of a manifest. -/// This notes the messages as sent within the grid state. 
-fn post_acknowledgement_statement_messages( - recipient: ValidatorIndex, - relay_parent: Hash, - grid_tracker: &mut GridTracker, - statement_store: &StatementStore, - groups: &Groups, - group_index: GroupIndex, - candidate_hash: CandidateHash, - local_knowledge: &StatementFilter, -) -> Vec { - let sending_filter = - match grid_tracker.pending_statements_for(recipient, candidate_hash, local_knowledge) { - None => return Vec::new(), - Some(f) => f, - }; - - let mut messages = Vec::new(); - for statement in - statement_store.group_statements(groups, group_index, candidate_hash, &sending_filter) - { - grid_tracker.sent_or_received_direct_statement( - groups, - statement.validator_index(), - recipient, - statement.payload(), - ); - - messages.push(Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - statement.as_unchecked().clone(), - ) - .into(), - )); - } - - messages -} - #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn handle_incoming_acknowledgement( ctx: &mut Context, state: &mut State, peer: PeerId, - manifest: net_protocol::vstaging::BackedCandidateAcknowledgement, + acknowledgement: net_protocol::vstaging::BackedCandidateAcknowledgement, ) { - // TODO [now]: - // 1. sanity checks: relay-parent in state, para ID matches group index, - // 2. sanity checks: peer is validator, bitvec size, import into grid tracker - // 3. if accepted by grid, send follow-up statements. + // The key difference between acknowledgments and full manifests is that only + // the candidate hash is included alongside the bitfields, so the candidate + // must be confirmed for us to even process it. 
+ + let candidate_hash = acknowledgement.candidate_hash; + let (relay_parent, parent_head_data_hash, group_index, para_id) = { + match state.candidates.get_confirmed(&candidate_hash) { + Some(c) => ( + c.relay_parent(), + c.parent_head_data_hash(), + c.group_index(), + c.para_id(), + ), + None => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_ACKNOWLEDGEMENT_UNKNOWN_CANDIDATE).await; + return; + } + } + }; + + let x = match handle_incoming_manifest_common( + ctx, + peer.clone(), + &state.peers, + &mut state.per_relay_parent, + &state.per_session, + &mut state.candidates, + candidate_hash, + relay_parent, + para_id, + grid::ManifestSummary { + claimed_parent_hash: parent_head_data_hash, + claimed_group_index: group_index, + seconded_in_group: acknowledgement.seconded_in_group, + validated_in_group: acknowledgement.validated_in_group, + }, + grid::ManifestKind::Acknowledgement, + ).await { + Some(x) => x, + None => return, + }; + + let ManifestImportSuccess { relay_parent_state, per_session, sender_index, .. } = x; + + let local_validator = match relay_parent_state.local_validator.as_mut() { + None => return, + Some(l) => l, + }; + + // if already confirmed & known within grid, follow up with direct statements + // the counterparty is not aware of. 
+ let local_knowledge = { + let group_size = match per_session.groups.get(group_index) { + None => return, // sanity + Some(x) => x.len(), + }; + + let mut f = StatementFilter::new(group_size); + relay_parent_state.statement_store.fill_statement_filter( + group_index, + candidate_hash, + &mut f, + ); + f + }; + + let messages = post_acknowledgement_statement_messages( + sender_index, + relay_parent, + &mut local_validator.grid_tracker, + &relay_parent_state.statement_store, + &per_session.groups, + group_index, + candidate_hash, + &local_knowledge, + ); + + if !messages.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages( + messages.into_iter().map(|m| (vec![peer.clone()], m)).collect(), + )) + .await; + } } /// Handle a notification of a candidate being backed. From c8d11bedbad0dc5ed788c01c8a1e930a7df5caf6 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 24 Jan 2023 00:55:25 -0600 Subject: [PATCH 127/220] a little DRYing --- .../network/statement-distribution/src/lib.rs | 2 + .../src/vstaging/mod.rs | 53 +++++++++++-------- 2 files changed, 33 insertions(+), 22 deletions(-) diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 829f3a8554a3..a307c85aad16 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -145,6 +145,8 @@ impl StatementDistributionSubsystem { ) .map_err(FatalError::SpawnTask)?; + // TODO [now]: handle vstaging req/res: dispatch pending statements & handling responses. 
+ loop { let message = MuxedMessage::receive(&mut ctx, &mut v1_req_receiver, &mut v1_res_receiver).await; diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index dd37ffe9c34c..595598610b9f 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -366,7 +366,8 @@ pub(crate) async fn handle_active_leaves_update( None => return Ok(()), }; - for new_relay_parent in state.implicit_view.all_allowed_relay_parents() { + let new_relay_parents = state.implicit_view.all_allowed_relay_parents().collect::>(); + for new_relay_parent in new_relay_parents.iter().cloned() { if state.per_relay_parent.contains_key(new_relay_parent) { continue } @@ -459,8 +460,6 @@ pub(crate) async fn handle_active_leaves_update( // TODO [now]: update peers which have the leaf in their view. // update their implicit view. send any messages accordingly. - // TODO [now]: determine which candidates are importable under the given - // active leaf new_leaf_fragment_tree_updates(ctx, state, leaf.hash).await; Ok(()) @@ -1210,6 +1209,21 @@ async fn send_backing_fresh_statements( } } +fn local_knowledge_filter( + group_size: usize, + group_index: GroupIndex, + candidate_hash: CandidateHash, + statement_store: &StatementStore, +) -> StatementFilter { + let mut f = StatementFilter::new(group_size); + statement_store.fill_statement_filter( + group_index, + candidate_hash, + &mut f, + ); + f +} + // This provides a backable candidate to the grid and dispatches backable candidate announcements // and acknowledgements via the grid topology. If the session topology is not yet // available, this will be a no-op. 
@@ -1267,15 +1281,12 @@ async fn provide_candidate_to_grid( group_size, ); - let filter = { - let mut f = StatementFilter::new(group_size); - relay_parent_state.statement_store.fill_statement_filter( - group_index, - candidate_hash, - &mut f, - ); - f - }; + let filter = local_knowledge_filter( + group_size, + group_index, + candidate_hash, + &relay_parent_state.statement_store, + ); let manifest = protocol_vstaging::BackedCandidateManifest { relay_parent, @@ -1696,13 +1707,12 @@ async fn handle_incoming_manifest( Some(x) => x.len(), }; - let mut f = StatementFilter::new(group_size); - relay_parent_state.statement_store.fill_statement_filter( + local_knowledge_filter( + group_size, manifest.group_index, manifest.candidate_hash, - &mut f, - ); - f + &relay_parent_state.statement_store, + ) }; let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { candidate_hash: manifest.candidate_hash, @@ -1816,13 +1826,12 @@ async fn handle_incoming_acknowledgement( Some(x) => x.len(), }; - let mut f = StatementFilter::new(group_size); - relay_parent_state.statement_store.fill_statement_filter( + local_knowledge_filter( + group_size, group_index, candidate_hash, - &mut f, - ); - f + &relay_parent_state.statement_store, + ) }; let messages = post_acknowledgement_statement_messages( From cc1d05051ec5cd87edb56ad818dc086dd175a094 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 24 Jan 2023 00:58:35 -0600 Subject: [PATCH 128/220] wire up network messages to handlers --- .../src/vstaging/mod.rs | 22 ++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 595598610b9f..702e6cb37fb3 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -325,13 +325,29 @@ pub(crate) async fn handle_network_update( relay_parent, statement, ), - ) => {}, // 
TODO [now] + ) => handle_incoming_statement( + ctx, + state, + peer_id, + relay_parent, + statement, + ).await, net_protocol::StatementDistributionMessage::VStaging( protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(inner), - ) => {}, // TODO [now] + ) => handle_incoming_manifest( + ctx, + state, + peer_id, + inner, + ).await, net_protocol::StatementDistributionMessage::VStaging( protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(inner), - ) => {}, // TODO [now] + ) => handle_incoming_acknowledgement( + ctx, + state, + peer_id, + inner, + ).await, } }, NetworkBridgeEvent::PeerViewChange(peer_id, view) => { From 1380aa9c06786e75fa3a148b9807dedbe7c47c27 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 24 Jan 2023 00:58:43 -0600 Subject: [PATCH 129/220] fmt --- .../src/vstaging/mod.rs | 92 +++++++------------ 1 file changed, 31 insertions(+), 61 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 702e6cb37fb3..a674d623d6a7 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -314,41 +314,20 @@ pub(crate) async fn handle_network_update( // in practice, this is a small issue & the API of receiving topologies could // be altered to fix it altogether. 
}, - NetworkBridgeEvent::PeerMessage(peer_id, message) => { - match message { - net_protocol::StatementDistributionMessage::V1(_) => return, - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), - ) => return, - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - statement, - ), - ) => handle_incoming_statement( - ctx, - state, - peer_id, - relay_parent, - statement, - ).await, - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(inner), - ) => handle_incoming_manifest( - ctx, - state, - peer_id, - inner, - ).await, - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(inner), - ) => handle_incoming_acknowledgement( - ctx, - state, - peer_id, - inner, - ).await, - } + NetworkBridgeEvent::PeerMessage(peer_id, message) => match message { + net_protocol::StatementDistributionMessage::V1(_) => return, + net_protocol::StatementDistributionMessage::VStaging( + protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), + ) => return, + net_protocol::StatementDistributionMessage::VStaging( + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) => handle_incoming_statement(ctx, state, peer_id, relay_parent, statement).await, + net_protocol::StatementDistributionMessage::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(inner), + ) => handle_incoming_manifest(ctx, state, peer_id, inner).await, + net_protocol::StatementDistributionMessage::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(inner), + ) => handle_incoming_acknowledgement(ctx, state, peer_id, inner).await, }, NetworkBridgeEvent::PeerViewChange(peer_id, view) => { // TODO [now] update explicit and 
implicit views @@ -1232,11 +1211,7 @@ fn local_knowledge_filter( statement_store: &StatementStore, ) -> StatementFilter { let mut f = StatementFilter::new(group_size); - statement_store.fill_statement_filter( - group_index, - candidate_hash, - &mut f, - ); + statement_store.fill_statement_filter(group_index, candidate_hash, &mut f); f } @@ -1536,7 +1511,7 @@ async fn handle_incoming_manifest_common<'a, Context>( let relay_parent_state = match per_relay_parent.get_mut(&relay_parent) { None => { report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE).await; - return None; + return None }, Some(s) => s, }; @@ -1562,7 +1537,7 @@ async fn handle_incoming_manifest_common<'a, Context>( if expected_group != Some(manifest_summary.claimed_group_index) { report_peer(ctx.sender(), peer, COST_MALFORMED_MANIFEST).await; - return None; + return None } let grid_topology = match per_session.grid_view.as_ref() { @@ -1580,7 +1555,7 @@ async fn handle_incoming_manifest_common<'a, Context>( let sender_index = match sender_index { None => { report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_DISALLOWED).await; - return None; + return None }, Some(s) => s, }; @@ -1627,15 +1602,10 @@ async fn handle_incoming_manifest_common<'a, Context>( Some((manifest_summary.claimed_parent_hash, para_id)), ) { report_peer(ctx.sender(), peer, COST_INACCURATE_ADVERTISEMENT).await; - return None; + return None } - Some(ManifestImportSuccess { - relay_parent_state, - per_session, - acknowledge, - sender_index, - }) + Some(ManifestImportSuccess { relay_parent_state, per_session, acknowledge, sender_index }) } /// Produce a list of network messages to send to a peer, following acknowledgement of a manifest. 
@@ -1703,7 +1673,9 @@ async fn handle_incoming_manifest( validated_in_group: manifest.validated_in_group, }, grid::ManifestKind::Full, - ).await { + ) + .await + { Some(x) => x, None => return, }; @@ -1792,16 +1764,12 @@ async fn handle_incoming_acknowledgement( let candidate_hash = acknowledgement.candidate_hash; let (relay_parent, parent_head_data_hash, group_index, para_id) = { match state.candidates.get_confirmed(&candidate_hash) { - Some(c) => ( - c.relay_parent(), - c.parent_head_data_hash(), - c.group_index(), - c.para_id(), - ), + Some(c) => (c.relay_parent(), c.parent_head_data_hash(), c.group_index(), c.para_id()), None => { - report_peer(ctx.sender(), peer, COST_UNEXPECTED_ACKNOWLEDGEMENT_UNKNOWN_CANDIDATE).await; - return; - } + report_peer(ctx.sender(), peer, COST_UNEXPECTED_ACKNOWLEDGEMENT_UNKNOWN_CANDIDATE) + .await; + return + }, } }; @@ -1822,7 +1790,9 @@ async fn handle_incoming_acknowledgement( validated_in_group: acknowledgement.validated_in_group, }, grid::ManifestKind::Acknowledgement, - ).await { + ) + .await + { Some(x) => x, None => return, }; From 3d7af9672ff433a8cf6daf4ff6bb2e9a477d054f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 24 Jan 2023 01:11:50 -0600 Subject: [PATCH 130/220] some skeleton code for peer view update handling --- .../src/vstaging/mod.rs | 48 +++++++++++++++++-- 1 file changed, 45 insertions(+), 3 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index a674d623d6a7..65c62a672f66 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -226,6 +226,28 @@ struct PeerState { } impl PeerState { + // Update the view, returning a vector of implicit relay-parents which weren't previously + // part of the view. 
+ fn update_view(&mut self, new_view: View, local_implicit: &ImplicitView) -> Vec { + let next_implicit = new_view + .iter() + .flat_map(|x| local_implicit.known_allowed_relay_parents_under(x, None)) + .flat_map(|x| x) + .cloned() + .collect::>(); + + let fresh_implicit = next_implicit + .iter() + .filter(|x| !self.implicit_view.contains(x)) + .cloned() + .collect(); + + self.view = new_view; + self.implicit_view = next_implicit; + + fresh_implicit + } + // Whether we know that a peer knows a relay-parent. // The peer knows the relay-parent if it is either implicit or explicit // in their view. However, if it is implicit via an active-leaf we don't @@ -329,9 +351,8 @@ pub(crate) async fn handle_network_update( protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(inner), ) => handle_incoming_acknowledgement(ctx, state, peer_id, inner).await, }, - NetworkBridgeEvent::PeerViewChange(peer_id, view) => { - // TODO [now] update explicit and implicit views - }, + NetworkBridgeEvent::PeerViewChange(peer_id, view) => + handle_peer_view_update(ctx, state, peer_id, view).await, NetworkBridgeEvent::OurViewChange(_view) => { // handled by `handle_activated_leaf` }, @@ -519,6 +540,27 @@ fn handle_deactivate_leaves(state: &mut State, leaves: &[Hash]) { state.per_session.retain(|s, _| sessions.contains(s)); } +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn handle_peer_view_update( + ctx: &mut Context, + state: &mut State, + peer: PeerId, + new_view: View, +) { + let fresh_implicit = { + let peer_data = match state.peers.get_mut(&peer) { + None => return, + Some(p) => p, + }; + + peer_data.update_view(new_view, &state.implicit_view) + }; + + for new_relay_parent in fresh_implicit { + // TODO [now]: send all direct statements, manifests, or acknowledgements they need. + } +} + // Imports a locally originating statement and distributes it to peers. 
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] pub(crate) async fn share_local_statement( From badfe68753a86a66330566246e378434c7329d6b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 24 Jan 2023 01:28:38 -0600 Subject: [PATCH 131/220] more peer view skeleton stuff --- .../src/vstaging/mod.rs | 70 ++++++++++++++++--- 1 file changed, 59 insertions(+), 11 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 65c62a672f66..e3215822877e 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -248,13 +248,29 @@ impl PeerState { fresh_implicit } + // Attempt to reconcile the view with new information about the implicit relay parents + // under an active leaf. + fn reconcile_active_leaf(&mut self, leaf_hash: Hash, implicit: &[Hash]) -> Vec { + if !self.view.contains(&leaf_hash) { + return Vec::new() + } + + let mut v = Vec::with_capacity(implicit.len()); + for i in implicit { + if self.implicit_view.insert(*i) { + v.push(*i); + } + } + v + } + // Whether we know that a peer knows a relay-parent. // The peer knows the relay-parent if it is either implicit or explicit // in their view. However, if it is implicit via an active-leaf we don't // recognize, we will not accurately be able to recognize them as 'knowing' // the relay-parent. 
fn knows_relay_parent(&self, relay_parent: &Hash) -> bool { - self.implicit_view.contains(relay_parent) + self.implicit_view.contains(relay_parent) || self.view.contains(relay_parent) } fn is_authority(&self, authority_id: &AuthorityDiscoveryId) -> bool { @@ -382,16 +398,17 @@ pub(crate) async fn handle_active_leaves_update( None => return Ok(()), }; - let new_relay_parents = state.implicit_view.all_allowed_relay_parents().collect::>(); + let new_relay_parents = + state.implicit_view.all_allowed_relay_parents().cloned().collect::>(); for new_relay_parent in new_relay_parents.iter().cloned() { - if state.per_relay_parent.contains_key(new_relay_parent) { + if state.per_relay_parent.contains_key(&new_relay_parent) { continue } // New leaf: fetch info from runtime API and initialize // `per_relay_parent`. let session_index = polkadot_node_subsystem_util::request_session_index_for_child( - *new_relay_parent, + new_relay_parent, ctx.sender(), ) .await @@ -400,7 +417,7 @@ pub(crate) async fn handle_active_leaves_update( .map_err(JfyiError::FetchSessionIndex)?; let availability_cores = polkadot_node_subsystem_util::request_availability_cores( - *new_relay_parent, + new_relay_parent, ctx.sender(), ) .await @@ -409,7 +426,7 @@ pub(crate) async fn handle_active_leaves_update( .map_err(JfyiError::FetchAvailabilityCores)?; let group_rotation_info = - polkadot_node_subsystem_util::request_validator_groups(*new_relay_parent, ctx.sender()) + polkadot_node_subsystem_util::request_validator_groups(new_relay_parent, ctx.sender()) .await .await .map_err(JfyiError::RuntimeApiUnavailable)? 
@@ -418,7 +435,7 @@ pub(crate) async fn handle_active_leaves_update( if !state.per_session.contains_key(&session_index) { let session_info = polkadot_node_subsystem_util::request_session_info( - *new_relay_parent, + new_relay_parent, session_index, ctx.sender(), ) @@ -461,7 +478,7 @@ pub(crate) async fn handle_active_leaves_update( }); state.per_relay_parent.insert( - *new_relay_parent, + new_relay_parent, PerRelayParentState { validator_state: HashMap::new(), local_validator, @@ -473,8 +490,25 @@ pub(crate) async fn handle_active_leaves_update( ); } - // TODO [now]: update peers which have the leaf in their view. - // update their implicit view. send any messages accordingly. + // Reconcile all peers' views with the active leaf and any relay parents + // it implies. If they learned about the block before we did, this reconciliation will give non-empty + // results and we should send them messages concerning all activated relay-parents. + { + let mut update_peers = Vec::new(); + for (peer, peer_state) in state.peers.iter_mut() { + let fresh = peer_state.reconcile_active_leaf(leaf.hash, &new_relay_parents); + if !fresh.is_empty() { + update_peers.push((peer.clone(), fresh)); + } + } + + for (peer, fresh) in update_peers { + for fresh_relay_parent in fresh { + send_peer_messages_for_relay_parent(ctx, state, peer.clone(), fresh_relay_parent) + .await; + } + } + } new_leaf_fragment_tree_updates(ctx, state, leaf.hash).await; @@ -557,10 +591,24 @@ async fn handle_peer_view_update( }; for new_relay_parent in fresh_implicit { - // TODO [now]: send all direct statements, manifests, or acknowledgements they need. + send_peer_messages_for_relay_parent(ctx, state, peer.clone(), new_relay_parent).await; } } +/// Send a peer, apparently just becoming aware of a relay-parent, all messages +/// concerning that relay-parent. 
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn send_peer_messages_for_relay_parent( + ctx: &mut Context, + state: &mut State, + peer: PeerId, + relay_parent: Hash, +) { + // TODO [now] determine validator index + // send cluster statements + // send grid manifests & acknowledgements +} + // Imports a locally originating statement and distributes it to peers. #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] pub(crate) async fn share_local_statement( From 1e2848cc41d6667a7c130e006c1cac6838c32f08 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Wed, 25 Jan 2023 17:02:07 +0100 Subject: [PATCH 132/220] Fix async backing statement distribution tests (#6621) * Fix compile errors in tests * Cargo fmt --- .../src/vstaging/grid.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index a316d2b2ea51..a44ac3a53f42 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -1117,7 +1117,7 @@ mod tests { }; let groups = Groups::new( - vec![vec![ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]], + vec![vec![ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]].into(), &[ AuthorityDiscoveryPair::generate().0.public(), AuthorityDiscoveryPair::generate().0.public(), @@ -1143,6 +1143,7 @@ mod tests { seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], }, + ManifestKind::Full, ValidatorIndex(1), ), Err(ManifestImportError::Disallowed) @@ -1162,6 +1163,7 @@ mod tests { seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], }, + ManifestKind::Full, ValidatorIndex(0), ), Err(ManifestImportError::Disallowed) @@ -1184,7 +1186,7 @@ mod tests { }; let groups = Groups::new( - 
vec![vec![ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]], + vec![vec![ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]].into(), &[ AuthorityDiscoveryPair::generate().0.public(), AuthorityDiscoveryPair::generate().0.public(), @@ -1208,6 +1210,7 @@ mod tests { seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0, 1], validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], }, + ManifestKind::Full, ValidatorIndex(0), ), Err(ManifestImportError::Malformed) @@ -1225,6 +1228,7 @@ mod tests { seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1, 0], }, + ManifestKind::Full, ValidatorIndex(0), ), Err(ManifestImportError::Malformed) @@ -1247,7 +1251,7 @@ mod tests { }; let groups = Groups::new( - vec![vec![ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]], + vec![vec![ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]].into(), &[ AuthorityDiscoveryPair::generate().0.public(), AuthorityDiscoveryPair::generate().0.public(), @@ -1271,6 +1275,7 @@ mod tests { seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], }, + ManifestKind::Full, ValidatorIndex(0), ), Err(ManifestImportError::Malformed) @@ -1293,7 +1298,7 @@ mod tests { }; let groups = Groups::new( - vec![vec![ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]], + vec![vec![ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]].into(), &[ AuthorityDiscoveryPair::generate().0.public(), AuthorityDiscoveryPair::generate().0.public(), @@ -1319,6 +1324,7 @@ mod tests { seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], }, + ManifestKind::Full, ValidatorIndex(0), ), Err(ManifestImportError::Malformed) @@ -1338,6 +1344,7 @@ mod tests { seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], }, + ManifestKind::Full, ValidatorIndex(0), ), 
Err(ManifestImportError::Malformed) @@ -1357,9 +1364,10 @@ mod tests { seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], }, + ManifestKind::Full, ValidatorIndex(0), ), - Ok(()) + Ok(None) ); } From 374cd2ca05d00039c224bcbf6eb36b2730e654b4 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Mon, 30 Jan 2023 16:47:04 +0100 Subject: [PATCH 133/220] Resolve some todos in async backing statement-distribution branch (#6482) * Implement `remove_by_relay_parent` * Extract `minimum_votes` to shared primitives. * Add `can_send_statements_received_with_prejudice` test * Fix test * Update docstrings * Cargo fmt * Fix compile error * Fix compile errors in tests * Cargo fmt * Add module docs; write `test_priority_ordering` (first draft) * Fix `test_priority_ordering` * Move `insert_or_update_priority`: `Drop` -> `set_cluster_priority` * Address review comments * Remove `Entry::get_mut` --- node/core/backing/src/lib.rs | 12 +- .../src/vstaging/cluster.rs | 53 +++++- .../src/vstaging/groups.rs | 3 +- .../src/vstaging/mod.rs | 19 +- .../src/vstaging/requests.rs | 175 +++++++++++++++--- node/primitives/src/lib.rs | 7 + runtime/parachains/src/inclusion/mod.rs | 3 +- 7 files changed, 221 insertions(+), 51 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index e6be86f2b9a4..2ec9aa651564 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -80,8 +80,8 @@ use futures::{ use error::{Error, FatalResult}; use polkadot_node_primitives::{ - AvailableData, InvalidCandidate, PoV, SignedFullStatementWithPVD, StatementWithPVD, - ValidationResult, BACKING_EXECUTION_TIMEOUT, + minimum_votes, AvailableData, InvalidCandidate, PoV, SignedFullStatementWithPVD, + StatementWithPVD, ValidationResult, BACKING_EXECUTION_TIMEOUT, }; use polkadot_node_subsystem::{ messages::{ @@ -374,14 +374,6 @@ struct AttestingData { backing: Vec, } -/// How many votes we need to consider a candidate 
backed. -/// -/// WARNING: This has to be kept in sync with the runtime check in the inclusion module and -/// statement distribution. -fn minimum_votes(n_validators: usize) -> usize { - std::cmp::min(2, n_validators) -} - #[derive(Default)] struct TableContext { validator: Option, diff --git a/node/network/statement-distribution/src/vstaging/cluster.rs b/node/network/statement-distribution/src/vstaging/cluster.rs index 9c17e3e17e14..c79ebf08b663 100644 --- a/node/network/statement-distribution/src/vstaging/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/cluster.rs @@ -172,6 +172,7 @@ impl ClusterTracker { } /// Note that we accepted an incoming statement. This updates internal structures. + /// /// Should only be called after a successful `can_receive` call. pub fn note_received( &mut self, @@ -748,5 +749,55 @@ mod tests { ); } - // TODO [now] ensure statements received with prejudice don't prevent sending later + // Ensure statements received with prejudice don't prevent sending later. 
+ #[test] + fn can_send_statements_received_with_prejudice() { + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; + + let seconding_limit = 1; + + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); + let hash_a = CandidateHash(Hash::repeat_byte(1)); + let hash_b = CandidateHash(Hash::repeat_byte(2)); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Ok(Accept::Ok), + ); + + tracker.note_received( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(24), + ValidatorIndex(5), + CompactStatement::Seconded(hash_b), + ), + Ok(Accept::WithPrejudice), + ); + + tracker.note_received( + ValidatorIndex(24), + ValidatorIndex(5), + CompactStatement::Seconded(hash_b), + ); + + assert_eq!( + tracker.can_send( + ValidatorIndex(24), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Ok(()), + ); + } } diff --git a/node/network/statement-distribution/src/vstaging/groups.rs b/node/network/statement-distribution/src/vstaging/groups.rs index 94f6c1e219b0..e03fa63bc3f1 100644 --- a/node/network/statement-distribution/src/vstaging/groups.rs +++ b/node/network/statement-distribution/src/vstaging/groups.rs @@ -16,6 +16,7 @@ //! A utility for tracking groups and their members within a session. +use polkadot_node_primitives::minimum_votes; use polkadot_primitives::vstaging::{AuthorityDiscoveryId, GroupIndex, IndexedVec, ValidatorIndex}; use std::collections::HashMap; @@ -68,7 +69,7 @@ impl Groups { &self, group_index: GroupIndex, ) -> Option<(usize, usize)> { - self.get(group_index).map(|g| (g.len(), super::minimum_votes(g.len()))) + self.get(group_index).map(|g| (g.len(), minimum_votes(g.len()))) } /// Get the group index for a validator by index. 
diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index e3215822877e..60c9b99ea877 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -75,7 +75,7 @@ mod statement_store; const COST_UNEXPECTED_STATEMENT: Rep = Rep::CostMinor("Unexpected Statement"); const COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE: Rep = - Rep::CostMinor("Unexpected Statement, missing knowlege for relay parent"); + Rep::CostMinor("Unexpected Statement, missing knowledge for relay parent"); const COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE: Rep = Rep::CostMinor("Unexpected Statement, unknown candidate"); const COST_UNEXPECTED_STATEMENT_REMOTE: Rep = @@ -202,8 +202,8 @@ pub(crate) struct State { request_manager: RequestManager, } -// For the provided validator index, if there is a connected peer -// controlling the given authority ID, +// For the provided validator index, if there is a connected peer controlling the given authority +// ID, then return that peer's `PeerId`. fn connected_validator_peer( authorities: &HashMap, per_session: &PerSessionState, @@ -278,14 +278,6 @@ impl PeerState { } } -/// How many votes we need to consider a candidate backed. -/// -/// WARNING: This has to be kept in sync with the runtime check in the inclusion module. 
-// TODO [now]: extract to shared primitives -fn minimum_votes(n_validators: usize) -> usize { - std::cmp::min(2, n_validators) -} - #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] pub(crate) async fn handle_network_update( ctx: &mut Context, @@ -1118,11 +1110,11 @@ async fn handle_incoming_statement( .request_manager .get_or_insert(relay_parent, candidate_hash, originator_group); - request_entry.get_mut().add_peer(peer); + request_entry.add_peer(peer); // We only successfully accept statements from the grid on confirmed // candidates, therefore this check only passes if the statement is from the cluster - request_entry.get_mut().set_cluster_priority(); + request_entry.set_cluster_priority(); } let was_fresh = match per_relay_parent.statement_store.insert( @@ -1835,7 +1827,6 @@ async fn handle_incoming_manifest( state .request_manager .get_or_insert(manifest.relay_parent, manifest.candidate_hash, manifest.group_index) - .get_mut() .add_peer(peer); } } diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index 096b78380200..df55e9afc4f6 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -13,7 +13,18 @@ //! A requester for full information on candidates. //! -// TODO [now]: some module docs. +//! 1. We use `RequestManager::get_or_insert().get_mut()` to add and mutate [`RequestedCandidate`]s, either setting the +//! priority or adding a peer we know has the candidate. We currently prioritize "cluster" candidates (those from our +//! own group, although the cluster mechanism could be made to include multiple groups in the future) over "grid" +//! candidates (those from other groups). +//! +//! 2. The main loop of the module will invoke [`RequestManager::next_request`] in a loop until it returns `None`, +//! dispatching all requests with the `NetworkBridgeTxMessage`. 
The receiving half of the channel is owned by the +//! [`RequestManager`]. +//! +//! 3. The main loop of the module will also select over [`RequestManager::await_incoming`] to receive +//! [`UnhandledResponse`]s, which it then validates using [`UnhandledResponse::validate_response`] (which requires state +//! not owned by the request manager). use super::{ BENEFIT_VALID_RESPONSE, BENEFIT_VALID_STATEMENT, COST_IMPROPERLY_DECODED_RESPONSE, @@ -78,20 +89,6 @@ pub struct RequestedCandidate { in_flight: bool, } -impl RequestedCandidate { - /// Add a peer to the set of known peers. - pub fn add_peer(&mut self, peer: PeerId) { - if !self.known_by.contains(&peer) { - self.known_by.push_back(peer); - } - } - - /// Note that the candidate is required for the cluster. - pub fn set_cluster_priority(&mut self) { - self.priority.origin = Origin::Cluster; - } -} - #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] enum Origin { Cluster = 0, @@ -113,14 +110,17 @@ pub struct Entry<'a> { } impl<'a> Entry<'a> { - /// Access the underlying requested candidate. - pub fn get_mut(&mut self) -> &mut RequestedCandidate { - &mut self.requested + /// Add a peer to the set of known peers. + pub fn add_peer(&mut self, peer: PeerId) { + if !self.requested.known_by.contains(&peer) { + self.requested.known_by.push_back(peer); + } } -} -impl<'a> Drop for Entry<'a> { - fn drop(&mut self) { + /// Note that the candidate is required for the cluster. + pub fn set_cluster_priority(&mut self) { + self.requested.priority.origin = Origin::Cluster; + insert_or_update_priority( &mut *self.by_priority, Some(self.prev_index), @@ -213,7 +213,35 @@ impl RequestManager { } } - // TODO [now]: removal based on relay-parent. + /// Remove based on relay-parent. + pub fn remove_by_relay_parent(&mut self, relay_parent: Hash) { + let mut candidate_hashes = HashSet::new(); + + // Remove from `by_priority` and `requests`. 
+ self.by_priority.retain(|(_priority, id)| { + let retain = relay_parent != id.relay_parent; + if !retain { + self.requests.remove(id); + candidate_hashes.insert(id.candidate_hash); + } + retain + }); + + // Remove from `unique_identifiers`. + for candidate_hash in candidate_hashes { + match self.unique_identifiers.entry(candidate_hash) { + HEntry::Occupied(mut entry) => { + entry.get_mut().retain(|id| relay_parent != id.relay_parent); + if entry.get().is_empty() { + entry.remove(); + } + }, + // We can expect to encounter vacant entries, but only if nodes are misbehaving and + // we don't use a deduplicating collection; there are no issues from ignoring it. + HEntry::Vacant(entry) => (), + } + } + } /// Yields the next request to dispatch, if there is any. /// @@ -622,5 +650,106 @@ fn insert_or_update_priority( mod tests { use super::*; - // TODO [now]: test priority ordering. + #[test] + fn test_remove_by_relay_parent() { + let parent_a = Hash::from_low_u64_le(1); + let parent_b = Hash::from_low_u64_le(2); + let parent_c = Hash::from_low_u64_le(3); + + let candidate_a1 = CandidateHash(Hash::from_low_u64_le(11)); + let candidate_a2 = CandidateHash(Hash::from_low_u64_le(12)); + let candidate_b1 = CandidateHash(Hash::from_low_u64_le(21)); + let candidate_b2 = CandidateHash(Hash::from_low_u64_le(22)); + let candidate_c1 = CandidateHash(Hash::from_low_u64_le(31)); + let duplicate_hash = CandidateHash(Hash::from_low_u64_le(31)); + + let mut request_manager = RequestManager::new(); + request_manager.get_or_insert(parent_a, candidate_a1, 1.into()); + request_manager.get_or_insert(parent_a, candidate_a2, 1.into()); + request_manager.get_or_insert(parent_b, candidate_b1, 1.into()); + request_manager.get_or_insert(parent_b, candidate_b2, 2.into()); + request_manager.get_or_insert(parent_c, candidate_c1, 2.into()); + request_manager.get_or_insert(parent_a, duplicate_hash, 1.into()); + + assert_eq!(request_manager.requests.len(), 6); + 
assert_eq!(request_manager.by_priority.len(), 6); + assert_eq!(request_manager.unique_identifiers.len(), 5); + + request_manager.remove_by_relay_parent(parent_a); + + assert_eq!(request_manager.requests.len(), 3); + assert_eq!(request_manager.by_priority.len(), 3); + assert_eq!(request_manager.unique_identifiers.len(), 3); + + assert!(!request_manager.unique_identifiers.contains_key(&candidate_a1)); + assert!(!request_manager.unique_identifiers.contains_key(&candidate_a2)); + // Duplicate hash should still be there (under a different parent). + assert!(request_manager.unique_identifiers.contains_key(&duplicate_hash)); + + request_manager.remove_by_relay_parent(parent_b); + + assert_eq!(request_manager.requests.len(), 1); + assert_eq!(request_manager.by_priority.len(), 1); + assert_eq!(request_manager.unique_identifiers.len(), 1); + + assert!(!request_manager.unique_identifiers.contains_key(&candidate_b1)); + assert!(!request_manager.unique_identifiers.contains_key(&candidate_b2)); + + request_manager.remove_by_relay_parent(parent_c); + + assert!(request_manager.requests.is_empty()); + assert!(request_manager.by_priority.is_empty()); + assert!(request_manager.unique_identifiers.is_empty()); + } + + #[test] + fn test_priority_ordering() { + let parent_a = Hash::from_low_u64_le(1); + let parent_b = Hash::from_low_u64_le(2); + let parent_c = Hash::from_low_u64_le(3); + + let candidate_a1 = CandidateHash(Hash::from_low_u64_le(11)); + let candidate_a2 = CandidateHash(Hash::from_low_u64_le(12)); + let candidate_b1 = CandidateHash(Hash::from_low_u64_le(21)); + let candidate_b2 = CandidateHash(Hash::from_low_u64_le(22)); + let candidate_c1 = CandidateHash(Hash::from_low_u64_le(31)); + + let mut request_manager = RequestManager::new(); + + // Add some entries, set a couple of them to cluster (high) priority. 
+ let identifier_a1 = request_manager + .get_or_insert(parent_a, candidate_a1, 1.into()) + .identifier + .clone(); + let identifier_a2 = { + let mut entry = request_manager.get_or_insert(parent_a, candidate_a2, 1.into()); + entry.set_cluster_priority(); + entry.identifier.clone() + }; + let identifier_b1 = request_manager + .get_or_insert(parent_b, candidate_b1, 1.into()) + .identifier + .clone(); + let identifier_b2 = request_manager + .get_or_insert(parent_b, candidate_b2, 2.into()) + .identifier + .clone(); + let identifier_c1 = { + let mut entry = request_manager.get_or_insert(parent_c, candidate_c1, 2.into()); + entry.set_cluster_priority(); + entry.identifier.clone() + }; + + let attempts = 0; + assert_eq!( + request_manager.by_priority, + vec![ + (Priority { origin: Origin::Cluster, attempts }, identifier_a2), + (Priority { origin: Origin::Cluster, attempts }, identifier_c1), + (Priority { origin: Origin::Unspecified, attempts }, identifier_a1), + (Priority { origin: Origin::Unspecified, attempts }, identifier_b1), + (Priority { origin: Origin::Unspecified, attempts }, identifier_b2), + ] + ); + } } diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index dcbb509b298c..ca98a32b7cb0 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -623,3 +623,10 @@ pub fn maybe_compress_pov(pov: PoV) -> PoV { let pov = PoV { block_data: BlockData(raw) }; pov } + +/// How many votes we need to consider a candidate backed. +/// +/// WARNING: This has to be kept in sync with the runtime check in the inclusion module. 
+pub fn minimum_votes(n_validators: usize) -> usize { + std::cmp::min(2, n_validators) +} diff --git a/runtime/parachains/src/inclusion/mod.rs b/runtime/parachains/src/inclusion/mod.rs index 2c7435cad1db..95ad1da2599d 100644 --- a/runtime/parachains/src/inclusion/mod.rs +++ b/runtime/parachains/src/inclusion/mod.rs @@ -172,8 +172,7 @@ impl Default for ProcessedCandidates { /// Number of backing votes we need for a valid backing. /// -/// WARNING: This check has to be kept in sync with the node side check in the backing -/// subsystem. +/// WARNING: This check has to be kept in sync with the node side checks. pub fn minimum_backing_votes(n_validators: usize) -> usize { // For considerations on this value see: // https://github.com/paritytech/polkadot/pull/1656#issuecomment-999734650 From 23bc08486c7bff48290d566861aae69ffeaaa9de Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 30 Jan 2023 16:14:34 -0600 Subject: [PATCH 134/220] fix test compilation --- node/network/statement-distribution/src/vstaging/grid.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index a44ac3a53f42..d38b38b3fdb5 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -1367,7 +1367,7 @@ mod tests { ManifestKind::Full, ValidatorIndex(0), ), - Ok(None) + Ok(false) ); } From d6a2111c91f06813cd29f5dec3e91958cd67717f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 30 Jan 2023 16:15:40 -0600 Subject: [PATCH 135/220] add a TODO for a test --- node/network/statement-distribution/src/vstaging/grid.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index d38b38b3fdb5..1f9c0b19d8ad 100644 --- 
a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -1377,4 +1377,5 @@ mod tests { // It should also overwrite any existing `Full` ManifestKind // TODO [now]: check that pending communication is cleared correctly in `manifest_sent_to` -} + + // TODO [now]: test a scenario where manifest import returns `Ok(true)`. From e037eaaddebaf96362c7e77ae824737ed8a96d59 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 30 Jan 2023 16:20:35 -0600 Subject: [PATCH 136/220] clean up a couple of TODOs --- node/network/statement-distribution/src/vstaging/grid.rs | 1 + node/network/statement-distribution/src/vstaging/mod.rs | 8 +++++--- .../statement-distribution/src/vstaging/requests.rs | 7 +++++++ 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 1f9c0b19d8ad..0f4be2d8f176 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -1379,3 +1379,4 @@ mod tests { // TODO [now]: check that pending communication is cleared correctly in `manifest_sent_to` // TODO [now]: test a scenario where manifest import returns `Ok(true)`. +} diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 60c9b99ea877..9aab43206180 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -219,8 +219,6 @@ fn connected_validator_peer( struct PeerState { view: View, - // TODO [now]: actually keep track of remote implicit views - // in a smooth manner implicit_view: HashSet, discovery_ids: Option>, } @@ -558,7 +556,11 @@ fn handle_deactivate_leaves(state: &mut State, leaves: &[Hash]) { // clean up per-relay-parent data based on everything removed. 
state.per_relay_parent.retain(|r, x| relay_parents.contains(r)); - // TODO [now]: clean up requests + // Clean up all requests + for leaf in leaves { + state.request_manager.remove_by_relay_parent(*leaf); + } + state.candidates.on_deactivate_leaves(&leaves, |h| relay_parents.contains(h)); // clean up sessions based on everything remaining. diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index df55e9afc4f6..88fe0eafe26d 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -370,6 +370,9 @@ impl<'a> UnhandledResponse<'a> { // this could happen if we requested the candidate under two // different identifiers at the same time, and received a valid // response on the other. + // + // it could also happen in the case that we had a request in-flight + // and the request entry was garbage-collected on outdated relay parent. let entry = match manager.requests.get_mut(&identifier) { None => return ResponseValidationOutput { @@ -752,4 +755,8 @@ mod tests { ] ); } + + // TODO [now]: test that outdated responses are handled correctly. + + // TODO [now]: test clean up by relay parent. 
} From bf0c063436cee2c79f2308a22e8ca880008ea109 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 31 Jan 2023 15:32:06 -0600 Subject: [PATCH 137/220] implement sending pending cluster statements --- .../src/vstaging/cluster.rs | 70 +++++++++++- .../src/vstaging/mod.rs | 102 +++++++++++++++++- 2 files changed, 164 insertions(+), 8 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/cluster.rs b/node/network/statement-distribution/src/vstaging/cluster.rs index c79ebf08b663..e94d2e8662a4 100644 --- a/node/network/statement-distribution/src/vstaging/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/cluster.rs @@ -99,6 +99,9 @@ pub struct ClusterTracker { validators: Vec, seconding_limit: usize, knowledge: HashMap>, + // Statements known locally which haven't been sent to particular validators. + // maps target validator to (originator, statement) pairs. + pending: HashMap>, } impl ClusterTracker { @@ -111,6 +114,7 @@ impl ClusterTracker { validators: cluster_validators, seconding_limit, knowledge: HashMap::new(), + pending: HashMap::new(), }) } @@ -180,6 +184,21 @@ impl ClusterTracker { originator: ValidatorIndex, statement: CompactStatement, ) { + for cluster_member in &self.validators { + if cluster_member == &sender { + if let Some(pending) = self.pending.get_mut(&sender) { + pending.remove(&(originator, statement.clone())); + } + } else if !self.they_know_statement(sender, originator, statement.clone()) { + // add the statement to pending knowledge for all peers + // which don't know the statement. 
+ self.pending + .entry(*cluster_member) + .or_default() + .insert((originator, statement.clone())); + } + } + { let mut sender_knowledge = self.knowledge.entry(sender).or_default(); sender_knowledge.insert(TaggedKnowledge::IncomingP2P(Knowledge::Specific( @@ -214,11 +233,7 @@ impl ClusterTracker { return Err(RejectOutgoing::NotInGroup) } - if self.we_sent(target, Knowledge::Specific(statement.clone(), originator)) { - return Err(RejectOutgoing::Known) - } - - if self.they_sent(target, Knowledge::Specific(statement.clone(), originator)) { + if self.they_know_statement(target, originator, statement.clone()) { return Err(RejectOutgoing::Known) } @@ -267,6 +282,10 @@ impl ClusterTracker { let mut originator_knowledge = self.knowledge.entry(originator).or_default(); originator_knowledge.insert(TaggedKnowledge::Seconded(candidate_hash)); } + + if let Some(pending) = self.pending.get_mut(&target) { + pending.remove(&(originator, statement)); + } } /// Get all targets as validator-indices. This doesn't attempt to filter @@ -314,6 +333,28 @@ impl ClusterTracker { None } + /// Returns a Vec of pending statements to be sent to a particular validator + /// index. `Seconded` statements are sorted to the front of the vector. + /// + /// Pending statements have the form (originator, compact statement). + pub fn pending_statements_for( + &self, + target: ValidatorIndex, + ) -> Vec<(ValidatorIndex, CompactStatement)> { + let mut v = self + .pending + .get(&target) + .map(|x| x.iter().cloned().collect::>()) + .unwrap_or_default(); + + v.sort_by_key(|(_, s)| match s { + CompactStatement::Seconded(_) => 0u8, + CompactStatement::Valid(_) => 1u8, + }); + + v + } + // returns true if it's legal to accept a new `Seconded` message from this validator. // This is either // 1. because we've already accepted it. 
@@ -339,6 +380,16 @@ impl ClusterTracker { seconded_other_candidates < self.seconding_limit } + fn they_know_statement( + &self, + validator: ValidatorIndex, + originator: ValidatorIndex, + statement: CompactStatement, + ) -> bool { + let knowledge = Knowledge::Specific(statement, originator); + self.we_sent(validator, knowledge.clone()) || self.they_sent(validator, knowledge) + } + fn they_sent(&self, validator: ValidatorIndex, knowledge: Knowledge) -> bool { self.knowledge .get(&validator) @@ -800,4 +851,13 @@ mod tests { Ok(()), ); } + + // TODO [now]: test that `pending_statements` are set whenever we receive + // a fresh statement. + + // TODO [now]: test the `pending_statements` are updated when we send or receive + // statements from others in the cluster. + + // TODO [now]: test that pending statements are sorted, with `Seconded` statements + // in the front. } diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 9aab43206180..431b6c7b9c06 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -274,6 +274,12 @@ impl PeerState { fn is_authority(&self, authority_id: &AuthorityDiscoveryId) -> bool { self.discovery_ids.as_ref().map_or(false, |x| x.contains(authority_id)) } + + fn iter_known_discovery_ids<'a>( + &'a self, + ) -> impl Iterator + 'a { + self.discovery_ids.as_ref().into_iter().flat_map(|inner| inner) + } } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] @@ -395,6 +401,9 @@ pub(crate) async fn handle_active_leaves_update( continue } + // TODO [now]: request prospective parachains mode, skip disabled relay-parents + // (there should not be any) and set `seconding_limit = max_candidate_depth`. + // New leaf: fetch info from runtime API and initialize // `per_relay_parent`. 
let session_index = polkadot_node_subsystem_util::request_session_index_for_child( @@ -589,8 +598,27 @@ async fn handle_peer_view_update( } } +// Returns an iterator over known validator indices, given an iterator over discovery IDs +// and a mapping from discovery IDs to validator indices. +fn find_validator_ids<'a>( + known_discovery_ids: impl IntoIterator, + discovery_mapping: impl Fn(&AuthorityDiscoveryId) -> Option<&'a ValidatorIndex>, +) -> impl IntoIterator { + known_discovery_ids.into_iter().filter_map(discovery_mapping).cloned() +} + /// Send a peer, apparently just becoming aware of a relay-parent, all messages /// concerning that relay-parent. +/// +/// In particular, we send all statements pertaining to our common cluster, +/// as well as all manifests, acknowledgements, or other grid statements. +/// +/// Note that due to the way we handle views, our knowledge of peers' relay parents +/// may "oscillate" with relay parents repeatedly leaving and entering the +/// view of a peer based on the implicit view of active leaves. +/// +/// This function is designed to be cheap and not to send duplicate messages in repeated +/// cases. 
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn send_peer_messages_for_relay_parent( ctx: &mut Context, @@ -598,9 +626,77 @@ async fn send_peer_messages_for_relay_parent( peer: PeerId, relay_parent: Hash, ) { - // TODO [now] determine validator index - // send cluster statements - // send grid manifests & acknowledgements + let peer_data = match state.peers.get_mut(&peer) { + None => return, + Some(p) => p, + }; + + let relay_parent_state = match state.per_relay_parent.get_mut(&relay_parent) { + None => return, + Some(s) => s, + }; + + let per_session_state = match state.per_session.get(&relay_parent_state.session) { + None => return, + Some(s) => s, + }; + + let local_validator_state = match relay_parent_state.local_validator.as_mut() { + None => return, + Some(s) => s, + }; + + for validator_id in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| { + per_session_state.authority_lookup.get(a) + }) { + send_pending_cluster_statements( + ctx, + relay_parent, + &peer, + validator_id, + &mut local_validator_state.cluster_tracker, + &relay_parent_state.statement_store, + ) + .await; + + // TODO [now]: grid + } +} + +/// Send a peer all pending cluster statements for a relay parent. 
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn send_pending_cluster_statements( + ctx: &mut Context, + relay_parent: Hash, + peer_id: &PeerId, + peer_validator_id: ValidatorIndex, + cluster_tracker: &mut ClusterTracker, + statement_store: &StatementStore, +) { + let pending_statements = cluster_tracker.pending_statements_for(peer_validator_id); + if pending_statements.is_empty() { + return + } + + let network_messages = pending_statements + .into_iter() + .filter_map(|(originator, compact)| { + let res = statement_store.validator_statement(originator, compact.clone()); + if res.is_some() { + cluster_tracker.note_sent(peer_validator_id, originator, compact); + } + + res + }) + .map(|s| s.as_unchecked().clone()) + .map(|signed| { + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, signed) + }) + .map(|msg| (vec![peer_id.clone()], Versioned::VStaging(msg).into())) + .collect::>(); + + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(network_messages)) + .await; } // Imports a locally originating statement and distributes it to peers. 
From 9ca8c80da98d324b93784dabcd7cb4a133f98892 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 31 Jan 2023 16:16:27 -0600 Subject: [PATCH 138/220] refactor utility function for sending acknowledgement and statements --- .../src/vstaging/mod.rs | 127 +++++++++++------- 1 file changed, 75 insertions(+), 52 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 431b6c7b9c06..34dfa80a1e7a 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -1862,64 +1862,18 @@ async fn handle_incoming_manifest( let ManifestImportSuccess { relay_parent_state, per_session, acknowledge, sender_index } = x; - let local_validator = match relay_parent_state.local_validator.as_mut() { - None => return, - Some(l) => l, - }; - if acknowledge { // 4. if already confirmed & known within grid, acknowledge candidate - let local_knowledge = { - let group_size = match per_session.groups.get(manifest.group_index) { - None => return, // sanity - Some(x) => x.len(), - }; - - local_knowledge_filter( - group_size, - manifest.group_index, - manifest.candidate_hash, - &relay_parent_state.statement_store, - ) - }; - let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { - candidate_hash: manifest.candidate_hash, - seconded_in_group: local_knowledge.seconded_in_group.clone(), - validated_in_group: local_knowledge.validated_in_group.clone(), - }; - - let msg = Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), - ); - - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vec![peer.clone()], - msg.into(), - )) - .await; - local_validator.grid_tracker.manifest_sent_to( - sender_index, - manifest.candidate_hash, - local_knowledge.clone(), - ); - - let messages = post_acknowledgement_statement_messages( + send_acknowledgement_and_statements( + 
ctx, + peer, sender_index, + per_session, + relay_parent_state, manifest.relay_parent, - &mut local_validator.grid_tracker, - &relay_parent_state.statement_store, - &per_session.groups, manifest.group_index, manifest.candidate_hash, - &local_knowledge, - ); - - if !messages.is_empty() { - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages( - messages.into_iter().map(|m| (vec![peer.clone()], m)).collect(), - )) - .await; - } + ).await; } else if !state.candidates.is_confirmed(&manifest.candidate_hash) { // 5. if unconfirmed, add request entry state @@ -1929,6 +1883,75 @@ async fn handle_incoming_manifest( } } +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn send_acknowledgement_and_statements( + ctx: &mut Context, + peer: PeerId, + validator_index: ValidatorIndex, + per_session: &PerSessionState, + relay_parent_state: &mut PerRelayParentState, + relay_parent: Hash, + group_index: GroupIndex, + candidate_hash: CandidateHash, +) { + let local_validator = match relay_parent_state.local_validator.as_mut() { + None => return, + Some(l) => l, + }; + + let local_knowledge = { + let group_size = match per_session.groups.get(group_index) { + None => return, // sanity + Some(x) => x.len(), + }; + + local_knowledge_filter( + group_size, + group_index, + candidate_hash, + &relay_parent_state.statement_store, + ) + }; + let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { + candidate_hash: candidate_hash, + seconded_in_group: local_knowledge.seconded_in_group.clone(), + validated_in_group: local_knowledge.validated_in_group.clone(), + }; + + let msg = Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), + ); + + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + vec![peer.clone()], + msg.into(), + )).await; + + local_validator.grid_tracker.manifest_sent_to( + validator_index, + candidate_hash, + local_knowledge.clone(), + ); + + let 
messages = post_acknowledgement_statement_messages( + validator_index, + relay_parent, + &mut local_validator.grid_tracker, + &relay_parent_state.statement_store, + &per_session.groups, + group_index, + candidate_hash, + &local_knowledge, + ); + + if !messages.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages( + messages.into_iter().map(|m| (vec![peer.clone()], m)).collect(), + )) + .await; + } +} + #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn handle_incoming_acknowledgement( ctx: &mut Context, From ecb3ae6a444e840d3a31dc9f08c24f516d915162 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 31 Jan 2023 16:49:56 -0600 Subject: [PATCH 139/220] mostly implement catching peers up via grid --- .../src/vstaging/mod.rs | 199 ++++++++++++++---- 1 file changed, 153 insertions(+), 46 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 34dfa80a1e7a..f44c9b3ade7d 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -641,25 +641,31 @@ async fn send_peer_messages_for_relay_parent( Some(s) => s, }; - let local_validator_state = match relay_parent_state.local_validator.as_mut() { - None => return, - Some(s) => s, - }; - for validator_id in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| { per_session_state.authority_lookup.get(a) }) { - send_pending_cluster_statements( + if let Some(local_validator_state) = relay_parent_state.local_validator.as_mut() { + send_pending_cluster_statements( + ctx, + relay_parent, + &peer, + validator_id, + &mut local_validator_state.cluster_tracker, + &relay_parent_state.statement_store, + ) + .await; + } + + send_pending_grid_messages( ctx, relay_parent, &peer, validator_id, - &mut local_validator_state.cluster_tracker, - &relay_parent_state.statement_store, + &per_session_state.groups, + 
relay_parent_state, + &state.candidates, ) .await; - - // TODO [now]: grid } } @@ -699,6 +705,106 @@ async fn send_pending_cluster_statements( .await; } +/// Send a peer all pending grid messages / acknowledgements / follow up statements +/// upon learning about a new relay parent. +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn send_pending_grid_messages( + ctx: &mut Context, + relay_parent: Hash, + peer_id: &PeerId, + peer_validator_id: ValidatorIndex, + groups: &Groups, + relay_parent_state: &mut PerRelayParentState, + candidates: &Candidates, +) { + let pending_manifests = { + let local_validator = match relay_parent_state.local_validator.as_mut() { + None => return, + Some(l) => l, + }; + + let grid_tracker = &mut local_validator.grid_tracker; + grid_tracker.pending_manifests_for(peer_validator_id) + }; + + let mut messages: Vec<(Vec, net_protocol::VersionedValidationProtocol)> = Vec::new(); + for (candidate_hash, kind) in pending_manifests { + let confirmed_candidate = match candidates.get_confirmed(&candidate_hash) { + None => continue, // sanity + Some(c) => c, + }; + + let group_index = confirmed_candidate.group_index(); + + let local_knowledge = { + let group_size = match groups.get(group_index) { + None => return, // sanity + Some(x) => x.len(), + }; + + local_knowledge_filter( + group_size, + group_index, + candidate_hash, + &relay_parent_state.statement_store, + ) + }; + + match kind { + grid::ManifestKind::Full => { + let manifest = protocol_vstaging::BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index, + para_id: confirmed_candidate.para_id(), + parent_head_data_hash: confirmed_candidate.parent_head_data_hash(), + seconded_in_group: local_knowledge.seconded_in_group.clone(), + validated_in_group: local_knowledge.validated_in_group.clone(), + }; + + let grid = &mut relay_parent_state + .local_validator + .as_mut() + .expect("determined to be some earlier in this function; qed") + .grid_tracker; 
+ + grid.manifest_sent_to(peer_validator_id, candidate_hash, local_knowledge.clone()); + + messages.push(( + vec![peer_id.clone()], + Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest, + ), + ) + .into(), + )); + }, + grid::ManifestKind::Acknowledgement => { + messages.extend(acknowledgement_and_statement_messages( + peer_id.clone(), + peer_validator_id, + groups, + relay_parent_state, + relay_parent, + group_index, + candidate_hash, + local_knowledge, + )); + }, + } + } + + // TODO [now] we need a way to get all pending statements for a validator, not just + // those for the acknowledgements we've sent + // + // otherwise, we might receive statements while the grid peer is "out of view" and then + // not send them when they get back "in view". problem! checking for these needs to be + // cheap as well. + + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(messages)).await; +} + // Imports a locally originating statement and distributes it to peers. #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] pub(crate) async fn share_local_statement( @@ -1864,16 +1970,35 @@ async fn handle_incoming_manifest( if acknowledge { // 4. 
if already confirmed & known within grid, acknowledge candidate - send_acknowledgement_and_statements( - ctx, + + let local_knowledge = { + let group_size = match per_session.groups.get(manifest.group_index) { + None => return, // sanity + Some(x) => x.len(), + }; + + local_knowledge_filter( + group_size, + manifest.group_index, + manifest.candidate_hash, + &relay_parent_state.statement_store, + ) + }; + + let messages = acknowledgement_and_statement_messages( peer, sender_index, - per_session, + &per_session.groups, relay_parent_state, manifest.relay_parent, manifest.group_index, manifest.candidate_hash, - ).await; + local_knowledge, + ); + + if !messages.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(messages)).await; + } } else if !state.candidates.is_confirmed(&manifest.candidate_hash) { // 5. if unconfirmed, add request entry state @@ -1883,37 +2008,25 @@ async fn handle_incoming_manifest( } } -#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn send_acknowledgement_and_statements( - ctx: &mut Context, +/// Produces acknowledgement and statement messages to be sent over the network, +/// noting that they have been sent within the grid topology tracker as well. 
+fn acknowledgement_and_statement_messages( peer: PeerId, validator_index: ValidatorIndex, - per_session: &PerSessionState, + groups: &Groups, relay_parent_state: &mut PerRelayParentState, relay_parent: Hash, group_index: GroupIndex, candidate_hash: CandidateHash, -) { + local_knowledge: StatementFilter, +) -> Vec<(Vec, net_protocol::VersionedValidationProtocol)> { let local_validator = match relay_parent_state.local_validator.as_mut() { - None => return, + None => return Vec::new(), Some(l) => l, }; - let local_knowledge = { - let group_size = match per_session.groups.get(group_index) { - None => return, // sanity - Some(x) => x.len(), - }; - - local_knowledge_filter( - group_size, - group_index, - candidate_hash, - &relay_parent_state.statement_store, - ) - }; let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { - candidate_hash: candidate_hash, + candidate_hash, seconded_in_group: local_knowledge.seconded_in_group.clone(), validated_in_group: local_knowledge.validated_in_group.clone(), }; @@ -1922,10 +2035,7 @@ async fn send_acknowledgement_and_statements( protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), ); - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vec![peer.clone()], - msg.into(), - )).await; + let mut messages = vec![(vec![peer.clone()], msg.into())]; local_validator.grid_tracker.manifest_sent_to( validator_index, @@ -1933,23 +2043,20 @@ async fn send_acknowledgement_and_statements( local_knowledge.clone(), ); - let messages = post_acknowledgement_statement_messages( + let statement_messages = post_acknowledgement_statement_messages( validator_index, relay_parent, &mut local_validator.grid_tracker, &relay_parent_state.statement_store, - &per_session.groups, + &groups, group_index, candidate_hash, &local_knowledge, ); - if !messages.is_empty() { - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages( - messages.into_iter().map(|m| (vec![peer.clone()], m)).collect(), 
- )) - .await; - } + messages.extend(statement_messages.into_iter().map(|m| (vec![peer.clone()], m))); + + messages } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] From 4ac25ad10f9a7fe494c827062299ab9c036cb6e6 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Wed, 1 Feb 2023 17:19:20 +0100 Subject: [PATCH 140/220] Fix clippy error --- node/core/prospective-parachains/src/lib.rs | 4 ++-- node/subsystem-util/src/inclusion_emulator/staging.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 2c5cca886cf5..f0e6eaaa14c8 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -524,8 +524,8 @@ fn answer_hypothetical_frontier_request( let hypothetical = match c { HypotheticalCandidate::Complete { receipt, persisted_validation_data, .. } => fragment_tree::HypotheticalCandidate::Complete { - receipt: Cow::Borrowed(&*receipt), - persisted_validation_data: Cow::Borrowed(&*persisted_validation_data), + receipt: Cow::Borrowed(receipt), + persisted_validation_data: Cow::Borrowed(persisted_validation_data), }, HypotheticalCandidate::Incomplete { parent_head_data_hash, diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index ccab2aea973e..3aaee43d79fe 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -547,7 +547,7 @@ impl<'a> ProspectiveCandidate<'a> { /// Partially clone the prospective candidate, but borrow the /// parts which are potentially heavy. 
- pub fn partial_clone<'b>(&'b self) -> ProspectiveCandidate<'b> { + pub fn partial_clone(&self) -> ProspectiveCandidate { ProspectiveCandidate { commitments: Cow::Borrowed(self.commitments.borrow()), collator: self.collator.clone(), From bf15adc8d46f9d3f1b22816a282aeabb5821a621 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 1 Feb 2023 16:07:59 -0600 Subject: [PATCH 141/220] alter grid to track all pending statements --- .../src/vstaging/grid.rs | 183 ++++++++++++++++-- .../src/vstaging/mod.rs | 144 +++++++++----- 2 files changed, 265 insertions(+), 62 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 0f4be2d8f176..939851939de9 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -189,7 +189,10 @@ pub struct GridTracker { received: HashMap, confirmed_backed: HashMap, unconfirmed: HashMap>, - pending_communication: HashMap>, + pending_manifests: HashMap>, + + // maps target to (originator, statement) pairs. + pending_statements: HashMap>, } impl GridTracker { @@ -284,14 +287,28 @@ impl GridTracker { if let Some(confirmed) = self.confirmed_backed.get_mut(&candidate_hash) { if receiving_from && !confirmed.has_sent_manifest_to(sender) { // due to checks above, the manifest `kind` is guaranteed to be `Full` - self.pending_communication + self.pending_manifests .entry(sender) .or_default() .insert(candidate_hash, ManifestKind::Acknowledgement); ack = true; } + + // add all statements in local_knowledge & !remote_knowledge + // to `pending_statements` for this validator. 
confirmed.manifest_received_from(sender, remote_knowledge); + if let Some(pending_statements) = confirmed.pending_statements(sender) { + self.pending_statements + .entry(sender) + .or_default() + .extend(decompose_statement_filter( + groups, + claimed_group_index, + candidate_hash, + &pending_statements, + )); + } } else { // received prevents conflicting manifests so this is max 1 per validator. self.unconfirmed @@ -313,11 +330,15 @@ impl GridTracker { candidate_hash: CandidateHash, group_index: GroupIndex, group_size: usize, + local_knowledge: StatementFilter, ) -> Vec<(ValidatorIndex, ManifestKind)> { let c = match self.confirmed_backed.entry(candidate_hash) { Entry::Occupied(_) => return Vec::new(), - Entry::Vacant(v) => - v.insert(KnownBackedCandidate { group_index, mutual_knowledge: HashMap::new() }), + Entry::Vacant(v) => v.insert(KnownBackedCandidate { + group_index, + mutual_knowledge: HashMap::new(), + local_knowledge, + }), }; // Populate the entry with previously unconfirmed manifests. @@ -361,13 +382,13 @@ impl GridTracker { // and receiving groups, we may overwrite a `Full` manifest with a `Acknowledgement` // one. for (v, manifest_mode) in sending_group_manifests.chain(receiving_group_manifests) { - self.pending_communication + self.pending_manifests .entry(v) .or_default() .insert(candidate_hash, manifest_mode); } - self.pending_communication + self.pending_manifests .iter() .filter_map(|(v, x)| x.get(&candidate_hash).map(|k| (*v, *k))) .collect() @@ -377,15 +398,28 @@ impl GridTracker { /// given validator. 
pub fn manifest_sent_to( &mut self, + groups: &Groups, validator_index: ValidatorIndex, candidate_hash: CandidateHash, local_knowledge: StatementFilter, ) { if let Some(c) = self.confirmed_backed.get_mut(&candidate_hash) { c.manifest_sent_to(validator_index, local_knowledge); + + if let Some(pending_statements) = c.pending_statements(validator_index) { + self.pending_statements + .entry(validator_index) + .or_default() + .extend(decompose_statement_filter( + groups, + c.group_index, + candidate_hash, + &pending_statements, + )); + } } - if let Some(x) = self.pending_communication.get_mut(&validator_index) { + if let Some(x) = self.pending_manifests.get_mut(&validator_index) { x.remove(&candidate_hash); } } @@ -397,7 +431,7 @@ impl GridTracker { validator_index: ValidatorIndex, candidate_hash: &CandidateHash, ) -> Option { - self.pending_communication + self.pending_manifests .get(&validator_index) .and_then(|x| x.get(&candidate_hash)) .map(|x| *x) @@ -409,7 +443,7 @@ impl GridTracker { &self, validator_index: ValidatorIndex, ) -> Vec<(CandidateHash, ManifestKind)> { - self.pending_communication + self.pending_manifests .get(&validator_index) .into_iter() .flat_map(|pending| pending.iter().map(|(c, m)| (*c, *m))) @@ -423,11 +457,30 @@ impl GridTracker { &self, validator_index: ValidatorIndex, candidate_hash: CandidateHash, - full_local_knowledge: &StatementFilter, ) -> Option { self.confirmed_backed .get(&candidate_hash) - .and_then(|x| x.pending_statements(validator_index, full_local_knowledge)) + .and_then(|x| x.pending_statements(validator_index)) + } + + /// Returns a vector of all pending statements to the validator, sorted with + /// `Seconded` statements at the front. + /// + /// Statements are in the form `(Originator, Statement Kind)`. 
+ pub fn all_pending_statements_for( + &self, + validator_index: ValidatorIndex, + ) -> Vec<(ValidatorIndex, CompactStatement)> { + let mut v = self.pending_statements + .get(&validator_index).map(|x| x.iter().cloned().collect()) + .unwrap_or(Vec::new()); + + v.sort_by_key(|(_, s)| match s { + CompactStatement::Seconded(_) => 0u32, + CompactStatement::Valid(_) => 1u32, + }); + + v } /// Which validators we could request the fully attested candidates from. @@ -450,7 +503,7 @@ impl GridTracker { } /// Determine the validators which can send a statement to us by direct broadcast. - pub fn direct_statement_senders( + pub fn direct_statement_providers( &self, groups: &Groups, originator: ValidatorIndex, @@ -470,7 +523,7 @@ impl GridTracker { /// Determine the validators which can receive a statement from us by direct /// broadcast. - pub fn direct_statement_recipients( + pub fn direct_statement_targets( &self, groups: &Groups, originator: ValidatorIndex, @@ -488,6 +541,45 @@ impl GridTracker { .unwrap_or_default() } + /// Note that we have learned about a statement. This will update + /// `pending_statements_for` for any relevant validators if actually + /// fresh. + pub fn learned_fresh_statement( + &mut self, + groups: &Groups, + session_topology: &SessionTopologyView, + originator: ValidatorIndex, + statement: &CompactStatement, + ) { + let (g, c_h, kind, in_group) = match extract_statement_and_group_info(groups, originator, statement) { + None => return, + Some(x) => x, + }; + + let known = match self.confirmed_backed.get_mut(&c_h) { + None => return, + Some(x) => x, + }; + + if !known.note_fresh_statement(in_group, kind) { return } + + // Add to `pending_statements` for all validators we communicate with + // who have exchanged manifests. 
+ let all_group_validators = session_topology.group_views + .get(&g) + .into_iter() + .flat_map(|g| g.sending.iter().chain(g.receiving.iter())); + + for v in all_group_validators { + if known.is_pending_statement(*v, in_group, kind) { + self.pending_statements + .entry(*v) + .or_default() + .insert((originator, statement.clone())); + } + } + } + /// Note that a direct statement about a given candidate was sent to or /// received from the given validator. pub fn sent_or_received_direct_statement( @@ -502,6 +594,10 @@ impl GridTracker { { if let Some(known) = self.confirmed_backed.get_mut(&c_h) { known.sent_or_received_direct_statement(counterparty, in_group, kind); + + if let Some(pending) = self.pending_statements.get_mut(&counterparty) { + pending.remove(&(originator, statement.clone())); + } } } } @@ -527,6 +623,25 @@ fn extract_statement_and_group_info( Some((group, *candidate_hash, statement_kind, index_in_group)) } +fn decompose_statement_filter<'a> ( + groups: &'a Groups, + group_index: GroupIndex, + candidate_hash: CandidateHash, + statement_filter: &'a StatementFilter, +) -> impl Iterator + 'a { + groups.get(group_index).into_iter().flat_map(move |g| { + let s = statement_filter.seconded_in_group.iter_ones() + .map(|i| g[i].clone()) + .map(move |i| (i, CompactStatement::Seconded(candidate_hash))); + + let v = statement_filter.validated_in_group.iter_ones() + .map(|i| g[i].clone()) + .map(move |i| (i, CompactStatement::Valid(candidate_hash))); + + s.chain(v) + }) +} + /// A summary of a manifest being sent by a counterparty. #[derive(Clone)] pub struct ManifestSummary { @@ -754,6 +869,7 @@ struct MutualKnowledge { // we have confirmed as having been backed. 
struct KnownBackedCandidate { group_index: GroupIndex, + local_knowledge: StatementFilter, mutual_knowledge: HashMap, } @@ -847,6 +963,17 @@ impl KnownBackedCandidate { .collect() } + fn note_fresh_statement( + &mut self, + statement_index_in_group: usize, + statement_kind: StatementKind, + ) -> bool { + let really_fresh = !self.local_knowledge.contains(statement_index_in_group, statement_kind); + self.local_knowledge.set(statement_index_in_group, statement_kind); + + really_fresh + } + fn sent_or_received_direct_statement( &mut self, validator: ValidatorIndex, @@ -861,16 +988,34 @@ impl KnownBackedCandidate { } } + fn is_pending_statement( + &self, + validator: ValidatorIndex, + statement_index_in_group: usize, + statement_kind: StatementKind, + ) -> bool { + // existence of both remote & local knowledge indicate we have exchanged + // manifests. + // then, everything that is not in the remote knowledge is pending + self.mutual_knowledge + .get(&validator) + .filter(|k| k.local_knowledge.is_some()) + .and_then(|k| k.remote_knowledge.as_ref()) + .map(|k| !k.contains(statement_index_in_group, statement_kind)) + .unwrap_or(false) + } + fn pending_statements( &self, validator: ValidatorIndex, - full_local: &StatementFilter, ) -> Option { // existence of both remote & local knowledge indicate we have exchanged // manifests. // then, everything that is not in the remote knowledge is pending, and we // further limit this by what is in the local knowledge itself. we use the // full local knowledge, as the local knowledge stored here may be outdated. 
+ let full_local = &self.local_knowledge; + self.mutual_knowledge .get(&validator) .filter(|k| k.local_knowledge.is_some()) @@ -882,6 +1027,8 @@ impl KnownBackedCandidate { !remote.validated_in_group.clone(), }) } + + } #[cfg(test)] @@ -1379,4 +1526,12 @@ mod tests { // TODO [now]: check that pending communication is cleared correctly in `manifest_sent_to` // TODO [now]: test a scenario where manifest import returns `Ok(true)`. + + // TODO [now]: test that pending statements are updated after manifest exchange + + // TODO [now]: test that pending statements are updated when importing a fresh statement + + // TODO [now]: test that pending statements respect remote knowledge + + // TODO [now]: test that pending statements are cleared when sending/receiving. } diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index f44c9b3ade7d..d8d2c44f9ecc 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -669,6 +669,19 @@ async fn send_peer_messages_for_relay_parent( } } +fn pending_statement_network_message( + statement_store: &StatementStore, + relay_parent: Hash, + peer: &PeerId, + originator: ValidatorIndex, + compact: CompactStatement, +) -> Option<(Vec, net_protocol::VersionedValidationProtocol)> { + statement_store.validator_statement(originator, compact) + .map(|s| s.as_unchecked().clone()) + .map(|signed| protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, signed)) + .map(|msg| (vec![peer.clone()], Versioned::VStaging(msg).into())) +} + /// Send a peer all pending cluster statements for a relay parent. 
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn send_pending_cluster_statements( @@ -680,27 +693,27 @@ async fn send_pending_cluster_statements( statement_store: &StatementStore, ) { let pending_statements = cluster_tracker.pending_statements_for(peer_validator_id); - if pending_statements.is_empty() { - return - } - let network_messages = pending_statements .into_iter() .filter_map(|(originator, compact)| { - let res = statement_store.validator_statement(originator, compact.clone()); + let res = pending_statement_network_message( + &statement_store, + relay_parent, + peer_id, + originator, + compact.clone(), + ); + if res.is_some() { cluster_tracker.note_sent(peer_validator_id, originator, compact); } res }) - .map(|s| s.as_unchecked().clone()) - .map(|signed| { - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, signed) - }) - .map(|msg| (vec![peer_id.clone()], Versioned::VStaging(msg).into())) .collect::>(); + if network_messages.is_empty() { return } + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(network_messages)) .await; } @@ -768,7 +781,7 @@ async fn send_pending_grid_messages( .expect("determined to be some earlier in this function; qed") .grid_tracker; - grid.manifest_sent_to(peer_validator_id, candidate_hash, local_knowledge.clone()); + grid.manifest_sent_to(groups, peer_validator_id, candidate_hash, local_knowledge.clone()); messages.push(( vec![peer_id.clone()], @@ -795,13 +808,45 @@ async fn send_pending_grid_messages( } } - // TODO [now] we need a way to get all pending statements for a validator, not just - // those for the acknowledgements we've sent + // Send all remaining pending grid statements for a validator, not just + // those for the acknowledgements we've sent. // // otherwise, we might receive statements while the grid peer is "out of view" and then - // not send them when they get back "in view". problem! checking for these needs to be - // cheap as well. 
+ // not send them when they get back "in view". problem! + { + let grid_tracker = &mut relay_parent_state.local_validator.as_mut() + .expect("checked earlier; qed") + .grid_tracker; + + let pending_statements = grid_tracker.all_pending_statements_for(peer_validator_id); + + let extra_statements = pending_statements + .into_iter() + .filter_map(|(originator, compact)| { + let res = pending_statement_network_message( + &relay_parent_state.statement_store, + relay_parent, + peer_id, + originator, + compact.clone(), + ); + + if res.is_some() { + grid_tracker.sent_or_received_direct_statement( + groups, + originator, + peer_validator_id, + &compact, + ); + } + + res + }); + + messages.extend(extra_statements); + } + if messages.is_empty() { return } ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(messages)).await; } @@ -870,7 +915,7 @@ pub(crate) async fn share_local_statement( ); }; - match per_relay_parent.statement_store.insert( + let x = match per_relay_parent.statement_store.insert( &per_session.groups, compact_statement.clone(), StatementOrigin::Local, @@ -883,8 +928,20 @@ pub(crate) async fn share_local_statement( ); return Err(JfyiError::InvalidShare) }, - Ok(true) => (compact_statement, candidate_hash), + Ok(true) => {}, + }; + + if let Some(ref session_topology) = per_session.grid_view { + let l = per_relay_parent.local_validator.as_mut().expect("checked above; qed"); + l.grid_tracker.learned_fresh_statement( + &per_session.groups, + session_topology, + local_index, + &compact_statement.payload(), + ); } + + (compact_statement, candidate_hash) }; if let Some(post_confirmation) = post_confirmation { @@ -977,7 +1034,7 @@ async fn circulate_statement( let grid_targets = local_validator .grid_tracker - .direct_statement_recipients(&per_session.groups, originator, &compact_statement) + .direct_statement_targets(&per_session.groups, originator, &compact_statement) .into_iter() .filter(|v| !cluster_relevant || 
!local_validator.cluster_tracker.targets().contains(v)) .map(|v| (v, DirectTargetKind::Grid)); @@ -1244,7 +1301,7 @@ async fn handle_incoming_statement( } else { let grid_sender_index = local_validator .grid_tracker - .direct_statement_senders( + .direct_statement_providers( &per_session.groups, statement.unchecked_validator_index(), statement.unchecked_payload(), @@ -1344,6 +1401,15 @@ async fn handle_incoming_statement( report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT_FIRST).await; let is_importable = state.candidates.is_importable(&candidate_hash); + if let Some(ref session_topology) = per_session.grid_view { + local_validator.grid_tracker.learned_fresh_statement( + &per_session.groups, + session_topology, + local_validator.index, + &statement, + ); + } + if let (true, &Some(confirmed)) = (is_importable, &confirmed) { send_backing_fresh_statements( ctx, @@ -1551,13 +1617,6 @@ async fn provide_candidate_to_grid( Some(g) => g.len(), }; - let actions = local_validator.grid_tracker.add_backed_candidate( - grid_view, - candidate_hash, - group_index, - group_size, - ); - let filter = local_knowledge_filter( group_size, group_index, @@ -1565,6 +1624,14 @@ async fn provide_candidate_to_grid( &relay_parent_state.statement_store, ); + let actions = local_validator.grid_tracker.add_backed_candidate( + grid_view, + candidate_hash, + group_index, + group_size, + filter.clone(), + ); + let manifest = protocol_vstaging::BackedCandidateManifest { relay_parent, candidate_hash, @@ -1607,7 +1674,7 @@ async fn provide_candidate_to_grid( grid::ManifestKind::Acknowledgement => ack_peers.push(p), } - local_validator.grid_tracker.manifest_sent_to(v, candidate_hash, filter.clone()); + local_validator.grid_tracker.manifest_sent_to(&per_session.groups, v, candidate_hash, filter.clone()); post_statements.extend( post_acknowledgement_statement_messages( v, @@ -1617,7 +1684,6 @@ async fn provide_candidate_to_grid( &per_session.groups, group_index, candidate_hash, - &filter, ) 
.into_iter() .map(|m| (vec![p], m)), @@ -1904,10 +1970,9 @@ fn post_acknowledgement_statement_messages( groups: &Groups, group_index: GroupIndex, candidate_hash: CandidateHash, - local_knowledge: &StatementFilter, ) -> Vec { let sending_filter = - match grid_tracker.pending_statements_for(recipient, candidate_hash, local_knowledge) { + match grid_tracker.pending_statements_for(recipient, candidate_hash) { None => return Vec::new(), Some(f) => f, }; @@ -2038,6 +2103,7 @@ fn acknowledgement_and_statement_messages( let mut messages = vec![(vec![peer.clone()], msg.into())]; local_validator.grid_tracker.manifest_sent_to( + groups, validator_index, candidate_hash, local_knowledge.clone(), @@ -2051,7 +2117,6 @@ fn acknowledgement_and_statement_messages( &groups, group_index, candidate_hash, - &local_knowledge, ); messages.extend(statement_messages.into_iter().map(|m| (vec![peer.clone()], m))); @@ -2113,22 +2178,6 @@ async fn handle_incoming_acknowledgement( Some(l) => l, }; - // if already confirmed & known within grid, follow up with direct statements - // the counterparty is not aware of. 
- let local_knowledge = { - let group_size = match per_session.groups.get(group_index) { - None => return, // sanity - Some(x) => x.len(), - }; - - local_knowledge_filter( - group_size, - group_index, - candidate_hash, - &relay_parent_state.statement_store, - ) - }; - let messages = post_acknowledgement_statement_messages( sender_index, relay_parent, @@ -2137,7 +2186,6 @@ async fn handle_incoming_acknowledgement( &per_session.groups, group_index, candidate_hash, - &local_knowledge, ); if !messages.is_empty() { From 6b29c7483cff61559f51cb89ecb18546202899e7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 1 Feb 2023 16:22:53 -0600 Subject: [PATCH 142/220] fix more TODOs and format --- .../src/vstaging/grid.rs | 57 ++++++----- .../src/vstaging/mod.rs | 98 +++++++++++++------ .../src/vstaging/statement_store.rs | 6 +- 3 files changed, 104 insertions(+), 57 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 939851939de9..f6b81f2be6ba 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -299,15 +299,14 @@ impl GridTracker { // to `pending_statements` for this validator. confirmed.manifest_received_from(sender, remote_knowledge); if let Some(pending_statements) = confirmed.pending_statements(sender) { - self.pending_statements - .entry(sender) - .or_default() - .extend(decompose_statement_filter( + self.pending_statements.entry(sender).or_default().extend( + decompose_statement_filter( groups, claimed_group_index, candidate_hash, &pending_statements, - )); + ), + ); } } else { // received prevents conflicting manifests so this is max 1 per validator. 
@@ -407,15 +406,14 @@ impl GridTracker { c.manifest_sent_to(validator_index, local_knowledge); if let Some(pending_statements) = c.pending_statements(validator_index) { - self.pending_statements - .entry(validator_index) - .or_default() - .extend(decompose_statement_filter( + self.pending_statements.entry(validator_index).or_default().extend( + decompose_statement_filter( groups, c.group_index, candidate_hash, &pending_statements, - )); + ), + ); } } @@ -471,8 +469,10 @@ impl GridTracker { &self, validator_index: ValidatorIndex, ) -> Vec<(ValidatorIndex, CompactStatement)> { - let mut v = self.pending_statements - .get(&validator_index).map(|x| x.iter().cloned().collect()) + let mut v = self + .pending_statements + .get(&validator_index) + .map(|x| x.iter().cloned().collect()) .unwrap_or(Vec::new()); v.sort_by_key(|(_, s)| match s { @@ -551,21 +551,25 @@ impl GridTracker { originator: ValidatorIndex, statement: &CompactStatement, ) { - let (g, c_h, kind, in_group) = match extract_statement_and_group_info(groups, originator, statement) { - None => return, - Some(x) => x, - }; + let (g, c_h, kind, in_group) = + match extract_statement_and_group_info(groups, originator, statement) { + None => return, + Some(x) => x, + }; let known = match self.confirmed_backed.get_mut(&c_h) { None => return, Some(x) => x, }; - if !known.note_fresh_statement(in_group, kind) { return } + if !known.note_fresh_statement(in_group, kind) { + return + } // Add to `pending_statements` for all valdiators we communicate with // who have exchanged manifests. 
- let all_group_validators = session_topology.group_views + let all_group_validators = session_topology + .group_views .get(&g) .into_iter() .flat_map(|g| g.sending.iter().chain(g.receiving.iter())); @@ -623,18 +627,22 @@ fn extract_statement_and_group_info( Some((group, *candidate_hash, statement_kind, index_in_group)) } -fn decompose_statement_filter<'a> ( +fn decompose_statement_filter<'a>( groups: &'a Groups, group_index: GroupIndex, candidate_hash: CandidateHash, statement_filter: &'a StatementFilter, ) -> impl Iterator + 'a { groups.get(group_index).into_iter().flat_map(move |g| { - let s = statement_filter.seconded_in_group.iter_ones() + let s = statement_filter + .seconded_in_group + .iter_ones() .map(|i| g[i].clone()) .map(move |i| (i, CompactStatement::Seconded(candidate_hash))); - let v = statement_filter.validated_in_group.iter_ones() + let v = statement_filter + .validated_in_group + .iter_ones() .map(|i| g[i].clone()) .map(move |i| (i, CompactStatement::Valid(candidate_hash))); @@ -1005,10 +1013,7 @@ impl KnownBackedCandidate { .unwrap_or(false) } - fn pending_statements( - &self, - validator: ValidatorIndex, - ) -> Option { + fn pending_statements(&self, validator: ValidatorIndex) -> Option { // existence of both remote & local knowledge indicate we have exchanged // manifests. 
// then, everything that is not in the remote knowledge is pending, and we @@ -1027,8 +1032,6 @@ impl KnownBackedCandidate { !remote.validated_in_group.clone(), }) } - - } #[cfg(test)] diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index d8d2c44f9ecc..1f9521ed5c8a 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -37,7 +37,10 @@ use polkadot_node_subsystem::{ }, overseer, ActivatedLeaf, ActiveLeavesUpdate, PerLeafSpan, StatementDistributionSenderTrait, }; -use polkadot_node_subsystem_util::backing_implicit_view::{FetchError, View as ImplicitView}; +use polkadot_node_subsystem_util::{ + backing_implicit_view::{FetchError, View as ImplicitView}, + runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, +}; use polkadot_primitives::vstaging::{ AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, CoreIndex, CoreState, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, IndexedVec, @@ -114,6 +117,7 @@ struct PerRelayParentState { statement_store: StatementStore, availability_cores: Vec, group_rotation_info: GroupRotationInfo, + seconding_limit: usize, session: SessionIndex, } @@ -401,11 +405,19 @@ pub(crate) async fn handle_active_leaves_update( continue } - // TODO [now]: request prospective parachains mode, skip disabled relay-parents - // (there should not be any) and set `seconding_limit = max_candidate_depth`. - // New leaf: fetch info from runtime API and initialize // `per_relay_parent`. + + let mode = prospective_parachains_mode(ctx.sender(), new_relay_parent).await; + + // request prospective parachains mode, skip disabled relay-parents + // (there should not be any) and set `seconding_limit = max_candidate_depth`. 
+ let seconding_limit = match mode { + Ok(ProspectiveParachainsMode::Disabled) | Err(_) => continue, + Ok(ProspectiveParachainsMode::Enabled { max_candidate_depth, .. }) => + max_candidate_depth, + }; + let session_index = polkadot_node_subsystem_util::request_session_index_for_child( new_relay_parent, ctx.sender(), @@ -473,6 +485,7 @@ pub(crate) async fn handle_active_leaves_update( &per_session.groups, &availability_cores, &group_rotation_info, + seconding_limit, ) }); @@ -484,6 +497,7 @@ pub(crate) async fn handle_active_leaves_update( statement_store: StatementStore::new(&per_session.groups), availability_cores, group_rotation_info, + seconding_limit, session: session_index, }, ); @@ -520,6 +534,7 @@ fn find_local_validator_state( groups: &Groups, availability_cores: &[CoreState], group_rotation_info: &GroupRotationInfo, + seconding_limit: usize, ) -> Option { if groups.all().is_empty() { return None @@ -540,11 +555,8 @@ fn find_local_validator_state( index: validator_index, group: our_group, assignment: para, - cluster_tracker: ClusterTracker::new( - group_validators, - todo!(), // TODO [now]: seconding limit? 
- ) - .expect("group is non-empty because we are in it; qed"), + cluster_tracker: ClusterTracker::new(group_validators, seconding_limit) + .expect("group is non-empty because we are in it; qed"), grid_tracker: GridTracker::default(), }) } @@ -676,9 +688,12 @@ fn pending_statement_network_message( originator: ValidatorIndex, compact: CompactStatement, ) -> Option<(Vec, net_protocol::VersionedValidationProtocol)> { - statement_store.validator_statement(originator, compact) + statement_store + .validator_statement(originator, compact) .map(|s| s.as_unchecked().clone()) - .map(|signed| protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, signed)) + .map(|signed| { + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, signed) + }) .map(|msg| (vec![peer.clone()], Versioned::VStaging(msg).into())) } @@ -712,7 +727,9 @@ async fn send_pending_cluster_statements( }) .collect::>(); - if network_messages.is_empty() { return } + if network_messages.is_empty() { + return + } ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(network_messages)) .await; @@ -781,7 +798,12 @@ async fn send_pending_grid_messages( .expect("determined to be some earlier in this function; qed") .grid_tracker; - grid.manifest_sent_to(groups, peer_validator_id, candidate_hash, local_knowledge.clone()); + grid.manifest_sent_to( + groups, + peer_validator_id, + candidate_hash, + local_knowledge.clone(), + ); messages.push(( vec![peer_id.clone()], @@ -814,15 +836,16 @@ async fn send_pending_grid_messages( // otherwise, we might receive statements while the grid peer is "out of view" and then // not send them when they get back "in view". problem! 
{ - let grid_tracker = &mut relay_parent_state.local_validator.as_mut() + let grid_tracker = &mut relay_parent_state + .local_validator + .as_mut() .expect("checked earlier; qed") .grid_tracker; let pending_statements = grid_tracker.all_pending_statements_for(peer_validator_id); - let extra_statements = pending_statements - .into_iter() - .filter_map(|(originator, compact)| { + let extra_statements = + pending_statements.into_iter().filter_map(|(originator, compact)| { let res = pending_statement_network_message( &relay_parent_state.statement_store, relay_parent, @@ -846,7 +869,9 @@ async fn send_pending_grid_messages( messages.extend(extra_statements); } - if messages.is_empty() { return } + if messages.is_empty() { + return + } ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(messages)).await; } @@ -892,8 +917,16 @@ pub(crate) async fn share_local_statement( return Err(JfyiError::InvalidShare) } - // TODO [now]: ensure seconded_count isn't too high. Needs our definition - // of 'too high' i.e. max_depth, which isn't done yet. 
+ if per_relay_parent.statement_store.seconded_count(&local_index) == + per_relay_parent.seconding_limit + { + gum::warn!( + target: LOG_TARGET, + limit = ?per_relay_parent.seconding_limit, + "Local node has issued too many `Seconded` statements", + ); + return Err(JfyiError::InvalidShare) + } if local_assignment != Some(expected_para) || relay_parent != expected_relay_parent { return Err(JfyiError::InvalidShare) @@ -1674,7 +1707,12 @@ async fn provide_candidate_to_grid( grid::ManifestKind::Acknowledgement => ack_peers.push(p), } - local_validator.grid_tracker.manifest_sent_to(&per_session.groups, v, candidate_hash, filter.clone()); + local_validator.grid_tracker.manifest_sent_to( + &per_session.groups, + v, + candidate_hash, + filter.clone(), + ); post_statements.extend( post_acknowledgement_statement_messages( v, @@ -1746,6 +1784,7 @@ async fn fragment_tree_update_inner( HypotheticalFrontierRequest { candidates: hypotheticals, fragment_tree_relay_parent: active_leaf_hash, + backed_in_path_only: false, }, tx, )) @@ -1913,11 +1952,13 @@ async fn handle_incoming_manifest_common<'a, Context>( }; // 2. 
sanity checks: peer is validator, bitvec size, import into grid tracker + let group_index = manifest_summary.claimed_group_index; + let claimed_parent_hash = manifest_summary.claimed_parent_hash; let acknowledge = match local_validator.grid_tracker.import_manifest( grid_topology, &per_session.groups, candidate_hash, - todo!(), // TODO [now]: seconding limit + relay_parent_state.seconding_limit, manifest_summary, manifest_kind, sender_index, @@ -1950,8 +1991,8 @@ async fn handle_incoming_manifest_common<'a, Context>( peer.clone(), candidate_hash, relay_parent, - manifest_summary.claimed_group_index, - Some((manifest_summary.claimed_parent_hash, para_id)), + group_index, + Some((claimed_parent_hash, para_id)), ) { report_peer(ctx.sender(), peer, COST_INACCURATE_ADVERTISEMENT).await; return None @@ -1971,11 +2012,10 @@ fn post_acknowledgement_statement_messages( group_index: GroupIndex, candidate_hash: CandidateHash, ) -> Vec { - let sending_filter = - match grid_tracker.pending_statements_for(recipient, candidate_hash) { - None => return Vec::new(), - Some(f) => f, - }; + let sending_filter = match grid_tracker.pending_statements_for(recipient, candidate_hash) { + None => return Vec::new(), + Some(f) => f, + }; let mut messages = Vec::new(); for statement in diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index 6b9e9f216c01..fe07d2427f10 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -97,7 +97,6 @@ impl StatementStore { /// Insert a statement. Returns `true` if was not known already, `false` if it was. /// Ignores statements by unknown validators and returns an error. - // TODO [now]: perhaps reject over-seconded statements. 
pub fn insert( &mut self, groups: &Groups, @@ -237,6 +236,11 @@ impl StatementStore { .map(|stored| &stored.statement) } + /// Get the amount of known `Seconded` statements by the given validator index. + pub fn seconded_count(&self, validator_index: &ValidatorIndex) -> usize { + self.validator_meta.get(validator_index).map_or(0, |m| m.seconded_count) + } + /// Note that a statement is known by the backing subsystem. pub fn note_known_by_backing( &mut self, From 38c4779243974e2725167df837749c0e99331ce0 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 1 Feb 2023 16:34:41 -0600 Subject: [PATCH 143/220] tweak a TODO in requests --- .../src/vstaging/cluster.rs | 2 -- .../src/vstaging/requests.rs | 21 +++++++++++-------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/cluster.rs b/node/network/statement-distribution/src/vstaging/cluster.rs index e94d2e8662a4..26829feb03f6 100644 --- a/node/network/statement-distribution/src/vstaging/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/cluster.rs @@ -54,8 +54,6 @@ //! are in, based on _the most charitable possible interpretation of our protocol rules_, //! and to keep track of what we have sent to other validators in the group and what we may //! continue to send them. -// TODO [now]: decide if we want to also distribute statements to validators -// that are assigned as-of an active leaf i.e. the next group. 
use std::ops::Range; diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index 88fe0eafe26d..155ff12c0c74 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -359,7 +359,7 @@ impl<'a> UnhandledResponse<'a> { group: &[ValidatorIndex], session: SessionIndex, validator_key_lookup: impl Fn(ValidatorIndex) -> ValidatorId, - allowed_para_lookup: impl Fn(ParaId) -> bool, + allowed_para_lookup: impl Fn(ParaId, GroupIndex) -> bool, ) -> ResponseValidationOutput { let UnhandledResponse { manager, @@ -434,12 +434,11 @@ impl<'a> UnhandledResponse<'a> { group, session, validator_key_lookup, + allowed_para_lookup, ); if let CandidateRequestStatus::Complete { .. } = output.request_status { - // TODO [now]: clean up everything else to do with the candidate. - // add reputation punishments for all peers advertising the candidate under - // different identifiers. + manager.remove_for(identifier.candidate_hash); } output @@ -454,6 +453,7 @@ fn validate_complete_response( group: &[ValidatorIndex], session: SessionIndex, validator_key_lookup: impl Fn(ValidatorIndex) -> ValidatorId, + allowed_para_lookup: impl Fn(ParaId, GroupIndex) -> bool, ) -> ResponseValidationOutput { // sanity check bitmask size. this is based entirely on // local logic here. @@ -488,6 +488,13 @@ fn validate_complete_response( return invalid_candidate_output() } + if !allowed_para_lookup( + response.candidate_receipt.descriptor.para_id, + identifier.group_index, + ) { + return invalid_candidate_output() + } + if response.candidate_receipt.hash() != identifier.candidate_hash { return invalid_candidate_output() } @@ -601,10 +608,6 @@ pub enum CandidateRequestStatus { /// expected may not be present, and higher-level code should /// evaluate whether the candidate is still worth storing and whether /// the sender should be punished. 
- /// - /// This also does not indicate that the para has actually been checked - /// to be one that the group is assigned under. Higher-level code should - /// verify that this is the case and ignore the candidate accordingly if so. Complete { candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, @@ -758,5 +761,5 @@ mod tests { // TODO [now]: test that outdated responses are handled correctly. - // TODO [now]: test clean up by relay parent. + // TODO [now]: test that successful requests lead to clean up. } From 254efc35449a115667e3bfe758c79a73686a48d8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 2 Feb 2023 13:24:48 -0600 Subject: [PATCH 144/220] some logic for dispatching requests --- .../src/vstaging/mod.rs | 57 +++++++++++++++++++ .../src/vstaging/requests.rs | 36 ++++++++++-- 2 files changed, 87 insertions(+), 6 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 1f9521ed5c8a..daf12efab168 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -25,6 +25,8 @@ use polkadot_node_network_protocol::{ grid_topology::{RequiredRouting, SessionGridTopology}, peer_set::ValidationVersion, vstaging as protocol_vstaging, PeerId, UnifiedReputationChange as Rep, Versioned, View, + request_response::Requests, + IfDisconnected, }; use polkadot_node_primitives::{ SignedFullStatementWithPVD, StatementWithPVD as FullStatementWithPVD, @@ -69,6 +71,8 @@ use groups::Groups; use requests::RequestManager; use statement_store::{StatementOrigin, StatementStore}; +pub use requests::UnhandledResponse; + mod candidates; mod cluster; mod grid; @@ -2301,3 +2305,56 @@ async fn apply_post_confirmation( state.request_manager.remove_for(candidate_hash); new_confirmed_candidate_fragment_tree_updates(ctx, state, post_confirmation.hypothetical).await; } + +/// Dispatch pending 
requests for candidate data & statements. +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +pub(crate) async fn dispatch_requests( + ctx: &mut Context, + state: &mut State, +) { + let peers = &state.peers; + let peer_connected = |id: &_| peers.contains_key(id); + let seconded_mask = |identifier: &requests::CandidateIdentifier| { + let &requests::CandidateIdentifier { + relay_parent, candidate_hash, group_index, + } = identifier; + + let relay_parent_state = state.per_relay_parent.get(&relay_parent)?; + let per_session = state.per_session.get(&relay_parent_state.session)?; + let group_size = per_session.groups.get(group_index).map(|x| x.len())?; + + let knowledge = local_knowledge_filter( + group_size, + group_index, + candidate_hash, + &relay_parent_state.statement_store, + ); + + // We request the opposite of what we know. + Some(!knowledge.seconded_in_group) + }; + + while let Some(request) = state.request_manager.next_request( + peer_connected, + seconded_mask, + ) { + // Peer is supposedly connected. + ctx.send_message(NetworkBridgeTxMessage::SendRequests( + vec![Requests::AttestedCandidateV2(request)], + IfDisconnected::ImmediateError, + )).await; + } +} + +/// Wait on the next incoming response. If there are no requests pending, this +/// future never resolves. It is the responsibility of the user of this API +/// to interrupt the future. 
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +pub(crate) async fn receive_response( + state: &mut State, +) -> UnhandledResponse { + match state.request_manager.await_incoming().await { + Some(r) => r, + None => futures::future::pending().await, + } +} diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index 155ff12c0c74..b4a2a1094051 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -248,20 +248,25 @@ impl RequestManager { /// This function accepts two closures as an argument. /// The first closure indicates whether a peer is still connected. /// The second closure is used to construct a mask for limiting the - /// `Seconded` statements the response is allowed to contain. + /// `Seconded` statements the response is allowed to contain. The mask + /// has `AND` semantics. pub fn next_request( &mut self, peer_connected: impl Fn(&PeerId) -> bool, - seconded_mask: impl Fn(&CandidateIdentifier) -> BitVec, + seconded_mask: impl Fn(&CandidateIdentifier) -> Option>, ) -> Option> { if self.pending_responses.len() >= MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize { return None } + let mut res = None; + // loop over all requests, in order of priority. // do some active maintenance of the connected peers. // dispatch the first request which is not in-flight already. 
- for (_priority, id) in &self.by_priority { + + let mut cleanup_outdated = Vec::new(); + for (i, (_priority, id)) in self.by_priority.iter().enumerate() { let entry = match self.requests.get_mut(&id) { None => { gum::error!( @@ -288,7 +293,14 @@ impl RequestManager { entry.known_by.push_back(recipient.clone()); - let seconded_mask = seconded_mask(&id); + let seconded_mask = match seconded_mask(&id) { + None => { + cleanup_outdated.push((i, id.clone())); + continue + } + Some(s) => s, + }; + let (request, response_fut) = OutgoingRequest::new( RequestRecipient::Peer(recipient.clone()), AttestedCandidateRequest { @@ -309,10 +321,22 @@ impl RequestManager { entry.in_flight = true; - return Some(request) + res = Some(request); + break; + } + + for (priority_index, identifier) in cleanup_outdated.into_iter().rev() { + self.by_priority.remove(priority_index); + self.requests.remove(&identifier); + if let HEntry::Occupied(mut e) = self.unique_identifiers.entry(identifier.candidate_hash) { + e.get_mut().remove(&identifier); + if e.get().is_empty() { + e.remove(); + } + } } - None + res } /// Await the next incoming response to a sent request, or immediately From 04fe29175e812785c8a2cc35d6f4c3b0941f336e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 6 Feb 2023 22:55:41 -0600 Subject: [PATCH 145/220] fmt --- .../src/vstaging/mod.rs | 26 +++++++------------ .../src/vstaging/requests.rs | 8 +++--- 2 files changed, 14 insertions(+), 20 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index daf12efab168..725c4318120b 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -24,9 +24,9 @@ use polkadot_node_network_protocol::{ self as net_protocol, grid_topology::{RequiredRouting, SessionGridTopology}, peer_set::ValidationVersion, - vstaging as protocol_vstaging, PeerId, UnifiedReputationChange as Rep, 
Versioned, View, request_response::Requests, - IfDisconnected, + vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep, + Versioned, View, }; use polkadot_node_primitives::{ SignedFullStatementWithPVD, StatementWithPVD as FullStatementWithPVD, @@ -2308,16 +2308,12 @@ async fn apply_post_confirmation( /// Dispatch pending requests for candidate data & statements. #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -pub(crate) async fn dispatch_requests( - ctx: &mut Context, - state: &mut State, -) { +pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut State) { let peers = &state.peers; let peer_connected = |id: &_| peers.contains_key(id); let seconded_mask = |identifier: &requests::CandidateIdentifier| { - let &requests::CandidateIdentifier { - relay_parent, candidate_hash, group_index, - } = identifier; + let &requests::CandidateIdentifier { relay_parent, candidate_hash, group_index } = + identifier; let relay_parent_state = state.per_relay_parent.get(&relay_parent)?; let per_session = state.per_session.get(&relay_parent_state.session)?; @@ -2334,15 +2330,13 @@ pub(crate) async fn dispatch_requests( Some(!knowledge.seconded_in_group) }; - while let Some(request) = state.request_manager.next_request( - peer_connected, - seconded_mask, - ) { + while let Some(request) = state.request_manager.next_request(peer_connected, seconded_mask) { // Peer is supposedly connected. ctx.send_message(NetworkBridgeTxMessage::SendRequests( vec![Requests::AttestedCandidateV2(request)], IfDisconnected::ImmediateError, - )).await; + )) + .await; } } @@ -2350,9 +2344,7 @@ pub(crate) async fn dispatch_requests( /// future never resolves. It is the responsibility of the user of this API /// to interrupt the future. 
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -pub(crate) async fn receive_response( - state: &mut State, -) -> UnhandledResponse { +pub(crate) async fn receive_response(state: &mut State) -> UnhandledResponse { match state.request_manager.await_incoming().await { Some(r) => r, None => futures::future::pending().await, diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index b4a2a1094051..37ab04183e98 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -297,7 +297,7 @@ impl RequestManager { None => { cleanup_outdated.push((i, id.clone())); continue - } + }, Some(s) => s, }; @@ -322,13 +322,15 @@ impl RequestManager { entry.in_flight = true; res = Some(request); - break; + break } for (priority_index, identifier) in cleanup_outdated.into_iter().rev() { self.by_priority.remove(priority_index); self.requests.remove(&identifier); - if let HEntry::Occupied(mut e) = self.unique_identifiers.entry(identifier.candidate_hash) { + if let HEntry::Occupied(mut e) = + self.unique_identifiers.entry(identifier.candidate_hash) + { e.get_mut().remove(&identifier); if e.get().is_empty() { e.remove(); From a69372796439b64a61ca9f858b6055f290506f98 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 6 Feb 2023 22:57:10 -0600 Subject: [PATCH 146/220] skeleton for response receiving --- .../statement-distribution/src/vstaging/mod.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 725c4318120b..fa7e0e8f180e 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -2343,10 +2343,23 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St 
/// Wait on the next incoming response. If there are no requests pending, this /// future never resolves. It is the responsibility of the user of this API /// to interrupt the future. -#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] pub(crate) async fn receive_response(state: &mut State) -> UnhandledResponse { match state.request_manager.await_incoming().await { Some(r) => r, None => futures::future::pending().await, } } + +/// Handles an incoming response. This does the actual work of validating the response, +/// importing statements, sending acknowledgements, etc. +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +pub(crate) async fn receive_response( + ctx: &mut Context, + state: &mut State, +) -> UnhandledResponse { + // 1. handle response + // 1. send acknowledgement & needed statements. + // 1. import statements into statement store + // 1. `circulate_statement` for all fresh statements (also, `learned_fresh_statement`) + // 1. if includable, send fresh statements to backing. +} From 3a7864f5d7ce1da2e57ba4e2d4c5fec23348f510 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Tue, 7 Feb 2023 18:46:50 +0100 Subject: [PATCH 147/220] Async backing statement distribution: cluster tests (#6678) * Add `pending_statements_set_when_receiving_fresh_statements` * Add `pending_statements_updated_when_sending_statements` test --- .../src/vstaging/cluster.rs | 352 +++++++++++++++++- 1 file changed, 343 insertions(+), 9 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/cluster.rs b/node/network/statement-distribution/src/vstaging/cluster.rs index 26829feb03f6..922dbbbb8f53 100644 --- a/node/network/statement-distribution/src/vstaging/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/cluster.rs @@ -49,7 +49,7 @@ //! on fire. Nevertheless, we handle the case here to ensure that the behavior of the //! system is well-defined even if an adversary is willing to be slashed. //! -//! 
More concretely, this module exposes a "ClusterTracker" utility which allows us to determine +//! More concretely, this module exposes a [`ClusterTracker`] utility which allows us to determine //! whether to accept or reject messages from other validators in the same group as we //! are in, based on _the most charitable possible interpretation of our protocol rules_, //! and to keep track of what we have sent to other validators in the group and what we may @@ -597,7 +597,7 @@ mod tests { let seconding_limit = 2; let hash_a = CandidateHash(Hash::repeat_byte(1)); - let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); + let mut tracker = ClusterTracker::new(group, seconding_limit).expect("not empty"); tracker.note_received( ValidatorIndex(5), @@ -637,7 +637,7 @@ mod tests { let seconding_limit = 2; - let tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); + let tracker = ClusterTracker::new(group, seconding_limit).expect("not empty"); let hash_a = CandidateHash(Hash::repeat_byte(1)); @@ -850,12 +850,346 @@ mod tests { ); } - // TODO [now]: test that `pending_statements` are set whenever we receive - // a fresh statement. + // Test that the `pending_statements` are set whenever we receive a fresh statement. + // + // Also test that pending statements are sorted, with `Seconded` statements in the front. + #[test] + fn pending_statements_set_when_receiving_fresh_statements() { + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; + + let seconding_limit = 1; + + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); + let hash_a = CandidateHash(Hash::repeat_byte(1)); + let hash_b = CandidateHash(Hash::repeat_byte(2)); + + // Receive a 'Seconded' statement for candidate A. 
+ { + assert_eq!( + tracker.can_receive( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(5)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + assert_eq!(tracker.pending_statements_for(ValidatorIndex(200)), vec![]); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(24)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(146)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + } + + // Receive a 'Valid' statement for candidate A. + { + // First, send a `Seconded` statement for the candidate. + assert_eq!( + tracker.can_send( + ValidatorIndex(24), + ValidatorIndex(200), + CompactStatement::Seconded(hash_a) + ), + Ok(()) + ); + tracker.note_sent( + ValidatorIndex(24), + ValidatorIndex(200), + CompactStatement::Seconded(hash_a), + ); + + // We have to see that the candidate is known by the sender, e.g. we sent them 'Seconded' + // above. 
+ assert_eq!( + tracker.can_receive( + ValidatorIndex(24), + ValidatorIndex(200), + CompactStatement::Valid(hash_a), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(24), + ValidatorIndex(200), + CompactStatement::Valid(hash_a), + ); + + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(5)), + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(200), CompactStatement::Valid(hash_a)) + ] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(200)), + vec![(ValidatorIndex(200), CompactStatement::Valid(hash_a))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(24)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(146)), + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(200), CompactStatement::Valid(hash_a)) + ] + ); + } + + // Receive a 'Seconded' statement for candidate B. + { + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(146), + CompactStatement::Seconded(hash_b), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(5), + ValidatorIndex(146), + CompactStatement::Seconded(hash_b), + ); + + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(5)), + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(200), CompactStatement::Valid(hash_a)) + ] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(200)), + vec![ + (ValidatorIndex(146), CompactStatement::Seconded(hash_b)), + (ValidatorIndex(200), CompactStatement::Valid(hash_a)), + ] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(24)), + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(146), CompactStatement::Seconded(hash_b)) + ], + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(146)), + vec![ + (ValidatorIndex(5), 
CompactStatement::Seconded(hash_a)), + (ValidatorIndex(146), CompactStatement::Seconded(hash_b)), + (ValidatorIndex(200), CompactStatement::Valid(hash_a)), + ] + ); + } + } + + // Test that the `pending_statements` are updated when we send or receive statements from others + // in the cluster. + #[test] + fn pending_statements_updated_when_sending_statements() { + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; + + let seconding_limit = 1; - // TODO [now]: test the `pending_statements` are updated when we send or receive - // statements from others in the cluster. + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); + let hash_a = CandidateHash(Hash::repeat_byte(1)); + let hash_b = CandidateHash(Hash::repeat_byte(2)); - // TODO [now]: test that pending statements are sorted, with `Seconded` statements - // in the front. + // Receive a 'Seconded' statement for candidate A. + { + assert_eq!( + tracker.can_receive( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + // Pending statements should be updated. + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(5)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + assert_eq!(tracker.pending_statements_for(ValidatorIndex(200)), vec![]); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(24)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(146)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + } + + // Receive a 'Valid' statement for candidate B. + { + // First, send a `Seconded` statement for the candidate. 
+ assert_eq!( + tracker.can_send( + ValidatorIndex(24), + ValidatorIndex(200), + CompactStatement::Seconded(hash_b) + ), + Ok(()) + ); + tracker.note_sent( + ValidatorIndex(24), + ValidatorIndex(200), + CompactStatement::Seconded(hash_b), + ); + + // We have to see the candidate is known by the sender, e.g. we sent them 'Seconded'. + assert_eq!( + tracker.can_receive( + ValidatorIndex(24), + ValidatorIndex(200), + CompactStatement::Valid(hash_b), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(24), + ValidatorIndex(200), + CompactStatement::Valid(hash_b), + ); + + // Pending statements should be updated. + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(5)), + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(200), CompactStatement::Valid(hash_b)) + ] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(200)), + vec![(ValidatorIndex(200), CompactStatement::Valid(hash_b))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(24)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(146)), + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(200), CompactStatement::Valid(hash_b)) + ] + ); + } + + // Send a 'Seconded' statement. + { + assert_eq!( + tracker.can_send( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a) + ), + Ok(()) + ); + tracker.note_sent( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + // Pending statements should be updated. 
+ assert_eq!( + tracker.pending_statements_for(ValidatorIndex(5)), + vec![(ValidatorIndex(200), CompactStatement::Valid(hash_b))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(200)), + vec![(ValidatorIndex(200), CompactStatement::Valid(hash_b))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(24)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(146)), + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(200), CompactStatement::Valid(hash_b)) + ] + ); + } + + // Send a 'Valid' statement. + { + // First, send a `Seconded` statement for the candidate. + assert_eq!( + tracker.can_send( + ValidatorIndex(5), + ValidatorIndex(200), + CompactStatement::Seconded(hash_b) + ), + Ok(()) + ); + tracker.note_sent( + ValidatorIndex(5), + ValidatorIndex(200), + CompactStatement::Seconded(hash_b), + ); + + // We have to see that the candidate is known by the sender, e.g. we sent them 'Seconded' + // above. + assert_eq!( + tracker.can_send( + ValidatorIndex(5), + ValidatorIndex(200), + CompactStatement::Valid(hash_b) + ), + Ok(()) + ); + tracker.note_sent( + ValidatorIndex(5), + ValidatorIndex(200), + CompactStatement::Valid(hash_b), + ); + + // Pending statements should be updated. 
+ assert_eq!(tracker.pending_statements_for(ValidatorIndex(5)), vec![]); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(200)), + vec![(ValidatorIndex(200), CompactStatement::Valid(hash_b))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(24)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(146)), + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(200), CompactStatement::Valid(hash_b)) + ] + ); + } + } } From 1cd0ae0c120965f404254d13115af6cf5b1bc2c0 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 7 Feb 2023 23:04:16 -0700 Subject: [PATCH 148/220] fix up --- .../src/vstaging/mod.rs | 66 +++++++++++++++++-- .../src/vstaging/requests.rs | 14 ++-- 2 files changed, 69 insertions(+), 11 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index fa7e0e8f180e..e74e74a12350 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -2353,13 +2353,65 @@ pub(crate) async fn receive_response(state: &mut State) -> UnhandledResponse { /// Handles an incoming response. This does the actual work of validating the response, /// importing statements, sending acknowledgements, etc. #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -pub(crate) async fn receive_response( +pub(crate) async fn handle_response<'a, Context>( ctx: &mut Context, state: &mut State, -) -> UnhandledResponse { - // 1. handle response - // 1. send acknowledgement & needed statements. - // 1. import statements into statement store - // 1. `circulate_statement` for all fresh statements (also, `learned_fresh_statement`) - // 1. if includable, send fresh statements to backing. 
+ response: UnhandledResponse<'a>, // TODO [now]: needs to be altered as this borrows `State` in practice. +) { + let &requests::CandidateIdentifier { + relay_parent, + candidate_hash, + group_index, + } = response.candidate_identifier(); + + let relay_parent_state = match state.per_relay_parent.get_mut(&relay_parent) { + None => return, + Some(s) => s, + }; + + let per_session = match state.per_session.get(&relay_parent_state.session) { + None => return, + Some(s) => s, + }; + + let group = match per_session.groups.get(group_index) { + None => return, + Some(g) => g, + }; + + let res = response.validate_response( + group, + relay_parent_state.session, + |v| per_session.session_info.validators.get(v).map(|x| x.clone()), + |para, g_index| { + let expected_group = group_for_para( + &relay_parent_state.availability_cores, + &relay_parent_state.group_rotation_info, + para, + ); + + Some(g_index) == expected_group + } + ); + + for (peer, rep) in res.reputation_changes { + report_peer(ctx.sender(), peer, rep).await; + } + + let (candidate, pvd, statements) = match res.request_status { + requests::CandidateRequestStatus::Outdated => return, + requests::CandidateRequestStatus::Incomplete => return, + requests::CandidateRequestStatus::Complete { + candidate, + persisted_validation_data, + statements, + } => (candidate, persisted_validation_data, statements), + }; + + // TODO [now] + // - send acknowledgement & statements desired by counterparty + // - import statements into statement store + // - clean up other requests if confirmed. + // - `circulate_statement` for all fresh statements (also, `learned_fresh_statement`) + // - if includable, send fresh statements to backing. 
} diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index 37ab04183e98..d51fe28f370e 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -384,7 +384,7 @@ impl<'a> UnhandledResponse<'a> { self, group: &[ValidatorIndex], session: SessionIndex, - validator_key_lookup: impl Fn(ValidatorIndex) -> ValidatorId, + validator_key_lookup: impl Fn(ValidatorIndex) -> Option, allowed_para_lookup: impl Fn(ParaId, GroupIndex) -> bool, ) -> ResponseValidationOutput { let UnhandledResponse { @@ -478,7 +478,7 @@ fn validate_complete_response( mut sent_seconded_bitmask: BitVec, group: &[ValidatorIndex], session: SessionIndex, - validator_key_lookup: impl Fn(ValidatorIndex) -> ValidatorId, + validator_key_lookup: impl Fn(ValidatorIndex) -> Option, allowed_para_lookup: impl Fn(ParaId, GroupIndex) -> bool, ) -> ResponseValidationOutput { // sanity check bitmask size. 
this is based entirely on @@ -583,8 +583,14 @@ fn validate_complete_response( }, } - let validator_public = - validator_key_lookup(unchecked_statement.unchecked_validator_index()); + let validator_public = match validator_key_lookup(unchecked_statement.unchecked_validator_index()) { + None => { + rep_changes.push((requested_peer.clone(), COST_INVALID_SIGNATURE)); + continue + } + Some(p) => p, + }; + let checked_statement = match unchecked_statement.try_into_checked(&signing_context, &validator_public) { Err(_) => { From dc1ae581f81050cc8d89d987b87cf58811094db6 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 7 Feb 2023 23:04:22 -0700 Subject: [PATCH 149/220] fmt --- .../statement-distribution/src/vstaging/mod.rs | 9 +++------ .../src/vstaging/requests.rs | 15 ++++++++------- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index e74e74a12350..4296ef981f67 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -2358,11 +2358,8 @@ pub(crate) async fn handle_response<'a, Context>( state: &mut State, response: UnhandledResponse<'a>, // TODO [now]: needs to be altered as this borrows `State` in practice. 
) { - let &requests::CandidateIdentifier { - relay_parent, - candidate_hash, - group_index, - } = response.candidate_identifier(); + let &requests::CandidateIdentifier { relay_parent, candidate_hash, group_index } = + response.candidate_identifier(); let relay_parent_state = match state.per_relay_parent.get_mut(&relay_parent) { None => return, @@ -2391,7 +2388,7 @@ pub(crate) async fn handle_response<'a, Context>( ); Some(g_index) == expected_group - } + }, ); for (peer, rep) in res.reputation_changes { diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index d51fe28f370e..49c511da1617 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -583,13 +583,14 @@ fn validate_complete_response( }, } - let validator_public = match validator_key_lookup(unchecked_statement.unchecked_validator_index()) { - None => { - rep_changes.push((requested_peer.clone(), COST_INVALID_SIGNATURE)); - continue - } - Some(p) => p, - }; + let validator_public = + match validator_key_lookup(unchecked_statement.unchecked_validator_index()) { + None => { + rep_changes.push((requested_peer.clone(), COST_INVALID_SIGNATURE)); + continue + }, + Some(p) => p, + }; let checked_statement = match unchecked_statement.try_into_checked(&signing_context, &validator_public) { From e745a60b482b9d8dba23a57f70d8ac4f0c3ce942 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 7 Feb 2023 23:04:50 -0700 Subject: [PATCH 150/220] update TODO --- node/network/statement-distribution/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index e07c78a52639..f4e4dde9a185 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -148,7 +148,7 @@ impl 
StatementDistributionSubsystem { ) .map_err(FatalError::SpawnTask)?; - // TODO [now]: handle vstaging req/res: dispatch pending statements & handling responses. + // TODO [now]: handle vstaging req/res: dispatch pending requests & handling responses. loop { let message = From 040d877780b2f4096c12b35596732b192f671c50 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Feb 2023 17:50:38 -0700 Subject: [PATCH 151/220] rework seconded mask in requests --- .../src/vstaging/grid.rs | 49 +++++++-- .../src/vstaging/mod.rs | 55 +++++++---- .../src/vstaging/requests.rs | 99 +++++++++++++++---- 3 files changed, 158 insertions(+), 45 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index f6b81f2be6ba..3936c8bed231 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -258,14 +258,13 @@ impl GridTracker { return Err(ManifestImportError::Malformed) } + let remote_knowledge = StatementFilter { + seconded_in_group: manifest.seconded_in_group.clone(), + validated_in_group: manifest.validated_in_group.clone(), + }; + // ensure votes are sufficient to back. - let votes = manifest - .seconded_in_group - .iter() - .by_vals() - .zip(manifest.validated_in_group.iter().by_vals()) - .filter(|&(s, v)| s || v) // no double-counting - .count(); + let votes = remote_knowledge.backing_validators(); if votes < backing_threshold { return Err(ManifestImportError::Malformed) @@ -605,6 +604,15 @@ impl GridTracker { } } } + + /// Get the advertised statement filter of a validator for a candidate. 
+ pub fn advertised_statements(
+ &self,
+ validator: ValidatorIndex,
+ candidate_hash: &CandidateHash,
+ ) -> Option {
+ self.received.get(&validator)?.candidate_statement_filter(candidate_hash)
+ }
 }
 
 fn extract_statement_and_group_info(
@@ -844,6 +852,33 @@ impl StatementFilter {
 }
 }
 
+ /// Determine the number of backing validators in the statement filter.
+ pub fn backing_validators(&self) -> usize {
+ self.seconded_in_group
+ .iter()
+ .by_vals()
+ .zip(self.validated_in_group.iter().by_vals())
+ .filter(|&(s, v)| s || v) // no double-counting
+ .count()
+ }
+
+ /// Whether the statement filter has at least one seconded statement.
+ pub fn has_seconded(&self) -> bool {
+ self.seconded_in_group.iter().by_vals().any(|x| x)
+ }
+
+ /// Mask out `seconded` statements in `self` according to the provided
+ /// bitvec. Bits appearing in `mask` will not appear in `self` afterwards.
+ pub fn mask_seconded(&mut self, mask: &BitSlice) {
+ for (mut x, mask) in self
+ .seconded_in_group
+ .iter_mut()
+ .zip(mask.iter().by_vals().chain(std::iter::repeat(false)))
+ {
+ *x = *x && !mask;
+ }
+ }
+
 fn contains(&self, index: usize, statement_kind: StatementKind) -> bool {
 match statement_kind {
 StatementKind::Seconded => self.seconded_in_group.get(index).map_or(false, |x| *x),
diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs
index 4296ef981f67..d235e652ce08 100644
--- a/node/network/statement-distribution/src/vstaging/mod.rs
+++ b/node/network/statement-distribution/src/vstaging/mod.rs
@@ -68,10 +68,10 @@ use candidates::{BadAdvertisement, Candidates, PostConfirmation};
 use cluster::{Accept as ClusterAccept, ClusterTracker, RejectIncoming as ClusterRejectIncoming};
 use grid::{GridTracker, ManifestSummary, StatementFilter};
 use groups::Groups;
-use requests::RequestManager;
+use requests::CandidateIdentifier;
 use statement_store::{StatementOrigin, StatementStore};
 
-pub use requests::UnhandledResponse;
+pub use requests::{UnhandledResponse, RequestManager}; mod candidates; mod cluster; @@ -2310,27 +2310,48 @@ async fn apply_post_confirmation( #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut State) { let peers = &state.peers; - let peer_connected = |id: &_| peers.contains_key(id); - let seconded_mask = |identifier: &requests::CandidateIdentifier| { - let &requests::CandidateIdentifier { relay_parent, candidate_hash, group_index } = - identifier; + let peer_advertised = |identifier: &CandidateIdentifier, peer: &_| { + let peer_data = peers.get(peer)?; - let relay_parent_state = state.per_relay_parent.get(&relay_parent)?; + let relay_parent_state = state.per_relay_parent.get(&identifier.relay_parent)?; let per_session = state.per_session.get(&relay_parent_state.session)?; - let group_size = per_session.groups.get(group_index).map(|x| x.len())?; - let knowledge = local_knowledge_filter( - group_size, - group_index, - candidate_hash, - &relay_parent_state.statement_store, - ); + for validator_id in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| { + per_session.authority_lookup.get(a) + }) { + let filter = relay_parent_state + .local_validator + .as_ref()? + .grid_tracker + .advertised_statements(validator_id, &identifier.candidate_hash); + + if let Some(f) = filter { + return Some(f) + } + } + + None + }; + let seconded_mask = |identifier: &CandidateIdentifier| { + let &CandidateIdentifier { relay_parent, candidate_hash, group_index } = identifier; + + let relay_parent_state = state.per_relay_parent.get(&relay_parent)?; + let per_session = state.per_session.get(&relay_parent_state.session)?; + let group = per_session.groups.get(group_index)?; + let seconding_limit = relay_parent_state.seconding_limit; + + // Request nothing which would be an 'over-seconded' statement. 
+ let mut seconded_mask = bitvec::vec::BitVec::repeat(false, group.len()); + for (i, v) in group.iter().enumerate() { + if relay_parent_state.statement_store.seconded_count(v) >= seconding_limit { + seconded_mask.set(i, true); + } + } - // We request the opposite of what we know. - Some(!knowledge.seconded_in_group) + Some((seconded_mask, polkadot_node_primitives::minimum_votes(group.len()))) }; - while let Some(request) = state.request_manager.next_request(peer_connected, seconded_mask) { + while let Some(request) = state.request_manager.next_request(seconded_mask, peer_advertised) { // Peer is supposedly connected. ctx.send_message(NetworkBridgeTxMessage::SendRequests( vec![Requests::AttestedCandidateV2(request)], diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index 49c511da1617..b1634e723046 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -27,8 +27,9 @@ //! not owned by the request manager). use super::{ - BENEFIT_VALID_RESPONSE, BENEFIT_VALID_STATEMENT, COST_IMPROPERLY_DECODED_RESPONSE, - COST_INVALID_RESPONSE, COST_INVALID_SIGNATURE, COST_UNREQUESTED_RESPONSE_STATEMENT, + grid::StatementFilter, BENEFIT_VALID_RESPONSE, BENEFIT_VALID_STATEMENT, + COST_IMPROPERLY_DECODED_RESPONSE, COST_INVALID_RESPONSE, COST_INVALID_SIGNATURE, + COST_UNREQUESTED_RESPONSE_STATEMENT, }; use crate::LOG_TARGET; @@ -46,7 +47,7 @@ use polkadot_primitives::vstaging::{ ValidatorIndex, }; -use bitvec::{order::Lsb0, vec::BitVec}; +use bitvec::{order::Lsb0, slice::BitSlice, vec::BitVec}; use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered}; use std::{ @@ -245,15 +246,21 @@ impl RequestManager { /// Yields the next request to dispatch, if there is any. /// - /// This function accepts two closures as an argument. 
- /// The first closure indicates whether a peer is still connected.
- /// The second closure is used to construct a mask for limiting the
+ /// This function accepts two closures as arguments.
+ ///
+ /// The first closure is used to construct a mask for limiting the
 /// `Seconded` statements the response is allowed to contain. The mask
- /// has `AND` semantics.
+ /// has `OR` semantics: seconded statements by validators corresponding to bits in the mask
+ /// are not desired. It also returns the required backing threshold
+ /// for the candidate.
+ ///
+ /// The second closure is used to determine the specific advertised
+ /// statements by a peer, to be compared against the mask and backing
+ /// threshold, and returns `None` if the peer is no longer connected.
 pub fn next_request(
 &mut self,
- peer_connected: impl Fn(&PeerId) -> bool,
- seconded_mask: impl Fn(&CandidateIdentifier) -> Option>,
+ seconded_mask: impl Fn(&CandidateIdentifier) -> Option<(BitVec, usize)>,
+ peer_advertised: impl Fn(&CandidateIdentifier, &PeerId) -> Option,
 ) -> Option> {
 if self.pending_responses.len() >= MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize {
 return None
@@ -284,16 +291,7 @@ impl RequestManager {
 continue
 }
 
- entry.known_by.retain(&peer_connected);
-
- let recipient = match entry.known_by.pop_front() {
- None => continue, // no peers.
- Some(r) => r,
- };
-
- entry.known_by.push_back(recipient.clone());
-
- let seconded_mask = match seconded_mask(&id) {
+ let (seconded_mask, backing_threshold) = match seconded_mask(&id) {
 None => {
 cleanup_outdated.push((i, id.clone()));
 continue
@@ -301,8 +299,26 @@ impl RequestManager {
 Some(s) => s,
 };
 
+ if seconded_mask.count_ones() == seconded_mask.len() {
+ // If higher-level code doesn't want any new statements from
+ // any group validators, this request is moot.
+ cleanup_outdated.push((i, id.clone())); + continue + } + + let target = match find_request_target_with_update( + &mut entry.known_by, + id, + &*seconded_mask, + backing_threshold, + &peer_advertised, + ) { + None => continue, + Some(t) => t, + }; + let (request, response_fut) = OutgoingRequest::new( - RequestRecipient::Peer(recipient.clone()), + RequestRecipient::Peer(target.clone()), AttestedCandidateRequest { candidate_hash: id.candidate_hash, seconded_mask: seconded_mask.clone(), @@ -313,7 +329,7 @@ impl RequestManager { self.pending_responses.push(Box::pin(async move { TaggedResponse { identifier: stored_id, - requested_peer: recipient, + requested_peer: target, seconded_mask, response: response_fut.await, } @@ -351,6 +367,47 @@ impl RequestManager { } } +/// Finds a valid request target, returning `None` if none exists. +/// Cleans up disconnected peers and places the returned peer at the back of the queue. +fn find_request_target_with_update( + known_by: &mut VecDeque, + candidate_identifier: &CandidateIdentifier, + seconded_mask: &BitSlice, + backing_threshold: usize, + peer_advertised: impl Fn(&CandidateIdentifier, &PeerId) -> Option, +) -> Option { + let mut prune = Vec::new(); + let mut target = None; + for (i, p) in known_by.iter().enumerate() { + let mut filter = match peer_advertised(candidate_identifier, p) { + None => { + prune.push(i); + continue + }, + Some(f) => f, + }; + + filter.mask_seconded(seconded_mask); + if filter.has_seconded() && filter.backing_validators() >= backing_threshold { + target = Some((i, p.clone())); + break + } + } + + let prune_count = prune.len(); + for i in prune { + known_by.remove(i); + } + + if let Some((i, p)) = target { + known_by.remove(i - prune_count); + known_by.push_back(p.clone()); + Some(p) + } else { + None + } +} + /// A response to a request, which has not yet been handled. 
pub struct UnhandledResponse<'a> { manager: &'a mut RequestManager, From 6269f6c60c450f425faa85b23ee0b0866d36f2a4 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Feb 2023 17:51:07 -0700 Subject: [PATCH 152/220] change doc --- node/network/protocol/src/request_response/vstaging.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/node/network/protocol/src/request_response/vstaging.rs b/node/network/protocol/src/request_response/vstaging.rs index 819bc9881af4..a7beb112a097 100644 --- a/node/network/protocol/src/request_response/vstaging.rs +++ b/node/network/protocol/src/request_response/vstaging.rs @@ -31,14 +31,14 @@ use super::{IsRequest, Protocol}; pub struct AttestedCandidateRequest { /// Hash of the candidate we want to request. pub candidate_hash: CandidateHash, - /// bitfield with 'AND' semantics, indicating which validators - /// to send `Seconded` statements for. + /// bitfield with 'OR' semantics, indicating which validators + /// not to send `Seconded` statements for. /// /// The mask must have exactly the minimum size required to /// fit all validators from the backing group. /// - /// The response may not contain any `Seconded` statements outside - /// of this mask. + /// The response may not contain any `Seconded` statements from validators + /// within this mask. 
pub seconded_mask: BitVec, } From 43ef9278f539786ff6fc29cdf456b74f351c337c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Feb 2023 17:56:00 -0700 Subject: [PATCH 153/220] change unhandledresponse not to borrow request manager --- node/network/statement-distribution/src/vstaging/mod.rs | 5 +++-- .../statement-distribution/src/vstaging/requests.rs | 9 ++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index d235e652ce08..e500ca16127d 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -71,7 +71,7 @@ use groups::Groups; use requests::CandidateIdentifier; use statement_store::{StatementOrigin, StatementStore}; -pub use requests::{UnhandledResponse, RequestManager}; +pub use requests::{RequestManager, UnhandledResponse}; mod candidates; mod cluster; @@ -2377,7 +2377,7 @@ pub(crate) async fn receive_response(state: &mut State) -> UnhandledResponse { pub(crate) async fn handle_response<'a, Context>( ctx: &mut Context, state: &mut State, - response: UnhandledResponse<'a>, // TODO [now]: needs to be altered as this borrows `State` in practice. 
+ response: UnhandledResponse, ) { let &requests::CandidateIdentifier { relay_parent, candidate_hash, group_index } = response.candidate_identifier(); @@ -2398,6 +2398,7 @@ pub(crate) async fn handle_response<'a, Context>( }; let res = response.validate_response( + &mut state.request_manager, group, relay_parent_state.session, |v| per_session.session_info.validators.get(v).map(|x| x.clone()), diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index b1634e723046..1e30780abd1b 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -362,7 +362,7 @@ impl RequestManager { pub async fn await_incoming(&mut self) -> Option { match self.pending_responses.next().await { None => None, - Some(response) => Some(UnhandledResponse { manager: self, response }), + Some(response) => Some(UnhandledResponse { response }), } } } @@ -409,12 +409,11 @@ fn find_request_target_with_update( } /// A response to a request, which has not yet been handled. -pub struct UnhandledResponse<'a> { - manager: &'a mut RequestManager, +pub struct UnhandledResponse { response: TaggedResponse, } -impl<'a> UnhandledResponse<'a> { +impl UnhandledResponse { /// Get the candidate identifier which the corresponding request /// was classified under. pub fn candidate_identifier(&self) -> &CandidateIdentifier { @@ -439,13 +438,13 @@ impl<'a> UnhandledResponse<'a> { /// will not be queried except for validator indices in the group. 
pub fn validate_response( self, + manager: &mut RequestManager, group: &[ValidatorIndex], session: SessionIndex, validator_key_lookup: impl Fn(ValidatorIndex) -> Option, allowed_para_lookup: impl Fn(ParaId, GroupIndex) -> bool, ) -> ResponseValidationOutput { let UnhandledResponse { - manager, response: TaggedResponse { identifier, requested_peer, response, seconded_mask }, } = self; From 8563f320e3c18fc6ea081bf187796630302393fc Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Feb 2023 18:16:25 -0700 Subject: [PATCH 154/220] only accept responses sufficient to back --- .../src/vstaging/mod.rs | 9 +++- .../src/vstaging/requests.rs | 41 +++++++++++++------ 2 files changed, 36 insertions(+), 14 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index e500ca16127d..3b4cffbc7373 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -2428,9 +2428,14 @@ pub(crate) async fn handle_response<'a, Context>( }; // TODO [now] - // - send acknowledgement & statements desired by counterparty // - import statements into statement store // - clean up other requests if confirmed. - // - `circulate_statement` for all fresh statements (also, `learned_fresh_statement`) // - if includable, send fresh statements to backing. + // we don't need to send acknowledgement yet because + // 1. the candidate is not known yet, so cannot be backed + // 2. providing the statements to backing will lead to 'Backed' message. + // 3. on 'Backed' we will send acknowledgements/follow up statements when this becomes + // includable. } + +// TODO [now]: answer request. 
diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index 1e30780abd1b..a6a8710900bc 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -80,6 +80,7 @@ struct TaggedResponse { identifier: CandidateIdentifier, requested_peer: PeerId, seconded_mask: BitVec, + backing_threshold: usize, response: OutgoingResult, } @@ -331,6 +332,7 @@ impl RequestManager { identifier: stored_id, requested_peer: target, seconded_mask, + backing_threshold, response: response_fut.await, } })); @@ -424,6 +426,9 @@ impl UnhandledResponse { /// candidate, the [`PersistedValidationData`] of the candidate, and requested /// checked statements. /// + /// Valid responses are defined as those which provide a valid candidate + /// and signatures which match the identifier, and provide enough statements to back the candidate. + /// /// This will also produce a record of misbehaviors by peers: /// * If the response is partially valid, misbehavior by the responding peer. /// * If there are other peers which have advertised the same candidate for different @@ -445,7 +450,14 @@ impl UnhandledResponse { allowed_para_lookup: impl Fn(ParaId, GroupIndex) -> bool, ) -> ResponseValidationOutput { let UnhandledResponse { - response: TaggedResponse { identifier, requested_peer, response, seconded_mask }, + response: + TaggedResponse { + identifier, + requested_peer, + backing_threshold, + response, + seconded_mask, + }, } = self; // handle races if the candidate is no longer known. 
@@ -510,6 +522,7 @@ impl UnhandledResponse { let mut output = validate_complete_response( &identifier, + backing_threshold, complete_response, requested_peer, seconded_mask, @@ -529,6 +542,7 @@ impl UnhandledResponse { fn validate_complete_response( identifier: &CandidateIdentifier, + backing_threshold: usize, response: AttestedCandidateResponse, requested_peer: PeerId, mut sent_seconded_bitmask: BitVec, @@ -588,8 +602,7 @@ fn validate_complete_response( let mut statements = Vec::with_capacity(std::cmp::min(response.statements.len(), group.len() * 2)); - let mut received_seconded = BitVec::::repeat(false, group.len()); - let mut received_valid = BitVec::::repeat(false, group.len()); + let mut received_filter = StatementFilter::new(group.len()); let index_in_group = |v: ValidatorIndex| group.iter().position(|x| &v == x); @@ -619,20 +632,20 @@ fn validate_complete_response( // duplicate trackers have the correct size for the group. match unchecked_statement.unchecked_payload() { CompactStatement::Seconded(_) => { - if !sent_seconded_bitmask[i] { + if sent_seconded_bitmask[i] { rep_changes .push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); continue } - if received_seconded[i] { + if received_filter.seconded_in_group[i] { rep_changes .push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); continue } }, CompactStatement::Valid(_) => - if received_valid[i] { + if received_filter.validated_in_group[i] { rep_changes .push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); continue @@ -659,10 +672,10 @@ fn validate_complete_response( match checked_statement.payload() { CompactStatement::Seconded(_) => { - received_seconded.set(i, true); + received_filter.seconded_in_group.set(i, true); }, CompactStatement::Valid(_) => { - received_valid.set(i, true); + received_filter.validated_in_group.set(i, true); }, } @@ -670,6 +683,13 @@ fn validate_complete_response( rep_changes.push((requested_peer.clone(), BENEFIT_VALID_STATEMENT)); } + 
// Only accept responses which can back the candidate. + if !received_filter.has_seconded() || + received_filter.backing_validators() < backing_threshold + { + return invalid_candidate_output() + } + statements }; @@ -693,10 +713,7 @@ pub enum CandidateRequestStatus { /// The response either did not arrive or was invalid. Incomplete, /// The response completed the request. Statements sent beyond the - /// mask have been ignored. More statements which may have been - /// expected may not be present, and higher-level code should - /// evaluate whether the candidate is still worth storing and whether - /// the sender should be punished. + /// mask have been ignored. Complete { candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, From 72394c4e7c9ea45c52f1956b31a0cf6b705a454f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Feb 2023 18:35:18 -0700 Subject: [PATCH 155/220] finish implementing response handling --- .../src/vstaging/candidates.rs | 2 +- .../src/vstaging/mod.rs | 134 +++++++++++++----- 2 files changed, 96 insertions(+), 40 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index 25eb1037d868..9d7a0550bb0a 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -139,7 +139,7 @@ impl Candidates { } /// Note that a candidate has been confirmed. If the candidate has just been - /// confirmed, then + /// confirmed, then this returns `Some`. Otherwise, `None`. /// /// This does no sanity-checking of input data, and will overwrite /// already-confirmed canidates. 
diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 3b4cffbc7373..a5872fd0a2e9 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -2325,6 +2325,8 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St .grid_tracker .advertised_statements(validator_id, &identifier.candidate_hash); + // TODO [now]: this doesn't handle requests in the cluster properly. + if let Some(f) = filter { return Some(f) } @@ -2382,6 +2384,86 @@ pub(crate) async fn handle_response<'a, Context>( let &requests::CandidateIdentifier { relay_parent, candidate_hash, group_index } = response.candidate_identifier(); + let post_confirmation = { + let relay_parent_state = match state.per_relay_parent.get_mut(&relay_parent) { + None => return, + Some(s) => s, + }; + + let per_session = match state.per_session.get(&relay_parent_state.session) { + None => return, + Some(s) => s, + }; + + let group = match per_session.groups.get(group_index) { + None => return, + Some(g) => g, + }; + + let res = response.validate_response( + &mut state.request_manager, + group, + relay_parent_state.session, + |v| per_session.session_info.validators.get(v).map(|x| x.clone()), + |para, g_index| { + let expected_group = group_for_para( + &relay_parent_state.availability_cores, + &relay_parent_state.group_rotation_info, + para, + ); + + Some(g_index) == expected_group + }, + ); + + for (peer, rep) in res.reputation_changes { + report_peer(ctx.sender(), peer, rep).await; + } + + let (candidate, pvd, statements) = match res.request_status { + requests::CandidateRequestStatus::Outdated => return, + requests::CandidateRequestStatus::Incomplete => return, + requests::CandidateRequestStatus::Complete { + candidate, + persisted_validation_data, + statements, + } => (candidate, persisted_validation_data, statements), + }; + + for statement in statements { 
+ let _ = relay_parent_state.statement_store.insert( + &per_session.groups, + statement, + StatementOrigin::Remote, + ); + } + + if let Some(post_confirmation) = + state.candidates.confirm_candidate(candidate_hash, candidate, pvd, group_index) + { + post_confirmation + } else { + gum::warn!( + target: LOG_TARGET, + ?candidate_hash, + "Candidate re-confirmed by request/response: logic error", + ); + + return + } + }; + + apply_post_confirmation(ctx, state, post_confirmation).await; + + let confirmed = state.candidates.get_confirmed(&candidate_hash).expect("just confirmed; qed"); + + // Although the candidate is confirmed, it isn't yet on the + // hypothetical frontier of the fragment tree. Later, when it is, + // we will import statements. + if !confirmed.is_importable(None) { + return + } + let relay_parent_state = match state.per_relay_parent.get_mut(&relay_parent) { None => return, Some(s) => s, @@ -2392,47 +2474,21 @@ pub(crate) async fn handle_response<'a, Context>( Some(s) => s, }; - let group = match per_session.groups.get(group_index) { - None => return, - Some(g) => g, - }; - - let res = response.validate_response( - &mut state.request_manager, - group, - relay_parent_state.session, - |v| per_session.session_info.validators.get(v).map(|x| x.clone()), - |para, g_index| { - let expected_group = group_for_para( - &relay_parent_state.availability_cores, - &relay_parent_state.group_rotation_info, - para, - ); - - Some(g_index) == expected_group - }, - ); - - for (peer, rep) in res.reputation_changes { - report_peer(ctx.sender(), peer, rep).await; - } - - let (candidate, pvd, statements) = match res.request_status { - requests::CandidateRequestStatus::Outdated => return, - requests::CandidateRequestStatus::Incomplete => return, - requests::CandidateRequestStatus::Complete { - candidate, - persisted_validation_data, - statements, - } => (candidate, persisted_validation_data, statements), - }; + send_backing_fresh_statements( + ctx, + candidate_hash, + group_index, 
+ &relay_parent, + relay_parent_state, + confirmed, + per_session, + ) + .await; - // TODO [now] - // - import statements into statement store - // - clean up other requests if confirmed. - // - if includable, send fresh statements to backing. // we don't need to send acknowledgement yet because - // 1. the candidate is not known yet, so cannot be backed + // 1. the candidate is not known yet, so cannot be backed. + // any previous confirmation is a bug, because `apply_post_confirmation` is meant to + // clear requests. // 2. providing the statements to backing will lead to 'Backed' message. // 3. on 'Backed' we will send acknowledgements/follow up statements when this becomes // includable. From 9d9770d879d62d43011c22275ff9fa78e8594230 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 9 Feb 2023 13:34:11 -0700 Subject: [PATCH 156/220] extract statement filter to protocol crate --- node/network/protocol/src/lib.rs | 107 +++++++++++++----- .../protocol/src/request_response/vstaging.rs | 13 +-- 2 files changed, 84 insertions(+), 36 deletions(-) diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 44100d4b1faf..be07fd794249 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -589,7 +589,7 @@ pub mod v1 { /// vstaging network protocol types. pub mod vstaging { - use bitvec::vec::BitVec; + use bitvec::{order::Lsb0, slice::BitSlice, vec::BitVec}; use parity_scale_codec::{Decode, Encode}; use polkadot_primitives::vstaging::{ @@ -610,6 +610,71 @@ pub mod vstaging { Bitfield(Hash, UncheckedSignedAvailabilityBitfield), } + /// Bitfields indicating the statements that are known or undesired + /// about a candidate. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct StatementFilter { + /// Seconded statements. '1' is known or undesired. + pub seconded_in_group: BitVec, + /// Valid statements. '1' is known or undesired. 
+ pub validated_in_group: BitVec, + } + + impl StatementFilter { + /// Create a new filter with the given group size. + pub fn new(group_size: usize) -> Self { + StatementFilter { + seconded_in_group: BitVec::repeat(false, group_size), + validated_in_group: BitVec::repeat(false, group_size), + } + } + + /// Whether the filter has a specific expected length, consistent across both + /// bitfields. + pub fn has_len(&self, len: usize) -> bool { + self.seconded_in_group.len() == len && self.validated_in_group.len() == len + } + + /// Determine the number of backing validators in the statement filter. + pub fn backing_validators(&self) -> usize { + self.seconded_in_group + .iter() + .by_vals() + .zip(self.validated_in_group.iter().by_vals()) + .filter(|&(s, v)| s || v) // no double-counting + .count() + } + + /// Whether the statement filter has at least one seconded statement. + pub fn has_seconded(&self) -> bool { + self.seconded_in_group.iter().by_vals().any(|x| x) + } + + /// Mask out `Seconded` statements in `self` according to the provided + /// bitvec. Bits appearing in `mask` will not appear in `self` afterwards. + pub fn mask_seconded(&mut self, mask: &BitSlice) { + for (mut x, mask) in self + .seconded_in_group + .iter_mut() + .zip(mask.iter().by_vals().chain(std::iter::repeat(false))) + { + *x = *x && mask; + } + } + + /// Mask out `Valid` statements in `self` according to the provided + /// bitvec. Bits appearing in `mask` will not appear in `self` afterwards. + pub fn mask_valid(&mut self, mask: &BitSlice) { + for (mut x, mask) in self + .validated_in_group + .iter_mut() + .zip(mask.iter().by_vals().chain(std::iter::repeat(false))) + { + *x = *x && mask; + } + } + } + /// An inventory of a backed candidate, which can be requested. #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] pub struct BackedCandidateManifest { @@ -625,22 +690,14 @@ pub mod vstaging { pub para_id: ParaId, /// The head-data corresponding to the candidate. 
pub parent_head_data_hash: Hash, - /// A bitfield which indicates which validators in the para's - /// group at the relay-parent have validated this candidate - /// and issued `Seconded` statements about it. - /// - /// This MUST have exactly the minimum amount of bytes - /// necessary to represent the number of validators in the - /// assigned backing group as-of the relay-parent. - pub seconded_in_group: BitVec, - /// A bitfield which indicates which validators in the para's - /// group at the relay-parent have validated this candidate - /// and issued `Valid` statements about it. + /// A statement filter which indicates which validators in the + /// para's group at the relay-parent have validated this candidate + /// and issued statements about it, to the advertiser's knowledge. /// /// This MUST have exactly the minimum amount of bytes - /// necessary to represent the number of validators in the - /// assigned backing group as-of the relay-parent. - pub validated_in_group: BitVec, + /// necessary to represent the number of validators in the assigned + /// backing group as-of the relay-parent. + pub statement_knowledge: StatementFilter, } /// An acknowledgement of a backed candidate being known. @@ -648,22 +705,14 @@ pub mod vstaging { pub struct BackedCandidateAcknowledgement { /// The hash of the candidate. pub candidate_hash: CandidateHash, - /// A bitfield which indicates which validators in the para's - /// group at the relay-parent have validated this candidate - /// and issued `Seconded` statements about it. - /// - /// This MUST have exactly the minimum amount of bytes - /// necessary to represent the number of validators in the - /// assigned backing group as-of the relay-parent. - pub seconded_in_group: BitVec, - /// A bitfield which indicates which validators in the para's - /// group at the relay-parent have validated this candidate - /// and issued `Valid` statements about it. 
+ /// A statement filter which indicates which validators in the + /// para's group at the relay-parent have validated this candidate + /// and issued statements about it, to the advertiser's knowledge. /// /// This MUST have exactly the minimum amount of bytes - /// necessary to represent the number of validators in the - /// assigned backing group as-of the relay-parent. - pub validated_in_group: BitVec, + /// necessary to represent the number of validators in the assigned + /// backing group as-of the relay-parent. + pub statement_knowledge: StatementFilter, } /// Network messages used by the statement distribution subsystem. diff --git a/node/network/protocol/src/request_response/vstaging.rs b/node/network/protocol/src/request_response/vstaging.rs index a7beb112a097..8bf34b3cc8e9 100644 --- a/node/network/protocol/src/request_response/vstaging.rs +++ b/node/network/protocol/src/request_response/vstaging.rs @@ -18,28 +18,27 @@ use parity_scale_codec::{Decode, Encode}; -use bitvec::{order::Lsb0, vec::BitVec}; use polkadot_primitives::vstaging::{ CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, PersistedValidationData, UncheckedSignedStatement, }; use super::{IsRequest, Protocol}; +use crate::vstaging::StatementFilter; /// Request a candidate with statements. #[derive(Debug, Clone, Encode, Decode)] pub struct AttestedCandidateRequest { /// Hash of the candidate we want to request. pub candidate_hash: CandidateHash, - /// bitfield with 'OR' semantics, indicating which validators - /// not to send `Seconded` statements for. + /// Statement filter with 'OR' semantics, indicating which validators + /// not to send statements for. /// - /// The mask must have exactly the minimum size required to + /// The filter must have exactly the minimum size required to /// fit all validators from the backing group. /// - /// The response may not contain any `Seconded` statements from validators - /// within this mask. 
- pub seconded_mask: BitVec, + /// The response may not contain any statements masked out by this mask. + pub mask: StatementFilter, } /// Response to an `AttestedCandidateRequest`. From 784c1e75507f1a028d5327b9d1a052ff6b01d2b4 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 9 Feb 2023 13:46:17 -0700 Subject: [PATCH 157/220] rework requests: use statement filter in network protocol --- .../src/vstaging/grid.rs | 103 +++++------------ .../src/vstaging/mod.rs | 43 +++---- .../src/vstaging/requests.rs | 107 ++++++++++-------- .../src/vstaging/statement_store.rs | 3 +- 4 files changed, 110 insertions(+), 146 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 3936c8bed231..cae653a2e955 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -17,7 +17,9 @@ //! Utilities for handling distribution of backed candidates along //! the grid. 
-use polkadot_node_network_protocol::{grid_topology::SessionGridTopology, PeerId}; +use polkadot_node_network_protocol::{ + grid_topology::SessionGridTopology, vstaging::StatementFilter, PeerId, +}; use polkadot_primitives::vstaging::{ AuthorityDiscoveryId, CandidateHash, CompactStatement, GroupIndex, Hash, ValidatorIndex, }; @@ -248,21 +250,16 @@ impl GridTracker { None => return Err(ManifestImportError::Malformed), }; - if manifest.seconded_in_group.len() != group_size || - manifest.validated_in_group.len() != group_size - { + let remote_knowledge = manifest.statement_knowledge.clone(); + + if !remote_knowledge.has_len(group_size) { return Err(ManifestImportError::Malformed) } - if manifest.seconded_in_group.count_ones() == 0 { + if !remote_knowledge.has_seconded() { return Err(ManifestImportError::Malformed) } - let remote_knowledge = StatementFilter { - seconded_in_group: manifest.seconded_in_group.clone(), - validated_in_group: manifest.validated_in_group.clone(), - }; - // ensure votes are sufficient to back. let votes = remote_knowledge.backing_validators(); @@ -270,11 +267,6 @@ impl GridTracker { return Err(ManifestImportError::Malformed) } - let remote_knowledge = StatementFilter { - seconded_in_group: manifest.seconded_in_group.clone(), - validated_in_group: manifest.validated_in_group.clone(), - }; - self.received.entry(sender).or_default().import_received( group_size, seconding_limit, @@ -665,12 +657,9 @@ pub struct ManifestSummary { pub claimed_parent_hash: Hash, /// The claimed group index assigned to the candidate. pub claimed_group_index: GroupIndex, - /// A bitfield of validators in the group which seconded the - /// candidate. - pub seconded_in_group: BitVec, - /// A bitfield of validators in the group which validated the - /// candidate. - pub validated_in_group: BitVec, + /// A statement filter sent alongside the candidate, communicating + /// knowledge. + pub statement_knowledge: StatementFilter, } /// Errors in importing a manifest. 
@@ -707,10 +696,7 @@ impl ReceivedManifests { &self, candidate_hash: &CandidateHash, ) -> Option { - self.received.get(candidate_hash).map(|m| StatementFilter { - seconded_in_group: m.seconded_in_group.clone(), - validated_in_group: m.validated_in_group.clone(), - }) + self.received.get(candidate_hash).map(|m| m.statement_knowledge.clone()) } /// Attempt to import a received manifest from a counterparty. @@ -748,16 +734,25 @@ impl ReceivedManifests { return Err(ManifestImportError::Conflicting) } - if !manifest_summary.seconded_in_group.contains(&prev.seconded_in_group) { + if !manifest_summary + .statement_knowledge + .seconded_in_group + .contains(&prev.statement_knowledge.seconded_in_group) + { return Err(ManifestImportError::Conflicting) } - if !manifest_summary.validated_in_group.contains(&prev.validated_in_group) { + if !manifest_summary + .statement_knowledge + .validated_in_group + .contains(&prev.statement_knowledge.validated_in_group) + { return Err(ManifestImportError::Conflicting) } - let mut fresh_seconded = manifest_summary.seconded_in_group.clone(); - fresh_seconded |= &prev.seconded_in_group; + let mut fresh_seconded = + manifest_summary.statement_knowledge.seconded_in_group.clone(); + fresh_seconded |= &prev.statement_knowledge.seconded_in_group; let within_limits = updating_ensure_within_seconding_limit( &mut self.seconded_counts, @@ -783,7 +778,7 @@ impl ReceivedManifests { manifest_summary.claimed_group_index, group_size, seconding_limit, - &*manifest_summary.seconded_in_group, + &*manifest_summary.statement_knowledge.seconded_in_group, ); if within_limits { @@ -833,52 +828,12 @@ enum StatementKind { Valid, } -/// Bitfields indicating the statements that are known or undesired -/// about a candidate. -#[derive(Clone)] -pub struct StatementFilter { - /// Seconded statements. '1' is known or undesired. - pub seconded_in_group: BitVec, - /// Valid statements. '1' is known or undesired. 
- pub validated_in_group: BitVec, +trait FilterQuery { + fn contains(&self, index: usize, statement_kind: StatementKind) -> bool; + fn set(&mut self, index: usize, statement_kind: StatementKind); } -impl StatementFilter { - /// Create a new filter with the given group size. - pub fn new(group_size: usize) -> Self { - StatementFilter { - seconded_in_group: BitVec::repeat(false, group_size), - validated_in_group: BitVec::repeat(false, group_size), - } - } - - /// Determine the number of backing validators in the statement filter. - pub fn backing_validators(&self) -> usize { - self.seconded_in_group - .iter() - .by_vals() - .zip(self.validated_in_group.iter().by_vals()) - .filter(|&(s, v)| s || v) // no double-counting - .count() - } - - /// Whether the statement filter has at least one seconded statement. - pub fn has_seconded(&self) -> bool { - self.seconded_in_group.iter().by_vals().any(|x| x) - } - - /// Mask out `seconded` statements in `self` according to the provided - /// bitvec. Bits appearing in `mask` will not appear in `self` afterwards. 
- pub fn mask_seconded(&mut self, mask: &BitSlice) { - for (mut x, mask) in self - .seconded_in_group - .iter_mut() - .zip(mask.iter().by_vals().chain(std::iter::repeat(false))) - { - *x = *x && mask; - } - } - +impl FilterQuery for StatementFilter { fn contains(&self, index: usize, statement_kind: StatementKind) -> bool { match statement_kind { StatementKind::Seconded => self.seconded_in_group.get(index).map_or(false, |x| *x), diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index a5872fd0a2e9..2a50ec989c40 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -25,8 +25,8 @@ use polkadot_node_network_protocol::{ grid_topology::{RequiredRouting, SessionGridTopology}, peer_set::ValidationVersion, request_response::Requests, - vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep, - Versioned, View, + vstaging::{self as protocol_vstaging, StatementFilter}, + IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_primitives::{ SignedFullStatementWithPVD, StatementWithPVD as FullStatementWithPVD, @@ -66,9 +66,9 @@ use crate::{ }; use candidates::{BadAdvertisement, Candidates, PostConfirmation}; use cluster::{Accept as ClusterAccept, ClusterTracker, RejectIncoming as ClusterRejectIncoming}; -use grid::{GridTracker, ManifestSummary, StatementFilter}; +use grid::{GridTracker, ManifestSummary}; use groups::Groups; -use requests::CandidateIdentifier; +use requests::{CandidateIdentifier, RequestProperties}; use statement_store::{StatementOrigin, StatementStore}; pub use requests::{RequestManager, UnhandledResponse}; @@ -792,8 +792,7 @@ async fn send_pending_grid_messages( group_index, para_id: confirmed_candidate.para_id(), parent_head_data_hash: confirmed_candidate.parent_head_data_hash(), - seconded_in_group: 
local_knowledge.seconded_in_group.clone(), - validated_in_group: local_knowledge.validated_in_group.clone(), + statement_knowledge: local_knowledge.clone(), }; let grid = &mut relay_parent_state @@ -1675,13 +1674,11 @@ async fn provide_candidate_to_grid( group_index, para_id: confirmed_candidate.para_id(), parent_head_data_hash: confirmed_candidate.parent_head_data_hash(), - seconded_in_group: filter.seconded_in_group.clone(), - validated_in_group: filter.validated_in_group.clone(), + statement_knowledge: filter.clone(), }; let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { candidate_hash, - seconded_in_group: filter.seconded_in_group.clone(), - validated_in_group: filter.validated_in_group.clone(), + statement_knowledge: filter.clone(), }; let inventory_message = Versioned::VStaging( @@ -2064,8 +2061,7 @@ async fn handle_incoming_manifest( grid::ManifestSummary { claimed_parent_hash: manifest.parent_head_data_hash, claimed_group_index: manifest.group_index, - seconded_in_group: manifest.seconded_in_group, - validated_in_group: manifest.validated_in_group, + statement_knowledge: manifest.statement_knowledge, }, grid::ManifestKind::Full, ) @@ -2136,8 +2132,7 @@ fn acknowledgement_and_statement_messages( let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { candidate_hash, - seconded_in_group: local_knowledge.seconded_in_group.clone(), - validated_in_group: local_knowledge.validated_in_group.clone(), + statement_knowledge: local_knowledge.clone(), }; let msg = Versioned::VStaging( @@ -2204,8 +2199,7 @@ async fn handle_incoming_acknowledgement( grid::ManifestSummary { claimed_parent_hash: parent_head_data_hash, claimed_group_index: group_index, - seconded_in_group: acknowledgement.seconded_in_group, - validated_in_group: acknowledgement.validated_in_group, + statement_knowledge: acknowledgement.statement_knowledge, }, grid::ManifestKind::Acknowledgement, ) @@ -2334,7 +2328,7 @@ pub(crate) async fn dispatch_requests(ctx: &mut 
Context, state: &mut St None }; - let seconded_mask = |identifier: &CandidateIdentifier| { + let request_props = |identifier: &CandidateIdentifier| { let &CandidateIdentifier { relay_parent, candidate_hash, group_index } = identifier; let relay_parent_state = state.per_relay_parent.get(&relay_parent)?; @@ -2343,17 +2337,21 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St let seconding_limit = relay_parent_state.seconding_limit; // Request nothing which would be an 'over-seconded' statement. - let mut seconded_mask = bitvec::vec::BitVec::repeat(false, group.len()); + let mut unwanted_mask = StatementFilter::new(group.len()); for (i, v) in group.iter().enumerate() { if relay_parent_state.statement_store.seconded_count(v) >= seconding_limit { - seconded_mask.set(i, true); + unwanted_mask.seconded_in_group.set(i, true); } } - Some((seconded_mask, polkadot_node_primitives::minimum_votes(group.len()))) + // TODO [now]: don't require backing threshold for cluster candidates. + Some(RequestProperties { + unwanted_mask, + backing_threshold: Some(polkadot_node_primitives::minimum_votes(group.len())), + }) }; - while let Some(request) = state.request_manager.next_request(seconded_mask, peer_advertised) { + while let Some(request) = state.request_manager.next_request(request_props, peer_advertised) { // Peer is supposedly connected. ctx.send_message(NetworkBridgeTxMessage::SendRequests( vec![Requests::AttestedCandidateV2(request)], @@ -2485,6 +2483,9 @@ pub(crate) async fn handle_response<'a, Context>( ) .await; + // TODO [now]: circulate fresh statements. if this is a grid candidate, this'll be a + // no-op. + // we don't need to send acknowledgement yet because // 1. the candidate is not known yet, so cannot be backed. 
// any previous confirmation is a bug, because `apply_post_confirmation` is meant to diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index a6a8710900bc..3fab5d875619 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -27,9 +27,8 @@ //! not owned by the request manager). use super::{ - grid::StatementFilter, BENEFIT_VALID_RESPONSE, BENEFIT_VALID_STATEMENT, - COST_IMPROPERLY_DECODED_RESPONSE, COST_INVALID_RESPONSE, COST_INVALID_SIGNATURE, - COST_UNREQUESTED_RESPONSE_STATEMENT, + BENEFIT_VALID_RESPONSE, BENEFIT_VALID_STATEMENT, COST_IMPROPERLY_DECODED_RESPONSE, + COST_INVALID_RESPONSE, COST_INVALID_SIGNATURE, COST_UNREQUESTED_RESPONSE_STATEMENT, }; use crate::LOG_TARGET; @@ -39,6 +38,7 @@ use polkadot_node_network_protocol::{ vstaging::{AttestedCandidateRequest, AttestedCandidateResponse}, OutgoingRequest, OutgoingResult, MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, }, + vstaging::StatementFilter, PeerId, UnifiedReputationChange as Rep, }; use polkadot_primitives::vstaging::{ @@ -79,8 +79,7 @@ pub struct CandidateIdentifier { struct TaggedResponse { identifier: CandidateIdentifier, requested_peer: PeerId, - seconded_mask: BitVec, - backing_threshold: usize, + props: RequestProperties, response: OutgoingResult, } @@ -249,18 +248,16 @@ impl RequestManager { /// /// This function accepts three closures as an argument. /// - /// The first closure is used to construct a mask for limiting the - /// `Seconded` statements the response is allowed to contain. The mask - /// has `OR` semantics: seconded statements by validators corresponding to bits in the mask - /// are not desired. It also returns the required backing threshold - /// for the candidate. 
+ /// The first closure is used to gather information about the desired + /// properties of a response, which is used to select targets and validate + /// the response later on. /// /// The second closure is used to determine the specific advertised /// statements by a peer, to be compared against the mask and backing /// threshold and returns `None` if the peer is no longer connected. pub fn next_request( &mut self, - seconded_mask: impl Fn(&CandidateIdentifier) -> Option<(BitVec, usize)>, + request_props: impl Fn(&CandidateIdentifier) -> Option, peer_advertised: impl Fn(&CandidateIdentifier, &PeerId) -> Option, ) -> Option> { if self.pending_responses.len() >= MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize { @@ -292,7 +289,7 @@ impl RequestManager { continue } - let (seconded_mask, backing_threshold) = match seconded_mask(&id) { + let props = match request_props(&id) { None => { cleanup_outdated.push((i, id.clone())); continue @@ -300,18 +297,10 @@ impl RequestManager { Some(s) => s, }; - if seconded_mask.count_ones() == seconded_mask.len() { - // If higher-level code doesn't want any new statements from - // any group validators, this request is moot. - cleanup_outdated.push((i, id.clone())); - continue - } - let target = match find_request_target_with_update( &mut entry.known_by, id, - &*seconded_mask, - backing_threshold, + &props, &peer_advertised, ) { None => continue, @@ -322,7 +311,7 @@ impl RequestManager { RequestRecipient::Peer(target.clone()), AttestedCandidateRequest { candidate_hash: id.candidate_hash, - seconded_mask: seconded_mask.clone(), + mask: props.unwanted_mask.clone(), }, ); @@ -331,8 +320,7 @@ impl RequestManager { TaggedResponse { identifier: stored_id, requested_peer: target, - seconded_mask, - backing_threshold, + props, response: response_fut.await, } })); @@ -369,13 +357,26 @@ impl RequestManager { } } +/// Properties used in target selection and validation of a request. 
+pub struct RequestProperties { + /// A mask for limiting the statements the response is allowed to contain. + /// The mask has `OR` semantics: statements by validators corresponding to bits + /// in the mask are not desired. It also returns the required backing threshold + /// for the candidate. + pub unwanted_mask: StatementFilter, + /// The required backing threshold, if any. If this is `Some`, then requests will only + /// be made to peers which can provide enough statements to back the candidate, when + /// taking into account the unwanted_mask`, and a response will only be validated + /// in the case of those statements. + pub backing_threshold: Option, +} + /// Finds a valid request target, returning `None` if none exists. /// Cleans up disconnected peers and places the returned peer at the back of the queue. fn find_request_target_with_update( known_by: &mut VecDeque, candidate_identifier: &CandidateIdentifier, - seconded_mask: &BitSlice, - backing_threshold: usize, + props: &RequestProperties, peer_advertised: impl Fn(&CandidateIdentifier, &PeerId) -> Option, ) -> Option { let mut prune = Vec::new(); @@ -389,8 +390,9 @@ fn find_request_target_with_update( Some(f) => f, }; - filter.mask_seconded(seconded_mask); - if filter.has_seconded() && filter.backing_validators() >= backing_threshold { + filter.mask_seconded(&props.unwanted_mask.seconded_in_group); + filter.mask_valid(&props.unwanted_mask.validated_in_group); + if seconded_and_sufficient(&filter, props.backing_threshold) { target = Some((i, p.clone())); break } @@ -410,6 +412,12 @@ fn find_request_target_with_update( } } +// TODO [now]: for cases where we already have statements, this isn't +// necessary. +fn seconded_and_sufficient(filter: &StatementFilter, backing_threshold: Option) -> bool { + filter.has_seconded() && backing_threshold.map_or(true, |t| filter.backing_validators() >= t) +} + /// A response to a request, which has not yet been handled. 
pub struct UnhandledResponse { response: TaggedResponse, @@ -450,14 +458,7 @@ impl UnhandledResponse { allowed_para_lookup: impl Fn(ParaId, GroupIndex) -> bool, ) -> ResponseValidationOutput { let UnhandledResponse { - response: - TaggedResponse { - identifier, - requested_peer, - backing_threshold, - response, - seconded_mask, - }, + response: TaggedResponse { identifier, requested_peer, props, response }, } = self; // handle races if the candidate is no longer known. @@ -522,10 +523,9 @@ impl UnhandledResponse { let mut output = validate_complete_response( &identifier, - backing_threshold, + props, complete_response, requested_peer, - seconded_mask, group, session, validator_key_lookup, @@ -542,27 +542,28 @@ impl UnhandledResponse { fn validate_complete_response( identifier: &CandidateIdentifier, - backing_threshold: usize, + props: RequestProperties, response: AttestedCandidateResponse, requested_peer: PeerId, - mut sent_seconded_bitmask: BitVec, group: &[ValidatorIndex], session: SessionIndex, validator_key_lookup: impl Fn(ValidatorIndex) -> Option, allowed_para_lookup: impl Fn(ParaId, GroupIndex) -> bool, ) -> ResponseValidationOutput { + let RequestProperties { backing_threshold, mut unwanted_mask } = props; + // sanity check bitmask size. this is based entirely on // local logic here. - if sent_seconded_bitmask.len() != group.len() { + if unwanted_mask.has_len(group.len()) { gum::error!( target: LOG_TARGET, group_len = group.len(), - sent_bitmask_len = sent_seconded_bitmask.len(), "Logic bug: group size != sent bitmask len" ); // resize and attempt to continue. - sent_seconded_bitmask.resize(group.len(), true); + unwanted_mask.seconded_in_group.resize(group.len(), true); + unwanted_mask.validated_in_group.resize(group.len(), true); } let invalid_candidate_output = || ResponseValidationOutput { @@ -632,7 +633,7 @@ fn validate_complete_response( // duplicate trackers have the correct size for the group. 
match unchecked_statement.unchecked_payload() { CompactStatement::Seconded(_) => { - if sent_seconded_bitmask[i] { + if unwanted_mask.seconded_in_group[i] { rep_changes .push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); continue @@ -644,12 +645,19 @@ fn validate_complete_response( continue } }, - CompactStatement::Valid(_) => + CompactStatement::Valid(_) => { + if unwanted_mask.validated_in_group[i] { + rep_changes + .push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); + continue + } + if received_filter.validated_in_group[i] { rep_changes .push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); continue - }, + } + }, } let validator_public = @@ -683,10 +691,9 @@ fn validate_complete_response( rep_changes.push((requested_peer.clone(), BENEFIT_VALID_STATEMENT)); } - // Only accept responses which can back the candidate. - if !received_filter.has_seconded() || - received_filter.backing_validators() < backing_threshold - { + // Only accept responses which are sufficient, according to our + // required backing threshold. + if !seconded_and_sufficient(&received_filter, backing_threshold) { return invalid_candidate_output() } diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index fe07d2427f10..a91812fb952e 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -24,12 +24,13 @@ //! groups, and views based on the validators themselves. 
use bitvec::{order::Lsb0 as BitOrderLsb0, slice::BitSlice, vec::BitVec}; +use polkadot_node_network_protocol::vstaging::StatementFilter; use polkadot_primitives::vstaging::{ CandidateHash, CompactStatement, GroupIndex, SignedStatement, ValidatorIndex, }; use std::collections::hash_map::{Entry as HEntry, HashMap}; -use super::{grid::StatementFilter, groups::Groups}; +use super::groups::Groups; /// Possible origins of a statement. pub enum StatementOrigin { From ad940349cfc7055d62a548fb98da555d24be0ab6 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 9 Feb 2023 14:19:37 -0700 Subject: [PATCH 158/220] dispatch cluster requests correctly --- .../src/vstaging/mod.rs | 29 ++++++++++++++----- .../src/vstaging/requests.rs | 6 ++-- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 2a50ec989c40..794c1c316efc 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -1048,6 +1048,8 @@ async fn circulate_statement( let statement_group = per_session.groups.by_validator_index(originator); + // TODO [now]: we're not meant to circulate statements in the cluster + // until we have the confirmed candidate. let cluster_relevant = Some(local_validator.group) == statement_group; let cluster_targets = if cluster_relevant { Some( @@ -2310,17 +2312,24 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St let relay_parent_state = state.per_relay_parent.get(&identifier.relay_parent)?; let per_session = state.per_session.get(&relay_parent_state.session)?; + let local_validator = relay_parent_state.local_validator.as_ref()?; + for validator_id in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| { per_session.authority_lookup.get(a) }) { - let filter = relay_parent_state - .local_validator - .as_ref()? 
+ // For cluster members, they haven't advertised any statements in particular, + // but have surely sent us some. + if local_validator + .cluster_tracker + .knows_candidate(validator_id, identifier.candidate_hash) + { + return Some(StatementFilter::new(local_validator.cluster_tracker.targets().len())) + } + + let filter = local_validator .grid_tracker .advertised_statements(validator_id, &identifier.candidate_hash); - // TODO [now]: this doesn't handle requests in the cluster properly. - if let Some(f) = filter { return Some(f) } @@ -2344,10 +2353,16 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St } } - // TODO [now]: don't require backing threshold for cluster candidates. + // don't require a backing threshold for cluster candidates. + let require_backing = relay_parent_state.local_validator.as_ref()?.group != group_index; + Some(RequestProperties { unwanted_mask, - backing_threshold: Some(polkadot_node_primitives::minimum_votes(group.len())), + backing_threshold: if require_backing { + Some(polkadot_node_primitives::minimum_votes(group.len())) + } else { + None + }, }) }; diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index 3fab5d875619..c2ed3f1b255a 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -368,6 +368,8 @@ pub struct RequestProperties { /// be made to peers which can provide enough statements to back the candidate, when /// taking into account the unwanted_mask`, and a response will only be validated /// in the case of those statements. + /// + /// If this is `None`, it is assumed that only the candidate itself is needed. pub backing_threshold: Option, } @@ -412,10 +414,8 @@ fn find_request_target_with_update( } } -// TODO [now]: for cases where we already have statements, this isn't -// necessary. 
fn seconded_and_sufficient(filter: &StatementFilter, backing_threshold: Option) -> bool { - filter.has_seconded() && backing_threshold.map_or(true, |t| filter.backing_validators() >= t) + backing_threshold.map_or(true, |t| filter.has_seconded() && filter.backing_validators() >= t) } /// A response to a request, which has not yet been handled. From 0face3ff3c27bd262b93fdd6ad9867a76bcf2290 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 11 Feb 2023 17:17:45 -0700 Subject: [PATCH 159/220] rework cluster statement sending --- node/network/protocol/src/lib.rs | 12 +- .../src/vstaging/cluster.rs | 12 - .../src/vstaging/mod.rs | 211 +++++++++--------- .../src/vstaging/requests.rs | 2 +- .../src/vstaging/statement_store.rs | 2 + 5 files changed, 123 insertions(+), 116 deletions(-) diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index be07fd794249..ca7a3d677832 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -621,14 +621,22 @@ pub mod vstaging { } impl StatementFilter { - /// Create a new filter with the given group size. - pub fn new(group_size: usize) -> Self { + /// Create a new blank filter with the given group size. + pub fn blank(group_size: usize) -> Self { StatementFilter { seconded_in_group: BitVec::repeat(false, group_size), validated_in_group: BitVec::repeat(false, group_size), } } + /// Create a new full filter with the given group size. + pub fn full(group_size: usize) -> Self { + StatementFilter { + seconded_in_group: BitVec::repeat(true, group_size), + validated_in_group: BitVec::repeat(true, group_size), + } + } + /// Whether the filter has a specific expected length, consistent across both /// bitfields. 
pub fn has_len(&self, len: usize) -> bool { diff --git a/node/network/statement-distribution/src/vstaging/cluster.rs b/node/network/statement-distribution/src/vstaging/cluster.rs index 922dbbbb8f53..20bd46e0a540 100644 --- a/node/network/statement-distribution/src/vstaging/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/cluster.rs @@ -319,18 +319,6 @@ impl ClusterTracker { self.validator_seconded(validator, candidate_hash) } - /// Returns the validator-index of the producer a `Seconded` statement - /// for the candidate which is legal for us to send to all nodes in the cluster. - pub fn sendable_seconder(&self, candidate_hash: CandidateHash) -> Option { - for (v, k) in &self.knowledge { - if k.contains(&TaggedKnowledge::Seconded(candidate_hash)) { - return Some(*v) - } - } - - None - } - /// Returns a Vec of pending statements to be sent to a particular validator /// index. `Seconded` statements are sorted to the front of the vector. /// diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 794c1c316efc..0f0d9f2bfcee 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -723,6 +723,8 @@ async fn send_pending_cluster_statements( compact.clone(), ); + // TODO [now]: only send for confirmed candidates. + if res.is_some() { cluster_tracker.note_sent(peer_validator_id, originator, compact); } @@ -980,13 +982,24 @@ pub(crate) async fn share_local_statement( (compact_statement, candidate_hash) }; + // send the compact version of the statement to any peers which need it. 
+ circulate_statement( + ctx, + relay_parent, + per_relay_parent, + per_session, + &state.candidates, + &state.authorities, + &state.peers, + local_group, + compact_statement, + ) + .await; + if let Some(post_confirmation) = post_confirmation { apply_post_confirmation(ctx, state, post_confirmation); } - // send the compact version of the statement to any peers which need it. - circulate_statement(ctx, state, relay_parent, local_group, compact_statement).await; - Ok(()) } @@ -1014,44 +1027,39 @@ enum DirectTargetKind { #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn circulate_statement( ctx: &mut Context, - state: &mut State, relay_parent: Hash, + relay_parent_state: &mut PerRelayParentState, + per_session: &PerSessionState, + candidates: &Candidates, + authorities: &HashMap, + peers: &HashMap, group_index: GroupIndex, statement: SignedStatement, ) { - let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) { - Some(x) => x, - None => return, - }; - - let per_session = match state.per_session.get(&per_relay_parent.session) { - Some(s) => s, - None => return, - }; let session_info = &per_session.session_info; let candidate_hash = statement.payload().candidate_hash().clone(); - let mut prior_seconded = None; let compact_statement = statement.payload().clone(); let is_seconded = match compact_statement { CompactStatement::Seconded(_) => true, CompactStatement::Valid(_) => false, }; + let is_confirmed = candidates.is_confirmed(&candidate_hash); + let originator = statement.validator_index(); let (local_validator, targets) = { - let local_validator = match per_relay_parent.local_validator.as_mut() { + let local_validator = match relay_parent_state.local_validator.as_mut() { Some(v) => v, - None => return, // sanity: should be impossible to reach this. + None => return, // sanity: nothing to propagate if not a validator. 
}; let statement_group = per_session.groups.by_validator_index(originator); - // TODO [now]: we're not meant to circulate statements in the cluster - // until we have the confirmed candidate. + // We're not meant to circulate statements in the cluster until we have the confirmed candidate. let cluster_relevant = Some(local_validator.group) == statement_group; - let cluster_targets = if cluster_relevant { + let cluster_targets = if is_confirmed && cluster_relevant { Some( local_validator .cluster_tracker @@ -1089,64 +1097,17 @@ async fn circulate_statement( (local_validator, targets) }; - let mut prior_to = Vec::new(); let mut statement_to = Vec::new(); for (target, authority_id, kind) in targets { // Find peer ID based on authority ID, and also filter to connected. - let peer_id: PeerId = match state.authorities.get(&authority_id) { - Some(p) - if state.peers.get(p).map_or(false, |p| p.knows_relay_parent(&relay_parent)) => + let peer_id: PeerId = match authorities.get(&authority_id) { + Some(p) if peers.get(p).map_or(false, |p| p.knows_relay_parent(&relay_parent)) => p.clone(), None | Some(_) => continue, }; match kind { DirectTargetKind::Cluster => { - if !local_validator.cluster_tracker.knows_candidate(target, candidate_hash) && - !is_seconded - { - // lazily initialize this. - let prior_seconded = if let Some(ref p) = prior_seconded.as_ref() { - p - } else { - // This should always succeed because: - // 1. If this is not a `Seconded` statement we must have - // received at least one `Seconded` statement from other validators - // in our cluster. - // 2. We should have deposited all statements we've received into the statement store. - - match cluster_sendable_seconded_statement( - &local_validator.cluster_tracker, - &per_relay_parent.statement_store, - candidate_hash, - ) { - None => { - gum::warn!( - target: LOG_TARGET, - ?candidate_hash, - ?relay_parent, - "degenerate state: we authored a `Valid` statement without \ - knowing any `Seconded` statements." 
- ); - - return - }, - Some(s) => &*prior_seconded.get_or_insert(s.as_unchecked().clone()), - } - }; - - // One of the properties of the 'cluster sendable seconded statement' - // is that we `can_send` it to all nodes in the cluster which don't have the candidate already. And - // we're already in a branch that's gated off from cluster nodes - // which have knowledge of the candidate. - local_validator.cluster_tracker.note_sent( - target, - prior_seconded.unchecked_validator_index(), - CompactStatement::Seconded(candidate_hash), - ); - prior_to.push(peer_id); - } - // At this point, all peers in the cluster should 'know' // the candidate, so we don't expect for this to fail. if let Ok(()) = local_validator.cluster_tracker.can_send( @@ -1176,20 +1137,6 @@ async fn circulate_statement( // ship off the network messages to the network bridge. - if !prior_to.is_empty() { - let prior_seconded = - prior_seconded.expect("prior_to is only non-empty when prior_seconded exists; qed"); - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - prior_to, - Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - prior_seconded.clone(), - )) - .into(), - )) - .await; - } - if !statement_to.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( statement_to, @@ -1202,17 +1149,6 @@ async fn circulate_statement( .await; } } - -fn cluster_sendable_seconded_statement<'a>( - cluster_tracker: &ClusterTracker, - statement_store: &'a StatementStore, - candidate_hash: CandidateHash, -) -> Option<&'a SignedStatement> { - cluster_tracker.sendable_seconder(candidate_hash).and_then(|v| { - statement_store.validator_statement(v, CompactStatement::Seconded(candidate_hash)) - }) -} - /// Check a statement signature under this parent hash. fn check_statement_signature( session_index: SessionIndex, @@ -1462,7 +1398,18 @@ async fn handle_incoming_statement( } // We always circulate statements at this point. 
- circulate_statement(ctx, state, relay_parent, originator_group, checked_statement).await; + circulate_statement( + ctx, + relay_parent, + per_relay_parent, + per_session, + &state.candidates, + &state.authorities, + &state.peers, + originator_group, + checked_statement, + ) + .await; } else { report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT).await; } @@ -1600,7 +1547,7 @@ fn local_knowledge_filter( candidate_hash: CandidateHash, statement_store: &StatementStore, ) -> StatementFilter { - let mut f = StatementFilter::new(group_size); + let mut f = StatementFilter::blank(group_size); statement_store.fill_statement_filter(group_index, candidate_hash, &mut f); f } @@ -2281,9 +2228,65 @@ pub(crate) async fn handle_backed_candidate_message( .await; } +/// Sends all messages about a candidate to all peers in the cluster, +/// with `Seconded` statements first. +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn send_cluster_candidate_statements( + ctx: &mut Context, + state: &mut State, + candidate_hash: CandidateHash, + relay_parent: Hash, +) { + let relay_parent_state = match state.per_relay_parent.get_mut(&relay_parent) { + None => return, + Some(s) => s, + }; + + let per_session = match state.per_session.get(&relay_parent_state.session) { + None => return, + Some(s) => s, + }; + + let local_group = match relay_parent_state.local_validator.as_mut() { + None => return, + Some(v) => v.group, + }; + + let group_size = match per_session.groups.get(local_group) { + None => return, + Some(g) => g.len(), + }; + + let statements: Vec<_> = relay_parent_state + .statement_store + .group_statements( + &per_session.groups, + local_group, + candidate_hash, + &StatementFilter::full(group_size), + ) + .map(|x| x.clone()) + .collect(); + + for statement in statements { + circulate_statement( + ctx, + relay_parent, + relay_parent_state, + per_session, + &state.candidates, + &state.authorities, + &state.peers, + local_group, + statement, + ) + 
.await; + } +} + /// Applies state & p2p updates as a result of a newly confirmed candidate. /// -/// This punishes who advertised the candidate incorrectly, as well as +/// This punishes peers which advertised the candidate incorrectly, as well as /// doing an importability analysis of the confirmed candidate and providing /// statements to the backing subsystem if importable. It also cleans up /// any pending requests for the candidate. @@ -2299,6 +2302,14 @@ async fn apply_post_confirmation( let candidate_hash = post_confirmation.hypothetical.candidate_hash(); state.request_manager.remove_for(candidate_hash); + + send_cluster_candidate_statements( + ctx, + state, + candidate_hash, + post_confirmation.hypothetical.relay_parent(), + ) + .await; new_confirmed_candidate_fragment_tree_updates(ctx, state, post_confirmation.hypothetical).await; } @@ -2323,7 +2334,7 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St .cluster_tracker .knows_candidate(validator_id, identifier.candidate_hash) { - return Some(StatementFilter::new(local_validator.cluster_tracker.targets().len())) + return Some(StatementFilter::blank(local_validator.cluster_tracker.targets().len())) } let filter = local_validator @@ -2346,7 +2357,7 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St let seconding_limit = relay_parent_state.seconding_limit; // Request nothing which would be an 'over-seconded' statement. - let mut unwanted_mask = StatementFilter::new(group.len()); + let mut unwanted_mask = StatementFilter::blank(group.len()); for (i, v) in group.iter().enumerate() { if relay_parent_state.statement_store.seconded_count(v) >= seconding_limit { unwanted_mask.seconded_in_group.set(i, true); @@ -2466,6 +2477,7 @@ pub(crate) async fn handle_response<'a, Context>( } }; + // Note that this implicitly circulates all statements via the cluster. 
apply_post_confirmation(ctx, state, post_confirmation).await; let confirmed = state.candidates.get_confirmed(&candidate_hash).expect("just confirmed; qed"); @@ -2498,9 +2510,6 @@ pub(crate) async fn handle_response<'a, Context>( ) .await; - // TODO [now]: circulate fresh statements. if this is a grid candidate, this'll be a - // no-op. - // we don't need to send acknowledgement yet because // 1. the candidate is not known yet, so cannot be backed. // any previous confirmation is a bug, because `apply_post_confirmation` is meant to diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index c2ed3f1b255a..32ab1e30695f 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -603,7 +603,7 @@ fn validate_complete_response( let mut statements = Vec::with_capacity(std::cmp::min(response.statements.len(), group.len() * 2)); - let mut received_filter = StatementFilter::new(group.len()); + let mut received_filter = StatementFilter::blank(group.len()); let index_in_group = |v: ValidatorIndex| group.iter().position(|x| &v == x); diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index a91812fb952e..b18530c48578 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -177,6 +177,8 @@ impl StatementStore { /// Get an iterator over stored signed statements by the group conforming to the /// given filter. + /// + /// Seconded statements are provided first. 
pub fn group_statements<'a>( &'a self, groups: &'a Groups, From b21498827fea2222fbccb921eaa56388bd13fea5 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 12 Feb 2023 18:40:06 -0700 Subject: [PATCH 160/220] implement request answering --- .../src/vstaging/grid.rs | 11 ++ .../src/vstaging/mod.rs | 111 +++++++++++++++++- 2 files changed, 119 insertions(+), 3 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index cae653a2e955..5961b5d5207f 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -474,6 +474,17 @@ impl GridTracker { v } + /// Whether a validator can request a manifest from us. + pub fn can_request( + &self, + validator: ValidatorIndex, + candidate_hash: CandidateHash, + ) -> bool { + self.confirmed_backed.get(&candidate_hash).map_or(false, |c| { + c.has_sent_manifest_to(validator) && !c.has_received_manifest_from(validator) + }) + } + /// Which validators we could request the fully attested candidates from. /// If the candidate is already confirmed, then this will return an empty /// set. 
diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 0f0d9f2bfcee..ef0b5ace4b0e 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -24,7 +24,10 @@ use polkadot_node_network_protocol::{ self as net_protocol, grid_topology::{RequiredRouting, SessionGridTopology}, peer_set::ValidationVersion, - request_response::Requests, + request_response::{ + vstaging::{AttestedCandidateRequest, AttestedCandidateResponse}, + IncomingRequest, Requests, incoming::OutgoingResponse, + }, vstaging::{self as protocol_vstaging, StatementFilter}, IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; @@ -110,6 +113,11 @@ const COST_UNREQUESTED_RESPONSE_STATEMENT: Rep = const COST_INACCURATE_ADVERTISEMENT: Rep = Rep::CostMajor("Peer advertised a candidate inaccurately"); +const COST_INVALID_REQUEST_BITFIELD_SIZE: Rep = + Rep::CostMajor("Attested candidate request bitfields have wrong size"); +const COST_UNEXPECTED_REQUEST: Rep = + Rep::CostMajor("Unexpected ttested candidate request"); + const BENEFIT_VALID_RESPONSE: Rep = Rep::BenefitMajor("Peer Answered Candidate Request"); const BENEFIT_VALID_STATEMENT: Rep = Rep::BenefitMajor("Peer provided a valid statement"); const BENEFIT_VALID_STATEMENT_FIRST: Rep = @@ -2400,7 +2408,7 @@ pub(crate) async fn receive_response(state: &mut State) -> UnhandledResponse { /// Handles an incoming response. This does the actual work of validating the response, /// importing statements, sending acknowledgements, etc. #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -pub(crate) async fn handle_response<'a, Context>( +pub(crate) async fn handle_response( ctx: &mut Context, state: &mut State, response: UnhandledResponse, @@ -2519,4 +2527,101 @@ pub(crate) async fn handle_response<'a, Context>( // includable. } -// TODO [now]: answer request. 
+/// Answer an incoming request for a candidate. +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +pub(crate) async fn answer_request( + ctx: &mut Context, + state: &mut State, + request: IncomingRequest, +) { + // TODO [now]: wire this up with something like the legacy_v1 `responder` running in the background + // to bound the amount of parallel requests we serve. + + let AttestedCandidateRequest { + candidate_hash, + ref mask, + } = &request.payload; + + let confirmed = match state.candidates.get_confirmed(&candidate_hash) { + None => return, // drop request, candidate not known. + Some(c) => c, + }; + + let relay_parent_state = match state.per_relay_parent.get(&confirmed.relay_parent()) { + None => return, + Some(s) => s, + }; + + let local_validator = match relay_parent_state.local_validator.as_ref() { + None => return, + Some(s) => s, + }; + + let per_session = match state.per_session.get(&relay_parent_state.session) { + None => return, + Some(s) => s, + }; + + let peer_data = match state.peers.get(&request.peer) { + None => return, + Some(d) => d, + }; + + let group_size = per_session.groups.get(confirmed.group_index()) + .expect("group from session's candidate always known; qed") + .len(); + + // check request bitfields are right size. + if mask.seconded_in_group.len() != group_size || mask.validated_in_group.len() != group_size { + request.send_outgoing_response(OutgoingResponse { + result: Err(()), + reputation_changes: vec![COST_INVALID_REQUEST_BITFIELD_SIZE], + sent_feedback: None, + }); + + return; + } + + // check peer is allowed to request the candidate (i.e. 
we've sent them a manifest) + { + let mut can_request = false; + for validator_id in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| { + per_session.authority_lookup.get(a) + }) { + if local_validator.grid_tracker.can_request(validator_id, *candidate_hash) { + can_request = true; + break; + } + } + + if !can_request { + request.send_outgoing_response(OutgoingResponse { + result: Err(()), + reputation_changes: vec![COST_UNEXPECTED_REQUEST], + sent_feedback: None, + }); + + return; + } + } + + // Transform mask with 'OR' semantics into one with 'AND' semantics for the API used + // below. + let and_mask = StatementFilter { + seconded_in_group: !mask.seconded_in_group.clone(), + validated_in_group: !mask.validated_in_group.clone(), + }; + + let response = AttestedCandidateResponse { + candidate_receipt: (&**confirmed.candidate_receipt()).clone(), + persisted_validation_data: confirmed.persisted_validation_data().clone(), + statements: relay_parent_state.statement_store.group_statements( + &per_session.groups, + confirmed.group_index(), + *candidate_hash, + &and_mask, + ).map(|s| s.as_unchecked().clone()).collect() + }; + + request.send_response(response); +} From 78b1de6130681f424b1150245716b55a87f25f9a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 12 Feb 2023 18:40:13 -0700 Subject: [PATCH 161/220] fmt --- .../src/vstaging/grid.rs | 6 +-- .../src/vstaging/mod.rs | 37 ++++++++++--------- 2 files changed, 21 insertions(+), 22 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 5961b5d5207f..710dad47b30a 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -475,11 +475,7 @@ impl GridTracker { } /// Whether a validator can request a manifest from us. 
- pub fn can_request( - &self, - validator: ValidatorIndex, - candidate_hash: CandidateHash, - ) -> bool { + pub fn can_request(&self, validator: ValidatorIndex, candidate_hash: CandidateHash) -> bool { self.confirmed_backed.get(&candidate_hash).map_or(false, |c| { c.has_sent_manifest_to(validator) && !c.has_received_manifest_from(validator) }) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index ef0b5ace4b0e..e95c8e6c2895 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -25,8 +25,9 @@ use polkadot_node_network_protocol::{ grid_topology::{RequiredRouting, SessionGridTopology}, peer_set::ValidationVersion, request_response::{ + incoming::OutgoingResponse, vstaging::{AttestedCandidateRequest, AttestedCandidateResponse}, - IncomingRequest, Requests, incoming::OutgoingResponse, + IncomingRequest, Requests, }, vstaging::{self as protocol_vstaging, StatementFilter}, IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, @@ -115,8 +116,7 @@ const COST_INACCURATE_ADVERTISEMENT: Rep = const COST_INVALID_REQUEST_BITFIELD_SIZE: Rep = Rep::CostMajor("Attested candidate request bitfields have wrong size"); -const COST_UNEXPECTED_REQUEST: Rep = - Rep::CostMajor("Unexpected ttested candidate request"); +const COST_UNEXPECTED_REQUEST: Rep = Rep::CostMajor("Unexpected ttested candidate request"); const BENEFIT_VALID_RESPONSE: Rep = Rep::BenefitMajor("Peer Answered Candidate Request"); const BENEFIT_VALID_STATEMENT: Rep = Rep::BenefitMajor("Peer provided a valid statement"); @@ -2537,10 +2537,7 @@ pub(crate) async fn answer_request( // TODO [now]: wire this up with something like the legacy_v1 `responder` running in the background // to bound the amount of parallel requests we serve. 
- let AttestedCandidateRequest { - candidate_hash, - ref mask, - } = &request.payload; + let AttestedCandidateRequest { candidate_hash, ref mask } = &request.payload; let confirmed = match state.candidates.get_confirmed(&candidate_hash) { None => return, // drop request, candidate not known. @@ -2567,7 +2564,9 @@ pub(crate) async fn answer_request( Some(d) => d, }; - let group_size = per_session.groups.get(confirmed.group_index()) + let group_size = per_session + .groups + .get(confirmed.group_index()) .expect("group from session's candidate always known; qed") .len(); @@ -2579,7 +2578,7 @@ pub(crate) async fn answer_request( sent_feedback: None, }); - return; + return } // check peer is allowed to request the candidate (i.e. we've sent them a manifest) @@ -2590,7 +2589,7 @@ pub(crate) async fn answer_request( }) { if local_validator.grid_tracker.can_request(validator_id, *candidate_hash) { can_request = true; - break; + break } } @@ -2601,7 +2600,7 @@ pub(crate) async fn answer_request( sent_feedback: None, }); - return; + return } } @@ -2615,12 +2614,16 @@ pub(crate) async fn answer_request( let response = AttestedCandidateResponse { candidate_receipt: (&**confirmed.candidate_receipt()).clone(), persisted_validation_data: confirmed.persisted_validation_data().clone(), - statements: relay_parent_state.statement_store.group_statements( - &per_session.groups, - confirmed.group_index(), - *candidate_hash, - &and_mask, - ).map(|s| s.as_unchecked().clone()).collect() + statements: relay_parent_state + .statement_store + .group_statements( + &per_session.groups, + confirmed.group_index(), + *candidate_hash, + &and_mask, + ) + .map(|s| s.as_unchecked().clone()) + .collect(), }; request.send_response(response); From 2a7df7b912323c0cfc510efa62e8aa6b453c3b75 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 12 Feb 2023 18:44:18 -0700 Subject: [PATCH 162/220] only send confirmed candidate statement messages on unified relay-parent --- 
node/network/statement-distribution/src/vstaging/mod.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index e95c8e6c2895..30d41a8809e0 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -675,6 +675,7 @@ async fn send_peer_messages_for_relay_parent( &peer, validator_id, &mut local_validator_state.cluster_tracker, + &state.candidates, &relay_parent_state.statement_store, ) .await; @@ -717,12 +718,17 @@ async fn send_pending_cluster_statements( peer_id: &PeerId, peer_validator_id: ValidatorIndex, cluster_tracker: &mut ClusterTracker, + candidates: &Candidates, statement_store: &StatementStore, ) { let pending_statements = cluster_tracker.pending_statements_for(peer_validator_id); let network_messages = pending_statements .into_iter() .filter_map(|(originator, compact)| { + if !candidates.is_confirmed(compact.candidate_hash()) { + return None + } + let res = pending_statement_network_message( &statement_store, relay_parent, @@ -731,8 +737,6 @@ async fn send_pending_cluster_statements( compact.clone(), ); - // TODO [now]: only send for confirmed candidates. 
- if res.is_some() { cluster_tracker.note_sent(peer_validator_id, originator, compact); } From f09a1335b2d04b1b33c1d1eb3c1fc672f1da0c05 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Tue, 14 Feb 2023 13:07:47 +0100 Subject: [PATCH 163/220] Fix Tests In Statement Distribution Branch --- .../src/legacy_v1/tests.rs | 55 +++++++++++++ .../src/vstaging/cluster.rs | 38 +++++---- .../src/vstaging/grid.rs | 82 ++++++++++++------- primitives/src/v2/mod.rs | 2 +- 4 files changed, 133 insertions(+), 44 deletions(-) diff --git a/node/network/statement-distribution/src/legacy_v1/tests.rs b/node/network/statement-distribution/src/legacy_v1/tests.rs index f768a7370363..2191400d3d55 100644 --- a/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -786,6 +786,17 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { ))) .await; + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + ) + if r == hash_a + => { + let _ = tx.send(Err(polkadot_node_subsystem::RuntimeApiError::NotSupported{runtime_api_name: "async_backing_parameters"})); + } + ); + assert_matches!( handle.recv().await, AllMessages::RuntimeApi( @@ -1008,6 +1019,17 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( ))) .await; + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + ) + if r == hash_a + => { + let _ = tx.send(Err(polkadot_node_subsystem::RuntimeApiError::NotSupported{runtime_api_name: "async_backing_parameters"})); + } + ); + assert_matches!( handle.recv().await, AllMessages::RuntimeApi( @@ -1536,6 +1558,17 @@ fn share_prioritizes_backing_group() { ))) .await; + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(r, 
RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + ) + if r == hash_a + => { + let _ = tx.send(Err(polkadot_node_subsystem::RuntimeApiError::NotSupported{runtime_api_name: "async_backing_parameters"})); + } + ); + assert_matches!( handle.recv().await, AllMessages::RuntimeApi( @@ -1840,6 +1873,17 @@ fn peer_cant_flood_with_large_statements() { ))) .await; + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + ) + if r == hash_a + => { + let _ = tx.send(Err(polkadot_node_subsystem::RuntimeApiError::NotSupported{runtime_api_name: "async_backing_parameters"})); + } + ); + assert_matches!( handle.recv().await, AllMessages::RuntimeApi( @@ -2047,6 +2091,17 @@ fn handle_multiple_seconded_statements() { ))) .await; + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + ) + if r == relay_parent_hash + => { + let _ = tx.send(Err(polkadot_node_subsystem::RuntimeApiError::NotSupported{runtime_api_name: "async_backing_parameters"})); + } + ); + assert_matches!( handle.recv().await, AllMessages::RuntimeApi( diff --git a/node/network/statement-distribution/src/vstaging/cluster.rs b/node/network/statement-distribution/src/vstaging/cluster.rs index 20bd46e0a540..8102a68915b8 100644 --- a/node/network/statement-distribution/src/vstaging/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/cluster.rs @@ -970,21 +970,29 @@ mod tests { (ValidatorIndex(200), CompactStatement::Valid(hash_a)), ] ); - assert_eq!( - tracker.pending_statements_for(ValidatorIndex(24)), - vec![ - (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), - (ValidatorIndex(146), CompactStatement::Seconded(hash_b)) - ], - ); - assert_eq!( - tracker.pending_statements_for(ValidatorIndex(146)), - vec![ - (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), - (ValidatorIndex(146), 
CompactStatement::Seconded(hash_b)), - (ValidatorIndex(200), CompactStatement::Valid(hash_a)), - ] - ); + { + let mut pending_statements = tracker.pending_statements_for(ValidatorIndex(24)); + pending_statements.sort(); + assert_eq!( + pending_statements, + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(146), CompactStatement::Seconded(hash_b)) + ], + ); + } + { + let mut pending_statements = tracker.pending_statements_for(ValidatorIndex(146)); + pending_statements.sort(); + assert_eq!( + pending_statements, + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(146), CompactStatement::Seconded(hash_b)), + (ValidatorIndex(200), CompactStatement::Valid(hash_a)), + ] + ); + } } } diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 710dad47b30a..b777034d4d9d 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -1137,8 +1137,10 @@ mod tests { let expected_manifest_summary = ManifestSummary { claimed_parent_hash: Hash::repeat_byte(2), claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, }; knowledge @@ -1171,7 +1173,7 @@ mod tests { // conflicting seconded statements bitfield let mut s = expected_manifest_summary.clone(); - s.seconded_in_group = bitvec::bitvec![u8, Lsb0; 0, 1, 0]; + s.statement_knowledge.seconded_in_group = bitvec::bitvec![u8, Lsb0; 0, 1, 0]; assert_matches!( knowledge.import_received(3, 2, CandidateHash(Hash::repeat_byte(1)), s,), Err(ManifestImportError::Conflicting) @@ -1180,7 +1182,7 @@ mod tests { // conflicting valid statements bitfield let mut s = 
expected_manifest_summary.clone(); - s.validated_in_group = bitvec::bitvec![u8, Lsb0; 0, 1, 0]; + s.statement_knowledge.validated_in_group = bitvec::bitvec![u8, Lsb0; 0, 1, 0]; assert_matches!( knowledge.import_received(3, 2, CandidateHash(Hash::repeat_byte(1)), s,), Err(ManifestImportError::Conflicting) @@ -1198,8 +1200,10 @@ mod tests { ManifestSummary { claimed_parent_hash: Hash::repeat_byte(0xA), claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, }, ) .unwrap(); @@ -1212,8 +1216,10 @@ mod tests { ManifestSummary { claimed_parent_hash: Hash::repeat_byte(0xB), claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, }, ) .unwrap(); @@ -1226,8 +1232,10 @@ mod tests { ManifestSummary { claimed_parent_hash: Hash::repeat_byte(0xC), claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + } }, ), Err(ManifestImportError::Overflow) @@ -1241,8 +1249,10 @@ mod tests { ManifestSummary { claimed_parent_hash: Hash::repeat_byte(0xC), claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, 
Lsb0; 0, 1, 1], + }, }, ) .unwrap(); @@ -1287,8 +1297,10 @@ mod tests { ManifestSummary { claimed_parent_hash: Hash::repeat_byte(0), claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], - validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + } }, ManifestKind::Full, ValidatorIndex(1), @@ -1307,8 +1319,10 @@ mod tests { ManifestSummary { claimed_parent_hash: Hash::repeat_byte(0), claimed_group_index: GroupIndex(1), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], - validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + } }, ManifestKind::Full, ValidatorIndex(0), @@ -1354,8 +1368,10 @@ mod tests { ManifestSummary { claimed_parent_hash: Hash::repeat_byte(0), claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0, 1], - validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + } }, ManifestKind::Full, ValidatorIndex(0), @@ -1372,8 +1388,10 @@ mod tests { ManifestSummary { claimed_parent_hash: Hash::repeat_byte(0), claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], - validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1, 0], + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1, 0], + } }, ManifestKind::Full, ValidatorIndex(0), @@ -1419,8 +1437,10 @@ mod tests { ManifestSummary { claimed_parent_hash: Hash::repeat_byte(0), claimed_group_index: GroupIndex(0), - seconded_in_group: 
bitvec::bitvec![u8, Lsb0; 0, 0, 0], - validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + } }, ManifestKind::Full, ValidatorIndex(0), @@ -1468,8 +1488,10 @@ mod tests { ManifestSummary { claimed_parent_hash: Hash::repeat_byte(0), claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + } }, ManifestKind::Full, ValidatorIndex(0), @@ -1488,8 +1510,10 @@ mod tests { ManifestSummary { claimed_parent_hash: Hash::repeat_byte(0), claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + } }, ManifestKind::Full, ValidatorIndex(0), @@ -1508,8 +1532,10 @@ mod tests { ManifestSummary { claimed_parent_hash: Hash::repeat_byte(0), claimed_group_index: GroupIndex(0), - seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], - validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + } }, ManifestKind::Full, ValidatorIndex(0), diff --git a/primitives/src/v2/mod.rs b/primitives/src/v2/mod.rs index e8e80c931a88..11cffc1272c2 100644 --- a/primitives/src/v2/mod.rs +++ b/primitives/src/v2/mod.rs @@ -1451,7 +1451,7 @@ const BACKING_STATEMENT_MAGIC: [u8; 4] = *b"BKNG"; /// Statements that can be made about parachain candidates. These are the /// actual values that are signed. 
-#[derive(Clone, PartialEq, Eq, RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Hash))] pub enum CompactStatement { /// Proposal of a parachain candidate. From 0996882831c194fe9b9da8fa91ab25460fd4251c Mon Sep 17 00:00:00 2001 From: Marcin S Date: Wed, 15 Feb 2023 22:52:39 +0100 Subject: [PATCH 164/220] Async Backing: Integrate `vstaging` of statement distribution into `lib.rs` (#6715) * Integrate `handle_active_leaves_update` * Integrate `share_local_statement`/`handle_backed_candidate_message` * Start hooking up request/response flow * Finish hooking up request/response flow * Limit number of parallel requests in responder * Fix test compilation errors * Fix missing check for prospective parachains mode * Fix some more compile errors --- .../src/legacy_v1/mod.rs | 4 +- .../src/legacy_v1/responder.rs | 4 +- .../src/legacy_v1/tests.rs | 10 + .../network/statement-distribution/src/lib.rs | 202 ++++++++++++------ .../src/vstaging/mod.rs | 85 +++++++- .../src/vstaging/statement_store.rs | 2 +- node/service/src/lib.rs | 3 + node/service/src/overseer.rs | 4 + .../src/inclusion_emulator/staging.rs | 2 +- 9 files changed, 242 insertions(+), 74 deletions(-) diff --git a/node/network/statement-distribution/src/legacy_v1/mod.rs b/node/network/statement-distribution/src/legacy_v1/mod.rs index 3a4d7e49663b..5cedf9677976 100644 --- a/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -103,7 +103,7 @@ const MAX_LARGE_STATEMENTS_PER_SENDER: usize = 20; /// Overall state of the legacy-v1 portion of the subsystem. 
pub(crate) struct State { - peers: HashMap, + pub peers: HashMap, topology_storage: SessionBoundGridTopologyStorage, authorities: HashMap, active_heads: HashMap, @@ -426,7 +426,7 @@ impl PeerRelayParentKnowledge { } } -struct PeerData { +pub struct PeerData { view: View, protocol_version: ValidationVersion, view_knowledge: HashMap, diff --git a/node/network/statement-distribution/src/legacy_v1/responder.rs b/node/network/statement-distribution/src/legacy_v1/responder.rs index 8db38385e581..e9e45f56fe68 100644 --- a/node/network/statement-distribution/src/legacy_v1/responder.rs +++ b/node/network/statement-distribution/src/legacy_v1/responder.rs @@ -48,8 +48,8 @@ pub enum ResponderMessage { /// A fetching task, taking care of fetching large statements via request/response. /// -/// A fetch task does not know about a particular `Statement` instead it just tries fetching a -/// `CommittedCandidateReceipt` from peers, whether this can be used to re-assemble one ore +/// A fetch task does not know about a particular `Statement`, instead it just tries fetching a +/// `CommittedCandidateReceipt` from peers, whether this can be used to re-assemble one or /// many `SignedFullStatement`s needs to be verified by the caller. 
pub async fn respond( mut receiver: IncomingRequestReceiver, diff --git a/node/network/statement-distribution/src/legacy_v1/tests.rs b/node/network/statement-distribution/src/legacy_v1/tests.rs index 2191400d3d55..0764040921cd 100644 --- a/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -762,11 +762,13 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); let (statement_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); + let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); let bg = async move { let s = StatementDistributionSubsystem::new( Arc::new(LocalKeystore::in_memory()), statement_req_receiver, + candidate_req_receiver, Default::default(), AlwaysZeroRng, ); @@ -995,11 +997,13 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); let (statement_req_receiver, mut req_cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); + let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); let bg = async move { let s = StatementDistributionSubsystem::new( make_ferdie_keystore(), statement_req_receiver, + candidate_req_receiver, Default::default(), AlwaysZeroRng, ); @@ -1534,11 +1538,13 @@ fn share_prioritizes_backing_group() { let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); let (statement_req_receiver, mut req_cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); + let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); let bg = async move { let s = StatementDistributionSubsystem::new( make_ferdie_keystore(), statement_req_receiver, + candidate_req_receiver, Default::default(), AlwaysZeroRng, ); @@ -1850,10 +1856,12 @@ fn 
peer_cant_flood_with_large_statements() { let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); let (statement_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); + let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); let bg = async move { let s = StatementDistributionSubsystem::new( make_ferdie_keystore(), statement_req_receiver, + candidate_req_receiver, Default::default(), AlwaysZeroRng, ); @@ -2067,11 +2075,13 @@ fn handle_multiple_seconded_statements() { let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); let (statement_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); + let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); let virtual_overseer_fut = async move { let s = StatementDistributionSubsystem::new( Arc::new(LocalKeystore::in_memory()), statement_req_receiver, + candidate_req_receiver, Default::default(), AlwaysZeroRng, ); diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index f4e4dde9a185..5f63baee4bc5 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -25,7 +25,9 @@ use error::{log_error, FatalResult}; use polkadot_node_network_protocol::{ - request_response::{v1 as request_v1, IncomingRequestReceiver}, + request_response::{ + v1 as request_v1, vstaging::AttestedCandidateRequest, IncomingRequestReceiver, + }, vstaging as protocol_vstaging, Versioned, }; use polkadot_node_primitives::StatementWithPVD; @@ -66,6 +68,8 @@ pub struct StatementDistributionSubsystem { keystore: SyncCryptoStorePtr, /// Receiver for incoming large statement requests. v1_req_receiver: Option>, + /// Receiver for incoming candidate requests. 
+ req_receiver: Option>, /// Prometheus metrics metrics: Metrics, /// Pseudo-random generator for peers selection logic @@ -95,25 +99,41 @@ enum MuxedMessage { V1Requester(Option), /// Messages from spawned v1 (legacy) responder background task. V1Responder(Option), + /// Messages from candidate responder background task. + Responder(Option), + /// Messages from answered requests. + Response(vstaging::UnhandledResponse), } #[overseer::contextbounds(StatementDistribution, prefix = self::overseer)] impl MuxedMessage { async fn receive( ctx: &mut Context, + state: &mut vstaging::State, from_v1_requester: &mut mpsc::Receiver, from_v1_responder: &mut mpsc::Receiver, + from_responder: &mut mpsc::Receiver, ) -> MuxedMessage { // We are only fusing here to make `select` happy, in reality we will quit if one of those // streams end: let from_orchestra = ctx.recv().fuse(); let from_v1_requester = from_v1_requester.next(); let from_v1_responder = from_v1_responder.next(); - futures::pin_mut!(from_orchestra, from_v1_requester, from_v1_responder); + let from_responder = from_responder.next(); + let receive_response = vstaging::receive_response(state).fuse(); + futures::pin_mut!( + from_orchestra, + from_v1_requester, + from_v1_responder, + from_responder, + receive_response + ); futures::select! 
{ msg = from_orchestra => MuxedMessage::Subsystem(msg.map_err(FatalError::SubsystemReceive)), msg = from_v1_requester => MuxedMessage::V1Requester(msg), msg = from_v1_responder => MuxedMessage::V1Responder(msg), + msg = from_responder => MuxedMessage::Responder(msg), + msg = receive_response => MuxedMessage::Response(msg), } } } @@ -124,14 +144,22 @@ impl StatementDistributionSubsystem { pub fn new( keystore: SyncCryptoStorePtr, v1_req_receiver: IncomingRequestReceiver, + req_receiver: IncomingRequestReceiver, metrics: Metrics, rng: R, ) -> Self { - Self { keystore, v1_req_receiver: Some(v1_req_receiver), metrics, rng } + Self { + keystore, + v1_req_receiver: Some(v1_req_receiver), + req_receiver: Some(req_receiver), + metrics, + rng, + } } async fn run(mut self, mut ctx: Context) -> std::result::Result<(), FatalError> { let mut legacy_v1_state = crate::legacy_v1::State::new(self.keystore.clone()); + let mut state = crate::vstaging::State::new(self.keystore.clone()); // Sender/Receiver for getting news from our statement fetching tasks. let (v1_req_sender, mut v1_req_receiver) = mpsc::channel(1); @@ -148,16 +176,34 @@ impl StatementDistributionSubsystem { ) .map_err(FatalError::SpawnTask)?; - // TODO [now]: handle vstaging req/res: dispatch pending requests & handling responses. + // Sender/receiver for getting news from our candidate responder task. + let (res_sender, mut res_receiver) = mpsc::channel(1); + + ctx.spawn( + "candidate-responder", + vstaging::respond_task( + self.req_receiver.take().expect("Mandatory argument to new. 
qed"), + res_sender.clone(), + ) + .boxed(), + ) + .map_err(FatalError::SpawnTask)?; loop { - let message = - MuxedMessage::receive(&mut ctx, &mut v1_req_receiver, &mut v1_res_receiver).await; + let message = MuxedMessage::receive( + &mut ctx, + &mut state, + &mut v1_req_receiver, + &mut v1_res_receiver, + &mut res_receiver, + ) + .await; match message { MuxedMessage::Subsystem(result) => { let result = self .handle_subsystem_message( &mut ctx, + &mut state, &mut legacy_v1_state, &v1_req_sender, result?, @@ -189,7 +235,20 @@ impl StatementDistributionSubsystem { .await; log_error(result.map_err(From::from), "handle_responder_message")?; }, + MuxedMessage::Responder(result) => { + vstaging::answer_request( + &mut ctx, + &mut state, + result.ok_or(FatalError::RequesterReceiverFinished)?, + ) + .await; + }, + MuxedMessage::Response(result) => { + vstaging::handle_response(&mut ctx, &mut state, result).await; + }, }; + + vstaging::dispatch_requests(&mut ctx, &mut state).await; } Ok(()) } @@ -197,6 +256,7 @@ impl StatementDistributionSubsystem { async fn handle_subsystem_message( &mut self, ctx: &mut Context, + state: &mut vstaging::State, legacy_v1_state: &mut legacy_v1::State, v1_req_sender: &mpsc::Sender, message: FromOrchestra, @@ -210,16 +270,35 @@ impl StatementDistributionSubsystem { })) => { let _timer = metrics.time_active_leaves_update(); - // TODO [now]: vstaging should handle activated first - // because of implicit view. + // vstaging should handle activated first because of implicit view. + let mut mode = None; + if let Some(ref activated) = activated { + mode = Some(prospective_parachains_mode(ctx.sender(), activated.hash).await?); + if let Some(ProspectiveParachainsMode::Enabled { .. 
}) = mode { + vstaging::handle_active_leaves_update( + ctx, + state, + ActiveLeavesUpdate { + activated: Some(activated.clone()), + deactivated: vec![].into(), + }, + ) + .await?; + } + } + vstaging::handle_active_leaves_update( + ctx, + state, + ActiveLeavesUpdate { activated: None, deactivated: deactivated.clone() }, + ) + .await?; + for deactivated in deactivated { crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, deactivated); } - if let Some(activated) = activated { // Legacy, activate only if no prospective parachains support. - let mode = prospective_parachains_mode(ctx.sender(), activated.hash).await?; - if let ProspectiveParachainsMode::Disabled = mode { + if let Some(ProspectiveParachainsMode::Disabled) = mode { crate::legacy_v1::handle_activated_leaf(ctx, legacy_v1_state, activated) .await?; } @@ -229,60 +308,63 @@ impl StatementDistributionSubsystem { // do nothing }, FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(true), - FromOrchestra::Communication { msg } => - match msg { - StatementDistributionMessage::Share(relay_parent, statement) => { - let _timer = metrics.time_share(); - - // pass to legacy if legacy state contains head. - if legacy_v1_state.contains_relay_parent(&relay_parent) { - crate::legacy_v1::share_local_statement( - ctx, - legacy_v1_state, - relay_parent, - StatementWithPVD::drop_pvd_from_signed(statement), - &mut self.rng, - metrics, - ) + FromOrchestra::Communication { msg } => match msg { + StatementDistributionMessage::Share(relay_parent, statement) => { + let _timer = metrics.time_share(); + + // pass to legacy if legacy state contains head. 
+ if legacy_v1_state.contains_relay_parent(&relay_parent) { + crate::legacy_v1::share_local_statement( + ctx, + legacy_v1_state, + relay_parent, + StatementWithPVD::drop_pvd_from_signed(statement), + &mut self.rng, + metrics, + ) + .await?; + } else { + vstaging::share_local_statement(ctx, state, relay_parent, statement) .await?; - } - }, - StatementDistributionMessage::NetworkBridgeUpdate(event) => { - // pass to legacy, but not if the message isn't - // v1. - let legacy = match &event { - &NetworkBridgeEvent::PeerMessage(_, ref message) => match message { - Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility(_)) => true, - Versioned::V1(_) => true, - Versioned::VStaging(_) => false, - }, - _ => true, - }; - - if legacy { - crate::legacy_v1::handle_network_update( - ctx, - legacy_v1_state, - v1_req_sender, - event, - &mut self.rng, - metrics, - ) - .await; - } - - // TODO [now]: pass to vstaging, but not if the message is - // v1 or the connecting peer is v1. - }, - StatementDistributionMessage::Backed(candidate_hash) => { - crate::vstaging::handle_backed_candidate_message( + } + }, + StatementDistributionMessage::NetworkBridgeUpdate(event) => { + // pass to legacy, but not if the message isn't v1. 
+ let (legacy, peer_id) = match &event { + &NetworkBridgeEvent::PeerMessage(peer_id, ref message) => match message { + Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), + ) => (true, Some(peer_id)), + Versioned::V1(_) => (true, Some(peer_id)), + Versioned::VStaging(_) => (false, Some(peer_id)), + }, + _ => (true, None), + }; + + if legacy { + crate::legacy_v1::handle_network_update( ctx, - unimplemented!(), // TODO [now] state - candidate_hash, + legacy_v1_state, + v1_req_sender, + event, + &mut self.rng, + metrics, ) .await; - }, + } else if peer_id + .map_or(false, |peer_id| !legacy_v1_state.peers.contains_key(&peer_id)) + { + // pass to vstaging, but not if the message is + // v1 or the connecting peer is v1. + // TODO: Is the check above correct? + vstaging::handle_network_update(ctx, state, event).await; + } + }, + StatementDistributionMessage::Backed(candidate_hash) => { + crate::vstaging::handle_backed_candidate_message(ctx, state, candidate_hash) + .await; }, + }, } Ok(false) } diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 30d41a8809e0..8e3ece172d4c 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -27,7 +27,8 @@ use polkadot_node_network_protocol::{ request_response::{ incoming::OutgoingResponse, vstaging::{AttestedCandidateRequest, AttestedCandidateResponse}, - IncomingRequest, Requests, + IncomingRequest, IncomingRequestReceiver, Requests, + MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, }, vstaging::{self as protocol_vstaging, StatementFilter}, IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, @@ -56,7 +57,12 @@ use polkadot_primitives::vstaging::{ use sp_keystore::SyncCryptoStorePtr; -use futures::channel::oneshot; +use fatality::Nested; +use futures::{ + channel::{mpsc, oneshot}, + stream::FuturesUnordered, + SinkExt, 
StreamExt, +}; use indexmap::IndexMap; use std::collections::{ @@ -114,9 +120,10 @@ const COST_UNREQUESTED_RESPONSE_STATEMENT: Rep = const COST_INACCURATE_ADVERTISEMENT: Rep = Rep::CostMajor("Peer advertised a candidate inaccurately"); +const COST_INVALID_REQUEST: Rep = Rep::CostMajor("Peer sent unparsable request"); const COST_INVALID_REQUEST_BITFIELD_SIZE: Rep = Rep::CostMajor("Attested candidate request bitfields have wrong size"); -const COST_UNEXPECTED_REQUEST: Rep = Rep::CostMajor("Unexpected ttested candidate request"); +const COST_UNEXPECTED_REQUEST: Rep = Rep::CostMajor("Unexpected attested candidate request"); const BENEFIT_VALID_RESPONSE: Rep = Rep::BenefitMajor("Peer Answered Candidate Request"); const BENEFIT_VALID_STATEMENT: Rep = Rep::BenefitMajor("Peer provided a valid statement"); @@ -218,6 +225,22 @@ pub(crate) struct State { request_manager: RequestManager, } +impl State { + /// Create a new state. + pub(crate) fn new(keystore: SyncCryptoStorePtr) -> Self { + State { + implicit_view: Default::default(), + candidates: Default::default(), + per_relay_parent: HashMap::new(), + per_session: HashMap::new(), + peers: HashMap::new(), + keystore, + authorities: HashMap::new(), + request_manager: RequestManager::new(), + } + } +} + // For the provided validator index, if there is a connected peer controlling the given authority // ID, then return that peer's `PeerId`. 
fn connected_validator_peer( @@ -627,7 +650,7 @@ async fn handle_peer_view_update( fn find_validator_ids<'a>( known_discovery_ids: impl IntoIterator, discovery_mapping: impl Fn(&AuthorityDiscoveryId) -> Option<&'a ValidatorIndex>, -) -> impl IntoIterator { +) -> impl Iterator { known_discovery_ids.into_iter().filter_map(discovery_mapping).cloned() } @@ -2536,13 +2559,14 @@ pub(crate) async fn handle_response( pub(crate) async fn answer_request( ctx: &mut Context, state: &mut State, - request: IncomingRequest, + message: ResponderMessage, ) { - // TODO [now]: wire this up with something like the legacy_v1 `responder` running in the background - // to bound the amount of parallel requests we serve. - + let ResponderMessage { request, sent_feedback } = message; let AttestedCandidateRequest { candidate_hash, ref mask } = &request.payload; + // Signal to the responder that we started processing this request. + let _ = sent_feedback.send(()); + let confirmed = match state.candidates.get_confirmed(&candidate_hash) { None => return, // drop request, candidate not known. Some(c) => c, @@ -2632,3 +2656,48 @@ pub(crate) async fn answer_request( request.send_response(response); } + +/// Messages coming from the background respond task. +pub struct ResponderMessage { + request: IncomingRequest, + sent_feedback: oneshot::Sender<()>, +} + +/// A fetching task, taking care of fetching candidates via request/response. +/// +/// Runs in a background task and feeds request to [`answer_request`] through [`MuxedMessage`]. +pub async fn respond_task( + mut receiver: IncomingRequestReceiver, + mut sender: mpsc::Sender, +) { + let mut pending_out = FuturesUnordered::new(); + loop { + // Ensure we are not handling too many requests in parallel. 
+ if pending_out.len() >= MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize { + // Wait for one to finish: + pending_out.next().await; + } + + let req = match receiver.recv(|| vec![COST_INVALID_REQUEST]).await.into_nested() { + Ok(Ok(v)) => v, + Err(fatal) => { + gum::debug!(target: LOG_TARGET, error = ?fatal, "Shutting down request responder"); + return + }, + Ok(Err(jfyi)) => { + gum::debug!(target: LOG_TARGET, error = ?jfyi, "Decoding request failed"); + continue + }, + }; + + let (pending_sent_tx, pending_sent_rx) = oneshot::channel(); + if let Err(err) = sender + .feed(ResponderMessage { request: req, sent_feedback: pending_sent_tx }) + .await + { + gum::debug!(target: LOG_TARGET, ?err, "Shutting down responder"); + return + } + pending_out.push(pending_sent_rx); + } +} diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index b18530c48578..4757c60fc0f9 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -223,7 +223,7 @@ impl StatementStore { &'a self, validators: &'a [ValidatorIndex], candidate_hash: CandidateHash, - ) -> impl IntoIterator + 'a { + ) -> impl Iterator + 'a { let s_st = CompactStatement::Seconded(candidate_hash); let v_st = CompactStatement::Valid(candidate_hash); diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index ff3f463d52f5..079dd8e0a26f 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -897,6 +897,8 @@ where config.network.request_response_protocols.push(cfg); let (statement_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); config.network.request_response_protocols.push(cfg); + let (candidate_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); + config.network.request_response_protocols.push(cfg); let (dispute_req_receiver, cfg) = 
IncomingRequest::get_config_receiver(&req_protocol_names); config.network.request_response_protocols.push(cfg); @@ -1076,6 +1078,7 @@ where collation_req_vstaging_receiver, available_data_req_receiver, statement_req_receiver, + candidate_req_receiver, dispute_req_receiver, registry: prometheus_registry.as_ref(), spawner, diff --git a/node/service/src/overseer.rs b/node/service/src/overseer.rs index 83291addb762..2eeb940a3aa4 100644 --- a/node/service/src/overseer.rs +++ b/node/service/src/overseer.rs @@ -110,6 +110,8 @@ where IncomingRequestReceiver, /// Receiver for incoming large statement requests. pub statement_req_receiver: IncomingRequestReceiver, + /// Receiver for incoming candidate requests. + pub candidate_req_receiver: IncomingRequestReceiver, /// Receiver for incoming disputes. pub dispute_req_receiver: IncomingRequestReceiver, /// Prometheus registry, commonly used for production systems, less so for test. @@ -154,6 +156,7 @@ pub fn prepared_overseer_builder( collation_req_vstaging_receiver, available_data_req_receiver, statement_req_receiver, + candidate_req_receiver, dispute_req_receiver, registry, spawner, @@ -293,6 +296,7 @@ where .statement_distribution(StatementDistributionSubsystem::new( keystore.clone(), statement_req_receiver, + candidate_req_receiver, Metrics::register(registry)?, rand::rngs::StdRng::from_entropy(), )) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 3aaee43d79fe..c2889a4f90e5 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -1358,7 +1358,7 @@ mod tests { ); } - candidate.commitments.processed_downward_messages = 1; + candidate.commitments.to_mut().processed_downward_messages = 1; assert!(Fragment::new(relay_parent, constraints, candidate).is_ok()); } From 1e3c63eed200e785ee2ede7b6bf3885d5bd8c3e4 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 15 Feb 2023 
15:04:23 -0700 Subject: [PATCH 165/220] clean up some review comments --- .../src/legacy_v1/mod.rs | 2 +- .../network/statement-distribution/src/lib.rs | 103 +++++++++++------- 2 files changed, 66 insertions(+), 39 deletions(-) diff --git a/node/network/statement-distribution/src/legacy_v1/mod.rs b/node/network/statement-distribution/src/legacy_v1/mod.rs index 5cedf9677976..f5c174f28184 100644 --- a/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -103,7 +103,7 @@ const MAX_LARGE_STATEMENTS_PER_SENDER: usize = 20; /// Overall state of the legacy-v1 portion of the subsystem. pub(crate) struct State { - pub peers: HashMap, + peers: HashMap, topology_storage: SessionBoundGridTopologyStorage, authorities: HashMap, active_heads: HashMap, diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 5f63baee4bc5..89971c6a211f 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -271,37 +271,43 @@ impl StatementDistributionSubsystem { let _timer = metrics.time_active_leaves_update(); // vstaging should handle activated first because of implicit view. - let mut mode = None; if let Some(ref activated) = activated { - mode = Some(prospective_parachains_mode(ctx.sender(), activated.hash).await?); - if let Some(ProspectiveParachainsMode::Enabled { .. }) = mode { + let mode = prospective_parachains_mode(ctx.sender(), activated.hash).await?; + if let ProspectiveParachainsMode::Enabled { .. 
} = mode { vstaging::handle_active_leaves_update( ctx, state, - ActiveLeavesUpdate { - activated: Some(activated.clone()), - deactivated: vec![].into(), - }, + ActiveLeavesUpdate { activated: Some(activated.clone()), deactivated }, + ) + .await?; + } else if let ProspectiveParachainsMode::Disabled = mode { + for deactivated in &deactivated { + crate::legacy_v1::handle_deactivate_leaf( + legacy_v1_state, + deactivated.clone(), + ); + } + + crate::legacy_v1::handle_activated_leaf( + ctx, + legacy_v1_state, + activated.clone(), ) .await?; } - } - vstaging::handle_active_leaves_update( - ctx, - state, - ActiveLeavesUpdate { activated: None, deactivated: deactivated.clone() }, - ) - .await?; - - for deactivated in deactivated { - crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, deactivated); - } - if let Some(activated) = activated { - // Legacy, activate only if no prospective parachains support. - if let Some(ProspectiveParachainsMode::Disabled) = mode { - crate::legacy_v1::handle_activated_leaf(ctx, legacy_v1_state, activated) - .await?; + } else { + for deactivated in &deactivated { + crate::legacy_v1::handle_deactivate_leaf( + legacy_v1_state, + deactivated.clone(), + ); } + vstaging::handle_active_leaves_update( + ctx, + state, + ActiveLeavesUpdate { activated: None, deactivated }, + ) + .await?; } }, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => { @@ -329,34 +335,55 @@ impl StatementDistributionSubsystem { } }, StatementDistributionMessage::NetworkBridgeUpdate(event) => { - // pass to legacy, but not if the message isn't v1. - let (legacy, peer_id) = match &event { - &NetworkBridgeEvent::PeerMessage(peer_id, ref message) => match message { + // pass all events to both protocols except for messages, + // which are filtered. 
+ enum VersionTarget { + Legacy, + Current, + Both, + } + + impl VersionTarget { + fn targets_legacy(&self) -> bool { + match self { + &VersionTarget::Legacy | &VersionTarget::Both => true, + _ => false, + } + } + + fn targets_current(&self) -> bool { + match self { + &VersionTarget::Current | &VersionTarget::Both => true, + _ => false, + } + } + } + + let target = match &event { + &NetworkBridgeEvent::PeerMessage(_, ref message) => match message { Versioned::VStaging( protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), - ) => (true, Some(peer_id)), - Versioned::V1(_) => (true, Some(peer_id)), - Versioned::VStaging(_) => (false, Some(peer_id)), + ) => VersionTarget::Legacy, + Versioned::V1(_) => VersionTarget::Legacy, + Versioned::VStaging(_) => VersionTarget::Current, }, - _ => (true, None), + _ => VersionTarget::Both, }; - if legacy { + if target.targets_legacy() { crate::legacy_v1::handle_network_update( ctx, legacy_v1_state, v1_req_sender, - event, + event.clone(), &mut self.rng, metrics, ) .await; - } else if peer_id - .map_or(false, |peer_id| !legacy_v1_state.peers.contains_key(&peer_id)) - { - // pass to vstaging, but not if the message is - // v1 or the connecting peer is v1. - // TODO: Is the check above correct? + } + + if target.targets_current() { + // pass to vstaging. 
vstaging::handle_network_update(ctx, state, event).await; } }, From f1264d9f92c91346a0ef6b99260522280fe3935c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 15 Feb 2023 15:31:56 -0700 Subject: [PATCH 166/220] clean up warnings --- .../network/statement-distribution/src/lib.rs | 4 +- .../src/vstaging/candidates.rs | 23 +--- .../src/vstaging/cluster.rs | 10 +- .../src/vstaging/grid.rs | 58 +-------- .../src/vstaging/groups.rs | 20 +--- .../src/vstaging/mod.rs | 110 ++++++------------ .../src/vstaging/requests.rs | 20 ++-- .../src/vstaging/statement_store.rs | 10 +- 8 files changed, 57 insertions(+), 198 deletions(-) diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 89971c6a211f..aeac6db42bc5 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -237,11 +237,9 @@ impl StatementDistributionSubsystem { }, MuxedMessage::Responder(result) => { vstaging::answer_request( - &mut ctx, &mut state, result.ok_or(FatalError::RequesterReceiverFinished)?, - ) - .await; + ); }, MuxedMessage::Response(result) => { vstaging::handle_response(&mut ctx, &mut state, result).await; diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index 9d7a0550bb0a..b356fd6a8ab0 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -162,7 +162,6 @@ impl Candidates { assigned_group, parent_hash, importable_under: HashSet::new(), - backed: false, }), ); let new_confirmed = @@ -246,11 +245,6 @@ impl Candidates { self.get_confirmed(candidate_hash).map_or(false, |c| c.is_importable(None)) } - /// Whether the candidate is marked as backed. 
- pub fn is_backed(&self, candidate_hash: &CandidateHash) -> bool { - self.get_confirmed(candidate_hash).map_or(false, |c| c.is_backed()) - } - /// Note that a candidate is importable in a fragment tree indicated by the given /// leaf hash. pub fn note_importable_under(&mut self, candidate: &HypotheticalCandidate, leaf_hash: Hash) { @@ -283,15 +277,6 @@ impl Candidates { } } - /// Note that a candidate is backed. No-op if the candidate is not confirmed. - pub fn note_backed(&mut self, candidate_hash: &CandidateHash) { - if let Some(&mut CandidateState::Confirmed(ref mut c)) = - self.candidates.get_mut(candidate_hash) - { - c.backed = true; - } - } - /// Get all hypothetical candidates which should be tested /// for inclusion in the frontier. /// @@ -460,7 +445,7 @@ impl UnconfirmedCandidate { if let Some(parent_claims) = c.1.parent_hash_and_id { if let Entry::Occupied(mut e) = self.parent_claims.entry(parent_claims) { if let Some(p) = e.get().iter().position(|x| x.0 == c.1.relay_parent) { - let mut sub_claims = e.get_mut(); + let sub_claims = e.get_mut(); sub_claims[p].1 -= 1; if sub_claims[p].1 == 0 { sub_claims.remove(p); @@ -528,7 +513,6 @@ pub struct ConfirmedCandidate { parent_hash: Hash, // active leaves statements about this candidate are importable under. importable_under: HashSet, - backed: bool, } impl ConfirmedCandidate { @@ -560,11 +544,6 @@ impl ConfirmedCandidate { } } - /// Whether the candidate is marked as being backed. - pub fn is_backed(&self) -> bool { - self.backed - } - /// Get the parent head data hash. pub fn parent_head_data_hash(&self) -> Hash { self.parent_hash diff --git a/node/network/statement-distribution/src/vstaging/cluster.rs b/node/network/statement-distribution/src/vstaging/cluster.rs index 8102a68915b8..a63d1387d2d0 100644 --- a/node/network/statement-distribution/src/vstaging/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/cluster.rs @@ -55,8 +55,6 @@ //! 
and to keep track of what we have sent to other validators in the group and what we may //! continue to send them. -use std::ops::Range; - use polkadot_primitives::vstaging::{CandidateHash, CompactStatement, ValidatorIndex}; use std::collections::{HashMap, HashSet}; @@ -198,7 +196,7 @@ impl ClusterTracker { } { - let mut sender_knowledge = self.knowledge.entry(sender).or_default(); + let sender_knowledge = self.knowledge.entry(sender).or_default(); sender_knowledge.insert(TaggedKnowledge::IncomingP2P(Knowledge::Specific( statement.clone(), originator, @@ -214,7 +212,7 @@ impl ClusterTracker { // since we accept additional `Seconded` statements beyond the limits // 'with prejudice', we must respect the limit here. if self.seconded_already_or_within_limit(originator, candidate_hash) { - let mut originator_knowledge = self.knowledge.entry(originator).or_default(); + let originator_knowledge = self.knowledge.entry(originator).or_default(); originator_knowledge.insert(TaggedKnowledge::Seconded(candidate_hash)); } } @@ -264,7 +262,7 @@ impl ClusterTracker { statement: CompactStatement, ) { { - let mut target_knowledge = self.knowledge.entry(target).or_default(); + let target_knowledge = self.knowledge.entry(target).or_default(); target_knowledge.insert(TaggedKnowledge::OutgoingP2P(Knowledge::Specific( statement.clone(), originator, @@ -277,7 +275,7 @@ impl ClusterTracker { } if let CompactStatement::Seconded(candidate_hash) = statement { - let mut originator_knowledge = self.knowledge.entry(originator).or_default(); + let originator_knowledge = self.knowledge.entry(originator).or_default(); originator_knowledge.insert(TaggedKnowledge::Seconded(candidate_hash)); } diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index b777034d4d9d..27419d0001bb 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -18,10 +18,10 
@@ //! the grid. use polkadot_node_network_protocol::{ - grid_topology::SessionGridTopology, vstaging::StatementFilter, PeerId, + grid_topology::SessionGridTopology, vstaging::StatementFilter, }; use polkadot_primitives::vstaging::{ - AuthorityDiscoveryId, CandidateHash, CompactStatement, GroupIndex, Hash, ValidatorIndex, + CandidateHash, CompactStatement, GroupIndex, Hash, ValidatorIndex, }; use std::collections::{ @@ -29,7 +29,7 @@ use std::collections::{ HashSet, }; -use bitvec::{order::Lsb0, slice::BitSlice, vec::BitVec}; +use bitvec::{order::Lsb0, slice::BitSlice}; use super::{groups::Groups, LOG_TARGET}; @@ -264,7 +264,7 @@ impl GridTracker { let votes = remote_knowledge.backing_validators(); if votes < backing_threshold { - return Err(ManifestImportError::Malformed) + return Err(ManifestImportError::Insufficient) } self.received.entry(sender).or_default().import_received( @@ -319,7 +319,6 @@ impl GridTracker { session_topology: &SessionTopologyView, candidate_hash: CandidateHash, group_index: GroupIndex, - group_size: usize, local_knowledge: StatementFilter, ) -> Vec<(ValidatorIndex, ManifestKind)> { let c = match self.confirmed_backed.entry(candidate_hash) { @@ -413,19 +412,6 @@ impl GridTracker { } } - /// Whether we should send a manifest about a specific candidate to a validator, - /// and which kind of manifest. - pub fn is_manifest_pending_for( - &self, - validator_index: ValidatorIndex, - candidate_hash: &CandidateHash, - ) -> Option { - self.pending_manifests - .get(&validator_index) - .and_then(|x| x.get(&candidate_hash)) - .map(|x| *x) - } - /// Returns a vector of all candidates pending manifests for the specific validator, and /// the type of manifest we should send. pub fn pending_manifests_for( @@ -481,25 +467,6 @@ impl GridTracker { }) } - /// Which validators we could request the fully attested candidates from. - /// If the candidate is already confirmed, then this will return an empty - /// set. 
- pub fn validators_to_request( - &self, - candidate_hash: CandidateHash, - group_index: GroupIndex, - ) -> Vec { - let mut validators = Vec::new(); - if let Some(unconfirmed) = self.unconfirmed.get(&candidate_hash) { - for (v, g) in unconfirmed { - if g == &group_index { - validators.push(*v); - } - } - } - validators - } - /// Determine the validators which can send a statement to us by direct broadcast. pub fn direct_statement_providers( &self, @@ -591,7 +558,7 @@ impl GridTracker { counterparty: ValidatorIndex, statement: &CompactStatement, ) { - if let Some((g, c_h, kind, in_group)) = + if let Some((_, c_h, kind, in_group)) = extract_statement_and_group_info(groups, originator, statement) { if let Some(known) = self.confirmed_backed.get_mut(&c_h) { @@ -695,10 +662,6 @@ struct ReceivedManifests { } impl ReceivedManifests { - fn new() -> Self { - ReceivedManifests { received: HashMap::new(), seconded_counts: HashMap::new() } - } - fn candidate_statement_filter( &self, candidate_hash: &CandidateHash, @@ -879,17 +842,6 @@ struct KnownBackedCandidate { } impl KnownBackedCandidate { - fn known_by(&self, validator: ValidatorIndex) -> bool { - match self.mutual_knowledge.get(&validator) { - None => false, - Some(k) => k.remote_knowledge.is_some(), - } - } - - fn group_index(&self) -> &GroupIndex { - &self.group_index - } - fn has_received_manifest_from(&self, validator: ValidatorIndex) -> bool { self.mutual_knowledge .get(&validator) diff --git a/node/network/statement-distribution/src/vstaging/groups.rs b/node/network/statement-distribution/src/vstaging/groups.rs index e03fa63bc3f1..86321b30f220 100644 --- a/node/network/statement-distribution/src/vstaging/groups.rs +++ b/node/network/statement-distribution/src/vstaging/groups.rs @@ -17,7 +17,7 @@ //! A utility for tracking groups and their members within a session. 
use polkadot_node_primitives::minimum_votes; -use polkadot_primitives::vstaging::{AuthorityDiscoveryId, GroupIndex, IndexedVec, ValidatorIndex}; +use polkadot_primitives::vstaging::{GroupIndex, IndexedVec, ValidatorIndex}; use std::collections::HashMap; @@ -27,31 +27,22 @@ use std::collections::HashMap; pub struct Groups { groups: IndexedVec>, by_validator_index: HashMap, - by_discovery_key: HashMap, } impl Groups { /// Create a new [`Groups`] tracker with the groups and discovery keys /// from the session. - pub fn new( - groups: IndexedVec>, - discovery_keys: &[AuthorityDiscoveryId], - ) -> Self { + pub fn new(groups: IndexedVec>) -> Self { let mut by_validator_index = HashMap::new(); - let mut by_discovery_key = HashMap::new(); for (i, group) in groups.iter().enumerate() { let index = GroupIndex(i as _); for v in group { by_validator_index.insert(*v, index); - if let Some(discovery_key) = discovery_keys.get(v.0 as usize) { - // GIGO: malformed session data leads to incomplete index. - by_discovery_key.insert(discovery_key.clone(), index); - } } } - Groups { groups, by_validator_index, by_discovery_key } + Groups { groups, by_validator_index } } /// Access all the underlying groups. @@ -76,9 +67,4 @@ impl Groups { pub fn by_validator_index(&self, validator_index: ValidatorIndex) -> Option { self.by_validator_index.get(&validator_index).map(|x| *x) } - - /// Get the group index for a validator by its discovery key. - pub fn by_discovery_key(&self, discovery_key: AuthorityDiscoveryId) -> Option { - self.by_discovery_key.get(&discovery_key).map(|x| *x) - } } diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 8e3ece172d4c..0d2b79069e86 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -17,12 +17,9 @@ //! Implementation of the v2 statement distribution protocol, //! designed for asynchronous backing. 
-// TODO [now]: remove before merging & fix warnings -#![allow(unused)] - use polkadot_node_network_protocol::{ self as net_protocol, - grid_topology::{RequiredRouting, SessionGridTopology}, + grid_topology::SessionGridTopology, peer_set::ValidationVersion, request_response::{ incoming::OutgoingResponse, @@ -37,22 +34,20 @@ use polkadot_node_primitives::{ SignedFullStatementWithPVD, StatementWithPVD as FullStatementWithPVD, }; use polkadot_node_subsystem::{ - jaeger, messages::{ CandidateBackingMessage, HypotheticalCandidate, HypotheticalFrontierRequest, NetworkBridgeEvent, NetworkBridgeTxMessage, ProspectiveParachainsMessage, }, - overseer, ActivatedLeaf, ActiveLeavesUpdate, PerLeafSpan, StatementDistributionSenderTrait, + overseer, ActiveLeavesUpdate, }; use polkadot_node_subsystem_util::{ - backing_implicit_view::{FetchError, View as ImplicitView}, + backing_implicit_view::View as ImplicitView, runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, }; use polkadot_primitives::vstaging::{ - AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, CoreIndex, - CoreState, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, IndexedVec, - PersistedValidationData, SessionIndex, SessionInfo, SignedStatement, SigningContext, - UncheckedSignedStatement, ValidatorId, ValidatorIndex, + AuthorityDiscoveryId, CandidateHash, CompactStatement, CoreIndex, CoreState, GroupIndex, + GroupRotationInfo, Hash, Id as ParaId, IndexedVec, SessionIndex, SessionInfo, SignedStatement, + SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex, }; use sp_keystore::SyncCryptoStorePtr; @@ -63,7 +58,6 @@ use futures::{ stream::FuturesUnordered, SinkExt, StreamExt, }; -use indexmap::IndexMap; use std::collections::{ hash_map::{Entry, HashMap}, @@ -76,7 +70,7 @@ use crate::{ }; use candidates::{BadAdvertisement, Candidates, PostConfirmation}; use cluster::{Accept as ClusterAccept, ClusterTracker, RejectIncoming as ClusterRejectIncoming}; -use 
grid::{GridTracker, ManifestSummary}; +use grid::GridTracker; use groups::Groups; use requests::{CandidateIdentifier, RequestProperties}; use statement_store::{StatementOrigin, StatementStore}; @@ -93,10 +87,6 @@ mod statement_store; const COST_UNEXPECTED_STATEMENT: Rep = Rep::CostMinor("Unexpected Statement"); const COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE: Rep = Rep::CostMinor("Unexpected Statement, missing knowledge for relay parent"); -const COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE: Rep = - Rep::CostMinor("Unexpected Statement, unknown candidate"); -const COST_UNEXPECTED_STATEMENT_REMOTE: Rep = - Rep::CostMinor("Unexpected Statement, remote not allowed"); const COST_EXCESSIVE_SECONDED: Rep = Rep::CostMinor("Sent Excessive `Seconded` Statements"); const COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE: Rep = @@ -107,7 +97,6 @@ const COST_CONFLICTING_MANIFEST: Rep = Rep::CostMajor("Manifest conflicts with p const COST_INSUFFICIENT_MANIFEST: Rep = Rep::CostMajor("Manifest statements insufficient to back candidate"); const COST_MALFORMED_MANIFEST: Rep = Rep::CostMajor("Manifest is malformed"); -const COST_DUPLICATE_MANIFEST: Rep = Rep::CostMinor("Duplicate Manifest"); const COST_UNEXPECTED_ACKNOWLEDGEMENT_UNKNOWN_CANDIDATE: Rep = Rep::CostMinor("Unexpected acknowledgement, unknown candidate"); @@ -131,7 +120,6 @@ const BENEFIT_VALID_STATEMENT_FIRST: Rep = Rep::BenefitMajorFirst("Peer was the first to provide a valid statement"); struct PerRelayParentState { - validator_state: HashMap, local_validator: Option, statement_store: StatementStore, availability_cores: Vec, @@ -140,11 +128,6 @@ struct PerRelayParentState { session: SessionIndex, } -struct PerRelayParentValidatorState { - seconded_count: usize, - group_id: GroupIndex, -} - // per-relay-parent local validator state. struct LocalValidatorState { // The index of the validator. 
@@ -171,8 +154,7 @@ struct PerSessionState { impl PerSessionState { async fn new(session_info: SessionInfo, keystore: &SyncCryptoStorePtr) -> Self { - let groups = - Groups::new(session_info.validator_groups.clone(), &session_info.discovery_keys); + let groups = Groups::new(session_info.validator_groups.clone()); let mut authority_lookup = HashMap::new(); for (i, ad) in session_info.discovery_keys.iter().cloned().enumerate() { authority_lookup.insert(ad, ValidatorIndex(i as _)); @@ -202,13 +184,6 @@ impl PerSessionState { self.grid_view = Some(grid_view); } - - fn authority_index_in_session( - &self, - discovery_key: &AuthorityDiscoveryId, - ) -> Option { - self.authority_lookup.get(discovery_key).map(|x| *x) - } } pub(crate) struct State { @@ -516,7 +491,6 @@ pub(crate) async fn handle_active_leaves_update( let local_validator = per_session.local_validator.and_then(|v| { find_local_validator_state( v, - &per_session.session_info.validators, &per_session.groups, &availability_cores, &group_rotation_info, @@ -527,7 +501,6 @@ pub(crate) async fn handle_active_leaves_update( state.per_relay_parent.insert( new_relay_parent, PerRelayParentState { - validator_state: HashMap::new(), local_validator, statement_store: StatementStore::new(&per_session.groups), availability_cores, @@ -565,7 +538,6 @@ pub(crate) async fn handle_active_leaves_update( fn find_local_validator_state( validator_index: ValidatorIndex, - validators: &IndexedVec, groups: &Groups, availability_cores: &[CoreState], group_rotation_info: &GroupRotationInfo, @@ -575,8 +547,6 @@ fn find_local_validator_state( return None } - let validator_id = validators.get(validator_index)?.clone(); - let our_group = groups.by_validator_index(validator_index)?; // note: this won't work well for parathreads because it only works @@ -610,7 +580,7 @@ fn handle_deactivate_leaves(state: &mut State, leaves: &[Hash]) { } // clean up per-relay-parent data based on everything removed. 
- state.per_relay_parent.retain(|r, x| relay_parents.contains(r)); + state.per_relay_parent.retain(|r, _| relay_parents.contains(r)); // Clean up all requests for leaf in leaves { @@ -975,7 +945,7 @@ pub(crate) async fn share_local_statement( let mut post_confirmation = None; // Insert candidate if unknown + more sanity checks. - let (compact_statement, candidate_hash) = { + let compact_statement = { let compact_statement = FullStatementWithPVD::signed_to_compact(statement.clone()); let candidate_hash = CandidateHash(*statement.payload().candidate_hash()); @@ -988,7 +958,7 @@ pub(crate) async fn share_local_statement( ); }; - let x = match per_relay_parent.statement_store.insert( + match per_relay_parent.statement_store.insert( &per_session.groups, compact_statement.clone(), StatementOrigin::Local, @@ -1002,7 +972,7 @@ pub(crate) async fn share_local_statement( return Err(JfyiError::InvalidShare) }, Ok(true) => {}, - }; + } if let Some(ref session_topology) = per_session.grid_view { let l = per_relay_parent.local_validator.as_mut().expect("checked above; qed"); @@ -1014,7 +984,7 @@ pub(crate) async fn share_local_statement( ); } - (compact_statement, candidate_hash) + compact_statement }; // send the compact version of the statement to any peers which need it. @@ -1026,13 +996,12 @@ pub(crate) async fn share_local_statement( &state.candidates, &state.authorities, &state.peers, - local_group, compact_statement, ) .await; if let Some(post_confirmation) = post_confirmation { - apply_post_confirmation(ctx, state, post_confirmation); + apply_post_confirmation(ctx, state, post_confirmation).await; } Ok(()) @@ -1046,11 +1015,9 @@ enum DirectTargetKind { } // Circulates a compact statement to all peers who need it: those in the current group of the -// local validator, those in the next group for the parachain, and grid peers which have already -// indicated that they know the candidate as backed. 
+// local validator and grid peers which have already indicated that they know the candidate as backed. // -// If we're not sure whether the peer knows the candidate is `Seconded` already, we also send a `Seconded` -// statement. +// We only circulate statements for which we have the confirmed candidate, even to the local group. // // The group index which is _canonically assigned_ to this parachain must be // specified already. This function should not be used when the candidate receipt and @@ -1068,7 +1035,6 @@ async fn circulate_statement( candidates: &Candidates, authorities: &HashMap, peers: &HashMap, - group_index: GroupIndex, statement: SignedStatement, ) { let session_info = &per_session.session_info; @@ -1076,11 +1042,6 @@ async fn circulate_statement( let candidate_hash = statement.payload().candidate_hash().clone(); let compact_statement = statement.payload().clone(); - let is_seconded = match compact_statement { - CompactStatement::Seconded(_) => true, - CompactStatement::Valid(_) => false, - }; - let is_confirmed = candidates.is_confirmed(&candidate_hash); let originator = statement.validator_index(); @@ -1347,11 +1308,6 @@ async fn handle_incoming_statement( let originator_index = checked_statement.validator_index(); let candidate_hash = *checked_statement.payload().candidate_hash(); - let originator_group = per_relay_parent - .statement_store - .validator_group_index(originator_index) - .expect("validator confirmed to be known by statement_store.insert; qed"); - // Insert an unconfirmed candidate entry if needed. Note that if the candidate is already confirmed, // this ensures that the assigned group of the originator matches the expected group of the // parachain. 
@@ -1441,7 +1397,6 @@ async fn handle_incoming_statement( &state.candidates, &state.authorities, &state.peers, - originator_group, checked_statement, ) .await; @@ -1559,7 +1514,7 @@ async fn send_backing_fresh_statements( let carrying_pvd = statement .clone() .convert_to_superpayload_with(|statement| match statement { - CompactStatement::Seconded(c_hash) => FullStatementWithPVD::Seconded( + CompactStatement::Seconded(_) => FullStatementWithPVD::Seconded( (&**confirmed.candidate_receipt()).clone(), confirmed.persisted_validation_data().clone(), ), @@ -1648,7 +1603,6 @@ async fn provide_candidate_to_grid( grid_view, candidate_hash, group_index, - group_size, filter.clone(), ); @@ -1787,7 +1741,7 @@ async fn fragment_tree_update_inner( continue } - for (leaf_hash, depths) in membership { + for (leaf_hash, _) in membership { state.candidates.note_importable_under(&hypo, leaf_hash); } @@ -1795,7 +1749,7 @@ async fn fragment_tree_update_inner( if let HypotheticalCandidate::Complete { candidate_hash, receipt, - persisted_validation_data, + persisted_validation_data: _, } = hypo { let confirmed_candidate = state.candidates.get_confirmed(&candidate_hash); @@ -2227,7 +2181,6 @@ pub(crate) async fn handle_backed_candidate_message( ) { // If the candidate is unknown or unconfirmed, it's a race (pruned before receiving message) // or a bug. Ignore if so - state.candidates.note_backed(&candidate_hash); let confirmed = match state.candidates.get_confirmed(&candidate_hash) { None => { gum::debug!( @@ -2261,6 +2214,15 @@ pub(crate) async fn handle_backed_candidate_message( &state.peers, ) .await; + + // Search for children of the backed candidate to request. 
+ prospective_backed_notification_fragment_tree_updates( + ctx, + state, + confirmed.para_id(), + confirmed.candidate_receipt().descriptor().para_head, + ) + .await; } /// Sends all messages about a candidate to all peers in the cluster, @@ -2312,7 +2274,6 @@ async fn send_cluster_candidate_statements( &state.candidates, &state.authorities, &state.peers, - local_group, statement, ) .await; @@ -2384,7 +2345,7 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St None }; let request_props = |identifier: &CandidateIdentifier| { - let &CandidateIdentifier { relay_parent, candidate_hash, group_index } = identifier; + let &CandidateIdentifier { relay_parent, group_index, .. } = identifier; let relay_parent_state = state.per_relay_parent.get(&relay_parent)?; let per_session = state.per_session.get(&relay_parent_state.session)?; @@ -2555,12 +2516,7 @@ pub(crate) async fn handle_response( } /// Answer an incoming request for a candidate. -#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -pub(crate) async fn answer_request( - ctx: &mut Context, - state: &mut State, - message: ResponderMessage, -) { +pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { let ResponderMessage { request, sent_feedback } = message; let AttestedCandidateRequest { candidate_hash, ref mask } = &request.payload; @@ -2600,7 +2556,7 @@ pub(crate) async fn answer_request( // check request bitfields are right size. 
if mask.seconded_in_group.len() != group_size || mask.validated_in_group.len() != group_size { - request.send_outgoing_response(OutgoingResponse { + let _ = request.send_outgoing_response(OutgoingResponse { result: Err(()), reputation_changes: vec![COST_INVALID_REQUEST_BITFIELD_SIZE], sent_feedback: None, @@ -2622,7 +2578,7 @@ pub(crate) async fn answer_request( } if !can_request { - request.send_outgoing_response(OutgoingResponse { + let _ = request.send_outgoing_response(OutgoingResponse { result: Err(()), reputation_changes: vec![COST_UNEXPECTED_REQUEST], sent_feedback: None, @@ -2654,7 +2610,7 @@ pub(crate) async fn answer_request( .collect(), }; - request.send_response(response); + let _ = request.send_response(response); } /// Messages coming from the background respond task. diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index 32ab1e30695f..86ffe0c3cce3 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -47,15 +47,11 @@ use polkadot_primitives::vstaging::{ ValidatorIndex, }; -use bitvec::{order::Lsb0, slice::BitSlice, vec::BitVec}; -use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered}; +use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; -use std::{ - collections::{ - hash_map::{Entry as HEntry, HashMap, VacantEntry}, - BTreeSet, HashSet, VecDeque, - }, - pin::Pin, +use std::collections::{ + hash_map::{Entry as HEntry, HashMap}, + HashSet, VecDeque, }; /// An identifier for a candidate. 
@@ -192,7 +188,7 @@ impl RequestManager { .binary_search(&(candidate.priority.clone(), identifier.clone())) { Ok(i) => i, - Err(i) => unreachable!("requested candidates always have a priority entry; qed"), + Err(_) => unreachable!("requested candidates always have a priority entry; qed"), } }; @@ -239,7 +235,7 @@ impl RequestManager { }, // We can expect to encounter vacant entries, but only if nodes are misbehaving and // we don't use a deduplicating collection; there are no issues from ignoring it. - HEntry::Vacant(entry) => (), + HEntry::Vacant(_) => (), } } } @@ -483,7 +479,7 @@ impl UnhandledResponse { .binary_search(&(entry.priority.clone(), identifier.clone())) { Ok(i) => i, - Err(i) => unreachable!("requested candidates always have a priority entry; qed"), + Err(_) => unreachable!("requested candidates always have a priority entry; qed"), }; entry.in_flight = false; @@ -521,7 +517,7 @@ impl UnhandledResponse { Ok(response) => response, }; - let mut output = validate_complete_response( + let output = validate_complete_response( &identifier, props, complete_response, diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index 4757c60fc0f9..0967394b69f0 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -23,7 +23,7 @@ //! views into this data: views based on the candidate, views based on the validator //! groups, and views based on the validators themselves. -use bitvec::{order::Lsb0 as BitOrderLsb0, slice::BitSlice, vec::BitVec}; +use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use polkadot_node_network_protocol::vstaging::StatementFilter; use polkadot_primitives::vstaging::{ CandidateHash, CompactStatement, GroupIndex, SignedStatement, ValidatorIndex, @@ -90,12 +90,6 @@ impl StatementStore { } } - /// Get the group index of a validator by index. 
If any statements by the validator - /// have been imported successfully, this is guaranteed to succeed. - pub fn validator_group_index(&self, validator: ValidatorIndex) -> Option { - self.validator_meta.get(&validator).map(|g| g.group) - } - /// Insert a statement. Returns `true` if was not known already, `false` if it was. /// Ignores statements by unknown validators and returns an error. pub fn insert( @@ -121,7 +115,7 @@ impl StatementStore { return Ok(false) }, - HEntry::Vacant(mut e) => { + HEntry::Vacant(e) => { e.insert(StoredStatement { statement, known_by_backing: origin.is_local() }); }, } From bb37d2676e765593a4de4c6d1139425cf8b72a8c Mon Sep 17 00:00:00 2001 From: Marcin S Date: Thu, 16 Feb 2023 04:48:17 +0100 Subject: [PATCH 167/220] Async backing statement distribution: grid tests (#6673) * Add `manifest_import_returns_ok_true` test * cargo fmt * Add pending_communication_receiving_manifest_on_confirmed_candidate * Add `senders_can_provide_manifests_in_acknowledgement` test * Add a couple of tests for pending statements * Add `pending_statements_cleared_when_sending` test * Add `pending_statements_respect_remote_knowledge` test * Refactor group creation in tests * Clarify docs * Address some review comments * Make some clarifications * Fix post-merge errors * Clarify test `senders_can_provide_manifests_in_acknowledgement` * Try writing `pending_statements_are_updated_after_manifest_exchange` * Document "seconding limit" and `reject_overflowing_manifests` test * Test that seconding counts are not updated for validators on error * Fix tests * Fix manifest exchange test --- .../src/vstaging/grid.rs | 785 ++++++++++++++++-- 1 file changed, 727 insertions(+), 58 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 27419d0001bb..3f740b6257a6 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ 
b/node/network/statement-distribution/src/vstaging/grid.rs @@ -14,8 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Utilities for handling distribution of backed candidates along -//! the grid. +//! Utilities for handling distribution of backed candidates along the grid (outside the group to +//! the rest of the network). +//! +//! The grid uses the gossip topology defined in [`polkadot_node_network_protocol::grid_topology`]. +//! It defines how messages and statements are forwarded between validators. use polkadot_node_network_protocol::{ grid_topology::SessionGridTopology, vstaging::StatementFilter, @@ -42,7 +45,7 @@ use super::{groups::Groups, LOG_TARGET}; /// /// In the case that this group is the group that we are locally assigned to, /// the 'receiving' side will be empty. -#[derive(PartialEq)] +#[derive(Debug, PartialEq)] struct GroupSubView { sending: HashSet, receiving: HashSet, @@ -70,8 +73,8 @@ impl SessionTopologyView { /// Build a view of the topology for the session. /// For groups that we are part of: we receive from nobody and send to our X/Y peers. -/// For groups that we are not part of: we receive from any validator in the group we share a slice with. -/// and send to the corresponding X/Y slice. +/// For groups that we are not part of: we receive from any validator in the group we share a slice with +/// and send to the corresponding X/Y slice in the other dimension. /// For any validators we don't share a slice with, we receive from the nodes /// which share a slice with them. pub fn build_session_topology<'a>( @@ -300,7 +303,7 @@ impl GridTracker { ); } } else { - // received prevents conflicting manifests so this is max 1 per validator. + // `received` prevents conflicting manifests so this is max 1 per validator. self.unconfirmed .entry(candidate_hash) .or_default() @@ -311,7 +314,7 @@ impl GridTracker { } /// Add a new backed candidate to the tracker. 
This yields - /// an iterator of validators which we should either advertise to + /// a list of validators which we should either advertise to /// or signal that we know the candidate, along with the corresponding /// type of manifest we should send. pub fn add_backed_candidate( @@ -354,7 +357,7 @@ impl GridTracker { Some(g) => g, }; - // advertise onwards ad accept received advertisements + // advertise onwards and accept received advertisements let sending_group_manifests = group_topology.sending.iter().map(|v| (*v, ManifestKind::Full)); @@ -531,7 +534,7 @@ impl GridTracker { return } - // Add to `pending_statements` for all valdiators we communicate with + // Add to `pending_statements` for all validators we communicate with // who have exchanged manifests. let all_group_validators = session_topology .group_views @@ -653,7 +656,7 @@ pub enum ManifestImportError { Disallowed, } -/// The knowledge we are awawre of counterparties having of manifests. +/// The knowledge we are aware of counterparties having of manifests. #[derive(Default)] struct ReceivedManifests { received: HashMap, @@ -764,6 +767,9 @@ impl ReceivedManifests { // updates validator-seconded records but only if the new statements // are OK. returns `true` if alright and `false` otherwise. +// +// The seconding limit is a per-validator limit. It ensures an upper bound on the total number of +// candidates entering the system. fn updating_ensure_within_seconding_limit( seconded_counts: &mut HashMap>, group_index: GroupIndex, @@ -792,7 +798,7 @@ fn updating_ensure_within_seconding_limit( true } -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] enum StatementKind { Seconded, Valid, @@ -823,18 +829,22 @@ impl FilterQuery for StatementFilter { } } +/// Knowledge that we have about a remote peer concerning a candidate, and that they have about us +/// concerning the candidate. +#[derive(Debug, Clone)] struct MutualKnowledge { - // Knowledge they have about the candidate. 
`Some` only if they - // have advertised or requested the candidate. + /// Knowledge the remote peer has about the candidate. `Some` only if they + /// have advertised or requested the candidate. remote_knowledge: Option, - // Knowledge we have indicated to them about the candidate. - // `Some` only if we have advertised or requested the candidate - // from them. + /// Knowledge we have indicated to the remote peer about the candidate. + /// `Some` only if we have advertised or requested the candidate + /// from them. local_knowledge: Option, } // A utility struct for keeping track of metadata about candidates // we have confirmed as having been backed. +#[derive(Debug, Clone)] struct KnownBackedCandidate { group_index: GroupIndex, local_knowledge: StatementFilter, @@ -991,6 +1001,14 @@ mod tests { use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; use sp_core::crypto::Pair as PairT; + fn dummy_groups(group_size: usize) -> Groups { + let groups = vec![(0..(group_size as u32)).map(ValidatorIndex).collect()].into(); + let mut discovery_keys = vec![]; + (0..group_size).for_each(|_| discovery_keys.push(AuthorityDiscoveryPair::generate().0.public())); + + Groups::new(groups, &discovery_keys) + } + #[test] fn topology_empty_for_no_index() { let base_topology = SessionGridTopology::new( @@ -1141,6 +1159,8 @@ mod tests { ); } + // Make sure we don't import manifests that would put a validator in a group over the limit of + // candidates they are allowed to second (aka seconding limit). #[test] fn reject_overflowing_manifests() { let mut knowledge = ReceivedManifests::default(); @@ -1176,6 +1196,8 @@ mod tests { ) .unwrap(); + // Reject a seconding validator that is already at the seconding limit. Seconding counts for + // the validators should not be applied. assert_matches!( knowledge.import_received( 3, @@ -1193,6 +1215,7 @@ mod tests { Err(ManifestImportError::Overflow) ); + // Don't reject validators that have seconded less than the limit so far.
knowledge .import_received( 3, @@ -1225,14 +1248,7 @@ mod tests { .collect(), }; - let groups = Groups::new( - vec![vec![ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]].into(), - &[ - AuthorityDiscoveryPair::generate().0.public(), - AuthorityDiscoveryPair::generate().0.public(), - AuthorityDiscoveryPair::generate().0.public(), - ], - ); + let groups = dummy_groups(3); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); @@ -1298,14 +1314,7 @@ mod tests { .collect(), }; - let groups = Groups::new( - vec![vec![ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]].into(), - &[ - AuthorityDiscoveryPair::generate().0.public(), - AuthorityDiscoveryPair::generate().0.public(), - AuthorityDiscoveryPair::generate().0.public(), - ], - ); + let groups = dummy_groups(3); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); @@ -1367,14 +1376,7 @@ mod tests { .collect(), }; - let groups = Groups::new( - vec![vec![ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]].into(), - &[ - AuthorityDiscoveryPair::generate().0.public(), - AuthorityDiscoveryPair::generate().0.public(), - AuthorityDiscoveryPair::generate().0.public(), - ], - ); + let groups = dummy_groups(3); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); @@ -1409,21 +1411,14 @@ mod tests { GroupIndex(0), GroupSubView { sending: HashSet::new(), - receiving: vec![ValidatorIndex(0)].into_iter().collect(), + receiving: HashSet::from([ValidatorIndex(0)]), }, )] .into_iter() .collect(), }; - let groups = Groups::new( - vec![vec![ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]].into(), - &[ - AuthorityDiscoveryPair::generate().0.public(), - AuthorityDiscoveryPair::generate().0.public(), - AuthorityDiscoveryPair::generate().0.public(), - ], - ); + let groups = dummy_groups(3); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); @@ -1496,20 +1491,694 @@ mod tests { ); } - // TODO [now]: test that senders can provide manifests in acknowledgement + // Test that when we add a 
candidate as backed and advertise it to the sending group, they can + // provide an acknowledgement manifest in response. + #[test] + fn senders_can_provide_manifests_in_acknowledgement() { + let validator_index = ValidatorIndex(0); + + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::from([validator_index]), + receiving: HashSet::from([ValidatorIndex(1)]), + }, + )] + .into_iter() + .collect(), + }; + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let group_index = GroupIndex(0); + let group_size = 3; + let local_knowledge = StatementFilter::blank(group_size); + + let groups = dummy_groups(group_size); + + // Add the candidate as backed. + let receivers = tracker.add_backed_candidate( + &session_topology, + candidate_hash, + group_index, + group_size, + local_knowledge.clone(), + ); + // Validator 0 is in the sending group. Advertise onward to it. + // + // Validator 1 is in the receiving group, but we have not received from it, so we're not + // expected to send it an acknowledgement. + assert_eq!(receivers, vec![(validator_index, ManifestKind::Full)]); + + // Note the manifest as 'sent' to validator 0. + tracker.manifest_sent_to(&groups, validator_index, candidate_hash, local_knowledge); + + // Import manifest of kind `Acknowledgement` from validator 0. + let ack = tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + }, + ManifestKind::Acknowledgement, + validator_index, + ); + assert_matches!(ack, Ok(false)); + } + + // Check that pending communication is set correctly when receiving a manifest on a confirmed candidate. 
+ // + // It should also overwrite any existing `Full` ManifestKind. + #[test] + fn pending_communication_receiving_manifest_on_confirmed_candidate() { + let validator_index = ValidatorIndex(0); + + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::from([validator_index]), + receiving: HashSet::from([ValidatorIndex(1)]), + }, + )] + .into_iter() + .collect(), + }; + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let group_index = GroupIndex(0); + let group_size = 3; + let local_knowledge = StatementFilter::blank(group_size); + + let groups = dummy_groups(group_size); + + // Manifest should not be pending yet. + let pending_manifest = tracker.is_manifest_pending_for(validator_index, &candidate_hash); + assert_eq!(pending_manifest, None); + + // Add the candidate as backed. + tracker.add_backed_candidate( + &session_topology, + candidate_hash, + group_index, + group_size, + local_knowledge.clone(), + ); + + // Manifest should be pending as `Full`. + let pending_manifest = tracker.is_manifest_pending_for(validator_index, &candidate_hash); + assert_eq!(pending_manifest, Some(ManifestKind::Full)); + + // Note the manifest as 'sent' to validator 0. + tracker.manifest_sent_to(&groups, validator_index, candidate_hash, local_knowledge); + + // Import manifest. + // + // Should overwrite existing `Full` manifest. 
+ let ack = tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + }, + ManifestKind::Acknowledgement, + validator_index, + ); + assert_matches!(ack, Ok(false)); + + let pending_manifest = tracker.is_manifest_pending_for(validator_index, &candidate_hash); + assert_eq!(pending_manifest, None); + } + + // Check that pending communication is cleared correctly in `manifest_sent_to` + // + // Also test a scenario where manifest import returns `Ok(true)` (should acknowledge). + #[test] + fn pending_communication_is_cleared() { + let validator_index = ValidatorIndex(0); + + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::new(), + receiving: HashSet::from([validator_index]), + }, + )] + .into_iter() + .collect(), + }; + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let group_index = GroupIndex(0); + let group_size = 3; + let local_knowledge = StatementFilter::blank(group_size); + + let groups = dummy_groups(group_size); + + // Add the candidate as backed. + tracker.add_backed_candidate( + &session_topology, + candidate_hash, + group_index, + group_size, + local_knowledge.clone(), + ); + + // Manifest should not be pending yet. + let pending_manifest = tracker.is_manifest_pending_for(validator_index, &candidate_hash); + assert_eq!(pending_manifest, None); + + // Import manifest. The candidate is confirmed backed and we are expected to receive from + // validator 0, so send it an acknowledgement. 
+ let ack = tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + }, + ManifestKind::Full, + validator_index, + ); + assert_matches!(ack, Ok(true)); + + // Acknowledgement manifest should be pending. + let pending_manifest = tracker.is_manifest_pending_for(validator_index, &candidate_hash); + assert_eq!(pending_manifest, Some(ManifestKind::Acknowledgement)); + + // Note the candidate as advertised. + tracker.manifest_sent_to(&groups, validator_index, candidate_hash, local_knowledge); + + // Pending manifest should be cleared. + let pending_manifest = tracker.is_manifest_pending_for(validator_index, &candidate_hash); + assert_eq!(pending_manifest, None); + } + + /// A manifest exchange means that both `manifest_sent_to` and `manifest_received_from` have + /// been invoked. + /// + /// In practice, it means that one of three things have happened: + /// + /// - They announced, we acknowledged + /// + /// - We announced, they acknowledged + /// + /// - We announced, they announced (not sure if this can actually happen; it would happen if 2 + /// nodes had each other in their sending set and they sent manifests at the same time. 
The + /// code accounts for this anyway) + #[test] + fn pending_statements_are_updated_after_manifest_exchange() { + let send_to = ValidatorIndex(0); + let receive_from = ValidatorIndex(1); + + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::from([send_to]), + receiving: HashSet::from([receive_from]), + }, + )] + .into_iter() + .collect(), + }; + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let group_index = GroupIndex(0); + let group_size = 3; + let local_knowledge = StatementFilter::blank(group_size); + + let groups = dummy_groups(group_size); + + // Confirm the candidate. + let receivers = tracker.add_backed_candidate( + &session_topology, + candidate_hash, + group_index, + group_size, + local_knowledge.clone(), + ); + assert_eq!(receivers, vec![(send_to, ManifestKind::Full)]); + + // Learn a statement from a different validator. + tracker.learned_fresh_statement( + &groups, + &session_topology, + ValidatorIndex(2), + &CompactStatement::Seconded(candidate_hash), + ); + + // Test receiving followed by sending an ack. + { + // Should start with no pending statements. + assert_eq!(tracker.pending_statements_for(receive_from, candidate_hash), None); + assert_eq!(tracker.all_pending_statements_for(receive_from), vec![]); + let ack = tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + }, + ManifestKind::Full, + receive_from, + ); + assert_matches!(ack, Ok(true)); + + // Send ack now. + tracker.manifest_sent_to( + &groups, + receive_from, + candidate_hash, + local_knowledge.clone(), + ); + + // There should be pending statements now. 
+ assert_eq!( + tracker.pending_statements_for(receive_from, candidate_hash), + Some(StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }) + ); + assert_eq!( + tracker.all_pending_statements_for(receive_from), + vec![(ValidatorIndex(2), CompactStatement::Seconded(candidate_hash))] + ); + } + + // Test sending followed by receiving an ack. + { + // Should start with no pending statements. + assert_eq!(tracker.pending_statements_for(send_to, candidate_hash), None); + assert_eq!(tracker.all_pending_statements_for(send_to), vec![]); + + tracker.manifest_sent_to(&groups, send_to, candidate_hash, local_knowledge.clone()); + let ack = tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + }, + }, + ManifestKind::Acknowledgement, + send_to, + ); + assert_matches!(ack, Ok(false)); + + // There should be pending statements now. 
+ assert_eq!( + tracker.pending_statements_for(send_to, candidate_hash), + Some(StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }) + ); + assert_eq!( + tracker.all_pending_statements_for(send_to), + vec![(ValidatorIndex(2), CompactStatement::Seconded(candidate_hash))] + ); + } + } + + #[test] + fn invalid_fresh_statement_import() { + let validator_index = ValidatorIndex(0); + + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::new(), + receiving: HashSet::from([validator_index]), + }, + )] + .into_iter() + .collect(), + }; + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let group_index = GroupIndex(0); + let group_size = 3; + let local_knowledge = StatementFilter::blank(group_size); + + let groups = dummy_groups(group_size); + + // Should start with no pending statements. + assert_eq!(tracker.pending_statements_for(validator_index, candidate_hash), None); + assert_eq!(tracker.all_pending_statements_for(validator_index), vec![]); + + // Try to import fresh statement. Candidate not backed. + let statement = CompactStatement::Seconded(candidate_hash); + tracker.learned_fresh_statement(&groups, &session_topology, validator_index, &statement); + + assert_eq!(tracker.pending_statements_for(validator_index, candidate_hash), None); + assert_eq!(tracker.all_pending_statements_for(validator_index), vec![]); + + // Add the candidate as backed. + tracker.add_backed_candidate( + &session_topology, + candidate_hash, + group_index, + group_size, + local_knowledge.clone(), + ); + + // Try to import fresh statement. Unknown group for validator index. 
+ let statement = CompactStatement::Seconded(candidate_hash); + tracker.learned_fresh_statement(&groups, &session_topology, ValidatorIndex(1), &statement); + + assert_eq!(tracker.pending_statements_for(validator_index, candidate_hash), None); + assert_eq!(tracker.all_pending_statements_for(validator_index), vec![]); + } + + #[test] + fn pending_statements_updated_when_importing_fresh_statement() { + let validator_index = ValidatorIndex(0); - // TODO [now]: check that pending communication is set correctly when receiving a manifest on a confirmed candidate - // It should also overwrite any existing `Full` ManifestKind + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::new(), + receiving: HashSet::from([validator_index]), + }, + )] + .into_iter() + .collect(), + }; + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let group_index = GroupIndex(0); + let group_size = 3; + let local_knowledge = StatementFilter::blank(group_size); + + let groups = dummy_groups(group_size); + + // Should start with no pending statements. + assert_eq!(tracker.pending_statements_for(validator_index, candidate_hash), None); + assert_eq!(tracker.all_pending_statements_for(validator_index), vec![]); + + // Add the candidate as backed. + tracker.add_backed_candidate( + &session_topology, + candidate_hash, + group_index, + group_size, + local_knowledge.clone(), + ); - // TODO [now]: check that pending communication is cleared correctly in `manifest_sent_to` + // Import fresh statement. - // TODO [now]: test a scenario where manifest import returns `Ok(true)`. 
+ let ack = tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + }, + ManifestKind::Full, + validator_index, + ); + assert_matches!(ack, Ok(true)); + tracker.manifest_sent_to(&groups, validator_index, candidate_hash, local_knowledge); + let statement = CompactStatement::Seconded(candidate_hash); + tracker.learned_fresh_statement(&groups, &session_topology, validator_index, &statement); + + // There should be pending statements now. + let statements = StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }; + assert_eq!( + tracker.pending_statements_for(validator_index, candidate_hash), + Some(statements.clone()) + ); + assert_eq!( + tracker.all_pending_statements_for(validator_index), + vec![(ValidatorIndex(0), CompactStatement::Seconded(candidate_hash))] + ); - // TODO [now]: test that pending statements are updated after manifest exchange + // After successful import, try importing again. Nothing should change. - // TODO [now]: test that pending statements are updated when importing a fresh statement + tracker.learned_fresh_statement(&groups, &session_topology, validator_index, &statement); + assert_eq!( + tracker.pending_statements_for(validator_index, candidate_hash), + Some(statements) + ); + assert_eq!( + tracker.all_pending_statements_for(validator_index), + vec![(ValidatorIndex(0), CompactStatement::Seconded(candidate_hash))] + ); + } - // TODO [now]: test that pending statements respect remote knowledge + // After learning fresh statements, we should not generate pending statements for knowledge that + // the validator already has. 
+ #[test] + fn pending_statements_respect_remote_knowledge() { + let validator_index = ValidatorIndex(0); - // TODO [now]: test that pending statements are cleared when sending/receiving. + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::new(), + receiving: HashSet::from([validator_index]), + }, + )] + .into_iter() + .collect(), + }; + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let group_index = GroupIndex(0); + let group_size = 3; + let local_knowledge = StatementFilter::blank(group_size); + + let groups = dummy_groups(group_size); + + // Should start with no pending statements. + assert_eq!(tracker.pending_statements_for(validator_index, candidate_hash), None); + assert_eq!(tracker.all_pending_statements_for(validator_index), vec![]); + + // Add the candidate as backed. + tracker.add_backed_candidate( + &session_topology, + candidate_hash, + group_index, + group_size, + local_knowledge.clone(), + ); + + // Import fresh statement. 
+ let ack = tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }, + ManifestKind::Full, + validator_index, + ); + assert_matches!(ack, Ok(true)); + tracker.manifest_sent_to(&groups, validator_index, candidate_hash, local_knowledge); + tracker.learned_fresh_statement( + &groups, + &session_topology, + validator_index, + &CompactStatement::Seconded(candidate_hash), + ); + tracker.learned_fresh_statement( + &groups, + &session_topology, + validator_index, + &CompactStatement::Valid(candidate_hash), + ); + + // The pending statements should respect the remote knowledge (meaning the Seconded + // statement is ignored, but not the Valid statement). + let statements = StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 0], + }; + assert_eq!( + tracker.pending_statements_for(validator_index, candidate_hash), + Some(statements.clone()) + ); + assert_eq!( + tracker.all_pending_statements_for(validator_index), + vec![(ValidatorIndex(0), CompactStatement::Valid(candidate_hash))] + ); + } + + #[test] + fn pending_statements_cleared_when_sending() { + let validator_index = ValidatorIndex(0); + let counterparty = ValidatorIndex(1); + + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::new(), + receiving: HashSet::from([validator_index, counterparty]), + }, + )] + .into_iter() + .collect(), + }; + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let group_index = GroupIndex(0); + let group_size = 3; + let local_knowledge = StatementFilter::blank(group_size); + + let groups = 
dummy_groups(group_size); + + // Should start with no pending statements. + assert_eq!(tracker.pending_statements_for(validator_index, candidate_hash), None); + assert_eq!(tracker.all_pending_statements_for(validator_index), vec![]); + + // Add the candidate as backed. + tracker.add_backed_candidate( + &session_topology, + candidate_hash, + group_index, + group_size, + local_knowledge.clone(), + ); + + // Import statement for originator. + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + }, + ManifestKind::Full, + validator_index, + ); + tracker.manifest_sent_to(&groups, validator_index, candidate_hash, local_knowledge.clone()); + let statement = CompactStatement::Seconded(candidate_hash); + tracker.learned_fresh_statement(&groups, &session_topology, validator_index, &statement); + + // Import statement for counterparty. + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + }, + ManifestKind::Full, + counterparty, + ); + tracker.manifest_sent_to(&groups, counterparty, candidate_hash, local_knowledge); + let statement = CompactStatement::Seconded(candidate_hash); + tracker.learned_fresh_statement(&groups, &session_topology, counterparty, &statement); + + // There should be pending statements now. 
+ let statements = StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }; + assert_eq!( + tracker.pending_statements_for(validator_index, candidate_hash), + Some(statements.clone()) + ); + assert_eq!( + tracker.all_pending_statements_for(validator_index), + vec![(ValidatorIndex(0), CompactStatement::Seconded(candidate_hash))] + ); + assert_eq!( + tracker.pending_statements_for(counterparty, candidate_hash), + Some(statements.clone()) + ); + assert_eq!( + tracker.all_pending_statements_for(counterparty), + vec![(ValidatorIndex(0), CompactStatement::Seconded(candidate_hash))] + ); + + tracker.learned_fresh_statement(&groups, &session_topology, validator_index, &statement); + tracker.sent_or_received_direct_statement( + &groups, + validator_index, + counterparty, + &statement, + ); + + // There should be no pending statements now (for the counterparty). + assert_eq!( + tracker.pending_statements_for(counterparty, candidate_hash), + Some(StatementFilter::blank(group_size)) + ); + assert_eq!(tracker.all_pending_statements_for(counterparty), vec![]); + } } From 14a731bc64fc75d4b8cf1796586a4b410cf3b3e8 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Thu, 16 Feb 2023 04:49:10 +0100 Subject: [PATCH 168/220] Add more tests in `requests.rs` (#6707) This resolves remaining TODOs in this file. --- .../src/vstaging/requests.rs | 301 +++++++++++++++++- 1 file changed, 298 insertions(+), 3 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index 86ffe0c3cce3..494dfbd8bf56 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -80,6 +80,7 @@ struct TaggedResponse { } /// A pending request. 
+#[derive(Debug)] pub struct RequestedCandidate { priority: Priority, known_by: VecDeque, @@ -354,6 +355,7 @@ impl RequestManager { } /// Properties used in target selection and validation of a request. +#[derive(Clone)] pub struct RequestProperties { /// A mask for limiting the statements the response is allowed to contain. /// The mask has `OR` semantics: statements by validators corresponding to bits @@ -362,7 +364,7 @@ pub struct RequestProperties { pub unwanted_mask: StatementFilter, /// The required backing threshold, if any. If this is `Some`, then requests will only /// be made to peers which can provide enough statements to back the candidate, when - /// taking into account the unwanted_mask`, and a response will only be validated + /// taking into account the `unwanted_mask`, and a response will only be validated /// in the case of those statements. /// /// If this is `None`, it is assumed that only the candidate itself is needed. @@ -710,6 +712,7 @@ fn validate_complete_response( } /// The status of the candidate request after the handling of a response. +#[derive(Debug, PartialEq)] pub enum CandidateRequestStatus { /// The request was outdated at the point of receiving the response. Outdated, @@ -725,6 +728,7 @@ pub enum CandidateRequestStatus { } /// Output of the response validation. +#[derive(Debug, PartialEq)] pub struct ResponseValidationOutput { /// The peer we requested from. 
pub requested_peer: PeerId, @@ -764,6 +768,17 @@ fn insert_or_update_priority( #[cfg(test)] mod tests { use super::*; + use polkadot_primitives::HeadData; + use polkadot_primitives_test_helpers as test_helpers; + + fn dummy_pvd() -> PersistedValidationData { + PersistedValidationData { + parent_head: HeadData(vec![7, 8, 9]), + relay_parent_number: 5, + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + } + } #[test] fn test_remove_by_relay_parent() { @@ -868,7 +883,287 @@ mod tests { ); } - // TODO [now]: test that outdated responses are handled correctly. + // Test case where candidate is requested under two different identifiers at the same time. + // Should result in `Outdated` error. + #[test] + fn handle_outdated_response_due_to_requests_for_different_identifiers() { + let mut request_manager = RequestManager::new(); + + let relay_parent = Hash::from_low_u64_le(1); + let mut candidate_receipt = test_helpers::dummy_committed_candidate_receipt(relay_parent); + let persisted_validation_data = dummy_pvd(); + candidate_receipt.descriptor.persisted_validation_data_hash = + persisted_validation_data.hash(); + let candidate = candidate_receipt.hash(); + let requested_peer = PeerId::random(); + + let identifier1 = request_manager + .get_or_insert(relay_parent, candidate, 1.into()) + .identifier + .clone(); + request_manager + .get_or_insert(relay_parent, candidate, 1.into()) + .add_peer(requested_peer); + let identifier2 = request_manager + .get_or_insert(relay_parent, candidate, 2.into()) + .identifier + .clone(); + request_manager + .get_or_insert(relay_parent, candidate, 2.into()) + .add_peer(requested_peer); + + assert_ne!(identifier1, identifier2); + assert_eq!(request_manager.requests.len(), 2); + + let group_size = 3; + let group = &[ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]; + + let unwanted_mask = StatementFilter::blank(group_size); + let request_properties = RequestProperties { unwanted_mask, backing_threshold: None }; + + 
// Get requests. + { + let request_props = + |_identifier: &CandidateIdentifier| Some((&request_properties).clone()); + let peer_advertised = |_identifier: &CandidateIdentifier, _peer: &_| { + Some(StatementFilter::full(group_size)) + }; + let outgoing = request_manager.next_request(request_props, peer_advertised).unwrap(); + assert_eq!(outgoing.payload.candidate_hash, candidate); + let outgoing = request_manager.next_request(request_props, peer_advertised).unwrap(); + assert_eq!(outgoing.payload.candidate_hash, candidate); + } + + // Validate first response. + { + let statements = vec![]; + let response = UnhandledResponse { + response: TaggedResponse { + identifier: identifier1, + requested_peer, + props: request_properties.clone(), + response: Ok(AttestedCandidateResponse { + candidate_receipt: candidate_receipt.clone(), + persisted_validation_data: persisted_validation_data.clone(), + statements, + }), + }, + }; + let validator_key_lookup = |_v| None; + let allowed_para_lookup = |_para, _g_index| true; + let statements = vec![]; + let output = response.validate_response( + &mut request_manager, + group, + 0, + validator_key_lookup, + allowed_para_lookup, + ); + assert_eq!( + output, + ResponseValidationOutput { + requested_peer, + request_status: CandidateRequestStatus::Complete { + candidate: candidate_receipt.clone(), + persisted_validation_data: persisted_validation_data.clone(), + statements, + }, + reputation_changes: vec![(requested_peer, BENEFIT_VALID_RESPONSE)], + } + ); + } + + // Try to validate second response. 
+ { + let statements = vec![]; + let response = UnhandledResponse { + response: TaggedResponse { + identifier: identifier2, + requested_peer, + props: request_properties, + response: Ok(AttestedCandidateResponse { + candidate_receipt: candidate_receipt.clone(), + persisted_validation_data: persisted_validation_data.clone(), + statements, + }), + }, + }; + let validator_key_lookup = |_v| None; + let allowed_para_lookup = |_para, _g_index| true; + let output = response.validate_response( + &mut request_manager, + group, + 0, + validator_key_lookup, + allowed_para_lookup, + ); + assert_eq!( + output, + ResponseValidationOutput { + requested_peer, + request_status: CandidateRequestStatus::Outdated, + reputation_changes: vec![], + } + ); + } + } + + // Test case where we had a request in-flight and the request entry was garbage-collected on + // outdated relay parent. + #[test] + fn handle_outdated_response_due_to_garbage_collection() { + let mut request_manager = RequestManager::new(); + + let relay_parent = Hash::from_low_u64_le(1); + let mut candidate_receipt = test_helpers::dummy_committed_candidate_receipt(relay_parent); + let persisted_validation_data = dummy_pvd(); + candidate_receipt.descriptor.persisted_validation_data_hash = + persisted_validation_data.hash(); + let candidate = candidate_receipt.hash(); + let requested_peer = PeerId::random(); + + let identifier = request_manager + .get_or_insert(relay_parent, candidate, 1.into()) + .identifier + .clone(); + request_manager + .get_or_insert(relay_parent, candidate, 1.into()) + .add_peer(requested_peer); + + let group_size = 3; + let group = &[ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]; + + let unwanted_mask = StatementFilter::blank(group_size); + let request_properties = RequestProperties { unwanted_mask, backing_threshold: None }; + let peer_advertised = + |_identifier: &CandidateIdentifier, _peer: &_| Some(StatementFilter::full(group_size)); + + // Get request once successfully. 
+ { + let request_props = + |_identifier: &CandidateIdentifier| Some((&request_properties).clone()); + let outgoing = request_manager.next_request(request_props, peer_advertised).unwrap(); + assert_eq!(outgoing.payload.candidate_hash, candidate); + } + + // Garbage collect based on relay parent. + request_manager.remove_by_relay_parent(relay_parent); + + // Try to validate response. + { + let statements = vec![]; + let response = UnhandledResponse { + response: TaggedResponse { + identifier, + requested_peer, + props: request_properties, + response: Ok(AttestedCandidateResponse { + candidate_receipt: candidate_receipt.clone(), + persisted_validation_data: persisted_validation_data.clone(), + statements, + }), + }, + }; + let validator_key_lookup = |_v| None; + let allowed_para_lookup = |_para, _g_index| true; + let output = response.validate_response( + &mut request_manager, + group, + 0, + validator_key_lookup, + allowed_para_lookup, + ); + assert_eq!( + output, + ResponseValidationOutput { + requested_peer, + request_status: CandidateRequestStatus::Outdated, + reputation_changes: vec![], + } + ); + } + } + + #[test] + fn should_clean_up_after_successful_requests() { + let mut request_manager = RequestManager::new(); + + let relay_parent = Hash::from_low_u64_le(1); + let mut candidate_receipt = test_helpers::dummy_committed_candidate_receipt(relay_parent); + let persisted_validation_data = dummy_pvd(); + candidate_receipt.descriptor.persisted_validation_data_hash = + persisted_validation_data.hash(); + let candidate = candidate_receipt.hash(); + let requested_peer = PeerId::random(); - // TODO [now]: test that successful requests lead to clean up. 
+ let identifier = request_manager + .get_or_insert(relay_parent, candidate, 1.into()) + .identifier + .clone(); + request_manager + .get_or_insert(relay_parent, candidate, 1.into()) + .add_peer(requested_peer); + + assert_eq!(request_manager.requests.len(), 1); + assert_eq!(request_manager.by_priority.len(), 1); + + let group_size = 3; + let group = &[ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]; + + let unwanted_mask = StatementFilter::blank(group_size); + let request_properties = RequestProperties { unwanted_mask, backing_threshold: None }; + let peer_advertised = + |_identifier: &CandidateIdentifier, _peer: &_| Some(StatementFilter::full(group_size)); + + // Get request once successfully. + { + let request_props = + |_identifier: &CandidateIdentifier| Some((&request_properties).clone()); + let outgoing = request_manager.next_request(request_props, peer_advertised).unwrap(); + assert_eq!(outgoing.payload.candidate_hash, candidate); + } + + // Validate response. + { + let statements = vec![]; + let response = UnhandledResponse { + response: TaggedResponse { + identifier, + requested_peer, + props: request_properties.clone(), + response: Ok(AttestedCandidateResponse { + candidate_receipt: candidate_receipt.clone(), + persisted_validation_data: persisted_validation_data.clone(), + statements, + }), + }, + }; + let validator_key_lookup = |_v| None; + let allowed_para_lookup = |_para, _g_index| true; + let statements = vec![]; + let output = response.validate_response( + &mut request_manager, + group, + 0, + validator_key_lookup, + allowed_para_lookup, + ); + assert_eq!( + output, + ResponseValidationOutput { + requested_peer, + request_status: CandidateRequestStatus::Complete { + candidate: candidate_receipt.clone(), + persisted_validation_data: persisted_validation_data.clone(), + statements, + }, + reputation_changes: vec![(requested_peer, BENEFIT_VALID_RESPONSE)], + } + ); + } + + // Ensure that cleanup occurred. 
+ assert_eq!(request_manager.requests.len(), 0); + assert_eq!(request_manager.by_priority.len(), 0); + } } From 89d8d42a747161f322bd83d70be8728e41d10ac2 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 15 Feb 2023 20:59:43 -0700 Subject: [PATCH 169/220] remove outdated inventory terminology --- node/network/protocol/src/lib.rs | 3 ++- .../statement-distribution/src/vstaging/mod.rs | 12 ++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index ca7a3d677832..69966c6e0edb 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -683,7 +683,8 @@ pub mod vstaging { } } - /// An inventory of a backed candidate, which can be requested. + /// A manifest of a known backed candidate, along with a description + /// of the statements backing it. #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] pub struct BackedCandidateManifest { /// The relay-parent of the candidate. 
diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 0d2b79069e86..93c943b52416 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -1619,14 +1619,14 @@ async fn provide_candidate_to_grid( statement_knowledge: filter.clone(), }; - let inventory_message = Versioned::VStaging( + let manifest_message = Versioned::VStaging( protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), ); let ack_message = Versioned::VStaging( protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), ); - let mut inventory_peers = Vec::new(); + let mut manifest_peers = Vec::new(); let mut ack_peers = Vec::new(); let mut post_statements = Vec::new(); @@ -1642,7 +1642,7 @@ async fn provide_candidate_to_grid( }; match action { - grid::ManifestKind::Full => inventory_peers.push(p), + grid::ManifestKind::Full => manifest_peers.push(p), grid::ManifestKind::Acknowledgement => ack_peers.push(p), } @@ -1667,10 +1667,10 @@ async fn provide_candidate_to_grid( ); } - if !inventory_peers.is_empty() { + if !manifest_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - inventory_peers, - inventory_message.into(), + manifest_peers, + manifest_message.into(), )) .await; } From 0b2e958cb9a2356b04d29200ea649b9bf2e872b5 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Thu, 16 Feb 2023 08:54:16 +0100 Subject: [PATCH 170/220] Async backing statement distribution: `Candidates` tests (#6658) --- Cargo.lock | 12 +- node/core/prospective-parachains/src/tests.rs | 101 +-- .../network/statement-distribution/Cargo.toml | 2 +- .../src/vstaging/candidates.rs | 699 +++++++++++++++++- primitives/test-helpers/src/lib.rs | 46 +- 5 files changed, 782 insertions(+), 78 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eee6ffdc4d74..855f0565d0ff 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -8031,14 +8031,14 @@ dependencies = [ [[package]] name = "pretty_assertions" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c89f989ac94207d048d92db058e4f6ec7342b0971fc58d1271ca148b799b3563" +checksum = "a25e9bcb20aa780fd0bb16b72403a9064d6b3f22f026946029acb941a50af755" dependencies = [ - "ansi_term", "ctor", "diff", "output_vt100", + "yansi", ] [[package]] @@ -13822,6 +13822,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + [[package]] name = "yasna" version = "0.5.1" diff --git a/node/core/prospective-parachains/src/tests.rs b/node/core/prospective-parachains/src/tests.rs index ef03789ce101..71e0367e8d62 100644 --- a/node/core/prospective-parachains/src/tests.rs +++ b/node/core/prospective-parachains/src/tests.rs @@ -15,7 +15,6 @@ // along with Polkadot. If not, see . 
use super::*; -use ::polkadot_primitives_test_helpers::{dummy_candidate_receipt_bad_sig, dummy_hash}; use assert_matches::assert_matches; use polkadot_node_subsystem::{ errors::RuntimeApiError, @@ -27,12 +26,11 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_types::{jaeger, ActivatedLeaf, LeafStatus}; use polkadot_primitives::{ - v2::{ - CandidateCommitments, HeadData, Header, PersistedValidationData, ScheduledCore, - ValidationCodeHash, - }, vstaging::{AsyncBackingParameters, Constraints, InboundHrmpLimitations}, + CommittedCandidateReceipt, HeadData, Header, PersistedValidationData, ScheduledCore, + ValidationCodeHash, }; +use polkadot_primitives_test_helpers::make_candidate; use std::sync::Arc; const ALLOWED_ANCESTRY_LEN: u32 = 3; @@ -70,42 +68,6 @@ fn dummy_constraints( } } -fn dummy_pvd(parent_head: HeadData, relay_parent_number: u32) -> PersistedValidationData { - PersistedValidationData { - parent_head, - relay_parent_number, - max_pov_size: MAX_POV_SIZE, - relay_parent_storage_root: dummy_hash(), - } -} - -fn make_candidate( - leaf: &TestLeaf, - para_id: ParaId, - parent_head: HeadData, - head_data: HeadData, - validation_code_hash: ValidationCodeHash, -) -> (CommittedCandidateReceipt, PersistedValidationData) { - let pvd = dummy_pvd(parent_head, leaf.number); - let commitments = CandidateCommitments { - head_data, - horizontal_messages: Vec::new(), - upward_messages: Vec::new(), - new_validation_code: None, - processed_downward_messages: 0, - hrmp_watermark: leaf.number, - }; - - let mut candidate = dummy_candidate_receipt_bad_sig(leaf.hash, Some(Default::default())); - candidate.commitments_hash = commitments.hash(); - candidate.descriptor.para_id = para_id; - candidate.descriptor.persisted_validation_data_hash = pvd.hash(); - candidate.descriptor.validation_code_hash = validation_code_hash; - let candidate = CommittedCandidateReceipt { descriptor: candidate.descriptor, commitments }; 
- - (candidate, pvd) -} - struct TestState { availability_cores: Vec, validation_code_hash: ValidationCodeHash, @@ -539,7 +501,8 @@ fn send_candidates_and_check_if_found() { // Candidate A1 let (candidate_a1, pvd_a1) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1, 2, 3]), HeadData(vec![1]), @@ -550,7 +513,8 @@ fn send_candidates_and_check_if_found() { // Candidate A2 let (candidate_a2, pvd_a2) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 2.into(), HeadData(vec![2, 3, 4]), HeadData(vec![2]), @@ -561,7 +525,8 @@ fn send_candidates_and_check_if_found() { // Candidate B let (candidate_b, pvd_b) = make_candidate( - &leaf_b, + leaf_b.hash, + leaf_b.number, 1.into(), HeadData(vec![3, 4, 5]), HeadData(vec![3]), @@ -572,7 +537,8 @@ fn send_candidates_and_check_if_found() { // Candidate C let (candidate_c, pvd_c) = make_candidate( - &leaf_c, + leaf_c.hash, + leaf_c.number, 2.into(), HeadData(vec![6, 7, 8]), HeadData(vec![4]), @@ -649,7 +615,8 @@ fn check_candidate_parent_leaving_view() { // Candidate A1 let (candidate_a1, pvd_a1) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1, 2, 3]), HeadData(vec![1]), @@ -660,7 +627,8 @@ fn check_candidate_parent_leaving_view() { // Candidate A2 let (candidate_a2, pvd_a2) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 2.into(), HeadData(vec![2, 3, 4]), HeadData(vec![2]), @@ -671,7 +639,8 @@ fn check_candidate_parent_leaving_view() { // Candidate B let (candidate_b, pvd_b) = make_candidate( - &leaf_b, + leaf_b.hash, + leaf_b.number, 1.into(), HeadData(vec![3, 4, 5]), HeadData(vec![3]), @@ -682,7 +651,8 @@ fn check_candidate_parent_leaving_view() { // Candidate C let (candidate_c, pvd_c) = make_candidate( - &leaf_c, + leaf_c.hash, + leaf_c.number, 2.into(), HeadData(vec![6, 7, 8]), HeadData(vec![4]), @@ -771,7 +741,8 @@ fn check_candidate_on_multiple_forks() { // Candidate on leaf A. 
let (candidate_a, pvd_a) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1, 2, 3]), HeadData(vec![1]), @@ -782,7 +753,8 @@ fn check_candidate_on_multiple_forks() { // Candidate on leaf B. let (candidate_b, pvd_b) = make_candidate( - &leaf_b, + leaf_b.hash, + leaf_b.number, 1.into(), HeadData(vec![3, 4, 5]), HeadData(vec![1]), @@ -793,7 +765,8 @@ fn check_candidate_on_multiple_forks() { // Candidate on leaf C. let (candidate_c, pvd_c) = make_candidate( - &leaf_c, + leaf_c.hash, + leaf_c.number, 1.into(), HeadData(vec![5, 6, 7]), HeadData(vec![1]), @@ -860,7 +833,8 @@ fn check_backable_query() { // Candidate A let (candidate_a, pvd_a) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1, 2, 3]), HeadData(vec![1]), @@ -871,7 +845,8 @@ fn check_backable_query() { // Candidate B let (mut candidate_b, pvd_b) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1]), HeadData(vec![2]), @@ -970,7 +945,8 @@ fn check_depth_query() { // Candidate A. let (candidate_a, pvd_a) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1, 2, 3]), HeadData(vec![1]), @@ -981,7 +957,8 @@ fn check_depth_query() { // Candidate B. let (candidate_b, pvd_b) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1]), HeadData(vec![2]), @@ -992,7 +969,8 @@ fn check_depth_query() { // Candidate C. let (candidate_c, pvd_c) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![2]), HeadData(vec![3]), @@ -1120,7 +1098,8 @@ fn check_pvd_query() { // Candidate A. let (candidate_a, pvd_a) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1, 2, 3]), HeadData(vec![1]), @@ -1130,7 +1109,8 @@ fn check_pvd_query() { // Candidate B. 
let (candidate_b, pvd_b) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1]), HeadData(vec![2]), @@ -1140,7 +1120,8 @@ fn check_pvd_query() { // Candidate C. let (candidate_c, pvd_c) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![2]), HeadData(vec![3]), diff --git a/node/network/statement-distribution/Cargo.toml b/node/network/statement-distribution/Cargo.toml index 4c3aba82e189..b89c6363ec19 100644 --- a/node/network/statement-distribution/Cargo.toml +++ b/node/network/statement-distribution/Cargo.toml @@ -23,8 +23,8 @@ fatality = "0.0.6" bitvec = "1" [dev-dependencies] -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } assert_matches = "1.4.0" +polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index b356fd6a8ab0..11deeed42fef 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -20,7 +20,7 @@ //! Due to the request-oriented nature of this protocol, we often learn //! about candidates just as a hash, alongside claimed properties that the //! receipt would commit to. However, it is only later on that we can -//! confirm those claimed properties. This store lets us keep track of the +//! confirm those claimed properties. This store lets us keep track of //! all candidates which are currently 'relevant' after spam-protection, and //! gives us the ability to detect mis-advertisements after the fact //! 
and punish them accordingly. @@ -42,7 +42,7 @@ use std::{ /// This encapsulates the correct and incorrect advertisers /// post-confirmation of a candidate. -#[derive(Default)] +#[derive(Debug, Default, PartialEq)] pub struct PostConfirmationReckoning { /// Peers which advertised correctly. pub correct: HashSet, @@ -51,6 +51,7 @@ pub struct PostConfirmationReckoning { } /// Outputs generated by initial confirmation of a candidate. +#[derive(Debug, PartialEq)] pub struct PostConfirmation { /// The hypothetical candidate used to determine importability and membership /// in the hypothetical frontier. @@ -139,10 +140,12 @@ impl Candidates { } /// Note that a candidate has been confirmed. If the candidate has just been - /// confirmed, then this returns `Some`. Otherwise, `None`. + /// confirmed (previous state was `Unconfirmed`), then this returns `Some`. Otherwise, `None`. + /// + /// If we are confirming for the first time, then remove any outdated claims, and generate a + /// reckoning of which peers advertised correctly and incorrectly. /// - /// This does no sanity-checking of input data, and will overwrite - /// already-confirmed canidates. + /// This does no sanity-checking of input data, and will overwrite already-confirmed candidates. pub fn confirm_candidate( &mut self, candidate_hash: CandidateHash, @@ -355,9 +358,10 @@ impl Candidates { } /// A bad advertisement was recognized. -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub struct BadAdvertisement; +#[derive(Debug, PartialEq)] enum CandidateState { Unconfirmed(UnconfirmedCandidate), Confirmed(ConfirmedCandidate), @@ -390,7 +394,7 @@ impl CandidateClaims { } // properties of an unconfirmed but hypothetically importable candidate. 
-#[derive(Hash, PartialEq, Eq)] +#[derive(Debug, Hash, PartialEq, Eq)] struct UnconfirmedImportable { relay_parent: Hash, parent_hash: Hash, @@ -400,6 +404,7 @@ struct UnconfirmedImportable { // An unconfirmed candidate may have have been advertised under // multiple identifiers. We track here, on the basis of unique identifier, // the peers which advertised each candidate in a specific way. +#[derive(Debug, PartialEq)] struct UnconfirmedCandidate { claims: Vec<(PeerId, CandidateClaims)>, // ref-counted @@ -506,6 +511,7 @@ impl UnconfirmedCandidate { } /// A confirmed candidate. +#[derive(Debug, PartialEq)] pub struct ConfirmedCandidate { receipt: Arc, persisted_validation_data: PersistedValidationData, @@ -567,12 +573,681 @@ impl ConfirmedCandidate { #[cfg(test)] mod tests { use super::*; + use polkadot_primitives::HeadData; + use polkadot_primitives_test_helpers::make_candidate; + + #[test] + fn inserting_unconfirmed_rejects_on_incompatible_claims() { + let relay_head_data_a = HeadData(vec![1, 2, 3]); + let relay_head_data_b = HeadData(vec![4, 5, 6]); + let relay_hash_a = relay_head_data_a.hash(); + let relay_hash_b = relay_head_data_b.hash(); + + let para_id_a = 1.into(); + let para_id_b = 2.into(); + + let (candidate_a, pvd_a) = make_candidate( + relay_hash_a, + 1, + para_id_a, + relay_head_data_a, + HeadData(vec![1]), + Hash::from_low_u64_be(1000).into(), + ); + + let candidate_hash_a = candidate_a.hash(); + + let peer = PeerId::random(); + + let group_index_a = 100.into(); + let group_index_b = 200.into(); + + let mut candidates = Candidates::default(); + + // Confirm a candidate first. + candidates.confirm_candidate(candidate_hash_a, candidate_a, pvd_a, group_index_a); + + // Relay parent does not match. + assert_eq!( + candidates.insert_unconfirmed( + peer, + candidate_hash_a, + relay_hash_b, + group_index_a, + Some((relay_hash_a, para_id_a)), + ), + Err(BadAdvertisement) + ); + + // Group index does not match. 
+ assert_eq!( + candidates.insert_unconfirmed( + peer, + candidate_hash_a, + relay_hash_a, + group_index_b, + Some((relay_hash_a, para_id_a)), + ), + Err(BadAdvertisement) + ); + + // Parent head data does not match. + assert_eq!( + candidates.insert_unconfirmed( + peer, + candidate_hash_a, + relay_hash_a, + group_index_a, + Some((relay_hash_b, para_id_a)), + ), + Err(BadAdvertisement) + ); + + // Para ID does not match. + assert_eq!( + candidates.insert_unconfirmed( + peer, + candidate_hash_a, + relay_hash_a, + group_index_a, + Some((relay_hash_a, para_id_b)), + ), + Err(BadAdvertisement) + ); + + // Everything matches. + assert_eq!( + candidates.insert_unconfirmed( + peer, + candidate_hash_a, + relay_hash_a, + group_index_a, + Some((relay_hash_a, para_id_a)), + ), + Ok(()) + ); + } + + // Tests that: + // + // - When the advertisement matches, confirming does not change the parent hash index. + // - When it doesn't match, confirming updates the index. Specifically, confirming should prune + // unconfirmed claims. 
+ #[test] + fn confirming_maintains_parent_hash_index() { + let relay_head_data = HeadData(vec![1, 2, 3]); + let relay_hash = relay_head_data.hash(); + + let candidate_head_data_a = HeadData(vec![1]); + let candidate_head_data_b = HeadData(vec![2]); + let candidate_head_data_c = HeadData(vec![3]); + let candidate_head_data_d = HeadData(vec![4]); + let candidate_head_data_hash_a = candidate_head_data_a.hash(); + let candidate_head_data_hash_b = candidate_head_data_b.hash(); + let candidate_head_data_hash_c = candidate_head_data_c.hash(); + let candidate_head_data_hash_d = candidate_head_data_d.hash(); + + let (candidate_a, pvd_a) = make_candidate( + relay_hash, + 1, + 1.into(), + relay_head_data, + candidate_head_data_a.clone(), + Hash::from_low_u64_be(1000).into(), + ); + let (candidate_b, pvd_b) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_a, + candidate_head_data_b.clone(), + Hash::from_low_u64_be(2000).into(), + ); + let (candidate_c, pvd_c) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_b.clone(), + candidate_head_data_c.clone(), + Hash::from_low_u64_be(3000).into(), + ); + let (candidate_d, pvd_d) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_c.clone(), + candidate_head_data_d, + Hash::from_low_u64_be(4000).into(), + ); + + let candidate_hash_a = candidate_a.hash(); + let candidate_hash_b = candidate_b.hash(); + let candidate_hash_c = candidate_c.hash(); + let candidate_hash_d = candidate_d.hash(); + + let peer = PeerId::random(); + let group_index = 100.into(); + + let mut candidates = Candidates::default(); + + // Insert some unconfirmed candidates. + + // Advertise A without parent hash. + candidates.insert_unconfirmed(peer, candidate_hash_a, relay_hash, group_index, None); + assert_eq!(candidates.by_parent, HashMap::default()); + + // Advertise A with parent hash and ID. 
+ candidates.insert_unconfirmed( + peer, + candidate_hash_a, + relay_hash, + group_index, + Some((relay_hash, 1.into())), + ); + assert_eq!( + candidates.by_parent, + HashMap::from([((relay_hash, 1.into()), HashSet::from([candidate_hash_a]))]) + ); + + // Advertise B with parent A. + candidates.insert_unconfirmed( + peer, + candidate_hash_b, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ); + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ((candidate_head_data_hash_a, 1.into()), HashSet::from([candidate_hash_b])) + ]) + ); + + // Advertise C with parent A. + candidates.insert_unconfirmed( + peer, + candidate_hash_c, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ); + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ( + (candidate_head_data_hash_a, 1.into()), + HashSet::from([candidate_hash_b, candidate_hash_c]) + ) + ]) + ); + + // Advertise D with parent A. + candidates.insert_unconfirmed( + peer, + candidate_hash_d, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ); + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ( + (candidate_head_data_hash_a, 1.into()), + HashSet::from([candidate_hash_b, candidate_hash_c, candidate_hash_d]) + ) + ]) + ); + + // Insert confirmed candidates and check parent hash index. + + // Confirmation matches advertisement. Index should be unchanged. 
+ candidates.confirm_candidate(candidate_hash_a, candidate_a, pvd_a, group_index); + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ( + (candidate_head_data_hash_a, 1.into()), + HashSet::from([candidate_hash_b, candidate_hash_c, candidate_hash_d]) + ) + ]) + ); + candidates.confirm_candidate(candidate_hash_b, candidate_b, pvd_b, group_index); + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ( + (candidate_head_data_hash_a, 1.into()), + HashSet::from([candidate_hash_b, candidate_hash_c, candidate_hash_d]) + ) + ]) + ); + + // Confirmation does not match advertisement. Index should be updated. + candidates.confirm_candidate(candidate_hash_d, candidate_d, pvd_d, group_index); + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ( + (candidate_head_data_hash_a, 1.into()), + HashSet::from([candidate_hash_b, candidate_hash_c]) + ), + ((candidate_head_data_hash_c, 1.into()), HashSet::from([candidate_hash_d])) + ]) + ); + + // Make a new candidate for C with a different para ID. 
+ let (new_candidate_c, new_pvd_c) = make_candidate( + relay_hash, + 1, + 2.into(), + candidate_head_data_b, + candidate_head_data_c.clone(), + Hash::from_low_u64_be(3000).into(), + ); + candidates.confirm_candidate(candidate_hash_c, new_candidate_c, new_pvd_c, group_index); + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ((candidate_head_data_hash_a, 1.into()), HashSet::from([candidate_hash_b])), + ((candidate_head_data_hash_b, 2.into()), HashSet::from([candidate_hash_c])), + ((candidate_head_data_hash_c, 1.into()), HashSet::from([candidate_hash_d])) + ]) + ); + } + + #[test] + fn test_returned_post_confirmation() { + let relay_head_data = HeadData(vec![1, 2, 3]); + let relay_hash = relay_head_data.hash(); + + let candidate_head_data_a = HeadData(vec![1]); + let candidate_head_data_b = HeadData(vec![2]); + let candidate_head_data_c = HeadData(vec![3]); + let candidate_head_data_d = HeadData(vec![4]); + let candidate_head_data_hash_a = candidate_head_data_a.hash(); + let candidate_head_data_hash_b = candidate_head_data_b.hash(); + let candidate_head_data_hash_c = candidate_head_data_c.hash(); + let candidate_head_data_hash_d = candidate_head_data_d.hash(); + + let (candidate_a, pvd_a) = make_candidate( + relay_hash, + 1, + 1.into(), + relay_head_data, + candidate_head_data_a.clone(), + Hash::from_low_u64_be(1000).into(), + ); + let (candidate_b, pvd_b) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_a.clone(), + candidate_head_data_b.clone(), + Hash::from_low_u64_be(2000).into(), + ); + let (candidate_c, pvd_c) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_a.clone(), + candidate_head_data_c.clone(), + Hash::from_low_u64_be(3000).into(), + ); + let (candidate_d, pvd_d) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_b.clone(), + candidate_head_data_d, + Hash::from_low_u64_be(4000).into(), + ); + + let candidate_hash_a = 
candidate_a.hash(); + let candidate_hash_b = candidate_b.hash(); + let candidate_hash_c = candidate_c.hash(); + let candidate_hash_d = candidate_d.hash(); + + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + + let group_index = 100.into(); - // TODO [now]: test that inserting unconfirmed rejects if claims are - // incomptable. + let mut candidates = Candidates::default(); - // TODO [now]: test that confirming correctly maintains the parent hash index + // Insert some unconfirmed candidates. - // TODO [now]: test that pruning unconfirmed claims correctly maintains the parent hash - // index + // Advertise A without parent hash. + candidates.insert_unconfirmed(peer_a, candidate_hash_a, relay_hash, group_index, None); + + // Advertise A with parent hash and ID. + candidates.insert_unconfirmed( + peer_a, + candidate_hash_a, + relay_hash, + group_index, + Some((relay_hash, 1.into())), + ); + + // (Correctly) advertise B with parent A. Do it from a couple of peers. + candidates.insert_unconfirmed( + peer_a, + candidate_hash_b, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ); + candidates.insert_unconfirmed( + peer_b, + candidate_hash_b, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ); + + // (Wrongly) advertise C with parent A. Do it from a couple peers. + candidates.insert_unconfirmed( + peer_b, + candidate_hash_c, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ); + candidates.insert_unconfirmed( + peer_c, + candidate_hash_c, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ); + + // Advertise D. Do it correctly from one peer (parent B) and wrongly from another (parent A). 
+ candidates.insert_unconfirmed( + peer_c, + candidate_hash_d, + relay_hash, + group_index, + Some((candidate_head_data_hash_b, 1.into())), + ); + candidates.insert_unconfirmed( + peer_d, + candidate_hash_d, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ); + + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ( + (candidate_head_data_hash_a, 1.into()), + HashSet::from([candidate_hash_b, candidate_hash_c, candidate_hash_d]) + ), + ((candidate_head_data_hash_b, 1.into()), HashSet::from([candidate_hash_d])) + ]) + ); + + // Insert confirmed candidates and check parent hash index. + + // Confirmation matches advertisement. + let post_confirmation = candidates.confirm_candidate( + candidate_hash_a, + candidate_a.clone(), + pvd_a.clone(), + group_index, + ); + assert_eq!( + post_confirmation, + Some(PostConfirmation { + hypothetical: HypotheticalCandidate::Complete { + candidate_hash: candidate_hash_a, + receipt: Arc::new(candidate_a), + persisted_validation_data: pvd_a, + }, + reckoning: PostConfirmationReckoning { + correct: HashSet::from([peer_a]), + incorrect: HashSet::from([]), + }, + }) + ); + + let post_confirmation = candidates.confirm_candidate( + candidate_hash_b, + candidate_b.clone(), + pvd_b.clone(), + group_index, + ); + assert_eq!( + post_confirmation, + Some(PostConfirmation { + hypothetical: HypotheticalCandidate::Complete { + candidate_hash: candidate_hash_b, + receipt: Arc::new(candidate_b), + persisted_validation_data: pvd_b, + }, + reckoning: PostConfirmationReckoning { + correct: HashSet::from([peer_a, peer_b]), + incorrect: HashSet::from([]), + }, + }) + ); + + // Confirm candidate with two wrong peers (different group index). 
+ let (new_candidate_c, new_pvd_c) = make_candidate( + relay_hash, + 1, + 2.into(), + candidate_head_data_b, + candidate_head_data_c.clone(), + Hash::from_low_u64_be(3000).into(), + ); + let post_confirmation = candidates.confirm_candidate( + candidate_hash_c, + new_candidate_c.clone(), + new_pvd_c.clone(), + group_index, + ); + assert_eq!( + post_confirmation, + Some(PostConfirmation { + hypothetical: HypotheticalCandidate::Complete { + candidate_hash: candidate_hash_c, + receipt: Arc::new(new_candidate_c), + persisted_validation_data: new_pvd_c, + }, + reckoning: PostConfirmationReckoning { + correct: HashSet::from([]), + incorrect: HashSet::from([peer_b, peer_c]), + }, + }) + ); + + // Confirm candidate with one wrong peer (different parent head data). + let post_confirmation = candidates.confirm_candidate( + candidate_hash_d, + candidate_d.clone(), + pvd_d.clone(), + group_index, + ); + assert_eq!( + post_confirmation, + Some(PostConfirmation { + hypothetical: HypotheticalCandidate::Complete { + candidate_hash: candidate_hash_d, + receipt: Arc::new(candidate_d), + persisted_validation_data: pvd_d, + }, + reckoning: PostConfirmationReckoning { + correct: HashSet::from([peer_c]), + incorrect: HashSet::from([peer_d]), + }, + }) + ); + } + + #[test] + fn test_hypothetical_frontiers() { + let relay_head_data = HeadData(vec![1, 2, 3]); + let relay_hash = relay_head_data.hash(); + + let candidate_head_data_a = HeadData(vec![1]); + let candidate_head_data_b = HeadData(vec![2]); + let candidate_head_data_c = HeadData(vec![3]); + let candidate_head_data_d = HeadData(vec![4]); + let candidate_head_data_hash_a = candidate_head_data_a.hash(); + let candidate_head_data_hash_b = candidate_head_data_b.hash(); + let candidate_head_data_hash_c = candidate_head_data_c.hash(); + let candidate_head_data_hash_d = candidate_head_data_d.hash(); + + let (candidate_a, pvd_a) = make_candidate( + relay_hash, + 1, + 1.into(), + relay_head_data, + candidate_head_data_a.clone(), + 
Hash::from_low_u64_be(1000).into(), + ); + let (candidate_b, pvd_b) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_a.clone(), + candidate_head_data_b.clone(), + Hash::from_low_u64_be(2000).into(), + ); + let (candidate_c, pvd_c) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_a.clone(), + candidate_head_data_c.clone(), + Hash::from_low_u64_be(3000).into(), + ); + let (candidate_d, pvd_d) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_b.clone(), + candidate_head_data_d, + Hash::from_low_u64_be(4000).into(), + ); + + let candidate_hash_a = candidate_a.hash(); + let candidate_hash_b = candidate_b.hash(); + let candidate_hash_c = candidate_c.hash(); + let candidate_hash_d = candidate_d.hash(); + + let peer = PeerId::random(); + let group_index = 100.into(); + + let mut candidates = Candidates::default(); + + // Confirm A. + candidates.confirm_candidate( + candidate_hash_a, + candidate_a.clone(), + pvd_a.clone(), + group_index, + ); + + // Advertise B with parent A. + candidates.insert_unconfirmed( + peer, + candidate_hash_b, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ); + + // Advertise C with parent A. + candidates.insert_unconfirmed( + peer, + candidate_hash_c, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ); + + // Advertise D with parent B. 
+ candidates.insert_unconfirmed( + peer, + candidate_hash_d, + relay_hash, + group_index, + Some((candidate_head_data_hash_b, 1.into())), + ); + + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ( + (candidate_head_data_hash_a, 1.into()), + HashSet::from([candidate_hash_b, candidate_hash_c]) + ), + ((candidate_head_data_hash_b, 1.into()), HashSet::from([candidate_hash_d])) + ]) + ); + + let hypothetical_a = HypotheticalCandidate::Complete { + candidate_hash: candidate_hash_a, + receipt: Arc::new(candidate_a), + persisted_validation_data: pvd_a, + }; + let hypothetical_b = HypotheticalCandidate::Incomplete { + candidate_hash: candidate_hash_b, + candidate_para: 1.into(), + parent_head_data_hash: candidate_head_data_hash_a, + candidate_relay_parent: relay_hash, + }; + let hypothetical_c = HypotheticalCandidate::Incomplete { + candidate_hash: candidate_hash_c, + candidate_para: 1.into(), + parent_head_data_hash: candidate_head_data_hash_a, + candidate_relay_parent: relay_hash, + }; + let hypothetical_d = HypotheticalCandidate::Incomplete { + candidate_hash: candidate_hash_d, + candidate_para: 1.into(), + parent_head_data_hash: candidate_head_data_hash_b, + candidate_relay_parent: relay_hash, + }; + + let hypotheticals = candidates.frontier_hypotheticals(Some((relay_hash, 1.into()))); + assert_eq!(hypotheticals.len(), 1); + assert!(hypotheticals.contains(&hypothetical_a)); + + let hypotheticals = + candidates.frontier_hypotheticals(Some((candidate_head_data_hash_a, 2.into()))); + assert_eq!(hypotheticals.len(), 0); + + let hypotheticals = + candidates.frontier_hypotheticals(Some((candidate_head_data_hash_a, 1.into()))); + assert_eq!(hypotheticals.len(), 2); + assert!(hypotheticals.contains(&hypothetical_b)); + assert!(hypotheticals.contains(&hypothetical_c)); + + let hypotheticals = + candidates.frontier_hypotheticals(Some((candidate_head_data_hash_d, 1.into()))); + assert_eq!(hypotheticals.len(), 
0); + + let hypotheticals = candidates.frontier_hypotheticals(None); + assert_eq!(hypotheticals.len(), 4); + assert!(hypotheticals.contains(&hypothetical_a)); + assert!(hypotheticals.contains(&hypothetical_b)); + assert!(hypotheticals.contains(&hypothetical_c)); + assert!(hypotheticals.contains(&hypothetical_d)); + } } diff --git a/primitives/test-helpers/src/lib.rs b/primitives/test-helpers/src/lib.rs index e734caeb35ba..c15f7c826122 100644 --- a/primitives/test-helpers/src/lib.rs +++ b/primitives/test-helpers/src/lib.rs @@ -23,14 +23,16 @@ //! contain randomness based data. use polkadot_primitives::{ CandidateCommitments, CandidateDescriptor, CandidateReceipt, CollatorId, CollatorSignature, - CommittedCandidateReceipt, Hash, HeadData, Id as ParaId, ValidationCode, ValidationCodeHash, - ValidatorId, + CommittedCandidateReceipt, Hash, HeadData, Id as ParaId, PersistedValidationData, + ValidationCode, ValidationCodeHash, ValidatorId, }; pub use rand; use sp_application_crypto::sr25519; use sp_keyring::Sr25519Keyring; use sp_runtime::generic::Digest; +const MAX_POV_SIZE: u32 = 1_000_000; + /// Creates a candidate receipt with filler data. pub fn dummy_candidate_receipt>(relay_parent: H) -> CandidateReceipt { CandidateReceipt:: { @@ -146,6 +148,46 @@ pub fn dummy_collator_signature() -> CollatorSignature { CollatorSignature::from(sr25519::Signature([0u8; 64])) } +/// Create a meaningless persisted validation data. +pub fn dummy_pvd(parent_head: HeadData, relay_parent_number: u32) -> PersistedValidationData { + PersistedValidationData { + parent_head, + relay_parent_number, + max_pov_size: MAX_POV_SIZE, + relay_parent_storage_root: dummy_hash(), + } +} + +/// Create a meaningless candidate, returning its receipt and PVD. 
+pub fn make_candidate( + relay_parent_hash: Hash, + relay_parent_number: u32, + para_id: ParaId, + parent_head: HeadData, + head_data: HeadData, + validation_code_hash: ValidationCodeHash, +) -> (CommittedCandidateReceipt, PersistedValidationData) { + let pvd = dummy_pvd(parent_head, relay_parent_number); + let commitments = CandidateCommitments { + head_data, + horizontal_messages: Vec::new(), + upward_messages: Vec::new(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: relay_parent_number, + }; + + let mut candidate = + dummy_candidate_receipt_bad_sig(relay_parent_hash, Some(Default::default())); + candidate.commitments_hash = commitments.hash(); + candidate.descriptor.para_id = para_id; + candidate.descriptor.persisted_validation_data_hash = pvd.hash(); + candidate.descriptor.validation_code_hash = validation_code_hash; + let candidate = CommittedCandidateReceipt { descriptor: candidate.descriptor, commitments }; + + (candidate, pvd) +} + /// Create a new candidate descriptor, and apply a valid signature /// using the provided `collator` key. 
pub fn make_valid_candidate_descriptor>( From 537987ec0b253c0628fad97f2136b625ce7d1292 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Fri, 17 Feb 2023 19:39:27 +0100 Subject: [PATCH 171/220] Async Backing: Fix clippy errors in statement distribution branch (#6720) * Integrate `handle_active_leaves_update` * Integrate `share_local_statement`/`handle_backed_candidate_message` * Start hooking up request/response flow * Finish hooking up request/response flow * Limit number of parallel requests in responder * Fix test compilation errors * Fix missing check for prospective parachains mode * Fix some more compile errors * Async Backing: Fix clippy errors in statement distribution branch * Fix some more clippy lints --- .../src/legacy_v1/mod.rs | 2 +- .../network/statement-distribution/src/lib.rs | 12 ++--- .../src/vstaging/candidates.rs | 2 +- .../src/vstaging/grid.rs | 16 +++---- .../src/vstaging/mod.rs | 48 +++++++++---------- .../src/vstaging/requests.rs | 34 ++++++------- 6 files changed, 50 insertions(+), 64 deletions(-) diff --git a/node/network/statement-distribution/src/legacy_v1/mod.rs b/node/network/statement-distribution/src/legacy_v1/mod.rs index f5c174f28184..5cedf9677976 100644 --- a/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -103,7 +103,7 @@ const MAX_LARGE_STATEMENTS_PER_SENDER: usize = 20; /// Overall state of the legacy-v1 portion of the subsystem. 
pub(crate) struct State { - peers: HashMap, + pub peers: HashMap, topology_storage: SessionBoundGridTopologyStorage, authorities: HashMap, active_heads: HashMap, diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index aeac6db42bc5..521ccb8fc02a 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -280,10 +280,7 @@ impl StatementDistributionSubsystem { .await?; } else if let ProspectiveParachainsMode::Disabled = mode { for deactivated in &deactivated { - crate::legacy_v1::handle_deactivate_leaf( - legacy_v1_state, - deactivated.clone(), - ); + crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated); } crate::legacy_v1::handle_activated_leaf( @@ -295,10 +292,7 @@ impl StatementDistributionSubsystem { } } else { for deactivated in &deactivated { - crate::legacy_v1::handle_deactivate_leaf( - legacy_v1_state, - deactivated.clone(), - ); + crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated); } vstaging::handle_active_leaves_update( ctx, @@ -358,7 +352,7 @@ impl StatementDistributionSubsystem { } let target = match &event { - &NetworkBridgeEvent::PeerMessage(_, ref message) => match message { + NetworkBridgeEvent::PeerMessage(_, message) => match message { Versioned::VStaging( protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), ) => VersionTarget::Legacy, diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index 11deeed42fef..b49a4563380d 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -308,7 +308,7 @@ impl Candidates { let maybe_children = self.by_parent.get(&parent); let i = maybe_children .into_iter() - .flat_map(|c| c) + .flatten() .filter_map(|c_hash| self.candidates.get_key_value(c_hash)); 
extend_hypotheticals(&mut v, i, Some(parent)); diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 3f740b6257a6..2220e2e21915 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -60,10 +60,10 @@ pub struct SessionTopologyView { impl SessionTopologyView { /// Returns an iterator over all validator indices from the group who are allowed to /// send us manifests. - pub fn iter_group_senders<'a>( - &'a self, + pub fn iter_group_senders( + &self, group: GroupIndex, - ) -> impl Iterator + 'a { + ) -> impl Iterator + '_ { self.group_views .get(&group) .into_iter() @@ -335,7 +335,7 @@ impl GridTracker { // Populate the entry with previously unconfirmed manifests. for (v, claimed_group_index) in - self.unconfirmed.remove(&candidate_hash).into_iter().flat_map(|x| x) + self.unconfirmed.remove(&candidate_hash).into_iter().flatten() { if claimed_group_index != group_index { // This is misbehavior, but is handled more comprehensively elsewhere @@ -614,13 +614,13 @@ fn decompose_statement_filter<'a>( let s = statement_filter .seconded_in_group .iter_ones() - .map(|i| g[i].clone()) + .map(|i| g[i]) .map(move |i| (i, CompactStatement::Seconded(candidate_hash))); let v = statement_filter .validated_in_group .iter_ones() - .map(|i| g[i].clone()) + .map(|i| g[i]) .map(move |i| (i, CompactStatement::Valid(candidate_hash))); s.chain(v) @@ -732,7 +732,7 @@ impl ReceivedManifests { manifest_summary.claimed_group_index, group_size, seconding_limit, - &*fresh_seconded, + &fresh_seconded, ); if !within_limits { @@ -751,7 +751,7 @@ impl ReceivedManifests { manifest_summary.claimed_group_index, group_size, seconding_limit, - &*manifest_summary.statement_knowledge.seconded_in_group, + &manifest_summary.statement_knowledge.seconded_in_group, ); if within_limits { diff --git a/node/network/statement-distribution/src/vstaging/mod.rs 
b/node/network/statement-distribution/src/vstaging/mod.rs index 93c943b52416..0b683778b3a4 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -228,7 +228,7 @@ fn connected_validator_peer( .discovery_keys .get(validator_index.0 as usize) .and_then(|k| authorities.get(k)) - .map(|p| p.clone()) + .map(|p| *p) } struct PeerState { @@ -244,7 +244,7 @@ impl PeerState { let next_implicit = new_view .iter() .flat_map(|x| local_implicit.known_allowed_relay_parents_under(x, None)) - .flat_map(|x| x) + .flatten() .cloned() .collect::>(); @@ -289,10 +289,8 @@ impl PeerState { self.discovery_ids.as_ref().map_or(false, |x| x.contains(authority_id)) } - fn iter_known_discovery_ids<'a>( - &'a self, - ) -> impl Iterator + 'a { - self.discovery_ids.as_ref().into_iter().flat_map(|inner| inner) + fn iter_known_discovery_ids(&self) -> impl Iterator { + self.discovery_ids.as_ref().into_iter().flatten() } } @@ -341,7 +339,7 @@ pub(crate) async fn handle_network_update( }, NetworkBridgeEvent::PeerDisconnected(peer_id) => { if let Some(p) = state.peers.remove(&peer_id) { - for discovery_key in p.discovery_ids.into_iter().flat_map(|x| x) { + for discovery_key in p.discovery_ids.into_iter().flatten() { state.authorities.remove(&discovery_key); } } @@ -519,14 +517,13 @@ pub(crate) async fn handle_active_leaves_update( for (peer, peer_state) in state.peers.iter_mut() { let fresh = peer_state.reconcile_active_leaf(leaf.hash, &new_relay_parents); if !fresh.is_empty() { - update_peers.push((peer.clone(), fresh)); + update_peers.push((*peer, fresh)); } } for (peer, fresh) in update_peers { for fresh_relay_parent in fresh { - send_peer_messages_for_relay_parent(ctx, state, peer.clone(), fresh_relay_parent) - .await; + send_peer_messages_for_relay_parent(ctx, state, peer, fresh_relay_parent).await; } } } @@ -611,7 +608,7 @@ async fn handle_peer_view_update( }; for new_relay_parent in fresh_implicit { - 
send_peer_messages_for_relay_parent(ctx, state, peer.clone(), new_relay_parent).await; + send_peer_messages_for_relay_parent(ctx, state, peer, new_relay_parent).await; } } @@ -700,7 +697,7 @@ fn pending_statement_network_message( .map(|signed| { protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, signed) }) - .map(|msg| (vec![peer.clone()], Versioned::VStaging(msg).into())) + .map(|msg| (vec![*peer], Versioned::VStaging(msg).into())) } /// Send a peer all pending cluster statements for a relay parent. @@ -816,7 +813,7 @@ async fn send_pending_grid_messages( ); messages.push(( - vec![peer_id.clone()], + vec![*peer_id], Versioned::VStaging( protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( manifest, @@ -827,7 +824,7 @@ async fn send_pending_grid_messages( }, grid::ManifestKind::Acknowledgement => { messages.extend(acknowledgement_and_statement_messages( - peer_id.clone(), + *peer_id, peer_validator_id, groups, relay_parent_state, @@ -1039,7 +1036,7 @@ async fn circulate_statement( ) { let session_info = &per_session.session_info; - let candidate_hash = statement.payload().candidate_hash().clone(); + let candidate_hash = *statement.payload().candidate_hash(); let compact_statement = statement.payload().clone(); let is_confirmed = candidates.is_confirmed(&candidate_hash); @@ -1083,7 +1080,7 @@ async fn circulate_statement( let targets = cluster_targets .into_iter() - .flat_map(|c| c) + .flatten() .chain(grid_targets) .filter_map(|(v, k)| { session_info.discovery_keys.get(v.0 as usize).map(|a| (v, a.clone(), k)) @@ -1097,8 +1094,7 @@ async fn circulate_statement( for (target, authority_id, kind) in targets { // Find peer ID based on authority ID, and also filter to connected. 
let peer_id: PeerId = match authorities.get(&authority_id) { - Some(p) if peers.get(p).map_or(false, |p| p.knows_relay_parent(&relay_parent)) => - p.clone(), + Some(p) if peers.get(p).map_or(false, |p| p.knows_relay_parent(&relay_parent)) => *p, None | Some(_) => continue, }; @@ -1313,7 +1309,7 @@ async fn handle_incoming_statement( // parachain. { let res = state.candidates.insert_unconfirmed( - peer.clone(), + peer, candidate_hash, relay_parent, originator_group, @@ -1383,7 +1379,7 @@ async fn handle_incoming_statement( &relay_parent, &mut *per_relay_parent, confirmed, - &*per_session, + per_session, ) .await; } @@ -1927,7 +1923,7 @@ async fn handle_incoming_manifest_common<'a, Context>( // 3. if accepted by grid, insert as unconfirmed. if let Err(BadAdvertisement) = candidates.insert_unconfirmed( - peer.clone(), + peer, candidate_hash, relay_parent, group_index, @@ -1988,7 +1984,7 @@ async fn handle_incoming_manifest( ) { let x = match handle_incoming_manifest_common( ctx, - peer.clone(), + peer, &state.peers, &mut state.per_relay_parent, &state.per_session, @@ -2077,7 +2073,7 @@ fn acknowledgement_and_statement_messages( protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), ); - let mut messages = vec![(vec![peer.clone()], msg.into())]; + let mut messages = vec![(vec![peer], msg.into())]; local_validator.grid_tracker.manifest_sent_to( groups, @@ -2096,7 +2092,7 @@ fn acknowledgement_and_statement_messages( candidate_hash, ); - messages.extend(statement_messages.into_iter().map(|m| (vec![peer.clone()], m))); + messages.extend(statement_messages.into_iter().map(|m| (vec![peer], m))); messages } @@ -2126,7 +2122,7 @@ async fn handle_incoming_acknowledgement( let x = match handle_incoming_manifest_common( ctx, - peer.clone(), + peer, &state.peers, &mut state.per_relay_parent, &state.per_session, @@ -2166,7 +2162,7 @@ async fn handle_incoming_acknowledgement( if !messages.is_empty() { 
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages( - messages.into_iter().map(|m| (vec![peer.clone()], m)).collect(), + messages.into_iter().map(|m| (vec![peer], m)).collect(), )) .await; } diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index 494dfbd8bf56..082f334fcb41 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -305,7 +305,7 @@ impl RequestManager { }; let (request, response_fut) = OutgoingRequest::new( - RequestRecipient::Peer(target.clone()), + RequestRecipient::Peer(target), AttestedCandidateRequest { candidate_hash: id.candidate_hash, mask: props.unwanted_mask.clone(), @@ -393,7 +393,7 @@ fn find_request_target_with_update( filter.mask_seconded(&props.unwanted_mask.seconded_in_group); filter.mask_valid(&props.unwanted_mask.validated_in_group); if seconded_and_sufficient(&filter, props.backing_threshold) { - target = Some((i, p.clone())); + target = Some((i, *p)); break } } @@ -405,7 +405,7 @@ fn find_request_target_with_update( if let Some((i, p)) = target { known_by.remove(i - prune_count); - known_by.push_back(p.clone()); + known_by.push_back(p); Some(p) } else { None @@ -505,7 +505,7 @@ impl UnhandledResponse { ); return ResponseValidationOutput { - requested_peer: requested_peer.clone(), + requested_peer, reputation_changes: vec![(requested_peer, COST_IMPROPERLY_DECODED_RESPONSE)], request_status: CandidateRequestStatus::Incomplete, } @@ -566,7 +566,7 @@ fn validate_complete_response( let invalid_candidate_output = || ResponseValidationOutput { request_status: CandidateRequestStatus::Incomplete, - reputation_changes: vec![(requested_peer.clone(), COST_INVALID_RESPONSE)], + reputation_changes: vec![(requested_peer, COST_INVALID_RESPONSE)], requested_peer, }; @@ -613,7 +613,7 @@ fn validate_complete_response( let i = match 
index_in_group(unchecked_statement.unchecked_validator_index()) { Some(i) => i, None => { - rep_changes.push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); + rep_changes.push((requested_peer, COST_UNREQUESTED_RESPONSE_STATEMENT)); continue }, }; @@ -622,7 +622,7 @@ fn validate_complete_response( if unchecked_statement.unchecked_payload().candidate_hash() != &identifier.candidate_hash { - rep_changes.push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); + rep_changes.push((requested_peer, COST_UNREQUESTED_RESPONSE_STATEMENT)); continue } @@ -632,27 +632,23 @@ fn validate_complete_response( match unchecked_statement.unchecked_payload() { CompactStatement::Seconded(_) => { if unwanted_mask.seconded_in_group[i] { - rep_changes - .push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); + rep_changes.push((requested_peer, COST_UNREQUESTED_RESPONSE_STATEMENT)); continue } if received_filter.seconded_in_group[i] { - rep_changes - .push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); + rep_changes.push((requested_peer, COST_UNREQUESTED_RESPONSE_STATEMENT)); continue } }, CompactStatement::Valid(_) => { if unwanted_mask.validated_in_group[i] { - rep_changes - .push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); + rep_changes.push((requested_peer, COST_UNREQUESTED_RESPONSE_STATEMENT)); continue } if received_filter.validated_in_group[i] { - rep_changes - .push((requested_peer.clone(), COST_UNREQUESTED_RESPONSE_STATEMENT)); + rep_changes.push((requested_peer, COST_UNREQUESTED_RESPONSE_STATEMENT)); continue } }, @@ -661,7 +657,7 @@ fn validate_complete_response( let validator_public = match validator_key_lookup(unchecked_statement.unchecked_validator_index()) { None => { - rep_changes.push((requested_peer.clone(), COST_INVALID_SIGNATURE)); + rep_changes.push((requested_peer, COST_INVALID_SIGNATURE)); continue }, Some(p) => p, @@ -670,7 +666,7 @@ fn validate_complete_response( let checked_statement = 
match unchecked_statement.try_into_checked(&signing_context, &validator_public) { Err(_) => { - rep_changes.push((requested_peer.clone(), COST_INVALID_SIGNATURE)); + rep_changes.push((requested_peer, COST_INVALID_SIGNATURE)); continue }, Ok(checked) => checked, @@ -686,7 +682,7 @@ fn validate_complete_response( } statements.push(checked_statement); - rep_changes.push((requested_peer.clone(), BENEFIT_VALID_STATEMENT)); + rep_changes.push((requested_peer, BENEFIT_VALID_STATEMENT)); } // Only accept responses which are sufficient, according to our @@ -698,7 +694,7 @@ fn validate_complete_response( statements }; - rep_changes.push((requested_peer.clone(), BENEFIT_VALID_RESPONSE)); + rep_changes.push((requested_peer, BENEFIT_VALID_RESPONSE)); ResponseValidationOutput { requested_peer, From 4c254e61048457f8cdf10e2e87134cd51f4793ad Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 17 Feb 2023 16:50:49 -0600 Subject: [PATCH 172/220] add tests module --- .../src/vstaging/mod.rs | 3 +++ .../src/vstaging/tests/mod.rs | 25 +++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 node/network/statement-distribution/src/vstaging/tests/mod.rs diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 0b683778b3a4..547331d3b67e 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -84,6 +84,9 @@ mod groups; mod requests; mod statement_store; +#[cfg(test)] +mod tests; + const COST_UNEXPECTED_STATEMENT: Rep = Rep::CostMinor("Unexpected Statement"); const COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE: Rep = Rep::CostMinor("Unexpected Statement, missing knowledge for relay parent"); diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs new file mode 100644 index 000000000000..25093f30381d --- /dev/null +++ 
b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -0,0 +1,25 @@ +// Copyright 2023 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use super::*; + +struct TestConfig { + validator_count: usize, + group_size: usize, + max_groups: Option, + // whether the local node should be a validator + local_validator: bool, +} From c50d50de28cc71b5ca0b6d7ed0fb279cc149aae6 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 17 Feb 2023 17:02:22 -0600 Subject: [PATCH 173/220] fix warnings in existing tests --- .../src/vstaging/candidates.rs | 262 ++++++++++-------- .../src/vstaging/grid.rs | 96 ++++--- 2 files changed, 205 insertions(+), 153 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs index b49a4563380d..804da987ba6d 100644 --- a/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -685,7 +685,6 @@ mod tests { let candidate_head_data_hash_a = candidate_head_data_a.hash(); let candidate_head_data_hash_b = candidate_head_data_b.hash(); let candidate_head_data_hash_c = candidate_head_data_c.hash(); - let candidate_head_data_hash_d = candidate_head_data_d.hash(); let (candidate_a, pvd_a) = make_candidate( relay_hash, @@ -703,7 +702,7 @@ mod tests { 
candidate_head_data_b.clone(), Hash::from_low_u64_be(2000).into(), ); - let (candidate_c, pvd_c) = make_candidate( + let (candidate_c, _) = make_candidate( relay_hash, 1, 1.into(), @@ -733,30 +732,39 @@ mod tests { // Insert some unconfirmed candidates. // Advertise A without parent hash. - candidates.insert_unconfirmed(peer, candidate_hash_a, relay_hash, group_index, None); + candidates + .insert_unconfirmed(peer, candidate_hash_a, relay_hash, group_index, None) + .ok() + .unwrap(); assert_eq!(candidates.by_parent, HashMap::default()); // Advertise A with parent hash and ID. - candidates.insert_unconfirmed( - peer, - candidate_hash_a, - relay_hash, - group_index, - Some((relay_hash, 1.into())), - ); + candidates + .insert_unconfirmed( + peer, + candidate_hash_a, + relay_hash, + group_index, + Some((relay_hash, 1.into())), + ) + .ok() + .unwrap(); assert_eq!( candidates.by_parent, HashMap::from([((relay_hash, 1.into()), HashSet::from([candidate_hash_a]))]) ); // Advertise B with parent A. - candidates.insert_unconfirmed( - peer, - candidate_hash_b, - relay_hash, - group_index, - Some((candidate_head_data_hash_a, 1.into())), - ); + candidates + .insert_unconfirmed( + peer, + candidate_hash_b, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); assert_eq!( candidates.by_parent, HashMap::from([ @@ -766,13 +774,16 @@ mod tests { ); // Advertise C with parent A. - candidates.insert_unconfirmed( - peer, - candidate_hash_c, - relay_hash, - group_index, - Some((candidate_head_data_hash_a, 1.into())), - ); + candidates + .insert_unconfirmed( + peer, + candidate_hash_c, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); assert_eq!( candidates.by_parent, HashMap::from([ @@ -785,13 +796,16 @@ mod tests { ); // Advertise D with parent A. 
- candidates.insert_unconfirmed( - peer, - candidate_hash_d, - relay_hash, - group_index, - Some((candidate_head_data_hash_a, 1.into())), - ); + candidates + .insert_unconfirmed( + peer, + candidate_hash_d, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); assert_eq!( candidates.by_parent, HashMap::from([ @@ -875,8 +889,6 @@ mod tests { let candidate_head_data_d = HeadData(vec![4]); let candidate_head_data_hash_a = candidate_head_data_a.hash(); let candidate_head_data_hash_b = candidate_head_data_b.hash(); - let candidate_head_data_hash_c = candidate_head_data_c.hash(); - let candidate_head_data_hash_d = candidate_head_data_d.hash(); let (candidate_a, pvd_a) = make_candidate( relay_hash, @@ -894,7 +906,7 @@ mod tests { candidate_head_data_b.clone(), Hash::from_low_u64_be(2000).into(), ); - let (candidate_c, pvd_c) = make_candidate( + let (candidate_c, _) = make_candidate( relay_hash, 1, 1.into(), @@ -928,64 +940,88 @@ mod tests { // Insert some unconfirmed candidates. // Advertise A without parent hash. - candidates.insert_unconfirmed(peer_a, candidate_hash_a, relay_hash, group_index, None); + candidates + .insert_unconfirmed(peer_a, candidate_hash_a, relay_hash, group_index, None) + .ok() + .unwrap(); // Advertise A with parent hash and ID. - candidates.insert_unconfirmed( - peer_a, - candidate_hash_a, - relay_hash, - group_index, - Some((relay_hash, 1.into())), - ); + candidates + .insert_unconfirmed( + peer_a, + candidate_hash_a, + relay_hash, + group_index, + Some((relay_hash, 1.into())), + ) + .ok() + .unwrap(); // (Correctly) advertise B with parent A. Do it from a couple of peers. 
- candidates.insert_unconfirmed( - peer_a, - candidate_hash_b, - relay_hash, - group_index, - Some((candidate_head_data_hash_a, 1.into())), - ); - candidates.insert_unconfirmed( - peer_b, - candidate_hash_b, - relay_hash, - group_index, - Some((candidate_head_data_hash_a, 1.into())), - ); + candidates + .insert_unconfirmed( + peer_a, + candidate_hash_b, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); + candidates + .insert_unconfirmed( + peer_b, + candidate_hash_b, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); // (Wrongly) advertise C with parent A. Do it from a couple peers. - candidates.insert_unconfirmed( - peer_b, - candidate_hash_c, - relay_hash, - group_index, - Some((candidate_head_data_hash_a, 1.into())), - ); - candidates.insert_unconfirmed( - peer_c, - candidate_hash_c, - relay_hash, - group_index, - Some((candidate_head_data_hash_a, 1.into())), - ); + candidates + .insert_unconfirmed( + peer_b, + candidate_hash_c, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); + candidates + .insert_unconfirmed( + peer_c, + candidate_hash_c, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); // Advertise D. Do it correctly from one peer (parent B) and wrongly from another (parent A). 
- candidates.insert_unconfirmed( - peer_c, - candidate_hash_d, - relay_hash, - group_index, - Some((candidate_head_data_hash_b, 1.into())), - ); - candidates.insert_unconfirmed( - peer_d, - candidate_hash_d, - relay_hash, - group_index, - Some((candidate_head_data_hash_a, 1.into())), - ); + candidates + .insert_unconfirmed( + peer_c, + candidate_hash_d, + relay_hash, + group_index, + Some((candidate_head_data_hash_b, 1.into())), + ) + .ok() + .unwrap(); + candidates + .insert_unconfirmed( + peer_d, + candidate_hash_d, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); assert_eq!( candidates.by_parent, @@ -1108,7 +1144,6 @@ mod tests { let candidate_head_data_d = HeadData(vec![4]); let candidate_head_data_hash_a = candidate_head_data_a.hash(); let candidate_head_data_hash_b = candidate_head_data_b.hash(); - let candidate_head_data_hash_c = candidate_head_data_c.hash(); let candidate_head_data_hash_d = candidate_head_data_d.hash(); let (candidate_a, pvd_a) = make_candidate( @@ -1119,7 +1154,7 @@ mod tests { candidate_head_data_a.clone(), Hash::from_low_u64_be(1000).into(), ); - let (candidate_b, pvd_b) = make_candidate( + let (candidate_b, _) = make_candidate( relay_hash, 1, 1.into(), @@ -1127,7 +1162,7 @@ mod tests { candidate_head_data_b.clone(), Hash::from_low_u64_be(2000).into(), ); - let (candidate_c, pvd_c) = make_candidate( + let (candidate_c, _) = make_candidate( relay_hash, 1, 1.into(), @@ -1135,7 +1170,7 @@ mod tests { candidate_head_data_c.clone(), Hash::from_low_u64_be(3000).into(), ); - let (candidate_d, pvd_d) = make_candidate( + let (candidate_d, _) = make_candidate( relay_hash, 1, 1.into(), @@ -1163,31 +1198,40 @@ mod tests { ); // Advertise B with parent A. 
- candidates.insert_unconfirmed( - peer, - candidate_hash_b, - relay_hash, - group_index, - Some((candidate_head_data_hash_a, 1.into())), - ); + candidates + .insert_unconfirmed( + peer, + candidate_hash_b, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); // Advertise C with parent A. - candidates.insert_unconfirmed( - peer, - candidate_hash_c, - relay_hash, - group_index, - Some((candidate_head_data_hash_a, 1.into())), - ); + candidates + .insert_unconfirmed( + peer, + candidate_hash_c, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); // Advertise D with parent B. - candidates.insert_unconfirmed( - peer, - candidate_hash_d, - relay_hash, - group_index, - Some((candidate_head_data_hash_b, 1.into())), - ); + candidates + .insert_unconfirmed( + peer, + candidate_hash_d, + relay_hash, + group_index, + Some((candidate_head_data_hash_b, 1.into())), + ) + .ok() + .unwrap(); assert_eq!( candidates.by_parent, diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 2220e2e21915..a00210af44b3 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -582,6 +582,18 @@ impl GridTracker { ) -> Option { self.received.get(&validator)?.candidate_statement_filter(candidate_hash) } + + #[cfg(test)] + fn is_manifest_pending_for( + &self, + validator: ValidatorIndex, + candidate_hash: &CandidateHash, + ) -> Option { + self.pending_manifests + .get(&validator) + .and_then(|m| m.get(candidate_hash)) + .map(|x| *x) + } } fn extract_statement_and_group_info( @@ -1003,10 +1015,8 @@ mod tests { fn dummy_groups(group_size: usize) -> Groups { let groups = vec![(0..(group_size as u32)).map(ValidatorIndex).collect()].into(); - let mut discovery_keys = vec![]; - (0..group_size).map(|_| 
discovery_keys.push(AuthorityDiscoveryPair::generate().0.public())); - Groups::new(groups, &discovery_keys) + Groups::new(groups) } #[test] @@ -1404,7 +1414,7 @@ mod tests { } #[test] - fn reject_malformed_below_threshold() { + fn reject_insufficient_below_threshold() { let mut tracker = GridTracker::default(); let session_topology = SessionTopologyView { group_views: vec![( @@ -1443,7 +1453,7 @@ mod tests { ManifestKind::Full, ValidatorIndex(0), ), - Err(ManifestImportError::Malformed) + Err(ManifestImportError::Insufficient) ); // seconding + validating still not enough to reach '2' threshold @@ -1465,7 +1475,7 @@ mod tests { ManifestKind::Full, ValidatorIndex(0), ), - Err(ManifestImportError::Malformed) + Err(ManifestImportError::Insufficient) ); // finally good. @@ -1522,7 +1532,6 @@ mod tests { &session_topology, candidate_hash, group_index, - group_size, local_knowledge.clone(), ); // Validator 0 is in the sending group. Advertise onward to it. @@ -1590,7 +1599,6 @@ mod tests { &session_topology, candidate_hash, group_index, - group_size, local_knowledge.clone(), ); @@ -1658,7 +1666,6 @@ mod tests { &session_topology, candidate_hash, group_index, - group_size, local_knowledge.clone(), ); @@ -1740,7 +1747,6 @@ mod tests { &session_topology, candidate_hash, group_index, - group_size, local_knowledge.clone(), ); assert_eq!(receivers, vec![(send_to, ManifestKind::Full)]); @@ -1878,7 +1884,6 @@ mod tests { &session_topology, candidate_hash, group_index, - group_size, local_knowledge.clone(), ); @@ -1923,7 +1928,6 @@ mod tests { &session_topology, candidate_hash, group_index, - group_size, local_knowledge.clone(), ); @@ -2012,7 +2016,6 @@ mod tests { &session_topology, candidate_hash, group_index, - group_size, local_knowledge.clone(), ); @@ -2098,48 +2101,53 @@ mod tests { &session_topology, candidate_hash, group_index, - group_size, local_knowledge.clone(), ); // Import statement for originator. 
- tracker.import_manifest( - &session_topology, - &groups, - candidate_hash, - 3, - ManifestSummary { - claimed_parent_hash: Hash::repeat_byte(0), - claimed_group_index: group_index, - statement_knowledge: StatementFilter { - seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], - validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + tracker + .import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, }, - }, - ManifestKind::Full, - validator_index, - ); + ManifestKind::Full, + validator_index, + ) + .ok() + .unwrap(); tracker.manifest_sent_to(&groups, validator_index, candidate_hash, local_knowledge.clone()); let statement = CompactStatement::Seconded(candidate_hash); tracker.learned_fresh_statement(&groups, &session_topology, validator_index, &statement); // Import statement for counterparty. 
- tracker.import_manifest( - &session_topology, - &groups, - candidate_hash, - 3, - ManifestSummary { - claimed_parent_hash: Hash::repeat_byte(0), - claimed_group_index: group_index, - statement_knowledge: StatementFilter { - seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], - validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + tracker + .import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, }, - }, - ManifestKind::Full, - counterparty, - ); + ManifestKind::Full, + counterparty, + ) + .ok() + .unwrap(); tracker.manifest_sent_to(&groups, counterparty, candidate_hash, local_knowledge); let statement = CompactStatement::Seconded(candidate_hash); tracker.learned_fresh_statement(&groups, &session_topology, counterparty, &statement); From 1ffcd43a992731c6f44374ec8f28c83edfec2990 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 17 Feb 2023 17:29:23 -0600 Subject: [PATCH 174/220] create basic test harness --- Cargo.lock | 1 + .../network/statement-distribution/Cargo.toml | 1 + .../src/vstaging/tests/mod.rs | 58 +++++++++++++++++++ 3 files changed, 60 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 1ca15a9b95b4..5a7b521ff590 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7806,6 +7806,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rand_chacha 0.3.1", "sc-keystore", "sc-network", "sp-application-crypto", diff --git a/node/network/statement-distribution/Cargo.toml b/node/network/statement-distribution/Cargo.toml index d941adf02ae2..961a8e04a659 100644 --- a/node/network/statement-distribution/Cargo.toml +++ b/node/network/statement-distribution/Cargo.toml @@ -35,3 +35,4 @@ sc-keystore = { git = 
"https://github.com/paritytech/substrate", branch = "maste sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } futures-timer = "3.0.2" polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +rand_chacha = "0.3" diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index 25093f30381d..051704a51d91 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -15,6 +15,20 @@ // along with Polkadot. If not, see . use super::*; +use crate::*; +use polkadot_node_subsystem_test_helpers as test_helpers; +use polkadot_node_network_protocol::request_response::ReqProtocolNames; +use sc_keystore::LocalKeystore; + +use futures::Future; +use rand::{Rng, SeedableRng}; + +use std::sync::Arc; + +type VirtualOverseer = test_helpers::TestSubsystemContextHandle; + +// Some deterministic genesis hash for req/res protocol names +const GENESIS_HASH: Hash = Hash::repeat_byte(0xff); struct TestConfig { validator_count: usize, @@ -22,4 +36,48 @@ struct TestConfig { max_groups: Option, // whether the local node should be a validator local_validator: bool, + rng_seed: u64, +} + +fn test_harness>( + config: TestConfig, + test: impl FnOnce(VirtualOverseer) -> T, +) { + let pool = sp_core::testing::TaskExecutor::new(); + let keystore = if config.local_validator { + test_helpers::mock::make_ferdie_keystore() + } else { + Arc::new(LocalKeystore::in_memory()) as SyncCryptoStorePtr + }; + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let (statement_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); + let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); + let rng = rand_chacha::ChaCha8Rng::seed_from_u64(config.rng_seed); + + let (context, virtual_overseer) = 
test_helpers::make_subsystem_context(pool.clone()); + let subsystem = async move { + let subsystem = crate::StatementDistributionSubsystem::new( + keystore, + statement_req_receiver, + candidate_req_receiver, + Metrics::default(), + rng, + ); + + if let Err(e) = subsystem.run(context).await { + panic!("Fatal error: {:?}", e); + } + }; + + let test_fut = test(virtual_overseer); + + futures::pin_mut!(test_fut); + futures::pin_mut!(subsystem); + futures::executor::block_on(future::join( + async move { + let mut virtual_overseer = test_fut.await; + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + }, + subsystem, + )); } From 12947b7f961cdb8718c56718b35dc6cd7d511b79 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 17 Feb 2023 18:19:28 -0600 Subject: [PATCH 175/220] create a test state struct --- .../src/vstaging/tests/mod.rs | 87 ++++++++++++++++++- 1 file changed, 83 insertions(+), 4 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index 051704a51d91..dfd3f1c40542 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -16,9 +16,13 @@ use super::*; use crate::*; +use polkadot_primitives::vstaging::{ValidatorPair, AssignmentPair, AssignmentId, IndexedVec}; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_network_protocol::request_response::ReqProtocolNames; use sc_keystore::LocalKeystore; +use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; +use sp_application_crypto::Pair as PairT; +use sp_keyring::Sr25519Keyring; use futures::Future; use rand::{Rng, SeedableRng}; @@ -32,16 +36,89 @@ const GENESIS_HASH: Hash = Hash::repeat_byte(0xff); struct TestConfig { validator_count: usize, + // how many validators to place in each group. 
group_size: usize, - max_groups: Option, // whether the local node should be a validator local_validator: bool, rng_seed: u64, } +struct TestLocalValidator { + validator_id: ValidatorId, + validator_index: ValidatorIndex, + group_index: GroupIndex, +} + +struct TestState { + config: TestConfig, + local: Option, + validators: Vec, + discovery_keys: Vec, + assignment_keys: Vec, + validator_groups: IndexedVec>, +} + +impl TestState { + fn from_config(config: TestConfig, rng: &mut impl Rng) -> Self { + if config.group_size == 0 { + panic!("group size cannot be 0"); + } + + let mut validators = Vec::new(); + let mut discovery_keys = Vec::new(); + let mut assignment_keys = Vec::new(); + let mut validator_groups = Vec::new(); + + let local_validator_pos = if config.local_validator { + Some(rng.gen_range(0..config.validator_count)) + } else { + None + }; + + for i in 0..config.validator_count { + let validator_pair = if Some(i) == local_validator_pos { + Sr25519Keyring::Ferdie.pair().into() + } else { + ValidatorPair::generate().0 + }; + let assignment_id = AssignmentPair::generate().0.public(); + let discovery_id = AuthorityDiscoveryPair::generate().0.public(); + + let group_index = i / config.group_size; + validators.push(validator_pair); + discovery_keys.push(discovery_id); + assignment_keys.push(assignment_id); + if validator_groups.len() == group_index { + validator_groups.push(vec![ValidatorIndex(i as _)]); + } else { + validator_groups.last_mut().unwrap().push(ValidatorIndex(i as _)); + } + } + + let local = if let Some(local_pos) = local_validator_pos { + Some(TestLocalValidator { + validator_id: validators[local_pos].public().clone(), + validator_index: ValidatorIndex(local_pos as _), + group_index: GroupIndex((local_pos / config.group_size) as _), + }) + } else { + None + }; + + TestState { + config, + local, + validators, + discovery_keys, + assignment_keys, + validator_groups: IndexedVec::from(validator_groups), + } + } +} + fn test_harness>( config: 
TestConfig, - test: impl FnOnce(VirtualOverseer) -> T, + test: impl FnOnce(TestState, VirtualOverseer) -> T, ) { let pool = sp_core::testing::TaskExecutor::new(); let keystore = if config.local_validator { @@ -52,7 +129,9 @@ fn test_harness>( let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); let (statement_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); - let rng = rand_chacha::ChaCha8Rng::seed_from_u64(config.rng_seed); + let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(config.rng_seed); + + let test_state = TestState::from_config(config, &mut rng); let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); let subsystem = async move { @@ -69,7 +148,7 @@ fn test_harness>( } }; - let test_fut = test(virtual_overseer); + let test_fut = test(test_state, virtual_overseer); futures::pin_mut!(test_fut); futures::pin_mut!(subsystem); From 0bfe2139ee536a2cc36f65da25012156baf3d805 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 17 Feb 2023 18:19:54 -0600 Subject: [PATCH 176/220] fmt --- .../statement-distribution/src/vstaging/tests/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index dfd3f1c40542..0aa41629a4bc 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -16,12 +16,12 @@ use super::*; use crate::*; -use polkadot_primitives::vstaging::{ValidatorPair, AssignmentPair, AssignmentId, IndexedVec}; -use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_network_protocol::request_response::ReqProtocolNames; +use polkadot_node_subsystem_test_helpers as test_helpers; +use polkadot_primitives::vstaging::{AssignmentId, 
AssignmentPair, IndexedVec, ValidatorPair}; use sc_keystore::LocalKeystore; -use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; use sp_application_crypto::Pair as PairT; +use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; use sp_keyring::Sr25519Keyring; use futures::Future; From 72b5e9c22d361d8afb1a8642f18b5a6d5e707525 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 17 Feb 2023 18:20:54 -0600 Subject: [PATCH 177/220] create empty cluster & grid modules for tests --- .../src/vstaging/tests/cluster.rs | 15 +++++++++++++++ .../src/vstaging/tests/grid.rs | 15 +++++++++++++++ .../src/vstaging/tests/mod.rs | 3 +++ 3 files changed, 33 insertions(+) create mode 100644 node/network/statement-distribution/src/vstaging/tests/cluster.rs create mode 100644 node/network/statement-distribution/src/vstaging/tests/grid.rs diff --git a/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/node/network/statement-distribution/src/vstaging/tests/cluster.rs new file mode 100644 index 000000000000..1b5509939a42 --- /dev/null +++ b/node/network/statement-distribution/src/vstaging/tests/cluster.rs @@ -0,0 +1,15 @@ +// Copyright 2023 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
diff --git a/node/network/statement-distribution/src/vstaging/tests/grid.rs b/node/network/statement-distribution/src/vstaging/tests/grid.rs new file mode 100644 index 000000000000..1b5509939a42 --- /dev/null +++ b/node/network/statement-distribution/src/vstaging/tests/grid.rs @@ -0,0 +1,15 @@ +// Copyright 2023 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index 0aa41629a4bc..1accbd8c347f 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -29,6 +29,9 @@ use rand::{Rng, SeedableRng}; use std::sync::Arc; +mod cluster; +mod grid; + type VirtualOverseer = test_helpers::TestSubsystemContextHandle; // Some deterministic genesis hash for req/res protocol names From d62e5dc72f68291b56446bd6f267789795202c0b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 17 Feb 2023 18:31:52 -0600 Subject: [PATCH 178/220] some TODOs for cluster test suite --- .../src/vstaging/tests/cluster.rs | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/node/network/statement-distribution/src/vstaging/tests/cluster.rs index 1b5509939a42..faa3cb5d1fbf 
100644 --- a/node/network/statement-distribution/src/vstaging/tests/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/tests/cluster.rs @@ -13,3 +13,28 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . + + +// TODO [now]: shared seconded statement is circulated to all cluster peers with relay-parent +// in view + +// TODO [now]: cluster 'valid' statement without prior seconded is ignored + +// TODO [now]: statement with invalid signature leads to report + +// TODO [now]: cluster statement from non-cluster peer is rejected + +// TODO [now]: statement from non-cluster originator is rejected + +// TODO [now]: cluster statement for unknown candidate leads to request + +// TODO [now]: cluster statements are shared with `Seconded` first for all cluster peers +// with relay-parent in view + +// TODO [now]: cluster statements not re-shared on view update + +// TODO [now]: cluster statements shared the first time a cluster peer gets relay-parent in view.
+ +// TODO [now]: confirmed cluster statement does not import statements until candidate in hypothetical frontier + +// TODO [now]: shared valid statement after confirmation sent to all cluster peers with relay-parent From 314b68cb7964a2b8f0f9a4bf9a2238bff74e71ba Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 17 Feb 2023 18:43:15 -0600 Subject: [PATCH 179/220] describe test-suite for grid logic --- .../src/vstaging/mod.rs | 2 +- .../src/vstaging/tests/grid.rs | 26 +++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 547331d3b67e..dbe61dd282bf 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -2011,7 +2011,7 @@ async fn handle_incoming_manifest( let ManifestImportSuccess { relay_parent_state, per_session, acknowledge, sender_index } = x; if acknowledge { - // 4. if already confirmed & known within grid, acknowledge candidate + // 4. if already known within grid (confirmed & backed), acknowledge candidate let local_knowledge = { let group_size = match per_session.groups.get(manifest.group_index) { diff --git a/node/network/statement-distribution/src/vstaging/tests/grid.rs b/node/network/statement-distribution/src/vstaging/tests/grid.rs index 1b5509939a42..c5eb3826846e 100644 --- a/node/network/statement-distribution/src/vstaging/tests/grid.rs +++ b/node/network/statement-distribution/src/vstaging/tests/grid.rs @@ -13,3 +13,29 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
+ +// TODO [now]: backed candidate leads to advertisement to relevant validators with relay-parent + +// TODO [now]: received advertisement before confirmation leads to request + +// TODO [now]: received advertisement after backing leads to acknowledgement + +// TODO [now]: received advertisement after confirmation but before backing leads to nothing + +// TODO [now]: additional statements are shared after manifest exchange + +// TODO [now]: grid-sending validator view entering relay-parent leads to advertisement + +// TODO [now]: advertisement not re-sent after re-entering relay parent (view oscillation) + +// TODO [now]: acknowledgements sent only when candidate backed + +// TODO [now]: grid statements imported to backing once candidate enters hypothetical frontier + +// TODO [now]: advertisements rejected from incorrect peers + +// TODO [now]: manifests rejected with unknown relay parent or when not a validator + +// TODO [now]: advertisements rejected when candidate group does not match para + +// TODO [now]: peer reported when advertisement conflicting with confirmed candidate.
From 6a897c69af099c77eef07b3dc1fb24f24d925dd8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 17 Feb 2023 18:45:44 -0600 Subject: [PATCH 180/220] describe request test suite --- .../src/vstaging/tests/mod.rs | 1 + .../src/vstaging/tests/requests.rs | 23 +++++++++++++++++++ 2 files changed, 24 insertions(+) create mode 100644 node/network/statement-distribution/src/vstaging/tests/requests.rs diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index 1accbd8c347f..428ff8ec9319 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -31,6 +31,7 @@ use std::sync::Arc; mod cluster; mod grid; +mod requests; type VirtualOverseer = test_helpers::TestSubsystemContextHandle; diff --git a/node/network/statement-distribution/src/vstaging/tests/requests.rs b/node/network/statement-distribution/src/vstaging/tests/requests.rs new file mode 100644 index 000000000000..4809bc6ee39b --- /dev/null +++ b/node/network/statement-distribution/src/vstaging/tests/requests.rs @@ -0,0 +1,23 @@ +// Copyright 2023 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +// TODO [now]: peer reported for providing statements meant to be masked out + +// TODO [now]: peer reported for not providing enough statements, request retried + +// TODO [now]: peer reported for providing duplicate statements + +// TODO [now]: peer reported for providing statements with invalid signatures or wrong validator IDs From a45e350aa505bd57d39b2716354073c177b58e15 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 17 Feb 2023 18:55:23 -0600 Subject: [PATCH 181/220] fix seconding-limit bug --- node/network/statement-distribution/src/vstaging/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index dbe61dd282bf..1df43b52e6a5 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -426,7 +426,7 @@ pub(crate) async fn handle_active_leaves_update( let seconding_limit = match mode { Ok(ProspectiveParachainsMode::Disabled) | Err(_) => continue, Ok(ProspectiveParachainsMode::Enabled { max_candidate_depth, .. }) => - max_candidate_depth, + max_candidate_depth + 1, }; let session_index = polkadot_node_subsystem_util::request_session_index_for_child( From 1a93b151b9f1a3d3f73ab0439032ec8b04041bd0 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Sun, 19 Feb 2023 10:23:36 +0100 Subject: [PATCH 182/220] Remove extraneous `pub` This somehow made it into my clippy PR. 
--- node/network/statement-distribution/src/legacy_v1/mod.rs | 2 +- .../statement-distribution/src/vstaging/tests/cluster.rs | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/node/network/statement-distribution/src/legacy_v1/mod.rs b/node/network/statement-distribution/src/legacy_v1/mod.rs index 5cedf9677976..f5c174f28184 100644 --- a/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -103,7 +103,7 @@ const MAX_LARGE_STATEMENTS_PER_SENDER: usize = 20; /// Overall state of the legacy-v1 portion of the subsystem. pub(crate) struct State { - pub peers: HashMap, + peers: HashMap, topology_storage: SessionBoundGridTopologyStorage, authorities: HashMap, active_heads: HashMap, diff --git a/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/node/network/statement-distribution/src/vstaging/tests/cluster.rs index faa3cb5d1fbf..1a1aab468f27 100644 --- a/node/network/statement-distribution/src/vstaging/tests/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/tests/cluster.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
- // TODO [now]: shared seconded statement is circulated to all cluster peers with relay-parent // in view From 7eca1eeb6928983d41ce64e20a1ba05ec7624aa6 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Sun, 19 Feb 2023 11:55:05 +0100 Subject: [PATCH 183/220] Fix some test compile warnings --- node/core/prospective-parachains/src/tests.rs | 143 +++++------------- .../src/vstaging/tests/mod.rs | 3 + 2 files changed, 39 insertions(+), 107 deletions(-) diff --git a/node/core/prospective-parachains/src/tests.rs b/node/core/prospective-parachains/src/tests.rs index 71e0367e8d62..6725ef201599 100644 --- a/node/core/prospective-parachains/src/tests.rs +++ b/node/core/prospective-parachains/src/tests.rs @@ -57,7 +57,7 @@ fn dummy_constraints( ump_remaining: 10, ump_remaining_bytes: 1_000, max_ump_num_per_candidate: 10, - dmp_remaining_messages: 10, + dmp_remaining_messages: vec![10], hrmp_inbound: InboundHrmpLimitations { valid_watermarks }, hrmp_channels_out: vec![], max_hrmp_num_per_candidate: 0, @@ -299,22 +299,15 @@ async fn deactivate_leaf(virtual_overseer: &mut VirtualOverseer, hash: Hash) { async fn second_candidate( virtual_overseer: &mut VirtualOverseer, candidate: CommittedCandidateReceipt, - pvd: PersistedValidationData, - expected_candidate_response: Vec<(Hash, Vec)>, ) { - let (tx, rx) = oneshot::channel(); virtual_overseer .send(overseer::FromOrchestra::Communication { msg: ProspectiveParachainsMessage::CandidateSeconded( candidate.descriptor.para_id, - candidate, - pvd, - tx, + candidate.hash(), ), }) .await; - let resp = rx.await.unwrap(); - assert_eq!(resp, expected_candidate_response); } async fn back_candidate( @@ -386,6 +379,8 @@ async fn get_hypothetical_frontier( let request = HypotheticalFrontierRequest { candidates: vec![hypothetical_candidate.clone()], fragment_tree_relay_parent: Some(fragment_tree_relay_parent), + // TODO [now]: test `true` case as well + backed_in_path_only: false, }; let (tx, rx) = oneshot::channel(); virtual_overseer @@ -500,7 
+495,7 @@ fn send_candidates_and_check_if_found() { activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await; // Candidate A1 - let (candidate_a1, pvd_a1) = make_candidate( + let (candidate_a1, _) = make_candidate( leaf_a.hash, leaf_a.number, 1.into(), @@ -512,7 +507,7 @@ fn send_candidates_and_check_if_found() { let response_a1 = vec![(leaf_a.hash, vec![0])]; // Candidate A2 - let (candidate_a2, pvd_a2) = make_candidate( + let (candidate_a2, _) = make_candidate( leaf_a.hash, leaf_a.number, 2.into(), @@ -524,7 +519,7 @@ fn send_candidates_and_check_if_found() { let response_a2 = vec![(leaf_a.hash, vec![0])]; // Candidate B - let (candidate_b, pvd_b) = make_candidate( + let (candidate_b, _) = make_candidate( leaf_b.hash, leaf_b.number, 1.into(), @@ -536,7 +531,7 @@ fn send_candidates_and_check_if_found() { let response_b = vec![(leaf_b.hash, vec![0])]; // Candidate C - let (candidate_c, pvd_c) = make_candidate( + let (candidate_c, _) = make_candidate( leaf_c.hash, leaf_c.number, 2.into(), @@ -548,10 +543,10 @@ fn send_candidates_and_check_if_found() { let response_c = vec![(leaf_c.hash, vec![0])]; // Second candidates. - second_candidate(&mut virtual_overseer, candidate_a1, pvd_a1, response_a1.clone()).await; - second_candidate(&mut virtual_overseer, candidate_a2, pvd_a2, response_a2.clone()).await; - second_candidate(&mut virtual_overseer, candidate_b, pvd_b, response_b.clone()).await; - second_candidate(&mut virtual_overseer, candidate_c, pvd_c, response_c.clone()).await; + second_candidate(&mut virtual_overseer, candidate_a1).await; + second_candidate(&mut virtual_overseer, candidate_a2).await; + second_candidate(&mut virtual_overseer, candidate_b).await; + second_candidate(&mut virtual_overseer, candidate_c).await; // Check candidate tree membership. 
get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, response_a1).await; @@ -614,7 +609,7 @@ fn check_candidate_parent_leaving_view() { activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await; // Candidate A1 - let (candidate_a1, pvd_a1) = make_candidate( + let (candidate_a1, _) = make_candidate( leaf_a.hash, leaf_a.number, 1.into(), @@ -623,10 +618,9 @@ fn check_candidate_parent_leaving_view() { test_state.validation_code_hash, ); let candidate_hash_a1 = candidate_a1.hash(); - let response_a1 = vec![(leaf_a.hash, vec![0])]; // Candidate A2 - let (candidate_a2, pvd_a2) = make_candidate( + let (candidate_a2, _) = make_candidate( leaf_a.hash, leaf_a.number, 2.into(), @@ -635,10 +629,9 @@ fn check_candidate_parent_leaving_view() { test_state.validation_code_hash, ); let candidate_hash_a2 = candidate_a2.hash(); - let response_a2 = vec![(leaf_a.hash, vec![0])]; // Candidate B - let (candidate_b, pvd_b) = make_candidate( + let (candidate_b, _) = make_candidate( leaf_b.hash, leaf_b.number, 1.into(), @@ -650,7 +643,7 @@ fn check_candidate_parent_leaving_view() { let response_b = vec![(leaf_b.hash, vec![0])]; // Candidate C - let (candidate_c, pvd_c) = make_candidate( + let (candidate_c, _) = make_candidate( leaf_c.hash, leaf_c.number, 2.into(), @@ -662,10 +655,10 @@ fn check_candidate_parent_leaving_view() { let response_c = vec![(leaf_c.hash, vec![0])]; // Second candidates. 
- second_candidate(&mut virtual_overseer, candidate_a1, pvd_a1, response_a1.clone()).await; - second_candidate(&mut virtual_overseer, candidate_a2, pvd_a2, response_a2.clone()).await; - second_candidate(&mut virtual_overseer, candidate_b, pvd_b, response_b.clone()).await; - second_candidate(&mut virtual_overseer, candidate_c, pvd_c, response_c.clone()).await; + second_candidate(&mut virtual_overseer, candidate_a1).await; + second_candidate(&mut virtual_overseer, candidate_a2).await; + second_candidate(&mut virtual_overseer, candidate_b).await; + second_candidate(&mut virtual_overseer, candidate_c).await; // Deactivate leaf A. deactivate_leaf(&mut virtual_overseer, leaf_a.hash).await; @@ -740,7 +733,7 @@ fn check_candidate_on_multiple_forks() { activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await; // Candidate on leaf A. - let (candidate_a, pvd_a) = make_candidate( + let (candidate_a, _) = make_candidate( leaf_a.hash, leaf_a.number, 1.into(), @@ -752,7 +745,7 @@ fn check_candidate_on_multiple_forks() { let response_a = vec![(leaf_a.hash, vec![0])]; // Candidate on leaf B. - let (candidate_b, pvd_b) = make_candidate( + let (candidate_b, _) = make_candidate( leaf_b.hash, leaf_b.number, 1.into(), @@ -764,7 +757,7 @@ fn check_candidate_on_multiple_forks() { let response_b = vec![(leaf_b.hash, vec![0])]; // Candidate on leaf C. - let (candidate_c, pvd_c) = make_candidate( + let (candidate_c, _) = make_candidate( leaf_c.hash, leaf_c.number, 1.into(), @@ -776,27 +769,9 @@ fn check_candidate_on_multiple_forks() { let response_c = vec![(leaf_c.hash, vec![0])]; // Second candidate on all three leaves. 
- second_candidate( - &mut virtual_overseer, - candidate_a.clone(), - pvd_a.clone(), - response_a.clone(), - ) - .await; - second_candidate( - &mut virtual_overseer, - candidate_b.clone(), - pvd_b.clone(), - response_b.clone(), - ) - .await; - second_candidate( - &mut virtual_overseer, - candidate_c.clone(), - pvd_c.clone(), - response_c.clone(), - ) - .await; + second_candidate(&mut virtual_overseer, candidate_a.clone()).await; + second_candidate(&mut virtual_overseer, candidate_b.clone()).await; + second_candidate(&mut virtual_overseer, candidate_c.clone()).await; // Check candidate tree membership. get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a, response_a).await; @@ -832,7 +807,7 @@ fn check_backable_query() { activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; // Candidate A - let (candidate_a, pvd_a) = make_candidate( + let (candidate_a, _) = make_candidate( leaf_a.hash, leaf_a.number, 1.into(), @@ -841,10 +816,9 @@ fn check_backable_query() { test_state.validation_code_hash, ); let candidate_hash_a = candidate_a.hash(); - let response_a = vec![(leaf_a.hash, vec![0])]; // Candidate B - let (mut candidate_b, pvd_b) = make_candidate( + let (mut candidate_b, _) = make_candidate( leaf_a.hash, leaf_a.number, 1.into(), @@ -855,23 +829,10 @@ fn check_backable_query() { // Set a field to make this candidate unique. candidate_b.descriptor.para_head = Hash::from_low_u64_le(1000); let candidate_hash_b = candidate_b.hash(); - let response_b = vec![(leaf_a.hash, vec![1])]; // Second candidates. - second_candidate( - &mut virtual_overseer, - candidate_a.clone(), - pvd_a.clone(), - response_a.clone(), - ) - .await; - second_candidate( - &mut virtual_overseer, - candidate_b.clone(), - pvd_b.clone(), - response_b.clone(), - ) - .await; + second_candidate(&mut virtual_overseer, candidate_a.clone()).await; + second_candidate(&mut virtual_overseer, candidate_b.clone()).await; // Should not get any backable candidates. 
get_backable_candidate( @@ -953,7 +914,6 @@ fn check_depth_query() { test_state.validation_code_hash, ); let candidate_hash_a = candidate_a.hash(); - let response_a = vec![(leaf_a.hash, vec![0])]; // Candidate B. let (candidate_b, pvd_b) = make_candidate( @@ -965,7 +925,6 @@ fn check_depth_query() { test_state.validation_code_hash, ); let candidate_hash_b = candidate_b.hash(); - let response_b = vec![(leaf_a.hash, vec![1])]; // Candidate C. let (candidate_c, pvd_c) = make_candidate( @@ -977,7 +936,6 @@ fn check_depth_query() { test_state.validation_code_hash, ); let candidate_hash_c = candidate_c.hash(); - let response_c = vec![(leaf_a.hash, vec![2])]; // Get hypothetical frontier of candidate A before adding it. get_hypothetical_frontier( @@ -991,13 +949,7 @@ fn check_depth_query() { .await; // Add candidate A. - second_candidate( - &mut virtual_overseer, - candidate_a.clone(), - pvd_a.clone(), - response_a.clone(), - ) - .await; + second_candidate(&mut virtual_overseer, candidate_a.clone()).await; // Get frontier of candidate A after adding it. get_hypothetical_frontier( @@ -1022,13 +974,7 @@ fn check_depth_query() { .await; // Add candidate B. - second_candidate( - &mut virtual_overseer, - candidate_b.clone(), - pvd_b.clone(), - response_b.clone(), - ) - .await; + second_candidate(&mut virtual_overseer, candidate_b.clone()).await; // Get frontier of candidate B after adding it. get_hypothetical_frontier( @@ -1053,13 +999,7 @@ fn check_depth_query() { .await; // Add candidate C. - second_candidate( - &mut virtual_overseer, - candidate_c.clone(), - pvd_c.clone(), - response_c.clone(), - ) - .await; + second_candidate(&mut virtual_overseer, candidate_c.clone()).await; // Get frontier of candidate C after adding it. get_hypothetical_frontier( @@ -1105,7 +1045,6 @@ fn check_pvd_query() { HeadData(vec![1]), test_state.validation_code_hash, ); - let response_a = vec![(leaf_a.hash, vec![0])]; // Candidate B. 
let (candidate_b, pvd_b) = make_candidate( @@ -1116,7 +1055,6 @@ fn check_pvd_query() { HeadData(vec![2]), test_state.validation_code_hash, ); - let response_b = vec![(leaf_a.hash, vec![1])]; // Candidate C. let (candidate_c, pvd_c) = make_candidate( @@ -1127,7 +1065,6 @@ fn check_pvd_query() { HeadData(vec![3]), test_state.validation_code_hash, ); - let response_c = vec![(leaf_a.hash, vec![2])]; // Get pvd of candidate A before adding it. get_pvd( @@ -1140,13 +1077,7 @@ fn check_pvd_query() { .await; // Add candidate A. - second_candidate( - &mut virtual_overseer, - candidate_a.clone(), - pvd_a.clone(), - response_a.clone(), - ) - .await; + second_candidate(&mut virtual_overseer, candidate_a.clone()).await; back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; // Get pvd of candidate A after adding it. @@ -1170,8 +1101,7 @@ fn check_pvd_query() { .await; // Add candidate B. - second_candidate(&mut virtual_overseer, candidate_b, pvd_b.clone(), response_b.clone()) - .await; + second_candidate(&mut virtual_overseer, candidate_b).await; // Get pvd of candidate B after adding it. get_pvd( @@ -1194,8 +1124,7 @@ fn check_pvd_query() { .await; // Add candidate C. - second_candidate(&mut virtual_overseer, candidate_c, pvd_c.clone(), response_c.clone()) - .await; + second_candidate(&mut virtual_overseer, candidate_c).await; // Get pvd of candidate C after adding it. get_pvd( diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index 428ff8ec9319..7b0c349a508b 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -14,6 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +// TODO [now]: Remove once some tests are written. 
+#![allow(unused)] + use super::*; use crate::*; use polkadot_node_network_protocol::request_response::ReqProtocolNames; From 2603cfff08a6902a95eb11567fd00a71e91ebff8 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Sun, 19 Feb 2023 14:48:12 +0100 Subject: [PATCH 184/220] Remove some unneeded `allow`s --- node/core/prospective-parachains/src/fragment_tree.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 312376d0f307..05ce22533cfe 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -144,8 +144,6 @@ impl CandidateStorage { } /// Remove a candidate from the store. - // TODO [now]: make it used or remove. - #[allow(dead_code)] pub fn remove_candidate(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.remove(candidate_hash) { let parent_head_hash = entry.candidate.persisted_validation_data.parent_head.hash(); @@ -159,8 +157,6 @@ impl CandidateStorage { } /// Note that an existing candidate has been seconded. - // TODO [now]: make it used or remove - #[allow(dead_code)] pub fn mark_seconded(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) { if entry.state != CandidateState::Backed { @@ -253,7 +249,6 @@ enum CandidateState { /// is not necessarily backed. Introduced, /// The candidate has been seconded. - #[allow(dead_code)] Seconded, /// The candidate has been completely backed by the group. 
Backed, From b733ffc3277b38c66cfffb9ecad2907f10be1c54 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 20 Feb 2023 16:34:20 -0600 Subject: [PATCH 185/220] adapt some new test helpers from Marcin --- Cargo.lock | 1 + .../network/statement-distribution/Cargo.toml | 1 + .../src/vstaging/tests/mod.rs | 191 +++++++++++++++++- 3 files changed, 186 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5a7b521ff590..7e8838f30609 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7803,6 +7803,7 @@ dependencies = [ "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", diff --git a/node/network/statement-distribution/Cargo.toml b/node/network/statement-distribution/Cargo.toml index 961a8e04a659..dc5d9b15d7b3 100644 --- a/node/network/statement-distribution/Cargo.toml +++ b/node/network/statement-distribution/Cargo.toml @@ -36,3 +36,4 @@ sc-network = { git = "https://github.com/paritytech/substrate", branch = "master futures-timer = "3.0.2" polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } rand_chacha = "0.3" +polkadot-node-subsystem-types = { path = "../../subsystem-types" } diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index 7b0c349a508b..5ae844f951f7 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -20,13 +20,23 @@ use super::*; use crate::*; use polkadot_node_network_protocol::request_response::ReqProtocolNames; +use polkadot_node_subsystem::messages::{ + AllMessages, ChainApiMessage, ProspectiveParachainsMessage, RuntimeApiMessage, + RuntimeApiRequest, +}; use polkadot_node_subsystem_test_helpers as test_helpers; -use 
polkadot_primitives::vstaging::{AssignmentId, AssignmentPair, IndexedVec, ValidatorPair}; +use polkadot_node_subsystem_types::{jaeger, ActivatedLeaf, LeafStatus}; +use polkadot_primitives::vstaging::{ + AssignmentId, AssignmentPair, AsyncBackingParameters, BlockNumber, CoreState, + GroupRotationInfo, HeadData, Header, IndexedVec, ScheduledCore, SessionInfo, + ValidationCodeHash, ValidatorPair, SessionIndex, +}; use sc_keystore::LocalKeystore; use sp_application_crypto::Pair as PairT; use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; use sp_keyring::Sr25519Keyring; +use assert_matches::assert_matches; use futures::Future; use rand::{Rng, SeedableRng}; @@ -38,6 +48,9 @@ mod requests; type VirtualOverseer = test_helpers::TestSubsystemContextHandle; +const ASYNC_BACKING_PARAMETERS: AsyncBackingParameters = + AsyncBackingParameters { max_candidate_depth: 4, allowed_ancestry_len: 3 }; + // Some deterministic genesis hash for req/res protocol names const GENESIS_HASH: Hash = Hash::repeat_byte(0xff); @@ -60,9 +73,7 @@ struct TestState { config: TestConfig, local: Option, validators: Vec, - discovery_keys: Vec, - assignment_keys: Vec, - validator_groups: IndexedVec>, + session_info: SessionInfo, } impl TestState { @@ -112,13 +123,28 @@ impl TestState { None }; + let validator_public = validator_pubkeys(&validators); + let session_info = SessionInfo { + validators: validator_public, + discovery_keys, + validator_groups: IndexedVec::from(validator_groups), + assignment_keys, + n_cores: 0, + zeroth_delay_tranche_width: 0, + relay_vrf_modulo_samples: 0, + n_delay_tranches: 0, + no_show_slots: 0, + needed_approvals: 0, + active_validator_indices: vec![], + dispute_period: 6, + random_seed: [0u8; 32], + }; + TestState { config, local, validators, - discovery_keys, - assignment_keys, - validator_groups: IndexedVec::from(validator_groups), + session_info, } } } @@ -167,3 +193,154 @@ fn test_harness>( subsystem, )); } + +struct PerParaData { + min_relay_parent: 
BlockNumber, + head_data: HeadData, +} + +impl PerParaData { + pub fn new(min_relay_parent: BlockNumber, head_data: HeadData) -> Self { + Self { min_relay_parent, head_data } + } +} + +struct TestLeaf { + number: BlockNumber, + hash: Hash, + session: SessionIndex, + availability_cores: Vec, + para_data: Vec<(ParaId, PerParaData)>, +} + +impl TestLeaf { + pub fn para_data(&self, para_id: ParaId) -> &PerParaData { + self.para_data + .iter() + .find_map(|(p_id, data)| if *p_id == para_id { Some(data) } else { None }) + .unwrap() + } +} + +async fn activate_leaf( + virtual_overseer: &mut VirtualOverseer, + para_id: ParaId, + leaf: &TestLeaf, + test_state: &TestState, +) { + let activated = ActivatedLeaf { + hash: leaf.hash, + number: leaf.number, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + activated, + )))) + .await; + + handle_leaf_activation(virtual_overseer, para_id, leaf, test_state).await; +} + +async fn handle_leaf_activation( + virtual_overseer: &mut VirtualOverseer, + para_id: ParaId, + leaf: &TestLeaf, + test_state: &TestState, +) { + let TestLeaf { number, hash, para_data, session, availability_cores } = leaf; + let PerParaData { min_relay_parent, head_data } = leaf.para_data(para_id); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + ) if parent == *hash => { + tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); + } + ); + + let mrp_response: Vec<(ParaId, BlockNumber)> = para_data + .iter() + .map(|(para_id, data)| (*para_id, data.min_relay_parent)) + .collect(); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx) + ) if parent == *hash => { + tx.send(mrp_response).unwrap(); + } + ); + + let 
header = Header { + parent_hash: get_parent_hash(*hash), + number: *number, + state_root: Hash::zero(), + extrinsics_root: Hash::zero(), + digest: Default::default(), + }; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ChainApi( + ChainApiMessage::BlockHeader(parent, tx) + ) if parent == *hash => { + tx.send(Ok(Some(header))).unwrap(); + } + ); + + // TODO: Can we remove this REDUNDANT request? + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + ) if parent == *hash => { + tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx))) if parent == *hash => { + tx.send(Ok(*session)).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx))) if parent == *hash => { + tx.send(Ok(availability_cores.clone())).unwrap(); + } + ); + + let validator_groups = test_state.session_info.validator_groups.to_vec(); + let group_rotation_info = + GroupRotationInfo { session_start_block: 1, group_rotation_frequency: 12, now: 1 }; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx))) if parent == *hash => { + tx.send(Ok((validator_groups, group_rotation_info))).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionInfo(s, tx))) if s == *session => { + tx.send(Ok(Some(test_state.session_info.clone()))).unwrap(); + } + ); +} + +fn get_parent_hash(hash: Hash) -> Hash { + Hash::from_low_u64_be(hash.to_low_u64_be() + 1) +} + +fn validator_pubkeys(val_ids: 
&[ValidatorPair]) -> IndexedVec { + val_ids.iter().map(|v| v.public().into()).collect() +} From 46a09f36f76d9f75a0caf7afd832271d322ee9e5 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 20 Feb 2023 16:39:54 -0600 Subject: [PATCH 186/220] add helper for activating a gossip topology --- .../statement-distribution/src/vstaging/tests/mod.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index 5ae844f951f7..623f5876952c 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -21,8 +21,9 @@ use super::*; use crate::*; use polkadot_node_network_protocol::request_response::ReqProtocolNames; use polkadot_node_subsystem::messages::{ + network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, ProspectiveParachainsMessage, RuntimeApiMessage, - RuntimeApiRequest, + RuntimeApiRequest, NetworkBridgeEvent, }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_types::{jaeger, ActivatedLeaf, LeafStatus}; @@ -344,3 +345,12 @@ fn get_parent_hash(hash: Hash) -> Hash { fn validator_pubkeys(val_ids: &[ValidatorPair]) -> IndexedVec { val_ids.iter().map(|v| v.public().into()).collect() } + +async fn provide_topology( + virtual_overseer: &mut VirtualOverseer, + topology: NewGossipTopology, +) { + virtual_overseer.send(FromOrchestra::Communication { + msg: StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::NewGossipTopology(topology)) + }).await; +} From a1d56c8fede11d84889a8afe728606555d2ca0cb Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 20 Feb 2023 16:45:01 -0600 Subject: [PATCH 187/220] add utility for signing statements --- .../src/vstaging/tests/mod.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git 
a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index 623f5876952c..2f0f228dfd62 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -96,6 +96,8 @@ impl TestState { for i in 0..config.validator_count { let validator_pair = if Some(i) == local_validator_pos { + // Note: the specific key is used to ensure the keystore holds + // this key and the subsystem can detect that it is a validator. Sr25519Keyring::Ferdie.pair().into() } else { ValidatorPair::generate().0 @@ -148,6 +150,19 @@ impl TestState { session_info, } } + + fn sign_statement( + &self, + validator_index: ValidatorIndex, + statement: CompactStatement, + context: &SigningContext, + ) -> SignedStatement { + let payload = statement.signing_payload(context); + let pair = &self.validators[validator_index.0 as usize]; + let signature = pair.sign(&payload[..]); + + SignedStatement::new(statement, validator_index, signature, context, &pair.public()).unwrap() + } } fn test_harness>( From 786cf2858e0d216535135a69ce95c3acd50539ac Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 20 Feb 2023 16:53:04 -0600 Subject: [PATCH 188/220] helpers for connecting/disconnecting peers --- .../src/vstaging/tests/cluster.rs | 7 +++++ .../src/vstaging/tests/mod.rs | 31 +++++++++++++++++-- 2 files changed, 35 insertions(+), 3 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/node/network/statement-distribution/src/vstaging/tests/cluster.rs index 1a1aab468f27..4a88cc4331a4 100644 --- a/node/network/statement-distribution/src/vstaging/tests/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/tests/cluster.rs @@ -14,9 +14,16 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
+use super::*; + // TODO [now]: shared seconded statement is circulated to all cluster peers with relay-parent // in view +#[test] +fn share_seconded_circulated_to_cluster() { + +} + // TODO [now]: cluster 'valid' statement without prior seconded is ignored // TODO [now]: statement with invalid signature leads to report diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index 2f0f228dfd62..7f332eeb2d46 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -19,7 +19,9 @@ use super::*; use crate::*; -use polkadot_node_network_protocol::request_response::ReqProtocolNames; +use polkadot_node_network_protocol::{ + request_response::ReqProtocolNames, ObservedRole, +}; use polkadot_node_subsystem::messages::{ network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, ProspectiveParachainsMessage, RuntimeApiMessage, @@ -61,7 +63,6 @@ struct TestConfig { group_size: usize, // whether the local node should be a validator local_validator: bool, - rng_seed: u64, } struct TestLocalValidator { @@ -178,7 +179,7 @@ fn test_harness>( let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); let (statement_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); - let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(config.rng_seed); + let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(0); let test_state = TestState::from_config(config, &mut rng); @@ -361,6 +362,30 @@ fn validator_pubkeys(val_ids: &[ValidatorPair]) -> IndexedVec>, +) { + virtual_overseer.send(FromOrchestra::Communication { + msg: StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( + peer, + ObservedRole::Authority, + ValidationVersion::VStaging.into(), + authority_ids, + 
)) + }).await; +} + +async fn disconnect_peer( + virtual_overseer: &mut VirtualOverseer, + peer: PeerId, +) { + virtual_overseer.send(FromOrchestra::Communication { + msg: StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerDisconnected(peer)) + }).await; +} + async fn provide_topology( virtual_overseer: &mut VirtualOverseer, topology: NewGossipTopology, From a51ad1878ed787727941b9be40d154b47dc21e89 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 20 Feb 2023 16:56:04 -0600 Subject: [PATCH 189/220] round out network utilities --- .../src/vstaging/tests/mod.rs | 22 ++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index 7f332eeb2d46..850c05f73944 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -386,7 +386,27 @@ async fn disconnect_peer( }).await; } -async fn provide_topology( +async fn send_peer_view_change( + virtual_overseer: &mut VirtualOverseer, + peer: PeerId, + view: View, +) { + virtual_overseer.send(FromOrchestra::Communication { + msg: StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerViewChange(peer, view)) + }).await; +} + +async fn send_peer_message( + virtual_overseer: &mut VirtualOverseer, + peer: PeerId, + message: protocol_vstaging::StatementDistributionMessage, +) { + virtual_overseer.send(FromOrchestra::Communication { + msg: StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(peer, Versioned::VStaging(message))) + }).await; +} + +async fn send_new_topology( virtual_overseer: &mut VirtualOverseer, topology: NewGossipTopology, ) { From 643ba8bfb6ea2e01bb14421fe2e1717c81c15a1e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 20 Feb 2023 16:56:17 -0600 Subject: [PATCH 190/220] fmt --- 
.../src/vstaging/tests/cluster.rs | 4 +- .../src/vstaging/tests/mod.rs | 99 ++++++++++--------- 2 files changed, 52 insertions(+), 51 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/node/network/statement-distribution/src/vstaging/tests/cluster.rs index 4a88cc4331a4..59adb6899ae0 100644 --- a/node/network/statement-distribution/src/vstaging/tests/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/tests/cluster.rs @@ -20,9 +20,7 @@ use super::*; // in view #[test] -fn share_seconded_circulated_to_cluster() { - -} +fn share_seconded_circulated_to_cluster() {} // TODO [now]: cluster 'valid' statement without prior seconded is ignored diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index 850c05f73944..e951e6b06cb1 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -19,20 +19,17 @@ use super::*; use crate::*; -use polkadot_node_network_protocol::{ - request_response::ReqProtocolNames, ObservedRole, -}; +use polkadot_node_network_protocol::{request_response::ReqProtocolNames, ObservedRole}; use polkadot_node_subsystem::messages::{ - network_bridge_event::NewGossipTopology, - AllMessages, ChainApiMessage, ProspectiveParachainsMessage, RuntimeApiMessage, - RuntimeApiRequest, NetworkBridgeEvent, + network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, NetworkBridgeEvent, + ProspectiveParachainsMessage, RuntimeApiMessage, RuntimeApiRequest, }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_types::{jaeger, ActivatedLeaf, LeafStatus}; use polkadot_primitives::vstaging::{ AssignmentId, AssignmentPair, AsyncBackingParameters, BlockNumber, CoreState, - GroupRotationInfo, HeadData, Header, IndexedVec, ScheduledCore, SessionInfo, - ValidationCodeHash, ValidatorPair, SessionIndex, + 
GroupRotationInfo, HeadData, Header, IndexedVec, ScheduledCore, SessionIndex, SessionInfo, + ValidationCodeHash, ValidatorPair, }; use sc_keystore::LocalKeystore; use sp_application_crypto::Pair as PairT; @@ -144,12 +141,7 @@ impl TestState { random_seed: [0u8; 32], }; - TestState { - config, - local, - validators, - session_info, - } + TestState { config, local, validators, session_info } } fn sign_statement( @@ -162,7 +154,8 @@ impl TestState { let pair = &self.validators[validator_index.0 as usize]; let signature = pair.sign(&payload[..]); - SignedStatement::new(statement, validator_index, signature, context, &pair.public()).unwrap() + SignedStatement::new(statement, validator_index, signature, context, &pair.public()) + .unwrap() } } @@ -367,33 +360,38 @@ async fn connect_peer( peer: PeerId, authority_ids: Option>, ) { - virtual_overseer.send(FromOrchestra::Communication { - msg: StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( - peer, - ObservedRole::Authority, - ValidationVersion::VStaging.into(), - authority_ids, - )) - }).await; + virtual_overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::PeerConnected( + peer, + ObservedRole::Authority, + ValidationVersion::VStaging.into(), + authority_ids, + ), + ), + }) + .await; } -async fn disconnect_peer( - virtual_overseer: &mut VirtualOverseer, - peer: PeerId, -) { - virtual_overseer.send(FromOrchestra::Communication { - msg: StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerDisconnected(peer)) - }).await; +async fn disconnect_peer(virtual_overseer: &mut VirtualOverseer, peer: PeerId) { + virtual_overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::PeerDisconnected(peer), + ), + }) + .await; } -async fn send_peer_view_change( - virtual_overseer: &mut VirtualOverseer, - peer: PeerId, - view: View, -) { - 
virtual_overseer.send(FromOrchestra::Communication { - msg: StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerViewChange(peer, view)) - }).await; +async fn send_peer_view_change(virtual_overseer: &mut VirtualOverseer, peer: PeerId, view: View) { + virtual_overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::PeerViewChange(peer, view), + ), + }) + .await; } async fn send_peer_message( @@ -401,16 +399,21 @@ async fn send_peer_message( peer: PeerId, message: protocol_vstaging::StatementDistributionMessage, ) { - virtual_overseer.send(FromOrchestra::Communication { - msg: StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(peer, Versioned::VStaging(message))) - }).await; + virtual_overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::PeerMessage(peer, Versioned::VStaging(message)), + ), + }) + .await; } -async fn send_new_topology( - virtual_overseer: &mut VirtualOverseer, - topology: NewGossipTopology, -) { - virtual_overseer.send(FromOrchestra::Communication { - msg: StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::NewGossipTopology(topology)) - }).await; +async fn send_new_topology(virtual_overseer: &mut VirtualOverseer, topology: NewGossipTopology) { + virtual_overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::NewGossipTopology(topology), + ), + }) + .await; } From ff15c82a7a5c7e645417f6d01f867b3210312f9a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 20 Feb 2023 19:22:05 -0600 Subject: [PATCH 191/220] fix bug in initializing validator-meta --- .../statement-distribution/src/vstaging/statement_store.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs 
b/node/network/statement-distribution/src/vstaging/statement_store.rs index 0967394b69f0..6834bb6cb59f 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -84,7 +84,7 @@ impl StatementStore { } StatementStore { - validator_meta: HashMap::new(), + validator_meta, group_statements: HashMap::new(), known_statements: HashMap::new(), } @@ -99,7 +99,6 @@ impl StatementStore { origin: StatementOrigin, ) -> Result { let validator_index = statement.validator_index(); - let validator_meta = match self.validator_meta.get_mut(&validator_index) { None => return Err(ValidatorUnknown), Some(m) => m, From de62130b7e058ace69c1a9f253cd0d2c0c468f45 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 20 Feb 2023 19:22:29 -0600 Subject: [PATCH 192/220] fix compilation --- .../statement-distribution/src/vstaging/statement_store.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs index 6834bb6cb59f..50ac99d0a813 100644 --- a/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/node/network/statement-distribution/src/vstaging/statement_store.rs @@ -73,7 +73,7 @@ impl StatementStore { for (g, group) in groups.all().iter().enumerate() { for (i, v) in group.iter().enumerate() { validator_meta.insert( - v, + *v, ValidatorMeta { seconded_count: 0, within_group_index: i, From daef7207c75beaf4d89edff0f244e43cee457fa0 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 20 Feb 2023 19:49:43 -0600 Subject: [PATCH 193/220] implement first cluster test --- .../src/vstaging/tests/cluster.rs | 120 +++++++++++++++- .../src/vstaging/tests/mod.rs | 129 ++++++++++++++++-- 2 files changed, 230 insertions(+), 19 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/tests/cluster.rs 
b/node/network/statement-distribution/src/vstaging/tests/cluster.rs index 59adb6899ae0..82cd24c10cb5 100644 --- a/node/network/statement-distribution/src/vstaging/tests/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/tests/cluster.rs @@ -16,11 +16,123 @@ use super::*; -// TODO [now]: shared seconded statement is circulated to all cluster peers with relay-parent -// in view - #[test] -fn share_seconded_circulated_to_cluster() {} +fn share_seconded_circulated_to_cluster() { + let config = TestConfig { + validator_count: 20, + group_size: 3, + local_validator: true, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let (pvd, candidate) = make_committed_candidate( + local_para, + relay_parent, + 1, + vec![1, 2, 3].into(), + vec![4, 5, 6].into(), + ); + let candidate_hash = candidate.hash(); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + })), + para_data: (0..state.session_info.validator_groups.len()).map(|i| { + (ParaId::from(i as u32), PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }) + }).collect(), + }; + + // peer A is in group, has relay parent in view. + // peer B is in group, has no relay parent in view. + // peer C is not in group, has relay parent in view. 
+ { + let other_group_validators = state.group_validators(local_validator.group_index, true); + + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()), + ).await; + + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(other_group_validators[1])].into_iter().collect()), + ).await; + + connect_peer( + &mut overseer, + peer_c.clone(), + None, + ).await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ).await; + + let full_signed = state.sign_statement( + local_validator.validator_index, + CompactStatement::Seconded(candidate_hash), + &SigningContext { session_index: 1, parent_hash: relay_parent }, + ).convert_to_superpayload(StatementWithPVD::Seconded(candidate.clone(), pvd.clone())).unwrap(); + + overseer.send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, full_signed) + }).await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::Statement( + r, + s, + ) + )) + )) => { + assert_eq!(peers, vec![peer_a.clone()]); + assert_eq!(r, relay_parent); + assert_eq!(s.unchecked_payload(), &CompactStatement::Seconded(candidate_hash)); + assert_eq!(s.unchecked_validator_index(), local_validator.validator_index); + } + ); + + // sharing a `Seconded` message confirms a candidate, which leads to new + // fragment tree updates. 
+ answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + None, + false, + ).await; + + overseer + }); +} // TODO [now]: cluster 'valid' statement without prior seconded is ignored diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index e951e6b06cb1..edc9a3a5f6f5 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -19,17 +19,17 @@ use super::*; use crate::*; -use polkadot_node_network_protocol::{request_response::ReqProtocolNames, ObservedRole}; +use polkadot_node_network_protocol::{view, request_response::ReqProtocolNames, ObservedRole}; use polkadot_node_subsystem::messages::{ network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, NetworkBridgeEvent, - ProspectiveParachainsMessage, RuntimeApiMessage, RuntimeApiRequest, + ProspectiveParachainsMessage, RuntimeApiMessage, RuntimeApiRequest, HypotheticalCandidate, FragmentTreeMembership, }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_types::{jaeger, ActivatedLeaf, LeafStatus}; use polkadot_primitives::vstaging::{ AssignmentId, AssignmentPair, AsyncBackingParameters, BlockNumber, CoreState, GroupRotationInfo, HeadData, Header, IndexedVec, ScheduledCore, SessionIndex, SessionInfo, - ValidationCodeHash, ValidatorPair, + ValidationCodeHash, ValidatorPair, CandidateDescriptor, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData }; use sc_keystore::LocalKeystore; use sp_application_crypto::Pair as PairT; @@ -62,6 +62,7 @@ struct TestConfig { local_validator: bool, } +#[derive(Clone)] struct TestLocalValidator { validator_id: ValidatorId, validator_index: ValidatorIndex, @@ -87,7 +88,8 @@ impl TestState { let mut validator_groups = Vec::new(); let local_validator_pos = if config.local_validator { - Some(rng.gen_range(0..config.validator_count)) + // 
ensure local validator is always in a full group. + Some(rng.gen_range(0..config.validator_count).saturating_sub(config.group_size - 1)) } else { None }; @@ -144,6 +146,31 @@ impl TestState { TestState { config, local, validators, session_info } } + fn make_availability_cores(&self, f: impl Fn(usize) -> CoreState) -> Vec { + (0..self.session_info.validator_groups.len()) + .map(f) + .collect() + } + + fn group_validators(&self, group_index: GroupIndex, exclude_local: bool) -> Vec { + self.session_info + .validator_groups + .get(group_index) + .unwrap() + .iter() + .cloned() + .filter(|&i| self.local.as_ref().map_or(true, |l| exclude_local && l.validator_index != i)) + .collect() + } + + fn validator_id(&self, validator_index: ValidatorIndex) -> ValidatorId { + self.session_info.validators.get(validator_index).unwrap().clone() + } + + fn discovery_id(&self, validator_index: ValidatorIndex) -> AuthorityDiscoveryId { + self.session_info.discovery_keys[validator_index.0 as usize].clone() + } + fn sign_statement( &self, validator_index: ValidatorIndex, @@ -218,6 +245,7 @@ impl PerParaData { struct TestLeaf { number: BlockNumber, hash: Hash, + parent_hash: Hash, session: SessionIndex, availability_cores: Vec, para_data: Vec<(ParaId, PerParaData)>, @@ -237,6 +265,7 @@ async fn activate_leaf( para_id: ParaId, leaf: &TestLeaf, test_state: &TestState, + expect_session_info_request: bool, ) { let activated = ActivatedLeaf { hash: leaf.hash, @@ -251,7 +280,13 @@ async fn activate_leaf( )))) .await; - handle_leaf_activation(virtual_overseer, para_id, leaf, test_state).await; + handle_leaf_activation( + virtual_overseer, + para_id, + leaf, + test_state, + expect_session_info_request, + ).await; } async fn handle_leaf_activation( @@ -259,8 +294,9 @@ async fn handle_leaf_activation( para_id: ParaId, leaf: &TestLeaf, test_state: &TestState, + expect_session_info_request: bool, ) { - let TestLeaf { number, hash, para_data, session, availability_cores } = leaf; + let TestLeaf { 
number, hash, parent_hash, para_data, session, availability_cores } = leaf; let PerParaData { min_relay_parent, head_data } = leaf.para_data(para_id); assert_matches!( @@ -286,7 +322,7 @@ async fn handle_leaf_activation( ); let header = Header { - parent_hash: get_parent_hash(*hash), + parent_hash: *parent_hash, number: *number, state_root: Hash::zero(), extrinsics_root: Hash::zero(), @@ -338,17 +374,41 @@ async fn handle_leaf_activation( } ); + if expect_session_info_request { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionInfo(s, tx))) if s == *session => { + tx.send(Ok(Some(test_state.session_info.clone()))).unwrap(); + } + ); + } +} + +async fn answer_expected_hypothetical_depth_request( + virtual_overseer: &mut VirtualOverseer, + responses: Vec<(HypotheticalCandidate, FragmentTreeMembership)>, + expected_leaf_hash: Option, + expected_backed_in_path_only: bool, +) { assert_matches!( virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionInfo(s, tx))) if s == *session => { - tx.send(Ok(Some(test_state.session_info.clone()))).unwrap(); - } - ); -} + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetHypotheticalFrontier(req, tx) + ) => { + assert_eq!(req.fragment_tree_relay_parent, expected_leaf_hash); + assert_eq!(req.backed_in_path_only, expected_backed_in_path_only); + for (i, (candidate, _)) in responses.iter().enumerate() { + assert!( + req.candidates.iter().find(|c| c == &candidate).is_some(), + "did not receive request for hypothetical candidate {}", + i, + ); + } -fn get_parent_hash(hash: Hash) -> Hash { - Hash::from_low_u64_be(hash.to_low_u64_be() + 1) + tx.send(responses); + } + ) } fn validator_pubkeys(val_ids: &[ValidatorPair]) -> IndexedVec { @@ -417,3 +477,42 @@ async fn send_new_topology(virtual_overseer: &mut VirtualOverseer, topology: New }) .await; } + +fn 
make_committed_candidate( + para_id: ParaId, + relay_parent: Hash, + relay_parent_number: BlockNumber, + parent_head: HeadData, + para_head: HeadData, +) -> (PersistedValidationData, CommittedCandidateReceipt) { + let persisted_validation_data = PersistedValidationData { + parent_head, + relay_parent_number, + relay_parent_storage_root: Hash::repeat_byte(69), + max_pov_size: 1_000_000, + }; + + let candidate = CommittedCandidateReceipt { + descriptor: CandidateDescriptor { + para_id, + relay_parent, + collator: polkadot_primitives_test_helpers::dummy_collator(), + persisted_validation_data_hash: persisted_validation_data.hash(), + pov_hash: Hash::repeat_byte(1), + erasure_root: Hash::repeat_byte(1), + signature: polkadot_primitives_test_helpers::dummy_collator_signature(), + para_head: para_head.hash(), + validation_code_hash: Hash::repeat_byte(42).into(), + }, + commitments: CandidateCommitments { + upward_messages: Vec::new(), + horizontal_messages: Vec::new(), + new_validation_code: None, + head_data: para_head, + processed_downward_messages: 1, + hrmp_watermark: relay_parent_number, + }, + }; + + (persisted_validation_data, candidate) +} From c4daaa74f9f6e7d91eed773c45d071f8e9c1ae17 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 20 Feb 2023 19:51:18 -0600 Subject: [PATCH 194/220] TODOs for incoming request tests --- .../statement-distribution/src/vstaging/tests/requests.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/node/network/statement-distribution/src/vstaging/tests/requests.rs b/node/network/statement-distribution/src/vstaging/tests/requests.rs index 4809bc6ee39b..5e624bd622eb 100644 --- a/node/network/statement-distribution/src/vstaging/tests/requests.rs +++ b/node/network/statement-distribution/src/vstaging/tests/requests.rs @@ -21,3 +21,7 @@ // TODO [now]: peer reported for providing duplicate statements // TODO [now]: peer reported for providing statements with invalid signatures or wrong validator IDs + +// TODO [now]: local 
node sanity checks incoming requests + +// TODO [now]: local node respects statement mask From 01468f656c33efca75c7f68ebffe025d32ca7a7e Mon Sep 17 00:00:00 2001 From: Marcin S Date: Tue, 21 Feb 2023 08:25:49 +0100 Subject: [PATCH 195/220] Remove unneeded `make_committed_candidate` helper --- .../src/vstaging/mod.rs | 1 + .../src/vstaging/tests/cluster.rs | 7 +++- .../src/vstaging/tests/mod.rs | 39 ------------------- 3 files changed, 6 insertions(+), 41 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 1df43b52e6a5..398d21c021d9 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -419,6 +419,7 @@ pub(crate) async fn handle_active_leaves_update( // New leaf: fetch info from runtime API and initialize // `per_relay_parent`. + // TODO [now]: Didn't we already request this in `lib.rs`? let mode = prospective_parachains_mode(ctx.sender(), new_relay_parent).await; // request prospective parachains mode, skip disabled relay-parents diff --git a/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/node/network/statement-distribution/src/vstaging/tests/cluster.rs index 82cd24c10cb5..972373dee144 100644 --- a/node/network/statement-distribution/src/vstaging/tests/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/tests/cluster.rs @@ -16,6 +16,8 @@ use super::*; +use polkadot_primitives_test_helpers::make_candidate; + #[test] fn share_seconded_circulated_to_cluster() { let config = TestConfig { @@ -33,12 +35,13 @@ fn share_seconded_circulated_to_cluster() { let local_validator = state.local.clone().unwrap(); let local_para = ParaId::from(local_validator.group_index.0); - let (pvd, candidate) = make_committed_candidate( - local_para, + let (candidate, pvd) = make_candidate( relay_parent, 1, + local_para, vec![1, 2, 3].into(), vec![4, 5, 6].into(), + 
Hash::repeat_byte(42).into(), ); let candidate_hash = candidate.hash(); diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index edc9a3a5f6f5..3dc64732216e 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -477,42 +477,3 @@ async fn send_new_topology(virtual_overseer: &mut VirtualOverseer, topology: New }) .await; } - -fn make_committed_candidate( - para_id: ParaId, - relay_parent: Hash, - relay_parent_number: BlockNumber, - parent_head: HeadData, - para_head: HeadData, -) -> (PersistedValidationData, CommittedCandidateReceipt) { - let persisted_validation_data = PersistedValidationData { - parent_head, - relay_parent_number, - relay_parent_storage_root: Hash::repeat_byte(69), - max_pov_size: 1_000_000, - }; - - let candidate = CommittedCandidateReceipt { - descriptor: CandidateDescriptor { - para_id, - relay_parent, - collator: polkadot_primitives_test_helpers::dummy_collator(), - persisted_validation_data_hash: persisted_validation_data.hash(), - pov_hash: Hash::repeat_byte(1), - erasure_root: Hash::repeat_byte(1), - signature: polkadot_primitives_test_helpers::dummy_collator_signature(), - para_head: para_head.hash(), - validation_code_hash: Hash::repeat_byte(42).into(), - }, - commitments: CandidateCommitments { - upward_messages: Vec::new(), - horizontal_messages: Vec::new(), - new_validation_code: None, - head_data: para_head, - processed_downward_messages: 1, - hrmp_watermark: relay_parent_number, - }, - }; - - (persisted_validation_data, candidate) -} From 9f9ef9a44550c6afcd6c4398d69ab198fa220ad1 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Tue, 21 Feb 2023 08:26:45 +0100 Subject: [PATCH 196/220] fmt --- .../src/vstaging/tests/cluster.rs | 72 ++++++++++--------- .../src/vstaging/tests/mod.rs | 31 ++++---- 2 files changed, 56 insertions(+), 47 deletions(-) diff --git 
a/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/node/network/statement-distribution/src/vstaging/tests/cluster.rs index 972373dee144..c41c59748a97 100644 --- a/node/network/statement-distribution/src/vstaging/tests/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/tests/cluster.rs @@ -20,11 +20,7 @@ use polkadot_primitives_test_helpers::make_candidate; #[test] fn share_seconded_circulated_to_cluster() { - let config = TestConfig { - validator_count: 20, - group_size: 3, - local_validator: true, - }; + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; let relay_parent = Hash::repeat_byte(1); let peer_a = PeerId::random(); @@ -50,13 +46,20 @@ fn share_seconded_circulated_to_cluster() { hash: relay_parent, parent_hash: Hash::repeat_byte(0), session: 1, - availability_cores: state.make_availability_cores(|i| CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(i as u32), - collator: None, - })), - para_data: (0..state.session_info.validator_groups.len()).map(|i| { - (ParaId::from(i as u32), PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }) - }).collect(), + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), }; // peer A is in group, has relay parent in view. 
@@ -69,19 +72,17 @@ fn share_seconded_circulated_to_cluster() { &mut overseer, peer_a.clone(), Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()), - ).await; + ) + .await; connect_peer( &mut overseer, peer_b.clone(), Some(vec![state.discovery_id(other_group_validators[1])].into_iter().collect()), - ).await; + ) + .await; - connect_peer( - &mut overseer, - peer_c.clone(), - None, - ).await; + connect_peer(&mut overseer, peer_c.clone(), None).await; send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; @@ -94,17 +95,23 @@ fn share_seconded_circulated_to_cluster() { vec![], Some(relay_parent), false, - ).await; + ) + .await; + + let full_signed = state + .sign_statement( + local_validator.validator_index, + CompactStatement::Seconded(candidate_hash), + &SigningContext { session_index: 1, parent_hash: relay_parent }, + ) + .convert_to_superpayload(StatementWithPVD::Seconded(candidate.clone(), pvd.clone())) + .unwrap(); - let full_signed = state.sign_statement( - local_validator.validator_index, - CompactStatement::Seconded(candidate_hash), - &SigningContext { session_index: 1, parent_hash: relay_parent }, - ).convert_to_superpayload(StatementWithPVD::Seconded(candidate.clone(), pvd.clone())).unwrap(); - - overseer.send(FromOrchestra::Communication { - msg: StatementDistributionMessage::Share(relay_parent, full_signed) - }).await; + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, full_signed), + }) + .await; assert_matches!( overseer.recv().await, @@ -126,12 +133,7 @@ fn share_seconded_circulated_to_cluster() { // sharing a `Seconded` message confirms a candidate, which leads to new // fragment tree updates. 
- answer_expected_hypothetical_depth_request( - &mut overseer, - vec![], - None, - false, - ).await; + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; overseer }); diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index 3dc64732216e..15c19f676d6e 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -19,17 +19,19 @@ use super::*; use crate::*; -use polkadot_node_network_protocol::{view, request_response::ReqProtocolNames, ObservedRole}; +use polkadot_node_network_protocol::{request_response::ReqProtocolNames, view, ObservedRole}; use polkadot_node_subsystem::messages::{ - network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, NetworkBridgeEvent, - ProspectiveParachainsMessage, RuntimeApiMessage, RuntimeApiRequest, HypotheticalCandidate, FragmentTreeMembership, + network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, FragmentTreeMembership, + HypotheticalCandidate, NetworkBridgeEvent, ProspectiveParachainsMessage, RuntimeApiMessage, + RuntimeApiRequest, }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_types::{jaeger, ActivatedLeaf, LeafStatus}; use polkadot_primitives::vstaging::{ - AssignmentId, AssignmentPair, AsyncBackingParameters, BlockNumber, CoreState, - GroupRotationInfo, HeadData, Header, IndexedVec, ScheduledCore, SessionIndex, SessionInfo, - ValidationCodeHash, ValidatorPair, CandidateDescriptor, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData + AssignmentId, AssignmentPair, AsyncBackingParameters, BlockNumber, CandidateCommitments, + CandidateDescriptor, CommittedCandidateReceipt, CoreState, GroupRotationInfo, HeadData, Header, + IndexedVec, PersistedValidationData, ScheduledCore, SessionIndex, SessionInfo, + ValidationCodeHash, ValidatorPair, 
}; use sc_keystore::LocalKeystore; use sp_application_crypto::Pair as PairT; @@ -147,19 +149,23 @@ impl TestState { } fn make_availability_cores(&self, f: impl Fn(usize) -> CoreState) -> Vec { - (0..self.session_info.validator_groups.len()) - .map(f) - .collect() + (0..self.session_info.validator_groups.len()).map(f).collect() } - fn group_validators(&self, group_index: GroupIndex, exclude_local: bool) -> Vec { + fn group_validators( + &self, + group_index: GroupIndex, + exclude_local: bool, + ) -> Vec { self.session_info .validator_groups .get(group_index) .unwrap() .iter() .cloned() - .filter(|&i| self.local.as_ref().map_or(true, |l| exclude_local && l.validator_index != i)) + .filter(|&i| { + self.local.as_ref().map_or(true, |l| exclude_local && l.validator_index != i) + }) .collect() } @@ -286,7 +292,8 @@ async fn activate_leaf( leaf, test_state, expect_session_info_request, - ).await; + ) + .await; } async fn handle_leaf_activation( From 00ee5e7682098be0742a3c598d96ac2d9f03a461 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 21 Feb 2023 16:58:10 -0600 Subject: [PATCH 197/220] some more tests for cluster --- .../src/vstaging/tests/cluster.rs | 337 +++++++++++++++++- 1 file changed, 333 insertions(+), 4 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/node/network/statement-distribution/src/vstaging/tests/cluster.rs index c41c59748a97..3392d883e18e 100644 --- a/node/network/statement-distribution/src/vstaging/tests/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/tests/cluster.rs @@ -139,13 +139,342 @@ fn share_seconded_circulated_to_cluster() { }); } -// TODO [now]: cluster 'valid' statement without prior seconded is ignored +#[test] +fn cluster_valid_statement_before_seconded_ignored() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut 
overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is in group, has relay parent in view. + let other_group_validators = state.group_validators(local_validator.group_index, true); + let v_a = other_group_validators[0]; + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let signed_valid = state.sign_statement( + v_a, + CompactStatement::Valid(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + signed_valid.as_unchecked().clone(), + ), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) => { + assert_eq!(p, peer_a); + assert_eq!(r, COST_UNEXPECTED_STATEMENT); + } + ); + + overseer + }); +} -// TODO [now]: statement with invalid signature leads to report +#[test] +fn cluster_statement_bad_signature() { + let config = TestConfig { 
validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; -// TODO [now]: cluster statement from non-cluster peer is rejected + // peer A is in group, has relay parent in view. + let other_group_validators = state.group_validators(local_validator.group_index, true); + let v_a = other_group_validators[0]; + let v_b = other_group_validators[1]; + + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; -// TODO [now]: statement from non-cluster originator is rejected + // sign statements with wrong signing context, leading to bad signature. 
+ let statements = vec![ + (v_a, CompactStatement::Seconded(candidate_hash)), + (v_b, CompactStatement::Seconded(candidate_hash)), + ] + .into_iter() + .map(|(v, s)| { + state.sign_statement( + v, + s, + &SigningContext { parent_hash: Hash::repeat_byte(69), session_index: 1 }, + ) + }) + .map(|s| s.as_unchecked().clone()); + + for statement in statements { + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + statement.clone(), + ), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == COST_INVALID_SIGNATURE => { }, + "{:?}", + statement + ); + } + + overseer + }); +} + +#[test] +fn useful_cluster_statement_from_non_cluster_peer_rejected() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is not in group, has relay parent in view. 
+ let not_our_group = + if local_validator.group_index.0 == 0 { GroupIndex(1) } else { GroupIndex(0) }; + + let that_group_validators = state.group_validators(not_our_group, false); + let v_non = that_group_validators[0]; + + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_non)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let statement = state + .sign_statement( + v_non, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == COST_UNEXPECTED_STATEMENT => { } + ); + + overseer + }); +} + +#[test] +fn statement_from_non_cluster_originator_unexpected() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: 
(0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is not in group, has relay parent in view. + let other_group_validators = state.group_validators(local_validator.group_index, true); + let v_a = other_group_validators[0]; + + connect_peer(&mut overseer, peer_a.clone(), None).await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let statement = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == COST_UNEXPECTED_STATEMENT => { } + ); + + overseer + }); +} // TODO [now]: cluster statement for unknown candidate leads to request From 1b7d910d718b84d41c7b2b845d676acb61fb4d38 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 21 Feb 2023 16:58:17 -0600 Subject: [PATCH 198/220] add a TODO about grid senders --- node/network/statement-distribution/src/vstaging/grid.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index a00210af44b3..e2f10d5683dc 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -104,6 +104,9 @@ pub fn build_session_topology<'a>( if 
group.contains(&our_index) { sub_view.sending.extend(our_neighbors.validator_indices_x.iter().cloned()); sub_view.sending.extend(our_neighbors.validator_indices_y.iter().cloned()); + + // TODO [now]: remove all other group validators from the 'sending' set. + // and test this behavior. } else { for &group_val in group { // If the validator shares a slice with us, we expect to From 804e8b30515cb8584bb4fe02f24d4874da83f1b6 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 21 Feb 2023 17:23:34 -0600 Subject: [PATCH 199/220] integrate inbound req/res into test harness --- .../src/vstaging/tests/mod.rs | 34 ++++++++++++++++--- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index 15c19f676d6e..c65b3323d899 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -40,6 +40,7 @@ use sp_keyring::Sr25519Keyring; use assert_matches::assert_matches; use futures::Future; +use parity_scale_codec::Encode; use rand::{Rng, SeedableRng}; use std::sync::Arc; @@ -76,10 +77,15 @@ struct TestState { local: Option, validators: Vec, session_info: SessionInfo, + req_sender: futures::channel::mpsc::Sender, } impl TestState { - fn from_config(config: TestConfig, rng: &mut impl Rng) -> Self { + fn from_config( + config: TestConfig, + req_sender: futures::channel::mpsc::Sender, + rng: &mut impl Rng, + ) -> Self { if config.group_size == 0 { panic!("group size cannot be 0"); } @@ -145,7 +151,7 @@ impl TestState { random_seed: [0u8; 32], }; - TestState { config, local, validators, session_info } + TestState { config, local, validators, session_info, req_sender } } fn make_availability_cores(&self, f: impl Fn(usize) -> CoreState) -> Vec { @@ -164,7 +170,7 @@ impl TestState { .iter() .cloned() .filter(|&i| { - self.local.as_ref().map_or(true, |l| exclude_local 
&& l.validator_index != i) + self.local.as_ref().map_or(true, |l| !exclude_local || l.validator_index != i) }) .collect() } @@ -190,6 +196,23 @@ impl TestState { SignedStatement::new(statement, validator_index, signature, context, &pair.public()) .unwrap() } + + // send a request out, returning a future which expects a response. + async fn send_request( + &mut self, + peer: PeerId, + request: AttestedCandidateRequest, + ) -> impl Future { + let (tx, rx) = futures::channel::oneshot::channel(); + let req = sc_network::config::IncomingRequest { + peer, + payload: request.encode(), + pending_response: tx, + }; + self.req_sender.send(req).await.unwrap(); + + rx.map(|r| r.unwrap()) + } } fn test_harness>( @@ -204,10 +227,11 @@ fn test_harness>( }; let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); let (statement_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); - let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); + let (candidate_req_receiver, req_cfg) = + IncomingRequest::get_config_receiver(&req_protocol_names); let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(0); - let test_state = TestState::from_config(config, &mut rng); + let test_state = TestState::from_config(config, req_cfg.inbound_queue.unwrap(), &mut rng); let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); let subsystem = async move { From 7297515ef02c42933331d68cb503c80f96316896 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 21 Feb 2023 18:46:33 -0600 Subject: [PATCH 200/220] polish off initial cluster test suite --- .../src/vstaging/cluster.rs | 16 +- .../src/vstaging/mod.rs | 18 +- .../src/vstaging/tests/cluster.rs | 740 +++++++++++++++++- .../src/vstaging/tests/mod.rs | 5 +- 4 files changed, 768 insertions(+), 11 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/cluster.rs b/node/network/statement-distribution/src/vstaging/cluster.rs index 
a63d1387d2d0..49852a912d2f 100644 --- a/node/network/statement-distribution/src/vstaging/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/cluster.rs @@ -171,6 +171,20 @@ impl ClusterTracker { } } + /// Note that we issued a statement. This updates internal structures. + pub fn note_issued(&mut self, originator: ValidatorIndex, statement: CompactStatement) { + for cluster_member in &self.validators { + if !self.they_know_statement(*cluster_member, originator, statement.clone()) { + // add the statement to pending knowledge for all peers + // which don't know the statement. + self.pending + .entry(*cluster_member) + .or_default() + .insert((originator, statement.clone())); + } + } + } + /// Note that we accepted an incoming statement. This updates internal structures. /// /// Should only be called after a successful `can_receive` call. @@ -185,7 +199,7 @@ impl ClusterTracker { if let Some(pending) = self.pending.get_mut(&sender) { pending.remove(&(originator, statement.clone())); } - } else if !self.they_know_statement(sender, originator, statement.clone()) { + } else if !self.they_know_statement(*cluster_member, originator, statement.clone()) { // add the statement to pending knowledge for all peers // which don't know the statement. 
self.pending diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 398d21c021d9..5b260b31da25 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -919,6 +919,11 @@ pub(crate) async fn share_local_statement( state.candidates.get_confirmed(&hash).map(|c| (c.para_id(), c.relay_parent())), }; + let is_seconded = match statement.payload() { + FullStatementWithPVD::Seconded(_, _) => true, + FullStatementWithPVD::Valid(_) => false, + }; + let (expected_para, expected_relay_parent) = match expected { None => return Err(JfyiError::InvalidShare), Some(x) => x, @@ -928,8 +933,9 @@ pub(crate) async fn share_local_statement( return Err(JfyiError::InvalidShare) } - if per_relay_parent.statement_store.seconded_count(&local_index) == - per_relay_parent.seconding_limit + if is_seconded && + per_relay_parent.statement_store.seconded_count(&local_index) == + per_relay_parent.seconding_limit { gum::warn!( target: LOG_TARGET, @@ -975,6 +981,11 @@ pub(crate) async fn share_local_statement( Ok(true) => {}, } + { + let l = per_relay_parent.local_validator.as_mut().expect("checked above; qed"); + l.cluster_tracker.note_issued(local_index, compact_statement.payload().clone()); + } + if let Some(ref session_topology) = per_session.grid_view { let l = per_relay_parent.local_validator.as_mut().expect("checked above; qed"); l.grid_tracker.learned_fresh_statement( @@ -1240,6 +1251,9 @@ async fn handle_incoming_statement( }; let cluster_sender_index = { + // This block of code only returns `Some` when both the originator and + // the sending peer are in the cluster. 
+ let allowed_senders = local_validator .cluster_tracker .senders_for_originator(statement.unchecked_validator_index()); diff --git a/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/node/network/statement-distribution/src/vstaging/tests/cluster.rs index 3392d883e18e..0004c4084280 100644 --- a/node/network/statement-distribution/src/vstaging/tests/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/tests/cluster.rs @@ -476,15 +476,741 @@ fn statement_from_non_cluster_originator_unexpected() { }); } -// TODO [now]: cluster statement for unknown candidate leads to request +#[test] +fn seconded_statement_leads_to_request() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is in group, has relay parent in view. 
+ let other_group_validators = state.group_validators(local_validator.group_index, true); + let v_a = other_group_validators[0]; + + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let statement = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(requests, IfDisconnected::ImmediateError)) => { + assert_eq!(requests.len(), 1); + assert_matches!( + &requests[0], + Requests::AttestedCandidateV2(outgoing) => { + assert_eq!(outgoing.peer, Recipient::Peer(peer_a.clone())); + assert_eq!(outgoing.payload.candidate_hash, candidate_hash); + } + ); + } + ); + + overseer + }); +} + +#[test] +fn cluster_statements_shared_seconded_first() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + 
local_para, + vec![1, 2, 3].into(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is in group, no relay parent in view. + { + let other_group_validators = state.group_validators(local_validator.group_index, true); + + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()), + ) + .await; + } + + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let full_signed = state + .sign_statement( + local_validator.validator_index, + CompactStatement::Seconded(candidate_hash), + &SigningContext { session_index: 1, parent_hash: relay_parent }, + ) + .convert_to_superpayload(StatementWithPVD::Seconded(candidate.clone(), pvd.clone())) + .unwrap(); + + let valid_signed = state + .sign_statement( + local_validator.validator_index, + CompactStatement::Valid(candidate_hash), + &SigningContext { session_index: 1, parent_hash: relay_parent }, + ) + .convert_to_superpayload(StatementWithPVD::Valid(candidate_hash)) + .unwrap(); + + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, full_signed), + }) + .await; + + // result of new confirmed candidate. 
+ answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, valid_signed), + }) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessages(messages)) => { + assert_eq!(messages.len(), 2); + + assert_eq!(messages[0].0, vec![peer_a]); + assert_eq!(messages[1].0, vec![peer_a]); + + assert_matches!( + &messages[0].1, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::Statement( + r, + s, + ) + )) if r == &relay_parent + && s.unchecked_payload() == &CompactStatement::Seconded(candidate_hash) => {} + ); + + assert_matches!( + &messages[1].1, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::Statement( + r, + s, + ) + )) if r == &relay_parent + && s.unchecked_payload() == &CompactStatement::Valid(candidate_hash) => {} + ); + } + ); + + overseer + }); +} + +#[test] +fn cluster_accounts_for_implicit_view() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + vec![1, 2, 3].into(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: 
state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is in group, has relay parent in view. + // peer B is in group, has no relay parent in view. + { + let other_group_validators = state.group_validators(local_validator.group_index, true); + + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(other_group_validators[1])].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let full_signed = state + .sign_statement( + local_validator.validator_index, + CompactStatement::Seconded(candidate_hash), + &SigningContext { session_index: 1, parent_hash: relay_parent }, + ) + .convert_to_superpayload(StatementWithPVD::Seconded(candidate.clone(), pvd.clone())) + .unwrap(); + + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, full_signed), + }) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::Statement( + r, + s, + ) + )) + )) => { + assert_eq!(peers, vec![peer_a.clone()]); + assert_eq!(r, relay_parent); + assert_eq!(s.unchecked_payload(), 
&CompactStatement::Seconded(candidate_hash)); + assert_eq!(s.unchecked_validator_index(), local_validator.validator_index); + } + ); + + // sharing a `Seconded` message confirms a candidate, which leads to new + // fragment tree updates. + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + + // activate new leaf, which has relay-parent in implicit view. + let next_relay_parent = Hash::repeat_byte(2); + let next_test_leaf = TestLeaf { + number: 2, + hash: next_relay_parent, + parent_hash: relay_parent, + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + activate_leaf(&mut overseer, local_para, &next_test_leaf, &state, false).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(next_relay_parent), + false, + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![next_relay_parent]).await; + send_peer_view_change(&mut overseer, peer_b.clone(), view![next_relay_parent]).await; + + // peer B never had the relay parent in its view, so this tests that + // the implicit view is working correctly for B. + // + // the fact that the statement isn't sent again to A also indicates that it works + // it's working. 
+ assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessages(messages)) => { + assert_eq!(messages.len(), 1); + assert_matches!( + &messages[0], + ( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::Statement( + r, + s, + ) + )) + ) => { + assert_eq!(peers, &vec![peer_b.clone()]); + assert_eq!(r, &relay_parent); + assert_eq!(s.unchecked_payload(), &CompactStatement::Seconded(candidate_hash)); + assert_eq!(s.unchecked_validator_index(), local_validator.validator_index); + } + ) + } + ); + + overseer + }); +} + +#[test] +fn cluster_messages_imported_after_confirmed_candidate_importable_check() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + vec![1, 2, 3].into(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is in group, has relay parent in view. 
+ let other_group_validators = state.group_validators(local_validator.group_index, true); + let v_a = other_group_validators[0]; + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let a_seconded = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, a_seconded), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + + let req = assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(mut requests, IfDisconnected::ImmediateError)) => { + assert_eq!(requests.len(), 1); + assert_matches!( + requests.pop().unwrap(), + Requests::AttestedCandidateV2(mut outgoing) => { + assert_eq!(outgoing.peer, Recipient::Peer(peer_a.clone())); + assert_eq!(outgoing.payload.candidate_hash, candidate_hash); + + let res = AttestedCandidateResponse { + candidate_receipt: candidate.clone(), + persisted_validation_data: pvd.clone(), + statements: vec![], + }; + outgoing.pending_response.send(Ok(res.encode())); + } + ); + } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_RESPONSE => { } + ); + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![( + 
HypotheticalCandidate::Complete { + candidate_hash, + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }, + vec![(relay_parent, vec![0])], + )], + None, + false, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::CandidateBacking(CandidateBackingMessage::Statement( + r, + s, + )) if r == relay_parent => { + assert_matches!( + s.payload(), + FullStatementWithPVD::Seconded(c, p) + if c == &candidate && p == &pvd => {} + ); + assert_eq!(s.validator_index(), v_a); + } + ); + + overseer + }); +} + +#[test] +fn cluster_messages_imported_after_new_leaf_importable_check() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + vec![1, 2, 3].into(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is in group, has relay parent in view. 
+ let other_group_validators = state.group_validators(local_validator.group_index, true); + let v_a = other_group_validators[0]; + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let a_seconded = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, a_seconded), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + + let req = assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(mut requests, IfDisconnected::ImmediateError)) => { + assert_eq!(requests.len(), 1); + assert_matches!( + requests.pop().unwrap(), + Requests::AttestedCandidateV2(mut outgoing) => { + assert_eq!(outgoing.peer, Recipient::Peer(peer_a.clone())); + assert_eq!(outgoing.payload.candidate_hash, candidate_hash); + + let res = AttestedCandidateResponse { + candidate_receipt: candidate.clone(), + persisted_validation_data: pvd.clone(), + statements: vec![], + }; + outgoing.pending_response.send(Ok(res.encode())); + } + ); + } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_RESPONSE => { } + ); -// TODO [now]: cluster statements are shared with `Seconded` first for all 
cluster peers -// with relay-parent in view + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + + let next_relay_parent = Hash::repeat_byte(2); + let next_test_leaf = TestLeaf { + number: 2, + hash: next_relay_parent, + parent_hash: relay_parent, + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; -// TODO [now]: cluster statements not re-shared on view update + activate_leaf(&mut overseer, local_para, &next_test_leaf, &state, false).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![( + HypotheticalCandidate::Complete { + candidate_hash, + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }, + vec![(relay_parent, vec![0])], + )], + Some(next_relay_parent), + false, + ) + .await; -// TODO [now]: cluster statements shared on first time cluster peer gets relay-parent in view. 
+ assert_matches!( + overseer.recv().await, + AllMessages::CandidateBacking(CandidateBackingMessage::Statement( + r, + s, + )) if r == relay_parent => { + assert_matches!( + s.payload(), + FullStatementWithPVD::Seconded(c, p) + if c == &candidate && p == &pvd => {} + ); + assert_eq!(s.validator_index(), v_a); + } + ); -// TODO [now]: confirmed cluster statement does not import statements until candidate in hypothetical frontier + overseer + }); +} -// TODO [now]: shared valid statement after confirmation sent to all cluster peers with relay-parent +// TODO [now]: ensure seconding limit is respected diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index c65b3323d899..cb3e5d9bc253 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -19,7 +19,10 @@ use super::*; use crate::*; -use polkadot_node_network_protocol::{request_response::ReqProtocolNames, view, ObservedRole}; +use polkadot_node_network_protocol::{ + request_response::{outgoing::Recipient, ReqProtocolNames}, + view, ObservedRole, +}; use polkadot_node_subsystem::messages::{ network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, NetworkBridgeEvent, ProspectiveParachainsMessage, RuntimeApiMessage, From 91c55258bc6689df355a4600a4c7961c6daa34fb Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 21 Feb 2023 22:08:33 -0600 Subject: [PATCH 201/220] keep introduce candidate request --- node/subsystem-types/src/messages.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 1c2a5f775f69..56a19fb5cee0 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -887,7 +887,6 @@ pub enum GossipSupportMessage { NetworkBridgeUpdate(NetworkBridgeEvent), } -// 
TODO [now]: consider removing. /// Request introduction of a candidate into the prospective parachains subsystem. #[derive(Debug, PartialEq, Eq, Clone)] pub struct IntroduceCandidateRequest { From 153c9cf4b53c9c8f415d1b702feb4ba4bf073bc6 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 21 Feb 2023 22:48:42 -0600 Subject: [PATCH 202/220] fix tests after introduce candidate request --- node/core/prospective-parachains/src/tests.rs | 95 ++++++++++++------- 1 file changed, 63 insertions(+), 32 deletions(-) diff --git a/node/core/prospective-parachains/src/tests.rs b/node/core/prospective-parachains/src/tests.rs index 6725ef201599..231172b76208 100644 --- a/node/core/prospective-parachains/src/tests.rs +++ b/node/core/prospective-parachains/src/tests.rs @@ -57,7 +57,7 @@ fn dummy_constraints( ump_remaining: 10, ump_remaining_bytes: 1_000, max_ump_num_per_candidate: 10, - dmp_remaining_messages: vec![10], + dmp_remaining_messages: vec![], hrmp_inbound: InboundHrmpLimitations { valid_watermarks }, hrmp_channels_out: vec![], max_hrmp_num_per_candidate: 0, @@ -296,6 +296,23 @@ async fn deactivate_leaf(virtual_overseer: &mut VirtualOverseer, hash: Hash) { .await; } + +async fn introduce_candidate( + virtual_overseer: &mut VirtualOverseer, + candidate: CommittedCandidateReceipt, + pvd: PersistedValidationData, +) { + let req = IntroduceCandidateRequest { + candidate_para: candidate.descriptor().para_id, + candidate_receipt: candidate, + persisted_validation_data: pvd, + }; + let (tx, _) = oneshot::channel(); + virtual_overseer.send(overseer::FromOrchestra::Communication { + msg: ProspectiveParachainsMessage::IntroduceCandidate(req, tx) + }).await; +} + async fn second_candidate( virtual_overseer: &mut VirtualOverseer, candidate: CommittedCandidateReceipt, @@ -495,7 +512,7 @@ fn send_candidates_and_check_if_found() { activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await; // Candidate A1 - let (candidate_a1, _) = make_candidate( + let (candidate_a1, 
pvd_a1) = make_candidate( leaf_a.hash, leaf_a.number, 1.into(), @@ -507,7 +524,7 @@ fn send_candidates_and_check_if_found() { let response_a1 = vec![(leaf_a.hash, vec![0])]; // Candidate A2 - let (candidate_a2, _) = make_candidate( + let (candidate_a2, pvd_a2) = make_candidate( leaf_a.hash, leaf_a.number, 2.into(), @@ -519,7 +536,7 @@ fn send_candidates_and_check_if_found() { let response_a2 = vec![(leaf_a.hash, vec![0])]; // Candidate B - let (candidate_b, _) = make_candidate( + let (candidate_b, pvd_b) = make_candidate( leaf_b.hash, leaf_b.number, 1.into(), @@ -531,7 +548,7 @@ fn send_candidates_and_check_if_found() { let response_b = vec![(leaf_b.hash, vec![0])]; // Candidate C - let (candidate_c, _) = make_candidate( + let (candidate_c, pvd_c) = make_candidate( leaf_c.hash, leaf_c.number, 2.into(), @@ -542,11 +559,11 @@ fn send_candidates_and_check_if_found() { let candidate_hash_c = candidate_c.hash(); let response_c = vec![(leaf_c.hash, vec![0])]; - // Second candidates. - second_candidate(&mut virtual_overseer, candidate_a1).await; - second_candidate(&mut virtual_overseer, candidate_a2).await; - second_candidate(&mut virtual_overseer, candidate_b).await; - second_candidate(&mut virtual_overseer, candidate_c).await; + // Introduce candidates. + introduce_candidate(&mut virtual_overseer, candidate_a1, pvd_a1).await; + introduce_candidate(&mut virtual_overseer, candidate_a2, pvd_a2).await; + introduce_candidate(&mut virtual_overseer, candidate_b, pvd_b).await; + introduce_candidate(&mut virtual_overseer, candidate_c, pvd_c).await; // Check candidate tree membership. 
get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, response_a1).await; @@ -609,7 +626,7 @@ fn check_candidate_parent_leaving_view() { activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await; // Candidate A1 - let (candidate_a1, _) = make_candidate( + let (candidate_a1, pvd_a1) = make_candidate( leaf_a.hash, leaf_a.number, 1.into(), @@ -620,7 +637,7 @@ fn check_candidate_parent_leaving_view() { let candidate_hash_a1 = candidate_a1.hash(); // Candidate A2 - let (candidate_a2, _) = make_candidate( + let (candidate_a2, pvd_a2) = make_candidate( leaf_a.hash, leaf_a.number, 2.into(), @@ -631,7 +648,7 @@ fn check_candidate_parent_leaving_view() { let candidate_hash_a2 = candidate_a2.hash(); // Candidate B - let (candidate_b, _) = make_candidate( + let (candidate_b, pvd_b) = make_candidate( leaf_b.hash, leaf_b.number, 1.into(), @@ -643,7 +660,7 @@ fn check_candidate_parent_leaving_view() { let response_b = vec![(leaf_b.hash, vec![0])]; // Candidate C - let (candidate_c, _) = make_candidate( + let (candidate_c, pvd_c) = make_candidate( leaf_c.hash, leaf_c.number, 2.into(), @@ -655,10 +672,10 @@ fn check_candidate_parent_leaving_view() { let response_c = vec![(leaf_c.hash, vec![0])]; // Second candidates. - second_candidate(&mut virtual_overseer, candidate_a1).await; - second_candidate(&mut virtual_overseer, candidate_a2).await; - second_candidate(&mut virtual_overseer, candidate_b).await; - second_candidate(&mut virtual_overseer, candidate_c).await; + introduce_candidate(&mut virtual_overseer, candidate_a1, pvd_a1).await; + introduce_candidate(&mut virtual_overseer, candidate_a2, pvd_a2).await; + introduce_candidate(&mut virtual_overseer, candidate_b, pvd_b).await; + introduce_candidate(&mut virtual_overseer, candidate_c, pvd_c).await; // Deactivate leaf A. 
deactivate_leaf(&mut virtual_overseer, leaf_a.hash).await; @@ -733,7 +750,7 @@ fn check_candidate_on_multiple_forks() { activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await; // Candidate on leaf A. - let (candidate_a, _) = make_candidate( + let (candidate_a, pvd_a) = make_candidate( leaf_a.hash, leaf_a.number, 1.into(), @@ -745,7 +762,7 @@ fn check_candidate_on_multiple_forks() { let response_a = vec![(leaf_a.hash, vec![0])]; // Candidate on leaf B. - let (candidate_b, _) = make_candidate( + let (candidate_b, pvd_b) = make_candidate( leaf_b.hash, leaf_b.number, 1.into(), @@ -757,7 +774,7 @@ fn check_candidate_on_multiple_forks() { let response_b = vec![(leaf_b.hash, vec![0])]; // Candidate on leaf C. - let (candidate_c, _) = make_candidate( + let (candidate_c, pvd_c) = make_candidate( leaf_c.hash, leaf_c.number, 1.into(), @@ -769,9 +786,9 @@ fn check_candidate_on_multiple_forks() { let response_c = vec![(leaf_c.hash, vec![0])]; // Second candidate on all three leaves. - second_candidate(&mut virtual_overseer, candidate_a.clone()).await; - second_candidate(&mut virtual_overseer, candidate_b.clone()).await; - second_candidate(&mut virtual_overseer, candidate_c.clone()).await; + introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; + introduce_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c).await; // Check candidate tree membership. 
get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a, response_a).await; @@ -807,7 +824,7 @@ fn check_backable_query() { activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; // Candidate A - let (candidate_a, _) = make_candidate( + let (candidate_a, pvd_a) = make_candidate( leaf_a.hash, leaf_a.number, 1.into(), @@ -818,7 +835,7 @@ fn check_backable_query() { let candidate_hash_a = candidate_a.hash(); // Candidate B - let (mut candidate_b, _) = make_candidate( + let (mut candidate_b, pvd_b) = make_candidate( leaf_a.hash, leaf_a.number, 1.into(), @@ -830,6 +847,20 @@ fn check_backable_query() { candidate_b.descriptor.para_head = Hash::from_low_u64_le(1000); let candidate_hash_b = candidate_b.hash(); + // Introduce candidates. + introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; + + // Should not get any backable candidates. + get_backable_candidate( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a], + None, + ) + .await; + // Second candidates. second_candidate(&mut virtual_overseer, candidate_a.clone()).await; second_candidate(&mut virtual_overseer, candidate_b.clone()).await; @@ -949,7 +980,7 @@ fn check_depth_query() { .await; // Add candidate A. - second_candidate(&mut virtual_overseer, candidate_a.clone()).await; + introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()).await; // Get frontier of candidate A after adding it. get_hypothetical_frontier( @@ -974,7 +1005,7 @@ fn check_depth_query() { .await; // Add candidate B. - second_candidate(&mut virtual_overseer, candidate_b.clone()).await; + introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()).await; // Get frontier of candidate B after adding it. get_hypothetical_frontier( @@ -999,7 +1030,7 @@ fn check_depth_query() { .await; // Add candidate C. 
- second_candidate(&mut virtual_overseer, candidate_c.clone()).await; + introduce_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c.clone()).await; // Get frontier of candidate C after adding it. get_hypothetical_frontier( @@ -1077,7 +1108,7 @@ fn check_pvd_query() { .await; // Add candidate A. - second_candidate(&mut virtual_overseer, candidate_a.clone()).await; + introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()).await; back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; // Get pvd of candidate A after adding it. @@ -1101,7 +1132,7 @@ fn check_pvd_query() { .await; // Add candidate B. - second_candidate(&mut virtual_overseer, candidate_b).await; + introduce_candidate(&mut virtual_overseer, candidate_b, pvd_b.clone()).await; // Get pvd of candidate B after adding it. get_pvd( @@ -1124,7 +1155,7 @@ fn check_pvd_query() { .await; // Add candidate C. - second_candidate(&mut virtual_overseer, candidate_c).await; + introduce_candidate(&mut virtual_overseer, candidate_c, pvd_c.clone()).await; // Get pvd of candidate C after adding it. 
get_pvd( From 070fe343eef80913920abe258d3f10cf5d836b4a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 21 Feb 2023 22:49:07 -0600 Subject: [PATCH 203/220] fmt --- node/core/prospective-parachains/src/tests.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/node/core/prospective-parachains/src/tests.rs b/node/core/prospective-parachains/src/tests.rs index 231172b76208..3ffd026c61fd 100644 --- a/node/core/prospective-parachains/src/tests.rs +++ b/node/core/prospective-parachains/src/tests.rs @@ -296,7 +296,6 @@ async fn deactivate_leaf(virtual_overseer: &mut VirtualOverseer, hash: Hash) { .await; } - async fn introduce_candidate( virtual_overseer: &mut VirtualOverseer, candidate: CommittedCandidateReceipt, @@ -308,9 +307,11 @@ async fn introduce_candidate( persisted_validation_data: pvd, }; let (tx, _) = oneshot::channel(); - virtual_overseer.send(overseer::FromOrchestra::Communication { - msg: ProspectiveParachainsMessage::IntroduceCandidate(req, tx) - }).await; + virtual_overseer + .send(overseer::FromOrchestra::Communication { + msg: ProspectiveParachainsMessage::IntroduceCandidate(req, tx), + }) + .await; } async fn second_candidate( From c3203844fd52d57b9a3342efb40abae8dd0223bd Mon Sep 17 00:00:00 2001 From: Marcin S Date: Thu, 23 Feb 2023 12:25:18 +0100 Subject: [PATCH 204/220] Add grid protocol to module docs --- .../src/vstaging/grid.rs | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index e2f10d5683dc..ca61dc245ecb 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -19,6 +19,50 @@ //! //! The grid uses the gossip topology defined in [`polkadot_node_network_protocol::grid_topology`]. //! It defines how messages and statements are forwarded between validators. +//! +//! # Protocol +//! +//! 
- Once the candidate is backed, produce a 'backed candidate packet' +//! `(CommittedCandidateReceipt, Statements)`. +//! - Members of a backing group produce an announcement of a fully-backed candidate +//! (aka "full manifest") when they are finished. +//! - `BackedCandidateManifest` +//! - Manifests are sent along the grid topology to peers who have the relay-parent +//! in their implicit view. +//! - Only sent by 1st-hop nodes after downloading the backed candidate packet. +//! - The grid topology is a 2-dimensional grid that provides either a 1 +//! or 2-hop path from any originator to any recipient - 1st-hop nodes +//! are those which share either a row or column with the originator, +//! and 2nd-hop nodes are those which share a column or row with that +//! 1st-hop node. +//! - Note that for the purposes of statement distribution, we actually +//! take the union of the routing paths from each validator in a group +//! to the local node to determine the sending and receiving paths. +//! - Ignored when received out-of-topology +//! - On every local view change, members of the backing group rebroadcast the +//! manifest for all candidates under every new relay-parent across the grid. +//! - Nodes should send a `BackedCandidateAcknowledgement(CandidateHash, +//! StatementFilter)` notification to any peer which has sent a manifest, and +//! the candidate has been acquired by other means. +//! - Request/response for the candidate + votes. +//! - Ignore if they are inconsistent with the manifest. +//! - A malicious backing group is capable of producing an unbounded number of +//! backed candidates. +//! - We request the candidate only if the candidate has a hypothetical depth in +//! any of our fragment trees, and: +//! - the seconding validators have not seconded any other candidates at that +//! depth in any of those fragment trees +//! - All members of the group attempt to circulate all statements (in compact form) +//! 
from the rest of the group on candidates that have already been backed. +//! - They do this via the grid topology. +//! - They add the statements to their backed candidate packet for future +//! requestors, and also: +//! - send the statement to any peer, which: +//! - we advertised the backed candidate to (sent manifest), and: +//! - has previously & successfully requested the backed candidate packet, +//! or: +//! - which has sent a `BackedCandidateAcknowledgement` +//! - 1st-hop nodes do the same thing use polkadot_node_network_protocol::{ grid_topology::SessionGridTopology, vstaging::StatementFilter, From 5e5b46c2e389eac2cea513647d7c51d6fc43f5dd Mon Sep 17 00:00:00 2001 From: Marcin S Date: Fri, 24 Feb 2023 11:09:53 +0100 Subject: [PATCH 205/220] Fix comments --- node/core/prospective-parachains/src/tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/node/core/prospective-parachains/src/tests.rs b/node/core/prospective-parachains/src/tests.rs index 3ffd026c61fd..9cd3c58a0ba8 100644 --- a/node/core/prospective-parachains/src/tests.rs +++ b/node/core/prospective-parachains/src/tests.rs @@ -672,7 +672,7 @@ fn check_candidate_parent_leaving_view() { let candidate_hash_c = candidate_c.hash(); let response_c = vec![(leaf_c.hash, vec![0])]; - // Second candidates. + // Introduce candidates. introduce_candidate(&mut virtual_overseer, candidate_a1, pvd_a1).await; introduce_candidate(&mut virtual_overseer, candidate_a2, pvd_a2).await; introduce_candidate(&mut virtual_overseer, candidate_b, pvd_b).await; @@ -786,7 +786,7 @@ fn check_candidate_on_multiple_forks() { let candidate_hash_c = candidate_c.hash(); let response_c = vec![(leaf_c.hash, vec![0])]; - // Second candidate on all three leaves. + // Introduce candidates on all three leaves. 
introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; introduce_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c).await; From b23f5158d751f287dfd95e685f3e71a8c3d136d9 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Fri, 24 Feb 2023 11:37:48 +0100 Subject: [PATCH 206/220] Test `backed_in_path_only: true` --- node/core/prospective-parachains/src/tests.rs | 54 ++++++++++++++++--- 1 file changed, 48 insertions(+), 6 deletions(-) diff --git a/node/core/prospective-parachains/src/tests.rs b/node/core/prospective-parachains/src/tests.rs index 9cd3c58a0ba8..1936a482e685 100644 --- a/node/core/prospective-parachains/src/tests.rs +++ b/node/core/prospective-parachains/src/tests.rs @@ -387,6 +387,7 @@ async fn get_hypothetical_frontier( receipt: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, fragment_tree_relay_parent: Hash, + backed_in_path_only: bool, expected_depths: Vec, ) { let hypothetical_candidate = HypotheticalCandidate::Complete { @@ -397,8 +398,7 @@ async fn get_hypothetical_frontier( let request = HypotheticalFrontierRequest { candidates: vec![hypothetical_candidate.clone()], fragment_tree_relay_parent: Some(fragment_tree_relay_parent), - // TODO [now]: test `true` case as well - backed_in_path_only: false, + backed_in_path_only, }; let (tx, rx) = oneshot::channel(); virtual_overseer @@ -407,8 +407,11 @@ async fn get_hypothetical_frontier( }) .await; let resp = rx.await.unwrap(); - let expected_frontier = - vec![(hypothetical_candidate, vec![(fragment_tree_relay_parent, expected_depths)])]; + let expected_frontier = if expected_depths.is_empty() { + vec![(hypothetical_candidate, vec![])] + } else { + vec![(hypothetical_candidate, vec![(fragment_tree_relay_parent, expected_depths)])] + }; assert_eq!(resp, expected_frontier); } @@ -920,7 +923,7 @@ fn check_backable_query() { // Test depth query. 
#[test] -fn check_depth_query() { +fn check_hypothetical_frontier_query() { let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { // Leaf A @@ -976,6 +979,18 @@ fn check_depth_query() { candidate_a.clone(), pvd_a.clone(), leaf_a.hash, + false, + vec![0], + ) + .await; + // Should work with `backed_in_path_only: true`, too. + get_hypothetical_frontier( + &mut virtual_overseer, + candidate_hash_a, + candidate_a.clone(), + pvd_a.clone(), + leaf_a.hash, + true, vec![0], ) .await; @@ -990,6 +1005,7 @@ fn check_depth_query() { candidate_a.clone(), pvd_a.clone(), leaf_a.hash, + false, vec![0], ) .await; @@ -1001,6 +1017,7 @@ fn check_depth_query() { candidate_b.clone(), pvd_b.clone(), leaf_a.hash, + false, vec![1], ) .await; @@ -1015,6 +1032,7 @@ fn check_depth_query() { candidate_b, pvd_b.clone(), leaf_a.hash, + false, vec![1], ) .await; @@ -1026,9 +1044,21 @@ fn check_depth_query() { candidate_c.clone(), pvd_c.clone(), leaf_a.hash, + false, vec![2], ) .await; + // Should be empty with `backed_in_path_only` because we haven't backed anything. + get_hypothetical_frontier( + &mut virtual_overseer, + candidate_hash_c, + candidate_c.clone(), + pvd_c.clone(), + leaf_a.hash, + true, + vec![], + ) + .await; // Add candidate C. introduce_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c.clone()).await; @@ -1037,12 +1067,24 @@ fn check_depth_query() { get_hypothetical_frontier( &mut virtual_overseer, candidate_hash_c, - candidate_c, + candidate_c.clone(), pvd_c.clone(), leaf_a.hash, + false, vec![2], ) .await; + // Should be empty with `backed_in_path_only` because we haven't backed anything. 
+ get_hypothetical_frontier( + &mut virtual_overseer, + candidate_hash_c, + candidate_c.clone(), + pvd_c.clone(), + leaf_a.hash, + true, + vec![], + ) + .await; virtual_overseer }); From 9ac736b280a2696a3c654341f2c6dacee3c6455c Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Sat, 25 Feb 2023 19:21:32 -0600 Subject: [PATCH 207/220] Update node/network/protocol/src/lib.rs Co-authored-by: Chris Sosnin <48099298+slumber@users.noreply.github.com> --- node/network/protocol/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 69966c6e0edb..64cef318ff01 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -670,7 +670,7 @@ pub mod vstaging { } } - /// Mask out `Valid1 statements in `self` according to the provided + /// Mask out `Valid` statements in `self` according to the provided /// bitvec. Bits appearing in `mask` will not appear in `self` afterwards. 
pub fn mask_valid(&mut self, mask: &BitSlice) { for (mut x, mask) in self From 646184a8f17d39222eae2fbe8004ff4d58d365a5 Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Sat, 25 Feb 2023 19:31:18 -0600 Subject: [PATCH 208/220] Update node/network/protocol/src/request_response/mod.rs Co-authored-by: Chris Sosnin <48099298+slumber@users.noreply.github.com> --- node/network/protocol/src/request_response/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/network/protocol/src/request_response/mod.rs b/node/network/protocol/src/request_response/mod.rs index eaf9f2e6f79e..46a7cc5ecbbe 100644 --- a/node/network/protocol/src/request_response/mod.rs +++ b/node/network/protocol/src/request_response/mod.rs @@ -131,7 +131,7 @@ pub const MAX_PARALLEL_STATEMENT_REQUESTS: u32 = 3; /// We don't want a slow peer to slow down all the others, at the same time we want to get out the /// data quickly in full to at least some peers (as this will reduce load on us as they then can -/// start serving the data). So this value is a tradeoff. 3 seems to be sensible. So we would need +/// start serving the data). So this value is a tradeoff. 5 seems to be sensible. So we would need /// to have 5 slow nodes connected, to delay transfer for others by `ATTESTED_CANDIDATE_TIMEOUT`. pub const MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS: u32 = 5; From 920c607ffa1e536eb6b63e8089cf9a69f678889e Mon Sep 17 00:00:00 2001 From: Marcin S Date: Sun, 26 Feb 2023 10:50:41 +0100 Subject: [PATCH 209/220] Mark receiver with `vstaging` --- node/service/src/overseer.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/node/service/src/overseer.rs b/node/service/src/overseer.rs index 990207ea312a..404067af0da3 100644 --- a/node/service/src/overseer.rs +++ b/node/service/src/overseer.rs @@ -111,7 +111,8 @@ where /// Receiver for incoming large statement requests. 
pub statement_req_receiver: IncomingRequestReceiver, /// Receiver for incoming candidate requests. - pub candidate_req_receiver: IncomingRequestReceiver, + pub candidate_req_vstaging_receiver: + IncomingRequestReceiver, /// Receiver for incoming disputes. pub dispute_req_receiver: IncomingRequestReceiver, /// Prometheus registry, commonly used for production systems, less so for test. @@ -156,7 +157,7 @@ pub fn prepared_overseer_builder( collation_req_vstaging_receiver, available_data_req_receiver, statement_req_receiver, - candidate_req_receiver, + candidate_req_vstaging_receiver, dispute_req_receiver, registry, spawner, @@ -297,7 +298,7 @@ where .statement_distribution(StatementDistributionSubsystem::new( keystore.clone(), statement_req_receiver, - candidate_req_receiver, + candidate_req_vstaging_receiver, Metrics::register(registry)?, rand::rngs::StdRng::from_entropy(), )) From 7d514eac9678950a9c63bfaa7ebc7260813897cd Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 26 Feb 2023 15:42:56 -0600 Subject: [PATCH 210/220] validate grid senders based on manifest kind --- .../src/vstaging/grid.rs | 24 ++++++++++++------- .../src/vstaging/mod.rs | 2 +- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index ca61dc245ecb..842dc0bfb6e7 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -91,27 +91,31 @@ use super::{groups::Groups, LOG_TARGET}; /// the 'receiving' side will be empty. #[derive(Debug, PartialEq)] struct GroupSubView { + // validators we are 'sending' to. sending: HashSet, + // validators we are 'receiving' from. receiving: HashSet, } /// Our local view of the topology for a session, as it pertains to backed /// candidate distribution. 
+#[derive(Debug)] pub struct SessionTopologyView { group_views: HashMap, } impl SessionTopologyView { /// Returns an iterator over all validator indices from the group who are allowed to - /// send us manifests. - pub fn iter_group_senders( + /// send us manifests of the given kind. + pub fn iter_sending_for_group( &self, group: GroupIndex, + kind: ManifestKind, ) -> impl Iterator + '_ { - self.group_views - .get(&group) - .into_iter() - .flat_map(|sub| sub.sending.iter().cloned()) + self.group_views.get(&group).into_iter().flat_map(move |sub| match kind { + ManifestKind::Full => sub.receiving.iter().cloned(), + ManifestKind::Acknowledgement => sub.sending.iter().cloned(), + }) } } @@ -149,8 +153,12 @@ pub fn build_session_topology<'a>( sub_view.sending.extend(our_neighbors.validator_indices_x.iter().cloned()); sub_view.sending.extend(our_neighbors.validator_indices_y.iter().cloned()); - // TODO [now]: remove all other group validators from the 'sending' set. - // and test this behavior. + // remove all other same-group validators from this set, they are + // in the cluster. + // TODO [now]: test this behavior. 
+ for v in group { + sub_view.sending.remove(v); + } } else { for &group_val in group { // If the validator shares a slice with us, we expect to diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 5b260b31da25..47ef9fef236b 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -1890,7 +1890,7 @@ async fn handle_incoming_manifest_common<'a, Context>( }; let sender_index = grid_topology - .iter_group_senders(manifest_summary.claimed_group_index) + .iter_sending_for_group(manifest_summary.claimed_group_index, manifest_kind) .filter_map(|i| per_session.session_info.discovery_keys.get(i.0 as usize).map(|ad| (i, ad))) .filter(|(_, ad)| peer_state.is_authority(ad)) .map(|(i, _)| i) From 8cf3d5f9a39d35f2be3e85c186d43cc3141d157b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 26 Feb 2023 16:27:21 -0600 Subject: [PATCH 211/220] fix mask_seconded/valid --- node/network/protocol/src/lib.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 64cef318ff01..9f50bf29406e 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -666,7 +666,12 @@ pub mod vstaging { .iter_mut() .zip(mask.iter().by_vals().chain(std::iter::repeat(false))) { - *x = *x && mask; + // (x, mask) => x + // (true, true) => false + // (true, false) => true + // (false, true) => false + // (false, false) => false + *x = *x && !mask; } } @@ -678,7 +683,12 @@ pub mod vstaging { .iter_mut() .zip(mask.iter().by_vals().chain(std::iter::repeat(false))) { - *x = *x && mask; + // (x, mask) => x + // (true, true) => false + // (true, false) => true + // (false, true) => false + // (false, false) => false + *x = *x && !mask; } } } From 06beb304cee75fe4e54a310c808b2b2c3022bdd0 Mon Sep 17 00:00:00 2001 From: Robert 
Habermeier Date: Sun, 26 Feb 2023 16:45:52 -0600 Subject: [PATCH 212/220] fix unwanted-mask check --- node/network/statement-distribution/src/vstaging/requests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index 082f334fcb41..869580503755 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -552,7 +552,7 @@ fn validate_complete_response( // sanity check bitmask size. this is based entirely on // local logic here. - if unwanted_mask.has_len(group.len()) { + if !unwanted_mask.has_len(group.len()) { gum::error!( target: LOG_TARGET, group_len = group.len(), From 2987860fa8343075ae2d0bea73347ebf3dc92d13 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Mon, 27 Feb 2023 16:11:39 +0400 Subject: [PATCH 213/220] fix build --- node/service/src/lib.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 7030f19a8c00..ad3615d6a662 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -901,7 +901,8 @@ where config.network.request_response_protocols.push(cfg); let (statement_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); config.network.request_response_protocols.push(cfg); - let (candidate_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); + let (candidate_req_vstaging_receiver, cfg) = + IncomingRequest::get_config_receiver(&req_protocol_names); config.network.request_response_protocols.push(cfg); let (dispute_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); config.network.request_response_protocols.push(cfg); @@ -1088,7 +1089,7 @@ where collation_req_vstaging_receiver, available_data_req_receiver, statement_req_receiver, - candidate_req_receiver, + 
candidate_req_vstaging_receiver, dispute_req_receiver, registry: prometheus_registry.as_ref(), spawner, From dbc37d16262d56ecb3eb4c5a97446ab9b52c5a99 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Mon, 27 Feb 2023 17:02:58 +0400 Subject: [PATCH 214/220] resolve todo on leaf mode --- .../network/statement-distribution/src/lib.rs | 14 +----- .../src/vstaging/mod.rs | 47 +++++++------------ .../src/vstaging/tests/mod.rs | 10 ---- 3 files changed, 18 insertions(+), 53 deletions(-) diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 521ccb8fc02a..11d765d7aa95 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -272,12 +272,7 @@ impl StatementDistributionSubsystem { if let Some(ref activated) = activated { let mode = prospective_parachains_mode(ctx.sender(), activated.hash).await?; if let ProspectiveParachainsMode::Enabled { .. } = mode { - vstaging::handle_active_leaves_update( - ctx, - state, - ActiveLeavesUpdate { activated: Some(activated.clone()), deactivated }, - ) - .await?; + vstaging::handle_active_leaves_update(ctx, state, activated, mode).await?; } else if let ProspectiveParachainsMode::Disabled = mode { for deactivated in &deactivated { crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated); @@ -294,12 +289,7 @@ impl StatementDistributionSubsystem { for deactivated in &deactivated { crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated); } - vstaging::handle_active_leaves_update( - ctx, - state, - ActiveLeavesUpdate { activated: None, deactivated }, - ) - .await?; + vstaging::handle_deactivate_leaves(state, &deactivated); } }, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => { diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 47ef9fef236b..cad60448376f 100644 --- 
a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -38,11 +38,10 @@ use polkadot_node_subsystem::{ CandidateBackingMessage, HypotheticalCandidate, HypotheticalFrontierRequest, NetworkBridgeEvent, NetworkBridgeTxMessage, ProspectiveParachainsMessage, }, - overseer, ActiveLeavesUpdate, + overseer, ActivatedLeaf, }; use polkadot_node_subsystem_util::{ - backing_implicit_view::View as ImplicitView, - runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, + backing_implicit_view::View as ImplicitView, runtime::ProspectiveParachainsMode, }; use polkadot_primitives::vstaging::{ AuthorityDiscoveryId, CandidateHash, CompactStatement, CoreIndex, CoreState, GroupIndex, @@ -392,23 +391,20 @@ pub(crate) async fn handle_network_update( pub(crate) async fn handle_active_leaves_update( ctx: &mut Context, state: &mut State, - update: ActiveLeavesUpdate, + activated: &ActivatedLeaf, + leaf_mode: ProspectiveParachainsMode, ) -> JfyiErrorResult<()> { - if let Some(ref leaf) = update.activated { - state - .implicit_view - .activate_leaf(ctx.sender(), leaf.hash) - .await - .map_err(JfyiError::ActivateLeafFailure)?; - } - - handle_deactivate_leaves(state, &update.deactivated[..]); - - let leaf = match update.activated { - Some(l) => l, - None => return Ok(()), + let seconding_limit = match leaf_mode { + ProspectiveParachainsMode::Disabled => return Ok(()), + ProspectiveParachainsMode::Enabled { max_candidate_depth, .. } => max_candidate_depth + 1, }; + state + .implicit_view + .activate_leaf(ctx.sender(), activated.hash) + .await + .map_err(JfyiError::ActivateLeafFailure)?; + let new_relay_parents = state.implicit_view.all_allowed_relay_parents().cloned().collect::>(); for new_relay_parent in new_relay_parents.iter().cloned() { @@ -419,17 +415,6 @@ pub(crate) async fn handle_active_leaves_update( // New leaf: fetch info from runtime API and initialize // `per_relay_parent`. 
- // TODO [now]: Didn't we already request this in `lib.rs`? - let mode = prospective_parachains_mode(ctx.sender(), new_relay_parent).await; - - // request prospective parachains mode, skip disabled relay-parents - // (there should not be any) and set `seconding_limit = max_candidate_depth`. - let seconding_limit = match mode { - Ok(ProspectiveParachainsMode::Disabled) | Err(_) => continue, - Ok(ProspectiveParachainsMode::Enabled { max_candidate_depth, .. }) => - max_candidate_depth + 1, - }; - let session_index = polkadot_node_subsystem_util::request_session_index_for_child( new_relay_parent, ctx.sender(), @@ -519,7 +504,7 @@ pub(crate) async fn handle_active_leaves_update( { let mut update_peers = Vec::new(); for (peer, peer_state) in state.peers.iter_mut() { - let fresh = peer_state.reconcile_active_leaf(leaf.hash, &new_relay_parents); + let fresh = peer_state.reconcile_active_leaf(activated.hash, &new_relay_parents); if !fresh.is_empty() { update_peers.push((*peer, fresh)); } @@ -532,7 +517,7 @@ pub(crate) async fn handle_active_leaves_update( } } - new_leaf_fragment_tree_updates(ctx, state, leaf.hash).await; + new_leaf_fragment_tree_updates(ctx, state, activated.hash).await; Ok(()) } @@ -567,7 +552,7 @@ fn find_local_validator_state( }) } -fn handle_deactivate_leaves(state: &mut State, leaves: &[Hash]) { +pub(crate) fn handle_deactivate_leaves(state: &mut State, leaves: &[Hash]) { // deactivate the leaf in the implicit view. for leaf in leaves { state.implicit_view.deactivate_leaf(*leaf); diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index cb3e5d9bc253..583d17616629 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -371,16 +371,6 @@ async fn handle_leaf_activation( } ); - // TODO: Can we remove this REDUNDANT request? 
- assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) - ) if parent == *hash => { - tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); - } - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( From 813a230730eb540a6bca2d3b9c55f4b516dc3d57 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Mon, 27 Feb 2023 17:36:53 +0400 Subject: [PATCH 215/220] Unify protocol naming to vstaging --- .../protocol/src/request_response/mod.rs | 18 +++++++++--------- .../protocol/src/request_response/outgoing.rs | 8 ++++---- .../protocol/src/request_response/vstaging.rs | 2 +- .../statement-distribution/src/vstaging/mod.rs | 2 +- .../src/vstaging/tests/cluster.rs | 6 +++--- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/node/network/protocol/src/request_response/mod.rs b/node/network/protocol/src/request_response/mod.rs index 46a7cc5ecbbe..83e2ac12df96 100644 --- a/node/network/protocol/src/request_response/mod.rs +++ b/node/network/protocol/src/request_response/mod.rs @@ -78,8 +78,8 @@ pub enum Protocol { DisputeSendingV1, /// Protocol for requesting candidates with attestations in statement distribution - /// in v2. - AttestedCandidateV2, + /// when async backing is enabled. + AttestedCandidateVStaging, } /// Minimum bandwidth we expect for validators - 500Mbit/s is the recommendation, so approximately @@ -111,7 +111,7 @@ const POV_REQUEST_TIMEOUT_CONNECTED: Duration = Duration::from_millis(1200); /// fit statement distribution within a block of 6 seconds.) const STATEMENTS_TIMEOUT: Duration = Duration::from_secs(1); -/// We want to attested candidate requests to time out relatively fast, +/// We want attested candidate requests to time out relatively fast, /// because slow requests will bottleneck the backing system. 
Ideally, we'd have /// an adaptive timeout based on the candidate size, because there will be a lot of variance /// in candidate sizes: candidates with no code and no messages vs candidates with code @@ -148,7 +148,7 @@ const POV_RESPONSE_SIZE: u64 = MAX_POV_SIZE as u64 + 10_000; /// This is `MAX_CODE_SIZE` plus some additional space for protocol overhead. const STATEMENT_RESPONSE_SIZE: u64 = MAX_CODE_SIZE as u64 + 10_000; -/// Maximum response sizes for `AttestedCandidateV2`. +/// Maximum response sizes for `AttestedCandidateVStaging`. /// /// This is `MAX_CODE_SIZE` plus some additional space for protocol overhead and /// additional backing statements. @@ -255,7 +255,7 @@ impl Protocol { request_timeout: DISPUTE_REQUEST_TIMEOUT, inbound_queue: tx, }, - Protocol::AttestedCandidateV2 => RequestResponseConfig { + Protocol::AttestedCandidateVStaging => RequestResponseConfig { name, fallback_names, max_request_size: 1_000, @@ -308,7 +308,7 @@ impl Protocol { // failure, so having a good value here is mostly about performance tuning. Protocol::DisputeSendingV1 => 100, - Protocol::AttestedCandidateV2 => { + Protocol::AttestedCandidateVStaging => { // We assume we can utilize up to 70% of the available bandwidth for statements. // This is just a guess/estimate, with the following considerations: If we are // faster than that, queue size will stay low anyway, even if not - requesters will @@ -345,7 +345,7 @@ impl Protocol { Protocol::DisputeSendingV1 => Some("/polkadot/send_dispute/1"), // Introduced after legacy names became legacy. 
- Protocol::AttestedCandidateV2 => None, + Protocol::AttestedCandidateVStaging => None, Protocol::CollationFetchingVStaging => None, } } @@ -398,13 +398,13 @@ impl ReqProtocolNames { let short_name = match protocol { Protocol::ChunkFetchingV1 => "/req_chunk/1", Protocol::CollationFetchingV1 => "/req_collation/1", - Protocol::CollationFetchingVStaging => "/req_collation/2", Protocol::PoVFetchingV1 => "/req_pov/1", Protocol::AvailableDataFetchingV1 => "/req_available_data/1", Protocol::StatementFetchingV1 => "/req_statement/1", Protocol::DisputeSendingV1 => "/send_dispute/1", - Protocol::AttestedCandidateV2 => "/req_attested_candidate/2", + Protocol::CollationFetchingVStaging => "/req_collation/2", + Protocol::AttestedCandidateVStaging => "/req_attested_candidate/2", }; format!("{}{}", prefix, short_name).into() diff --git a/node/network/protocol/src/request_response/outgoing.rs b/node/network/protocol/src/request_response/outgoing.rs index 4e5b3d53feac..e5aa117ff654 100644 --- a/node/network/protocol/src/request_response/outgoing.rs +++ b/node/network/protocol/src/request_response/outgoing.rs @@ -40,9 +40,9 @@ pub enum Requests { StatementFetchingV1(OutgoingRequest), /// Requests for notifying about an ongoing dispute. DisputeSendingV1(OutgoingRequest), - /// Request a candidate and attestations. - AttestedCandidateV2(OutgoingRequest), + /// Request a candidate and attestations. + AttestedCandidateVStaging(OutgoingRequest), /// Fetch a collation from a collator which previously announced it. /// Compared to V1 it requires specifying which candidate is requested by its hash. 
CollationFetchingVStaging(OutgoingRequest), @@ -59,7 +59,7 @@ impl Requests { Self::AvailableDataFetchingV1(_) => Protocol::AvailableDataFetchingV1, Self::StatementFetchingV1(_) => Protocol::StatementFetchingV1, Self::DisputeSendingV1(_) => Protocol::DisputeSendingV1, - Self::AttestedCandidateV2(_) => Protocol::AttestedCandidateV2, + Self::AttestedCandidateVStaging(_) => Protocol::AttestedCandidateVStaging, } } @@ -79,7 +79,7 @@ impl Requests { Self::AvailableDataFetchingV1(r) => r.encode_request(), Self::StatementFetchingV1(r) => r.encode_request(), Self::DisputeSendingV1(r) => r.encode_request(), - Self::AttestedCandidateV2(r) => r.encode_request(), + Self::AttestedCandidateVStaging(r) => r.encode_request(), } } } diff --git a/node/network/protocol/src/request_response/vstaging.rs b/node/network/protocol/src/request_response/vstaging.rs index 8bf34b3cc8e9..f84de9505534 100644 --- a/node/network/protocol/src/request_response/vstaging.rs +++ b/node/network/protocol/src/request_response/vstaging.rs @@ -56,7 +56,7 @@ pub struct AttestedCandidateResponse { impl IsRequest for AttestedCandidateRequest { type Response = AttestedCandidateResponse; - const PROTOCOL: Protocol = Protocol::AttestedCandidateV2; + const PROTOCOL: Protocol = Protocol::AttestedCandidateVStaging; } /// Responses as sent by collators. diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index cad60448376f..a562668627a1 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -2375,7 +2375,7 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St while let Some(request) = state.request_manager.next_request(request_props, peer_advertised) { // Peer is supposedly connected. 
ctx.send_message(NetworkBridgeTxMessage::SendRequests( - vec![Requests::AttestedCandidateV2(request)], + vec![Requests::AttestedCandidateVStaging(request)], IfDisconnected::ImmediateError, )) .await; diff --git a/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/node/network/statement-distribution/src/vstaging/tests/cluster.rs index 0004c4084280..ca849dbd39a6 100644 --- a/node/network/statement-distribution/src/vstaging/tests/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/tests/cluster.rs @@ -559,7 +559,7 @@ fn seconded_statement_leads_to_request() { assert_eq!(requests.len(), 1); assert_matches!( &requests[0], - Requests::AttestedCandidateV2(outgoing) => { + Requests::AttestedCandidateVStaging(outgoing) => { assert_eq!(outgoing.peer, Recipient::Peer(peer_a.clone())); assert_eq!(outgoing.payload.candidate_hash, candidate_hash); } @@ -982,7 +982,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { assert_eq!(requests.len(), 1); assert_matches!( requests.pop().unwrap(), - Requests::AttestedCandidateV2(mut outgoing) => { + Requests::AttestedCandidateVStaging(mut outgoing) => { assert_eq!(outgoing.peer, Recipient::Peer(peer_a.clone())); assert_eq!(outgoing.payload.candidate_hash, candidate_hash); @@ -1132,7 +1132,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { assert_eq!(requests.len(), 1); assert_matches!( requests.pop().unwrap(), - Requests::AttestedCandidateV2(mut outgoing) => { + Requests::AttestedCandidateVStaging(mut outgoing) => { assert_eq!(outgoing.peer, Recipient::Peer(peer_a.clone())); assert_eq!(outgoing.payload.candidate_hash, candidate_hash); From 12689f4d0a6e420b00404241f9b6d3c34e1436e7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 27 Feb 2023 16:08:28 -0600 Subject: [PATCH 216/220] fmt, fix grid test after topology change --- node/network/statement-distribution/src/vstaging/grid.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git 
a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 842dc0bfb6e7..327e88371936 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -1135,10 +1135,11 @@ mod tests { // 3 4 5 // 6 7 8 - // our group: we send to all row/column neighbors and receive nothing + // our group: we send to all row/column neighbors which are not in our + // group and receive nothing. assert_eq!( t.group_views.get(&GroupIndex(0)).unwrap().sending, - vec![1, 2, 3, 6].into_iter().map(ValidatorIndex).collect::>(), + vec![1, 2].into_iter().map(ValidatorIndex).collect::>(), ); assert_eq!(t.group_views.get(&GroupIndex(0)).unwrap().receiving, HashSet::new(),); From d8239abc2a06cc939f5806ff14ba3759980dba5a Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Tue, 28 Feb 2023 10:03:12 -0600 Subject: [PATCH 217/220] typo Co-authored-by: Chris Sosnin <48099298+slumber@users.noreply.github.com> --- node/network/statement-distribution/src/vstaging/requests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index 869580503755..3d161964c6eb 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -243,7 +243,7 @@ impl RequestManager { /// Yields the next request to dispatch, if there is any. /// - /// This function accepts three closures as an argument. + /// This function accepts two closures as an argument. 
/// /// The first closure is used to gather information about the desired /// properties of a response, which is used to select targets and validate From 96e6a107a7cc06efa7e2ce9a4021473b48e5eefa Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 28 Feb 2023 13:34:52 -0600 Subject: [PATCH 218/220] address review --- node/network/statement-distribution/src/vstaging/grid.rs | 2 +- .../statement-distribution/src/vstaging/requests.rs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 327e88371936..95c58e576f91 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -746,7 +746,7 @@ impl ReceivedManifests { /// /// This assumes that the manifest has already been checked for /// validity - i.e. that the bitvecs match the claimed group in size - /// and that that the manifest includes at least one `Seconded` + /// and that the manifest includes at least one `Seconded` /// attestation and includes enough attestations for the candidate /// to be backed. /// diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index 3d161964c6eb..507bbbb0ef18 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -347,10 +347,10 @@ impl RequestManager { /// Await the next incoming response to a sent request, or immediately /// return `None` if there are no pending responses. 
pub async fn await_incoming(&mut self) -> Option { - match self.pending_responses.next().await { - None => None, - Some(response) => Some(UnhandledResponse { response }), - } + self.pending_responses + .next() + .await + .map(|response| UnhandledResponse { response }) } } From d75300af36527670ace967c37b1221c77da17e93 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 2 Mar 2023 19:36:31 -0700 Subject: [PATCH 219/220] adjust comment, make easier to understand --- node/network/statement-distribution/src/vstaging/grid.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 95c58e576f91..d33c6af8ede0 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -900,11 +900,11 @@ impl FilterQuery for StatementFilter { /// concerning the candidate. #[derive(Debug, Clone)] struct MutualKnowledge { - /// Knowledge the remote peer has about the candidate. `Some` only if they - /// have advertised or requested the candidate. + /// Knowledge the remote peer has about the candidate, as far as we're aware. + /// `Some` only if they have advertised, acknowledged, orrequested the candidate. remote_knowledge: Option, /// Knowledge we have indicated to the remote peer about the candidate. - /// `Some` only if we have advertised or requested the candidate + /// `Some` only if we have advertised, acknowledged, or requested the candidate /// from them. 
local_knowledge: Option, } From caea5691d0cc0c0ae43a8c8f965b824b1e1cfcf5 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Fri, 3 Mar 2023 09:55:10 +0100 Subject: [PATCH 220/220] Fix typo --- node/network/statement-distribution/src/vstaging/grid.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index d33c6af8ede0..bdcbabffd3e5 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -901,7 +901,7 @@ impl FilterQuery for StatementFilter { #[derive(Debug, Clone)] struct MutualKnowledge { /// Knowledge the remote peer has about the candidate, as far as we're aware. - /// `Some` only if they have advertised, acknowledged, orrequested the candidate. + /// `Some` only if they have advertised, acknowledged, or requested the candidate. remote_knowledge: Option, /// Knowledge we have indicated to the remote peer about the candidate. /// `Some` only if we have advertised, acknowledged, or requested the candidate