diff --git a/gossipsub/Cargo.toml b/gossipsub/Cargo.toml
new file mode 100644
index 0000000..b09c296
--- /dev/null
+++ b/gossipsub/Cargo.toml
@@ -0,0 +1,55 @@
+[package]
+name = "gossipsub"
+edition = "2021"
+description = "Gossipsub protocol"
+version = "0.46.2"
+authors = ["Age Manning "]
+license = "MIT"
+repository = "https://github.com/n0-computer/iroh-experiments"
+keywords = ["peer-to-peer", "libp2p", "networking"]
+categories = ["network-programming", "asynchronous"]
+
+[features]
+wasm-bindgen = ["getrandom/js"]
+
+[dependencies]
+base64 = "0.22.1"
+byteorder = "1.5.0"
+bytes = "1.6"
+either = "1.11"
+fnv = "1.0.7"
+futures = "0.3"
+futures-ticker = "0.0.3"
+getrandom = "0.2.15"
+hex_fmt = "0.3.0"
+web-time = "1.1"
+rand = "0.8"
+regex = "1.10.5"
+serde = { version = "1", features = ["derive"] }
+sha2 = "0.10.8"
+smallvec = "1.13.2"
+tracing = "0.1"
+void = "1.0.2"
+iroh-metrics = "0.20"
+iroh = "0.20.0"
+tokio = { version = "1", features = ["full"] } # TODO: reduce features
+prometheus-client = "0.22.2"
+tokio-util = { version = "0.7.11", features = ["codec"] }
+tokio-serde-postcard = "0.1.0"
+tokio-serde = "0.9.0"
+postcard = "1.0.8"
+anyhow = "1.0.86"
+tokio-stream = { version = "0.1.15", features = ["sync"] }
+
+[dev-dependencies]
+tokio = { version = "1", features = ["full"] }
+hex = "0.4.2"
+quickcheck = "1.0"
+tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+
+# Passing arguments to the docsrs builder in order to properly document cfg's.
+# More information: https://docs.rs/about/builds#cross-compiling
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+rustc-args = ["--cfg", "docsrs"]
diff --git a/gossipsub/LICENSE.original b/gossipsub/LICENSE.original
new file mode 100644
index 0000000..4dbecd3
--- /dev/null
+++ b/gossipsub/LICENSE.original
@@ -0,0 +1,21 @@
+Original License, as found in https://github.com/libp2p/rust-libp2p
+
+Copyright 2020 Sigma Prime Pty Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/gossipsub/src/backoff.rs b/gossipsub/src/backoff.rs
new file mode 100644
index 0000000..031968b
--- /dev/null
+++ b/gossipsub/src/backoff.rs
@@ -0,0 +1,157 @@
+//! Data structure for efficiently storing known back-offs when pruning peers.
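+//!
+//! A minimal usage sketch (illustrative only: `BackoffStorage` is crate-private, so this is
+//! not a runnable doc-test, and `topic`/`peer` are assumed to be in scope):
+//!
+//! ```ignore
+//! use std::time::Duration;
+//!
+//! // Size the cyclic buffer from the prune backoff, the heartbeat interval and the slack.
+//! let mut backoffs = BackoffStorage::new(&Duration::from_secs(60), Duration::from_secs(1), 1);
+//! backoffs.update_backoff(&topic, &peer, Duration::from_secs(60));
+//! assert!(backoffs.is_backoff_with_slack(&topic, &peer));
+//! // Call once per heartbeat interval; expired entries are swept lazily.
+//! backoffs.heartbeat();
+//! ```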
+use crate::topic::TopicHash;
+
+use iroh::net::NodeId;
+use std::collections::{
+    hash_map::{Entry, HashMap},
+    HashSet,
+};
+use std::time::Duration;
+use web_time::Instant;
+
+#[derive(Copy, Clone, Debug)]
+struct HeartbeatIndex(usize);
+
+/// Stores backoffs in an efficient manner.
+#[derive(Debug)]
+pub(crate) struct BackoffStorage {
+    /// Stores backoffs and the index in backoffs_by_heartbeat per peer per topic.
+    backoffs: HashMap<TopicHash, HashMap<NodeId, (Instant, HeartbeatIndex)>>,
+    /// Stores peer-topic pairs per heartbeat (this is cyclic; the current index is
+    /// `heartbeat_index`).
+    backoffs_by_heartbeat: Vec<HashSet<(TopicHash, NodeId)>>,
+    /// The index in the backoffs_by_heartbeat vector corresponding to the current heartbeat.
+    heartbeat_index: HeartbeatIndex,
+    /// The heartbeat interval duration from the config.
+    heartbeat_interval: Duration,
+    /// Backoff slack from the config.
+    backoff_slack: u32,
+}
+
+impl BackoffStorage {
+    fn heartbeats(d: &Duration, heartbeat_interval: &Duration) -> usize {
+        ((d.as_nanos() + heartbeat_interval.as_nanos() - 1) / heartbeat_interval.as_nanos())
+            as usize
+    }
+
+    pub(crate) fn new(
+        prune_backoff: &Duration,
+        heartbeat_interval: Duration,
+        backoff_slack: u32,
+    ) -> BackoffStorage {
+        // We add one additional slot for partial heartbeat
+        let max_heartbeats =
+            Self::heartbeats(prune_backoff, &heartbeat_interval) + backoff_slack as usize + 1;
+        BackoffStorage {
+            backoffs: HashMap::new(),
+            backoffs_by_heartbeat: vec![HashSet::new(); max_heartbeats],
+            heartbeat_index: HeartbeatIndex(0),
+            heartbeat_interval,
+            backoff_slack,
+        }
+    }
+
+    /// Updates the backoff for a peer (if there is already a more restrictive backoff, this
+    /// call doesn't change anything).
+    pub(crate) fn update_backoff(&mut self, topic: &TopicHash, peer: &NodeId, time: Duration) {
+        let instant = Instant::now() + time;
+        let insert_into_backoffs_by_heartbeat =
+            |heartbeat_index: HeartbeatIndex,
+             backoffs_by_heartbeat: &mut Vec<HashSet<(TopicHash, NodeId)>>,
+             heartbeat_interval,
+             backoff_slack| {
+                let pair = (topic.clone(), *peer);
+                let index = (heartbeat_index.0
+                    + Self::heartbeats(&time, heartbeat_interval)
+                    + backoff_slack as usize)
+                    % backoffs_by_heartbeat.len();
+                backoffs_by_heartbeat[index].insert(pair);
+                HeartbeatIndex(index)
+            };
+        match self.backoffs.entry(topic.clone()).or_default().entry(*peer) {
+            Entry::Occupied(mut o) => {
+                let (backoff, index) = o.get();
+                if backoff < &instant {
+                    let pair = (topic.clone(), *peer);
+                    if let Some(s) = self.backoffs_by_heartbeat.get_mut(index.0) {
+                        s.remove(&pair);
+                    }
+                    let index = insert_into_backoffs_by_heartbeat(
+                        self.heartbeat_index,
+                        &mut self.backoffs_by_heartbeat,
+                        &self.heartbeat_interval,
+                        self.backoff_slack,
+                    );
+                    o.insert((instant, index));
+                }
+            }
+            Entry::Vacant(v) => {
+                let index = insert_into_backoffs_by_heartbeat(
+                    self.heartbeat_index,
+                    &mut self.backoffs_by_heartbeat,
+                    &self.heartbeat_interval,
+                    self.backoff_slack,
+                );
+                v.insert((instant, index));
+            }
+        };
+    }
+
+    /// Checks if a given peer is backoffed for the given topic. This method respects the
+    /// configured BACKOFF_SLACK and may return true even if the backoff is already over.
+    /// It is guaranteed to return true while the backoff is not over, and eventually, once
+    /// enough time has passed, false after the backoff is over.
+    ///
+    /// This method should be used for deciding if we can already send a GRAFT to a previously
+    /// backoffed peer.
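+    ///
+    /// For example, with a heartbeat interval of 1s and `backoff_slack = 1`, a peer pruned
+    /// with a 60s backoff keeps reporting `true` here for roughly 61s.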
+ pub(crate) fn is_backoff_with_slack(&self, topic: &TopicHash, peer: &NodeId) -> bool { + self.backoffs + .get(topic) + .map_or(false, |m| m.contains_key(peer)) + } + + pub(crate) fn get_backoff_time(&self, topic: &TopicHash, peer: &NodeId) -> Option { + Self::get_backoff_time_from_backoffs(&self.backoffs, topic, peer) + } + + fn get_backoff_time_from_backoffs( + backoffs: &HashMap>, + topic: &TopicHash, + peer: &NodeId, + ) -> Option { + backoffs + .get(topic) + .and_then(|m| m.get(peer).map(|(i, _)| *i)) + } + + /// Applies a heartbeat. That should be called regularly in intervals of length + /// `heartbeat_interval`. + pub(crate) fn heartbeat(&mut self) { + // Clean up backoffs_by_heartbeat + if let Some(s) = self.backoffs_by_heartbeat.get_mut(self.heartbeat_index.0) { + let backoffs = &mut self.backoffs; + let slack = self.heartbeat_interval * self.backoff_slack; + let now = Instant::now(); + s.retain(|(topic, peer)| { + let keep = match Self::get_backoff_time_from_backoffs(backoffs, topic, peer) { + Some(backoff_time) => backoff_time + slack > now, + None => false, + }; + if !keep { + //remove from backoffs + if let Entry::Occupied(mut m) = backoffs.entry(topic.clone()) { + if m.get_mut().remove(peer).is_some() && m.get().is_empty() { + m.remove(); + } + } + } + + keep + }); + } + + // Increase heartbeat index + self.heartbeat_index = + HeartbeatIndex((self.heartbeat_index.0 + 1) % self.backoffs_by_heartbeat.len()); + } +} diff --git a/gossipsub/src/behaviour.rs b/gossipsub/src/behaviour.rs new file mode 100644 index 0000000..d7765f9 --- /dev/null +++ b/gossipsub/src/behaviour.rs @@ -0,0 +1,3386 @@ +use std::{ + cmp::{max, Ordering}, + collections::{BTreeSet, HashMap, HashSet}, + net::IpAddr, + time::Duration, +}; + +use futures::StreamExt; +use futures_ticker::Ticker; +use iroh::{ + net::{ + endpoint::{Connecting, Connection}, + key::SecretKey, + NodeAddr, NodeId, + }, + node::ProtocolHandler, +}; +use rand::{seq::SliceRandom, thread_rng}; + +use tokio::{ + sync::{mpsc, oneshot}, + task::JoinHandle, +}; +use web_time::{Instant, SystemTime}; + +use crate::gossip_promises::GossipPromises; +use crate::handler::{Handler, HandlerEvent, HandlerIn}; +use crate::mcache::MessageCache; +use crate::{ + backoff::BackoffStorage, + metrics::{Churn, Inclusion, Penalty}, +}; +use crate::{ + config::{Config, ValidationMode}, + types, +}; +// use crate::metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty}; +use crate::peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason}; +use crate::protocol::SIGNING_PREFIX; +use crate::subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter}; +use crate::time_cache::DuplicateCache; +use crate::topic::{Hasher, Topic, TopicHash}; +use crate::transform::{DataTransform, IdentityTransform}; +use crate::types::{ + ControlAction, Message, MessageAcceptance, MessageId, PeerInfo, RawMessage, Subscription, + SubscriptionAction, +}; +use crate::types::{PeerConnections, RpcOut}; +use crate::TopicScoreParams; +use crate::{PublishError, SubscriptionError, ValidationError}; +use std::{cmp::Ordering::Equal, fmt::Debug}; + +#[cfg(test)] +mod tests; + +/// Determines if published messages should be signed or not. +/// +/// Without signing, a number of privacy preserving modes can be selected. +/// +/// NOTE: The default validation settings are to require signatures. The [`ValidationMode`] +/// should be updated in the [`Config`] to allow for unsigned messages. 
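+///
+/// A construction sketch (the `secret_key` value is assumed to come from iroh key
+/// management):
+///
+/// ```ignore
+/// // Sign everything we publish with our node's secret key:
+/// let auth = MessageAuthenticity::Signed(secret_key);
+/// // Or publish without signatures; receivers then need a relaxed `ValidationMode`:
+/// let anon = MessageAuthenticity::Anonymous;
+/// ```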
+#[derive(Debug, Clone)]
+pub enum MessageAuthenticity {
+    /// Message signing is enabled. The author will be the owner of the key and the sequence number
+    /// will be linearly increasing.
+    Signed(SecretKey),
+    /// Message signing is disabled.
+    ///
+    /// The specified [`NodeId`] will be used as the author of all published messages. The sequence
+    /// number will be randomized.
+    Author(NodeId),
+    /// Message signing is disabled.
+    ///
+    /// A random [`NodeId`] will be used when publishing each message. The sequence number will be
+    /// randomized.
+    RandomAuthor,
+    /// Message signing is disabled.
+    ///
+    /// The author of the message and the sequence numbers are excluded from the message.
+    ///
+    /// NOTE: Excluding these fields may cause these messages to be considered invalid by other
+    /// nodes that enforce validation of these fields. See [`ValidationMode`] in the [`Config`]
+    /// for how to customise this for rust-libp2p gossipsub. A custom `message_id`
+    /// function will need to be set to prevent all messages from a peer being filtered
+    /// as duplicates.
+    Anonymous,
+}
+
+impl MessageAuthenticity {
+    /// Returns true if signing is enabled.
+    pub fn is_signing(&self) -> bool {
+        matches!(self, MessageAuthenticity::Signed(_))
+    }
+
+    /// Returns true if this authenticity is anonymous.
+    pub fn is_anonymous(&self) -> bool {
+        matches!(self, MessageAuthenticity::Anonymous)
+    }
+}
+
+/// Event that can be emitted by the gossipsub behaviour.
+#[derive(Debug)]
+pub enum Event {
+    /// A message has been received.
+    Message {
+        /// The peer that forwarded us this message.
+        propagation_source: NodeId,
+        /// The [`MessageId`] of the message. This should be referenced by the application when
+        /// validating a message (if required).
+        message_id: MessageId,
+        /// The decompressed message itself.
+        message: Message,
+    },
+    /// A remote subscribed to a topic.
+    Subscribed {
+        /// Remote that has subscribed.
+        peer_id: NodeId,
+        /// The topic it has subscribed to.
+        topic: TopicHash,
+    },
+    /// A remote unsubscribed from a topic.
+    Unsubscribed {
+        /// Remote that has unsubscribed.
+        peer_id: NodeId,
+        /// The topic it has unsubscribed from.
+        topic: TopicHash,
+    },
+    /// A peer that does not support gossipsub has connected.
+    GossipsubNotSupported { peer_id: NodeId },
+}
+
+/// A data structure for storing configuration for publishing messages. See [`MessageAuthenticity`]
+/// for further details.
+#[allow(clippy::large_enum_variant)]
+#[derive(Debug)]
+enum PublishConfig {
+    Signing {
+        keypair: SecretKey,
+        author: NodeId,
+        inline_key: Option<Vec<u8>>,
+        last_seq_no: SequenceNumber,
+    },
+    Author(NodeId),
+    RandomAuthor,
+    Anonymous,
+}
+
+/// A strictly linearly increasing sequence number.
+///
+/// We start from the current time as unix timestamp in nanoseconds.
+#[derive(Debug)]
+struct SequenceNumber(u64);
+
+impl SequenceNumber {
+    fn new() -> Self {
+        let unix_timestamp = SystemTime::now()
+            .duration_since(SystemTime::UNIX_EPOCH)
+            .expect("time to be linear")
+            .as_nanos();
+
+        Self(unix_timestamp as u64)
+    }
+
+    fn next(&mut self) -> u64 {
+        self.0 = self
+            .0
+            .checked_add(1)
+            .expect("to not exhaust u64 space for sequence numbers");
+
+        self.0
+    }
+}
+
+impl PublishConfig {
+    pub(crate) fn get_own_id(&self) -> Option<&NodeId> {
+        match self {
+            Self::Signing { author, ..
} => Some(author), + Self::Author(author) => Some(author), + _ => None, + } + } +} + +impl From for PublishConfig { + fn from(authenticity: MessageAuthenticity) -> Self { + match authenticity { + MessageAuthenticity::Signed(keypair) => { + let public_key = keypair.public(); + + PublishConfig::Signing { + keypair, + author: public_key, + inline_key: None, + last_seq_no: SequenceNumber::new(), + } + } + MessageAuthenticity::Author(peer_id) => PublishConfig::Author(peer_id), + MessageAuthenticity::RandomAuthor => PublishConfig::RandomAuthor, + MessageAuthenticity::Anonymous => PublishConfig::Anonymous, + } + } +} + +/// Network behaviour that handles the gossipsub protocol. +/// +/// NOTE: Initialisation requires a [`MessageAuthenticity`] and [`Config`] instance. If +/// message signing is disabled, the [`ValidationMode`] in the config should be adjusted to an +/// appropriate level to accept unsigned messages. +/// +/// The DataTransform trait allows applications to optionally add extra encoding/decoding +/// functionality to the underlying messages. This is intended for custom compression algorithms. +/// +/// The TopicSubscriptionFilter allows applications to implement specific filters on topics to +/// prevent unwanted messages being propagated and evaluated. +#[derive(Debug)] +pub struct Behaviour { + sender: mpsc::Sender, + task: JoinHandle<()>, +} + +impl Behaviour { + pub fn new(privacy: MessageAuthenticity, config: Config) -> anyhow::Result { + let this = Self::new_with_subscription_filter_and_transform( + privacy, + config, + AllowAllSubscriptionFilter::default(), + IdentityTransform::default(), + )?; + Ok(this) + } + + pub fn new_with_subscription_filter( + privacy: MessageAuthenticity, + config: Config, + subscription_filter: F, + ) -> anyhow::Result + where + D: DataTransform + Default + Send + 'static, + F: TopicSubscriptionFilter + Send + 'static, + { + let res = Self::new_with_subscription_filter_and_transform( + privacy, + config, + subscription_filter, + D::default(), + )?; + Ok(res) + } + + pub fn new_with_subscription_filter_and_transform( + privacy: MessageAuthenticity, + config: Config, + subscription_filter: F, + data_transform: D, + ) -> anyhow::Result + where + D: DataTransform + Send + 'static, + F: TopicSubscriptionFilter + Send + 'static, + { + let inner = BehaviourInner::new_with_subscription_filter_and_transform( + privacy, + config, + subscription_filter, + data_transform, + ) + .map_err(|e| anyhow::anyhow!("{}", e))?; + + let (s, r) = mpsc::channel(128); + let task = tokio::task::spawn(async move { + inner.run(r).await; + }); + + Ok(Behaviour { sender: s, task }) + } + + /// Connect to another node, running gossip. + pub async fn connect(&self, node_addr: NodeAddr) -> anyhow::Result<()> { + let (s, r) = oneshot::channel(); + self.sender + .send(ActorMessage::Connect { + node_addr, + response: s, + }) + .await?; + let res = r.await?; + res + } + + /// Lists the hashes of the topics we are currently subscribed to. + pub async fn topics(&self) -> Vec { + todo!() + } + + /// Lists all mesh peers for a certain topic hash. + pub async fn mesh_peers(&self, topic_hash: TopicHash) -> Vec { + todo!() + } + + pub async fn all_mesh_peers(&self) -> Vec { + todo!() + } + + /// Lists all known peers and their associated subscribed topics. + pub async fn all_peers(&self) -> Vec<(NodeId, Vec)> { + todo!() + } + + /// Returns the gossipsub score for a given peer, if one exists. 
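+    ///
+    /// ```ignore
+    /// // Intended call shape (the body is still a stub):
+    /// if let Some(score) = behaviour.peer_score(peer_id).await {
+    ///     println!("score for {peer_id}: {score}");
+    /// }
+    /// ```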
+ pub async fn peer_score(&self, peer_id: NodeId) -> Option { + todo!() + } + + /// Subscribe to a topic. + /// + /// Returns [`Ok(true)`] if the subscription worked. Returns [`Ok(false)`] if we were already + /// subscribed. + pub async fn subscribe(&self, topic: Topic) -> Result { + todo!() + } + + /// Subscribe to events + pub async fn subscribe_events(&self) -> Option> { + todo!() + } + + /// Unsubscribes from a topic. + /// + /// Returns [`Ok(true)`] if we were subscribed to this topic. + pub async fn unsubscribe(&self, topic: Topic) -> Result { + todo!() + } + + /// Publishes a message with multiple topics to the network. + pub async fn publish( + &self, + topic: impl Into, + data: impl Into>, + ) -> Result { + todo!() + } + + /// This function should be called when [`Config::validate_messages()`] is `true` after + /// the message got validated by the caller. Messages are stored in the ['Memcache'] and + /// validation is expected to be fast enough that the messages should still exist in the cache. + /// There are three possible validation outcomes and the outcome is given in acceptance. + /// + /// If acceptance = [`MessageAcceptance::Accept`] the message will get propagated to the + /// network. The `propagation_source` parameter indicates who the message was received by and + /// will not be forwarded back to that peer. + /// + /// If acceptance = [`MessageAcceptance::Reject`] the message will be deleted from the memcache + /// and the P₄ penalty will be applied to the `propagation_source`. + // + /// If acceptance = [`MessageAcceptance::Ignore`] the message will be deleted from the memcache + /// but no P₄ penalty will be applied. + /// + /// This function will return true if the message was found in the cache and false if was not + /// in the cache anymore. + /// + /// This should only be called once per message. + pub async fn report_message_validation_result( + &self, + msg_id: MessageId, + propagation_source: NodeId, + acceptance: MessageAcceptance, + ) -> Result { + todo!() + } + + /// Adds a new peer to the list of explicitly connected peers. + pub async fn add_explicit_peer(&self, peer_id: NodeId) { + todo!() + } + + /// This removes the peer from explicitly connected peers, note that this does not disconnect + /// the peer. + pub async fn remove_explicit_peer(&self, peer_id: NodeId) { + todo!() + } + + /// Blacklists a peer. All messages from this peer will be rejected and any message that was + /// created by this peer will be rejected. + pub async fn blacklist_peer(&self, peer_id: NodeId) { + todo!() + } + + /// Removes a peer from the blacklist if it has previously been blacklisted. + pub async fn remove_blacklisted_peer(&self, peer_id: NodeId) { + todo!() + } + + /// Sets scoring parameters for a topic. + /// + /// The [`Self::with_peer_score()`] must first be called to initialise peer scoring. + pub async fn set_topic_params( + &self, + topic: Topic, + params: TopicScoreParams, + ) -> Result<(), &'static str> { + todo!() + } + + /// Returns a scoring parameters for a topic if existent. + pub async fn get_topic_params(&self, topic: Topic) -> Option<&TopicScoreParams> { + todo!() + } + + /// Sets the application specific score for a peer. Returns true if scoring is active and + /// the peer is connected or if the score of the peer is not yet expired, false otherwise. + pub async fn set_application_score(&self, peer_id: NodeId, new_score: f64) -> bool { + todo!() + } + + /// Constructs a [`RawMessage`] performing message signing if required. 
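+    ///
+    /// When signing is enabled, the signature is expected to cover [`SIGNING_PREFIX`]
+    /// concatenated with the encoded message, following the upstream gossipsub scheme.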
+    pub(crate) async fn build_raw_message(
+        &self,
+        topic: TopicHash,
+        data: Vec<u8>,
+    ) -> Result<RawMessage, PublishError> {
+        todo!()
+    }
+}
+
+#[derive(Debug)]
+enum ActorMessage {
+    Connect {
+        node_addr: NodeAddr,
+        response: oneshot::Sender<anyhow::Result<()>>,
+    },
+}
+
+#[derive(Debug)]
+struct BehaviourInner<D, F> {
+    /// Configuration providing gossipsub performance parameters.
+    config: Config,
+
+    /// Events that need to be yielded to the user.
+    events: mpsc::Sender<Event>,
+    events_receiver: Option<mpsc::Receiver<Event>>,
+
+    /// Pools non-urgent control messages between heartbeats.
+    control_pool: HashMap<NodeId, Vec<ControlAction>>,
+
+    /// Information used for publishing messages.
+    publish_config: PublishConfig,
+
+    /// An LRU Time cache for storing seen messages (based on their ID). This cache prevents
+    /// duplicates from being propagated to the application and on the network.
+    duplicate_cache: DuplicateCache<MessageId>,
+
+    /// A set of connected peers, indexed by their [`NodeId`], tracking both the [`PeerKind`] and
+    /// the set of [`ConnectionId`]s.
+    connected_peers: HashMap<NodeId, PeerConnections>,
+
+    /// A map of topic hashes to the set of gossipsub peers subscribed to them.
+    topic_peers: HashMap<TopicHash, BTreeSet<NodeId>>,
+
+    /// A map of all connected peers to their subscribed topics.
+    peer_topics: HashMap<NodeId, BTreeSet<TopicHash>>,
+
+    /// A set of all explicit peers. These are peers that remain connected and we unconditionally
+    /// forward messages to, outside of the scoring system.
+    explicit_peers: HashSet<NodeId>,
+
+    /// A list of peers that have been blacklisted by the user.
+    /// Messages are not sent to and are rejected from these peers.
+    blacklisted_peers: HashSet<NodeId>,
+
+    /// Overlay network of connected peers - Maps topics to connected gossipsub peers.
+    mesh: HashMap<TopicHash, BTreeSet<NodeId>>,
+
+    /// Map of topics to list of peers that we publish to, but don't subscribe to.
+    fanout: HashMap<TopicHash, BTreeSet<NodeId>>,
+
+    /// The last publish time for fanout topics.
+    fanout_last_pub: HashMap<TopicHash, Instant>,
+
+    /// Storage for backoffs.
+    backoffs: BackoffStorage,
+
+    /// Message cache for the last few heartbeats.
+    mcache: MessageCache,
+
+    /// Heartbeat interval stream.
+    heartbeat: Ticker,
+
+    /// Number of heartbeats since the beginning of time; this allows us to amortize some
+    /// resource cleanup, e.g. backoff cleanup.
+    heartbeat_ticks: u64,
+
+    /// We remember all peers we found through peer exchange, since those peers are not considered
+    /// as safe as randomly discovered outbound peers. This behaviour diverges from the go
+    /// implementation to avoid possible love bombing attacks in PX. When disconnecting, peers
+    /// will be removed from this list, which may result in a true outbound rediscovery.
+    px_peers: HashSet<NodeId>,
+
+    /// Set of connected outbound peers (we only consider true outbound peers found through
+    /// discovery and not by PX).
+    outbound_peers: HashSet<NodeId>,
+
+    /// Stores optional peer score data together with thresholds, decay interval and gossip
+    /// promises.
+    peer_score: Option<(PeerScore, PeerScoreThresholds, Ticker, GossipPromises)>,
+
+    /// Counts the number of `IHAVE` received from each peer since the last heartbeat.
+    count_received_ihave: HashMap<NodeId, usize>,
+
+    /// Counts the number of `IWANT` that we sent to each peer since the last heartbeat.
+    count_sent_iwant: HashMap<NodeId, usize>,
+
+    /// Keeps track of IWANT messages that we are awaiting to send.
+    /// This is used to prevent sending duplicate IWANT messages for the same message.
+    pending_iwant_msgs: HashSet<MessageId>,
+
+    /// Short term cache for published message ids. This is used for penalizing peers sending
+    /// our own messages back if the messages are anonymous or use a random author.
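+    ///
+    /// E.g. a message we published as `RandomAuthor` and later receive back from the network
+    /// is matched here by its [`MessageId`] and the forwarding peer is penalized.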
+ published_message_ids: DuplicateCache, + + /// The filter used to handle message subscriptions. + subscription_filter: F, + + /// A general transformation function that can be applied to data received from the wire before + /// calculating the message-id and sending to the application. This is designed to allow the + /// user to implement arbitrary topic-based compression algorithms. + data_transform: D, + // /// Keep track of a set of internal metrics relating to gossipsub. + // metrics: Option, +} + +impl BehaviourInner +where + D: DataTransform + Send + 'static, + F: TopicSubscriptionFilter + Send + 'static, +{ + /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a + /// [`Config`] and a custom subscription filter and data transform. + fn new_with_subscription_filter_and_transform( + privacy: MessageAuthenticity, + config: Config, + // metrics: Option<(&mut Registry, MetricsConfig)>, + subscription_filter: F, + data_transform: D, + ) -> Result { + // Set up the router given the configuration settings. + + // We do not allow configurations where a published message would also be rejected if it + // were received locally. + validate_config(&privacy, config.validation_mode())?; + + let (events, events_receiver) = mpsc::channel(128); + + Ok(BehaviourInner { + // metrics: metrics.map(|(registry, cfg)| Metrics::new(registry, cfg)), + events, + events_receiver: Some(events_receiver), + control_pool: HashMap::new(), + publish_config: privacy.into(), + duplicate_cache: DuplicateCache::new(config.duplicate_cache_time()), + topic_peers: HashMap::new(), + peer_topics: HashMap::new(), + explicit_peers: HashSet::new(), + blacklisted_peers: HashSet::new(), + mesh: HashMap::new(), + fanout: HashMap::new(), + fanout_last_pub: HashMap::new(), + backoffs: BackoffStorage::new( + &config.prune_backoff(), + config.heartbeat_interval(), + config.backoff_slack(), + ), + mcache: MessageCache::new(config.history_gossip(), config.history_length()), + heartbeat: Ticker::new_with_next( + config.heartbeat_interval(), + config.heartbeat_initial_delay(), + ), + heartbeat_ticks: 0, + px_peers: HashSet::new(), + outbound_peers: HashSet::new(), + peer_score: None, + count_received_ihave: HashMap::new(), + count_sent_iwant: HashMap::new(), + pending_iwant_msgs: HashSet::new(), + connected_peers: HashMap::new(), + published_message_ids: DuplicateCache::new(config.published_message_ids_cache_time()), + config, + subscription_filter, + data_transform, + }) + } + + /// Connect to another node, running gossip. + fn connect(&self, node_addr: NodeAddr) -> anyhow::Result<()> { + todo!() + } + + /// Subscribe to events + fn subscribe_events(&mut self) -> Option> { + self.events_receiver.take() + } + + /// Lists the hashes of the topics we are currently subscribed to. + fn topics(&self) -> impl Iterator { + self.mesh.keys() + } + + /// Lists all mesh peers for a certain topic hash. + fn mesh_peers(&self, topic_hash: &TopicHash) -> impl Iterator { + self.mesh.get(topic_hash).into_iter().flat_map(|x| x.iter()) + } + + fn all_mesh_peers(&self) -> impl Iterator { + let mut res = BTreeSet::new(); + for peers in self.mesh.values() { + res.extend(peers); + } + res.into_iter() + } + + /// Lists all known peers and their associated subscribed topics. + fn all_peers(&self) -> impl Iterator)> { + self.peer_topics + .iter() + .map(|(peer_id, topic_set)| (peer_id, topic_set.iter().collect())) + } + + /// Returns the gossipsub score for a given peer, if one exists. 
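+    ///
+    /// Returns `None` when scoring was never activated via [`Self::with_peer_score()`].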
+ fn peer_score(&self, peer_id: &NodeId) -> Option { + self.peer_score + .as_ref() + .map(|(score, ..)| score.score(peer_id)) + } + + /// Subscribe to a topic. + /// + /// Returns [`Ok(true)`] if the subscription worked. Returns [`Ok(false)`] if we were already + /// subscribed. + fn subscribe(&mut self, topic: &Topic) -> Result { + tracing::debug!(%topic, "Subscribing to topic"); + let topic_hash = topic.hash(); + if !self.subscription_filter.can_subscribe(&topic_hash) { + return Err(SubscriptionError::NotAllowed); + } + + if self.mesh.contains_key(&topic_hash) { + tracing::debug!(%topic, "Topic is already in the mesh"); + return Ok(false); + } + + // send subscription request to all peers + for peer in self.peer_topics.keys().copied().collect::>() { + tracing::debug!(%peer, "Sending SUBSCRIBE to peer"); + let event = RpcOut::Subscribe(topic_hash.clone()); + self.send_message(peer, event); + } + + // call JOIN(topic) + // this will add new peers to the mesh for the topic + self.join(&topic_hash); + tracing::debug!(%topic, "Subscribed to topic"); + Ok(true) + } + + /// Unsubscribes from a topic. + /// + /// Returns [`Ok(true)`] if we were subscribed to this topic. + #[allow(clippy::unnecessary_wraps)] + fn unsubscribe(&mut self, topic: &Topic) -> Result { + tracing::debug!(%topic, "Unsubscribing from topic"); + let topic_hash = topic.hash(); + + if !self.mesh.contains_key(&topic_hash) { + tracing::debug!(topic=%topic_hash, "Already unsubscribed from topic"); + // we are not subscribed + return Ok(false); + } + + // announce to all peers + for peer in self.peer_topics.keys().copied().collect::>() { + tracing::debug!(%peer, "Sending UNSUBSCRIBE to peer"); + let event = RpcOut::Unsubscribe(topic_hash.clone()); + self.send_message(peer, event); + } + + // call LEAVE(topic) + // this will remove the topic from the mesh + self.leave(&topic_hash); + + tracing::debug!(topic=%topic_hash, "Unsubscribed from topic"); + Ok(true) + } + + /// Publishes a message with multiple topics to the network. + fn publish( + &mut self, + topic: impl Into, + data: impl Into>, + ) -> Result { + let data = data.into(); + let topic = topic.into(); + + // Transform the data before building a raw_message. + let transformed_data = self + .data_transform + .outbound_transform(&topic, data.clone())?; + + let raw_message = self.build_raw_message(topic, transformed_data)?; + + // calculate the message id from the un-transformed data + let msg_id = self.config.message_id(&Message { + source: raw_message.source, + data, // the uncompressed form + sequence_number: raw_message.sequence_number, + topic: raw_message.topic.clone(), + }); + + // TODO: + // check that the size doesn't exceed the max transmission size + + // Check the if the message has been published before + if self.duplicate_cache.contains(&msg_id) { + // This message has already been seen. We don't re-publish messages that have already + // been published on the network. 
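+            // The caller sees this as `PublishError::Duplicate` and can decide how to react.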
+ tracing::warn!( + message=%msg_id, + "Not publishing a message that has already been published" + ); + return Err(PublishError::Duplicate); + } + + tracing::trace!(message=%msg_id, "Publishing message"); + + let topic_hash = raw_message.topic.clone(); + + let mut recipient_peers = HashSet::new(); + if let Some(set) = self.topic_peers.get(&topic_hash) { + if self.config.flood_publish() { + // Forward to all peers above score and all explicit peers + recipient_peers.extend(set.iter().filter(|p| { + self.explicit_peers.contains(*p) + || !self.score_below_threshold(p, |ts| ts.publish_threshold).0 + })); + } else { + match self.mesh.get(&raw_message.topic) { + // Mesh peers + Some(mesh_peers) => { + recipient_peers.extend(mesh_peers); + } + // Gossipsub peers + None => { + tracing::debug!(topic=%topic_hash, "Topic not in the mesh"); + // If we have fanout peers add them to the map. + if self.fanout.contains_key(&topic_hash) { + for peer in self.fanout.get(&topic_hash).expect("Topic must exist") { + recipient_peers.insert(*peer); + } + } else { + // We have no fanout peers, select mesh_n of them and add them to the fanout + let mesh_n = self.config.mesh_n(); + let new_peers = get_random_peers( + &self.topic_peers, + &self.connected_peers, + &topic_hash, + mesh_n, + { + |p| { + !self.explicit_peers.contains(p) + && !self + .score_below_threshold(p, |pst| { + pst.publish_threshold + }) + .0 + } + }, + ); + // Add the new peers to the fanout and recipient peers + self.fanout.insert(topic_hash.clone(), new_peers.clone()); + for peer in new_peers { + tracing::debug!(%peer, "Peer added to fanout"); + recipient_peers.insert(peer); + } + } + // We are publishing to fanout peers - update the time we published + self.fanout_last_pub + .insert(topic_hash.clone(), Instant::now()); + } + } + + // Explicit peers + for peer in &self.explicit_peers { + if set.contains(peer) { + recipient_peers.insert(*peer); + } + } + } + } + + if recipient_peers.is_empty() { + return Err(PublishError::InsufficientPeers); + } + + // If the message isn't a duplicate and we have sent it to some peers add it to the + // duplicate cache and memcache. + self.duplicate_cache.insert(msg_id.clone()); + self.mcache.put(&msg_id, raw_message.clone()); + + // If the message is anonymous or has a random author add it to the published message ids + // cache. + if let PublishConfig::RandomAuthor | PublishConfig::Anonymous = self.publish_config { + if !self.config.allow_self_origin() { + self.published_message_ids.insert(msg_id.clone()); + } + } + + // Send to peers we know are subscribed to the topic. + for peer_id in recipient_peers.iter() { + tracing::trace!(peer=%peer_id, "Sending message to peer"); + self.send_message(*peer_id, RpcOut::Publish(raw_message.clone())); + } + + tracing::debug!(message=%msg_id, "Published message"); + + // TODO: + // if let Some(metrics) = self.metrics.as_mut() { + // metrics.register_published_message(&topic_hash); + // } + + Ok(msg_id) + } + + fn report_message_validation_result( + &mut self, + msg_id: &MessageId, + propagation_source: &NodeId, + acceptance: MessageAcceptance, + ) -> Result { + let reject_reason = match acceptance { + MessageAcceptance::Accept => { + let (raw_message, originating_peers) = match self.mcache.validate(msg_id) { + Some((raw_message, originating_peers)) => { + (raw_message.clone(), originating_peers) + } + None => { + tracing::warn!( + message=%msg_id, + "Message not in cache. 
Ignoring forwarding" + ); + // TODO: + // if let Some(metrics) = self.metrics.as_mut() { + // metrics.memcache_miss(); + // } + return Ok(false); + } + }; + + // TODO: + // if let Some(metrics) = self.metrics.as_mut() { + // metrics.register_msg_validation(&raw_message.topic, &acceptance); + // } + + self.forward_msg( + msg_id, + raw_message, + Some(propagation_source), + originating_peers, + )?; + return Ok(true); + } + MessageAcceptance::Reject => RejectReason::ValidationFailed, + MessageAcceptance::Ignore => RejectReason::ValidationIgnored, + }; + + if let Some((raw_message, originating_peers)) = self.mcache.remove(msg_id) { + // TODO: + // if let Some(metrics) = self.metrics.as_mut() { + // metrics.register_msg_validation(&raw_message.topic, &acceptance); + // } + + // Tell peer_score about reject + // Reject the original source, and any duplicates we've seen from other peers. + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.reject_message( + propagation_source, + msg_id, + &raw_message.topic, + reject_reason, + ); + for peer in originating_peers.iter() { + peer_score.reject_message(peer, msg_id, &raw_message.topic, reject_reason); + } + } + Ok(true) + } else { + tracing::warn!(message=%msg_id, "Rejected message not in cache"); + Ok(false) + } + } + + /// Adds a new peer to the list of explicitly connected peers. + fn add_explicit_peer(&mut self, peer_id: &NodeId) { + tracing::debug!(peer=%peer_id, "Adding explicit peer"); + + self.explicit_peers.insert(*peer_id); + + self.check_explicit_peer_connection(peer_id); + } + + /// This removes the peer from explicitly connected peers, note that this does not disconnect + /// the peer. + fn remove_explicit_peer(&mut self, peer_id: &NodeId) { + tracing::debug!(peer=%peer_id, "Removing explicit peer"); + self.explicit_peers.remove(peer_id); + } + + /// Blacklists a peer. All messages from this peer will be rejected and any message that was + /// created by this peer will be rejected. + fn blacklist_peer(&mut self, peer_id: &NodeId) { + if self.blacklisted_peers.insert(*peer_id) { + tracing::debug!(peer=%peer_id, "Peer has been blacklisted"); + } + } + + /// Removes a peer from the blacklist if it has previously been blacklisted. + fn remove_blacklisted_peer(&mut self, peer_id: &NodeId) { + if self.blacklisted_peers.remove(peer_id) { + tracing::debug!(peer=%peer_id, "Peer has been removed from the blacklist"); + } + } + + /// Activates the peer scoring system with the given parameters. This will reset all scores + /// if there was already another peer scoring system activated. Returns an error if the + /// params are not valid or if they got already set. + pub fn with_peer_score( + &mut self, + params: PeerScoreParams, + threshold: PeerScoreThresholds, + ) -> Result<(), String> { + self.with_peer_score_and_message_delivery_time_callback(params, threshold, None) + } + + /// Activates the peer scoring system with the given parameters and a message delivery time + /// callback. Returns an error if the parameters got already set. 
+ pub fn with_peer_score_and_message_delivery_time_callback( + &mut self, + params: PeerScoreParams, + threshold: PeerScoreThresholds, + callback: Option, + ) -> Result<(), String> { + params.validate()?; + threshold.validate()?; + + if self.peer_score.is_some() { + return Err("Peer score set twice".into()); + } + + let interval = Ticker::new(params.decay_interval); + let peer_score = PeerScore::new_with_message_delivery_time_callback(params, callback); + self.peer_score = Some((peer_score, threshold, interval, GossipPromises::default())); + Ok(()) + } + + /// Sets scoring parameters for a topic. + /// + /// The [`Self::with_peer_score()`] must first be called to initialise peer scoring. + fn set_topic_params( + &mut self, + topic: Topic, + params: TopicScoreParams, + ) -> Result<(), &'static str> { + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.set_topic_params(topic.hash(), params); + Ok(()) + } else { + Err("Peer score must be initialised with `with_peer_score()`") + } + } + + /// Returns a scoring parameters for a topic if existent. + fn get_topic_params(&self, topic: &Topic) -> Option<&TopicScoreParams> { + self.peer_score.as_ref()?.0.get_topic_params(&topic.hash()) + } + + /// Sets the application specific score for a peer. Returns true if scoring is active and + /// the peer is connected or if the score of the peer is not yet expired, false otherwise. + fn set_application_score(&mut self, peer_id: &NodeId, new_score: f64) -> bool { + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.set_application_score(peer_id, new_score) + } else { + false + } + } + + /// Gossipsub JOIN(topic) - adds topic peers to mesh and sends them GRAFT messages. + async fn join(&mut self, topic_hash: &TopicHash) { + tracing::debug!(topic=%topic_hash, "Running JOIN for topic"); + + // if we are already in the mesh, return + if self.mesh.contains_key(topic_hash) { + tracing::debug!(topic=%topic_hash, "JOIN: The topic is already in the mesh, ignoring JOIN"); + return; + } + + let mut added_peers = HashSet::new(); + + // TODO: + // if let Some(m) = self.metrics.as_mut() { + // m.joined(topic_hash) + // } + + // check if we have mesh_n peers in fanout[topic] and add them to the mesh if we do, + // removing the fanout entry. 
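+        // Fanout peers were already selected when we published to this topic without being
+        // subscribed, so they are the cheapest candidates to promote into the mesh.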
+        if let Some((_, mut peers)) = self.fanout.remove_entry(topic_hash) {
+            tracing::debug!(
+                topic=%topic_hash,
+                "JOIN: Removing peers from the fanout for topic"
+            );
+
+            // remove explicit peers, peers with negative scores, and backoffed peers
+            peers.retain(|p| {
+                !self.explicit_peers.contains(p)
+                    && !self.score_below_threshold(p, |_| 0.0).0
+                    && !self.backoffs.is_backoff_with_slack(topic_hash, p)
+            });
+
+            // Add up to mesh_n of them to the mesh
+            // NOTE: These aren't randomly added, currently FIFO
+            let add_peers = std::cmp::min(peers.len(), self.config.mesh_n());
+            tracing::debug!(
+                topic=%topic_hash,
+                "JOIN: Adding {:?} peers from the fanout for topic",
+                add_peers
+            );
+            added_peers.extend(peers.iter().take(add_peers));
+
+            self.mesh.insert(
+                topic_hash.clone(),
+                peers.into_iter().take(add_peers).collect(),
+            );
+
+            // remove the last published time
+            self.fanout_last_pub.remove(topic_hash);
+        }
+
+        let fanout_added = added_peers.len();
+        // TODO:
+        // if let Some(m) = self.metrics.as_mut() {
+        //     m.peers_included(topic_hash, Inclusion::Fanout, fanout_added)
+        // }
+
+        // check if we need to get more peers, which we randomly select
+        if added_peers.len() < self.config.mesh_n() {
+            // get the peers
+            let new_peers = get_random_peers(
+                &self.topic_peers,
+                &self.connected_peers,
+                topic_hash,
+                self.config.mesh_n() - added_peers.len(),
+                |peer| {
+                    !added_peers.contains(peer)
+                        && !self.explicit_peers.contains(peer)
+                        && !self.score_below_threshold(peer, |_| 0.0).0
+                        && !self.backoffs.is_backoff_with_slack(topic_hash, peer)
+                },
+            );
+            added_peers.extend(new_peers.clone());
+            // add them to the mesh
+            tracing::debug!(
+                "JOIN: Inserting {:?} random peers into the mesh",
+                new_peers.len()
+            );
+            let mesh_peers = self.mesh.entry(topic_hash.clone()).or_default();
+            mesh_peers.extend(new_peers);
+        }
+
+        let random_added = added_peers.len() - fanout_added;
+        // TODO:
+        // if let Some(m) = self.metrics.as_mut() {
+        //     m.peers_included(topic_hash, Inclusion::Random, random_added)
+        // }
+
+        for peer_id in added_peers {
+            // Send a GRAFT control message
+            tracing::debug!(peer=%peer_id, "JOIN: Sending Graft message to peer");
+            if let Some((peer_score, ..)) = &mut self.peer_score {
+                peer_score.graft(&peer_id, topic_hash.clone());
+            }
+            Self::control_pool_add(
+                &mut self.control_pool,
+                peer_id,
+                ControlAction::Graft {
+                    topic_hash: topic_hash.clone(),
+                },
+            );
+
+            // If the peer did not previously exist in any mesh, inform the handler
+            peer_added_to_mesh(
+                peer_id,
+                vec![topic_hash],
+                &self.mesh,
+                self.peer_topics.get(&peer_id),
+                &self.connected_peers,
+            )
+            .await;
+        }
+
+        let mesh_peers = self.mesh_peers(topic_hash).count();
+        // TODO:
+        // if let Some(m) = self.metrics.as_mut() {
+        //     m.set_mesh_peers(topic_hash, mesh_peers)
+        // }
+
+        tracing::debug!(topic=%topic_hash, "Completed JOIN for topic");
+    }
+
+    /// Creates a PRUNE gossipsub action.
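+    ///
+    /// The resulting control message carries an optional peer-exchange list (only when
+    /// `do_px`) and a backoff in seconds, e.g. `Prune { topic_hash, peers, backoff: Some(60) }`.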
+ fn make_prune( + &mut self, + topic_hash: &TopicHash, + peer: &NodeId, + do_px: bool, + on_unsubscribe: bool, + ) -> ControlAction { + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.prune(peer, topic_hash.clone()); + } + + // Select peers for peer exchange + let peers = if do_px { + get_random_peers( + &self.topic_peers, + &self.connected_peers, + topic_hash, + self.config.prune_peers(), + |p| p != peer && !self.score_below_threshold(p, |_| 0.0).0, + ) + .into_iter() + .map(|p| PeerInfo { peer_id: Some(p) }) + .collect() + } else { + Vec::new() + }; + + let backoff = if on_unsubscribe { + self.config.unsubscribe_backoff() + } else { + self.config.prune_backoff() + }; + + // update backoff + self.backoffs.update_backoff(topic_hash, peer, backoff); + + ControlAction::Prune { + topic_hash: topic_hash.clone(), + peers, + backoff: Some(backoff.as_secs()), + } + } + + /// Gossipsub LEAVE(topic) - Notifies mesh\[topic\] peers with PRUNE messages. + async fn leave(&mut self, topic_hash: &TopicHash) { + tracing::debug!(topic=%topic_hash, "Running LEAVE for topic"); + + // If our mesh contains the topic, send prune to peers and delete it from the mesh + if let Some((_, peers)) = self.mesh.remove_entry(topic_hash) { + // TODO: + // if let Some(m) = self.metrics.as_mut() { + // m.left(topic_hash) + // } + for peer in peers { + // Send a PRUNE control message + tracing::debug!(%peer, "LEAVE: Sending PRUNE to peer"); + let on_unsubscribe = true; + let control = + self.make_prune(topic_hash, &peer, self.config.do_px(), on_unsubscribe); + Self::control_pool_add(&mut self.control_pool, peer, control); + + // If the peer did not previously exist in any mesh, inform the handler + peer_removed_from_mesh( + peer, + topic_hash, + &self.mesh, + self.peer_topics.get(&peer), + &self.connected_peers, + ) + .await; + } + } + tracing::debug!(topic=%topic_hash, "Completed LEAVE for topic"); + } + + /// Checks if the given peer is still connected and if not dials the peer again. + fn check_explicit_peer_connection(&mut self, peer_id: &NodeId) { + if !self.peer_topics.contains_key(peer_id) { + // Connect to peer + tracing::debug!(peer=%peer_id, "Connecting to explicit peer"); + // TODO: call dial + // self.events.push_back(ToSwarm::Dial { + // opts: DialOpts::peer_id(*peer_id).build(), + // }); + } + } + + /// Determines if a peer's score is below a given `PeerScoreThreshold` chosen via the + /// `threshold` parameter. + fn score_below_threshold( + &self, + peer_id: &NodeId, + threshold: impl Fn(&PeerScoreThresholds) -> f64, + ) -> (bool, f64) { + Self::score_below_threshold_from_scores(&self.peer_score, peer_id, threshold) + } + + fn score_below_threshold_from_scores( + peer_score: &Option<(PeerScore, PeerScoreThresholds, Ticker, GossipPromises)>, + peer_id: &NodeId, + threshold: impl Fn(&PeerScoreThresholds) -> f64, + ) -> (bool, f64) { + if let Some((peer_score, thresholds, ..)) = peer_score { + let score = peer_score.score(peer_id); + if score < threshold(thresholds) { + return (true, score); + } + (false, score) + } else { + (false, 0.0) + } + } + + /// Handles an IHAVE control message. Checks our cache of messages. If the message is unknown, + /// requests it with an IWANT control message. 
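+    ///
+    /// Sketch of the exchange this handler drives (message ids purely illustrative):
+    /// `peer -> us: IHAVE(topic, [id1, id2])` followed by `us -> peer: IWANT([id2])`,
+    /// assuming `id1` is already in our duplicate cache.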
+ fn handle_ihave(&mut self, peer_id: &NodeId, ihave_msgs: Vec<(TopicHash, Vec)>) { + // We ignore IHAVE gossip from any peer whose score is below the gossip threshold + if let (true, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold) { + tracing::debug!( + peer=%peer_id, + %score, + "IHAVE: ignoring peer with score below threshold" + ); + return; + } + + // IHAVE flood protection + let peer_have = self.count_received_ihave.entry(*peer_id).or_insert(0); + *peer_have += 1; + if *peer_have > self.config.max_ihave_messages() { + tracing::debug!( + peer=%peer_id, + "IHAVE: peer has advertised too many times ({}) within this heartbeat \ + interval; ignoring", + *peer_have + ); + return; + } + + if let Some(iasked) = self.count_sent_iwant.get(peer_id) { + if *iasked >= self.config.max_ihave_length() { + tracing::debug!( + peer=%peer_id, + "IHAVE: peer has already advertised too many messages ({}); ignoring", + *iasked + ); + return; + } + } + + tracing::trace!(peer=%peer_id, "Handling IHAVE for peer"); + + let mut iwant_ids = HashSet::new(); + + let want_message = |id: &MessageId| { + if self.duplicate_cache.contains(id) { + return false; + } + + if self.pending_iwant_msgs.contains(id) { + return false; + } + + self.peer_score + .as_ref() + .map(|(_, _, _, promises)| !promises.contains(id)) + .unwrap_or(true) + }; + + for (topic, ids) in ihave_msgs { + // only process the message if we are subscribed + if !self.mesh.contains_key(&topic) { + tracing::debug!( + %topic, + "IHAVE: Ignoring IHAVE - Not subscribed to topic" + ); + continue; + } + + for id in ids.into_iter().filter(want_message) { + // have not seen this message and are not currently requesting it + if iwant_ids.insert(id) { + // Register the IWANT metric + // TODO: + // if let Some(metrics) = self.metrics.as_mut() { + // metrics.register_iwant(&topic); + // } + } + } + } + + if !iwant_ids.is_empty() { + let iasked = self.count_sent_iwant.entry(*peer_id).or_insert(0); + let mut iask = iwant_ids.len(); + if *iasked + iask > self.config.max_ihave_length() { + iask = self.config.max_ihave_length().saturating_sub(*iasked); + } + + // Send the list of IWANT control messages + tracing::debug!( + peer=%peer_id, + "IHAVE: Asking for {} out of {} messages from peer", + iask, + iwant_ids.len() + ); + + // Ask in random order + let mut iwant_ids_vec: Vec<_> = iwant_ids.into_iter().collect(); + let mut rng = thread_rng(); + iwant_ids_vec.partial_shuffle(&mut rng, iask); + + iwant_ids_vec.truncate(iask); + *iasked += iask; + + for message_id in &iwant_ids_vec { + // Add all messages to the pending list + self.pending_iwant_msgs.insert(message_id.clone()); + } + + if let Some((_, _, _, gossip_promises)) = &mut self.peer_score { + gossip_promises.add_promise( + *peer_id, + &iwant_ids_vec, + Instant::now() + self.config.iwant_followup_time(), + ); + } + tracing::trace!( + peer=%peer_id, + "IHAVE: Asking for the following messages from peer: {:?}", + iwant_ids_vec + ); + + Self::control_pool_add( + &mut self.control_pool, + *peer_id, + ControlAction::IWant { + message_ids: iwant_ids_vec, + }, + ); + } + tracing::trace!(peer=%peer_id, "Completed IHAVE handling for peer"); + } + + /// Handles an IWANT control message. Checks our cache of messages. If the message exists it is + /// forwarded to the requesting peer. 
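+    ///
+    /// Requests are rate-limited: once a peer has asked for the same message more than
+    /// `gossip_retransimission()` times, further copies are not sent to it.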
+ async fn handle_iwant(&mut self, peer_id: &NodeId, iwant_msgs: Vec) { + // We ignore IWANT gossip from any peer whose score is below the gossip threshold + if let (true, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold) { + tracing::debug!( + peer=%peer_id, + "IWANT: ignoring peer with score below threshold [score = {}]", + score + ); + return; + } + + tracing::debug!(peer=%peer_id, "Handling IWANT for peer"); + + for id in iwant_msgs { + // If we have it and the IHAVE count is not above the threshold, + // forward the message. + if let Some((msg, count)) = self + .mcache + .get_with_iwant_counts(&id, peer_id) + .map(|(msg, count)| (msg.clone(), count)) + { + if count > self.config.gossip_retransimission() { + tracing::debug!( + peer=%peer_id, + message=%id, + "IWANT: Peer has asked for message too many times; ignoring request" + ); + } else { + tracing::debug!(peer=%peer_id, "IWANT: Sending cached messages to peer"); + self.send_message(*peer_id, RpcOut::Forward(msg)).await; + } + } + } + tracing::debug!(peer=%peer_id, "Completed IWANT handling for peer"); + } + + /// Handles GRAFT control messages. If subscribed to the topic, adds the peer to mesh, if not, + /// responds with PRUNE messages. + async fn handle_graft(&mut self, peer_id: &NodeId, topics: Vec) { + tracing::debug!(peer=%peer_id, "Handling GRAFT message for peer"); + + let mut to_prune_topics = HashSet::new(); + + let mut do_px = self.config.do_px(); + + // For each topic, if a peer has grafted us, then we necessarily must be in their mesh + // and they must be subscribed to the topic. Ensure we have recorded the mapping. + for topic in &topics { + self.peer_topics + .entry(*peer_id) + .or_default() + .insert(topic.clone()); + self.topic_peers + .entry(topic.clone()) + .or_default() + .insert(*peer_id); + } + + // we don't GRAFT to/from explicit peers; complain loudly if this happens + if self.explicit_peers.contains(peer_id) { + tracing::warn!(peer=%peer_id, "GRAFT: ignoring request from direct peer"); + // this is possibly a bug from non-reciprocal configuration; send a PRUNE for all topics + to_prune_topics = topics.into_iter().collect(); + // but don't PX + do_px = false + } else { + let (below_zero, score) = self.score_below_threshold(peer_id, |_| 0.0); + let now = Instant::now(); + for topic_hash in topics { + if let Some(peers) = self.mesh.get_mut(&topic_hash) { + // if the peer is already in the mesh ignore the graft + if peers.contains(peer_id) { + tracing::debug!( + peer=%peer_id, + topic=%&topic_hash, + "GRAFT: Received graft for peer that is already in topic" + ); + continue; + } + + // make sure we are not backing off that peer + if let Some(backoff_time) = self.backoffs.get_backoff_time(&topic_hash, peer_id) + { + if backoff_time > now { + tracing::warn!( + peer=%peer_id, + "[Penalty] Peer attempted graft within backoff time, penalizing" + ); + // add behavioural penalty + if let Some((peer_score, ..)) = &mut self.peer_score { + // TODO: + // if let Some(metrics) = self.metrics.as_mut() { + // metrics.register_score_penalty(Penalty::GraftBackoff); + // } + peer_score.add_penalty(peer_id, 1); + + // check the flood cutoff + // See: https://github.com/rust-lang/rust-clippy/issues/10061 + #[allow(unknown_lints, clippy::unchecked_duration_subtraction)] + let flood_cutoff = (backoff_time + + self.config.graft_flood_threshold()) + - self.config.prune_backoff(); + if flood_cutoff > now { + //extra penalty + peer_score.add_penalty(peer_id, 1); + } + } + // no PX + do_px = false; + + 
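+                            // Queue a PRUNE for this topic so the peer learns the GRAFT was refused.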
to_prune_topics.insert(topic_hash.clone()); + continue; + } + } + + // check the score + if below_zero { + // we don't GRAFT peers with negative score + tracing::debug!( + peer=%peer_id, + %score, + topic=%topic_hash, + "GRAFT: ignoring peer with negative score" + ); + // we do send them PRUNE however, because it's a matter of protocol correctness + to_prune_topics.insert(topic_hash.clone()); + // but we won't PX to them + do_px = false; + continue; + } + + // check mesh upper bound and only allow graft if the upper bound is not reached or + // if it is an outbound peer + if peers.len() >= self.config.mesh_n_high() + && !self.outbound_peers.contains(peer_id) + { + to_prune_topics.insert(topic_hash.clone()); + continue; + } + + // add peer to the mesh + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "GRAFT: Mesh link added for peer in topic" + ); + + if peers.insert(*peer_id) { + // TODO: + // if let Some(m) = self.metrics.as_mut() { + // m.peers_included(&topic_hash, Inclusion::Subscribed, 1) + // } + } + + // If the peer did not previously exist in any mesh, inform the handler + peer_added_to_mesh( + *peer_id, + vec![&topic_hash], + &self.mesh, + self.peer_topics.get(peer_id), + &self.connected_peers, + ) + .await; + + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.graft(peer_id, topic_hash); + } + } else { + // don't do PX when there is an unknown topic to avoid leaking our peers + do_px = false; + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "GRAFT: Received graft for unknown topic from peer" + ); + // spam hardening: ignore GRAFTs for unknown topics + continue; + } + } + } + + if !to_prune_topics.is_empty() { + // build the prune messages to send + let on_unsubscribe = false; + for action in to_prune_topics + .iter() + .map(|t| self.make_prune(t, peer_id, do_px, on_unsubscribe)) + .collect::>() + { + self.send_message(*peer_id, RpcOut::Control(action)); + } + // Send the prune messages to the peer + tracing::debug!( + peer=%peer_id, + "GRAFT: Not subscribed to topics - Sending PRUNE to peer" + ); + } + tracing::debug!(peer=%peer_id, "Completed GRAFT handling for peer"); + } + + async fn remove_peer_from_mesh( + &mut self, + peer_id: &NodeId, + topic_hash: &TopicHash, + backoff: Option, + always_update_backoff: bool, + reason: Churn, + ) { + let mut update_backoff = always_update_backoff; + if let Some(peers) = self.mesh.get_mut(topic_hash) { + // remove the peer if it exists in the mesh + if peers.remove(peer_id) { + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "PRUNE: Removing peer from the mesh for topic" + ); + // TODO: + // if let Some(m) = self.metrics.as_mut() { + // m.peers_removed(topic_hash, reason, 1) + // } + + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.prune(peer_id, topic_hash.clone()); + } + + update_backoff = true; + + // inform the handler + peer_removed_from_mesh( + *peer_id, + topic_hash, + &self.mesh, + self.peer_topics.get(peer_id), + &self.connected_peers, + ) + .await; + } + } + if update_backoff { + let time = if let Some(backoff) = backoff { + Duration::from_secs(backoff) + } else { + self.config.prune_backoff() + }; + // is there a backoff specified by the peer? if so obey it. + self.backoffs.update_backoff(topic_hash, peer_id, time); + } + } + + /// Handles PRUNE control messages. Removes peer from the mesh. 
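+    ///
+    /// Each entry arrives as a `(topic, px_peers, backoff_seconds)` triple; the backoff is
+    /// recorded in [`BackoffStorage`], and PX candidates are only considered when the
+    /// sender's score is above `accept_px_threshold`.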
+ async fn handle_prune( + &mut self, + peer_id: &NodeId, + prune_data: Vec<(TopicHash, Vec, Option)>, + ) { + tracing::debug!(peer=%peer_id, "Handling PRUNE message for peer"); + let (below_threshold, score) = + self.score_below_threshold(peer_id, |pst| pst.accept_px_threshold); + for (topic_hash, px, backoff) in prune_data { + self.remove_peer_from_mesh(peer_id, &topic_hash, backoff, true, Churn::Prune) + .await; + + if self.mesh.contains_key(&topic_hash) { + //connect to px peers + if !px.is_empty() { + // we ignore PX from peers with insufficient score + if below_threshold { + tracing::debug!( + peer=%peer_id, + %score, + topic=%topic_hash, + "PRUNE: ignoring PX from peer with insufficient score" + ); + continue; + } + + // NOTE: We cannot dial any peers from PX currently as we typically will not + // know their multiaddr. Until SignedRecords are spec'd this + // remains a stub. By default `config.prune_peers()` is set to zero and + // this is skipped. If the user modifies this, this will only be able to + // dial already known peers (from an external discovery mechanism for + // example). + if self.config.prune_peers() > 0 { + self.px_connect(px); + } + } + } + } + tracing::debug!(peer=%peer_id, "Completed PRUNE handling for peer"); + } + + fn px_connect(&mut self, mut px: Vec) { + let n = self.config.prune_peers(); + // Ignore peerInfo with no ID + // + //TODO: Once signed records are spec'd: Can we use peerInfo without any IDs if they have a + // signed peer record? + px.retain(|p| p.peer_id.is_some()); + if px.len() > n { + // only use at most prune_peers many random peers + let mut rng = thread_rng(); + px.partial_shuffle(&mut rng, n); + px = px.into_iter().take(n).collect(); + } + + for p in px { + // TODO: Once signed records are spec'd: extract signed peer record if given and handle + // it, see https://github.com/libp2p/specs/pull/217 + if let Some(peer_id) = p.peer_id { + // mark as px peer + self.px_peers.insert(peer_id); + + // dial peer + // TODO: dial + // self.events.push_back(ToSwarm::Dial { + // opts: DialOpts::peer_id(peer_id).build(), + // }); + } + } + } + + /// Applies some basic checks to whether this message is valid. Does not apply user validation + /// checks. 
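+    ///
+    /// In order: reject if the forwarding peer is blacklisted, reject if the original source
+    /// is blacklisted, and reject messages that claim to originate from ourselves while
+    /// arriving from another peer (unless `allow_self_origin` is set).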
+ fn message_is_valid( + &mut self, + msg_id: &MessageId, + raw_message: &mut RawMessage, + propagation_source: &NodeId, + ) -> bool { + tracing::debug!( + peer=%propagation_source, + message=%msg_id, + "Handling message from peer" + ); + + // Reject any message from a blacklisted peer + if self.blacklisted_peers.contains(propagation_source) { + tracing::debug!( + peer=%propagation_source, + "Rejecting message from blacklisted peer" + ); + if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { + peer_score.reject_message( + propagation_source, + msg_id, + &raw_message.topic, + RejectReason::BlackListedPeer, + ); + gossip_promises.reject_message(msg_id, &RejectReason::BlackListedPeer); + } + return false; + } + + // Also reject any message that originated from a blacklisted peer + if let Some(source) = raw_message.source.as_ref() { + if self.blacklisted_peers.contains(source) { + tracing::debug!( + peer=%propagation_source, + %source, + "Rejecting message from peer because of blacklisted source" + ); + self.handle_invalid_message( + propagation_source, + raw_message, + RejectReason::BlackListedSource, + ); + return false; + } + } + + // If we are not validating messages, assume this message is validated + // This will allow the message to be gossiped without explicitly calling + // `validate_message`. + if !self.config.validate_messages() { + raw_message.validated = true; + } + + // reject messages claiming to be from ourselves but not locally published + let self_published = !self.config.allow_self_origin() + && if let Some(own_id) = self.publish_config.get_own_id() { + own_id != propagation_source + && raw_message.source.as_ref().map_or(false, |s| s == own_id) + } else { + self.published_message_ids.contains(msg_id) + }; + + if self_published { + tracing::debug!( + message=%msg_id, + source=%propagation_source, + "Dropping message claiming to be from self but forwarded from source" + ); + self.handle_invalid_message(propagation_source, raw_message, RejectReason::SelfOrigin); + return false; + } + + true + } + + /// Handles a newly received [`RawMessage`]. + /// + /// Forwards the message to all peers in the mesh. + async fn handle_received_message( + &mut self, + mut raw_message: RawMessage, + propagation_source: &NodeId, + ) { + // Record the received metric + // TODO: + // if let Some(metrics) = self.metrics.as_mut() { + // metrics.msg_recvd_unfiltered(&raw_message.topic, raw_message.raw_protobuf_len()); + // } + + // Try and perform the data transform to the message. If it fails, consider it invalid. + let message = match self.data_transform.inbound_transform(raw_message.clone()) { + Ok(message) => message, + Err(e) => { + tracing::debug!("Invalid message. Transform error: {:?}", e); + // Reject the message and return + self.handle_invalid_message( + propagation_source, + &raw_message, + RejectReason::ValidationError(ValidationError::TransformFailed), + ); + return; + } + }; + + // Calculate the message id on the transformed data. + let msg_id = self.config.message_id(&message); + + // Check the validity of the message + // Peers get penalized if this message is invalid. We don't add it to the duplicate cache + // and instead continually penalize peers that repeatedly send this message. 
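+        // (E.g. if a second peer later relays the same invalid message, it fails
+        // validation again and that peer is penalized too; a duplicate-cache hit
+        // would have masked the repeat offence.)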
+ if !self.message_is_valid(&msg_id, &mut raw_message, propagation_source) { + return; + } + + if !self.duplicate_cache.insert(msg_id.clone()) { + tracing::debug!(message=%msg_id, "Message already received, ignoring"); + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.duplicated_message(propagation_source, &msg_id, &message.topic); + } + self.mcache.observe_duplicate(&msg_id, propagation_source); + return; + } + tracing::debug!( + message=%msg_id, + "Put message in duplicate_cache and resolve promises" + ); + + // Record the received message with the metrics + // TODO: + // if let Some(metrics) = self.metrics.as_mut() { + // metrics.msg_recvd(&message.topic); + // } + + // Tells score that message arrived (but is maybe not fully validated yet). + // Consider the message as delivered for gossip promises. + if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { + peer_score.validate_message(propagation_source, &msg_id, &message.topic); + gossip_promises.message_delivered(&msg_id); + } + + // Add the message to our memcache + self.mcache.put(&msg_id, raw_message.clone()); + + // Dispatch the message to the user if we are subscribed to any of the topics + if self.mesh.contains_key(&message.topic) { + tracing::debug!("Sending received message to user"); + self.events + .send(Event::Message { + propagation_source: *propagation_source, + message_id: msg_id.clone(), + message, + }) + .await; + } else { + tracing::debug!( + topic=%message.topic, + "Received message on a topic we are not subscribed to" + ); + return; + } + + // forward the message to mesh peers, if no validation is required + if !self.config.validate_messages() { + if self + .forward_msg( + &msg_id, + raw_message, + Some(propagation_source), + HashSet::new(), + ) + .is_err() + { + tracing::error!("Failed to forward message. Too large"); + } + tracing::debug!(message=%msg_id, "Completed message handling for message"); + } + } + + // Handles invalid messages received. + fn handle_invalid_message( + &mut self, + propagation_source: &NodeId, + raw_message: &RawMessage, + reject_reason: RejectReason, + ) { + if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { + // TODO: + // if let Some(metrics) = self.metrics.as_mut() { + // metrics.register_invalid_message(&raw_message.topic); + // } + + if let Ok(message) = self.data_transform.inbound_transform(raw_message.clone()) { + let message_id = self.config.message_id(&message); + + peer_score.reject_message( + propagation_source, + &message_id, + &message.topic, + reject_reason, + ); + + gossip_promises.reject_message(&message_id, &reject_reason); + } else { + // The message is invalid, we reject it ignoring any gossip promises. If a peer is + // advertising this message via an IHAVE and it's invalid it will be double + // penalized, one for sending us an invalid and again for breaking a promise. + peer_score.reject_invalid_message(propagation_source, &raw_message.topic); + } + } + } + + /// Handles received subscriptions. + async fn handle_received_subscriptions( + &mut self, + subscriptions: &[Subscription], + propagation_source: &NodeId, + ) { + tracing::debug!( + source=%propagation_source, + "Handling subscriptions: {:?}", + subscriptions, + ); + + let mut unsubscribed_peers = Vec::new(); + + let Some(subscribed_topics) = self.peer_topics.get_mut(propagation_source) else { + tracing::error!( + peer=%propagation_source, + "Subscription by unknown peer" + ); + return; + }; + + // Collect potential graft topics for the peer. 
+ let mut topics_to_graft = Vec::new(); + + // Notify the application about the subscription, after the grafts are sent. + let mut application_event = Vec::new(); + + let filtered_topics = match self + .subscription_filter + .filter_incoming_subscriptions(subscriptions, subscribed_topics) + { + Ok(topics) => topics, + Err(s) => { + tracing::error!( + peer=%propagation_source, + "Subscription filter error: {}; ignoring RPC from peer", + s + ); + return; + } + }; + + for subscription in filtered_topics { + // get the peers from the mapping, or insert empty lists if the topic doesn't exist + let topic_hash = &subscription.topic_hash; + let peer_list = self.topic_peers.entry(topic_hash.clone()).or_default(); + + match subscription.action { + SubscriptionAction::Subscribe => { + if peer_list.insert(*propagation_source) { + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Adding gossip peer to topic" + ); + } + + // add to the peer_topics mapping + subscribed_topics.insert(topic_hash.clone()); + + // if the mesh needs peers add the peer to the mesh + if !self.explicit_peers.contains(propagation_source) + && self.connected_peers.get(propagation_source).is_some() + && !Self::score_below_threshold_from_scores( + &self.peer_score, + propagation_source, + |_| 0.0, + ) + .0 + && !self + .backoffs + .is_backoff_with_slack(topic_hash, propagation_source) + { + if let Some(peers) = self.mesh.get_mut(topic_hash) { + if peers.len() < self.config.mesh_n_low() + && peers.insert(*propagation_source) + { + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Adding peer to the mesh for topic" + ); + // TODO: + // if let Some(m) = self.metrics.as_mut() { + // m.peers_included(topic_hash, Inclusion::Subscribed, 1) + // } + // send graft to the peer + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "Sending GRAFT to peer for topic" + ); + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.graft(propagation_source, topic_hash.clone()); + } + topics_to_graft.push(topic_hash.clone()); + } + } + } + // generates a subscription event to be polled + application_event.push(Event::Subscribed { + peer_id: *propagation_source, + topic: topic_hash.clone(), + }); + } + SubscriptionAction::Unsubscribe => { + if peer_list.remove(propagation_source) { + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Removing gossip peer from topic" + ); + } + + // remove topic from the peer_topics mapping + subscribed_topics.remove(topic_hash); + unsubscribed_peers.push((*propagation_source, topic_hash.clone())); + // generate an unsubscribe event to be polled + application_event.push(Event::Unsubscribed { + peer_id: *propagation_source, + topic: topic_hash.clone(), + }); + } + } + + // TODO: + // if let Some(m) = self.metrics.as_mut() { + // m.set_topic_peers(topic_hash, peer_list.len()); + // } + } + + // remove unsubscribed peers from the mesh if it exists + for (peer_id, topic_hash) in unsubscribed_peers { + self.remove_peer_from_mesh(&peer_id, &topic_hash, None, false, Churn::Unsub); + } + + // Potentially inform the handler if we have added this peer to a mesh for the first time. 
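+        // (A single JoinedMesh notification per peer is enough, however many topics
+        // were grafted; `peer_added_to_mesh` checks the peer's other meshes first.)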
+ let topics_joined = topics_to_graft.iter().collect::>(); + if !topics_joined.is_empty() { + peer_added_to_mesh( + *propagation_source, + topics_joined, + &self.mesh, + self.peer_topics.get(propagation_source), + &self.connected_peers, + ) + .await; + } + + // If we need to send grafts to peer, do so immediately, rather than waiting for the + // heartbeat. + for action in topics_to_graft + .into_iter() + .map(|topic_hash| ControlAction::Graft { topic_hash }) + .collect::>() + { + self.send_message(*propagation_source, RpcOut::Control(action)) + .await; + } + + // Notify the application of the subscriptions + for event in application_event { + self.events.send(event).await; + } + + tracing::trace!( + source=%propagation_source, + "Completed handling subscriptions from source" + ); + } + + /// Applies penalties to peers that did not respond to our IWANT requests. + fn apply_iwant_penalties(&mut self) { + if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { + for (peer, count) in gossip_promises.get_broken_promises() { + peer_score.add_penalty(&peer, count); + // TODO: + // if let Some(metrics) = self.metrics.as_mut() { + // metrics.register_score_penalty(Penalty::BrokenPromise); + // } + } + } + } + + /// Heartbeat function which shifts the memcache and updates the mesh. + fn heartbeat(&mut self) { + tracing::debug!("Starting heartbeat"); + let start = Instant::now(); + + self.heartbeat_ticks += 1; + + let mut to_graft = HashMap::new(); + let mut to_prune = HashMap::new(); + let mut no_px = HashSet::new(); + + // clean up expired backoffs + self.backoffs.heartbeat(); + + // clean up ihave counters + self.count_sent_iwant.clear(); + self.count_received_ihave.clear(); + + // apply iwant penalties + self.apply_iwant_penalties(); + + // check connections to explicit peers + if self.heartbeat_ticks % self.config.check_explicit_peers_ticks() == 0 { + for p in self.explicit_peers.clone() { + self.check_explicit_peer_connection(&p); + } + } + + // Cache the scores of all connected peers, and record metrics for current penalties. + let mut scores = HashMap::with_capacity(self.connected_peers.len()); + if let Some((peer_score, ..)) = &self.peer_score { + for peer_id in self.connected_peers.keys() { + scores.entry(peer_id); + // TODO + // .or_insert_with(|| peer_score.metric_score(peer_id, self.metrics.as_mut())); + } + } + + // maintain the mesh for each topic + for (topic_hash, peers) in self.mesh.iter_mut() { + let explicit_peers = &self.explicit_peers; + let backoffs = &self.backoffs; + let topic_peers = &self.topic_peers; + let outbound_peers = &self.outbound_peers; + + // drop all peers with negative score, without PX + // if there is at some point a stable retain method for BTreeSet the following can be + // written more efficiently with retain. 
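+            // (Example, assuming the usual gossipsub defaults of mesh_n = 6,
+            // mesh_n_low = 5 and mesh_n_high = 12: if two mesh peers drop below a
+            // score of 0.0 they are removed here, and the "mesh low" branch below
+            // refills the mesh back up to mesh_n.)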
+            let mut to_remove_peers = Vec::new();
+            for peer_id in peers.iter() {
+                let peer_score = *scores.get(peer_id).unwrap_or(&0.0);
+
+                // Record the score per mesh
+                // TODO:
+                // if let Some(metrics) = self.metrics.as_mut() {
+                //     metrics.observe_mesh_peers_score(topic_hash, peer_score);
+                // }
+
+                if peer_score < 0.0 {
+                    tracing::debug!(
+                        peer=%peer_id,
+                        score=%peer_score,
+                        topic=%topic_hash,
+                        "HEARTBEAT: Prune peer with negative score"
+                    );
+
+                    let current_topic = to_prune.entry(*peer_id).or_insert_with(Vec::new);
+                    current_topic.push(topic_hash.clone());
+                    no_px.insert(*peer_id);
+                    to_remove_peers.push(*peer_id);
+                }
+            }
+
+            // TODO:
+            // if let Some(m) = self.metrics.as_mut() {
+            //     m.peers_removed(topic_hash, Churn::BadScore, to_remove_peers.len())
+            // }
+
+            for peer_id in to_remove_peers {
+                peers.remove(&peer_id);
+            }
+
+            // too few peers - add some
+            if peers.len() < self.config.mesh_n_low() {
+                tracing::debug!(
+                    topic=%topic_hash,
+                    "HEARTBEAT: Mesh low. Topic contains: {} needs: {}",
+                    peers.len(),
+                    self.config.mesh_n_low()
+                );
+                // not enough peers - get mesh_n - current_length more
+                let desired_peers = self.config.mesh_n() - peers.len();
+                let peer_list = get_random_peers(
+                    topic_peers,
+                    &self.connected_peers,
+                    topic_hash,
+                    desired_peers,
+                    |peer| {
+                        !peers.contains(peer)
+                            && !explicit_peers.contains(peer)
+                            && !backoffs.is_backoff_with_slack(topic_hash, peer)
+                            && *scores.get(peer).unwrap_or(&0.0) >= 0.0
+                    },
+                );
+                for peer in &peer_list {
+                    let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new);
+                    current_topic.push(topic_hash.clone());
+                }
+                // update the mesh
+                tracing::debug!("Updating mesh, new mesh: {:?}", peer_list);
+                // TODO:
+                // if let Some(m) = self.metrics.as_mut() {
+                //     m.peers_included(topic_hash, Inclusion::Random, peer_list.len())
+                // }
+                peers.extend(peer_list);
+            }
+
+            // too many peers - remove some
+            if peers.len() > self.config.mesh_n_high() {
+                tracing::debug!(
+                    topic=%topic_hash,
+                    "HEARTBEAT: Mesh high. Topic contains: {} needs: {}",
+                    peers.len(),
+                    self.config.mesh_n_high()
+                );
+                let excess_peer_no = peers.len() - self.config.mesh_n();
+
+                // shuffle the peers and then sort by score ascending, beginning with the worst
+                let mut rng = thread_rng();
+                let mut shuffled = peers.iter().copied().collect::<Vec<_>>();
+                shuffled.shuffle(&mut rng);
+                shuffled.sort_by(|p1, p2| {
+                    let score_p1 = *scores.get(p1).unwrap_or(&0.0);
+                    let score_p2 = *scores.get(p2).unwrap_or(&0.0);
+
+                    score_p1.partial_cmp(&score_p2).unwrap_or(Ordering::Equal)
+                });
+                // shuffle everything except the last retain_scores many peers (the best ones)
+                shuffled[..peers.len() - self.config.retain_scores()].shuffle(&mut rng);
+
+                // count the total number of outbound peers
+                let mut outbound = {
+                    let outbound_peers = &self.outbound_peers;
+                    shuffled
+                        .iter()
+                        .filter(|p| outbound_peers.contains(*p))
+                        .count()
+                };
+
+                // remove the first excess_peer_no allowed (by outbound restrictions) peers,
+                // adding them to to_prune
+                let mut removed = 0;
+                for peer in shuffled {
+                    if removed == excess_peer_no {
+                        break;
+                    }
+                    if self.outbound_peers.contains(&peer) {
+                        if outbound <= self.config.mesh_outbound_min() {
+                            // do not remove any more outbound peers
+                            continue;
+                        } else {
+                            // an outbound peer gets removed
+                            outbound -= 1;
+                        }
+                    }
+
+                    // remove the peer
+                    peers.remove(&peer);
+                    let current_topic = to_prune.entry(peer).or_insert_with(Vec::new);
+                    current_topic.push(topic_hash.clone());
+                    removed += 1;
+                }
+
+                // TODO:
+                // if let Some(m) = self.metrics.as_mut() {
+                //     m.peers_removed(topic_hash, Churn::Excess, removed)
+                // }
+            }
+
+            // do we have enough outbound peers?
+            if peers.len() >= self.config.mesh_n_low() {
+                // count the number of outbound peers we have
+                let outbound = { peers.iter().filter(|p| outbound_peers.contains(*p)).count() };
+
+                // if we don't have enough outbound peers, graft some new ones
+                if outbound < self.config.mesh_outbound_min() {
+                    let needed = self.config.mesh_outbound_min() - outbound;
+                    let peer_list = get_random_peers(
+                        topic_peers,
+                        &self.connected_peers,
+                        topic_hash,
+                        needed,
+                        |peer| {
+                            !peers.contains(peer)
+                                && !explicit_peers.contains(peer)
+                                && !backoffs.is_backoff_with_slack(topic_hash, peer)
+                                && *scores.get(peer).unwrap_or(&0.0) >= 0.0
+                                && outbound_peers.contains(peer)
+                        },
+                    );
+                    for peer in &peer_list {
+                        let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new);
+                        current_topic.push(topic_hash.clone());
+                    }
+                    // update the mesh
+                    tracing::debug!("Updating mesh, new mesh: {:?}", peer_list);
+                    // TODO:
+                    // if let Some(m) = self.metrics.as_mut() {
+                    //     m.peers_included(topic_hash, Inclusion::Outbound, peer_list.len())
+                    // }
+                    peers.extend(peer_list);
+                }
+            }
+
+            // should we try to improve the mesh with opportunistic grafting?
+            if self.heartbeat_ticks % self.config.opportunistic_graft_ticks() == 0
+                && peers.len() > 1
+                && self.peer_score.is_some()
+            {
+                if let Some((_, thresholds, _, _)) = &self.peer_score {
+                    // Opportunistic grafting works as follows: we check the median score of peers
+                    // in the mesh; if this score is below the opportunistic_graft_threshold, we
+                    // select a few peers at random with score over the median.
+                    // The intention is to (slowly) improve an underperforming mesh by introducing
+                    // good scoring peers that may have been gossiping at us. This allows us to
+                    // get out of sticky situations where we are stuck with poor peers and also
+                    // recover from churn of good peers.
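+                    //
+                    // Worked example for the median computed below: an even-sized mesh
+                    // with scores [0.5, 1.0, 4.0, 9.0] has median (1.0 + 4.0) / 2 = 2.5;
+                    // an odd-sized one with [0.5, 1.0, 4.0] has median 1.0.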
+ + // now compute the median peer score in the mesh + let mut peers_by_score: Vec<_> = peers.iter().collect(); + peers_by_score.sort_by(|p1, p2| { + let p1_score = *scores.get(p1).unwrap_or(&0.0); + let p2_score = *scores.get(p2).unwrap_or(&0.0); + p1_score.partial_cmp(&p2_score).unwrap_or(Equal) + }); + + let middle = peers_by_score.len() / 2; + let median = if peers_by_score.len() % 2 == 0 { + let sub_middle_peer = *peers_by_score + .get(middle - 1) + .expect("middle < vector length and middle > 0 since peers.len() > 0"); + let sub_middle_score = *scores.get(sub_middle_peer).unwrap_or(&0.0); + let middle_peer = + *peers_by_score.get(middle).expect("middle < vector length"); + let middle_score = *scores.get(middle_peer).unwrap_or(&0.0); + + (sub_middle_score + middle_score) * 0.5 + } else { + *scores + .get(*peers_by_score.get(middle).expect("middle < vector length")) + .unwrap_or(&0.0) + }; + + // if the median score is below the threshold, select a better peer (if any) and + // GRAFT + if median < thresholds.opportunistic_graft_threshold { + let peer_list = get_random_peers( + topic_peers, + &self.connected_peers, + topic_hash, + self.config.opportunistic_graft_peers(), + |peer_id| { + !peers.contains(peer_id) + && !explicit_peers.contains(peer_id) + && !backoffs.is_backoff_with_slack(topic_hash, peer_id) + && *scores.get(peer_id).unwrap_or(&0.0) > median + }, + ); + for peer in &peer_list { + let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new); + current_topic.push(topic_hash.clone()); + } + // update the mesh + tracing::debug!( + topic=%topic_hash, + "Opportunistically graft in topic with peers {:?}", + peer_list + ); + // TODO: + // if let Some(m) = self.metrics.as_mut() { + // m.peers_included(topic_hash, Inclusion::Random, peer_list.len()) + // } + peers.extend(peer_list); + } + } + } + // Register the final count of peers in the mesh + // TODO: + // if let Some(m) = self.metrics.as_mut() { + // m.set_mesh_peers(topic_hash, peers.len()) + // } + } + + // remove expired fanout topics + { + let fanout = &mut self.fanout; // help the borrow checker + let fanout_ttl = self.config.fanout_ttl(); + self.fanout_last_pub.retain(|topic_hash, last_pub_time| { + if *last_pub_time + fanout_ttl < Instant::now() { + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Fanout topic removed due to timeout" + ); + fanout.remove(topic_hash); + return false; + } + true + }); + } + + // maintain fanout + // check if our peers are still a part of the topic + for (topic_hash, peers) in self.fanout.iter_mut() { + let mut to_remove_peers = Vec::new(); + let publish_threshold = match &self.peer_score { + Some((_, thresholds, _, _)) => thresholds.publish_threshold, + _ => 0.0, + }; + for peer in peers.iter() { + // is the peer still subscribed to the topic? + let peer_score = *scores.get(peer).unwrap_or(&0.0); + match self.peer_topics.get(peer) { + Some(topics) => { + if !topics.contains(topic_hash) || peer_score < publish_threshold { + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Peer removed from fanout for topic" + ); + to_remove_peers.push(*peer); + } + } + None => { + // remove if the peer has disconnected + to_remove_peers.push(*peer); + } + } + } + for to_remove in to_remove_peers { + peers.remove(&to_remove); + } + + // not enough peers + if peers.len() < self.config.mesh_n() { + tracing::debug!( + "HEARTBEAT: Fanout low. 
Contains: {:?} needs: {:?}", + peers.len(), + self.config.mesh_n() + ); + let needed_peers = self.config.mesh_n() - peers.len(); + let explicit_peers = &self.explicit_peers; + let new_peers = get_random_peers( + &self.topic_peers, + &self.connected_peers, + topic_hash, + needed_peers, + |peer_id| { + !peers.contains(peer_id) + && !explicit_peers.contains(peer_id) + && *scores.get(peer_id).unwrap_or(&0.0) < publish_threshold + }, + ); + peers.extend(new_peers); + } + } + + if self.peer_score.is_some() { + tracing::trace!("Mesh message deliveries: {:?}", { + self.mesh + .iter() + .map(|(t, peers)| { + ( + t.clone(), + peers + .iter() + .map(|p| { + ( + *p, + self.peer_score + .as_ref() + .expect("peer_score.is_some()") + .0 + .mesh_message_deliveries(p, t) + .unwrap_or(0.0), + ) + }) + .collect::>(), + ) + }) + .collect::>>() + }) + } + + self.emit_gossip(); + + // send graft/prunes + if !to_graft.is_empty() | !to_prune.is_empty() { + self.send_graft_prune(to_graft, to_prune, no_px); + } + + // piggyback pooled control messages + self.flush_control_pool(); + + // shift the memcache + self.mcache.shift(); + + tracing::debug!("Completed Heartbeat"); + // TODO: + // if let Some(metrics) = self.metrics.as_mut() { + // let duration = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX); + // metrics.observe_heartbeat_duration(duration); + // } + } + + /// Emits gossip - Send IHAVE messages to a random set of gossip peers. This is applied to mesh + /// and fanout peers + fn emit_gossip(&mut self) { + let mut rng = thread_rng(); + for (topic_hash, peers) in self.mesh.iter().chain(self.fanout.iter()) { + let mut message_ids = self.mcache.get_gossip_message_ids(topic_hash); + if message_ids.is_empty() { + continue; + } + + // if we are emitting more than GossipSubMaxIHaveLength message_ids, truncate the list + if message_ids.len() > self.config.max_ihave_length() { + // we do the truncation (with shuffling) per peer below + tracing::debug!( + "too many messages for gossip; will truncate IHAVE list ({} messages)", + message_ids.len() + ); + } else { + // shuffle to emit in random order + message_ids.shuffle(&mut rng); + } + + // dynamic number of peers to gossip based on `gossip_factor` with minimum `gossip_lazy` + let n_map = |m| { + max( + self.config.gossip_lazy(), + (self.config.gossip_factor() * m as f64) as usize, + ) + }; + // get gossip_lazy random peers + let to_msg_peers = get_random_peers_dynamic( + &self.topic_peers, + &self.connected_peers, + topic_hash, + n_map, + |peer| { + !peers.contains(peer) + && !self.explicit_peers.contains(peer) + && !self.score_below_threshold(peer, |ts| ts.gossip_threshold).0 + }, + ); + + tracing::debug!("Gossiping IHAVE to {} peers", to_msg_peers.len()); + + for peer in to_msg_peers { + let mut peer_message_ids = message_ids.clone(); + + if peer_message_ids.len() > self.config.max_ihave_length() { + // We do this per peer so that we emit a different set for each peer. + // we have enough redundancy in the system that this will significantly increase + // the message coverage when we do truncate. + peer_message_ids.partial_shuffle(&mut rng, self.config.max_ihave_length()); + peer_message_ids.truncate(self.config.max_ihave_length()); + } + + // send an IHAVE message + Self::control_pool_add( + &mut self.control_pool, + peer, + ControlAction::IHave { + topic_hash: topic_hash.clone(), + message_ids: peer_message_ids, + }, + ); + } + } + } + + /// Handles multiple GRAFT/PRUNE messages and coalesces them into chunked gossip control + /// messages. 
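+    ///
+    /// A sketch of the expected inputs (names are illustrative):
+    /// ```ignore
+    /// // per-peer topic lists; a peer may appear in both maps at once
+    /// let mut to_graft: HashMap<NodeId, Vec<TopicHash>> = HashMap::new();
+    /// let mut to_prune: HashMap<NodeId, Vec<TopicHash>> = HashMap::new();
+    /// to_graft.entry(peer).or_default().push(topic_a.clone());
+    /// to_prune.entry(peer).or_default().push(topic_b.clone());
+    /// // `no_px` lists peers whose PRUNEs must not carry peer exchange
+    /// behaviour.send_graft_prune(to_graft, to_prune, HashSet::new()).await;
+    /// ```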
+    async fn send_graft_prune(
+        &mut self,
+        to_graft: HashMap<NodeId, Vec<TopicHash>>,
+        mut to_prune: HashMap<NodeId, Vec<TopicHash>>,
+        no_px: HashSet<NodeId>,
+    ) {
+        // handle the grafts and overlapping prunes per peer
+        for (peer, topics) in to_graft.into_iter() {
+            for topic in &topics {
+                // inform scoring of graft
+                if let Some((peer_score, ..)) = &mut self.peer_score {
+                    peer_score.graft(&peer, topic.clone());
+                }
+
+                // If the peer did not previously exist in any mesh, inform the handler
+                peer_added_to_mesh(
+                    peer,
+                    vec![topic],
+                    &self.mesh,
+                    self.peer_topics.get(&peer),
+                    &self.connected_peers,
+                )
+                .await;
+            }
+            let control_msgs = topics.iter().map(|topic_hash| ControlAction::Graft {
+                topic_hash: topic_hash.clone(),
+            });
+
+            // If there are prunes associated with the same peer add them.
+            // NOTE: In this case a peer has been added to a topic mesh, and removed from another.
+            // It therefore must be in at least one mesh and we do not need to inform the handler
+            // of its removal from another.
+
+            // The following prunes are not due to unsubscribing.
+            let prunes = to_prune
+                .remove(&peer)
+                .into_iter()
+                .flatten()
+                .map(|topic_hash| {
+                    self.make_prune(
+                        &topic_hash,
+                        &peer,
+                        self.config.do_px() && !no_px.contains(&peer),
+                        false,
+                    )
+                });
+
+            // send the control messages
+            for msg in control_msgs.chain(prunes).collect::<Vec<_>>() {
+                self.send_message(peer, RpcOut::Control(msg)).await;
+            }
+        }
+
+        // handle the remaining prunes
+        // The following prunes are not due to unsubscribing.
+        for (peer, topics) in to_prune.iter() {
+            for topic_hash in topics {
+                let prune = self.make_prune(
+                    topic_hash,
+                    peer,
+                    self.config.do_px() && !no_px.contains(peer),
+                    false,
+                );
+                self.send_message(*peer, RpcOut::Control(prune)).await;
+
+                // inform the handler
+                peer_removed_from_mesh(
+                    *peer,
+                    topic_hash,
+                    &self.mesh,
+                    self.peer_topics.get(peer),
+                    &self.connected_peers,
+                )
+                .await;
+            }
+        }
+    }
+
+    /// Helper function which forwards a message to mesh\[topic\] peers.
+    ///
+    /// Returns true if at least one peer was messaged.
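+    ///
+    /// The recipient set is, informally,
+    /// ```ignore
+    /// (subscribed explicit peers ∪ mesh[topic])
+    ///     \ ({propagation_source, original author} ∪ originating_peers)
+    /// ```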
+ #[allow(clippy::unnecessary_wraps)] + fn forward_msg( + &mut self, + msg_id: &MessageId, + message: RawMessage, + propagation_source: Option<&NodeId>, + originating_peers: HashSet, + ) -> Result { + // message is fully validated inform peer_score + if let Some((peer_score, ..)) = &mut self.peer_score { + if let Some(peer) = propagation_source { + peer_score.deliver_message(peer, msg_id, &message.topic); + } + } + + tracing::debug!(message=%msg_id, "Forwarding message"); + let mut recipient_peers = HashSet::new(); + + { + // Populate the recipient peers mapping + + // Add explicit peers + for peer_id in &self.explicit_peers { + if let Some(topics) = self.peer_topics.get(peer_id) { + if Some(peer_id) != propagation_source + && !originating_peers.contains(peer_id) + && Some(peer_id) != message.source.as_ref() + && topics.contains(&message.topic) + { + recipient_peers.insert(*peer_id); + } + } + } + + // add mesh peers + let topic = &message.topic; + // mesh + if let Some(mesh_peers) = self.mesh.get(topic) { + for peer_id in mesh_peers { + if Some(peer_id) != propagation_source + && !originating_peers.contains(peer_id) + && Some(peer_id) != message.source.as_ref() + { + recipient_peers.insert(*peer_id); + } + } + } + } + + // forward the message to peers + if !recipient_peers.is_empty() { + let event = RpcOut::Forward(message.clone()); + + for peer in recipient_peers.iter() { + tracing::debug!(%peer, message=%msg_id, "Sending message to peer"); + self.send_message(*peer, event.clone()); + } + tracing::debug!("Completed forwarding message"); + Ok(true) + } else { + Ok(false) + } + } + + fn build_raw_message( + &mut self, + topic: TopicHash, + data: Vec, + ) -> Result { + match &mut self.publish_config { + PublishConfig::Signing { + ref keypair, + author, + inline_key, + last_seq_no, + } => { + let sequence_number = last_seq_no.next(); + + let signature = { + let message = types::Message { + source: Some(*author), + data: data.clone(), + sequence_number: Some(sequence_number), + topic: topic.clone(), + }; + + let buf = postcard::to_stdvec(&message).unwrap(); + + // the signature is over the bytes "libp2p-pubsub:" + let mut signature_bytes = SIGNING_PREFIX.to_vec(); + signature_bytes.extend_from_slice(&buf); + Some(keypair.sign(&signature_bytes)) + }; + + Ok(RawMessage { + source: Some(*author), + data, + // To be interoperable with the go-implementation this is treated as a 64-bit + // big-endian uint. + sequence_number: Some(sequence_number), + topic, + signature: signature.map(|sig| sig.to_vec()), + validated: true, // all published messages are valid + }) + } + PublishConfig::Author(peer_id) => { + Ok(RawMessage { + source: Some(*peer_id), + data, + // To be interoperable with the go-implementation this is treated as a 64-bit + // big-endian uint. + sequence_number: Some(rand::random()), + topic, + signature: None, + validated: true, // all published messages are valid + }) + } + PublishConfig::RandomAuthor => { + Ok(RawMessage { + source: Some(SecretKey::generate().public()), + data, + // To be interoperable with the go-implementation this is treated as a 64-bit + // big-endian uint. + sequence_number: Some(rand::random()), + topic, + signature: None, + validated: true, // all published messages are valid + }) + } + PublishConfig::Anonymous => { + Ok(RawMessage { + source: None, + data, + // To be interoperable with the go-implementation this is treated as a 64-bit + // big-endian uint. 
+ sequence_number: None, + topic, + signature: None, + validated: true, // all published messages are valid + }) + } + } + } + + // adds a control action to control_pool + fn control_pool_add( + control_pool: &mut HashMap>, + peer: NodeId, + control: ControlAction, + ) { + control_pool.entry(peer).or_default().push(control); + } + + /// Takes each control action mapping and turns it into a message + fn flush_control_pool(&mut self) { + for (peer, controls) in self.control_pool.drain().collect::>() { + for msg in controls { + self.send_message(peer, RpcOut::Control(msg)); + } + } + + // This clears all pending IWANT messages + self.pending_iwant_msgs.clear(); + } + + /// Send a [`RpcOut`] message to a peer. This will wrap the message in an arc if it + /// is not already an arc. + async fn send_message(&mut self, peer_id: NodeId, rpc: RpcOut) { + // TODO: + // if let Some(m) = self.metrics.as_mut() { + // if let RpcOut::Publish(ref message) | RpcOut::Forward(ref message) = rpc { + // // register bytes sent on the internal metrics. + // m.msg_sent(&message.topic, message.raw_protobuf_len()); + // } + // } + + if let Some(conn) = self.connected_peers.get(&peer_id) { + conn.connection_sender.send(HandlerIn::Message(rpc)).await; + } else { + // TODO: handle unknown peer + } + } + + async fn accept_connection(&mut self, peer_id: NodeId, endpoint: Connection, is_dialer: bool) { + // TODO: track + let other_established = 0; + + if is_dialer && other_established == 0 && !self.px_peers.contains(&peer_id) { + // The first connection is outbound and it is not a peer from peer exchange => mark + // it as outbound peer + self.outbound_peers.insert(peer_id); + } + + // Add the IP to the peer scoring system + if let Some((peer_score, ..)) = &mut self.peer_score { + let ip = endpoint.remote_address().ip(); + peer_score.add_ip(&peer_id, ip); + } + let connection_id = endpoint.stable_id(); + + let (connection_sender, connection_receiver) = mpsc::channel(128); + let (handler_sender, handler_receiver) = mpsc::channel(128); + let handler = Handler::new( + self.config.protocol_config(), + handler_sender, + connection_receiver, + ); + let connection_task = tokio::task::spawn(async move { + // TODO: spawn connection handler + let handler = handler; + // TODO: on connection close, send message to behaviour, to trigger `on_connection_closed` + }); + + // By default we assume a peer is only a floodsub peer. + // + // The protocol negotiation occurs once a message is sent/received. Once this happens we + // update the type of peer that this is in order to determine which kind of routing should + // occur. + self.connected_peers + .entry(peer_id) + .or_insert(PeerConnections { + connections: vec![], + connection_task, + connection_sender, + }) + .connections + .push(connection_id); + + if other_established > 0 { + return; // Not our first connection to this peer, hence nothing to do. + } + + // Insert an empty set of the topics of this peer until known. + self.peer_topics.insert(peer_id, Default::default()); + + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.add_peer(peer_id); + } + + // Ignore connections from blacklisted peers. + if self.blacklisted_peers.contains(&peer_id) { + tracing::debug!(peer=%peer_id, "Ignoring connection from blacklisted peer"); + return; + } + + tracing::debug!(peer=%peer_id, "New peer connected"); + // We need to send our subscriptions to the newly-connected node. 
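+        // (E.g. a node subscribed to "blocks" and "txs" immediately sends two
+        // RpcOut::Subscribe frames so the new peer can start routing gossip to us.)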
+ for topic_hash in self.mesh.clone().into_keys() { + self.send_message(peer_id, RpcOut::Subscribe(topic_hash)) + .await; + } + } + + async fn on_connection_closed(&mut self, peer_id: NodeId, connection_id: usize, ip: IpAddr) { + // TODO: actually track + let remaining_established = 0; + + // Remove IP from peer scoring system + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.remove_ip(&peer_id, &ip); + } + + if remaining_established != 0 { + // Remove the connection from the list + if let Some(connections) = self.connected_peers.get_mut(&peer_id) { + let index = connections + .connections + .iter() + .position(|v| v == &connection_id) + .expect("Previously established connection to peer must be present"); + connections.connections.remove(index); + + // If there are more connections and this peer is in a mesh, inform the first connection + // handler. + if !connections.connections.is_empty() { + if let Some(topics) = self.peer_topics.get(&peer_id) { + for topic in topics { + if let Some(mesh_peers) = self.mesh.get(topic) { + if mesh_peers.contains(&peer_id) { + // TODO: await + connections + .connection_sender + .send(HandlerIn::JoinedMesh) + .await; + // TODO: multiple connections? + // handler: NotifyHandler::One(connections.connections[0]), + break; + } + } + } + } + } + } + } else { + // remove from mesh, topic_peers, peer_topic and the fanout + tracing::debug!(peer=%peer_id, "Peer disconnected"); + { + let Some(topics) = self.peer_topics.get(&peer_id) else { + debug_assert!( + self.blacklisted_peers.contains(&peer_id), + "Disconnected node not in connected list" + ); + return; + }; + + // remove peer from all mappings + for topic in topics { + // check the mesh for the topic + if let Some(mesh_peers) = self.mesh.get_mut(topic) { + // check if the peer is in the mesh and remove it + if mesh_peers.remove(&peer_id) { + // TODO: + // if let Some(m) = self.metrics.as_mut() { + // m.peers_removed(topic, Churn::Dc, 1); + // m.set_mesh_peers(topic, mesh_peers.len()); + // } + }; + } + + // remove from topic_peers + if let Some(peer_list) = self.topic_peers.get_mut(topic) { + if !peer_list.remove(&peer_id) { + // debugging purposes + tracing::warn!( + peer=%peer_id, + "Disconnected node: peer not in topic_peers" + ); + } + // TODO: + // if let Some(m) = self.metrics.as_mut() { + // m.set_topic_peers(topic, peer_list.len()) + // } + } else { + tracing::warn!( + peer=%peer_id, + topic=%topic, + "Disconnected node: peer with topic not in topic_peers" + ); + } + + // remove from fanout + self.fanout + .get_mut(topic) + .map(|peers| peers.remove(&peer_id)); + } + } + + // Forget px and outbound status for this peer + self.px_peers.remove(&peer_id); + self.outbound_peers.remove(&peer_id); + + // Remove peer from peer_topics and connected_peers + // NOTE: It is possible the peer has already been removed from all mappings if it does not + // support the protocol. + self.peer_topics.remove(&peer_id); + + // If metrics are enabled, register the disconnection of a peer based on its protocol. + // TODO: + // if let Some(metrics) = self.metrics.as_mut() { + // metrics.peer_protocol_disconnected(); + // } + + self.connected_peers.remove(&peer_id); + + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.remove_peer(&peer_id); + } + } + } + + // TODO: add notify address changes from iroh-net + // fn on_address_change( + // &mut self, + // AddressChange { + // peer_id, + // old: endpoint_old, + // new: endpoint_new, + // .. 
+ // }: AddressChange, + // ) { + // // Exchange IP in peer scoring system + // if let Some((peer_score, ..)) = &mut self.peer_score { + // if let Some(ip) = get_ip_addr(endpoint_old.get_remote_address()) { + // peer_score.remove_ip(&peer_id, &ip); + // } else { + // tracing::trace!( + // peer=%&peer_id, + // "Couldn't extract ip from endpoint of peer with endpoint {:?}", + // endpoint_old + // ) + // } + // if let Some(ip) = get_ip_addr(endpoint_new.get_remote_address()) { + // peer_score.add_ip(&peer_id, ip); + // } else { + // tracing::trace!( + // peer=%peer_id, + // "Couldn't extract ip from endpoint of peer with endpoint {:?}", + // endpoint_new + // ) + // } + // } + // } + + async fn on_handler_event(&mut self, propagation_source: NodeId, event: HandlerEvent) { + match event { + HandlerEvent::Message { + rpc, + invalid_messages, + } => { + // Handle the gossipsub RPC + + // Handle subscriptions + // Update connected peers topics + if !rpc.subscriptions.is_empty() { + self.handle_received_subscriptions(&rpc.subscriptions, &propagation_source) + .await; + } + + // Check if peer is graylisted in which case we ignore the event + if let (true, _) = + self.score_below_threshold(&propagation_source, |pst| pst.graylist_threshold) + { + tracing::debug!(peer=%propagation_source, "RPC Dropped from greylisted peer"); + return; + } + + // Handle any invalid messages from this peer + if self.peer_score.is_some() { + for (raw_message, validation_error) in invalid_messages { + self.handle_invalid_message( + &propagation_source, + &raw_message, + RejectReason::ValidationError(validation_error), + ) + } + } else { + // log the invalid messages + for (message, validation_error) in invalid_messages { + tracing::warn!( + peer=%propagation_source, + source=?message.source, + "Invalid message from peer. Reason: {:?}", + validation_error, + ); + } + } + + // Handle messages + for (count, raw_message) in rpc.messages.into_iter().enumerate() { + // Only process the amount of messages the configuration allows. + if self.config.max_messages_per_rpc().is_some() + && Some(count) >= self.config.max_messages_per_rpc() + { + tracing::warn!("Received more messages than permitted. Ignoring further messages. 
Processed: {}", count); + break; + } + self.handle_received_message(raw_message, &propagation_source) + .await; + } + + // Handle control messages + // group some control messages, this minimises SendEvents (code is simplified to handle each event at a time however) + let mut ihave_msgs = vec![]; + let mut graft_msgs = vec![]; + let mut prune_msgs = vec![]; + for control_msg in rpc.control_msgs { + match control_msg { + ControlAction::IHave { + topic_hash, + message_ids, + } => { + ihave_msgs.push((topic_hash, message_ids)); + } + ControlAction::IWant { message_ids } => { + self.handle_iwant(&propagation_source, message_ids).await; + } + ControlAction::Graft { topic_hash } => graft_msgs.push(topic_hash), + ControlAction::Prune { + topic_hash, + peers, + backoff, + } => prune_msgs.push((topic_hash, peers, backoff)), + } + } + if !ihave_msgs.is_empty() { + self.handle_ihave(&propagation_source, ihave_msgs); + } + if !graft_msgs.is_empty() { + self.handle_graft(&propagation_source, graft_msgs).await; + } + if !prune_msgs.is_empty() { + self.handle_prune(&propagation_source, prune_msgs).await; + } + } + } + } + + async fn run(mut self, mut receiver: mpsc::Receiver) { + // TODO: handle events from handler + // if let Some(event) = self.events.pop_front() { + // return Poll::Ready(event); + // } + + async fn peer_score_interval( + peer_score: &mut Option<(PeerScore, PeerScoreThresholds, Ticker, GossipPromises)>, + ) -> Option<&mut PeerScore> { + match peer_score { + Some((peer_score, _, interval, _)) => { + interval.next().await; + Some(peer_score) + } + None => None, + } + } + + loop { + tokio::select! { + biased; + Some(msg) = receiver.recv() => { + match msg { + ActorMessage::Connect { node_addr, response } => { + let res = self.connect(node_addr); + response.send(res).ok(); + } + } + } + // update scores + Some(peer_score) = peer_score_interval(&mut self.peer_score) => { + peer_score.refresh_scores(); + } + Some(_) = self.heartbeat.next() => { + self.heartbeat(); + } + else => {} + } + } + } +} + +impl ProtocolHandler for Behaviour { + fn accept( + self: std::sync::Arc, + conn: Connecting, + ) -> futures::future::BoxFuture<'static, anyhow::Result<()>> { + Box::pin(async move { todo!() }) + } +} + +/// This is called when peers are added to any mesh. It checks if the peer existed +/// in any other mesh. If this is the first mesh they have joined, it queues a message to notify +/// the appropriate connection handler to maintain a connection. +async fn peer_added_to_mesh( + peer_id: NodeId, + new_topics: Vec<&TopicHash>, + mesh: &HashMap>, + known_topics: Option<&BTreeSet>, + connections: &HashMap, +) { + // Ensure there is an active connection + let connection_id = { + let conn = connections.get(&peer_id).expect("To be connected to peer."); + assert!( + !conn.connections.is_empty(), + "Must have at least one connection" + ); + conn.connections[0] + }; + + if let Some(topics) = known_topics { + for topic in topics { + if !new_topics.contains(&topic) { + if let Some(mesh_peers) = mesh.get(topic) { + if mesh_peers.contains(&peer_id) { + // the peer is already in a mesh for another topic + return; + } + } + } + } + } + // This is the first mesh the peer has joined, inform the handler + if let Some(conn) = connections.get(&peer_id) { + // TODO: handle multiple connections + // NotifyHandler::One(connection_id), + conn.connection_sender.send(HandlerIn::JoinedMesh).await; + } else { + // TODO: handle unknown peer + } +} + +/// This is called when peers are removed from a mesh. 
It checks if the peer exists +/// in any other mesh. If this is the last mesh they have joined, we return true, in order to +/// notify the handler to no longer maintain a connection. +async fn peer_removed_from_mesh( + peer_id: NodeId, + old_topic: &TopicHash, + mesh: &HashMap>, + known_topics: Option<&BTreeSet>, + connections: &HashMap, +) { + // Ensure there is an active connection + let connection_id = connections + .get(&peer_id) + .expect("To be connected to peer.") + .connections + .first() + .expect("There should be at least one connection to a peer."); + + if let Some(topics) = known_topics { + for topic in topics { + if topic != old_topic { + if let Some(mesh_peers) = mesh.get(topic) { + if mesh_peers.contains(&peer_id) { + // the peer exists in another mesh still + return; + } + } + } + } + } + // The peer is not in any other mesh, inform the handler + if let Some(conn) = connections.get(&peer_id) { + // TODO: handle multiple connections + // NotifyHandler::One(connection_id), + conn.connection_sender.send(HandlerIn::LeftMesh).await; + } else { + // TODO: handle unknown peer + } +} + +/// Helper function to get a subset of random gossipsub peers for a `topic_hash` +/// filtered by the function `f`. The number of peers to get equals the output of `n_map` +/// that gets as input the number of filtered peers. +fn get_random_peers_dynamic( + topic_peers: &HashMap>, + connected_peers: &HashMap, + topic_hash: &TopicHash, + // maps the number of total peers to the number of selected peers + n_map: impl Fn(usize) -> usize, + mut f: impl FnMut(&NodeId) -> bool, +) -> BTreeSet { + let mut gossip_peers = match topic_peers.get(topic_hash) { + // if they exist, filter the peers by `f` + Some(peer_list) => peer_list.iter().copied().filter(f).collect(), + None => Vec::new(), + }; + + // if we have less than needed, return them + let n = n_map(gossip_peers.len()); + if gossip_peers.len() <= n { + tracing::debug!("RANDOM PEERS: Got {:?} peers", gossip_peers.len()); + return gossip_peers.into_iter().collect(); + } + + // we have more peers than needed, shuffle them and return n of them + let mut rng = thread_rng(); + gossip_peers.partial_shuffle(&mut rng, n); + + tracing::debug!("RANDOM PEERS: Got {:?} peers", n); + + gossip_peers.into_iter().take(n).collect() +} + +/// Helper function to get a set of `n` random gossipsub peers for a `topic_hash` +/// filtered by the function `f`. +fn get_random_peers( + topic_peers: &HashMap>, + connected_peers: &HashMap, + topic_hash: &TopicHash, + n: usize, + f: impl FnMut(&NodeId) -> bool, +) -> BTreeSet { + get_random_peers_dynamic(topic_peers, connected_peers, topic_hash, |_| n, f) +} + +/// Validates the combination of signing, privacy and message validation to ensure the +/// configuration will not reject published messages. +fn validate_config( + authenticity: &MessageAuthenticity, + validation_mode: &ValidationMode, +) -> Result<(), &'static str> { + match validation_mode { + ValidationMode::Anonymous => { + if authenticity.is_signing() { + return Err("Cannot enable message signing with an Anonymous validation mode. Consider changing either the ValidationMode or MessageAuthenticity"); + } + + if !authenticity.is_anonymous() { + return Err("Published messages contain an author but incoming messages with an author will be rejected. 
Consider adjusting the validation or privacy settings in the config"); + } + } + ValidationMode::Strict => { + if !authenticity.is_signing() { + return Err( + "Messages will be + published unsigned and incoming unsigned messages will be rejected. Consider adjusting + the validation or privacy settings in the config" + ); + } + } + _ => {} + } + Ok(()) +} + +#[cfg(test)] +mod local_test { + use super::*; + use crate::IdentTopic; + use quickcheck::*; + + fn test_message() -> RawMessage { + RawMessage { + source: Some(SecretKey::generate().public()), + data: vec![0; 100], + sequence_number: None, + topic: TopicHash::from_raw("test_topic"), + signature: None, + validated: false, + } + } + + fn test_control() -> ControlAction { + ControlAction::IHave { + topic_hash: IdentTopic::new("TestTopic").hash(), + message_ids: vec![MessageId(vec![12u8]); 5], + } + } + + impl Arbitrary for RpcOut { + fn arbitrary(g: &mut Gen) -> Self { + match u8::arbitrary(g) % 5 { + 0 => RpcOut::Subscribe(IdentTopic::new("TestTopic").hash()), + 1 => RpcOut::Unsubscribe(IdentTopic::new("TestTopic").hash()), + 2 => RpcOut::Publish(test_message()), + 3 => RpcOut::Forward(test_message()), + 4 => RpcOut::Control(test_control()), + _ => panic!("outside range"), + } + } + } +} diff --git a/gossipsub/src/behaviour/tests.rs b/gossipsub/src/behaviour/tests.rs new file mode 100644 index 0000000..f186042 --- /dev/null +++ b/gossipsub/src/behaviour/tests.rs @@ -0,0 +1,5089 @@ +// Collection of tests for the gossipsub network behaviour + +// use super::*; +// use crate::subscription_filter::WhitelistSubscriptionFilter; +// use crate::{config::ConfigBuilder, types::Rpc, IdentTopic as Topic}; +// use std::net::Ipv4Addr; +// use byteorder::{BigEndian, ByteOrder}; +// use rand::Rng; +// use std::thread::sleep; + +// #[derive(Default, Debug)] +// struct InjectNodes +// // TODO: remove trait bound Default when this issue is fixed: +// // https://github.com/colin-kiegel/rust-derive-builder/issues/93 +// where +// D: DataTransform + Default + Clone + Send + 'static, +// F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +// { +// peer_no: usize, +// topics: Vec, +// to_subscribe: bool, +// gs_config: Config, +// explicit: usize, +// outbound: usize, +// scoring: Option<(PeerScoreParams, PeerScoreThresholds)>, +// data_transform: D, +// subscription_filter: F, +// } + +// impl InjectNodes +// where +// D: DataTransform + Default + Clone + Send + 'static, +// F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +// { +// pub(crate) fn create_network(self) -> (Behaviour, Vec, Vec) { +// let keypair = libp2p_identity::Keypair::generate_ed25519(); +// // create a gossipsub struct +// let mut gs: Behaviour = Behaviour::new_with_subscription_filter_and_transform( +// MessageAuthenticity::Signed(keypair), +// self.gs_config, +// None, +// self.subscription_filter, +// self.data_transform, +// ) +// .unwrap(); + +// if let Some((scoring_params, scoring_thresholds)) = self.scoring { +// gs.with_peer_score(scoring_params, scoring_thresholds) +// .unwrap(); +// } + +// let mut topic_hashes = vec![]; + +// // subscribe to the topics +// for t in self.topics { +// let topic = Topic::new(t); +// gs.subscribe(&topic).unwrap(); +// topic_hashes.push(topic.hash().clone()); +// } + +// // build and connect peer_no random peers +// let mut peers = vec![]; + +// let empty = vec![]; +// for i in 0..self.peer_no { +// peers.push(add_peer( +// &mut gs, +// if self.to_subscribe { +// &topic_hashes +// } else { +// &empty +// }, 
+// i < self.outbound, +// i < self.explicit, +// )); +// } + +// (gs, peers, topic_hashes) +// } + +// fn peer_no(mut self, peer_no: usize) -> Self { +// self.peer_no = peer_no; +// self +// } + +// fn topics(mut self, topics: Vec) -> Self { +// self.topics = topics; +// self +// } + +// #[allow(clippy::wrong_self_convention)] +// fn to_subscribe(mut self, to_subscribe: bool) -> Self { +// self.to_subscribe = to_subscribe; +// self +// } + +// fn gs_config(mut self, gs_config: Config) -> Self { +// self.gs_config = gs_config; +// self +// } + +// fn explicit(mut self, explicit: usize) -> Self { +// self.explicit = explicit; +// self +// } + +// fn outbound(mut self, outbound: usize) -> Self { +// self.outbound = outbound; +// self +// } + +// fn scoring(mut self, scoring: Option<(PeerScoreParams, PeerScoreThresholds)>) -> Self { +// self.scoring = scoring; +// self +// } + +// fn subscription_filter(mut self, subscription_filter: F) -> Self { +// self.subscription_filter = subscription_filter; +// self +// } +// } + +// fn inject_nodes() -> InjectNodes +// where +// D: DataTransform + Default + Clone + Send + 'static, +// F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +// { +// InjectNodes::default() +// } + +// fn inject_nodes1() -> InjectNodes { +// InjectNodes::::default() +// } + +// // helper functions for testing + +// fn add_peer( +// gs: &mut Behaviour, +// topic_hashes: &[TopicHash], +// outbound: bool, +// explicit: bool, +// ) -> PeerId +// where +// D: DataTransform + Default + Clone + Send + 'static, +// F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +// { +// add_peer_with_addr(gs, topic_hashes, outbound, explicit, Multiaddr::empty()) +// } + +// fn add_peer_with_addr( +// gs: &mut Behaviour, +// topic_hashes: &[TopicHash], +// outbound: bool, +// explicit: bool, +// address: Multiaddr, +// ) -> PeerId +// where +// D: DataTransform + Default + Clone + Send + 'static, +// F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +// { +// add_peer_with_addr_and_kind( +// gs, +// topic_hashes, +// outbound, +// explicit, +// address, +// Some(PeerKind::Gossipsubv1_1), +// ) +// } + +// fn add_peer_with_addr_and_kind( +// gs: &mut Behaviour, +// topic_hashes: &[TopicHash], +// outbound: bool, +// explicit: bool, +// address: Multiaddr, +// kind: Option, +// ) -> PeerId +// where +// D: DataTransform + Default + Clone + Send + 'static, +// F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +// { +// let peer = PeerId::random(); +// let endpoint = if outbound { +// ConnectedPoint::Dialer { +// address, +// role_override: Endpoint::Dialer, +// } +// } else { +// ConnectedPoint::Listener { +// local_addr: Multiaddr::empty(), +// send_back_addr: address, +// } +// }; + +// gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { +// peer_id: peer, +// connection_id: ConnectionId::new_unchecked(0), +// endpoint: &endpoint, +// failed_addresses: &[], +// other_established: 0, // first connection +// })); +// if let Some(kind) = kind { +// gs.on_connection_handler_event( +// peer, +// ConnectionId::new_unchecked(0), +// HandlerEvent::PeerKind(kind), +// ); +// } +// if explicit { +// gs.add_explicit_peer(&peer); +// } +// if !topic_hashes.is_empty() { +// gs.handle_received_subscriptions( +// &topic_hashes +// .iter() +// .cloned() +// .map(|t| Subscription { +// action: SubscriptionAction::Subscribe, +// topic_hash: t, +// }) +// .collect::>(), +// &peer, +// ); +// } +// peer +// } + +// fn disconnect_peer(gs: &mut 
Behaviour, peer_id: &PeerId) +// where +// D: DataTransform + Default + Clone + Send + 'static, +// F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +// { +// if let Some(peer_connections) = gs.connected_peers.get(peer_id) { +// let fake_endpoint = ConnectedPoint::Dialer { +// address: Multiaddr::empty(), +// role_override: Endpoint::Dialer, +// }; // this is not relevant +// // peer_connections.connections should never be empty. + +// let mut active_connections = peer_connections.connections.len(); +// for connection_id in peer_connections.connections.clone() { +// active_connections = active_connections.checked_sub(1).unwrap(); + +// gs.on_swarm_event(FromSwarm::ConnectionClosed(ConnectionClosed { +// peer_id: *peer_id, +// connection_id, +// endpoint: &fake_endpoint, +// remaining_established: active_connections, +// })); +// } +// } +// } + +// // Converts a protobuf message into a gossipsub message for reading the Gossipsub event queue. +// fn proto_to_message(rpc: &proto::RPC) -> Rpc { +// // Store valid messages. +// let mut messages = Vec::with_capacity(rpc.publish.len()); +// let rpc = rpc.clone(); +// for message in rpc.publish.into_iter() { +// messages.push(RawMessage { +// source: message.from.map(|x| PeerId::from_bytes(&x).unwrap()), +// data: message.data.unwrap_or_default(), +// sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), // don't inform the application +// topic: TopicHash::from_raw(message.topic), +// signature: message.signature, // don't inform the application +// key: None, +// validated: false, +// }); +// } +// let mut control_msgs = Vec::new(); +// if let Some(rpc_control) = rpc.control { +// // Collect the gossipsub control messages +// let ihave_msgs: Vec = rpc_control +// .ihave +// .into_iter() +// .map(|ihave| ControlAction::IHave { +// topic_hash: TopicHash::from_raw(ihave.topic_id.unwrap_or_default()), +// message_ids: ihave +// .message_ids +// .into_iter() +// .map(MessageId::from) +// .collect::>(), +// }) +// .collect(); + +// let iwant_msgs: Vec = rpc_control +// .iwant +// .into_iter() +// .map(|iwant| ControlAction::IWant { +// message_ids: iwant +// .message_ids +// .into_iter() +// .map(MessageId::from) +// .collect::>(), +// }) +// .collect(); + +// let graft_msgs: Vec = rpc_control +// .graft +// .into_iter() +// .map(|graft| ControlAction::Graft { +// topic_hash: TopicHash::from_raw(graft.topic_id.unwrap_or_default()), +// }) +// .collect(); + +// let mut prune_msgs = Vec::new(); + +// for prune in rpc_control.prune { +// // filter out invalid peers +// let peers = prune +// .peers +// .into_iter() +// .filter_map(|info| { +// info.peer_id +// .and_then(|id| PeerId::from_bytes(&id).ok()) +// .map(|peer_id| +// //TODO signedPeerRecord, see https://github.com/libp2p/specs/pull/217 +// PeerInfo { +// peer_id: Some(peer_id), +// }) +// }) +// .collect::>(); + +// let topic_hash = TopicHash::from_raw(prune.topic_id.unwrap_or_default()); +// prune_msgs.push(ControlAction::Prune { +// topic_hash, +// peers, +// backoff: prune.backoff, +// }); +// } + +// control_msgs.extend(ihave_msgs); +// control_msgs.extend(iwant_msgs); +// control_msgs.extend(graft_msgs); +// control_msgs.extend(prune_msgs); +// } + +// Rpc { +// messages, +// subscriptions: rpc +// .subscriptions +// .into_iter() +// .map(|sub| Subscription { +// action: if Some(true) == sub.subscribe { +// SubscriptionAction::Subscribe +// } else { +// SubscriptionAction::Unsubscribe +// }, +// topic_hash: 
TopicHash::from_raw(sub.topic_id.unwrap_or_default()), +// }) +// .collect(), +// control_msgs, +// } +// } + +// #[test] +// /// Test local node subscribing to a topic +// fn test_subscribe() { +// // The node should: +// // - Create an empty vector in mesh[topic] +// // - Send subscription request to all peers +// // - run JOIN(topic) + +// let subscribe_topic = vec![String::from("test_subscribe")]; +// let (gs, _, topic_hashes) = inject_nodes1() +// .peer_no(20) +// .topics(subscribe_topic) +// .to_subscribe(true) +// .create_network(); + +// assert!( +// gs.mesh.contains_key(&topic_hashes[0]), +// "Subscribe should add a new entry to the mesh[topic] hashmap" +// ); + +// // collect all the subscriptions +// let subscriptions = gs +// .events +// .iter() +// .filter(|e| { +// matches!( +// e, +// ToSwarm::NotifyHandler { +// event: HandlerIn::Message(RpcOut::Subscribe(_)), +// .. +// } +// ) +// }) +// .count(); + +// // we sent a subscribe to all known peers +// assert_eq!(subscriptions, 20); +// } + +// #[test] +// /// Test unsubscribe. +// fn test_unsubscribe() { +// // Unsubscribe should: +// // - Remove the mesh entry for topic +// // - Send UNSUBSCRIBE to all known peers +// // - Call Leave + +// let topic_strings = vec![String::from("topic1"), String::from("topic2")]; +// let topics = topic_strings +// .iter() +// .map(|t| Topic::new(t.clone())) +// .collect::>(); + +// // subscribe to topic_strings +// let (mut gs, _, topic_hashes) = inject_nodes1() +// .peer_no(20) +// .topics(topic_strings) +// .to_subscribe(true) +// .create_network(); + +// for topic_hash in &topic_hashes { +// assert!( +// gs.topic_peers.contains_key(topic_hash), +// "Topic_peers contain a topic entry" +// ); +// assert!( +// gs.mesh.contains_key(topic_hash), +// "mesh should contain a topic entry" +// ); +// } + +// // unsubscribe from both topics +// assert!( +// gs.unsubscribe(&topics[0]).unwrap(), +// "should be able to unsubscribe successfully from each topic", +// ); +// assert!( +// gs.unsubscribe(&topics[1]).unwrap(), +// "should be able to unsubscribe successfully from each topic", +// ); + +// // collect all the subscriptions +// let subscriptions = gs +// .events +// .iter() +// .fold(0, |collected_subscriptions, e| match e { +// ToSwarm::NotifyHandler { +// event: HandlerIn::Message(RpcOut::Subscribe(_)), +// .. +// } => collected_subscriptions + 1, +// _ => collected_subscriptions, +// }); + +// // we sent a unsubscribe to all known peers, for two topics +// assert_eq!(subscriptions, 40); + +// // check we clean up internal structures +// for topic_hash in &topic_hashes { +// assert!( +// !gs.mesh.contains_key(topic_hash), +// "All topics should have been removed from the mesh" +// ); +// } +// } + +// #[test] +// /// Test JOIN(topic) functionality. +// fn test_join() { +// // The Join function should: +// // - Remove peers from fanout[topic] +// // - Add any fanout[topic] peers to the mesh (up to mesh_n) +// // - Fill up to mesh_n peers from known gossipsub peers in the topic +// // - Send GRAFT messages to all nodes added to the mesh + +// // This test is not an isolated unit test, rather it uses higher level, +// // subscribe/unsubscribe to perform the test. 
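+//     // With the default config this should pull mesh_n (6) peers into the mesh
+//     // and queue one GRAFT per grafted peer; the asserts below rely on that.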
+
+//     let topic_strings = vec![String::from("topic1"), String::from("topic2")];
+//     let topics = topic_strings
+//         .iter()
+//         .map(|t| Topic::new(t.clone()))
+//         .collect::<Vec<Topic>>();
+
+//     let (mut gs, _, topic_hashes) = inject_nodes1()
+//         .peer_no(20)
+//         .topics(topic_strings)
+//         .to_subscribe(true)
+//         .create_network();
+
+//     // unsubscribe, then call join to invoke functionality
+//     assert!(
+//         gs.unsubscribe(&topics[0]).unwrap(),
+//         "should be able to unsubscribe successfully"
+//     );
+//     assert!(
+//         gs.unsubscribe(&topics[1]).unwrap(),
+//         "should be able to unsubscribe successfully"
+//     );
+
+//     // re-subscribe - there should be peers associated with the topic
+//     assert!(
+//         gs.subscribe(&topics[0]).unwrap(),
+//         "should be able to subscribe successfully"
+//     );
+
+//     // should have added mesh_n nodes to the mesh
+//     assert!(
+//         gs.mesh.get(&topic_hashes[0]).unwrap().len() == 6,
+//         "Should have added 6 nodes to the mesh"
+//     );
+
+//     fn collect_grafts(
+//         mut collected_grafts: Vec<ControlAction>,
+//         (_, controls): (&PeerId, &Vec<ControlAction>),
+//     ) -> Vec<ControlAction> {
+//         for c in controls.iter() {
+//             if let ControlAction::Graft { topic_hash: _ } = c {
+//                 collected_grafts.push(c.clone())
+//             }
+//         }
+//         collected_grafts
+//     }
+
+//     // there should be mesh_n GRAFT messages.
+//     let graft_messages = gs.control_pool.iter().fold(vec![], collect_grafts);
+
+//     assert_eq!(
+//         graft_messages.len(),
+//         6,
+//         "There should be 6 graft messages sent to peers"
+//     );
+
+//     // verify fanout nodes
+//     // add 3 random peers to the fanout[topic1]
+//     gs.fanout
+//         .insert(topic_hashes[1].clone(), Default::default());
+//     let mut new_peers: Vec<PeerId> = vec![];
+//     for _ in 0..3 {
+//         let random_peer = PeerId::random();
+//         // inform the behaviour of a new peer
+//         gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished {
+//             peer_id: random_peer,
+//             connection_id: ConnectionId::new_unchecked(0),
+//             endpoint: &ConnectedPoint::Dialer {
+//                 address: "/ip4/127.0.0.1".parse::<Multiaddr>().unwrap(),
+//                 role_override: Endpoint::Dialer,
+//             },
+//             failed_addresses: &[],
+//             other_established: 0,
+//         }));
+
+//         // add the new peer to the fanout
+//         let fanout_peers = gs.fanout.get_mut(&topic_hashes[1]).unwrap();
+//         fanout_peers.insert(random_peer);
+//         new_peers.push(random_peer);
+//     }
+
+//     // subscribe to topic1
+//     gs.subscribe(&topics[1]).unwrap();
+
+//     // the three new peers should have been added, along with 3 more from the pool.
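+//     // (3 fanout peers + 3 peers from the known-peer pool = mesh_n = 6 with defaults)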
+//     assert!(
+//         gs.mesh.get(&topic_hashes[1]).unwrap().len() == 6,
+//         "Should have added 6 nodes to the mesh"
+//     );
+//     let mesh_peers = gs.mesh.get(&topic_hashes[1]).unwrap();
+//     for new_peer in new_peers {
+//         assert!(
+//             mesh_peers.contains(&new_peer),
+//             "Fanout peer should be included in the mesh"
+//         );
+//     }
+
+//     // there should now be 12 graft messages to be sent
+//     let graft_messages = gs.control_pool.iter().fold(vec![], collect_grafts);
+
+//     assert!(
+//         graft_messages.len() == 12,
+//         "There should be 12 graft messages sent to peers"
+//     );
+// }
+
+// /// Test local node publish to subscribed topic
+// #[test]
+// fn test_publish_without_flood_publishing() {
+//     // node should:
+//     // - Send publish message to all peers
+//     // - Insert message into gs.mcache and gs.received
+
+//     // turn off flood publish to test old behaviour
+//     let config = ConfigBuilder::default()
+//         .flood_publish(false)
+//         .build()
+//         .unwrap();
+
+//     let publish_topic = String::from("test_publish");
+//     let (mut gs, _, topic_hashes) = inject_nodes1()
+//         .peer_no(20)
+//         .topics(vec![publish_topic.clone()])
+//         .to_subscribe(true)
+//         .gs_config(config)
+//         .create_network();
+
+//     assert!(
+//         gs.mesh.contains_key(&topic_hashes[0]),
+//         "Subscribe should add a new entry to the mesh[topic] hashmap"
+//     );
+
+//     // all peers should be subscribed to the topic
+//     assert_eq!(
+//         gs.topic_peers.get(&topic_hashes[0]).map(|p| p.len()),
+//         Some(20),
+//         "Peers should be subscribed to the topic"
+//     );
+
+//     // publish on topic
+//     let publish_data = vec![0; 42];
+//     gs.publish(Topic::new(publish_topic), publish_data).unwrap();
+
+//     // Collect all publish messages
+//     let publishes = gs
+//         .events
+//         .into_iter()
+//         .fold(vec![], |mut collected_publish, e| match e {
+//             ToSwarm::NotifyHandler {
+//                 event: HandlerIn::Message(RpcOut::Publish(message)),
+//                 ..
+// } => { +// collected_publish.push(message); +// collected_publish +// } +// _ => collected_publish, +// }); + +// // Transform the inbound message +// let message = &gs +// .data_transform +// .inbound_transform( +// publishes +// .first() +// .expect("Should contain > 0 entries") +// .clone(), +// ) +// .unwrap(); + +// let msg_id = gs.config.message_id(message); + +// let config: Config = Config::default(); +// assert_eq!( +// publishes.len(), +// config.mesh_n_low(), +// "Should send a publish message to all known peers" +// ); + +// assert!( +// gs.mcache.get(&msg_id).is_some(), +// "Message cache should contain published message" +// ); +// } + +// /// Test local node publish to unsubscribed topic +// #[test] +// fn test_fanout() { +// // node should: +// // - Populate fanout peers +// // - Send publish message to fanout peers +// // - Insert message into gs.mcache and gs.received + +// //turn off flood publish to test fanout behaviour +// let config = ConfigBuilder::default() +// .flood_publish(false) +// .build() +// .unwrap(); + +// let fanout_topic = String::from("test_fanout"); +// let (mut gs, _, topic_hashes) = inject_nodes1() +// .peer_no(20) +// .topics(vec![fanout_topic.clone()]) +// .to_subscribe(true) +// .gs_config(config) +// .create_network(); + +// assert!( +// gs.mesh.contains_key(&topic_hashes[0]), +// "Subscribe should add a new entry to the mesh[topic] hashmap" +// ); +// // Unsubscribe from topic +// assert!( +// gs.unsubscribe(&Topic::new(fanout_topic.clone())).unwrap(), +// "should be able to unsubscribe successfully from topic" +// ); + +// // Publish on unsubscribed topic +// let publish_data = vec![0; 42]; +// gs.publish(Topic::new(fanout_topic.clone()), publish_data) +// .unwrap(); + +// assert_eq!( +// gs.fanout +// .get(&TopicHash::from_raw(fanout_topic)) +// .unwrap() +// .len(), +// gs.config.mesh_n(), +// "Fanout should contain `mesh_n` peers for fanout topic" +// ); + +// // Collect all publish messages +// let publishes = gs +// .events +// .into_iter() +// .fold(vec![], |mut collected_publish, e| match e { +// ToSwarm::NotifyHandler { +// event: HandlerIn::Message(RpcOut::Publish(message)), +// .. +// } => { +// collected_publish.push(message); +// collected_publish +// } +// _ => collected_publish, +// }); + +// // Transform the inbound message +// let message = &gs +// .data_transform +// .inbound_transform( +// publishes +// .first() +// .expect("Should contain > 0 entries") +// .clone(), +// ) +// .unwrap(); + +// let msg_id = gs.config.message_id(message); + +// assert_eq!( +// publishes.len(), +// gs.config.mesh_n(), +// "Should send a publish message to `mesh_n` fanout peers" +// ); + +// assert!( +// gs.mcache.get(&msg_id).is_some(), +// "Message cache should contain published message" +// ); +// } + +// #[test] +// /// Test the gossipsub NetworkBehaviour peer connection logic. +// fn test_inject_connected() { +// let (gs, peers, topic_hashes) = inject_nodes1() +// .peer_no(20) +// .topics(vec![String::from("topic1"), String::from("topic2")]) +// .to_subscribe(true) +// .create_network(); + +// // check that our subscriptions are sent to each of the peers +// // collect all the SendEvents +// let subscriptions = gs +// .events +// .into_iter() +// .filter_map(|e| match e { +// ToSwarm::NotifyHandler { +// event: HandlerIn::Message(RpcOut::Subscribe(topic)), +// peer_id, +// .. 
+// } => Some((peer_id, topic)), +// _ => None, +// }) +// .fold( +// HashMap::>::new(), +// |mut subs, (peer, sub)| { +// let mut peer_subs = subs.remove(&peer).unwrap_or_default(); +// peer_subs.push(sub.into_string()); +// subs.insert(peer, peer_subs); +// subs +// }, +// ); + +// // check that there are two subscriptions sent to each peer +// for peer_subs in subscriptions.values() { +// assert!(peer_subs.contains(&String::from("topic1"))); +// assert!(peer_subs.contains(&String::from("topic2"))); +// assert_eq!(peer_subs.len(), 2); +// } + +// // check that there are 20 send events created +// assert_eq!(subscriptions.len(), 20); + +// // should add the new peers to `peer_topics` with an empty vec as a gossipsub node +// for peer in peers { +// let known_topics = gs.peer_topics.get(&peer).unwrap(); +// assert!( +// known_topics == &topic_hashes.iter().cloned().collect(), +// "The topics for each node should all topics" +// ); +// } +// } + +// #[test] +// /// Test subscription handling +// fn test_handle_received_subscriptions() { +// // For every subscription: +// // SUBSCRIBE: - Add subscribed topic to peer_topics for peer. +// // - Add peer to topics_peer. +// // UNSUBSCRIBE - Remove topic from peer_topics for peer. +// // - Remove peer from topic_peers. + +// let topics = ["topic1", "topic2", "topic3", "topic4"] +// .iter() +// .map(|&t| String::from(t)) +// .collect(); +// let (mut gs, peers, topic_hashes) = inject_nodes1() +// .peer_no(20) +// .topics(topics) +// .to_subscribe(false) +// .create_network(); + +// // The first peer sends 3 subscriptions and 1 unsubscription +// let mut subscriptions = topic_hashes[..3] +// .iter() +// .map(|topic_hash| Subscription { +// action: SubscriptionAction::Subscribe, +// topic_hash: topic_hash.clone(), +// }) +// .collect::>(); + +// subscriptions.push(Subscription { +// action: SubscriptionAction::Unsubscribe, +// topic_hash: topic_hashes[topic_hashes.len() - 1].clone(), +// }); + +// let unknown_peer = PeerId::random(); +// // process the subscriptions +// // first and second peers send subscriptions +// gs.handle_received_subscriptions(&subscriptions, &peers[0]); +// gs.handle_received_subscriptions(&subscriptions, &peers[1]); +// // unknown peer sends the same subscriptions +// gs.handle_received_subscriptions(&subscriptions, &unknown_peer); + +// // verify the result + +// let peer_topics = gs.peer_topics.get(&peers[0]).unwrap().clone(); +// assert!( +// peer_topics == topic_hashes.iter().take(3).cloned().collect(), +// "First peer should be subscribed to three topics" +// ); +// let peer_topics = gs.peer_topics.get(&peers[1]).unwrap().clone(); +// assert!( +// peer_topics == topic_hashes.iter().take(3).cloned().collect(), +// "Second peer should be subscribed to three topics" +// ); + +// assert!( +// !gs.peer_topics.contains_key(&unknown_peer), +// "Unknown peer should not have been added" +// ); + +// for topic_hash in topic_hashes[..3].iter() { +// let topic_peers = gs.topic_peers.get(topic_hash).unwrap().clone(); +// assert!( +// topic_peers == peers[..2].iter().cloned().collect(), +// "Two peers should be added to the first three topics" +// ); +// } + +// // Peer 0 unsubscribes from the first topic + +// gs.handle_received_subscriptions( +// &[Subscription { +// action: SubscriptionAction::Unsubscribe, +// topic_hash: topic_hashes[0].clone(), +// }], +// &peers[0], +// ); + +// let peer_topics = gs.peer_topics.get(&peers[0]).unwrap().clone(); +// assert!( +// peer_topics == topic_hashes[1..3].iter().cloned().collect(), +// 
"Peer should be subscribed to two topics" +// ); + +// let topic_peers = gs.topic_peers.get(&topic_hashes[0]).unwrap().clone(); // only gossipsub at the moment +// assert!( +// topic_peers == peers[1..2].iter().cloned().collect(), +// "Only the second peers should be in the first topic" +// ); +// } + +// #[test] +// /// Test Gossipsub.get_random_peers() function +// fn test_get_random_peers() { +// // generate a default Config +// let gs_config = ConfigBuilder::default() +// .validation_mode(ValidationMode::Anonymous) +// .build() +// .unwrap(); +// // create a gossipsub struct +// let mut gs: Behaviour = Behaviour::new(MessageAuthenticity::Anonymous, gs_config).unwrap(); + +// // create a topic and fill it with some peers +// let topic_hash = Topic::new("Test").hash(); +// let mut peers = vec![]; +// for _ in 0..20 { +// peers.push(PeerId::random()) +// } + +// gs.topic_peers +// .insert(topic_hash.clone(), peers.iter().cloned().collect()); + +// gs.connected_peers = peers +// .iter() +// .map(|p| { +// ( +// *p, +// PeerConnections { +// kind: PeerKind::Gossipsubv1_1, +// connections: vec![ConnectionId::new_unchecked(0)], +// }, +// ) +// }) +// .collect(); + +// let random_peers = +// get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 5, |_| { +// true +// }); +// assert_eq!(random_peers.len(), 5, "Expected 5 peers to be returned"); +// let random_peers = get_random_peers( +// &gs.topic_peers, +// &gs.connected_peers, +// &topic_hash, +// 30, +// |_| true, +// ); +// assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); +// assert!( +// random_peers == peers.iter().cloned().collect(), +// "Expected no shuffling" +// ); +// let random_peers = get_random_peers( +// &gs.topic_peers, +// &gs.connected_peers, +// &topic_hash, +// 20, +// |_| true, +// ); +// assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); +// assert!( +// random_peers == peers.iter().cloned().collect(), +// "Expected no shuffling" +// ); +// let random_peers = +// get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 0, |_| { +// true +// }); +// assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); +// // test the filter +// let random_peers = +// get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 5, |_| { +// false +// }); +// assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); +// let random_peers = get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 10, { +// |peer| peers.contains(peer) +// }); +// assert!(random_peers.len() == 10, "Expected 10 peers to be returned"); +// } + +// /// Tests that the correct message is sent when a peer asks for a message in our cache. 
+// #[test]
+// fn test_handle_iwant_msg_cached() {
+//     let (mut gs, peers, _) = inject_nodes1()
+//         .peer_no(20)
+//         .topics(Vec::new())
+//         .to_subscribe(true)
+//         .create_network();
+
+//     let raw_message = RawMessage {
+//         source: Some(peers[11]),
+//         data: vec![1, 2, 3, 4],
+//         sequence_number: Some(1u64),
+//         topic: TopicHash::from_raw("topic"),
+//         signature: None,
+//         key: None,
+//         validated: true,
+//     };
+
+//     // Transform the inbound message
+//     let message = &gs
+//         .data_transform
+//         .inbound_transform(raw_message.clone())
+//         .unwrap();
+
+//     let msg_id = gs.config.message_id(message);
+//     gs.mcache.put(&msg_id, raw_message);
+
+//     gs.handle_iwant(&peers[7], vec![msg_id.clone()]);
+
+//     // the messages we are sending
+//     let sent_messages = gs.events.into_iter().fold(
+//         Vec::<RawMessage>::new(),
+//         |mut collected_messages, e| match e {
+//             ToSwarm::NotifyHandler { event, .. } => {
+//                 if let HandlerIn::Message(RpcOut::Forward(message)) = event {
+//                     collected_messages.push(message);
+//                 }
+//                 collected_messages
+//             }
+//             _ => collected_messages,
+//         },
+//     );
+
+//     assert!(
+//         sent_messages
+//             .iter()
+//             .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap())
+//             .any(|msg| gs.config.message_id(&msg) == msg_id),
+//         "Expected the cached message to be sent to an IWANT peer"
+//     );
+// }
+
+// /// Tests that messages are sent correctly depending on the shifting of the message cache.
+// #[test]
+// fn test_handle_iwant_msg_cached_shifted() {
+//     let (mut gs, peers, _) = inject_nodes1()
+//         .peer_no(20)
+//         .topics(Vec::new())
+//         .to_subscribe(true)
+//         .create_network();
+
+//     // perform up to 9 cache shifts and check when the message leaves the cache
+//     for shift in 1..10 {
+//         let raw_message = RawMessage {
+//             source: Some(peers[11]),
+//             data: vec![1, 2, 3, 4],
+//             sequence_number: Some(shift),
+//             topic: TopicHash::from_raw("topic"),
+//             signature: None,
+//             key: None,
+//             validated: true,
+//         };
+
+//         // Transform the inbound message
+//         let message = &gs
+//             .data_transform
+//             .inbound_transform(raw_message.clone())
+//             .unwrap();
+
+//         let msg_id = gs.config.message_id(message);
+//         gs.mcache.put(&msg_id, raw_message);
+//         for _ in 0..shift {
+//             gs.mcache.shift();
+//         }
+
+//         gs.handle_iwant(&peers[7], vec![msg_id.clone()]);
+
+//         // is the message being sent?
+//         let message_exists = gs.events.iter().any(|e| match e {
+//             ToSwarm::NotifyHandler {
+//                 event: HandlerIn::Message(RpcOut::Forward(message)),
+//                 ..
+// } => { +// gs.config.message_id( +// &gs.data_transform +// .inbound_transform(message.clone()) +// .unwrap(), +// ) == msg_id +// } +// _ => false, +// }); +// // default history_length is 5, expect no messages after shift > 5 +// if shift < 5 { +// assert!( +// message_exists, +// "Expected the cached message to be sent to an IWANT peer before 5 shifts" +// ); +// } else { +// assert!( +// !message_exists, +// "Expected the cached message to not be sent to an IWANT peer after 5 shifts" +// ); +// } +// } +// } + +// #[test] +// // tests that an event is not created when a peers asks for a message not in our cache +// fn test_handle_iwant_msg_not_cached() { +// let (mut gs, peers, _) = inject_nodes1() +// .peer_no(20) +// .topics(Vec::new()) +// .to_subscribe(true) +// .create_network(); + +// let events_before = gs.events.len(); +// gs.handle_iwant(&peers[7], vec![MessageId::new(b"unknown id")]); +// let events_after = gs.events.len(); + +// assert_eq!( +// events_before, events_after, +// "Expected event count to stay the same" +// ); +// } + +// #[test] +// // tests that an event is created when a peer shares that it has a message we want +// fn test_handle_ihave_subscribed_and_msg_not_cached() { +// let (mut gs, peers, topic_hashes) = inject_nodes1() +// .peer_no(20) +// .topics(vec![String::from("topic1")]) +// .to_subscribe(true) +// .create_network(); + +// gs.handle_ihave( +// &peers[7], +// vec![(topic_hashes[0].clone(), vec![MessageId::new(b"unknown id")])], +// ); + +// // check that we sent an IWANT request for `unknown id` +// let iwant_exists = match gs.control_pool.get(&peers[7]) { +// Some(controls) => controls.iter().any(|c| match c { +// ControlAction::IWant { message_ids } => message_ids +// .iter() +// .any(|m| *m == MessageId::new(b"unknown id")), +// _ => false, +// }), +// _ => false, +// }; + +// assert!( +// iwant_exists, +// "Expected to send an IWANT control message for unknown message id" +// ); +// } + +// #[test] +// // tests that an event is not created when a peer shares that it has a message that +// // we already have +// fn test_handle_ihave_subscribed_and_msg_cached() { +// let (mut gs, peers, topic_hashes) = inject_nodes1() +// .peer_no(20) +// .topics(vec![String::from("topic1")]) +// .to_subscribe(true) +// .create_network(); + +// let msg_id = MessageId::new(b"known id"); + +// let events_before = gs.events.len(); +// gs.handle_ihave(&peers[7], vec![(topic_hashes[0].clone(), vec![msg_id])]); +// let events_after = gs.events.len(); + +// assert_eq!( +// events_before, events_after, +// "Expected event count to stay the same" +// ) +// } + +// #[test] +// // test that an event is not created when a peer shares that it has a message in +// // a topic that we are not subscribed to +// fn test_handle_ihave_not_subscribed() { +// let (mut gs, peers, _) = inject_nodes1() +// .peer_no(20) +// .topics(vec![]) +// .to_subscribe(true) +// .create_network(); + +// let events_before = gs.events.len(); +// gs.handle_ihave( +// &peers[7], +// vec![( +// TopicHash::from_raw(String::from("unsubscribed topic")), +// vec![MessageId::new(b"irrelevant id")], +// )], +// ); +// let events_after = gs.events.len(); + +// assert_eq!( +// events_before, events_after, +// "Expected event count to stay the same" +// ) +// } + +// #[test] +// // tests that a peer is added to our mesh when we are both subscribed +// // to the same topic +// fn test_handle_graft_is_subscribed() { +// let (mut gs, peers, topic_hashes) = inject_nodes1() +// .peer_no(20) +// 
.topics(vec![String::from("topic1")])
+//         .to_subscribe(true)
+//         .create_network();
+
+//     gs.handle_graft(&peers[7], topic_hashes.clone());
+
+//     assert!(
+//         gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]),
+//         "Expected peer to have been added to mesh"
+//     );
+// }
+
+// #[test]
+// // tests that a peer is not added to our mesh when it is subscribed to
+// // a topic that we are not subscribed to
+// fn test_handle_graft_is_not_subscribed() {
+//     let (mut gs, peers, topic_hashes) = inject_nodes1()
+//         .peer_no(20)
+//         .topics(vec![String::from("topic1")])
+//         .to_subscribe(true)
+//         .create_network();
+
+//     gs.handle_graft(
+//         &peers[7],
+//         vec![TopicHash::from_raw(String::from("unsubscribed topic"))],
+//     );
+
+//     assert!(
+//         !gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]),
+//         "Expected peer to not have been added to mesh"
+//     );
+// }
+
+// #[test]
+// // tests multiple topics in a single graft message
+// fn test_handle_graft_multiple_topics() {
+//     let topics: Vec<String> = ["topic1", "topic2", "topic3", "topic4"]
+//         .iter()
+//         .map(|&t| String::from(t))
+//         .collect();
+
+//     let (mut gs, peers, topic_hashes) = inject_nodes1()
+//         .peer_no(20)
+//         .topics(topics)
+//         .to_subscribe(true)
+//         .create_network();
+
+//     let mut their_topics = topic_hashes.clone();
+//     // their_topics = [topic1, topic2, topic3]
+//     // our_topics = [topic1, topic2, topic4]
+//     their_topics.pop();
+//     gs.leave(&their_topics[2]);
+
+//     gs.handle_graft(&peers[7], their_topics.clone());
+
+//     for hash in topic_hashes.iter().take(2) {
+//         assert!(
+//             gs.mesh.get(hash).unwrap().contains(&peers[7]),
+//             "Expected peer to be in the mesh for the first 2 topics"
+//         );
+//     }
+
+//     assert!(
+//         !gs.mesh.contains_key(&topic_hashes[2]),
+//         "Expected the third topic (topic3) to not be in the mesh"
+//     );
+// }
+
+// #[test]
+// // tests that a peer is removed from our mesh
+// fn test_handle_prune_peer_in_mesh() {
+//     let (mut gs, peers, topic_hashes) = inject_nodes1()
+//         .peer_no(20)
+//         .topics(vec![String::from("topic1")])
+//         .to_subscribe(true)
+//         .create_network();
+
+//     // insert peer into our mesh for 'topic1'
+//     gs.mesh
+//         .insert(topic_hashes[0].clone(), peers.iter().cloned().collect());
+//     assert!(
+//         gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]),
+//         "Expected peer to be in mesh"
+//     );
+
+//     gs.handle_prune(
+//         &peers[7],
+//         topic_hashes
+//             .iter()
+//             .map(|h| (h.clone(), vec![], None))
+//             .collect(),
+//     );
+//     assert!(
+//         !gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]),
+//         "Expected peer to be removed from mesh"
+//     );
+// }
+
+// fn count_control_msgs<D: DataTransform, F: TopicSubscriptionFilter>(
+//     gs: &Behaviour<D, F>,
+//     mut filter: impl FnMut(&PeerId, &ControlAction) -> bool,
+// ) -> usize {
+//     gs.control_pool
+//         .iter()
+//         .map(|(peer_id, actions)| actions.iter().filter(|m| filter(peer_id, m)).count())
+//         .sum::<usize>()
+//         + gs.events
+//             .iter()
+//             .filter(|e| match e {
+//                 ToSwarm::NotifyHandler {
+//                     peer_id,
+//                     event: HandlerIn::Message(RpcOut::Control(action)),
+//                     ..
+// } => filter(peer_id, action), +// _ => false, +// }) +// .count() +// } + +// fn flush_events(gs: &mut Behaviour) { +// gs.control_pool.clear(); +// gs.events.clear(); +// } + +// #[test] +// // tests that a peer added as explicit peer gets connected to +// fn test_explicit_peer_gets_connected() { +// let (mut gs, _, _) = inject_nodes1() +// .peer_no(0) +// .topics(Vec::new()) +// .to_subscribe(true) +// .create_network(); + +// //create new peer +// let peer = PeerId::random(); + +// //add peer as explicit peer +// gs.add_explicit_peer(&peer); + +// let num_events = gs +// .events +// .iter() +// .filter(|e| match e { +// ToSwarm::Dial { opts } => opts.get_peer_id() == Some(peer), +// _ => false, +// }) +// .count(); + +// assert_eq!( +// num_events, 1, +// "There was no dial peer event for the explicit peer" +// ); +// } + +// #[test] +// fn test_explicit_peer_reconnects() { +// let config = ConfigBuilder::default() +// .check_explicit_peers_ticks(2) +// .build() +// .unwrap(); +// let (mut gs, others, _) = inject_nodes1() +// .peer_no(1) +// .topics(Vec::new()) +// .to_subscribe(true) +// .gs_config(config) +// .create_network(); + +// let peer = others.first().unwrap(); + +// //add peer as explicit peer +// gs.add_explicit_peer(peer); + +// flush_events(&mut gs); + +// //disconnect peer +// disconnect_peer(&mut gs, peer); + +// gs.heartbeat(); + +// //check that no reconnect after first heartbeat since `explicit_peer_ticks == 2` +// assert_eq!( +// gs.events +// .iter() +// .filter(|e| match e { +// ToSwarm::Dial { opts } => opts.get_peer_id() == Some(*peer), +// _ => false, +// }) +// .count(), +// 0, +// "There was a dial peer event before explicit_peer_ticks heartbeats" +// ); + +// gs.heartbeat(); + +// //check that there is a reconnect after second heartbeat +// assert!( +// gs.events +// .iter() +// .filter(|e| match e { +// ToSwarm::Dial { opts } => opts.get_peer_id() == Some(*peer), +// _ => false, +// }) +// .count() +// >= 1, +// "There was no dial peer event for the explicit peer" +// ); +// } + +// #[test] +// fn test_handle_graft_explicit_peer() { +// let (mut gs, peers, topic_hashes) = inject_nodes1() +// .peer_no(1) +// .topics(vec![String::from("topic1"), String::from("topic2")]) +// .to_subscribe(true) +// .gs_config(Config::default()) +// .explicit(1) +// .create_network(); + +// let peer = peers.first().unwrap(); + +// gs.handle_graft(peer, topic_hashes.clone()); + +// //peer got not added to mesh +// assert!(gs.mesh[&topic_hashes[0]].is_empty()); +// assert!(gs.mesh[&topic_hashes[1]].is_empty()); + +// //check prunes +// assert!( +// count_control_msgs(&gs, |peer_id, m| peer_id == peer +// && match m { +// ControlAction::Prune { topic_hash, .. } => +// topic_hash == &topic_hashes[0] || topic_hash == &topic_hashes[1], +// _ => false, +// }) +// >= 2, +// "Not enough prunes sent when grafting from explicit peer" +// ); +// } + +// #[test] +// fn explicit_peers_not_added_to_mesh_on_receiving_subscription() { +// let (gs, peers, topic_hashes) = inject_nodes1() +// .peer_no(2) +// .topics(vec![String::from("topic1")]) +// .to_subscribe(true) +// .gs_config(Config::default()) +// .explicit(1) +// .create_network(); + +// //only peer 1 is in the mesh not peer 0 (which is an explicit peer) +// assert_eq!( +// gs.mesh[&topic_hashes[0]], +// vec![peers[1]].into_iter().collect() +// ); + +// //assert that graft gets created to non-explicit peer +// assert!( +// count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] +// && matches!(m, ControlAction::Graft { .. 
})) +// >= 1, +// "No graft message got created to non-explicit peer" +// ); + +// //assert that no graft gets created to explicit peer +// assert_eq!( +// count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] +// && matches!(m, ControlAction::Graft { .. })), +// 0, +// "A graft message got created to an explicit peer" +// ); +// } + +// #[test] +// fn do_not_graft_explicit_peer() { +// let (mut gs, others, topic_hashes) = inject_nodes1() +// .peer_no(1) +// .topics(vec![String::from("topic")]) +// .to_subscribe(true) +// .gs_config(Config::default()) +// .explicit(1) +// .create_network(); + +// gs.heartbeat(); + +// //mesh stays empty +// assert_eq!(gs.mesh[&topic_hashes[0]], BTreeSet::new()); + +// //assert that no graft gets created to explicit peer +// assert_eq!( +// count_control_msgs(&gs, |peer_id, m| peer_id == &others[0] +// && matches!(m, ControlAction::Graft { .. })), +// 0, +// "A graft message got created to an explicit peer" +// ); +// } + +// #[test] +// fn do_forward_messages_to_explicit_peers() { +// let (mut gs, peers, topic_hashes) = inject_nodes1() +// .peer_no(2) +// .topics(vec![String::from("topic1"), String::from("topic2")]) +// .to_subscribe(true) +// .gs_config(Config::default()) +// .explicit(1) +// .create_network(); + +// let local_id = PeerId::random(); + +// let message = RawMessage { +// source: Some(peers[1]), +// data: vec![12], +// sequence_number: Some(0), +// topic: topic_hashes[0].clone(), +// signature: None, +// key: None, +// validated: true, +// }; +// gs.handle_received_message(message.clone(), &local_id); + +// assert_eq!( +// gs.events +// .iter() +// .filter(|e| match e { +// ToSwarm::NotifyHandler { +// peer_id, +// event: HandlerIn::Message(RpcOut::Forward(m)), +// .. +// } => { +// peer_id == &peers[0] && m.data == message.data +// } +// _ => false, +// }) +// .count(), +// 1, +// "The message did not get forwarded to the explicit peer" +// ); +// } + +// #[test] +// fn explicit_peers_not_added_to_mesh_on_subscribe() { +// let (mut gs, peers, _) = inject_nodes1() +// .peer_no(2) +// .topics(Vec::new()) +// .to_subscribe(true) +// .gs_config(Config::default()) +// .explicit(1) +// .create_network(); + +// //create new topic, both peers subscribing to it but we do not subscribe to it +// let topic = Topic::new(String::from("t")); +// let topic_hash = topic.hash(); +// for peer in peers.iter().take(2) { +// gs.handle_received_subscriptions( +// &[Subscription { +// action: SubscriptionAction::Subscribe, +// topic_hash: topic_hash.clone(), +// }], +// peer, +// ); +// } + +// //subscribe now to topic +// gs.subscribe(&topic).unwrap(); + +// //only peer 1 is in the mesh not peer 0 (which is an explicit peer) +// assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); + +// //assert that graft gets created to non-explicit peer +// assert!( +// count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] +// && matches!(m, ControlAction::Graft { .. })) +// > 0, +// "No graft message got created to non-explicit peer" +// ); + +// //assert that no graft gets created to explicit peer +// assert_eq!( +// count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] +// && matches!(m, ControlAction::Graft { .. 
})), +// 0, +// "A graft message got created to an explicit peer" +// ); +// } + +// #[test] +// fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { +// let (mut gs, peers, _) = inject_nodes1() +// .peer_no(2) +// .topics(Vec::new()) +// .to_subscribe(true) +// .gs_config(Config::default()) +// .explicit(1) +// .create_network(); + +// //create new topic, both peers subscribing to it but we do not subscribe to it +// let topic = Topic::new(String::from("t")); +// let topic_hash = topic.hash(); +// for peer in peers.iter().take(2) { +// gs.handle_received_subscriptions( +// &[Subscription { +// action: SubscriptionAction::Subscribe, +// topic_hash: topic_hash.clone(), +// }], +// peer, +// ); +// } + +// //we send a message for this topic => this will initialize the fanout +// gs.publish(topic.clone(), vec![1, 2, 3]).unwrap(); + +// //subscribe now to topic +// gs.subscribe(&topic).unwrap(); + +// //only peer 1 is in the mesh not peer 0 (which is an explicit peer) +// assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); + +// //assert that graft gets created to non-explicit peer +// assert!( +// count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] +// && matches!(m, ControlAction::Graft { .. })) +// >= 1, +// "No graft message got created to non-explicit peer" +// ); + +// //assert that no graft gets created to explicit peer +// assert_eq!( +// count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] +// && matches!(m, ControlAction::Graft { .. })), +// 0, +// "A graft message got created to an explicit peer" +// ); +// } + +// #[test] +// fn no_gossip_gets_sent_to_explicit_peers() { +// let (mut gs, peers, topic_hashes) = inject_nodes1() +// .peer_no(2) +// .topics(vec![String::from("topic1"), String::from("topic2")]) +// .to_subscribe(true) +// .gs_config(Config::default()) +// .explicit(1) +// .create_network(); + +// let local_id = PeerId::random(); + +// let message = RawMessage { +// source: Some(peers[1]), +// data: vec![], +// sequence_number: Some(0), +// topic: topic_hashes[0].clone(), +// signature: None, +// key: None, +// validated: true, +// }; + +// //forward the message +// gs.handle_received_message(message, &local_id); + +// //simulate multiple gossip calls (for randomness) +// for _ in 0..3 { +// gs.emit_gossip(); +// } + +// //assert that no gossip gets sent to explicit peer +// assert_eq!( +// gs.control_pool +// .get(&peers[0]) +// .unwrap_or(&Vec::new()) +// .iter() +// .filter(|m| matches!(m, ControlAction::IHave { .. })) +// .count(), +// 0, +// "Gossip got emitted to explicit peer" +// ); +// } + +// // Tests the mesh maintenance addition +// #[test] +// fn test_mesh_addition() { +// let config: Config = Config::default(); + +// // Adds mesh_low peers and PRUNE 2 giving us a deficit. +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(config.mesh_n() + 1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .create_network(); + +// let to_remove_peers = config.mesh_n() + 1 - config.mesh_n_low() - 1; + +// for peer in peers.iter().take(to_remove_peers) { +// gs.handle_prune( +// peer, +// topics.iter().map(|h| (h.clone(), vec![], None)).collect(), +// ); +// } + +// // Verify the pruned peers are removed from the mesh. 
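+//     // The mesh now sits below mesh_n_low, which is the deficit that forces the
+//     // next heartbeat to top it back up to mesh_n.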
+// assert_eq!( +// gs.mesh.get(&topics[0]).unwrap().len(), +// config.mesh_n_low() - 1 +// ); + +// // run a heartbeat +// gs.heartbeat(); + +// // Peers should be added to reach mesh_n +// assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), config.mesh_n()); +// } + +// // Tests the mesh maintenance subtraction +// #[test] +// fn test_mesh_subtraction() { +// let config = Config::default(); + +// // Adds mesh_low peers and PRUNE 2 giving us a deficit. +// let n = config.mesh_n_high() + 10; +// //make all outbound connections so that we allow grafting to all +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(n) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config.clone()) +// .outbound(n) +// .create_network(); + +// // graft all the peers +// for peer in peers { +// gs.handle_graft(&peer, topics.clone()); +// } + +// // run a heartbeat +// gs.heartbeat(); + +// // Peers should be removed to reach mesh_n +// assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), config.mesh_n()); +// } + +// #[test] +// fn test_connect_to_px_peers_on_handle_prune() { +// let config: Config = Config::default(); + +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .create_network(); + +// //handle prune from single peer with px peers + +// let mut px = Vec::new(); +// //propose more px peers than config.prune_peers() +// for _ in 0..config.prune_peers() + 5 { +// px.push(PeerInfo { +// peer_id: Some(PeerId::random()), +// }); +// } + +// gs.handle_prune( +// &peers[0], +// vec![( +// topics[0].clone(), +// px.clone(), +// Some(config.prune_backoff().as_secs()), +// )], +// ); + +// //Check DialPeer events for px peers +// let dials: Vec<_> = gs +// .events +// .iter() +// .filter_map(|e| match e { +// ToSwarm::Dial { opts } => opts.get_peer_id(), +// _ => None, +// }) +// .collect(); + +// // Exactly config.prune_peers() many random peers should be dialled +// assert_eq!(dials.len(), config.prune_peers()); + +// let dials_set: HashSet<_> = dials.into_iter().collect(); + +// // No duplicates +// assert_eq!(dials_set.len(), config.prune_peers()); + +// //all dial peers must be in px +// assert!(dials_set.is_subset( +// &px.iter() +// .map(|i| *i.peer_id.as_ref().unwrap()) +// .collect::>() +// )); +// } + +// #[test] +// fn test_send_px_and_backoff_in_prune() { +// let config: Config = Config::default(); + +// //build mesh with enough peers for px +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(config.prune_peers() + 1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .create_network(); + +// //send prune to peer +// gs.send_graft_prune( +// HashMap::new(), +// vec![(peers[0], vec![topics[0].clone()])] +// .into_iter() +// .collect(), +// HashSet::new(), +// ); + +// //check prune message +// assert_eq!( +// count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] +// && match m { +// ControlAction::Prune { +// topic_hash, +// peers, +// backoff, +// } => +// topic_hash == &topics[0] && +// peers.len() == config.prune_peers() && +// //all peers are different +// peers.iter().collect::>().len() == +// config.prune_peers() && +// backoff.unwrap() == config.prune_backoff().as_secs(), +// _ => false, +// }), +// 1 +// ); +// } + +// #[test] +// fn test_prune_backoffed_peer_on_graft() { +// let config: Config = Config::default(); + +// //build mesh with enough peers for px +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(config.prune_peers() + 1) +// 
.topics(vec!["test".into()]) +// .to_subscribe(true) +// .create_network(); + +// //remove peer from mesh and send prune to peer => this adds a backoff for this peer +// gs.mesh.get_mut(&topics[0]).unwrap().remove(&peers[0]); +// gs.send_graft_prune( +// HashMap::new(), +// vec![(peers[0], vec![topics[0].clone()])] +// .into_iter() +// .collect(), +// HashSet::new(), +// ); + +// //ignore all messages until now +// gs.events.clear(); + +// //handle graft +// gs.handle_graft(&peers[0], vec![topics[0].clone()]); + +// //check prune message +// assert_eq!( +// count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] +// && match m { +// ControlAction::Prune { +// topic_hash, +// peers, +// backoff, +// } => +// topic_hash == &topics[0] && +// //no px in this case +// peers.is_empty() && +// backoff.unwrap() == config.prune_backoff().as_secs(), +// _ => false, +// }), +// 1 +// ); +// } + +// #[test] +// fn test_do_not_graft_within_backoff_period() { +// let config = ConfigBuilder::default() +// .backoff_slack(1) +// .heartbeat_interval(Duration::from_millis(100)) +// .build() +// .unwrap(); +// //only one peer => mesh too small and will try to regraft as early as possible +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config) +// .create_network(); + +// //handle prune from peer with backoff of one second +// gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), Some(1))]); + +// //forget all events until now +// flush_events(&mut gs); + +// //call heartbeat +// gs.heartbeat(); + +// //Sleep for one second and apply 10 regular heartbeats (interval = 100ms). +// for _ in 0..10 { +// sleep(Duration::from_millis(100)); +// gs.heartbeat(); +// } + +// //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat +// // is needed). +// assert_eq!( +// count_control_msgs(&gs, |_, m| matches!(m, ControlAction::Graft { .. })), +// 0, +// "Graft message created too early within backoff period" +// ); + +// //Heartbeat one more time this should graft now +// sleep(Duration::from_millis(100)); +// gs.heartbeat(); + +// //check that graft got created +// assert!( +// count_control_msgs(&gs, |_, m| matches!(m, ControlAction::Graft { .. })) > 0, +// "No graft message was created after backoff period" +// ); +// } + +// #[test] +// fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without_backoff() { +// //set default backoff period to 1 second +// let config = ConfigBuilder::default() +// .prune_backoff(Duration::from_millis(90)) +// .backoff_slack(1) +// .heartbeat_interval(Duration::from_millis(100)) +// .build() +// .unwrap(); +// //only one peer => mesh too small and will try to regraft as early as possible +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config) +// .create_network(); + +// //handle prune from peer without a specified backoff +// gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), None)]); + +// //forget all events until now +// flush_events(&mut gs); + +// //call heartbeat +// gs.heartbeat(); + +// //Apply one more heartbeat +// sleep(Duration::from_millis(100)); +// gs.heartbeat(); + +// //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat +// // is needed). +// assert_eq!( +// count_control_msgs(&gs, |_, m| matches!(m, ControlAction::Graft { .. 
})), +// 0, +// "Graft message created too early within backoff period" +// ); + +// //Heartbeat one more time this should graft now +// sleep(Duration::from_millis(100)); +// gs.heartbeat(); + +// //check that graft got created +// assert!( +// count_control_msgs(&gs, |_, m| matches!(m, ControlAction::Graft { .. })) > 0, +// "No graft message was created after backoff period" +// ); +// } + +// #[test] +// fn test_unsubscribe_backoff() { +// const HEARTBEAT_INTERVAL: Duration = Duration::from_millis(100); +// let config = ConfigBuilder::default() +// .backoff_slack(1) +// // ensure a prune_backoff > unsubscribe_backoff +// .prune_backoff(Duration::from_secs(5)) +// .unsubscribe_backoff(1) +// .heartbeat_interval(HEARTBEAT_INTERVAL) +// .build() +// .unwrap(); + +// let topic = String::from("test"); +// // only one peer => mesh too small and will try to regraft as early as possible +// let (mut gs, _, topics) = inject_nodes1() +// .peer_no(1) +// .topics(vec![topic.clone()]) +// .to_subscribe(true) +// .gs_config(config) +// .create_network(); + +// let _ = gs.unsubscribe(&Topic::new(topic)); + +// assert_eq!( +// count_control_msgs(&gs, |_, m| match m { +// ControlAction::Prune { backoff, .. } => backoff == &Some(1), +// _ => false, +// }), +// 1, +// "Peer should be pruned with `unsubscribe_backoff`." +// ); + +// let _ = gs.subscribe(&Topic::new(topics[0].to_string())); + +// // forget all events until now +// flush_events(&mut gs); + +// // call heartbeat +// gs.heartbeat(); + +// // Sleep for one second and apply 10 regular heartbeats (interval = 100ms). +// for _ in 0..10 { +// sleep(HEARTBEAT_INTERVAL); +// gs.heartbeat(); +// } + +// // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat +// // is needed). +// assert_eq!( +// count_control_msgs(&gs, |_, m| matches!(m, ControlAction::Graft { .. })), +// 0, +// "Graft message created too early within backoff period" +// ); + +// // Heartbeat one more time this should graft now +// sleep(HEARTBEAT_INTERVAL); +// gs.heartbeat(); + +// // check that graft got created +// assert!( +// count_control_msgs(&gs, |_, m| matches!(m, ControlAction::Graft { .. })) > 0, +// "No graft message was created after backoff period" +// ); +// } + +// #[test] +// fn test_flood_publish() { +// let config: Config = Config::default(); + +// let topic = "test"; +// // Adds more peers than mesh can hold to test flood publishing +// let (mut gs, _, _) = inject_nodes1() +// .peer_no(config.mesh_n_high() + 10) +// .topics(vec![topic.into()]) +// .to_subscribe(true) +// .create_network(); + +// //publish message +// let publish_data = vec![0; 42]; +// gs.publish(Topic::new(topic), publish_data).unwrap(); + +// // Collect all publish messages +// let publishes = gs +// .events +// .into_iter() +// .fold(vec![], |mut collected_publish, e| match e { +// ToSwarm::NotifyHandler { event, .. 
} => { +// if let HandlerIn::Message(RpcOut::Publish(message)) = event { +// collected_publish.push(message); +// } +// collected_publish +// } +// _ => collected_publish, +// }); + +// // Transform the inbound message +// let message = &gs +// .data_transform +// .inbound_transform( +// publishes +// .first() +// .expect("Should contain > 0 entries") +// .clone(), +// ) +// .unwrap(); + +// let msg_id = gs.config.message_id(message); + +// let config: Config = Config::default(); +// assert_eq!( +// publishes.len(), +// config.mesh_n_high() + 10, +// "Should send a publish message to all known peers" +// ); + +// assert!( +// gs.mcache.get(&msg_id).is_some(), +// "Message cache should contain published message" +// ); +// } + +// #[test] +// fn test_gossip_to_at_least_gossip_lazy_peers() { +// let config: Config = Config::default(); + +// //add more peers than in mesh to test gossipping +// //by default only mesh_n_low peers will get added to mesh +// let (mut gs, _, topic_hashes) = inject_nodes1() +// .peer_no(config.mesh_n_low() + config.gossip_lazy() + 1) +// .topics(vec!["topic".into()]) +// .to_subscribe(true) +// .create_network(); + +// //receive message +// let raw_message = RawMessage { +// source: Some(PeerId::random()), +// data: vec![], +// sequence_number: Some(0), +// topic: topic_hashes[0].clone(), +// signature: None, +// key: None, +// validated: true, +// }; +// gs.handle_received_message(raw_message.clone(), &PeerId::random()); + +// //emit gossip +// gs.emit_gossip(); + +// // Transform the inbound message +// let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + +// let msg_id = gs.config.message_id(message); + +// //check that exactly config.gossip_lazy() many gossip messages were sent. +// assert_eq!( +// count_control_msgs(&gs, |_, action| match action { +// ControlAction::IHave { +// topic_hash, +// message_ids, +// } => topic_hash == &topic_hashes[0] && message_ids.iter().any(|id| id == &msg_id), +// _ => false, +// }), +// config.gossip_lazy() +// ); +// } + +// #[test] +// fn test_gossip_to_at_most_gossip_factor_peers() { +// let config: Config = Config::default(); + +// //add a lot of peers +// let m = config.mesh_n_low() + config.gossip_lazy() * (2.0 / config.gossip_factor()) as usize; +// let (mut gs, _, topic_hashes) = inject_nodes1() +// .peer_no(m) +// .topics(vec!["topic".into()]) +// .to_subscribe(true) +// .create_network(); + +// //receive message +// let raw_message = RawMessage { +// source: Some(PeerId::random()), +// data: vec![], +// sequence_number: Some(0), +// topic: topic_hashes[0].clone(), +// signature: None, +// key: None, +// validated: true, +// }; +// gs.handle_received_message(raw_message.clone(), &PeerId::random()); + +// //emit gossip +// gs.emit_gossip(); + +// // Transform the inbound message +// let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + +// let msg_id = gs.config.message_id(message); +// //check that exactly config.gossip_lazy() many gossip messages were sent. 
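+//     // More precisely, with this many non-mesh peers the gossip_factor bound
+//     // dominates gossip_lazy: the assert below expects
+//     // floor(gossip_factor * (m - mesh_n_low)) IHAVE messages.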
+// assert_eq!( +// count_control_msgs(&gs, |_, action| match action { +// ControlAction::IHave { +// topic_hash, +// message_ids, +// } => topic_hash == &topic_hashes[0] && message_ids.iter().any(|id| id == &msg_id), +// _ => false, +// }), +// ((m - config.mesh_n_low()) as f64 * config.gossip_factor()) as usize +// ); +// } + +// #[test] +// fn test_accept_only_outbound_peer_grafts_when_mesh_full() { +// let config: Config = Config::default(); + +// //enough peers to fill the mesh +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(config.mesh_n_high()) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .create_network(); + +// // graft all the peers => this will fill the mesh +// for peer in peers { +// gs.handle_graft(&peer, topics.clone()); +// } + +// //assert current mesh size +// assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); + +// //create an outbound and an inbound peer +// let inbound = add_peer(&mut gs, &topics, false, false); +// let outbound = add_peer(&mut gs, &topics, true, false); + +// //send grafts +// gs.handle_graft(&inbound, vec![topics[0].clone()]); +// gs.handle_graft(&outbound, vec![topics[0].clone()]); + +// //assert mesh size +// assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high() + 1); + +// //inbound is not in mesh +// assert!(!gs.mesh[&topics[0]].contains(&inbound)); + +// //outbound is in mesh +// assert!(gs.mesh[&topics[0]].contains(&outbound)); +// } + +// #[test] +// fn test_do_not_remove_too_many_outbound_peers() { +// //use an extreme case to catch errors with high probability +// let m = 50; +// let n = 2 * m; +// let config = ConfigBuilder::default() +// .mesh_n_high(n) +// .mesh_n(n) +// .mesh_n_low(n) +// .mesh_outbound_min(m) +// .build() +// .unwrap(); + +// //fill the mesh with inbound connections +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(n) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config) +// .create_network(); + +// // graft all the peers +// for peer in peers { +// gs.handle_graft(&peer, topics.clone()); +// } + +// //create m outbound connections and graft (we will accept the graft) +// let mut outbound = HashSet::new(); +// for _ in 0..m { +// let peer = add_peer(&mut gs, &topics, true, false); +// outbound.insert(peer); +// gs.handle_graft(&peer, topics.clone()); +// } + +// //mesh is overly full +// assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n + m); + +// // run a heartbeat +// gs.heartbeat(); + +// // Peers should be removed to reach n +// assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n); + +// //all outbound peers are still in the mesh +// assert!(outbound.iter().all(|p| gs.mesh[&topics[0]].contains(p))); +// } + +// #[test] +// fn test_add_outbound_peers_if_min_is_not_satisfied() { +// let config: Config = Config::default(); + +// // Fill full mesh with inbound peers +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(config.mesh_n_high()) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .create_network(); + +// // graft all the peers +// for peer in peers { +// gs.handle_graft(&peer, topics.clone()); +// } + +// //create config.mesh_outbound_min() many outbound connections without grafting +// for _ in 0..config.mesh_outbound_min() { +// add_peer(&mut gs, &topics, true, false); +// } + +// // Nothing changed in the mesh yet +// assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); + +// // run a heartbeat +// gs.heartbeat(); + +// // The outbound peers got additionally added +// assert_eq!( +// 
gs.mesh[&topics[0]].len(), +// config.mesh_n_high() + config.mesh_outbound_min() +// ); +// } + +// #[test] +// fn test_prune_negative_scored_peers() { +// let config = Config::default(); + +// //build mesh with one peer +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config.clone()) +// .explicit(0) +// .outbound(0) +// .scoring(Some(( +// PeerScoreParams::default(), +// PeerScoreThresholds::default(), +// ))) +// .create_network(); + +// //add penalty to peer +// gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); + +// //execute heartbeat +// gs.heartbeat(); + +// //peer should not be in mesh anymore +// assert!(gs.mesh[&topics[0]].is_empty()); + +// //check prune message +// assert_eq!( +// count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] +// && match m { +// ControlAction::Prune { +// topic_hash, +// peers, +// backoff, +// } => +// topic_hash == &topics[0] && +// //no px in this case +// peers.is_empty() && +// backoff.unwrap() == config.prune_backoff().as_secs(), +// _ => false, +// }), +// 1 +// ); +// } + +// #[test] +// fn test_dont_graft_to_negative_scored_peers() { +// let config = Config::default(); +// //init full mesh +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(config.mesh_n_high()) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config) +// .scoring(Some(( +// PeerScoreParams::default(), +// PeerScoreThresholds::default(), +// ))) +// .create_network(); + +// //add two additional peers that will not be part of the mesh +// let p1 = add_peer(&mut gs, &topics, false, false); +// let p2 = add_peer(&mut gs, &topics, false, false); + +// //reduce score of p1 to negative +// gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 1); + +// //handle prunes of all other peers +// for p in peers { +// gs.handle_prune(&p, vec![(topics[0].clone(), Vec::new(), None)]); +// } + +// //heartbeat +// gs.heartbeat(); + +// //assert that mesh only contains p2 +// assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), 1); +// assert!(gs.mesh.get(&topics[0]).unwrap().contains(&p2)); +// } + +// ///Note that in this test also without a penalty the px would be ignored because of the +// /// acceptPXThreshold, but the spec still explicitly states the rule that px from negative +// /// peers should get ignored, therefore we test it here. +// #[test] +// fn test_ignore_px_from_negative_scored_peer() { +// let config = Config::default(); + +// //build mesh with one peer +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config.clone()) +// .scoring(Some(( +// PeerScoreParams::default(), +// PeerScoreThresholds::default(), +// ))) +// .create_network(); + +// //penalize peer +// gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); + +// //handle prune from single peer with px peers +// let px = vec![PeerInfo { +// peer_id: Some(PeerId::random()), +// }]; + +// gs.handle_prune( +// &peers[0], +// vec![( +// topics[0].clone(), +// px, +// Some(config.prune_backoff().as_secs()), +// )], +// ); + +// //assert no dials +// assert_eq!( +// gs.events +// .iter() +// .filter(|e| matches!(e, ToSwarm::Dial { .. 
})) +// .count(), +// 0 +// ); +// } + +// #[test] +// fn test_only_send_nonnegative_scoring_peers_in_px() { +// let config = ConfigBuilder::default() +// .prune_peers(16) +// .do_px() +// .build() +// .unwrap(); + +// // Build mesh with three peer +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(3) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config) +// .explicit(0) +// .outbound(0) +// .scoring(Some(( +// PeerScoreParams::default(), +// PeerScoreThresholds::default(), +// ))) +// .create_network(); + +// // Penalize first peer +// gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); + +// // Prune second peer +// gs.send_graft_prune( +// HashMap::new(), +// vec![(peers[1], vec![topics[0].clone()])] +// .into_iter() +// .collect(), +// HashSet::new(), +// ); + +// // Check that px in prune message only contains third peer +// assert_eq!( +// count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] +// && match m { +// ControlAction::Prune { +// topic_hash, +// peers: px, +// .. +// } => +// topic_hash == &topics[0] +// && px.len() == 1 +// && px[0].peer_id.as_ref().unwrap() == &peers[2], +// _ => false, +// }), +// 1 +// ); +// } + +// #[test] +// fn test_do_not_gossip_to_peers_below_gossip_threshold() { +// let config = Config::default(); +// let peer_score_params = PeerScoreParams::default(); +// let peer_score_thresholds = PeerScoreThresholds { +// gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, +// ..PeerScoreThresholds::default() +// }; + +// // Build full mesh +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(config.mesh_n_high()) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config) +// .scoring(Some((peer_score_params, peer_score_thresholds))) +// .create_network(); + +// // Graft all the peer +// for peer in peers { +// gs.handle_graft(&peer, topics.clone()); +// } + +// // Add two additional peers that will not be part of the mesh +// let p1 = add_peer(&mut gs, &topics, false, false); +// let p2 = add_peer(&mut gs, &topics, false, false); + +// // Reduce score of p1 below peer_score_thresholds.gossip_threshold +// // note that penalties get squared so two penalties means a score of +// // 4 * peer_score_params.behaviour_penalty_weight. 
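+//     // behaviour_penalty_weight is negative, so a score of 4 * weight falls below
+//     // gossip_threshold (3 * weight), while a score of 1 * weight does not.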
+// gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + +// // Reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold +// gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + +// // Receive message +// let raw_message = RawMessage { +// source: Some(PeerId::random()), +// data: vec![], +// sequence_number: Some(0), +// topic: topics[0].clone(), +// signature: None, +// key: None, +// validated: true, +// }; +// gs.handle_received_message(raw_message.clone(), &PeerId::random()); + +// // Transform the inbound message +// let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + +// let msg_id = gs.config.message_id(message); + +// // Emit gossip +// gs.emit_gossip(); + +// // Check that exactly one gossip messages got sent and it got sent to p2 +// assert_eq!( +// count_control_msgs(&gs, |peer, action| match action { +// ControlAction::IHave { +// topic_hash, +// message_ids, +// } => { +// if topic_hash == &topics[0] && message_ids.iter().any(|id| id == &msg_id) { +// assert_eq!(peer, &p2); +// true +// } else { +// false +// } +// } +// _ => false, +// }), +// 1 +// ); +// } + +// #[test] +// fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { +// let config = Config::default(); +// let peer_score_params = PeerScoreParams::default(); +// let peer_score_thresholds = PeerScoreThresholds { +// gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, +// ..PeerScoreThresholds::default() +// }; + +// // Build full mesh +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(config.mesh_n_high()) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config) +// .explicit(0) +// .outbound(0) +// .scoring(Some((peer_score_params, peer_score_thresholds))) +// .create_network(); + +// // Graft all the peer +// for peer in peers { +// gs.handle_graft(&peer, topics.clone()); +// } + +// // Add two additional peers that will not be part of the mesh +// let p1 = add_peer(&mut gs, &topics, false, false); +// let p2 = add_peer(&mut gs, &topics, false, false); + +// // Reduce score of p1 below peer_score_thresholds.gossip_threshold +// // note that penalties get squared so two penalties means a score of +// // 4 * peer_score_params.behaviour_penalty_weight. +// gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + +// // Reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold +// gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + +// // Receive message +// let raw_message = RawMessage { +// source: Some(PeerId::random()), +// data: vec![], +// sequence_number: Some(0), +// topic: topics[0].clone(), +// signature: None, +// key: None, +// validated: true, +// }; +// gs.handle_received_message(raw_message.clone(), &PeerId::random()); + +// // Transform the inbound message +// let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + +// let msg_id = gs.config.message_id(message); + +// gs.handle_iwant(&p1, vec![msg_id.clone()]); +// gs.handle_iwant(&p2, vec![msg_id.clone()]); + +// // the messages we are sending +// let sent_messages = gs +// .events +// .into_iter() +// .fold(vec![], |mut collected_messages, e| match e { +// ToSwarm::NotifyHandler { event, peer_id, .. 
} => {
+// if let HandlerIn::Message(RpcOut::Forward(message)) = event {
+// collected_messages.push((peer_id, message));
+// }
+// collected_messages
+// }
+// _ => collected_messages,
+// });
+
+// //the message was sent to p2
+// assert!(sent_messages
+// .iter()
+// .map(|(peer_id, msg)| (
+// peer_id,
+// gs.data_transform.inbound_transform(msg.clone()).unwrap()
+// ))
+// .any(|(peer_id, msg)| peer_id == &p2 && gs.config.message_id(&msg) == msg_id));
+// //the message was not sent to p1
+// assert!(sent_messages
+// .iter()
+// .map(|(peer_id, msg)| (
+// peer_id,
+// gs.data_transform.inbound_transform(msg.clone()).unwrap()
+// ))
+// .all(|(peer_id, msg)| !(peer_id == &p1 && gs.config.message_id(&msg) == msg_id)));
+// }
+
+// #[test]
+// fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() {
+// let config = Config::default();
+// let peer_score_params = PeerScoreParams::default();
+// let peer_score_thresholds = PeerScoreThresholds {
+// gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight,
+// ..PeerScoreThresholds::default()
+// };
+// //build full mesh
+// let (mut gs, peers, topics) = inject_nodes1()
+// .peer_no(config.mesh_n_high())
+// .topics(vec!["test".into()])
+// .to_subscribe(true)
+// .gs_config(config)
+// .explicit(0)
+// .outbound(0)
+// .scoring(Some((peer_score_params, peer_score_thresholds)))
+// .create_network();
+
+// // graft all the peers
+// for peer in peers {
+// gs.handle_graft(&peer, topics.clone());
+// }
+
+// //add two additional peers that will not be part of the mesh
+// let p1 = add_peer(&mut gs, &topics, false, false);
+// let p2 = add_peer(&mut gs, &topics, false, false);
+
+// //reduce score of p1 below peer_score_thresholds.gossip_threshold
+// //note that penalties get squared so two penalties means a score of
+// // 4 * peer_score_params.behaviour_penalty_weight.
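+// A sketch, assuming a plain score lookup, of the gate exercised here (the
+// real check lives inside the behaviour's IHAVE/IWANT handling): control
+// gossip from a peer is only honoured if its score clears the threshold.
+//
+// fn accepts_gossip_from(score: f64, gossip_threshold: f64) -> bool {
+//     // IHAVE announcements and IWANT requests from peers scoring below
+//     // the gossip threshold are ignored
+//     score >= gossip_threshold
+// }
+//
+// // Below, p1 ends up at 4 * w < 3 * w = gossip_threshold (w negative), so
+// // its IHAVE is ignored; p2 stays above the threshold and gets an IWANT.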
+// gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + +// //reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold +// gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + +// //message that other peers have +// let raw_message = RawMessage { +// source: Some(PeerId::random()), +// data: vec![], +// sequence_number: Some(0), +// topic: topics[0].clone(), +// signature: None, +// key: None, +// validated: true, +// }; + +// // Transform the inbound message +// let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + +// let msg_id = gs.config.message_id(message); + +// gs.handle_ihave(&p1, vec![(topics[0].clone(), vec![msg_id.clone()])]); +// gs.handle_ihave(&p2, vec![(topics[0].clone(), vec![msg_id.clone()])]); + +// // check that we sent exactly one IWANT request to p2 +// assert_eq!( +// count_control_msgs(&gs, |peer, c| match c { +// ControlAction::IWant { message_ids } => +// if message_ids.iter().any(|m| m == &msg_id) { +// assert_eq!(peer, &p2); +// true +// } else { +// false +// }, +// _ => false, +// }), +// 1 +// ); +// } + +// #[test] +// fn test_do_not_publish_to_peer_below_publish_threshold() { +// let config = ConfigBuilder::default() +// .flood_publish(false) +// .build() +// .unwrap(); +// let peer_score_params = PeerScoreParams::default(); +// let peer_score_thresholds = PeerScoreThresholds { +// gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, +// publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, +// ..PeerScoreThresholds::default() +// }; + +// //build mesh with no peers and no subscribed topics +// let (mut gs, _, _) = inject_nodes1() +// .gs_config(config) +// .scoring(Some((peer_score_params, peer_score_thresholds))) +// .create_network(); + +// //create a new topic for which we are not subscribed +// let topic = Topic::new("test"); +// let topics = vec![topic.hash()]; + +// //add two additional peers that will be added to the mesh +// let p1 = add_peer(&mut gs, &topics, false, false); +// let p2 = add_peer(&mut gs, &topics, false, false); + +// //reduce score of p1 below peer_score_thresholds.publish_threshold +// //note that penalties get squared so two penalties means a score of +// // 4 * peer_score_params.behaviour_penalty_weight. +// gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + +// //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold +// gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + +// //a heartbeat will remove the peers from the mesh +// gs.heartbeat(); + +// // publish on topic +// let publish_data = vec![0; 42]; +// gs.publish(topic, publish_data).unwrap(); + +// // Collect all publish messages +// let publishes = gs +// .events +// .into_iter() +// .fold(vec![], |mut collected_publish, e| match e { +// ToSwarm::NotifyHandler { event, peer_id, .. 
} => { +// if let HandlerIn::Message(RpcOut::Publish(message)) = event { +// collected_publish.push((peer_id, message)); +// } +// collected_publish +// } +// _ => collected_publish, +// }); + +// //assert only published to p2 +// assert_eq!(publishes.len(), 1); +// assert_eq!(publishes[0].0, p2); +// } + +// #[test] +// fn test_do_not_flood_publish_to_peer_below_publish_threshold() { +// let config = Config::default(); +// let peer_score_params = PeerScoreParams::default(); +// let peer_score_thresholds = PeerScoreThresholds { +// gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, +// publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, +// ..PeerScoreThresholds::default() +// }; +// //build mesh with no peers +// let (mut gs, _, topics) = inject_nodes1() +// .topics(vec!["test".into()]) +// .gs_config(config) +// .scoring(Some((peer_score_params, peer_score_thresholds))) +// .create_network(); + +// //add two additional peers that will be added to the mesh +// let p1 = add_peer(&mut gs, &topics, false, false); +// let p2 = add_peer(&mut gs, &topics, false, false); + +// //reduce score of p1 below peer_score_thresholds.publish_threshold +// //note that penalties get squared so two penalties means a score of +// // 4 * peer_score_params.behaviour_penalty_weight. +// gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + +// //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold +// gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + +// //a heartbeat will remove the peers from the mesh +// gs.heartbeat(); + +// // publish on topic +// let publish_data = vec![0; 42]; +// gs.publish(Topic::new("test"), publish_data).unwrap(); + +// // Collect all publish messages +// let publishes = gs +// .events +// .into_iter() +// .fold(vec![], |mut collected_publish, e| match e { +// ToSwarm::NotifyHandler { event, peer_id, .. } => { +// if let HandlerIn::Message(RpcOut::Publish(message)) = event { +// collected_publish.push((peer_id, message)); +// } +// collected_publish +// } +// _ => collected_publish, +// }); + +// //assert only published to p2 +// assert_eq!(publishes.len(), 1); +// assert!(publishes[0].0 == p2); +// } + +// #[test] +// fn test_ignore_rpc_from_peers_below_graylist_threshold() { +// let config = Config::default(); +// let peer_score_params = PeerScoreParams::default(); +// let peer_score_thresholds = PeerScoreThresholds { +// gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, +// publish_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, +// graylist_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, +// ..PeerScoreThresholds::default() +// }; + +// //build mesh with no peers +// let (mut gs, _, topics) = inject_nodes1() +// .topics(vec!["test".into()]) +// .gs_config(config.clone()) +// .scoring(Some((peer_score_params, peer_score_thresholds))) +// .create_network(); + +// //add two additional peers that will be added to the mesh +// let p1 = add_peer(&mut gs, &topics, false, false); +// let p2 = add_peer(&mut gs, &topics, false, false); + +// //reduce score of p1 below peer_score_thresholds.graylist_threshold +// //note that penalties get squared so two penalties means a score of +// // 4 * peer_score_params.behaviour_penalty_weight. 
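+// A sketch (hypothetical enum, simplified from checks spread through the
+// behaviour) of how the three negative thresholds used in these tests
+// relate; they are ordered gossip_threshold >= publish_threshold >=
+// graylist_threshold:
+//
+// enum Handling { Full, NoGossip, NoPublish, Graylisted }
+//
+// fn handling_for(score: f64, t: &PeerScoreThresholds) -> Handling {
+//     if score < t.graylist_threshold {
+//         Handling::Graylisted // drop the peer's RPCs except subscriptions
+//     } else if score < t.publish_threshold {
+//         Handling::NoPublish // also skip this peer when (flood-)publishing
+//     } else if score < t.gossip_threshold {
+//         Handling::NoGossip // no IHAVE/IWANT exchange with this peer
+//     } else {
+//         Handling::Full
+//     }
+// }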
+// gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + +// //reduce score of p2 below publish_threshold but not below graylist_threshold +// gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + +// let raw_message1 = RawMessage { +// source: Some(PeerId::random()), +// data: vec![1, 2, 3, 4], +// sequence_number: Some(1u64), +// topic: topics[0].clone(), +// signature: None, +// key: None, +// validated: true, +// }; + +// let raw_message2 = RawMessage { +// source: Some(PeerId::random()), +// data: vec![1, 2, 3, 4, 5], +// sequence_number: Some(2u64), +// topic: topics[0].clone(), +// signature: None, +// key: None, +// validated: true, +// }; + +// let raw_message3 = RawMessage { +// source: Some(PeerId::random()), +// data: vec![1, 2, 3, 4, 5, 6], +// sequence_number: Some(3u64), +// topic: topics[0].clone(), +// signature: None, +// key: None, +// validated: true, +// }; + +// let raw_message4 = RawMessage { +// source: Some(PeerId::random()), +// data: vec![1, 2, 3, 4, 5, 6, 7], +// sequence_number: Some(4u64), +// topic: topics[0].clone(), +// signature: None, +// key: None, +// validated: true, +// }; + +// // Transform the inbound message +// let message2 = &gs.data_transform.inbound_transform(raw_message2).unwrap(); + +// // Transform the inbound message +// let message4 = &gs.data_transform.inbound_transform(raw_message4).unwrap(); + +// let subscription = Subscription { +// action: SubscriptionAction::Subscribe, +// topic_hash: topics[0].clone(), +// }; + +// let control_action = ControlAction::IHave { +// topic_hash: topics[0].clone(), +// message_ids: vec![config.message_id(message2)], +// }; + +// //clear events +// gs.events.clear(); + +// //receive from p1 +// gs.on_connection_handler_event( +// p1, +// ConnectionId::new_unchecked(0), +// HandlerEvent::Message { +// rpc: Rpc { +// messages: vec![raw_message1], +// subscriptions: vec![subscription.clone()], +// control_msgs: vec![control_action], +// }, +// invalid_messages: Vec::new(), +// }, +// ); + +// //only the subscription event gets processed, the rest is dropped +// assert_eq!(gs.events.len(), 1); +// assert!(matches!( +// gs.events[0], +// ToSwarm::GenerateEvent(Event::Subscribed { .. 
})
+// ));
+
+// let control_action = ControlAction::IHave {
+// topic_hash: topics[0].clone(),
+// message_ids: vec![config.message_id(message4)],
+// };
+
+// //receive from p2
+// gs.on_connection_handler_event(
+// p2,
+// ConnectionId::new_unchecked(0),
+// HandlerEvent::Message {
+// rpc: Rpc {
+// messages: vec![raw_message3],
+// subscriptions: vec![subscription],
+// control_msgs: vec![control_action],
+// },
+// invalid_messages: Vec::new(),
+// },
+// );
+
+// //events got processed
+// assert!(gs.events.len() > 1);
+// }
+
+// #[test]
+// fn test_ignore_px_from_peers_below_accept_px_threshold() {
+// let config = ConfigBuilder::default().prune_peers(16).build().unwrap();
+// let peer_score_params = PeerScoreParams::default();
+// let peer_score_thresholds = PeerScoreThresholds {
+// accept_px_threshold: peer_score_params.app_specific_weight,
+// ..PeerScoreThresholds::default()
+// };
+// // Build mesh with two peers
+// let (mut gs, peers, topics) = inject_nodes1()
+// .peer_no(2)
+// .topics(vec!["test".into()])
+// .to_subscribe(true)
+// .gs_config(config.clone())
+// .scoring(Some((peer_score_params, peer_score_thresholds)))
+// .create_network();
+
+// // Decrease score of first peer to less than accept_px_threshold
+// gs.set_application_score(&peers[0], 0.99);
+
+// // Increase score of second peer to accept_px_threshold
+// gs.set_application_score(&peers[1], 1.0);
+
+// // Handle prune from peer peers[0] with px peers
+// let px = vec![PeerInfo {
+// peer_id: Some(PeerId::random()),
+// }];
+// gs.handle_prune(
+// &peers[0],
+// vec![(
+// topics[0].clone(),
+// px,
+// Some(config.prune_backoff().as_secs()),
+// )],
+// );
+
+// // Assert no dials
+// assert_eq!(
+// gs.events
+// .iter()
+// .filter(|e| matches!(e, ToSwarm::Dial { .. }))
+// .count(),
+// 0
+// );
+
+// //handle prune from peer peers[1] with px peers
+// let px = vec![PeerInfo {
+// peer_id: Some(PeerId::random()),
+// }];
+// gs.handle_prune(
+// &peers[1],
+// vec![(
+// topics[0].clone(),
+// px,
+// Some(config.prune_backoff().as_secs()),
+// )],
+// );
+
+// //assert there are dials now
+// assert!(
+// gs.events
+// .iter()
+// .filter(|e| matches!(e, ToSwarm::Dial { .. }))
+// .count()
+// > 0
+// );
+// }
+
+// #[test]
+// fn test_keep_best_scoring_peers_on_oversubscription() {
+// let config = ConfigBuilder::default()
+// .mesh_n_low(15)
+// .mesh_n(30)
+// .mesh_n_high(60)
+// .retain_scores(29)
+// .build()
+// .unwrap();
+
+// //build mesh with more peers than the mesh can hold
+// let n = config.mesh_n_high() + 1;
+// let (mut gs, peers, topics) = inject_nodes1()
+// .peer_no(n)
+// .topics(vec!["test".into()])
+// .to_subscribe(true)
+// .gs_config(config.clone())
+// .explicit(0)
+// .outbound(n)
+// .scoring(Some((
+// PeerScoreParams::default(),
+// PeerScoreThresholds::default(),
+// )))
+// .create_network();
+
+// // graft all, will be accepted since they are outbound
+// for peer in &peers {
+// gs.handle_graft(peer, topics.clone());
+// }
+
+// //assign each peer a positive score equal to its index
+// for (index, peer) in peers.iter().enumerate() {
+// gs.set_application_score(peer, index as f64);
+// }
+
+// assert_eq!(gs.mesh[&topics[0]].len(), n);
+
+// //heartbeat to prune some peers
+// gs.heartbeat();
+
+// assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n());
+
+// //mesh contains the retain_scores best peers
+// assert!(gs.mesh[&topics[0]].is_superset(
+// &peers[(n - config.retain_scores())..]
+// .iter() +// .cloned() +// .collect() +// )); +// } + +// #[test] +// fn test_scoring_p1() { +// let config = Config::default(); +// let mut peer_score_params = PeerScoreParams::default(); +// let topic = Topic::new("test"); +// let topic_hash = topic.hash(); +// let topic_params = TopicScoreParams { +// time_in_mesh_weight: 2.0, +// time_in_mesh_quantum: Duration::from_millis(50), +// time_in_mesh_cap: 10.0, +// topic_weight: 0.7, +// ..TopicScoreParams::default() +// }; +// peer_score_params +// .topics +// .insert(topic_hash, topic_params.clone()); +// let peer_score_thresholds = PeerScoreThresholds::default(); + +// //build mesh with one peer +// let (mut gs, peers, _) = inject_nodes1() +// .peer_no(1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config) +// .explicit(0) +// .outbound(0) +// .scoring(Some((peer_score_params, peer_score_thresholds))) +// .create_network(); + +// //sleep for 2 times the mesh_quantum +// sleep(topic_params.time_in_mesh_quantum * 2); +// //refresh scores +// gs.peer_score.as_mut().unwrap().0.refresh_scores(); +// assert!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]) +// >= 2.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight, +// "score should be at least 2 * time_in_mesh_weight * topic_weight" +// ); +// assert!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]) +// < 3.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight, +// "score should be less than 3 * time_in_mesh_weight * topic_weight" +// ); + +// //sleep again for 2 times the mesh_quantum +// sleep(topic_params.time_in_mesh_quantum * 2); +// //refresh scores +// gs.peer_score.as_mut().unwrap().0.refresh_scores(); +// assert!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]) +// >= 2.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight, +// "score should be at least 4 * time_in_mesh_weight * topic_weight" +// ); + +// //sleep for enough periods to reach maximum +// sleep(topic_params.time_in_mesh_quantum * (topic_params.time_in_mesh_cap - 3.0) as u32); +// //refresh scores +// gs.peer_score.as_mut().unwrap().0.refresh_scores(); +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]), +// topic_params.time_in_mesh_cap +// * topic_params.time_in_mesh_weight +// * topic_params.topic_weight, +// "score should be exactly time_in_mesh_cap * time_in_mesh_weight * topic_weight" +// ); +// } + +// fn random_message(seq: &mut u64, topics: &[TopicHash]) -> RawMessage { +// let mut rng = rand::thread_rng(); +// *seq += 1; +// RawMessage { +// source: Some(PeerId::random()), +// data: (0..rng.gen_range(10..30)).map(|_| rng.gen()).collect(), +// sequence_number: Some(*seq), +// topic: topics[rng.gen_range(0..topics.len())].clone(), +// signature: None, +// key: None, +// validated: true, +// } +// } + +// #[test] +// fn test_scoring_p2() { +// let config = Config::default(); +// let mut peer_score_params = PeerScoreParams::default(); +// let topic = Topic::new("test"); +// let topic_hash = topic.hash(); +// let topic_params = TopicScoreParams { +// time_in_mesh_weight: 0.0, //deactivate time in mesh +// first_message_deliveries_weight: 2.0, +// first_message_deliveries_cap: 10.0, +// first_message_deliveries_decay: 0.9, +// topic_weight: 0.7, +// ..TopicScoreParams::default() +// }; +// peer_score_params +// .topics +// .insert(topic_hash, topic_params.clone()); +// let peer_score_thresholds = PeerScoreThresholds::default(); + +// //build mesh with one peer +// let (mut gs, peers, topics) = inject_nodes1() 
+// .peer_no(2)
+// .topics(vec!["test".into()])
+// .to_subscribe(true)
+// .gs_config(config)
+// .explicit(0)
+// .outbound(0)
+// .scoring(Some((peer_score_params, peer_score_thresholds)))
+// .create_network();
+
+// let mut seq = 0;
+// let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| {
+// gs.handle_received_message(msg, &peers[index]);
+// };
+
+// let m1 = random_message(&mut seq, &topics);
+// //peer 0 delivers the message first
+// deliver_message(&mut gs, 0, m1.clone());
+// //peer 1 delivers the message second
+// deliver_message(&mut gs, 1, m1);
+
+// assert_eq!(
+// gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
+// 1.0 * topic_params.first_message_deliveries_weight * topic_params.topic_weight,
+// "score should be exactly first_message_deliveries_weight * topic_weight"
+// );
+
+// assert_eq!(
+// gs.peer_score.as_ref().unwrap().0.score(&peers[1]),
+// 0.0,
+// "duplicate message deliveries should not add to the score"
+// );
+
+// //peer 1 delivers two new messages
+// deliver_message(&mut gs, 1, random_message(&mut seq, &topics));
+// deliver_message(&mut gs, 1, random_message(&mut seq, &topics));
+// assert_eq!(
+// gs.peer_score.as_ref().unwrap().0.score(&peers[1]),
+// 2.0 * topic_params.first_message_deliveries_weight * topic_params.topic_weight,
+// "score should be exactly 2 * first_message_deliveries_weight * topic_weight"
+// );
+
+// //test decaying
+// gs.peer_score.as_mut().unwrap().0.refresh_scores();
+
+// assert_eq!(
+// gs.peer_score.as_ref().unwrap().0.score(&peers[0]),
+// 1.0 * topic_params.first_message_deliveries_decay
+// * topic_params.first_message_deliveries_weight
+// * topic_params.topic_weight,
+// "score should be exactly first_message_deliveries_decay * \
+// first_message_deliveries_weight * topic_weight"
+// );
+
+// assert_eq!(
+// gs.peer_score.as_ref().unwrap().0.score(&peers[1]),
+// 2.0 * topic_params.first_message_deliveries_decay
+// * topic_params.first_message_deliveries_weight
+// * topic_params.topic_weight,
+// "score should be exactly 2 * first_message_deliveries_decay * \
+// first_message_deliveries_weight * topic_weight"
+// );
+
+// //test cap
+// for _ in 0..topic_params.first_message_deliveries_cap as u64 {
+// deliver_message(&mut gs, 1, random_message(&mut seq, &topics));
+// }
+
+// assert_eq!(
+// gs.peer_score.as_ref().unwrap().0.score(&peers[1]),
+// topic_params.first_message_deliveries_cap
+// * topic_params.first_message_deliveries_weight
+// * topic_params.topic_weight,
+// "score should be exactly first_message_deliveries_cap * \
+// first_message_deliveries_weight * topic_weight"
+// );
+// }
+
+// #[test]
+// fn test_scoring_p3() {
+// let config = Config::default();
+// let mut peer_score_params = PeerScoreParams::default();
+// let topic = Topic::new("test");
+// let topic_hash = topic.hash();
+// let topic_params = TopicScoreParams {
+// time_in_mesh_weight: 0.0, //deactivate time in mesh
+// first_message_deliveries_weight: 0.0, //deactivate first time deliveries
+// mesh_message_deliveries_weight: -2.0,
+// mesh_message_deliveries_decay: 0.9,
+// mesh_message_deliveries_cap: 10.0,
+// mesh_message_deliveries_threshold: 5.0,
+// mesh_message_deliveries_activation: Duration::from_secs(1),
+// mesh_message_deliveries_window: Duration::from_millis(100),
+// topic_weight: 0.7,
+// ..TopicScoreParams::default()
+// };
+// peer_score_params.topics.insert(topic_hash, topic_params);
+// let peer_score_thresholds = PeerScoreThresholds::default();
+
+// //build mesh with two peers
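+// A sketch (hypothetical helper, not the real scoring internals) of the P3
+// penalty the assertions below expect: once mesh_message_deliveries_activation
+// has passed, a mesh peer whose (decayed) delivery counter is under
+// mesh_message_deliveries_threshold is penalised by the squared deficit.
+//
+// fn p3_term(deliveries: f64, threshold: f64, weight: f64, topic_weight: f64) -> f64 {
+//     if deliveries >= threshold {
+//         return 0.0;
+//     }
+//     let deficit = threshold - deliveries;
+//     // with the parameters above: (5.0 - deliveries)^2 * -2.0 * 0.7
+//     deficit * deficit * weight * topic_weight
+// }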
+// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(2) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config) +// .explicit(0) +// .outbound(0) +// .scoring(Some((peer_score_params, peer_score_thresholds))) +// .create_network(); + +// let mut seq = 0; +// let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| { +// gs.handle_received_message(msg, &peers[index]); +// }; + +// let mut expected_message_deliveries = 0.0; + +// //messages used to test window +// let m1 = random_message(&mut seq, &topics); +// let m2 = random_message(&mut seq, &topics); + +// //peer 1 delivers m1 +// deliver_message(&mut gs, 1, m1.clone()); + +// //peer 0 delivers two message +// deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); +// deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); +// expected_message_deliveries += 2.0; + +// sleep(Duration::from_millis(60)); + +// //peer 1 delivers m2 +// deliver_message(&mut gs, 1, m2.clone()); + +// sleep(Duration::from_millis(70)); +// //peer 0 delivers m1 and m2 only m2 gets counted +// deliver_message(&mut gs, 0, m1); +// deliver_message(&mut gs, 0, m2); +// expected_message_deliveries += 1.0; + +// sleep(Duration::from_millis(900)); + +// //message deliveries penalties get activated, peer 0 has only delivered 3 messages and +// // therefore gets a penalty +// gs.peer_score.as_mut().unwrap().0.refresh_scores(); +// expected_message_deliveries *= 0.9; //decay + +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]), +// (5f64 - expected_message_deliveries).powi(2) * -2.0 * 0.7 +// ); + +// // peer 0 delivers a lot of messages => message_deliveries should be capped at 10 +// for _ in 0..20 { +// deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); +// } + +// expected_message_deliveries = 10.0; + +// assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + +// //apply 10 decays +// for _ in 0..10 { +// gs.peer_score.as_mut().unwrap().0.refresh_scores(); +// expected_message_deliveries *= 0.9; //decay +// } + +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]), +// (5f64 - expected_message_deliveries).powi(2) * -2.0 * 0.7 +// ); +// } + +// #[test] +// fn test_scoring_p3b() { +// let config = ConfigBuilder::default() +// .prune_backoff(Duration::from_millis(100)) +// .build() +// .unwrap(); +// let mut peer_score_params = PeerScoreParams::default(); +// let topic = Topic::new("test"); +// let topic_hash = topic.hash(); +// let topic_params = TopicScoreParams { +// time_in_mesh_weight: 0.0, //deactivate time in mesh +// first_message_deliveries_weight: 0.0, //deactivate first time deliveries +// mesh_message_deliveries_weight: -2.0, +// mesh_message_deliveries_decay: 0.9, +// mesh_message_deliveries_cap: 10.0, +// mesh_message_deliveries_threshold: 5.0, +// mesh_message_deliveries_activation: Duration::from_secs(1), +// mesh_message_deliveries_window: Duration::from_millis(100), +// mesh_failure_penalty_weight: -3.0, +// mesh_failure_penalty_decay: 0.95, +// topic_weight: 0.7, +// ..Default::default() +// }; +// peer_score_params.topics.insert(topic_hash, topic_params); +// peer_score_params.app_specific_weight = 1.0; +// let peer_score_thresholds = PeerScoreThresholds::default(); + +// //build mesh with one peer +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config) +// .explicit(0) +// .outbound(0) +// .scoring(Some((peer_score_params, 
peer_score_thresholds))) +// .create_network(); + +// let mut seq = 0; +// let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| { +// gs.handle_received_message(msg, &peers[index]); +// }; + +// let mut expected_message_deliveries = 0.0; + +// //add some positive score +// gs.peer_score +// .as_mut() +// .unwrap() +// .0 +// .set_application_score(&peers[0], 100.0); + +// //peer 0 delivers two message +// deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); +// deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); +// expected_message_deliveries += 2.0; + +// sleep(Duration::from_millis(1050)); + +// //activation kicks in +// gs.peer_score.as_mut().unwrap().0.refresh_scores(); +// expected_message_deliveries *= 0.9; //decay + +// //prune peer +// gs.handle_prune(&peers[0], vec![(topics[0].clone(), vec![], None)]); + +// //wait backoff +// sleep(Duration::from_millis(130)); + +// //regraft peer +// gs.handle_graft(&peers[0], topics.clone()); + +// //the score should now consider p3b +// let mut expected_b3 = (5f64 - expected_message_deliveries).powi(2); +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]), +// 100.0 + expected_b3 * -3.0 * 0.7 +// ); + +// //we can also add a new p3 to the score + +// //peer 0 delivers one message +// deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); +// expected_message_deliveries += 1.0; + +// sleep(Duration::from_millis(1050)); +// gs.peer_score.as_mut().unwrap().0.refresh_scores(); +// expected_message_deliveries *= 0.9; //decay +// expected_b3 *= 0.95; + +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]), +// 100.0 + (expected_b3 * -3.0 + (5f64 - expected_message_deliveries).powi(2) * -2.0) * 0.7 +// ); +// } + +// #[test] +// fn test_scoring_p4_valid_message() { +// let config = ConfigBuilder::default() +// .validate_messages() +// .build() +// .unwrap(); +// let mut peer_score_params = PeerScoreParams::default(); +// let topic = Topic::new("test"); +// let topic_hash = topic.hash(); +// let topic_params = TopicScoreParams { +// time_in_mesh_weight: 0.0, //deactivate time in mesh +// first_message_deliveries_weight: 0.0, //deactivate first time deliveries +// mesh_message_deliveries_weight: 0.0, //deactivate message deliveries +// mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties +// invalid_message_deliveries_weight: -2.0, +// invalid_message_deliveries_decay: 0.9, +// topic_weight: 0.7, +// ..Default::default() +// }; +// peer_score_params.topics.insert(topic_hash, topic_params); +// peer_score_params.app_specific_weight = 1.0; +// let peer_score_thresholds = PeerScoreThresholds::default(); + +// //build mesh with two peers +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config.clone()) +// .explicit(0) +// .outbound(0) +// .scoring(Some((peer_score_params, peer_score_thresholds))) +// .create_network(); + +// let mut seq = 0; +// let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| { +// gs.handle_received_message(msg, &peers[index]); +// }; + +// //peer 0 delivers valid message +// let m1 = random_message(&mut seq, &topics); +// deliver_message(&mut gs, 0, m1.clone()); + +// // Transform the inbound message +// let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); + +// assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + +// //message m1 gets validated +// gs.report_message_validation_result( +// 
&config.message_id(message1), +// &peers[0], +// MessageAcceptance::Accept, +// ) +// .unwrap(); + +// assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); +// } + +// #[test] +// fn test_scoring_p4_invalid_signature() { +// let config = ConfigBuilder::default() +// .validate_messages() +// .build() +// .unwrap(); +// let mut peer_score_params = PeerScoreParams::default(); +// let topic = Topic::new("test"); +// let topic_hash = topic.hash(); +// let topic_params = TopicScoreParams { +// time_in_mesh_weight: 0.0, //deactivate time in mesh +// first_message_deliveries_weight: 0.0, //deactivate first time deliveries +// mesh_message_deliveries_weight: 0.0, //deactivate message deliveries +// mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties +// invalid_message_deliveries_weight: -2.0, +// invalid_message_deliveries_decay: 0.9, +// topic_weight: 0.7, +// ..Default::default() +// }; +// peer_score_params.topics.insert(topic_hash, topic_params); +// peer_score_params.app_specific_weight = 1.0; +// let peer_score_thresholds = PeerScoreThresholds::default(); + +// //build mesh with one peer +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config) +// .explicit(0) +// .outbound(0) +// .scoring(Some((peer_score_params, peer_score_thresholds))) +// .create_network(); + +// let mut seq = 0; + +// //peer 0 delivers message with invalid signature +// let m = random_message(&mut seq, &topics); + +// gs.on_connection_handler_event( +// peers[0], +// ConnectionId::new_unchecked(0), +// HandlerEvent::Message { +// rpc: Rpc { +// messages: vec![], +// subscriptions: vec![], +// control_msgs: vec![], +// }, +// invalid_messages: vec![(m, ValidationError::InvalidSignature)], +// }, +// ); + +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]), +// -2.0 * 0.7 +// ); +// } + +// #[test] +// fn test_scoring_p4_message_from_self() { +// let config = ConfigBuilder::default() +// .validate_messages() +// .build() +// .unwrap(); +// let mut peer_score_params = PeerScoreParams::default(); +// let topic = Topic::new("test"); +// let topic_hash = topic.hash(); +// let topic_params = TopicScoreParams { +// time_in_mesh_weight: 0.0, //deactivate time in mesh +// first_message_deliveries_weight: 0.0, //deactivate first time deliveries +// mesh_message_deliveries_weight: 0.0, //deactivate message deliveries +// mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties +// invalid_message_deliveries_weight: -2.0, +// invalid_message_deliveries_decay: 0.9, +// topic_weight: 0.7, +// ..Default::default() +// }; +// peer_score_params.topics.insert(topic_hash, topic_params); +// peer_score_params.app_specific_weight = 1.0; +// let peer_score_thresholds = PeerScoreThresholds::default(); + +// //build mesh with two peers +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config) +// .explicit(0) +// .outbound(0) +// .scoring(Some((peer_score_params, peer_score_thresholds))) +// .create_network(); + +// let mut seq = 0; +// let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| { +// gs.handle_received_message(msg, &peers[index]); +// }; + +// //peer 0 delivers invalid message from self +// let mut m = random_message(&mut seq, &topics); +// m.source = Some(*gs.publish_config.get_own_id().unwrap()); + +// deliver_message(&mut gs, 0, m); +// assert_eq!( +// 
gs.peer_score.as_ref().unwrap().0.score(&peers[0]), +// -2.0 * 0.7 +// ); +// } + +// #[test] +// fn test_scoring_p4_ignored_message() { +// let config = ConfigBuilder::default() +// .validate_messages() +// .build() +// .unwrap(); +// let mut peer_score_params = PeerScoreParams::default(); +// let topic = Topic::new("test"); +// let topic_hash = topic.hash(); +// let topic_params = TopicScoreParams { +// time_in_mesh_weight: 0.0, //deactivate time in mesh +// first_message_deliveries_weight: 0.0, //deactivate first time deliveries +// mesh_message_deliveries_weight: 0.0, //deactivate message deliveries +// mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties +// invalid_message_deliveries_weight: -2.0, +// invalid_message_deliveries_decay: 0.9, +// topic_weight: 0.7, +// ..Default::default() +// }; +// peer_score_params.topics.insert(topic_hash, topic_params); +// peer_score_params.app_specific_weight = 1.0; +// let peer_score_thresholds = PeerScoreThresholds::default(); + +// //build mesh with two peers +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config.clone()) +// .explicit(0) +// .outbound(0) +// .scoring(Some((peer_score_params, peer_score_thresholds))) +// .create_network(); + +// let mut seq = 0; +// let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| { +// gs.handle_received_message(msg, &peers[index]); +// }; + +// //peer 0 delivers ignored message +// let m1 = random_message(&mut seq, &topics); +// deliver_message(&mut gs, 0, m1.clone()); + +// assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + +// // Transform the inbound message +// let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); + +// //message m1 gets ignored +// gs.report_message_validation_result( +// &config.message_id(message1), +// &peers[0], +// MessageAcceptance::Ignore, +// ) +// .unwrap(); + +// assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); +// } + +// #[test] +// fn test_scoring_p4_application_invalidated_message() { +// let config = ConfigBuilder::default() +// .validate_messages() +// .build() +// .unwrap(); +// let mut peer_score_params = PeerScoreParams::default(); +// let topic = Topic::new("test"); +// let topic_hash = topic.hash(); +// let topic_params = TopicScoreParams { +// time_in_mesh_weight: 0.0, //deactivate time in mesh +// first_message_deliveries_weight: 0.0, //deactivate first time deliveries +// mesh_message_deliveries_weight: 0.0, //deactivate message deliveries +// mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties +// invalid_message_deliveries_weight: -2.0, +// invalid_message_deliveries_decay: 0.9, +// topic_weight: 0.7, +// ..Default::default() +// }; +// peer_score_params.topics.insert(topic_hash, topic_params); +// peer_score_params.app_specific_weight = 1.0; +// let peer_score_thresholds = PeerScoreThresholds::default(); + +// //build mesh with two peers +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config.clone()) +// .explicit(0) +// .outbound(0) +// .scoring(Some((peer_score_params, peer_score_thresholds))) +// .create_network(); + +// let mut seq = 0; +// let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| { +// gs.handle_received_message(msg, &peers[index]); +// }; + +// //peer 0 delivers invalid message +// let m1 = random_message(&mut seq, &topics); +// 
deliver_message(&mut gs, 0, m1.clone()); + +// assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + +// // Transform the inbound message +// let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); + +// //message m1 gets rejected +// gs.report_message_validation_result( +// &config.message_id(message1), +// &peers[0], +// MessageAcceptance::Reject, +// ) +// .unwrap(); + +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]), +// -2.0 * 0.7 +// ); +// } + +// #[test] +// fn test_scoring_p4_application_invalid_message_from_two_peers() { +// let config = ConfigBuilder::default() +// .validate_messages() +// .build() +// .unwrap(); +// let mut peer_score_params = PeerScoreParams::default(); +// let topic = Topic::new("test"); +// let topic_hash = topic.hash(); +// let topic_params = TopicScoreParams { +// time_in_mesh_weight: 0.0, //deactivate time in mesh +// first_message_deliveries_weight: 0.0, //deactivate first time deliveries +// mesh_message_deliveries_weight: 0.0, //deactivate message deliveries +// mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties +// invalid_message_deliveries_weight: -2.0, +// invalid_message_deliveries_decay: 0.9, +// topic_weight: 0.7, +// ..Default::default() +// }; +// peer_score_params.topics.insert(topic_hash, topic_params); +// peer_score_params.app_specific_weight = 1.0; +// let peer_score_thresholds = PeerScoreThresholds::default(); + +// //build mesh with two peers +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(2) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config.clone()) +// .explicit(0) +// .outbound(0) +// .scoring(Some((peer_score_params, peer_score_thresholds))) +// .create_network(); + +// let mut seq = 0; +// let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| { +// gs.handle_received_message(msg, &peers[index]); +// }; + +// //peer 0 delivers invalid message +// let m1 = random_message(&mut seq, &topics); +// deliver_message(&mut gs, 0, m1.clone()); + +// // Transform the inbound message +// let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + +// //peer 1 delivers same message +// deliver_message(&mut gs, 1, m1); + +// assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); +// assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[1]), 0.0); + +// //message m1 gets rejected +// gs.report_message_validation_result( +// &config.message_id(message1), +// &peers[0], +// MessageAcceptance::Reject, +// ) +// .unwrap(); + +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]), +// -2.0 * 0.7 +// ); +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[1]), +// -2.0 * 0.7 +// ); +// } + +// #[test] +// fn test_scoring_p4_three_application_invalid_messages() { +// let config = ConfigBuilder::default() +// .validate_messages() +// .build() +// .unwrap(); +// let mut peer_score_params = PeerScoreParams::default(); +// let topic = Topic::new("test"); +// let topic_hash = topic.hash(); +// let topic_params = TopicScoreParams { +// time_in_mesh_weight: 0.0, //deactivate time in mesh +// first_message_deliveries_weight: 0.0, //deactivate first time deliveries +// mesh_message_deliveries_weight: 0.0, //deactivate message deliveries +// mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties +// invalid_message_deliveries_weight: -2.0, +// invalid_message_deliveries_decay: 0.9, +// topic_weight: 0.7, +// ..Default::default() +// }; +// 
peer_score_params.topics.insert(topic_hash, topic_params); +// peer_score_params.app_specific_weight = 1.0; +// let peer_score_thresholds = PeerScoreThresholds::default(); + +// //build mesh with one peer +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config.clone()) +// .explicit(0) +// .outbound(0) +// .scoring(Some((peer_score_params, peer_score_thresholds))) +// .create_network(); + +// let mut seq = 0; +// let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| { +// gs.handle_received_message(msg, &peers[index]); +// }; + +// //peer 0 delivers two invalid message +// let m1 = random_message(&mut seq, &topics); +// let m2 = random_message(&mut seq, &topics); +// let m3 = random_message(&mut seq, &topics); +// deliver_message(&mut gs, 0, m1.clone()); +// deliver_message(&mut gs, 0, m2.clone()); +// deliver_message(&mut gs, 0, m3.clone()); + +// // Transform the inbound message +// let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); + +// // Transform the inbound message +// let message2 = &gs.data_transform.inbound_transform(m2).unwrap(); +// // Transform the inbound message +// let message3 = &gs.data_transform.inbound_transform(m3).unwrap(); + +// assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + +// //messages gets rejected +// gs.report_message_validation_result( +// &config.message_id(message1), +// &peers[0], +// MessageAcceptance::Reject, +// ) +// .unwrap(); +// gs.report_message_validation_result( +// &config.message_id(message2), +// &peers[0], +// MessageAcceptance::Reject, +// ) +// .unwrap(); +// gs.report_message_validation_result( +// &config.message_id(message3), +// &peers[0], +// MessageAcceptance::Reject, +// ) +// .unwrap(); + +// //number of invalid messages gets squared +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]), +// 9.0 * -2.0 * 0.7 +// ); +// } + +// #[test] +// fn test_scoring_p4_decay() { +// let config = ConfigBuilder::default() +// .validate_messages() +// .build() +// .unwrap(); +// let mut peer_score_params = PeerScoreParams::default(); +// let topic = Topic::new("test"); +// let topic_hash = topic.hash(); +// let topic_params = TopicScoreParams { +// time_in_mesh_weight: 0.0, //deactivate time in mesh +// first_message_deliveries_weight: 0.0, //deactivate first time deliveries +// mesh_message_deliveries_weight: 0.0, //deactivate message deliveries +// mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties +// invalid_message_deliveries_weight: -2.0, +// invalid_message_deliveries_decay: 0.9, +// topic_weight: 0.7, +// ..Default::default() +// }; +// peer_score_params.topics.insert(topic_hash, topic_params); +// peer_score_params.app_specific_weight = 1.0; +// let peer_score_thresholds = PeerScoreThresholds::default(); + +// //build mesh with one peer +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(config.clone()) +// .explicit(0) +// .outbound(0) +// .scoring(Some((peer_score_params, peer_score_thresholds))) +// .create_network(); + +// let mut seq = 0; +// let deliver_message = |gs: &mut Behaviour, index: usize, msg: RawMessage| { +// gs.handle_received_message(msg, &peers[index]); +// }; + +// //peer 0 delivers invalid message +// let m1 = random_message(&mut seq, &topics); +// deliver_message(&mut gs, 0, m1.clone()); + +// // Transform the inbound message +// let message1 = 
&gs.data_transform.inbound_transform(m1).unwrap(); +// assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + +// //message m1 gets rejected +// gs.report_message_validation_result( +// &config.message_id(message1), +// &peers[0], +// MessageAcceptance::Reject, +// ) +// .unwrap(); + +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]), +// -2.0 * 0.7 +// ); + +// //we decay +// gs.peer_score.as_mut().unwrap().0.refresh_scores(); + +// // the number of invalids gets decayed to 0.9 and then squared in the score +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]), +// 0.9 * 0.9 * -2.0 * 0.7 +// ); +// } + +// #[test] +// fn test_scoring_p5() { +// let peer_score_params = PeerScoreParams { +// app_specific_weight: 2.0, +// ..PeerScoreParams::default() +// }; + +// //build mesh with one peer +// let (mut gs, peers, _) = inject_nodes1() +// .peer_no(1) +// .topics(vec!["test".into()]) +// .to_subscribe(true) +// .gs_config(Config::default()) +// .explicit(0) +// .outbound(0) +// .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) +// .create_network(); + +// gs.set_application_score(&peers[0], 1.1); + +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]), +// 1.1 * 2.0 +// ); +// } + +// #[test] +// fn test_scoring_p6() { +// let peer_score_params = PeerScoreParams { +// ip_colocation_factor_threshold: 5.0, +// ip_colocation_factor_weight: -2.0, +// ..Default::default() +// }; + +// let (mut gs, _, _) = inject_nodes1() +// .peer_no(0) +// .topics(vec![]) +// .to_subscribe(false) +// .gs_config(Config::default()) +// .explicit(0) +// .outbound(0) +// .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) +// .create_network(); + +// //create 5 peers with the same ip +// let addr = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 3)); +// let peers = vec![ +// add_peer_with_addr(&mut gs, &[], false, false, addr.clone()), +// add_peer_with_addr(&mut gs, &[], false, false, addr.clone()), +// add_peer_with_addr(&mut gs, &[], true, false, addr.clone()), +// add_peer_with_addr(&mut gs, &[], true, false, addr.clone()), +// add_peer_with_addr(&mut gs, &[], true, true, addr.clone()), +// ]; + +// //create 4 other peers with other ip +// let addr2 = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 4)); +// let others = vec![ +// add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()), +// add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()), +// add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()), +// add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()), +// ]; + +// //no penalties yet +// for peer in peers.iter().chain(others.iter()) { +// assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0); +// } + +// //add additional connection for 3 others with addr +// for id in others.iter().take(3) { +// gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { +// peer_id: *id, +// connection_id: ConnectionId::new_unchecked(0), +// endpoint: &ConnectedPoint::Dialer { +// address: addr.clone(), +// role_override: Endpoint::Dialer, +// }, +// failed_addresses: &[], +// other_established: 0, +// })); +// } + +// //penalties apply squared +// for peer in peers.iter().chain(others.iter().take(3)) { +// assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); +// } +// //fourth other peer still no penalty +// assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&others[3]), 0.0); + +// //add additional connection for 3 of the peers to addr2 +// for peer in 
peers.iter().take(3) { +// gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { +// peer_id: *peer, +// connection_id: ConnectionId::new_unchecked(0), +// endpoint: &ConnectedPoint::Dialer { +// address: addr2.clone(), +// role_override: Endpoint::Dialer, +// }, +// failed_addresses: &[], +// other_established: 1, +// })); +// } + +// //double penalties for the first three of each +// for peer in peers.iter().take(3).chain(others.iter().take(3)) { +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(peer), +// (9.0 + 4.0) * -2.0 +// ); +// } + +// //single penalties for the rest +// for peer in peers.iter().skip(3) { +// assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); +// } +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&others[3]), +// 4.0 * -2.0 +// ); + +// //two times same ip doesn't count twice +// gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { +// peer_id: peers[0], +// connection_id: ConnectionId::new_unchecked(0), +// endpoint: &ConnectedPoint::Dialer { +// address: addr, +// role_override: Endpoint::Dialer, +// }, +// failed_addresses: &[], +// other_established: 2, +// })); + +// //nothing changed +// //double penalties for the first three of each +// for peer in peers.iter().take(3).chain(others.iter().take(3)) { +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(peer), +// (9.0 + 4.0) * -2.0 +// ); +// } + +// //single penalties for the rest +// for peer in peers.iter().skip(3) { +// assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); +// } +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&others[3]), +// 4.0 * -2.0 +// ); +// } + +// #[test] +// fn test_scoring_p7_grafts_before_backoff() { +// let config = ConfigBuilder::default() +// .prune_backoff(Duration::from_millis(200)) +// .graft_flood_threshold(Duration::from_millis(100)) +// .build() +// .unwrap(); +// let peer_score_params = PeerScoreParams { +// behaviour_penalty_weight: -2.0, +// behaviour_penalty_decay: 0.9, +// ..Default::default() +// }; + +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(2) +// .topics(vec!["test".into()]) +// .to_subscribe(false) +// .gs_config(config) +// .explicit(0) +// .outbound(0) +// .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) +// .create_network(); + +// //remove peers from mesh and send prune to them => this adds a backoff for the peers +// for peer in peers.iter().take(2) { +// gs.mesh.get_mut(&topics[0]).unwrap().remove(peer); +// gs.send_graft_prune( +// HashMap::new(), +// HashMap::from([(*peer, vec![topics[0].clone()])]), +// HashSet::new(), +// ); +// } + +// //wait 50 millisecs +// sleep(Duration::from_millis(50)); + +// //first peer tries to graft +// gs.handle_graft(&peers[0], vec![topics[0].clone()]); + +// //double behaviour penalty for first peer (squared) +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]), +// 4.0 * -2.0 +// ); + +// //wait 100 millisecs +// sleep(Duration::from_millis(100)); + +// //second peer tries to graft +// gs.handle_graft(&peers[1], vec![topics[0].clone()]); + +// //single behaviour penalty for second peer +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[1]), +// 1.0 * -2.0 +// ); + +// //test decay +// gs.peer_score.as_mut().unwrap().0.refresh_scores(); + +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[0]), +// 4.0 * 0.9 * 0.9 * -2.0 +// ); +// assert_eq!( +// gs.peer_score.as_ref().unwrap().0.score(&peers[1]), +// 1.0 * 0.9 
* 0.9 * -2.0 +// ); +// } + +// #[test] +// fn test_opportunistic_grafting() { +// let config = ConfigBuilder::default() +// .mesh_n_low(3) +// .mesh_n(5) +// .mesh_n_high(7) +// .mesh_outbound_min(0) //deactivate outbound handling +// .opportunistic_graft_ticks(2) +// .opportunistic_graft_peers(2) +// .build() +// .unwrap(); +// let peer_score_params = PeerScoreParams { +// app_specific_weight: 1.0, +// ..Default::default() +// }; +// let thresholds = PeerScoreThresholds { +// opportunistic_graft_threshold: 2.0, +// ..Default::default() +// }; + +// let (mut gs, peers, topics) = inject_nodes1() +// .peer_no(5) +// .topics(vec!["test".into()]) +// .to_subscribe(false) +// .gs_config(config) +// .explicit(0) +// .outbound(0) +// .scoring(Some((peer_score_params, thresholds))) +// .create_network(); + +// //fill mesh with 5 peers +// for peer in &peers { +// gs.handle_graft(peer, topics.clone()); +// } + +// //add additional 5 peers +// let others: Vec<_> = (0..5) +// .map(|_| add_peer(&mut gs, &topics, false, false)) +// .collect(); + +// //currently mesh equals peers +// assert_eq!(gs.mesh[&topics[0]], peers.iter().cloned().collect()); + +// //give others high scores (but the first two have not high enough scores) +// for (i, peer) in peers.iter().enumerate().take(5) { +// gs.set_application_score(peer, 0.0 + i as f64); +// } + +// //set scores for peers in the mesh +// for (i, peer) in others.iter().enumerate().take(5) { +// gs.set_application_score(peer, 0.0 + i as f64); +// } + +// //this gives a median of exactly 2.0 => should not apply opportunistic grafting +// gs.heartbeat(); +// gs.heartbeat(); + +// assert_eq!( +// gs.mesh[&topics[0]].len(), +// 5, +// "should not apply opportunistic grafting" +// ); + +// //reduce middle score to 1.0 giving a median of 1.0 +// gs.set_application_score(&peers[2], 1.0); + +// //opportunistic grafting after two heartbeats + +// gs.heartbeat(); +// assert_eq!( +// gs.mesh[&topics[0]].len(), +// 5, +// "should not apply opportunistic grafting after first tick" +// ); + +// gs.heartbeat(); + +// assert_eq!( +// gs.mesh[&topics[0]].len(), +// 7, +// "opportunistic grafting should have added 2 peers" +// ); + +// assert!( +// gs.mesh[&topics[0]].is_superset(&peers.iter().cloned().collect()), +// "old peers are still part of the mesh" +// ); + +// assert!( +// gs.mesh[&topics[0]].is_disjoint(&others.iter().cloned().take(2).collect()), +// "peers below or equal to median should not be added in opportunistic grafting" +// ); +// } + +// #[test] +// fn test_ignore_graft_from_unknown_topic() { +// //build gossipsub without subscribing to any topics +// let (mut gs, _, _) = inject_nodes1() +// .peer_no(0) +// .topics(vec![]) +// .to_subscribe(false) +// .create_network(); + +// //handle an incoming graft for some topic +// gs.handle_graft(&PeerId::random(), vec![Topic::new("test").hash()]); + +// //assert that no prune got created +// assert_eq!( +// count_control_msgs(&gs, |_, a| matches!(a, ControlAction::Prune { .. 
})), +// 0, +// "we should not prune after graft in unknown topic" +// ); +// } + +// #[test] +// fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { +// let config = Config::default(); +// //build gossipsub with full mesh +// let (mut gs, _, topics) = inject_nodes1() +// .peer_no(config.mesh_n_high()) +// .topics(vec!["test".into()]) +// .to_subscribe(false) +// .create_network(); + +// //add another peer not in the mesh +// let peer = add_peer(&mut gs, &topics, false, false); + +// //receive a message +// let mut seq = 0; +// let m1 = random_message(&mut seq, &topics); + +// // Transform the inbound message +// let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + +// let id = config.message_id(message1); + +// gs.handle_received_message(m1, &PeerId::random()); + +// //clear events +// gs.events.clear(); + +// //the first gossip_retransimission many iwants return the valid message, all others are +// // ignored. +// for _ in 0..(2 * config.gossip_retransimission() + 10) { +// gs.handle_iwant(&peer, vec![id.clone()]); +// } + +// assert_eq!( +// gs.events +// .iter() +// .filter(|e| matches!( +// e, +// ToSwarm::NotifyHandler { +// event: HandlerIn::Message(RpcOut::Forward(_)), +// .. +// } +// )) +// .count(), +// config.gossip_retransimission() as usize, +// "not more then gossip_retransmission many messages get sent back" +// ); +// } + +// #[test] +// fn test_ignore_too_many_ihaves() { +// let config = ConfigBuilder::default() +// .max_ihave_messages(10) +// .build() +// .unwrap(); +// //build gossipsub with full mesh +// let (mut gs, _, topics) = inject_nodes1() +// .peer_no(config.mesh_n_high()) +// .topics(vec!["test".into()]) +// .to_subscribe(false) +// .gs_config(config.clone()) +// .create_network(); + +// //add another peer not in the mesh +// let peer = add_peer(&mut gs, &topics, false, false); + +// //peer has 20 messages +// let mut seq = 0; +// let messages: Vec<_> = (0..20).map(|_| random_message(&mut seq, &topics)).collect(); + +// //peer sends us one ihave for each message in order +// for raw_message in &messages { +// // Transform the inbound message +// let message = &gs +// .data_transform +// .inbound_transform(raw_message.clone()) +// .unwrap(); + +// gs.handle_ihave( +// &peer, +// vec![(topics[0].clone(), vec![config.message_id(message)])], +// ); +// } + +// let first_ten: HashSet<_> = messages +// .iter() +// .take(10) +// .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) +// .map(|m| config.message_id(&m)) +// .collect(); + +// //we send iwant only for the first 10 messages +// assert_eq!( +// count_control_msgs(&gs, |p, action| p == &peer +// && matches!(action, ControlAction::IWant { message_ids } if message_ids.len() == 1 && first_ten.contains(&message_ids[0]))), +// 10, +// "exactly the first ten ihaves should be processed and one iwant for each created" +// ); + +// //after a heartbeat everything is forgotten +// gs.heartbeat(); +// for raw_message in messages[10..].iter() { +// // Transform the inbound message +// let message = &gs +// .data_transform +// .inbound_transform(raw_message.clone()) +// .unwrap(); + +// gs.handle_ihave( +// &peer, +// vec![(topics[0].clone(), vec![config.message_id(message)])], +// ); +// } + +// //we sent iwant for all 20 messages +// assert_eq!( +// count_control_msgs(&gs, |p, action| p == &peer +// && matches!(action, ControlAction::IWant { message_ids } if message_ids.len() == 1)), +// 20, +// "all 20 should get sent" +// ); +// } + +// #[test] +// fn 
+// fn test_ignore_too_many_messages_in_ihave() {
+//     let config = ConfigBuilder::default()
+//         .max_ihave_messages(10)
+//         .max_ihave_length(10)
+//         .build()
+//         .unwrap();
+//     //build gossipsub with full mesh
+//     let (mut gs, _, topics) = inject_nodes1()
+//         .peer_no(config.mesh_n_high())
+//         .topics(vec!["test".into()])
+//         .to_subscribe(false)
+//         .gs_config(config.clone())
+//         .create_network();
+
+//     //add another peer not in the mesh
+//     let peer = add_peer(&mut gs, &topics, false, false);
+
+//     //peer has 20 messages
+//     let mut seq = 0;
+//     let message_ids: Vec<_> = (0..20)
+//         .map(|_| random_message(&mut seq, &topics))
+//         .map(|msg| gs.data_transform.inbound_transform(msg).unwrap())
+//         .map(|msg| config.message_id(&msg))
+//         .collect();
+
+//     //peer sends us three ihaves
+//     gs.handle_ihave(&peer, vec![(topics[0].clone(), message_ids[0..8].to_vec())]);
+//     gs.handle_ihave(
+//         &peer,
+//         vec![(topics[0].clone(), message_ids[0..12].to_vec())],
+//     );
+//     gs.handle_ihave(
+//         &peer,
+//         vec![(topics[0].clone(), message_ids[0..20].to_vec())],
+//     );
+
+//     let first_twelve: HashSet<_> = message_ids.iter().take(12).collect();
+
+//     //we send iwant only for the first 10 messages
+//     let mut sum = 0;
+//     assert_eq!(
+//         count_control_msgs(&gs, |p, action| match action {
+//             ControlAction::IWant { message_ids } =>
+//                 p == &peer && {
+//                     assert!(first_twelve.is_superset(&message_ids.iter().collect()));
+//                     sum += message_ids.len();
+//                     true
+//                 },
+//             _ => false,
+//         }),
+//         2,
+//         "the third ihave should get ignored and no iwant sent"
+//     );
+
+//     assert_eq!(sum, 10, "exactly the first ten ihaves should be processed");
+
+//     //after a heartbeat everything is forgotten
+//     gs.heartbeat();
+//     gs.handle_ihave(
+//         &peer,
+//         vec![(topics[0].clone(), message_ids[10..20].to_vec())],
+//     );
+
+//     //we sent 20 iwant messages
+//     let mut sum = 0;
+//     assert_eq!(
+//         count_control_msgs(&gs, |p, action| match action {
+//             ControlAction::IWant { message_ids } =>
+//                 p == &peer && {
+//                     sum += message_ids.len();
+//                     true
+//                 },
+//             _ => false,
+//         }),
+//         3
+//     );
+//     assert_eq!(sum, 20, "exactly 20 iwants should get sent");
+// }
+
+// #[test]
+// fn test_limit_number_of_message_ids_inside_ihave() {
+//     let config = ConfigBuilder::default()
+//         .max_ihave_messages(10)
+//         .max_ihave_length(100)
+//         .build()
+//         .unwrap();
+//     //build gossipsub with full mesh
+//     let (mut gs, peers, topics) = inject_nodes1()
+//         .peer_no(config.mesh_n_high())
+//         .topics(vec!["test".into()])
+//         .to_subscribe(false)
+//         .gs_config(config)
+//         .create_network();
+
+//     //graft to all peers to really fill the mesh with all the peers
+//     for peer in peers {
+//         gs.handle_graft(&peer, topics.clone());
+//     }
+
+//     //add two other peers not in the mesh
+//     let p1 = add_peer(&mut gs, &topics, false, false);
+//     let p2 = add_peer(&mut gs, &topics, false, false);
+
+//     //receive 200 messages from another peer
+//     let mut seq = 0;
+//     for _ in 0..200 {
+//         gs.handle_received_message(random_message(&mut seq, &topics), &PeerId::random());
+//     }
+
+//     //emit gossip
+//     gs.emit_gossip();
+
+//     // both peers should have gotten 100 random ihave messages, to assert the randomness, we
+//     // assert that both have not gotten the same set of messages, but have an intersection
+//     // (which is the case with very high probability, the probability of failure is < 10^-58).
+
+//     let mut ihaves1 = HashSet::new();
+//     let mut ihaves2 = HashSet::new();
+
+//     assert_eq!(
+//         count_control_msgs(&gs, |p, action| match action {
+//             ControlAction::IHave { message_ids, .. } => {
+//                 if p == &p1 {
+//                     ihaves1 = message_ids.iter().cloned().collect();
+//                     true
+//                 } else if p == &p2 {
+//                     ihaves2 = message_ids.iter().cloned().collect();
+//                     true
+//                 } else {
+//                     false
+//                 }
+//             }
+//             _ => false,
+//         }),
+//         2,
+//         "should have emitted one ihave to p1 and one to p2"
+//     );
+
+//     assert_eq!(
+//         ihaves1.len(),
+//         100,
+//         "should have sent 100 message ids in ihave to p1"
+//     );
+//     assert_eq!(
+//         ihaves2.len(),
+//         100,
+//         "should have sent 100 message ids in ihave to p2"
+//     );
+//     assert!(
+//         ihaves1 != ihaves2,
+//         "should have sent different random messages to p1 and p2 \
+//         (this may fail with a probability < 10^-58)"
+//     );
+//     assert!(
+//         ihaves1.intersection(&ihaves2).count() > 0,
+//         "should have sent random messages with some common messages to p1 and p2 \
+//         (this may fail with a probability < 10^-58)"
+//     );
+// }
+
+// #[test]
+// fn test_iwant_penalties() {
+//     use tracing_subscriber::EnvFilter;
+//     let _ = tracing_subscriber::fmt()
+//         .with_env_filter(EnvFilter::from_default_env())
+//         .try_init();
+
+//     let config = ConfigBuilder::default()
+//         .iwant_followup_time(Duration::from_secs(4))
+//         .build()
+//         .unwrap();
+//     let peer_score_params = PeerScoreParams {
+//         behaviour_penalty_weight: -1.0,
+//         ..Default::default()
+//     };
+
+//     // fill the mesh
+//     let (mut gs, peers, topics) = inject_nodes1()
+//         .peer_no(2)
+//         .topics(vec!["test".into()])
+//         .to_subscribe(false)
+//         .gs_config(config.clone())
+//         .explicit(0)
+//         .outbound(0)
+//         .scoring(Some((peer_score_params, PeerScoreThresholds::default())))
+//         .create_network();
+
+//     // graft to all peers to really fill the mesh with all the peers
+//     for peer in peers {
+//         gs.handle_graft(&peer, topics.clone());
+//     }
+
+//     // add 100 more peers
+//     let other_peers: Vec<_> = (0..100)
+//         .map(|_| add_peer(&mut gs, &topics, false, false))
+//         .collect();
+
+//     // each peer sends us an ihave containing two message ids each
+//     let mut first_messages = Vec::new();
+//     let mut second_messages = Vec::new();
+//     let mut seq = 0;
+//     for peer in &other_peers {
+//         let msg1 = random_message(&mut seq, &topics);
+//         let msg2 = random_message(&mut seq, &topics);
+
+//         // Decompress the raw message and calculate the message id.
+//         // Transform the inbound message
+//         let message1 = &gs.data_transform.inbound_transform(msg1.clone()).unwrap();
+
+//         // Transform the inbound message
+//         let message2 = &gs.data_transform.inbound_transform(msg2.clone()).unwrap();
+
+//         first_messages.push(msg1.clone());
+//         second_messages.push(msg2.clone());
+//         gs.handle_ihave(
+//             peer,
+//             vec![(
+//                 topics[0].clone(),
+//                 vec![config.message_id(message1), config.message_id(message2)],
+//             )],
+//         );
+//     }
+
+//     // the peers send us all the first message ids in time
+//     for (index, peer) in other_peers.iter().enumerate() {
+//         gs.handle_received_message(first_messages[index].clone(), peer);
+//     }
+
+//     // now we do a heartbeat; no penalization should have been applied yet
+//     gs.heartbeat();
+
+//     for peer in &other_peers {
+//         assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0);
+//     }
+
+//     // the first twenty of the other peers send their response in time
+//     for (index, peer) in other_peers.iter().enumerate().take(20) {
+//         gs.handle_received_message(second_messages[index].clone(), peer);
+//     }
+
+//     // sleep for the promise duration
+//     sleep(Duration::from_secs(4));
+
+//     // now we do a heartbeat to apply penalization
+//     gs.heartbeat();
+
+//     // now we get the second messages from the last 80 peers.
+//     for (index, peer) in other_peers.iter().enumerate() {
+//         if index > 19 {
+//             gs.handle_received_message(second_messages[index].clone(), peer);
+//         }
+//     }
+
+//     // no further penalizations should get applied
+//     gs.heartbeat();
+
+//     // Only the last 80 peers should be penalized for not responding in time
+//     let mut not_penalized = 0;
+//     let mut single_penalized = 0;
+//     let mut double_penalized = 0;
+
+//     for (i, peer) in other_peers.iter().enumerate() {
+//         let score = gs.peer_score.as_ref().unwrap().0.score(peer);
+//         if score == 0.0 {
+//             not_penalized += 1;
+//         } else if score == -1.0 {
+//             assert!(i > 9);
+//             single_penalized += 1;
+//         } else if score == -4.0 {
+//             assert!(i > 9);
+//             double_penalized += 1
+//         } else {
+//             println!("{peer}");
+//             println!("{score}");
+//             panic!("Invalid score of peer");
+//         }
+//     }
+
+//     assert_eq!(not_penalized, 20);
+//     assert_eq!(single_penalized, 80);
+//     assert_eq!(double_penalized, 0);
+// }
+
+// #[test]
+// fn test_publish_to_floodsub_peers_without_flood_publish() {
+//     let config = ConfigBuilder::default()
+//         .flood_publish(false)
+//         .build()
+//         .unwrap();
+//     let (mut gs, _, topics) = inject_nodes1()
+//         .peer_no(config.mesh_n_low() - 1)
+//         .topics(vec!["test".into()])
+//         .to_subscribe(false)
+//         .gs_config(config)
+//         .create_network();
+
+//     //add two floodsub peers, one explicit, one implicit
+//     let p1 = add_peer_with_addr_and_kind(
+//         &mut gs,
+//         &topics,
+//         false,
+//         false,
+//         Multiaddr::empty(),
+//         Some(PeerKind::Floodsub),
+//     );
+//     let p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None);
+
+//     //p1 and p2 are not in the mesh
+//     assert!(!gs.mesh[&topics[0]].contains(&p1) && !gs.mesh[&topics[0]].contains(&p2));
+
+//     //publish a message
+//     let publish_data = vec![0; 42];
+//     gs.publish(Topic::new("test"), publish_data).unwrap();
+
+//     // Collect publish messages to floodsub peers
+//     let publishes = gs
+//         .events
+//         .iter()
+//         .fold(vec![], |mut collected_publish, e| match e {
+//             ToSwarm::NotifyHandler { peer_id, event, .. } => {
+//                 if peer_id == &p1 || peer_id == &p2 {
+//                     if let HandlerIn::Message(RpcOut::Publish(message)) = event {
+//                         collected_publish.push(message);
+//                     }
+//                 }
+//                 collected_publish
+//             }
+//             _ => collected_publish,
+//         });
+
+//     assert_eq!(
+//         publishes.len(),
+//         2,
+//         "Should send a publish message to all floodsub peers"
+//     );
+// }
+
+// #[test]
+// fn test_do_not_use_floodsub_in_fanout() {
+//     let config = ConfigBuilder::default()
+//         .flood_publish(false)
+//         .build()
+//         .unwrap();
+//     let (mut gs, _, _) = inject_nodes1()
+//         .peer_no(config.mesh_n_low() - 1)
+//         .topics(Vec::new())
+//         .to_subscribe(false)
+//         .gs_config(config)
+//         .create_network();
+
+//     let topic = Topic::new("test");
+//     let topics = vec![topic.hash()];
+
+//     //add two floodsub peers, one explicit, one implicit
+//     let p1 = add_peer_with_addr_and_kind(
+//         &mut gs,
+//         &topics,
+//         false,
+//         false,
+//         Multiaddr::empty(),
+//         Some(PeerKind::Floodsub),
+//     );
+//     let p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None);
+
+//     //publish a message
+//     let publish_data = vec![0; 42];
+//     gs.publish(Topic::new("test"), publish_data).unwrap();
+
+//     // Collect publish messages to floodsub peers
+//     let publishes = gs
+//         .events
+//         .iter()
+//         .fold(vec![], |mut collected_publish, e| match e {
+//             ToSwarm::NotifyHandler { peer_id, event, .. } => {
+//                 if peer_id == &p1 || peer_id == &p2 {
+//                     if let HandlerIn::Message(RpcOut::Publish(message)) = event {
+//                         collected_publish.push(message);
+//                     }
+//                 }
+//                 collected_publish
+//             }
+//             _ => collected_publish,
+//         });
+
+//     assert_eq!(
+//         publishes.len(),
+//         2,
+//         "Should send a publish message to all floodsub peers"
+//     );
+
+//     assert!(
+//         !gs.fanout[&topics[0]].contains(&p1) && !gs.fanout[&topics[0]].contains(&p2),
+//         "Floodsub peers are not allowed in fanout"
+//     );
+// }
+
+// #[test]
+// fn test_dont_add_floodsub_peers_to_mesh_on_join() {
+//     let (mut gs, _, _) = inject_nodes1()
+//         .peer_no(0)
+//         .topics(Vec::new())
+//         .to_subscribe(false)
+//         .create_network();
+
+//     let topic = Topic::new("test");
+//     let topics = vec![topic.hash()];
+
+//     //add two floodsub peers, one explicit, one implicit
+//     let _p1 = add_peer_with_addr_and_kind(
+//         &mut gs,
+//         &topics,
+//         false,
+//         false,
+//         Multiaddr::empty(),
+//         Some(PeerKind::Floodsub),
+//     );
+//     let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None);
+
+//     gs.join(&topics[0]);
+
+//     assert!(
+//         gs.mesh[&topics[0]].is_empty(),
+//         "Floodsub peers should not get added to mesh"
+//     );
+// }
+
+// #[test]
+// fn test_dont_send_px_to_old_gossipsub_peers() {
+//     let (mut gs, _, topics) = inject_nodes1()
+//         .peer_no(0)
+//         .topics(vec!["test".into()])
+//         .to_subscribe(false)
+//         .create_network();
+
+//     //add an old gossipsub peer
+//     let p1 = add_peer_with_addr_and_kind(
+//         &mut gs,
+//         &topics,
+//         false,
+//         false,
+//         Multiaddr::empty(),
+//         Some(PeerKind::Gossipsub),
+//     );
+
+//     //prune the peer
+//     gs.send_graft_prune(
+//         HashMap::new(),
+//         vec![(p1, topics.clone())].into_iter().collect(),
+//         HashSet::new(),
+//     );
+
+//     //check that prune does not contain px
+//     assert_eq!(
+//         count_control_msgs(&gs, |_, m| match m {
+//             ControlAction::Prune { peers: px, .. } => !px.is_empty(),
+//             _ => false,
+//         }),
+//         0,
+//         "Should not send px to old gossipsub peers"
+//     );
+// }
+
+// #[test]
+// fn test_dont_send_floodsub_peers_in_px() {
+//     //build mesh with one peer
+//     let (mut gs, peers, topics) = inject_nodes1()
+//         .peer_no(1)
+//         .topics(vec!["test".into()])
+//         .to_subscribe(true)
+//         .create_network();
+
+//     //add two floodsub peers
+//     let _p1 = add_peer_with_addr_and_kind(
+//         &mut gs,
+//         &topics,
+//         false,
+//         false,
+//         Multiaddr::empty(),
+//         Some(PeerKind::Floodsub),
+//     );
+//     let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None);
+
+//     //prune only mesh node
+//     gs.send_graft_prune(
+//         HashMap::new(),
+//         vec![(peers[0], topics.clone())].into_iter().collect(),
+//         HashSet::new(),
+//     );
+
+//     //check that px in prune message is empty
+//     assert_eq!(
+//         count_control_msgs(&gs, |_, m| match m {
+//             ControlAction::Prune { peers: px, .. } => !px.is_empty(),
+//             _ => false,
+//         }),
+//         0,
+//         "Should not include floodsub peers in px"
+//     );
+// }
+
+// #[test]
+// fn test_dont_add_floodsub_peers_to_mesh_in_heartbeat() {
+//     let (mut gs, _, topics) = inject_nodes1()
+//         .peer_no(0)
+//         .topics(vec!["test".into()])
+//         .to_subscribe(false)
+//         .create_network();
+
+//     //add two floodsub peers, one explicit, one implicit
+//     let _p1 = add_peer_with_addr_and_kind(
+//         &mut gs,
+//         &topics,
+//         true,
+//         false,
+//         Multiaddr::empty(),
+//         Some(PeerKind::Floodsub),
+//     );
+//     let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, true, false, Multiaddr::empty(), None);
+
+//     gs.heartbeat();
+
+//     assert!(
+//         gs.mesh[&topics[0]].is_empty(),
+//         "Floodsub peers should not get added to mesh"
+//     );
+// }
+
+// // Some very basic test of public api methods.
+// #[test]
+// fn test_public_api() {
+//     let (gs, peers, topic_hashes) = inject_nodes1()
+//         .peer_no(4)
+//         .topics(vec![String::from("topic1")])
+//         .to_subscribe(true)
+//         .create_network();
+//     let peers = peers.into_iter().collect::<BTreeSet<_>>();
+
+//     assert_eq!(
+//         gs.topics().cloned().collect::<Vec<_>>(),
+//         topic_hashes,
+//         "Expected topics to match registered topic."
+//     );
+
+//     assert_eq!(
+//         gs.mesh_peers(&TopicHash::from_raw("topic1"))
+//             .cloned()
+//             .collect::<BTreeSet<_>>(),
+//         peers,
+//         "Expected peers for a registered topic to contain all peers."
+//     );
+
+//     assert_eq!(
+//         gs.all_mesh_peers().cloned().collect::<BTreeSet<_>>(),
+//         peers,
+//         "Expected all_peers to contain all peers."
+//     );
+// }
+
+// #[test]
+// fn test_subscribe_to_invalid_topic() {
+//     let t1 = Topic::new("t1");
+//     let t2 = Topic::new("t2");
+//     let (mut gs, _, _) = inject_nodes::<IdentityTransform, _>()
+//         .subscription_filter(WhitelistSubscriptionFilter(
+//             vec![t1.hash()].into_iter().collect(),
+//         ))
+//         .to_subscribe(false)
+//         .create_network();
+
+//     assert!(gs.subscribe(&t1).is_ok());
+//     assert!(gs.subscribe(&t2).is_err());
+// }
+
+// #[test]
+// fn test_subscribe_and_graft_with_negative_score() {
+//     //simulate a communication between two gossipsub instances
+//     let (mut gs1, _, topic_hashes) = inject_nodes1()
+//         .topics(vec!["test".into()])
+//         .scoring(Some((
+//             PeerScoreParams::default(),
+//             PeerScoreThresholds::default(),
+//         )))
+//         .create_network();
+
+//     let (mut gs2, _, _) = inject_nodes1().create_network();
+
+//     let connection_id = ConnectionId::new_unchecked(0);
+
+//     let topic = Topic::new("test");
+
+//     let p2 = add_peer(&mut gs1, &Vec::new(), true, false);
+//     let p1 = add_peer(&mut gs2, &topic_hashes, false, false);
+
+//     //add penalty to peer p2
+//     gs1.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1);
+
+//     let original_score = gs1.peer_score.as_ref().unwrap().0.score(&p2);
+
+//     //subscribe to topic in gs2
+//     gs2.subscribe(&topic).unwrap();
+
+//     let forward_messages_to_p1 = |gs1: &mut Behaviour<_, _>, gs2: &mut Behaviour<_, _>| {
+//         //collect messages to p1
+//         let messages_to_p1 = gs2.events.drain(..).filter_map(|e| match e {
+//             ToSwarm::NotifyHandler { peer_id, event, .. } => {
+//                 if peer_id == p1 {
+//                     if let HandlerIn::Message(m) = event {
+//                         Some(m)
+//                     } else {
+//                         None
+//                     }
+//                 } else {
+//                     None
+//                 }
+//             }
+//             _ => None,
+//         });
+//         for message in messages_to_p1 {
+//             gs1.on_connection_handler_event(
+//                 p2,
+//                 connection_id,
+//                 HandlerEvent::Message {
+//                     rpc: proto_to_message(&message.into_protobuf()),
+//                     invalid_messages: vec![],
+//                 },
+//             );
+//         }
+//     };
+
+//     //forward the subscribe message
+//     forward_messages_to_p1(&mut gs1, &mut gs2);
+
+//     //heartbeats on both
+//     gs1.heartbeat();
+//     gs2.heartbeat();
+
+//     //forward messages again
+//     forward_messages_to_p1(&mut gs1, &mut gs2);
+
+//     //nobody got penalized
+//     assert!(gs1.peer_score.as_ref().unwrap().0.score(&p2) >= original_score);
+// }
+
+// #[test]
+// /// Test nodes that send grafts without subscriptions.
+// fn test_graft_without_subscribe() {
+//     // The node should:
+//     // - Create an empty vector in mesh[topic]
+//     // - Send subscription request to all peers
+//     // - run JOIN(topic)
+
+//     let topic = String::from("test_subscribe");
+//     let subscribe_topic = vec![topic.clone()];
+//     let subscribe_topic_hash = vec![Topic::new(topic.clone()).hash()];
+//     let (mut gs, peers, topic_hashes) = inject_nodes1()
+//         .peer_no(1)
+//         .topics(subscribe_topic)
+//         .to_subscribe(false)
+//         .create_network();
+
+//     assert!(
+//         gs.mesh.contains_key(&topic_hashes[0]),
+//         "Subscribe should add a new entry to the mesh[topic] hashmap"
+//     );
+
+//     // The node sends a graft for the subscribe topic.
+//     gs.handle_graft(&peers[0], subscribe_topic_hash);
+
+//     // The node disconnects
+//     disconnect_peer(&mut gs, &peers[0]);
+
+//     // We unsubscribe from the topic.
+//     let _ = gs.unsubscribe(&Topic::new(topic));
+// }
diff --git a/gossipsub/src/config.rs b/gossipsub/src/config.rs
new file mode 100644
index 0000000..b5aa51f
--- /dev/null
+++ b/gossipsub/src/config.rs
@@ -0,0 +1,837 @@
+use std::sync::Arc;
+use std::time::Duration;
+
+use iroh::net::key::SecretKey;
+
+use crate::error::ConfigBuilderError;
+use crate::protocol::ProtocolConfig;
+use crate::types::{Message, MessageId};
+
+/// The types of message validation that can be employed by gossipsub.
+#[derive(Debug, Clone)]
+pub enum ValidationMode {
+    /// This is the default setting. This requires the message author to be a valid [`PeerId`] and
+    /// to be present, as well as the sequence number. All messages must have valid signatures.
+    ///
+    /// NOTE: This setting will reject messages from nodes using
+    /// [`crate::behaviour::MessageAuthenticity::Anonymous`] and all messages that do not have
+    /// signatures.
+    Strict,
+    /// This setting permits messages that have no author, sequence number or signature. If any of
+    /// these fields exist in the message they are validated.
+    Permissive,
+    /// This setting requires the author, sequence number and signature fields of a message to be
+    /// empty. Any message that contains these fields is considered invalid.
+    Anonymous,
+    /// This setting does not check the author, sequence number or signature fields of incoming
+    /// messages. If these fields contain data, they are simply ignored.
+    ///
+    /// NOTE: This setting will consider messages with invalid signatures as valid messages.
+    None,
+}
+
+/// Selector for custom Protocol Id
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum Version {
+    V1_0,
+    V1_1,
+}
+
+/// Configuration parameters that define the performance of the gossipsub network.
+#[derive(Clone)]
+pub struct Config {
+    protocol: ProtocolConfig,
+    history_length: usize,
+    history_gossip: usize,
+    mesh_n: usize,
+    mesh_n_low: usize,
+    mesh_n_high: usize,
+    retain_scores: usize,
+    gossip_lazy: usize,
+    gossip_factor: f64,
+    heartbeat_initial_delay: Duration,
+    heartbeat_interval: Duration,
+    fanout_ttl: Duration,
+    check_explicit_peers_ticks: u64,
+    duplicate_cache_time: Duration,
+    validate_messages: bool,
+    message_id_fn: Arc<dyn Fn(&Message) -> MessageId + Send + Sync + 'static>,
+    allow_self_origin: bool,
+    do_px: bool,
+    prune_peers: usize,
+    prune_backoff: Duration,
+    unsubscribe_backoff: Duration,
+    backoff_slack: u32,
+    flood_publish: bool,
+    graft_flood_threshold: Duration,
+    mesh_outbound_min: usize,
+    opportunistic_graft_ticks: u64,
+    opportunistic_graft_peers: usize,
+    gossip_retransimission: u32,
+    max_messages_per_rpc: Option<usize>,
+    max_ihave_length: usize,
+    max_ihave_messages: usize,
+    iwant_followup_time: Duration,
+    published_message_ids_cache_time: Duration,
+}
+
+impl Config {
+    pub(crate) fn protocol_config(&self) -> ProtocolConfig {
+        self.protocol.clone()
+    }
+
+    // Overlay network parameters.
+    /// Number of heartbeats to keep in the `memcache` (default is 5).
+    pub fn history_length(&self) -> usize {
+        self.history_length
+    }
+
+    /// Number of past heartbeats to gossip about (default is 3).
+    pub fn history_gossip(&self) -> usize {
+        self.history_gossip
+    }
+
+    /// Target number of peers for the mesh network (D in the spec, default is 6).
+    pub fn mesh_n(&self) -> usize {
+        self.mesh_n
+    }
+
+    /// Minimum number of peers in mesh network before adding more (D_lo in the spec, default is 5).
+    pub fn mesh_n_low(&self) -> usize {
+        self.mesh_n_low
+    }
+
+    /// Maximum number of peers in mesh network before removing some (D_high in the spec, default
+    /// is 12).
+    pub fn mesh_n_high(&self) -> usize {
+        self.mesh_n_high
+    }
+
+    /// Affects how peers are selected when pruning a mesh due to over subscription.
+    ///
+    /// At least `retain_scores` of the retained peers will be high-scoring, while the remainder are
+    /// chosen randomly (D_score in the spec, default is 4).
+    pub fn retain_scores(&self) -> usize {
+        self.retain_scores
+    }
+
+    /// Minimum number of peers to emit gossip to during a heartbeat (D_lazy in the spec,
+    /// default is 6).
+    pub fn gossip_lazy(&self) -> usize {
+        self.gossip_lazy
+    }
+
+    /// Affects how many peers we will emit gossip to at each heartbeat.
+    ///
+    /// We will send gossip to `gossip_factor * (total number of non-mesh peers)`, or
+    /// `gossip_lazy`, whichever is greater. The default is 0.25.
+    pub fn gossip_factor(&self) -> f64 {
+        self.gossip_factor
+    }
+
+    /// Initial delay in each heartbeat (default is 5 seconds).
+    pub fn heartbeat_initial_delay(&self) -> Duration {
+        self.heartbeat_initial_delay
+    }
+
+    /// Time between each heartbeat (default is 1 second).
+    pub fn heartbeat_interval(&self) -> Duration {
+        self.heartbeat_interval
+    }
+
+    /// Time to live for fanout peers (default is 60 seconds).
+    pub fn fanout_ttl(&self) -> Duration {
+        self.fanout_ttl
+    }
+
+    /// The number of heartbeat ticks until we recheck the connection to explicit peers and
+    /// reconnect if necessary (default 300).
+    pub fn check_explicit_peers_ticks(&self) -> u64 {
+        self.check_explicit_peers_ticks
+    }
+
+    /// The maximum byte size for each gossipsub RPC (default is 65536 bytes).
+    ///
+    /// This represents the maximum size of the entire protobuf payload. It must be at least
+    /// large enough to support basic control messages. If Peer eXchange is enabled, this
+    /// must be large enough to transmit the desired peer information on pruning. It must be at
+    /// least 100 bytes.
+    pub fn max_transmit_size(&self) -> usize {
+        self.protocol.max_transmit_size
+    }
+
+    /// Duplicates are prevented by storing message ids of known messages in an LRU time cache.
+    /// This setting sets the time period that messages are stored in the cache. Duplicates can be
+    /// received if duplicate messages are sent at a time greater than this setting apart. The
+    /// default is 1 minute.
+    pub fn duplicate_cache_time(&self) -> Duration {
+        self.duplicate_cache_time
+    }
+
+    /// When set to `true`, prevents automatic forwarding of all received messages. This setting
+    /// allows a user to validate the messages before propagating them to their peers. If set to
+    /// true, the user must manually call [`crate::Behaviour::report_message_validation_result()`]
+    /// on the behaviour to forward a message once validated. The default is `false`.
+    pub fn validate_messages(&self) -> bool {
+        self.validate_messages
+    }
+
+    /// Determines the level of validation used when receiving messages. See [`ValidationMode`]
+    /// for the available types. The default is ValidationMode::Strict.
+    pub fn validation_mode(&self) -> &ValidationMode {
+        &self.protocol.validation_mode
+    }
+
+    /// A user-defined function allowing the user to specify the message id of a gossipsub message.
+    /// The default value is to concatenate the source peer id with a sequence number. Setting this
+    /// parameter allows the user to address packets arbitrarily. One example is content based
+    /// addressing, where this function may be set to `hash(message)`. This would prevent messages
+    /// of the same content from being duplicated.
+    ///
+    /// The function takes a [`Message`] as input and outputs a String to be interpreted as
+    /// the message id.
+    pub fn message_id(&self, message: &Message) -> MessageId {
+        (self.message_id_fn)(message)
+    }
+
+    /// By default, gossipsub will reject messages that are sent to us that have the same message
+    /// source as we have specified locally. Enabling this allows these messages and prevents
+    /// penalizing the peer that sent us the message. Default is false.
+    pub fn allow_self_origin(&self) -> bool {
+        self.allow_self_origin
+    }
+
+    /// Whether Peer eXchange is enabled; this should be enabled in bootstrappers and other well
+    /// connected/trusted nodes. The default is false.
+    ///
+    /// Note: Peer exchange is not implemented today, see
+    /// <https://github.com/libp2p/rust-libp2p/issues/2398>.
+    pub fn do_px(&self) -> bool {
+        self.do_px
+    }
+
+    /// Controls the number of peers to include in prune Peer eXchange.
+    /// When we prune a peer that's eligible for PX (has a good score, etc), we will try to
+    /// send them signed peer records for up to `prune_peers` other peers that we
+    /// know of. It is recommended that this value is larger than `mesh_n_high` so that the pruned
+    /// peer can reliably form a full mesh. The default is typically 16; however, until signed
+    /// records are spec'd this is disabled and set to 0.
+    pub fn prune_peers(&self) -> usize {
+        self.prune_peers
+    }
+
+    /// Controls the backoff time for pruned peers. This is how long
+    /// a peer must wait before attempting to graft into our mesh again after being pruned.
+    /// When pruning a peer, we send them our value of `prune_backoff` so they know
+    /// the minimum time to wait. Peers running older versions may not send a backoff time,
+    /// so if we receive a prune message without one, we will wait at least `prune_backoff`
+    /// before attempting to re-graft. The default is one minute.
+    pub fn prune_backoff(&self) -> Duration {
+        self.prune_backoff
+    }
+
+    /// Controls the backoff time when unsubscribing from a topic.
+    ///
+    /// This is how long to wait before resubscribing to the topic. A short backoff period in case
+    /// of an unsubscribe event allows reaching a healthy mesh in a more timely manner. The default
+    /// is 10 seconds.
+    pub fn unsubscribe_backoff(&self) -> Duration {
+        self.unsubscribe_backoff
+    }
+
+    /// Number of heartbeat slots considered as slack for backoffs. This guarantees that we wait
+    /// at least backoff_slack heartbeats after a backoff is over before we try to graft. This
+    /// solves problems occurring through high latencies. In particular, if
+    /// `backoff_slack * heartbeat_interval` is longer than any latencies between processing
+    /// prunes on our side and processing prunes on the receiving side, this guarantees that we
+    /// do not get punished for grafting too early. The default is 1.
+    pub fn backoff_slack(&self) -> u32 {
+        self.backoff_slack
+    }
+
+    /// Whether to do flood publishing or not. If enabled, newly created messages will always be
+    /// sent to all peers that are subscribed to the topic and have a good enough score.
+    /// The default is true.
+    pub fn flood_publish(&self) -> bool {
+        self.flood_publish
+    }
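As an illustration of the content-based addressing described in the `message_id` docs above, here is a minimal sketch using the `sha2` crate already listed in Cargo.toml. It assumes `Config`, `ConfigBuilder`, `Message`, and `MessageId` are re-exported at the crate root, as in upstream rust-libp2p's gossipsub; treat the function name as hypothetical.

    use sha2::{Digest, Sha256};

    use gossipsub::{Config, ConfigBuilder, Message, MessageId};

    // Hash only the payload, so two publishes of identical bytes share one id
    // and the second is dropped by the duplicate cache, regardless of sender.
    fn content_addressed_config() -> Config {
        ConfigBuilder::default()
            .message_id_fn(|message: &Message| {
                MessageId::from(Sha256::digest(&message.data).to_vec())
            })
            .build()
            .expect("default parameters remain valid")
    }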
+
+    /// If a GRAFT comes before `graft_flood_threshold` has elapsed since the last PRUNE,
+    /// then there is an extra score penalty applied to the peer through P7.
+    pub fn graft_flood_threshold(&self) -> Duration {
+        self.graft_flood_threshold
+    }
+
+    /// Minimum number of outbound peers in the mesh network before adding more (D_out in the spec).
+    /// This value must be smaller than or equal to `mesh_n / 2` and smaller than `mesh_n_low`.
+    /// The default is 2.
+    pub fn mesh_outbound_min(&self) -> usize {
+        self.mesh_outbound_min
+    }
+
+    /// Number of heartbeat ticks that specify the interval in which opportunistic grafting is
+    /// applied. Every `opportunistic_graft_ticks` we will attempt to select some high-scoring mesh
+    /// peers to replace lower-scoring ones, if the median score of our mesh peers falls below a
+    /// threshold (see <https://godoc.org/github.com/libp2p/go-libp2p-pubsub#GossipSubParams>).
+    /// The default is 60.
+    pub fn opportunistic_graft_ticks(&self) -> u64 {
+        self.opportunistic_graft_ticks
+    }
+
+    /// Controls how many times we will allow a peer to request the same message id through IWANT
+    /// gossip before we start ignoring them. This is designed to prevent peers from spamming us
+    /// with requests and wasting our resources. The default is 3.
+    pub fn gossip_retransimission(&self) -> u32 {
+        self.gossip_retransimission
+    }
+
+    /// The maximum number of new peers to graft to during opportunistic grafting. The default is 2.
+    pub fn opportunistic_graft_peers(&self) -> usize {
+        self.opportunistic_graft_peers
+    }
+
+    /// The maximum number of messages we will process in a given RPC. If this is unset, there is
+    /// no limit. The default is None.
+    pub fn max_messages_per_rpc(&self) -> Option<usize> {
+        self.max_messages_per_rpc
+    }
+
+    /// The maximum number of messages to include in an IHAVE message.
+    /// Also controls the maximum number of IHAVE ids we will accept and request with IWANT from a
+    /// peer within a heartbeat, to protect from IHAVE floods. You should adjust this value from the
+    /// default if your system is pushing more than 5000 messages in GossipSubHistoryGossip
+    /// heartbeats; with the defaults this is 1666 messages/s. The default is 5000.
+    pub fn max_ihave_length(&self) -> usize {
+        self.max_ihave_length
+    }
+
+    /// GossipSubMaxIHaveMessages is the maximum number of IHAVE messages to accept from a peer
+    /// within a heartbeat.
+    pub fn max_ihave_messages(&self) -> usize {
+        self.max_ihave_messages
+    }
+
+    /// Time to wait for a message requested through IWANT following an IHAVE advertisement.
+    /// If the message is not received within this window, a broken promise is declared and
+    /// the router may apply behavioural penalties. The default is 3 seconds.
+    pub fn iwant_followup_time(&self) -> Duration {
+        self.iwant_followup_time
+    }
+
+    /// Published message ids time cache duration. The default is 10 seconds.
+    pub fn published_message_ids_cache_time(&self) -> Duration {
+        self.published_message_ids_cache_time
+    }
+}
+
+impl Default for Config {
+    fn default() -> Self {
+        // use ConfigBuilder to also validate defaults
+        ConfigBuilder::default()
+            .build()
+            .expect("Default config parameters should be valid parameters")
+    }
+}
+
+/// The builder struct for constructing a gossipsub configuration.
+pub struct ConfigBuilder {
+    config: Config,
+    invalid_protocol: bool, // This is a bit of a hack to only expose one error to the user.
+}
+
+impl Default for ConfigBuilder {
+    fn default() -> Self {
+        ConfigBuilder {
+            config: Config {
+                protocol: ProtocolConfig::default(),
+                history_length: 5,
+                history_gossip: 3,
+                mesh_n: 6,
+                mesh_n_low: 5,
+                mesh_n_high: 12,
+                retain_scores: 4,
+                gossip_lazy: 6, // default to mesh_n
+                gossip_factor: 0.25,
+                heartbeat_initial_delay: Duration::from_secs(5),
+                heartbeat_interval: Duration::from_secs(1),
+                fanout_ttl: Duration::from_secs(60),
+                check_explicit_peers_ticks: 300,
+                duplicate_cache_time: Duration::from_secs(60),
+                validate_messages: false,
+                message_id_fn: Arc::new(|message| {
+                    // default message id is: source + sequence number
+                    // NOTE: If the source is not provided, we fall back to a fixed dummy key.
+                    let mut source_string = if let Some(peer_id) = message.source.as_ref() {
+                        peer_id.to_string()
+                    } else {
+                        SecretKey::from_bytes(&[1u8; 32]).public().to_string()
+                    };
+                    source_string
+                        .push_str(&message.sequence_number.unwrap_or_default().to_string());
+                    MessageId::from(source_string)
+                }),
+                allow_self_origin: false,
+                do_px: false,
+                prune_peers: 0, // NOTE: Increasing this currently has little effect until Signed records are implemented.
+                prune_backoff: Duration::from_secs(60),
+                unsubscribe_backoff: Duration::from_secs(10),
+                backoff_slack: 1,
+                flood_publish: true,
+                graft_flood_threshold: Duration::from_secs(10),
+                mesh_outbound_min: 2,
+                opportunistic_graft_ticks: 60,
+                opportunistic_graft_peers: 2,
+                gossip_retransimission: 3,
+                max_messages_per_rpc: None,
+                max_ihave_length: 5000,
+                max_ihave_messages: 10,
+                iwant_followup_time: Duration::from_secs(3),
+                published_message_ids_cache_time: Duration::from_secs(10),
+            },
+            invalid_protocol: false,
+        }
+    }
+}
+
+impl From<Config> for ConfigBuilder {
+    fn from(config: Config) -> Self {
+        ConfigBuilder {
+            config,
+            invalid_protocol: false,
+        }
+    }
+}
+
+impl ConfigBuilder {
+    /// Number of heartbeats to keep in the `memcache` (default is 5).
+    pub fn history_length(&mut self, history_length: usize) -> &mut Self {
+        self.config.history_length = history_length;
+        self
+    }
+
+    /// Number of past heartbeats to gossip about (default is 3).
+    pub fn history_gossip(&mut self, history_gossip: usize) -> &mut Self {
+        self.config.history_gossip = history_gossip;
+        self
+    }
+
+    /// Target number of peers for the mesh network (D in the spec, default is 6).
+    pub fn mesh_n(&mut self, mesh_n: usize) -> &mut Self {
+        self.config.mesh_n = mesh_n;
+        self
+    }
+
+    /// Minimum number of peers in mesh network before adding more (D_lo in the spec, default is 5).
+    pub fn mesh_n_low(&mut self, mesh_n_low: usize) -> &mut Self {
+        self.config.mesh_n_low = mesh_n_low;
+        self
+    }
+
+    /// Maximum number of peers in mesh network before removing some (D_high in the spec, default
+    /// is 12).
+    pub fn mesh_n_high(&mut self, mesh_n_high: usize) -> &mut Self {
+        self.config.mesh_n_high = mesh_n_high;
+        self
+    }
+
+    /// Affects how peers are selected when pruning a mesh due to over subscription.
+    ///
+    /// At least [`Self::retain_scores`] of the retained peers will be high-scoring, while the remainder are
+    /// chosen randomly (D_score in the spec, default is 4).
+    pub fn retain_scores(&mut self, retain_scores: usize) -> &mut Self {
+        self.config.retain_scores = retain_scores;
+        self
+    }
+
+    /// Minimum number of peers to emit gossip to during a heartbeat (D_lazy in the spec,
+    /// default is 6).
+    pub fn gossip_lazy(&mut self, gossip_lazy: usize) -> &mut Self {
+        self.config.gossip_lazy = gossip_lazy;
+        self
+    }
+
+    /// Affects how many peers we will emit gossip to at each heartbeat.
+    ///
+    /// We will send gossip to `gossip_factor * (total number of non-mesh peers)`, or
+    /// `gossip_lazy`, whichever is greater. The default is 0.25.
+    pub fn gossip_factor(&mut self, gossip_factor: f64) -> &mut Self {
+        self.config.gossip_factor = gossip_factor;
+        self
+    }
+
+    /// Initial delay in each heartbeat (default is 5 seconds).
+    pub fn heartbeat_initial_delay(&mut self, heartbeat_initial_delay: Duration) -> &mut Self {
+        self.config.heartbeat_initial_delay = heartbeat_initial_delay;
+        self
+    }
+
+    /// Time between each heartbeat (default is 1 second).
+    pub fn heartbeat_interval(&mut self, heartbeat_interval: Duration) -> &mut Self {
+        self.config.heartbeat_interval = heartbeat_interval;
+        self
+    }
+
+    /// The number of heartbeat ticks until we recheck the connection to explicit peers and
+    /// reconnect if necessary (default 300).
+    pub fn check_explicit_peers_ticks(&mut self, check_explicit_peers_ticks: u64) -> &mut Self {
+        self.config.check_explicit_peers_ticks = check_explicit_peers_ticks;
+        self
+    }
+
+    /// Time to live for fanout peers (default is 60 seconds).
+    pub fn fanout_ttl(&mut self, fanout_ttl: Duration) -> &mut Self {
+        self.config.fanout_ttl = fanout_ttl;
+        self
+    }
+
+    /// The maximum byte size for each gossipsub RPC (default is 65536 bytes).
+    pub fn max_transmit_size(&mut self, max_transmit_size: usize) -> &mut Self {
+        self.config.protocol.max_transmit_size = max_transmit_size;
+        self
+    }
+
+    /// Duplicates are prevented by storing message ids of known messages in an LRU time cache.
+    /// This setting sets the time period that messages are stored in the cache. Duplicates can be
+    /// received if duplicate messages are sent at a time greater than this setting apart. The
+    /// default is 1 minute.
+    pub fn duplicate_cache_time(&mut self, cache_size: Duration) -> &mut Self {
+        self.config.duplicate_cache_time = cache_size;
+        self
+    }
+
+    /// When set, prevents automatic forwarding of all received messages. This setting
+    /// allows a user to validate the messages before propagating them to their peers. If set,
+    /// the user must manually call [`crate::Behaviour::report_message_validation_result()`] on the
+    /// behaviour to forward a message once validated.
+    pub fn validate_messages(&mut self) -> &mut Self {
+        self.config.validate_messages = true;
+        self
+    }
+
+    /// Determines the level of validation used when receiving messages. See [`ValidationMode`]
+    /// for the available types. The default is ValidationMode::Strict.
+    pub fn validation_mode(&mut self, validation_mode: ValidationMode) -> &mut Self {
+        self.config.protocol.validation_mode = validation_mode;
+        self
+    }
+
+    /// A user-defined function allowing the user to specify the message id of a gossipsub message.
+    /// The default value is to concatenate the source peer id with a sequence number. Setting this
+    /// parameter allows the user to address packets arbitrarily. One example is content based
+    /// addressing, where this function may be set to `hash(message)`. This would prevent messages
+    /// of the same content from being duplicated.
+    ///
+    /// The function takes a [`Message`] as input and outputs a String to be
+    /// interpreted as the message id.
+    pub fn message_id_fn<F>(&mut self, id_fn: F) -> &mut Self
+    where
+        F: Fn(&Message) -> MessageId + Send + Sync + 'static,
+    {
+        self.config.message_id_fn = Arc::new(id_fn);
+        self
+    }
+
+    /// Enables Peer eXchange. This should be enabled in bootstrappers and other well
+    /// connected/trusted nodes. The default is false.
+    ///
+    /// Note: Peer exchange is not implemented today, see
+    /// <https://github.com/libp2p/rust-libp2p/issues/2398>.
+    pub fn do_px(&mut self) -> &mut Self {
+        self.config.do_px = true;
+        self
+    }
+
+    /// Controls the number of peers to include in prune Peer eXchange.
+    ///
+    /// When we prune a peer that's eligible for PX (has a good score, etc), we will try to
+    /// send them signed peer records for up to [`Self::prune_peers`] other peers that we
+    /// know of. It is recommended that this value is larger than [`Self::mesh_n_high`] so that the
+    /// pruned peer can reliably form a full mesh. The spec recommends 16; until signed records
+    /// are implemented this is disabled and the default is 0.
+    pub fn prune_peers(&mut self, prune_peers: usize) -> &mut Self {
+        self.config.prune_peers = prune_peers;
+        self
+    }
+
+    /// Controls the backoff time for pruned peers. This is how long
+    /// a peer must wait before attempting to graft into our mesh again after being pruned.
+    /// When pruning a peer, we send them our value of [`Self::prune_backoff`] so they know
+    /// the minimum time to wait. Peers running older versions may not send a backoff time,
+    /// so if we receive a prune message without one, we will wait at least [`Self::prune_backoff`]
+    /// before attempting to re-graft. The default is one minute.
+    pub fn prune_backoff(&mut self, prune_backoff: Duration) -> &mut Self {
+        self.config.prune_backoff = prune_backoff;
+        self
+    }
+
+    /// Controls the backoff time when unsubscribing from a topic.
+    ///
+    /// This is how long to wait before resubscribing to the topic. A short backoff period in case
+    /// of an unsubscribe event allows reaching a healthy mesh in a more timely manner. The default
+    /// is 10 seconds.
+    pub fn unsubscribe_backoff(&mut self, unsubscribe_backoff: u64) -> &mut Self {
+        self.config.unsubscribe_backoff = Duration::from_secs(unsubscribe_backoff);
+        self
+    }
+
+    /// Number of heartbeat slots considered as slack for backoffs. This guarantees that we wait
+    /// at least backoff_slack heartbeats after a backoff is over before we try to graft. This
+    /// solves problems occurring through high latencies. In particular, if
+    /// `backoff_slack * heartbeat_interval` is longer than any latencies between processing
+    /// prunes on our side and processing prunes on the receiving side, this guarantees that we
+    /// do not get punished for grafting too early. The default is 1.
+    pub fn backoff_slack(&mut self, backoff_slack: u32) -> &mut Self {
+        self.config.backoff_slack = backoff_slack;
+        self
+    }
+
+    /// Whether to do flood publishing or not. If enabled, newly created messages will always be
+    /// sent to all peers that are subscribed to the topic and have a good enough score.
+    /// The default is true.
+    pub fn flood_publish(&mut self, flood_publish: bool) -> &mut Self {
+        self.config.flood_publish = flood_publish;
+        self
+    }
+
+    /// If a GRAFT comes before `graft_flood_threshold` has elapsed since the last PRUNE,
+    /// then there is an extra score penalty applied to the peer through P7.
+    pub fn graft_flood_threshold(&mut self, graft_flood_threshold: Duration) -> &mut Self {
+        self.config.graft_flood_threshold = graft_flood_threshold;
+        self
+    }
+
+    /// Minimum number of outbound peers in the mesh network before adding more (D_out in the spec).
+    /// This value must be smaller than or equal to `mesh_n / 2` and smaller than `mesh_n_low`.
+    /// The default is 2.
+    pub fn mesh_outbound_min(&mut self, mesh_outbound_min: usize) -> &mut Self {
+        self.config.mesh_outbound_min = mesh_outbound_min;
+        self
+    }
+
+    /// Number of heartbeat ticks that specify the interval in which opportunistic grafting is
+    /// applied. Every `opportunistic_graft_ticks` we will attempt to select some high-scoring mesh
+    /// peers to replace lower-scoring ones, if the median score of our mesh peers falls below a
+    /// threshold (see <https://godoc.org/github.com/libp2p/go-libp2p-pubsub#GossipSubParams>).
+    /// The default is 60.
+    pub fn opportunistic_graft_ticks(&mut self, opportunistic_graft_ticks: u64) -> &mut Self {
+        self.config.opportunistic_graft_ticks = opportunistic_graft_ticks;
+        self
+    }
+
+    /// Controls how many times we will allow a peer to request the same message id through IWANT
+    /// gossip before we start ignoring them. This is designed to prevent peers from spamming us
+    /// with requests and wasting our resources. The default is 3.
+    pub fn gossip_retransimission(&mut self, gossip_retransimission: u32) -> &mut Self {
+        self.config.gossip_retransimission = gossip_retransimission;
+        self
+    }
+
+    /// The maximum number of new peers to graft to during opportunistic grafting. The default is 2.
+    pub fn opportunistic_graft_peers(&mut self, opportunistic_graft_peers: usize) -> &mut Self {
+        self.config.opportunistic_graft_peers = opportunistic_graft_peers;
+        self
+    }
+
+    /// The maximum number of messages we will process in a given RPC. If this is unset, there is
+    /// no limit. The default is None.
+    pub fn max_messages_per_rpc(&mut self, max: Option<usize>) -> &mut Self {
+        self.config.max_messages_per_rpc = max;
+        self
+    }
+
+    /// The maximum number of messages to include in an IHAVE message.
+    /// Also controls the maximum number of IHAVE ids we will accept and request with IWANT from a
+    /// peer within a heartbeat, to protect from IHAVE floods. You should adjust this value from the
+    /// default if your system is pushing more than 5000 messages in GossipSubHistoryGossip
+    /// heartbeats; with the defaults this is 1666 messages/s. The default is 5000.
+    pub fn max_ihave_length(&mut self, max_ihave_length: usize) -> &mut Self {
+        self.config.max_ihave_length = max_ihave_length;
+        self
+    }
+
+    /// GossipSubMaxIHaveMessages is the maximum number of IHAVE messages to accept from a peer
+    /// within a heartbeat.
+    pub fn max_ihave_messages(&mut self, max_ihave_messages: usize) -> &mut Self {
+        self.config.max_ihave_messages = max_ihave_messages;
+        self
+    }
+
+    /// By default, gossipsub will reject messages that are sent to us that have the same message
+    /// source as we have specified locally. Enabling this allows these messages and prevents
+    /// penalizing the peer that sent us the message. Default is false.
+    pub fn allow_self_origin(&mut self, allow_self_origin: bool) -> &mut Self {
+        self.config.allow_self_origin = allow_self_origin;
+        self
+    }
+
+    /// Time to wait for a message requested through IWANT following an IHAVE advertisement.
+    /// If the message is not received within this window, a broken promise is declared and
+    /// the router may apply behavioural penalties. The default is 3 seconds.
+    pub fn iwant_followup_time(&mut self, iwant_followup_time: Duration) -> &mut Self {
+        self.config.iwant_followup_time = iwant_followup_time;
+        self
+    }
+
+    /// Published message ids time cache duration. The default is 10 seconds.
+    pub fn published_message_ids_cache_time(
+        &mut self,
+        published_message_ids_cache_time: Duration,
+    ) -> &mut Self {
+        self.config.published_message_ids_cache_time = published_message_ids_cache_time;
+        self
+    }
+
+    /// Constructs a [`Config`] from the given configuration and validates the settings.
+    pub fn build(&self) -> Result<Config, ConfigBuilderError> {
+        // check all constraints on config
+
+        if self.config.protocol.max_transmit_size < 100 {
+            return Err(ConfigBuilderError::MaxTransmissionSizeTooSmall);
+        }
+
+        if self.config.history_length < self.config.history_gossip {
+            return Err(ConfigBuilderError::HistoryLengthTooSmall);
+        }
+
+        if !(self.config.mesh_outbound_min <= self.config.mesh_n_low
+            && self.config.mesh_n_low <= self.config.mesh_n
+            && self.config.mesh_n <= self.config.mesh_n_high)
+        {
+            return Err(ConfigBuilderError::MeshParametersInvalid);
+        }
+
+        if self.config.mesh_outbound_min * 2 > self.config.mesh_n {
+            return Err(ConfigBuilderError::MeshOutboundInvalid);
+        }
+
+        if self.config.unsubscribe_backoff.as_millis() == 0 {
+            return Err(ConfigBuilderError::UnsubscribeBackoffIsZero);
+        }
+
+        if self.invalid_protocol {
+            return Err(ConfigBuilderError::InvalidProtocol);
+        }
+
+        Ok(self.config.clone())
+    }
+}
+
+impl std::fmt::Debug for Config {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let mut builder = f.debug_struct("GossipsubConfig");
+        let _ = builder.field("protocol", &self.protocol);
+        let _ = builder.field("history_length", &self.history_length);
+        let _ = builder.field("history_gossip", &self.history_gossip);
+        let _ = builder.field("mesh_n", &self.mesh_n);
+        let _ = builder.field("mesh_n_low", &self.mesh_n_low);
+        let _ = builder.field("mesh_n_high", &self.mesh_n_high);
+        let _ = builder.field("retain_scores", &self.retain_scores);
+        let _ = builder.field("gossip_lazy", &self.gossip_lazy);
+        let _ = builder.field("gossip_factor", &self.gossip_factor);
+        let _ = builder.field("heartbeat_initial_delay", &self.heartbeat_initial_delay);
+        let _ = builder.field("heartbeat_interval", &self.heartbeat_interval);
+        let _ = builder.field("fanout_ttl", &self.fanout_ttl);
+        let _ = builder.field("duplicate_cache_time", &self.duplicate_cache_time);
+        let _ = builder.field("validate_messages", &self.validate_messages);
+        let _ = builder.field("allow_self_origin", &self.allow_self_origin);
+        let _ = builder.field("do_px", &self.do_px);
+        let _ = builder.field("prune_peers", &self.prune_peers);
+        let _ = builder.field("prune_backoff", &self.prune_backoff);
+        let _ = builder.field("backoff_slack", &self.backoff_slack);
+        let _ = builder.field("flood_publish", &self.flood_publish);
+        let _ = builder.field("graft_flood_threshold", &self.graft_flood_threshold);
+        let _ = builder.field("mesh_outbound_min", &self.mesh_outbound_min);
+        let _ = builder.field("opportunistic_graft_ticks", &self.opportunistic_graft_ticks);
+        let _ = builder.field("opportunistic_graft_peers", &self.opportunistic_graft_peers);
+        let _ = builder.field("max_messages_per_rpc", &self.max_messages_per_rpc);
+        let _ = builder.field("max_ihave_length", &self.max_ihave_length);
+        let _ = builder.field("max_ihave_messages", &self.max_ihave_messages);
+        let _ = builder.field("iwant_followup_time", &self.iwant_followup_time);
+        let _ = builder.field(
+            "published_message_ids_cache_time",
+            &self.published_message_ids_cache_time,
+        );
+        builder.finish()
+    }
+}
+
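For reference, a minimal sketch of driving the builder end to end; it assumes `ConfigBuilder` and `ValidationMode` are re-exported at the crate root, as in upstream rust-libp2p:

    use std::time::Duration;

    use gossipsub::{ConfigBuilder, ValidationMode};

    // mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high must hold,
    // and mesh_outbound_min * 2 must not exceed mesh_n, or build() errors.
    let config = ConfigBuilder::default()
        .mesh_n(8)
        .mesh_n_low(6)
        .mesh_n_high(12)
        .heartbeat_interval(Duration::from_millis(700))
        .validation_mode(ValidationMode::Permissive)
        .build()
        .expect("parameters satisfy the constraints checked in build()");

    // A mesh_n_low above the default mesh_n of 6 violates the inequality:
    assert!(ConfigBuilder::default().mesh_n_low(20).build().is_err());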
+#[cfg(test)]
+mod test {
+    use super::*;
+    use crate::topic::IdentityHash;
+    use crate::Topic;
+    use std::collections::hash_map::DefaultHasher;
+    use std::hash::{Hash, Hasher};
+
+    #[test]
+    fn create_config_with_message_id_as_plain_function() {
+        let config = ConfigBuilder::default()
+            .message_id_fn(message_id_plain_function)
+            .build()
+            .unwrap();
+
+        let result = config.message_id(&get_gossipsub_message());
+
+        assert_eq!(result, get_expected_message_id());
+    }
+
+    #[test]
+    fn create_config_with_message_id_as_closure() {
+        let config = ConfigBuilder::default()
+            .message_id_fn(|message: &Message| {
+                let mut s = DefaultHasher::new();
+                message.data.hash(&mut s);
+                let mut v = s.finish().to_string();
+                v.push('e');
+                MessageId::from(v)
+            })
+            .build()
+            .unwrap();
+
+        let result = config.message_id(&get_gossipsub_message());
+
+        assert_eq!(result, get_expected_message_id());
+    }
+
+    #[test]
+    fn create_config_with_message_id_as_closure_with_variable_capture() {
+        let captured: char = 'e';
+
+        let config = ConfigBuilder::default()
+            .message_id_fn(move |message: &Message| {
+                let mut s = DefaultHasher::new();
+                message.data.hash(&mut s);
+                let mut v = s.finish().to_string();
+                v.push(captured);
+                MessageId::from(v)
+            })
+            .build()
+            .unwrap();
+
+        let result = config.message_id(&get_gossipsub_message());
+
+        assert_eq!(result, get_expected_message_id());
+    }
+
+    fn get_gossipsub_message() -> Message {
+        Message {
+            source: None,
+            data: vec![12, 34, 56],
+            sequence_number: None,
+            topic: Topic::<IdentityHash>::new("test").hash(),
+        }
+    }
+
+    fn get_expected_message_id() -> MessageId {
+        MessageId::from([
+            49, 55, 56, 51, 56, 52, 49, 51, 52, 51, 52, 55, 51, 51, 53, 52, 54, 54, 52, 49, 101,
+        ])
+    }
+
+    fn message_id_plain_function(message: &Message) -> MessageId {
+        let mut s = DefaultHasher::new();
+        message.data.hash(&mut s);
+        let mut v = s.finish().to_string();
+        v.push('e');
+        MessageId::from(v)
+    }
+}
diff --git a/gossipsub/src/error.rs b/gossipsub/src/error.rs
new file mode 100644
index 0000000..a9d9c09
--- /dev/null
+++ b/gossipsub/src/error.rs
@@ -0,0 +1,127 @@
+//! Error types that can result from gossipsub.
+
+use serde::{Deserialize, Serialize};
+
+/// Error associated with publishing a gossipsub message.
+#[derive(Debug)]
+pub enum PublishError {
+    /// This message has already been published.
+    Duplicate,
+    /// There were no peers to send this message to.
+    InsufficientPeers,
+    /// The overall message was too large. This could be due to excessive topics or an excessive
+    /// message size.
+    MessageTooLarge,
+    /// The compression algorithm failed.
+    TransformFailed(std::io::Error),
+}
+
+impl std::fmt::Display for PublishError {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        write!(f, "{self:?}")
+    }
+}
+
+impl std::error::Error for PublishError {
+    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+        match self {
+            Self::TransformFailed(err) => Some(err),
+            _ => None,
+        }
+    }
+}
+
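A hedged sketch of handling these variants at a publish call site; the `publish` method returning `Result<MessageId, PublishError>` is assumed from upstream rust-libp2p's `Behaviour::publish`, and `gs`, `topic`, and `data` are hypothetical bindings for a behaviour instance, a topic, and payload bytes:

    match gs.publish(topic.clone(), data) {
        Ok(id) => tracing::debug!("published message {id}"),
        // Already in the published-ids cache; nothing needs to be resent.
        Err(PublishError::Duplicate) => {}
        // No peers are subscribed yet; worth retrying once the mesh has formed.
        Err(PublishError::InsufficientPeers) => tracing::warn!("no peers to publish to"),
        Err(e) => return Err(e.into()),
    }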
+/// Error associated with subscribing to a topic.
+#[derive(Debug)]
+pub enum SubscriptionError {
+    /// Couldn't publish our subscription
+    PublishError(PublishError),
+    /// We are not allowed to subscribe to this topic by the subscription filter
+    NotAllowed,
+}
+
+impl std::fmt::Display for SubscriptionError {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        write!(f, "{self:?}")
+    }
+}
+
+impl std::error::Error for SubscriptionError {
+    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+        match self {
+            Self::PublishError(err) => Some(err),
+            _ => None,
+        }
+    }
+}
+
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
+pub enum ValidationError {
+    /// The message has an invalid signature.
+    InvalidSignature,
+    /// The sequence number was empty, expected a value.
+    EmptySequenceNumber,
+    /// The sequence number was the incorrect size.
+    InvalidSequenceNumber,
+    /// The NodeId was invalid.
+    InvalidNodeId,
+    /// A signature was present when the validation mode has been set to
+    /// [`crate::behaviour::MessageAuthenticity::Anonymous`].
+    SignaturePresent,
+    /// A sequence number was present when the validation mode has been set to
+    /// [`crate::behaviour::MessageAuthenticity::Anonymous`].
+    SequenceNumberPresent,
+    /// A message source was present when the validation mode has been set to
+    /// [`crate::behaviour::MessageAuthenticity::Anonymous`].
+    MessageSourcePresent,
+    /// The data transformation failed.
+    TransformFailed,
+}
+
+impl std::fmt::Display for ValidationError {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        write!(f, "{self:?}")
+    }
+}
+
+impl std::error::Error for ValidationError {}
+
+impl From<std::io::Error> for PublishError {
+    fn from(error: std::io::Error) -> PublishError {
+        PublishError::TransformFailed(error)
+    }
+}
+
+/// Error associated with Config building.
+#[derive(Debug)]
+pub enum ConfigBuilderError {
+    /// Maximum transmission size is too small.
+    MaxTransmissionSizeTooSmall,
+    /// History length less than history gossip length.
+    HistoryLengthTooSmall,
+    /// The inequality mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high doesn't hold
+    MeshParametersInvalid,
+    /// The inequality mesh_outbound_min <= mesh_n / 2 doesn't hold
+    MeshOutboundInvalid,
+    /// unsubscribe_backoff is zero
+    UnsubscribeBackoffIsZero,
+    /// Invalid protocol
+    InvalidProtocol,
+}
+
+impl std::error::Error for ConfigBuilderError {}
+
+impl std::fmt::Display for ConfigBuilderError {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        match self {
+            Self::MaxTransmissionSizeTooSmall => {
+                write!(f, "Maximum transmission size is too small")
+            }
+            Self::HistoryLengthTooSmall => write!(f, "History length less than history gossip length"),
+            Self::MeshParametersInvalid => write!(f, "The inequality mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high doesn't hold"),
+            Self::MeshOutboundInvalid => write!(f, "The inequality mesh_outbound_min <= mesh_n / 2 doesn't hold"),
+            Self::UnsubscribeBackoffIsZero => write!(f, "unsubscribe_backoff is zero"),
+            Self::InvalidProtocol => write!(f, "Invalid protocol"),
+        }
+    }
+}
diff --git a/gossipsub/src/generated/compat.proto b/gossipsub/src/generated/compat.proto
new file mode 100644
index 0000000..b2753bf
--- /dev/null
+++ b/gossipsub/src/generated/compat.proto
@@ -0,0 +1,12 @@
+syntax = "proto2";
+
+package compat.pb;
+
+message Message {
+    optional bytes from = 1;
+    optional bytes data = 2;
+    optional bytes seqno = 3;
+    repeated string topic_ids = 4;
+    optional bytes signature = 5;
+    optional bytes key = 6;
+}
\ No newline at end of file
diff --git a/gossipsub/src/generated/compat/mod.rs b/gossipsub/src/generated/compat/mod.rs
new file mode 100644
index 0000000..aec6164
--- /dev/null
+++ b/gossipsub/src/generated/compat/mod.rs
@@ -0,0 +1,2 @@
+// Automatically generated mod.rs
+pub mod pb;
diff --git a/gossipsub/src/generated/compat/pb.rs b/gossipsub/src/generated/compat/pb.rs
new file mode 100644
index 0000000..fd59c38
--- /dev/null
+++ b/gossipsub/src/generated/compat/pb.rs
@@ -0,0 +1,67 @@
+// Automatically generated rust module for 'compat.proto' file
+
+#![allow(non_snake_case)]
+#![allow(non_upper_case_globals)]
+#![allow(non_camel_case_types)]
+#![allow(unused_imports)]
+#![allow(unknown_lints)]
+#![allow(clippy::all)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
+
+use quick_protobuf::{MessageInfo, MessageRead, MessageWrite, BytesReader, Writer, WriterBackend, Result};
+use quick_protobuf::sizeofs::*;
+use super::super::*;
+
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Debug, Default, PartialEq, Clone)]
+pub struct Message {
+    pub from: Option<Vec<u8>>,
+    pub data: Option<Vec<u8>>,
+    pub seqno: Option<Vec<u8>>,
+    pub topic_ids: Vec<String>,
+    pub signature: Option<Vec<u8>>,
+    pub key: Option<Vec<u8>>,
+}
+
+impl<'a> MessageRead<'a> for Message {
+    fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
+        let mut msg = Self::default();
+        while !r.is_eof() {
+            match r.next_tag(bytes) {
+                Ok(10) => msg.from = Some(r.read_bytes(bytes)?.to_owned()),
+                Ok(18) => msg.data = Some(r.read_bytes(bytes)?.to_owned()),
+                Ok(26) => msg.seqno = Some(r.read_bytes(bytes)?.to_owned()),
+                Ok(34) => msg.topic_ids.push(r.read_string(bytes)?.to_owned()),
+                Ok(42) => msg.signature = Some(r.read_bytes(bytes)?.to_owned()),
+                Ok(50) => msg.key = Some(r.read_bytes(bytes)?.to_owned()),
+                Ok(t) => { r.read_unknown(bytes, t)?; }
+                Err(e) => return Err(e),
+            }
+        }
+        Ok(msg)
+    }
+}
+
+impl MessageWrite for Message {
+    fn get_size(&self) -> usize {
+        0
sizeof_len((m).len())) + + self.data.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.seqno.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.topic_ids.iter().map(|s| 1 + sizeof_len((s).len())).sum::() + + self.signature.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.key.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.from { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.data { w.write_with_tag(18, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.seqno { w.write_with_tag(26, |w| w.write_bytes(&**s))?; } + for s in &self.topic_ids { w.write_with_tag(34, |w| w.write_string(&**s))?; } + if let Some(ref s) = self.signature { w.write_with_tag(42, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.key { w.write_with_tag(50, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + diff --git a/gossipsub/src/generated/gossipsub/mod.rs b/gossipsub/src/generated/gossipsub/mod.rs new file mode 100644 index 0000000..aec6164 --- /dev/null +++ b/gossipsub/src/generated/gossipsub/mod.rs @@ -0,0 +1,2 @@ +// Automatically generated mod.rs +pub mod pb; diff --git a/gossipsub/src/generated/gossipsub/pb.rs b/gossipsub/src/generated/gossipsub/pb.rs new file mode 100644 index 0000000..aecaed5 --- /dev/null +++ b/gossipsub/src/generated/gossipsub/pb.rs @@ -0,0 +1,566 @@ +// Automatically generated rust module for 'rpc.proto' file + +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_imports)] +#![allow(unknown_lints)] +#![allow(clippy::all)] +#![cfg_attr(rustfmt, rustfmt_skip)] + + +use quick_protobuf::{MessageInfo, MessageRead, MessageWrite, BytesReader, Writer, WriterBackend, Result}; +use quick_protobuf::sizeofs::*; +use super::super::*; + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct RPC { + pub subscriptions: Vec, + pub publish: Vec, + pub control: Option, +} + +impl<'a> MessageRead<'a> for RPC { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.subscriptions.push(r.read_message::(bytes)?), + Ok(18) => msg.publish.push(r.read_message::(bytes)?), + Ok(26) => msg.control = Some(r.read_message::(bytes)?), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for RPC { + fn get_size(&self) -> usize { + 0 + + self.subscriptions.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + + self.publish.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + + self.control.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + for s in &self.subscriptions { w.write_with_tag(10, |w| w.write_message(s))?; } + for s in &self.publish { w.write_with_tag(18, |w| w.write_message(s))?; } + if let Some(ref s) = self.control { w.write_with_tag(26, |w| w.write_message(s))?; } + Ok(()) + } +} + +pub mod mod_RPC { + +use super::*; + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct SubOpts { + pub subscribe: Option, + pub topic_id: Option, +} + +impl<'a> MessageRead<'a> for SubOpts { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(8) => msg.subscribe = 
Some(r.read_bool(bytes)?), + Ok(18) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for SubOpts { + fn get_size(&self) -> usize { + 0 + + self.subscribe.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64)) + + self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.subscribe { w.write_with_tag(8, |w| w.write_bool(*s))?; } + if let Some(ref s) = self.topic_id { w.write_with_tag(18, |w| w.write_string(&**s))?; } + Ok(()) + } +} + +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct Message { + pub from: Option<[u8; 32]>, + pub data: Option>, + pub seqno: Option>, + pub topic: String, + pub signature: Option>, + pub key: Option>, +} + +impl<'a> MessageRead<'a> for Message { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.from = Some(r.read_bytes(bytes)?.to_owned()), + Ok(18) => msg.data = Some(r.read_bytes(bytes)?.to_owned()), + Ok(26) => msg.seqno = Some(r.read_bytes(bytes)?.to_owned()), + Ok(34) => msg.topic = r.read_string(bytes)?.to_owned(), + Ok(42) => msg.signature = Some(r.read_bytes(bytes)?.to_owned()), + Ok(50) => msg.key = Some(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for Message { + fn get_size(&self) -> usize { + 0 + + self.from.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.data.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.seqno.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + 1 + sizeof_len((&self.topic).len()) + + self.signature.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.key.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.from { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.data { w.write_with_tag(18, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.seqno { w.write_with_tag(26, |w| w.write_bytes(&**s))?; } + w.write_with_tag(34, |w| w.write_string(&**&self.topic))?; + if let Some(ref s) = self.signature { w.write_with_tag(42, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.key { w.write_with_tag(50, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct ControlMessage { + pub ihave: Vec, + pub iwant: Vec, + pub graft: Vec, + pub prune: Vec, +} + +impl<'a> MessageRead<'a> for ControlMessage { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.ihave.push(r.read_message::(bytes)?), + Ok(18) => msg.iwant.push(r.read_message::(bytes)?), + Ok(26) => msg.graft.push(r.read_message::(bytes)?), + Ok(34) => msg.prune.push(r.read_message::(bytes)?), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for ControlMessage { + fn get_size(&self) -> usize { + 0 + + self.ihave.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + + self.iwant.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + + self.graft.iter().map(|s| 1 + 
sizeof_len((s).get_size())).sum::() + + self.prune.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + for s in &self.ihave { w.write_with_tag(10, |w| w.write_message(s))?; } + for s in &self.iwant { w.write_with_tag(18, |w| w.write_message(s))?; } + for s in &self.graft { w.write_with_tag(26, |w| w.write_message(s))?; } + for s in &self.prune { w.write_with_tag(34, |w| w.write_message(s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct ControlIHave { + pub topic_id: Option, + pub message_ids: Vec>, +} + +impl<'a> MessageRead<'a> for ControlIHave { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()), + Ok(18) => msg.message_ids.push(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for ControlIHave { + fn get_size(&self) -> usize { + 0 + + self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.message_ids.iter().map(|s| 1 + sizeof_len((s).len())).sum::() + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.topic_id { w.write_with_tag(10, |w| w.write_string(&**s))?; } + for s in &self.message_ids { w.write_with_tag(18, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct ControlIWant { + pub message_ids: Vec>, +} + +impl<'a> MessageRead<'a> for ControlIWant { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.message_ids.push(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for ControlIWant { + fn get_size(&self) -> usize { + 0 + + self.message_ids.iter().map(|s| 1 + sizeof_len((s).len())).sum::() + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + for s in &self.message_ids { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct ControlGraft { + pub topic_id: Option, +} + +impl<'a> MessageRead<'a> for ControlGraft { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for ControlGraft { + fn get_size(&self) -> usize { + 0 + + self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.topic_id { w.write_with_tag(10, |w| w.write_string(&**s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct ControlPrune { + pub topic_id: Option, + pub peers: Vec, + pub backoff: Option, +} + +impl<'a> MessageRead<'a> for ControlPrune { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match 
r.next_tag(bytes) { + Ok(10) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()), + Ok(18) => msg.peers.push(r.read_message::(bytes)?), + Ok(24) => msg.backoff = Some(r.read_uint64(bytes)?), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for ControlPrune { + fn get_size(&self) -> usize { + 0 + + self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.peers.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + + self.backoff.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64)) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.topic_id { w.write_with_tag(10, |w| w.write_string(&**s))?; } + for s in &self.peers { w.write_with_tag(18, |w| w.write_message(s))?; } + if let Some(ref s) = self.backoff { w.write_with_tag(24, |w| w.write_uint64(*s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct PeerInfo { + pub peer_id: Option>, + pub signed_peer_record: Option>, +} + +impl<'a> MessageRead<'a> for PeerInfo { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.peer_id = Some(r.read_bytes(bytes)?.to_owned()), + Ok(18) => msg.signed_peer_record = Some(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for PeerInfo { + fn get_size(&self) -> usize { + 0 + + self.peer_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.signed_peer_record.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.peer_id { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.signed_peer_record { w.write_with_tag(18, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct TopicDescriptor { + pub name: Option, + pub auth: Option, + pub enc: Option, +} + +impl<'a> MessageRead<'a> for TopicDescriptor { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.name = Some(r.read_string(bytes)?.to_owned()), + Ok(18) => msg.auth = Some(r.read_message::(bytes)?), + Ok(26) => msg.enc = Some(r.read_message::(bytes)?), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for TopicDescriptor { + fn get_size(&self) -> usize { + 0 + + self.name.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.auth.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) + + self.enc.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.name { w.write_with_tag(10, |w| w.write_string(&**s))?; } + if let Some(ref s) = self.auth { w.write_with_tag(18, |w| w.write_message(s))?; } + if let Some(ref s) = self.enc { w.write_with_tag(26, |w| w.write_message(s))?; } + Ok(()) + } +} + +pub mod mod_TopicDescriptor { + +use super::*; + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct AuthOpts { + pub mode: Option, + pub keys: Vec>, +} + +impl<'a> MessageRead<'a> for AuthOpts { + fn from_reader(r: &mut BytesReader, 
bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(8) => msg.mode = Some(r.read_enum(bytes)?), + Ok(18) => msg.keys.push(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for AuthOpts { + fn get_size(&self) -> usize { + 0 + + self.mode.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64)) + + self.keys.iter().map(|s| 1 + sizeof_len((s).len())).sum::() + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.mode { w.write_with_tag(8, |w| w.write_enum(*s as i32))?; } + for s in &self.keys { w.write_with_tag(18, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + +pub mod mod_AuthOpts { + + +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum AuthMode { + NONE = 0, + KEY = 1, + WOT = 2, +} + +impl Default for AuthMode { + fn default() -> Self { + AuthMode::NONE + } +} + +impl From for AuthMode { + fn from(i: i32) -> Self { + match i { + 0 => AuthMode::NONE, + 1 => AuthMode::KEY, + 2 => AuthMode::WOT, + _ => Self::default(), + } + } +} + +impl<'a> From<&'a str> for AuthMode { + fn from(s: &'a str) -> Self { + match s { + "NONE" => AuthMode::NONE, + "KEY" => AuthMode::KEY, + "WOT" => AuthMode::WOT, + _ => Self::default(), + } + } +} + +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct EncOpts { + pub mode: Option, + pub key_hashes: Vec>, +} + +impl<'a> MessageRead<'a> for EncOpts { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(8) => msg.mode = Some(r.read_enum(bytes)?), + Ok(18) => msg.key_hashes.push(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for EncOpts { + fn get_size(&self) -> usize { + 0 + + self.mode.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64)) + + self.key_hashes.iter().map(|s| 1 + sizeof_len((s).len())).sum::() + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.mode { w.write_with_tag(8, |w| w.write_enum(*s as i32))?; } + for s in &self.key_hashes { w.write_with_tag(18, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + +pub mod mod_EncOpts { + + +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum EncMode { + NONE = 0, + SHAREDKEY = 1, + WOT = 2, +} + +impl Default for EncMode { + fn default() -> Self { + EncMode::NONE + } +} + +impl From for EncMode { + fn from(i: i32) -> Self { + match i { + 0 => EncMode::NONE, + 1 => EncMode::SHAREDKEY, + 2 => EncMode::WOT, + _ => Self::default(), + } + } +} + +impl<'a> From<&'a str> for EncMode { + fn from(s: &'a str) -> Self { + match s { + "NONE" => EncMode::NONE, + "SHAREDKEY" => EncMode::SHAREDKEY, + "WOT" => EncMode::WOT, + _ => Self::default(), + } + } +} + +} + +} diff --git a/gossipsub/src/generated/mod.rs b/gossipsub/src/generated/mod.rs new file mode 100644 index 0000000..7ac564f --- /dev/null +++ b/gossipsub/src/generated/mod.rs @@ -0,0 +1,3 @@ +// Automatically generated mod.rs +pub mod compat; +pub mod gossipsub; diff --git a/gossipsub/src/generated/rpc.proto b/gossipsub/src/generated/rpc.proto new file mode 100644 index 0000000..2ce12f3 --- /dev/null +++ b/gossipsub/src/generated/rpc.proto @@ -0,0 +1,84 @@ +syntax = "proto2"; + +package gossipsub.pb; + +message RPC { + repeated SubOpts subscriptions = 1; + repeated 
Message publish = 2; + + message SubOpts { + optional bool subscribe = 1; // subscribe or unsubscribe + optional string topic_id = 2; + } + + optional ControlMessage control = 3; +} + +message Message { + optional bytes from = 1; + optional bytes data = 2; + optional bytes seqno = 3; + required string topic = 4; + optional bytes signature = 5; + optional bytes key = 6; +} + +message ControlMessage { + repeated ControlIHave ihave = 1; + repeated ControlIWant iwant = 2; + repeated ControlGraft graft = 3; + repeated ControlPrune prune = 4; +} + +message ControlIHave { + optional string topic_id = 1; + repeated bytes message_ids = 2; +} + +message ControlIWant { + repeated bytes message_ids= 1; +} + +message ControlGraft { + optional string topic_id = 1; +} + +message ControlPrune { + optional string topic_id = 1; + repeated PeerInfo peers = 2; // gossipsub v1.1 PX + optional uint64 backoff = 3; // gossipsub v1.1 backoff time (in seconds) +} + +message PeerInfo { + optional bytes peer_id = 1; + optional bytes signed_peer_record = 2; +} + +// topicID = hash(topicDescriptor); (not the topic.name) +message TopicDescriptor { + optional string name = 1; + optional AuthOpts auth = 2; + optional EncOpts enc = 3; + + message AuthOpts { + optional AuthMode mode = 1; + repeated bytes keys = 2; // root keys to trust + + enum AuthMode { + NONE = 0; // no authentication, anyone can publish + KEY = 1; // only messages signed by keys in the topic descriptor are accepted + WOT = 2; // web of trust, certificates can allow publisher set to grow + } + } + + message EncOpts { + optional EncMode mode = 1; + repeated bytes key_hashes = 2; // the hashes of the shared keys used (salted) + + enum EncMode { + NONE = 0; // no encryption, anyone can read + SHAREDKEY = 1; // messages are encrypted with shared key + WOT = 2; // web of trust, certificates can allow publisher set to grow + } + } +} diff --git a/gossipsub/src/gossip_promises.rs b/gossipsub/src/gossip_promises.rs new file mode 100644 index 0000000..89e7863 --- /dev/null +++ b/gossipsub/src/gossip_promises.rs @@ -0,0 +1,81 @@ +use crate::peer_score::RejectReason; +use crate::MessageId; +use crate::ValidationError; +use iroh::net::NodeId; +use std::collections::HashMap; +use web_time::Instant; + +/// Tracks recently sent `IWANT` messages and checks if peers respond to them. +#[derive(Debug, Default)] +pub(crate) struct GossipPromises { + /// Stores for each tracked message id and peer the instant when this promise expires. + /// + /// If the peer didn't respond until then we consider the promise as broken and penalize the + /// peer. + promises: HashMap>, +} + +impl GossipPromises { + /// Returns true if the message id exists in the promises. + pub(crate) fn contains(&self, message: &MessageId) -> bool { + self.promises.contains_key(message) + } + + /// Track a promise to deliver a message from a list of [`MessageId`]s we are requesting. + pub(crate) fn add_promise(&mut self, peer: NodeId, messages: &[MessageId], expires: Instant) { + for message_id in messages { + // If a promise for this message id and peer already exists we don't update the expiry! + self.promises + .entry(message_id.clone()) + .or_default() + .entry(peer) + .or_insert(expires); + } + } + + pub(crate) fn message_delivered(&mut self, message_id: &MessageId) { + // Someone delivered a message, we can stop tracking all promises for it. 
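+        // Dropping the whole entry also forgets the per-peer expiry times, so a late
+        // delivery of this id can no longer be counted as a broken promise.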
+        self.promises.remove(message_id);
+    }
+
+    pub(crate) fn reject_message(&mut self, message_id: &MessageId, reason: &RejectReason) {
+        // A message got rejected, so we can stop tracking promises and let the score penalty apply
+        // from invalid message delivery.
+        // We do take exception and apply promise penalty regardless in the following cases, where
+        // the peer delivered an obviously invalid message.
+        match reason {
+            RejectReason::ValidationError(ValidationError::InvalidSignature) => (),
+            RejectReason::SelfOrigin => (),
+            _ => {
+                self.promises.remove(message_id);
+            }
+        };
+    }
+
+    /// Returns the number of broken promises for each peer who didn't follow up on an IWANT
+    /// request.
+    /// This should not be called too often relative to the expire times, since it iterates over
+    /// the whole stored data.
+    pub(crate) fn get_broken_promises(&mut self) -> HashMap<NodeId, usize> {
+        let now = Instant::now();
+        let mut result = HashMap::new();
+        self.promises.retain(|msg, peers| {
+            peers.retain(|peer_id, expires| {
+                if *expires < now {
+                    let count = result.entry(*peer_id).or_insert(0);
+                    *count += 1;
+                    tracing::debug!(
+                        peer=%peer_id,
+                        message=%msg,
+                        "[Penalty] The peer broke the promise to deliver message in time!"
+                    );
+                    false
+                } else {
+                    true
+                }
+            });
+            !peers.is_empty()
+        });
+        result
+    }
+}
diff --git a/gossipsub/src/handler.rs b/gossipsub/src/handler.rs
new file mode 100644
index 0000000..48f4895
--- /dev/null
+++ b/gossipsub/src/handler.rs
@@ -0,0 +1,304 @@
+use crate::protocol::{GossipFramed, ProtocolConfig};
+use crate::types::{self, RawMessage};
+use crate::ValidationError;
+use futures::{SinkExt, StreamExt};
+use iroh::net::endpoint::{RecvStream, SendStream};
+use serde::{Deserialize, Serialize};
+use smallvec::SmallVec;
+use std::pin::Pin;
+use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
+use tokio::sync::mpsc;
+use web_time::Instant;
+
+/// The event emitted by the Handler. This informs the behaviour of various events created
+/// by the handler.
+#[derive(Debug, Serialize, Deserialize)]
+pub enum HandlerEvent {
+    /// A GossipsubRPC message has been received. This also contains a list of invalid messages (if
+    /// any) that were received.
+    Message {
+        /// The GossipsubRPC message excluding any invalid messages.
+        rpc: types::Rpc,
+        /// Any invalid messages that were received in the RPC, along with the associated
+        /// validation error.
+        invalid_messages: Vec<(RawMessage, ValidationError)>,
+    },
+}
+
+#[derive(Debug)]
+pub struct Stream {
+    send: SendStream,
+    recv: RecvStream,
+}
+
+impl AsyncWrite for Stream {
+    fn poll_write(
+        mut self: Pin<&mut Self>,
+        cx: &mut std::task::Context<'_>,
+        buf: &[u8],
+    ) -> std::task::Poll<std::io::Result<usize>> {
+        Pin::new(&mut self.send).poll_write(cx, buf)
+    }
+
+    fn poll_flush(
+        mut self: Pin<&mut Self>,
+        cx: &mut std::task::Context<'_>,
+    ) -> std::task::Poll<std::io::Result<()>> {
+        Pin::new(&mut self.send).poll_flush(cx)
+    }
+
+    fn poll_shutdown(
+        mut self: Pin<&mut Self>,
+        cx: &mut std::task::Context<'_>,
+    ) -> std::task::Poll<std::io::Result<()>> {
+        Pin::new(&mut self.send).poll_shutdown(cx)
+    }
+}
+
+impl AsyncRead for Stream {
+    fn poll_read(
+        mut self: Pin<&mut Self>,
+        cx: &mut std::task::Context<'_>,
+        buf: &mut ReadBuf<'_>,
+    ) -> std::task::Poll<std::io::Result<()>> {
+        Pin::new(&mut self.recv).poll_read(cx, buf)
+    }
+}
+
+/// A message sent from the behaviour to the handler.
+#[allow(clippy::large_enum_variant)]
+#[derive(Debug)]
+pub enum HandlerIn {
+    /// A gossipsub message to send.
+    Message(types::RpcOut),
+    /// The peer has joined the mesh.
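+    /// (While the peer is in the mesh the handler keeps the connection alive; see `in_mesh` below.)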
+ JoinedMesh, + /// The peer has left the mesh. + LeftMesh, +} + +/// The maximum number of inbound or outbound substreams attempts we allow. +/// +/// Gossipsub is supposed to have a single long-lived inbound and outbound substream. On failure we +/// attempt to recreate these. This imposes an upper bound of new substreams before we consider the +/// connection faulty and disable the handler. This also prevents against potential substream +/// creation loops. +const MAX_SUBSTREAM_ATTEMPTS: usize = 5; + +/// Protocol Handler that manages a single long-lived substream with a peer. +pub struct Handler { + handler_sender: mpsc::Sender, + handler_receiver: mpsc::Receiver, + + /// Upgrade configuration for the gossipsub protocol. + listen_protocol: ProtocolConfig, + + /// The single long-lived outbound substream. + outbound_substream: Option, + + /// The single long-lived inbound substream. + inbound_substream: Option, + + /// Queue of values that we want to send to the remote. + send_queue: SmallVec<[types::Rpc; 16]>, + + /// Flag indicating that an outbound substream is being established to prevent duplicate + /// requests. + outbound_substream_establishing: bool, + + /// The number of outbound substreams we have requested. + outbound_substream_attempts: usize, + + /// The number of inbound substreams that have been created by the peer. + inbound_substream_attempts: usize, + + last_io_activity: Instant, + + /// Keeps track of whether this connection is for a peer in the mesh. This is used to make + /// decisions about the keep alive state for this connection. + in_mesh: bool, +} + +/// State of the inbound substream, opened either by us or by the remote. +enum InboundSubstreamState { + /// Waiting for a message from the remote. The idle state for an inbound substream. + WaitingInput(GossipFramed), + /// The substream is being closed. + Closing(GossipFramed), + /// An error occurred during processing. + Poisoned, +} + +/// State of the outbound substream, opened either by us or by the remote. +enum OutboundSubstreamState { + /// Waiting for the user to send a message. The idle state for an outbound substream. + WaitingOutput(GossipFramed), + /// Waiting to send a message to the remote. + PendingSend(GossipFramed, types::Rpc), + /// Waiting to flush the substream so that the data arrives to the remote. + PendingFlush(GossipFramed), + /// An error occurred during processing. + Poisoned, +} + +impl Handler { + /// Builds a new [`Handler`]. + pub fn new( + protocol_config: ProtocolConfig, + handler_sender: mpsc::Sender, + handler_receiver: mpsc::Receiver, + ) -> Self { + Handler { + handler_sender, + handler_receiver, + listen_protocol: protocol_config, + inbound_substream: None, + outbound_substream: None, + outbound_substream_establishing: false, + outbound_substream_attempts: 0, + inbound_substream_attempts: 0, + send_queue: SmallVec::new(), + last_io_activity: Instant::now(), + in_mesh: false, + } + } + + fn on_fully_negotiated_inbound(&mut self, substream: GossipFramed) { + // new inbound substream. Replace the current one, if it exists. 
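+        // Any previous inbound substream is simply dropped here; if the peer is still
+        // around it will drive the replacement stream.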
+ tracing::trace!("New inbound substream request"); + self.inbound_substream = Some(InboundSubstreamState::WaitingInput(substream)); + } + + fn on_fully_negotiated_outbound(&mut self, substream: GossipFramed) { + assert!( + self.outbound_substream.is_none(), + "Established an outbound substream with one already available" + ); + self.outbound_substream = Some(OutboundSubstreamState::WaitingOutput(substream)); + } + + async fn run(mut self) { + // TODO: use select! + + if let Ok(event) = self.handler_receiver.try_recv() { + match event { + HandlerIn::Message(m) => self.send_queue.push(m.into()), + HandlerIn::JoinedMesh => { + self.in_mesh = true; + } + HandlerIn::LeftMesh => { + self.in_mesh = false; + } + } + } + + // process outbound stream + loop { + match std::mem::replace( + &mut self.outbound_substream, + Some(OutboundSubstreamState::Poisoned), + ) { + // outbound idle state + Some(OutboundSubstreamState::WaitingOutput(substream)) => { + if let Some(message) = self.send_queue.pop() { + self.send_queue.shrink_to_fit(); + self.outbound_substream = + Some(OutboundSubstreamState::PendingSend(substream, message)); + continue; + } + + self.outbound_substream = + Some(OutboundSubstreamState::WaitingOutput(substream)); + break; + } + Some(OutboundSubstreamState::PendingSend(mut substream, message)) => { + match substream.send(message).await { + Ok(()) => { + self.outbound_substream = + Some(OutboundSubstreamState::PendingFlush(substream)) + } + Err(e) => { + tracing::debug!("Failed to send message on outbound stream: {e}"); + self.outbound_substream = None; + break; + } + } + } + Some(OutboundSubstreamState::PendingFlush(mut substream)) => { + match substream.flush().await { + Ok(()) => { + self.last_io_activity = Instant::now(); + self.outbound_substream = + Some(OutboundSubstreamState::WaitingOutput(substream)) + } + Err(e) => { + tracing::debug!("Failed to flush outbound stream: {e}"); + self.outbound_substream = None; + break; + } + } + } + None => { + self.outbound_substream = None; + break; + } + Some(OutboundSubstreamState::Poisoned) => { + unreachable!("Error occurred during outbound stream processing") + } + } + } + + loop { + match std::mem::replace( + &mut self.inbound_substream, + Some(InboundSubstreamState::Poisoned), + ) { + // inbound idle state + Some(InboundSubstreamState::WaitingInput(mut substream)) => { + match substream.next().await { + Some(Ok(message)) => { + self.last_io_activity = Instant::now(); + self.inbound_substream = + Some(InboundSubstreamState::WaitingInput(substream)); + self.handler_sender.send(message).await; + break; + } + Some(Err(error)) => { + tracing::debug!("Failed to read from inbound stream: {error}"); + // Close this side of the stream. If the + // peer is still around, they will re-establish their + // outbound stream i.e. our inbound stream. + self.inbound_substream = + Some(InboundSubstreamState::Closing(substream)); + } + // peer closed the stream + None => { + tracing::debug!("Inbound stream closed by remote"); + self.inbound_substream = + Some(InboundSubstreamState::Closing(substream)); + } + } + } + Some(InboundSubstreamState::Closing(mut substream)) => { + match substream.close().await { + Err(e) => { + // Don't close the connection but just drop the inbound substream. + // In case the remote has more to send, they will open up a new + // substream. 
+ tracing::debug!("Inbound substream error while closing: {e}"); + } + Ok(_) => {} + } + self.inbound_substream = None; + break; + } + None => { + self.inbound_substream = None; + break; + } + Some(InboundSubstreamState::Poisoned) => { + unreachable!("Error occurred during inbound stream processing") + } + } + } + } +} diff --git a/gossipsub/src/lib.rs b/gossipsub/src/lib.rs new file mode 100644 index 0000000..84d8ede --- /dev/null +++ b/gossipsub/src/lib.rs @@ -0,0 +1,115 @@ +//! Implementation of the [Gossipsub](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/README.md) protocol. +//! +//! Gossipsub is a P2P pubsub (publish/subscription) routing layer designed to extend upon +//! floodsub and meshsub routing protocols. +//! +//! # Overview +//! +//! *Note: The gossipsub protocol specifications +//! () provide an outline for the +//! routing protocol. They should be consulted for further detail.* +//! +//! Gossipsub is a blend of meshsub for data and randomsub for mesh metadata. It provides bounded +//! degree and amplification factor with the meshsub construction and augments it using gossip +//! propagation of metadata with the randomsub technique. +//! +//! The router maintains an overlay mesh network of peers on which to efficiently send messages and +//! metadata. Peers use control messages to broadcast and request known messages and +//! subscribe/unsubscribe from topics in the mesh network. +//! +//! # Important Discrepancies +//! +//! This section outlines the current implementation's potential discrepancies from that of other +//! implementations, due to undefined elements in the current specification. +//! +//! - **Topics** - In gossipsub, topics configurable by the `hash_topics` configuration parameter. +//! Topics are of type [`TopicHash`]. The current go implementation uses raw utf-8 strings, and this +//! is default configuration in rust-libp2p. Topics can be hashed (SHA256 hashed then base64 +//! encoded) by setting the `hash_topics` configuration parameter to true. +//! +//! - **Sequence Numbers** - A message on the gossipsub network is identified by the source +//! [`PeerId`](libp2p_identity::PeerId) and a nonce (sequence number) of the message. The sequence numbers in +//! this implementation are sent as raw bytes across the wire. They are 64-bit big-endian unsigned +//! integers. When messages are signed, they are monotonically increasing integers starting from a +//! random value and wrapping around u64::MAX. When messages are unsigned, they are chosen at random. +//! NOTE: These numbers are sequential in the current go implementation. +//! +//! # Peer Discovery +//! +//! Gossipsub does not provide peer discovery by itself. Peer discovery is the process by which +//! peers in a p2p network exchange information about each other among other reasons to become resistant +//! against the failure or replacement of the +//! [boot nodes](https://docs.libp2p.io/reference/glossary/#boot-node) of the network. +//! +//! Peer +//! discovery can e.g. be implemented with the help of the [Kademlia](https://github.com/libp2p/specs/blob/master/kad-dht/README.md) protocol +//! in combination with the [Identify](https://github.com/libp2p/specs/tree/master/identify) protocol. See the +//! Kademlia implementation documentation for more information. +//! +//! # Using Gossipsub +//! +//! ## Gossipsub Config +//! +//! The [`Config`] struct specifies various network performance/tuning configuration +//! parameters. 
See the [`Config`] documentation for the full list of tunables.
+//!
+//! [`Config`]: struct.Config.html
+//!
+//! This struct implements the [`Default`] trait and can be initialised via
+//! [`Config::default()`].
+//!
+//! ## Behaviour
+//!
+//! The [`Behaviour`] struct implements the [`libp2p_swarm::NetworkBehaviour`] trait allowing it to
+//! act as the routing behaviour in a [`libp2p_swarm::Swarm`]. This struct requires an instance of
+//! [`PeerId`](libp2p_identity::PeerId) and [`Config`].
+//!
+//! [`Behaviour`]: struct.Behaviour.html
+//!
+//! ## Example
+//!
+//! For an example on how to use gossipsub, see the [chat-example](https://github.com/libp2p/rust-libp2p/tree/master/examples/chat).
+
+#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+
+mod backoff;
+mod behaviour;
+mod config;
+mod error;
+mod gossip_promises;
+mod handler;
+mod mcache;
+mod metrics;
+mod peer_score;
+mod protocol;
+// mod rpc_proto;
+mod subscription_filter;
+mod time_cache;
+mod topic;
+mod transform;
+mod types;
+
+pub use self::behaviour::{Behaviour, Event, MessageAuthenticity};
+pub use self::config::{Config, ConfigBuilder, ValidationMode, Version};
+pub use self::error::{ConfigBuilderError, PublishError, SubscriptionError, ValidationError};
+pub use self::protocol::GOSSIPSUB_1_1_0_PROTOCOL;
+// pub use self::metrics::Config as MetricsConfig;
+pub use self::peer_score::{
+    score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds,
+    TopicScoreParams,
+};
+pub use self::subscription_filter::{
+    AllowAllSubscriptionFilter, CallbackSubscriptionFilter, CombinedSubscriptionFilters,
+    MaxCountSubscriptionFilter, RegexSubscriptionFilter, TopicSubscriptionFilter,
+    WhitelistSubscriptionFilter,
+};
+pub use self::topic::{Hasher, Topic, TopicHash};
+pub use self::transform::{DataTransform, IdentityTransform};
+pub use self::types::{Message, MessageAcceptance, MessageId, RawMessage};
+
+#[deprecated(note = "Will be removed from the public API.")]
+pub type Rpc = self::types::Rpc;
+
+pub type IdentTopic = Topic<self::topic::IdentityHash>;
+pub type Sha256Topic = Topic<self::topic::Sha256Hash>;
diff --git a/gossipsub/src/mcache.rs b/gossipsub/src/mcache.rs
new file mode 100644
index 0000000..3d8d156
--- /dev/null
+++ b/gossipsub/src/mcache.rs
@@ -0,0 +1,367 @@
+use iroh::net::NodeId;
+
+use crate::topic::TopicHash;
+use crate::types::{MessageId, RawMessage};
+use std::collections::hash_map::Entry;
+use std::fmt::Debug;
+use std::{
+    collections::{HashMap, HashSet},
+    fmt,
+};
+
+/// CacheEntry stored in the history.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub(crate) struct CacheEntry {
+    mid: MessageId,
+    topic: TopicHash,
+}
+
+/// MessageCache struct holding history of messages.
+#[derive(Clone)]
+pub(crate) struct MessageCache {
+    msgs: HashMap<MessageId, (RawMessage, HashSet<NodeId>)>,
+    /// For every message and peer, the number of times this peer asked for the message.
+    iwant_counts: HashMap<MessageId, HashMap<NodeId, u32>>,
+    history: Vec<Vec<CacheEntry>>,
+    /// The number of indices in the cache history used for gossiping. That means that a message
+    /// won't be gossiped anymore once `shift` has been called `gossip` times after the message
+    /// was inserted into the cache.
+    gossip: usize,
+}
+
+impl fmt::Debug for MessageCache {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("MessageCache")
+            .field("msgs", &self.msgs)
+            .field("history", &self.history)
+            .field("gossip", &self.gossip)
+            .finish()
+    }
+}
+
+/// Implementation of the MessageCache.
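+///
+/// A minimal usage sketch (illustrative only; `raw` and `topic` stand for a
+/// [`RawMessage`] and [`TopicHash`] constructed elsewhere):
+///
+/// ```ignore
+/// let mut cache = MessageCache::new(3, 5);        // gossip over 3 of 5 history windows
+/// let id = MessageId::new(b"example-id");
+/// cache.put(&id, raw);                            // store an (unvalidated) message
+/// cache.validate(&id);                            // mark it validated so it may be gossiped
+/// cache.shift();                                  // advance one heartbeat window
+/// let ids = cache.get_gossip_message_ids(&topic); // ids eligible for IHAVE gossip
+/// ```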
+impl MessageCache { + pub(crate) fn new(gossip: usize, history_capacity: usize) -> Self { + MessageCache { + gossip, + msgs: HashMap::default(), + iwant_counts: HashMap::default(), + history: vec![Vec::new(); history_capacity], + } + } + + /// Put a message into the memory cache. + /// + /// Returns true if the message didn't already exist in the cache. + pub(crate) fn put(&mut self, message_id: &MessageId, msg: RawMessage) -> bool { + match self.msgs.entry(message_id.clone()) { + Entry::Occupied(_) => { + // Don't add duplicate entries to the cache. + false + } + Entry::Vacant(entry) => { + let cache_entry = CacheEntry { + mid: message_id.clone(), + topic: msg.topic.clone(), + }; + entry.insert((msg, HashSet::default())); + self.history[0].push(cache_entry); + + tracing::trace!(message=?message_id, "Put message in mcache"); + true + } + } + } + + /// Keeps track of peers we know have received the message to prevent forwarding to said peers. + pub(crate) fn observe_duplicate(&mut self, message_id: &MessageId, source: &NodeId) { + if let Some((message, originating_peers)) = self.msgs.get_mut(message_id) { + // if the message is already validated, we don't need to store extra peers sending us + // duplicates as the message has already been forwarded + if message.validated { + return; + } + + originating_peers.insert(*source); + } + } + + /// Get a message with `message_id` + #[cfg(test)] + pub(crate) fn get(&self, message_id: &MessageId) -> Option<&RawMessage> { + self.msgs.get(message_id).map(|(message, _)| message) + } + + /// Increases the iwant count for the given message by one and returns the message together + /// with the iwant if the message exists. + pub(crate) fn get_with_iwant_counts( + &mut self, + message_id: &MessageId, + peer: &NodeId, + ) -> Option<(&RawMessage, u32)> { + let iwant_counts = &mut self.iwant_counts; + self.msgs.get(message_id).and_then(|(message, _)| { + if !message.validated { + None + } else { + Some((message, { + let count = iwant_counts + .entry(message_id.clone()) + .or_default() + .entry(*peer) + .or_default(); + *count += 1; + *count + })) + } + }) + } + + /// Gets a message with [`MessageId`] and tags it as validated. + /// This function also returns the known peers that have sent us this message. This is used to + /// prevent us sending redundant messages to peers who have already propagated it. + pub(crate) fn validate( + &mut self, + message_id: &MessageId, + ) -> Option<(&RawMessage, HashSet)> { + self.msgs.get_mut(message_id).map(|(message, known_peers)| { + message.validated = true; + // Clear the known peers list (after a message is validated, it is forwarded and we no + // longer need to store the originating peers). + let originating_peers = std::mem::take(known_peers); + (&*message, originating_peers) + }) + } + + /// Get a list of [`MessageId`]s for a given topic. 
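+    /// Only ids of *validated* messages that still fall within the `gossip` most
+    /// recent history windows are returned.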
+    pub(crate) fn get_gossip_message_ids(&self, topic: &TopicHash) -> Vec<MessageId> {
+        self.history[..self.gossip]
+            .iter()
+            .fold(vec![], |mut current_entries, entries| {
+                // search for entries with desired topic
+                let mut found_entries: Vec<MessageId> = entries
+                    .iter()
+                    .filter_map(|entry| {
+                        if &entry.topic == topic {
+                            let mid = &entry.mid;
+                            // Only gossip validated messages
+                            if let Some(true) = self.msgs.get(mid).map(|(msg, _)| msg.validated) {
+                                Some(mid.clone())
+                            } else {
+                                None
+                            }
+                        } else {
+                            None
+                        }
+                    })
+                    .collect();
+
+                // generate the list
+                current_entries.append(&mut found_entries);
+                current_entries
+            })
+    }
+
+    /// Shift the history array down one and delete messages associated with the
+    /// last entry.
+    pub(crate) fn shift(&mut self) {
+        for entry in self.history.pop().expect("history is always > 1") {
+            if let Some((msg, _)) = self.msgs.remove(&entry.mid) {
+                if !msg.validated {
+                    // If GossipsubConfig::validate_messages is true, the implementing
+                    // application has to ensure that Gossipsub::validate_message gets called for
+                    // each received message within the cache timeout time.
+                    tracing::debug!(
+                        message=%&entry.mid,
+                        "The message got removed from the cache without being validated."
+                    );
+                }
+            }
+            tracing::trace!(message=%&entry.mid, "Remove message from the cache");
+
+            self.iwant_counts.remove(&entry.mid);
+        }
+
+        // Insert an empty vec in position 0
+        self.history.insert(0, Vec::new());
+    }
+
+    /// Removes a message from the cache and returns it if it exists.
+    pub(crate) fn remove(
+        &mut self,
+        message_id: &MessageId,
+    ) -> Option<(RawMessage, HashSet<NodeId>)> {
+        // We only remove the message from msgs and iwant_counts and keep the message_id in the
+        // history vector. The id in the history vector will simply be ignored on popping.
+
+        self.iwant_counts.remove(message_id);
+        self.msgs.remove(message_id)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use iroh::net::key::SecretKey;
+
+    use super::*;
+    use crate::IdentTopic as Topic;
+
+    fn gen_testm(x: u64, topic: TopicHash) -> (MessageId, RawMessage) {
+        let default_id = |message: &RawMessage| {
+            // default message id is: source + sequence number
+            let mut source_string = message.source.as_ref().unwrap().to_string();
+            source_string.push_str(&message.sequence_number.unwrap().to_string());
+            MessageId::from(source_string)
+        };
+        let u8x: u8 = x as u8;
+        let source = Some(SecretKey::generate().public());
+        let data: Vec<u8> = vec![u8x];
+        let sequence_number = Some(x);
+
+        let m = RawMessage {
+            source,
+            data,
+            sequence_number,
+            topic,
+            signature: None,
+            validated: false,
+        };
+
+        let id = default_id(&m);
+        (id, m)
+    }
+
+    fn new_cache(gossip_size: usize, history: usize) -> MessageCache {
+        MessageCache::new(gossip_size, history)
+    }
+
+    #[test]
+    /// Test that the message cache can be created.
+    fn test_new_cache() {
+        let x: usize = 3;
+        let mc = new_cache(x, 5);
+
+        assert_eq!(mc.gossip, x);
+    }
+
+    #[test]
+    /// Test you can put one message and get one.
+    fn test_put_get_one() {
+        let mut mc = new_cache(10, 15);
+
+        let topic1_hash = Topic::new("topic1").hash();
+        let (id, m) = gen_testm(10, topic1_hash);
+
+        mc.put(&id, m.clone());
+
+        assert_eq!(mc.history[0].len(), 1);
+
+        let fetched = mc.get(&id);
+
+        assert_eq!(fetched.unwrap(), &m);
+    }
+
+    #[test]
+    /// Test attempting to 'get' with a wrong id.
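+    /// (A lookup with an unknown id must return `None`.)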
+ fn test_get_wrong() { + let mut mc = new_cache(10, 15); + + let topic1_hash = Topic::new("topic1").hash(); + let (id, m) = gen_testm(10, topic1_hash); + + mc.put(&id, m); + + // Try to get an incorrect ID + let wrong_id = MessageId::new(b"wrongid"); + let fetched = mc.get(&wrong_id); + assert!(fetched.is_none()); + } + + #[test] + /// Test attempting to 'get' empty message cache. + fn test_get_empty() { + let mc = new_cache(10, 15); + + // Try to get an incorrect ID + let wrong_string = MessageId::new(b"imempty"); + let fetched = mc.get(&wrong_string); + assert!(fetched.is_none()); + } + + #[test] + /// Test shift mechanism. + fn test_shift() { + let mut mc = new_cache(1, 5); + + let topic1_hash = Topic::new("topic1").hash(); + + // Build the message + for i in 0..10 { + let (id, m) = gen_testm(i, topic1_hash.clone()); + mc.put(&id, m.clone()); + } + + mc.shift(); + + // Ensure the shift occurred + assert!(mc.history[0].is_empty()); + assert!(mc.history[1].len() == 10); + + // Make sure no messages deleted + assert!(mc.msgs.len() == 10); + } + + #[test] + /// Test Shift with no additions. + fn test_empty_shift() { + let mut mc = new_cache(1, 5); + + let topic1_hash = Topic::new("topic1").hash(); + + // Build the message + for i in 0..10 { + let (id, m) = gen_testm(i, topic1_hash.clone()); + mc.put(&id, m.clone()); + } + + mc.shift(); + + // Ensure the shift occurred + assert!(mc.history[0].is_empty()); + assert!(mc.history[1].len() == 10); + + mc.shift(); + + assert!(mc.history[2].len() == 10); + assert!(mc.history[1].is_empty()); + assert!(mc.history[0].is_empty()); + } + + #[test] + /// Test shift to see if the last history messages are removed. + fn test_remove_last_from_shift() { + let mut mc = new_cache(4, 5); + + let topic1_hash = Topic::new("topic1").hash(); + + // Build the message + for i in 0..10 { + let (id, m) = gen_testm(i, topic1_hash.clone()); + mc.put(&id, m.clone()); + } + + // Shift right until deleting messages + mc.shift(); + mc.shift(); + mc.shift(); + mc.shift(); + + assert_eq!(mc.history[mc.history.len() - 1].len(), 10); + + // Shift and delete the messages + mc.shift(); + assert_eq!(mc.history[mc.history.len() - 1].len(), 0); + assert_eq!(mc.history[0].len(), 0); + assert_eq!(mc.msgs.len(), 0); + } +} diff --git a/gossipsub/src/metrics.rs b/gossipsub/src/metrics.rs new file mode 100644 index 0000000..c6bba75 --- /dev/null +++ b/gossipsub/src/metrics.rs @@ -0,0 +1,583 @@ +//! A set of metrics used to help track and diagnose the network behaviour of the gossipsub +//! protocol. + +use std::collections::HashMap; + +use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; +use prometheus_client::metrics::counter::Counter; +use prometheus_client::metrics::family::{Family, MetricConstructor}; +use prometheus_client::metrics::gauge::Gauge; +use prometheus_client::metrics::histogram::{linear_buckets, Histogram}; +use prometheus_client::registry::Registry; + +use crate::topic::TopicHash; +use crate::types::MessageAcceptance; + +// Default value that limits for how many topics do we store metrics. +const DEFAULT_MAX_TOPICS: usize = 300; + +// Default value that limits how many topics for which there has never been a subscription do we +// store metrics. +const DEFAULT_MAX_NEVER_SUBSCRIBED_TOPICS: usize = 50; + +#[derive(Debug, Clone)] +pub struct Config { + /// This provides an upper bound to the number of mesh topics we create metrics for. It + /// prevents unbounded labels being created in the metrics. 
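+    /// Topics beyond this limit are silently ignored by the metric recorders.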
+ pub max_topics: usize, + /// Mesh topics are controlled by the user via subscriptions whereas non-mesh topics are + /// determined by users on the network. This limit permits a fixed amount of topics to allow, + /// in-addition to the mesh topics. + pub max_never_subscribed_topics: usize, + /// Buckets used for the score histograms. + pub score_buckets: Vec, +} + +impl Config { + /// Create buckets for the score histograms based on score thresholds. + pub fn buckets_using_scoring_thresholds(&mut self, params: &crate::PeerScoreThresholds) { + self.score_buckets = vec![ + params.graylist_threshold, + params.publish_threshold, + params.gossip_threshold, + params.gossip_threshold / 2.0, + params.gossip_threshold / 4.0, + 0.0, + 1.0, + 10.0, + 100.0, + ]; + } +} + +impl Default for Config { + fn default() -> Self { + // Some sensible defaults + let gossip_threshold = -4000.0; + let publish_threshold = -8000.0; + let graylist_threshold = -16000.0; + let score_buckets: Vec = vec![ + graylist_threshold, + publish_threshold, + gossip_threshold, + gossip_threshold / 2.0, + gossip_threshold / 4.0, + 0.0, + 1.0, + 10.0, + 100.0, + ]; + Config { + max_topics: DEFAULT_MAX_TOPICS, + max_never_subscribed_topics: DEFAULT_MAX_NEVER_SUBSCRIBED_TOPICS, + score_buckets, + } + } +} + +/// Whether we have ever been subscribed to this topic. +type EverSubscribed = bool; + +/// A collection of metrics used throughout the Gossipsub behaviour. +pub(crate) struct Metrics { + /* Configuration parameters */ + /// Maximum number of topics for which we store metrics. This helps keep the metrics bounded. + max_topics: usize, + /// Maximum number of topics for which we store metrics, where the topic in not one to which we + /// have subscribed at some point. This helps keep the metrics bounded, since these topics come + /// from received messages and not explicit application subscriptions. + max_never_subscribed_topics: usize, + + /* Auxiliary variables */ + /// Information needed to decide if a topic is allowed or not. + topic_info: HashMap, + + /* Metrics per known topic */ + /// Status of our subscription to this topic. This metric allows analyzing other topic metrics + /// filtered by our current subscription status. + topic_subscription_status: Family, + /// Number of peers subscribed to each topic. This allows us to analyze a topic's behaviour + /// regardless of our subscription status. + topic_peers_count: Family, + /// The number of invalid messages received for a given topic. + invalid_messages: Family, + /// The number of messages accepted by the application (validation result). + accepted_messages: Family, + /// The number of messages ignored by the application (validation result). + ignored_messages: Family, + /// The number of messages rejected by the application (validation result). + rejected_messages: Family, + + /* Metrics regarding mesh state */ + /// Number of peers in our mesh. This metric should be updated with the count of peers for a + /// topic in the mesh regardless of inclusion and churn events. + mesh_peer_counts: Family, + /// Number of times we include peers in a topic mesh for different reasons. + mesh_peer_inclusion_events: Family, + /// Number of times we remove peers in a topic mesh for different reasons. + mesh_peer_churn_events: Family, + + /* Metrics regarding messages sent/received */ + /// Number of gossip messages sent to each topic. + topic_msg_sent_counts: Family, + /// Bytes from gossip messages sent to each topic. 
+ topic_msg_sent_bytes: Family, + /// Number of gossipsub messages published to each topic. + topic_msg_published: Family, + + /// Number of gossipsub messages received on each topic (without filtering duplicates). + topic_msg_recv_counts_unfiltered: Family, + /// Number of gossipsub messages received on each topic (after filtering duplicates). + topic_msg_recv_counts: Family, + /// Bytes received from gossip messages for each topic. + topic_msg_recv_bytes: Family, + + /* Metrics related to scoring */ + /// Histogram of the scores for each mesh topic. + score_per_mesh: Family, + /// A counter of the kind of penalties being applied to peers. + scoring_penalties: Family, + + /* General Metrics */ + /// Gossipsub supports floodsub, gossipsub v1.0 and gossipsub v1.1. Peers are classified based + /// on which protocol they support. This metric keeps track of the number of peers that are + /// connected of each type. + peers_per_protocol: Family, + /// The time it takes to complete one iteration of the heartbeat. + heartbeat_duration: Histogram, + + /* Performance metrics */ + /// When the user validates a message, it tries to re propagate it to its mesh peers. If the + /// message expires from the memcache before it can be validated, we count this a cache miss + /// and it is an indicator that the memcache size should be increased. + memcache_misses: Counter, + /// The number of times we have decided that an IWANT control message is required for this + /// topic. A very high metric might indicate an underperforming network. + topic_iwant_msgs: Family, +} + +impl Metrics { + pub(crate) fn new(registry: &mut Registry, config: Config) -> Self { + // Destructure the config to be sure everything is used. + let Config { + max_topics, + max_never_subscribed_topics, + score_buckets, + } = config; + + macro_rules! 
register_family { + ($name:expr, $help:expr) => {{ + let fam = Family::default(); + registry.register($name, $help, fam.clone()); + fam + }}; + } + + let topic_subscription_status = register_family!( + "topic_subscription_status", + "Subscription status per known topic" + ); + let topic_peers_count = register_family!( + "topic_peers_counts", + "Number of peers subscribed to each topic" + ); + + let invalid_messages = register_family!( + "invalid_messages_per_topic", + "Number of invalid messages received for each topic" + ); + + let accepted_messages = register_family!( + "accepted_messages_per_topic", + "Number of accepted messages received for each topic" + ); + + let ignored_messages = register_family!( + "ignored_messages_per_topic", + "Number of ignored messages received for each topic" + ); + + let rejected_messages = register_family!( + "rejected_messages_per_topic", + "Number of rejected messages received for each topic" + ); + + let mesh_peer_counts = register_family!( + "mesh_peer_counts", + "Number of peers in each topic in our mesh" + ); + let mesh_peer_inclusion_events = register_family!( + "mesh_peer_inclusion_events", + "Number of times a peer gets added to our mesh for different reasons" + ); + let mesh_peer_churn_events = register_family!( + "mesh_peer_churn_events", + "Number of times a peer gets removed from our mesh for different reasons" + ); + let topic_msg_sent_counts = register_family!( + "topic_msg_sent_counts", + "Number of gossip messages sent to each topic" + ); + let topic_msg_published = register_family!( + "topic_msg_published", + "Number of gossip messages published to each topic" + ); + let topic_msg_sent_bytes = register_family!( + "topic_msg_sent_bytes", + "Bytes from gossip messages sent to each topic" + ); + + let topic_msg_recv_counts_unfiltered = register_family!( + "topic_msg_recv_counts_unfiltered", + "Number of gossip messages received on each topic (without duplicates being filtered)" + ); + + let topic_msg_recv_counts = register_family!( + "topic_msg_recv_counts", + "Number of gossip messages received on each topic (after duplicates have been filtered)" + ); + let topic_msg_recv_bytes = register_family!( + "topic_msg_recv_bytes", + "Bytes received from gossip messages for each topic" + ); + + let hist_builder = HistBuilder { + buckets: score_buckets, + }; + + let score_per_mesh: Family<_, _, HistBuilder> = Family::new_with_constructor(hist_builder); + registry.register( + "score_per_mesh", + "Histogram of scores per mesh topic", + score_per_mesh.clone(), + ); + + let scoring_penalties = register_family!( + "scoring_penalties", + "Counter of types of scoring penalties given to peers" + ); + let peers_per_protocol = register_family!( + "peers_per_protocol", + "Number of connected peers by protocol type" + ); + + let heartbeat_duration = Histogram::new(linear_buckets(0.0, 50.0, 10)); + registry.register( + "heartbeat_duration", + "Histogram of observed heartbeat durations", + heartbeat_duration.clone(), + ); + + let topic_iwant_msgs = register_family!( + "topic_iwant_msgs", + "Number of times we have decided an IWANT is required for this topic" + ); + let memcache_misses = { + let metric = Counter::default(); + registry.register( + "memcache_misses", + "Number of times a message is not found in the duplicate cache when validating", + metric.clone(), + ); + metric + }; + + Self { + max_topics, + max_never_subscribed_topics, + topic_info: HashMap::default(), + topic_subscription_status, + topic_peers_count, + invalid_messages, + accepted_messages, + 
ignored_messages,
+            rejected_messages,
+            mesh_peer_counts,
+            mesh_peer_inclusion_events,
+            mesh_peer_churn_events,
+            topic_msg_sent_counts,
+            topic_msg_sent_bytes,
+            topic_msg_published,
+            topic_msg_recv_counts_unfiltered,
+            topic_msg_recv_counts,
+            topic_msg_recv_bytes,
+            score_per_mesh,
+            scoring_penalties,
+            peers_per_protocol,
+            heartbeat_duration,
+            memcache_misses,
+            topic_iwant_msgs,
+        }
+    }
+
+    fn non_subscription_topics_count(&self) -> usize {
+        self.topic_info
+            .values()
+            .filter(|&ever_subscribed| !ever_subscribed)
+            .count()
+    }
+
+    /// Registers a topic if not already known and if the bounds allow it.
+    fn register_topic(&mut self, topic: &TopicHash) -> Result<(), ()> {
+        if self.topic_info.contains_key(topic) {
+            Ok(())
+        } else if self.topic_info.len() < self.max_topics
+            && self.non_subscription_topics_count() < self.max_never_subscribed_topics
+        {
+            // This is a topic without an explicit subscription and we register it if we are within
+            // the configured bounds.
+            self.topic_info.entry(topic.clone()).or_insert(false);
+            self.topic_subscription_status.get_or_create(topic).set(0);
+            Ok(())
+        } else {
+            // We don't know this topic and there is no space left to store it
+            Err(())
+        }
+    }
+
+    /// Registers how many peers we know are subscribed to this topic.
+    pub(crate) fn set_topic_peers(&mut self, topic: &TopicHash, count: usize) {
+        if self.register_topic(topic).is_ok() {
+            self.topic_peers_count
+                .get_or_create(topic)
+                .set(count as i64);
+        }
+    }
+
+    /* Mesh related methods */
+
+    /// Registers the subscription to a topic if the configured limits allow it.
+    /// Sets the registered number of peers in the mesh to 0.
+    pub(crate) fn joined(&mut self, topic: &TopicHash) {
+        if self.topic_info.contains_key(topic) || self.topic_info.len() < self.max_topics {
+            self.topic_info.insert(topic.clone(), true);
+            let was_subscribed = self.topic_subscription_status.get_or_create(topic).set(1);
+            debug_assert_eq!(was_subscribed, 0);
+            self.mesh_peer_counts.get_or_create(topic).set(0);
+        }
+    }
+
+    /// Registers the unsubscription from a topic if the topic was previously allowed.
+    /// Sets the registered number of peers in the mesh to 0.
+    pub(crate) fn left(&mut self, topic: &TopicHash) {
+        if self.topic_info.contains_key(topic) {
+            // Depending on the configured topic bounds we could miss a mesh topic.
+            // So, check first if the topic was previously allowed.
+            let was_subscribed = self.topic_subscription_status.get_or_create(topic).set(0);
+            debug_assert_eq!(was_subscribed, 1);
+            self.mesh_peer_counts.get_or_create(topic).set(0);
+        }
+    }
+
+    /// Register the inclusion of peers in our mesh due to some reason.
+    pub(crate) fn peers_included(&mut self, topic: &TopicHash, reason: Inclusion, count: usize) {
+        if self.register_topic(topic).is_ok() {
+            self.mesh_peer_inclusion_events
+                .get_or_create(&InclusionLabel {
+                    hash: topic.to_string(),
+                    reason,
+                })
+                .inc_by(count as u64);
+        }
+    }
+
+    /// Register the removal of peers in our mesh due to some reason.
+    pub(crate) fn peers_removed(&mut self, topic: &TopicHash, reason: Churn, count: usize) {
+        if self.register_topic(topic).is_ok() {
+            self.mesh_peer_churn_events
+                .get_or_create(&ChurnLabel {
+                    hash: topic.to_string(),
+                    reason,
+                })
+                .inc_by(count as u64);
+        }
+    }
+
+    /// Register the current number of peers in our mesh for this topic.
+    pub(crate) fn set_mesh_peers(&mut self, topic: &TopicHash, count: usize) {
+        if self.register_topic(topic).is_ok() {
+            // Due to limits, this topic might not have been allowed, so we check.
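+            // (`register_topic` returns `Err(())` once the configured topic limits
+            // are exhausted.)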
+ self.mesh_peer_counts.get_or_create(topic).set(count as i64); + } + } + + /// Register that an invalid message was received on a specific topic. + pub(crate) fn register_invalid_message(&mut self, topic: &TopicHash) { + if self.register_topic(topic).is_ok() { + self.invalid_messages.get_or_create(topic).inc(); + } + } + + /// Register a score penalty. + pub(crate) fn register_score_penalty(&mut self, penalty: Penalty) { + self.scoring_penalties + .get_or_create(&PenaltyLabel { penalty }) + .inc(); + } + + /// Registers that a message was published on a specific topic. + pub(crate) fn register_published_message(&mut self, topic: &TopicHash) { + if self.register_topic(topic).is_ok() { + self.topic_msg_published.get_or_create(topic).inc(); + } + } + + /// Register sending a message over a topic. + pub(crate) fn msg_sent(&mut self, topic: &TopicHash, bytes: usize) { + if self.register_topic(topic).is_ok() { + self.topic_msg_sent_counts.get_or_create(topic).inc(); + self.topic_msg_sent_bytes + .get_or_create(topic) + .inc_by(bytes as u64); + } + } + + /// Register that a message was received (and was not a duplicate). + pub(crate) fn msg_recvd(&mut self, topic: &TopicHash) { + if self.register_topic(topic).is_ok() { + self.topic_msg_recv_counts.get_or_create(topic).inc(); + } + } + + /// Register that a message was received (could have been a duplicate). + pub(crate) fn msg_recvd_unfiltered(&mut self, topic: &TopicHash, bytes: usize) { + if self.register_topic(topic).is_ok() { + self.topic_msg_recv_counts_unfiltered + .get_or_create(topic) + .inc(); + self.topic_msg_recv_bytes + .get_or_create(topic) + .inc_by(bytes as u64); + } + } + + pub(crate) fn register_msg_validation( + &mut self, + topic: &TopicHash, + validation: &MessageAcceptance, + ) { + if self.register_topic(topic).is_ok() { + match validation { + MessageAcceptance::Accept => self.accepted_messages.get_or_create(topic).inc(), + MessageAcceptance::Ignore => self.ignored_messages.get_or_create(topic).inc(), + MessageAcceptance::Reject => self.rejected_messages.get_or_create(topic).inc(), + }; + } + } + + /// Register a memcache miss. + pub(crate) fn memcache_miss(&mut self) { + self.memcache_misses.inc(); + } + + /// Register sending an IWANT msg for this topic. + pub(crate) fn register_iwant(&mut self, topic: &TopicHash) { + if self.register_topic(topic).is_ok() { + self.topic_iwant_msgs.get_or_create(topic).inc(); + } + } + + /// Observes a heartbeat duration. + pub(crate) fn observe_heartbeat_duration(&mut self, millis: u64) { + self.heartbeat_duration.observe(millis as f64); + } + + /// Observe a score of a mesh peer. + pub(crate) fn observe_mesh_peers_score(&mut self, topic: &TopicHash, score: f64) { + if self.register_topic(topic).is_ok() { + self.score_per_mesh.get_or_create(topic).observe(score); + } + } + + /// Register a new peers connection based on its protocol. + pub(crate) fn peer_protocol_connected(&mut self) { + self.peers_per_protocol + .get_or_create(&ProtocolLabel {}) + .inc(); + } + + /// Removes a peer from the counter based on its protocol when it disconnects. + pub(crate) fn peer_protocol_disconnected(&mut self) { + let metric = self.peers_per_protocol.get_or_create(&ProtocolLabel {}); + if metric.get() != 0 { + // decrement the counter + metric.set(metric.get() - 1); + } + } +} + +/// Reasons why a peer was included in the mesh. +#[derive(PartialEq, Eq, Hash, EncodeLabelValue, Clone, Debug)] +pub(crate) enum Inclusion { + /// Peer was a fanaout peer. + Fanout, + /// Included from random selection. 
+    Random,
+    /// Peer subscribed.
+    Subscribed,
+    /// Peer was included to fill the outbound quota.
+    Outbound,
+}
+
+/// Reasons why a peer was removed from the mesh.
+#[derive(PartialEq, Eq, Hash, EncodeLabelValue, Clone, Debug)]
+pub(crate) enum Churn {
+    /// Peer disconnected.
+    Dc,
+    /// Peer had a bad score.
+    BadScore,
+    /// Peer sent a PRUNE.
+    Prune,
+    /// Peer unsubscribed.
+    Unsub,
+    /// Too many peers.
+    Excess,
+}
+
+/// Kinds of reasons a peer's score has been penalized.
+#[derive(PartialEq, Eq, Hash, EncodeLabelValue, Clone, Debug)]
+pub(crate) enum Penalty {
+    /// A peer grafted before waiting the back-off time.
+    GraftBackoff,
+    /// A peer did not respond to an IWANT request in time.
+    BrokenPromise,
+    /// A peer did not send as many messages as expected.
+    MessageDeficit,
+    /// Too many peers under one IP address.
+    IPColocation,
+}
+
+/// Label for the mesh inclusion event metrics.
+#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)]
+struct InclusionLabel {
+    hash: String,
+    reason: Inclusion,
+}
+
+/// Label for the mesh churn event metrics.
+#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)]
+struct ChurnLabel {
+    hash: String,
+    reason: Churn,
+}
+
+/// Label for the kinds of protocols peers can connect as.
+#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)]
+struct ProtocolLabel {}
+
+/// Label for the kinds of scoring penalties that can occur.
+#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)]
+struct PenaltyLabel {
+    penalty: Penalty,
+}
+
+#[derive(Clone)]
+struct HistBuilder {
+    buckets: Vec<f64>,
+}
+
+impl MetricConstructor<Histogram> for HistBuilder {
+    fn new_metric(&self) -> Histogram {
+        Histogram::new(self.buckets.clone().into_iter())
+    }
+}
diff --git a/gossipsub/src/peer_score.rs b/gossipsub/src/peer_score.rs
new file mode 100644
index 0000000..f75d81c
--- /dev/null
+++ b/gossipsub/src/peer_score.rs
@@ -0,0 +1,898 @@
+//! Manages and stores the scoring logic of a particular peer on the gossipsub behaviour.
+
+use crate::metrics::{Metrics, Penalty};
+use crate::time_cache::TimeCache;
+use crate::{MessageId, TopicHash};
+use iroh::net::NodeId;
+use std::collections::{hash_map, HashMap, HashSet};
+use std::net::IpAddr;
+use std::time::Duration;
+use web_time::Instant;
+
+mod params;
+use crate::ValidationError;
+pub use params::{
+    score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds,
+    TopicScoreParams,
+};
+
+#[cfg(test)]
+mod tests;
+
+/// The number of seconds delivery messages are stored in the cache.
+const TIME_CACHE_DURATION: u64 = 120;
+
+#[derive(Debug)]
+pub(crate) struct PeerScore {
+    /// The score parameters.
+    params: PeerScoreParams,
+    /// Stats per peer.
+    peer_stats: HashMap<NodeId, PeerStats>,
+    /// Tracking peers per IP.
+    peer_ips: HashMap<IpAddr, HashSet<NodeId>>,
+    /// Message delivery tracking. This is a time-cache of [`DeliveryRecord`]s.
+    deliveries: TimeCache<MessageId, DeliveryRecord>,
+    /// Callback for monitoring message delivery times.
+    message_delivery_time_callback: Option<fn(&NodeId, &TopicHash, f64)>,
+}
+
+/// General statistics for a given gossipsub peer.
+#[derive(Debug)]
+struct PeerStats {
+    /// Connection status of the peer.
+    status: ConnectionStatus,
+    /// Stats per topic.
+    topics: HashMap<TopicHash, TopicStats>,
+    /// IP tracking for individual peers.
+    known_ips: HashSet<IpAddr>,
+    /// Behaviour penalty that is applied to the peer, assigned by the behaviour.
+    behaviour_penalty: f64,
+    /// Application specific score. Can be set via [`PeerScore::set_application_score`].
+    application_score: f64,
+}
+
+#[derive(Debug)]
+enum ConnectionStatus {
+    /// The peer is connected.
+    Connected,
+    /// The peer is disconnected.
+    Disconnected {
+        /// Expiration time of the score state for disconnected peers.
+        expire: Instant,
+    },
+}
+
+impl Default for PeerStats {
+    fn default() -> Self {
+        PeerStats {
+            status: ConnectionStatus::Connected,
+            topics: HashMap::new(),
+            known_ips: HashSet::new(),
+            behaviour_penalty: 0f64,
+            application_score: 0f64,
+        }
+    }
+}
+
+impl PeerStats {
+    /// Returns a mutable reference to the topic stats if they exist. Otherwise, if the supplied
+    /// parameters score the topic, inserts default stats and returns a reference to those;
+    /// if neither applies, returns `None`.
+    pub(crate) fn stats_or_default_mut(
+        &mut self,
+        topic_hash: TopicHash,
+        params: &PeerScoreParams,
+    ) -> Option<&mut TopicStats> {
+        if params.topics.contains_key(&topic_hash) {
+            Some(self.topics.entry(topic_hash).or_default())
+        } else {
+            self.topics.get_mut(&topic_hash)
+        }
+    }
+}
+
+/// Stats assigned to a peer for each topic.
+#[derive(Debug)]
+struct TopicStats {
+    mesh_status: MeshStatus,
+    /// Number of first message deliveries.
+    first_message_deliveries: f64,
+    /// True if the peer has been in the mesh for enough time to activate mesh message deliveries.
+    mesh_message_deliveries_active: bool,
+    /// Number of message deliveries from the mesh.
+    mesh_message_deliveries: f64,
+    /// Mesh rate failure penalty.
+    mesh_failure_penalty: f64,
+    /// Invalid message counter.
+    invalid_message_deliveries: f64,
+}
+
+impl TopicStats {
+    /// Returns true if the peer is in the `mesh`.
+    pub(crate) fn in_mesh(&self) -> bool {
+        matches!(self.mesh_status, MeshStatus::Active { .. })
+    }
+}
+
+/// Status defining a peer's inclusion in the mesh and associated parameters.
+#[derive(Debug)]
+enum MeshStatus {
+    Active {
+        /// The time the peer was last GRAFTed.
+        graft_time: Instant,
+        /// The time the peer has been in the mesh.
+        mesh_time: Duration,
+    },
+    InActive,
+}
+
+impl MeshStatus {
+    /// Initialises a new [`MeshStatus::Active`] mesh status.
+    pub(crate) fn new_active() -> Self {
+        MeshStatus::Active {
+            graft_time: Instant::now(),
+            mesh_time: Duration::from_secs(0),
+        }
+    }
+}
+
+impl Default for TopicStats {
+    fn default() -> Self {
+        TopicStats {
+            mesh_status: MeshStatus::InActive,
+            first_message_deliveries: Default::default(),
+            mesh_message_deliveries_active: Default::default(),
+            mesh_message_deliveries: Default::default(),
+            mesh_failure_penalty: Default::default(),
+            invalid_message_deliveries: Default::default(),
+        }
+    }
+}
+
+#[derive(PartialEq, Debug)]
+struct DeliveryRecord {
+    status: DeliveryStatus,
+    first_seen: Instant,
+    peers: HashSet<NodeId>,
+}
+
+#[derive(PartialEq, Debug)]
+enum DeliveryStatus {
+    /// Don't know (yet) if the message is valid.
+    Unknown,
+    /// The message is valid together with the validated time.
+    Valid(Instant),
+    /// The message is invalid.
+    Invalid,
+    /// Instructed by the validator to ignore the message.
+    Ignored,
+}
+
+impl Default for DeliveryRecord {
+    fn default() -> Self {
+        DeliveryRecord {
+            status: DeliveryStatus::Unknown,
+            first_seen: Instant::now(),
+            peers: HashSet::new(),
+        }
+    }
+}
+
+impl PeerScore {
+    /// Creates a new [`PeerScore`] using a given set of peer scoring parameters.
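+    ///
+    /// A minimal usage sketch (hypothetical; `node_id` is assumed to be a `NodeId` in hand):
+    /// ```ignore
+    /// let mut peer_score = PeerScore::new(PeerScoreParams::default());
+    /// peer_score.add_peer(node_id);
+    /// assert_eq!(peer_score.score(&node_id), 0.0);
+    /// ```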
+    #[allow(dead_code)]
+    pub(crate) fn new(params: PeerScoreParams) -> Self {
+        Self::new_with_message_delivery_time_callback(params, None)
+    }
+
+    pub(crate) fn new_with_message_delivery_time_callback(
+        params: PeerScoreParams,
+        callback: Option<fn(&NodeId, &TopicHash, f64)>,
+    ) -> Self {
+        PeerScore {
+            params,
+            peer_stats: HashMap::new(),
+            peer_ips: HashMap::new(),
+            deliveries: TimeCache::new(Duration::from_secs(TIME_CACHE_DURATION)),
+            message_delivery_time_callback: callback,
+        }
+    }
+
+    /// Returns the score for a peer.
+    pub(crate) fn score(&self, peer_id: &NodeId) -> f64 {
+        self.metric_score(peer_id, None)
+    }
+
+    /// Returns the score for a peer, logging metrics. This is called from the heartbeat and
+    /// increments the metric counts for penalties.
+    pub(crate) fn metric_score(&self, peer_id: &NodeId, mut metrics: Option<&mut Metrics>) -> f64 {
+        let Some(peer_stats) = self.peer_stats.get(peer_id) else {
+            return 0.0;
+        };
+        let mut score = 0.0;
+
+        // topic scores
+        for (topic, topic_stats) in peer_stats.topics.iter() {
+            // topic parameters
+            if let Some(topic_params) = self.params.topics.get(topic) {
+                // we are tracking the topic
+
+                // the topic score
+                let mut topic_score = 0.0;
+
+                // P1: time in mesh
+                if let MeshStatus::Active { mesh_time, .. } = topic_stats.mesh_status {
+                    let p1 = {
+                        let v = mesh_time.as_secs_f64()
+                            / topic_params.time_in_mesh_quantum.as_secs_f64();
+                        if v < topic_params.time_in_mesh_cap {
+                            v
+                        } else {
+                            topic_params.time_in_mesh_cap
+                        }
+                    };
+                    topic_score += p1 * topic_params.time_in_mesh_weight;
+                }
+
+                // P2: first message deliveries
+                let p2 = {
+                    let v = topic_stats.first_message_deliveries;
+                    if v < topic_params.first_message_deliveries_cap {
+                        v
+                    } else {
+                        topic_params.first_message_deliveries_cap
+                    }
+                };
+                topic_score += p2 * topic_params.first_message_deliveries_weight;
+
+                // P3: mesh message deliveries
+                if topic_stats.mesh_message_deliveries_active
+                    && topic_stats.mesh_message_deliveries
+                        < topic_params.mesh_message_deliveries_threshold
+                {
+                    let deficit = topic_params.mesh_message_deliveries_threshold
+                        - topic_stats.mesh_message_deliveries;
+                    let p3 = deficit * deficit;
+                    topic_score += p3 * topic_params.mesh_message_deliveries_weight;
+                    if let Some(metrics) = metrics.as_mut() {
+                        metrics.register_score_penalty(Penalty::MessageDeficit);
+                    }
+                    tracing::debug!(
+                        peer=%peer_id,
+                        %topic,
+                        %deficit,
+                        penalty=%topic_score,
+                        "[Penalty] The peer has a mesh deliveries deficit and will be penalized"
+                    );
+                }
+
+                // P3b:
+                // NOTE: the weight of P3b is negative (validated in TopicScoreParams.validate), so this detracts.
+                let p3b = topic_stats.mesh_failure_penalty;
+                topic_score += p3b * topic_params.mesh_failure_penalty_weight;
+
+                // P4: invalid messages
+                // NOTE: the weight of P4 is negative (validated in TopicScoreParams.validate), so this detracts.
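+                // (the counter is squared, so repeated invalid messages are penalized
+                // super-linearly.)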
+                let p4 = topic_stats.invalid_message_deliveries
+                    * topic_stats.invalid_message_deliveries;
+                topic_score += p4 * topic_params.invalid_message_deliveries_weight;
+
+                // update score, mixing with topic weight
+                score += topic_score * topic_params.topic_weight;
+            }
+        }
+
+        // apply the topic score cap, if any
+        if self.params.topic_score_cap > 0f64 && score > self.params.topic_score_cap {
+            score = self.params.topic_score_cap;
+        }
+
+        // P5: application-specific score
+        let p5 = peer_stats.application_score;
+        score += p5 * self.params.app_specific_weight;
+
+        // P6: IP colocation factor
+        for ip in peer_stats.known_ips.iter() {
+            if self.params.ip_colocation_factor_whitelist.contains(ip) {
+                continue;
+            }
+
+            // P6 has a cliff (ip_colocation_factor_threshold); it's only applied if
+            // at least that many peers are connected to us from that source IP
+            // addr. It is quadratic, and the weight is negative (validated by
+            // peer_score_params.validate()).
+            if let Some(peers_in_ip) = self.peer_ips.get(ip).map(|peers| peers.len()) {
+                if (peers_in_ip as f64) > self.params.ip_colocation_factor_threshold {
+                    let surplus = (peers_in_ip as f64) - self.params.ip_colocation_factor_threshold;
+                    let p6 = surplus * surplus;
+                    if let Some(metrics) = metrics.as_mut() {
+                        metrics.register_score_penalty(Penalty::IPColocation);
+                    }
+                    tracing::debug!(
+                        peer=%peer_id,
+                        surplus_ip=%ip,
+                        surplus=%surplus,
+                        "[Penalty] The peer gets penalized because of too many peers with the same ip"
+                    );
+                    score += p6 * self.params.ip_colocation_factor_weight;
+                }
+            }
+        }
+
+        // P7: behavioural pattern penalty
+        if peer_stats.behaviour_penalty > self.params.behaviour_penalty_threshold {
+            let excess = peer_stats.behaviour_penalty - self.params.behaviour_penalty_threshold;
+            let p7 = excess * excess;
+            score += p7 * self.params.behaviour_penalty_weight;
+        }
+        score
+    }
+
+    pub(crate) fn add_penalty(&mut self, peer_id: &NodeId, count: usize) {
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            tracing::debug!(
+                peer=%peer_id,
+                %count,
+                "[Penalty] Behavioural penalty for peer"
+            );
+            peer_stats.behaviour_penalty += count as f64;
+        }
+    }
+
+    fn remove_ips_for_peer(
+        peer_stats: &PeerStats,
+        peer_ips: &mut HashMap<IpAddr, HashSet<NodeId>>,
+        peer_id: &NodeId,
+    ) {
+        for ip in peer_stats.known_ips.iter() {
+            if let Some(peer_set) = peer_ips.get_mut(ip) {
+                peer_set.remove(peer_id);
+            }
+        }
+    }
+
+    pub(crate) fn refresh_scores(&mut self) {
+        let now = Instant::now();
+        let params_ref = &self.params;
+        let peer_ips_ref = &mut self.peer_ips;
+        self.peer_stats.retain(|peer_id, peer_stats| {
+            if let ConnectionStatus::Disconnected { expire } = peer_stats.status {
+                // has the retention period expired?
+                if now > expire {
+                    // yes, throw it away (but clean up the IP tracking first)
+                    Self::remove_ips_for_peer(peer_stats, peer_ips_ref, peer_id);
+                    return false;
+                }
+
+                // we don't decay retained scores, as the peer is not active.
+                // this way the peer cannot reset a negative score by simply disconnecting and reconnecting,
+                // unless the retention period has elapsed.
+                // similarly, a well behaved peer does not lose its score by getting disconnected.
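+                // (with the default `retain_score` of one hour, a reconnecting peer keeps its
+                // negative score for up to an hour.)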
+                return true;
+            }
+
+            for (topic, topic_stats) in peer_stats.topics.iter_mut() {
+                // the topic parameters
+                if let Some(topic_params) = params_ref.topics.get(topic) {
+                    // decay counters
+                    topic_stats.first_message_deliveries *=
+                        topic_params.first_message_deliveries_decay;
+                    if topic_stats.first_message_deliveries < params_ref.decay_to_zero {
+                        topic_stats.first_message_deliveries = 0.0;
+                    }
+                    topic_stats.mesh_message_deliveries *=
+                        topic_params.mesh_message_deliveries_decay;
+                    if topic_stats.mesh_message_deliveries < params_ref.decay_to_zero {
+                        topic_stats.mesh_message_deliveries = 0.0;
+                    }
+                    topic_stats.mesh_failure_penalty *= topic_params.mesh_failure_penalty_decay;
+                    if topic_stats.mesh_failure_penalty < params_ref.decay_to_zero {
+                        topic_stats.mesh_failure_penalty = 0.0;
+                    }
+                    topic_stats.invalid_message_deliveries *=
+                        topic_params.invalid_message_deliveries_decay;
+                    if topic_stats.invalid_message_deliveries < params_ref.decay_to_zero {
+                        topic_stats.invalid_message_deliveries = 0.0;
+                    }
+                    // update mesh time and activate mesh message delivery parameter if need be
+                    if let MeshStatus::Active {
+                        ref mut mesh_time,
+                        ref mut graft_time,
+                    } = topic_stats.mesh_status
+                    {
+                        *mesh_time = now.duration_since(*graft_time);
+                        if *mesh_time > topic_params.mesh_message_deliveries_activation {
+                            topic_stats.mesh_message_deliveries_active = true;
+                        }
+                    }
+                }
+            }
+
+            // decay P7 counter
+            peer_stats.behaviour_penalty *= params_ref.behaviour_penalty_decay;
+            if peer_stats.behaviour_penalty < params_ref.decay_to_zero {
+                peer_stats.behaviour_penalty = 0.0;
+            }
+            true
+        });
+    }
+
+    /// Adds a connected peer to [`PeerScore`], initialising it with no known IPs
+    /// (IPs are added later through [`Self::add_ip`]).
+    pub(crate) fn add_peer(&mut self, peer_id: NodeId) {
+        let peer_stats = self.peer_stats.entry(peer_id).or_default();
+
+        // mark the peer as connected
+        peer_stats.status = ConnectionStatus::Connected;
+    }
+
+    /// Adds a new IP to a peer; if the peer is not yet known, creates a new `peer_stats` entry
+    /// for it.
+    pub(crate) fn add_ip(&mut self, peer_id: &NodeId, ip: IpAddr) {
+        tracing::trace!(peer=%peer_id, %ip, "Add ip for peer");
+        let peer_stats = self.peer_stats.entry(*peer_id).or_default();
+
+        // Mark the peer as connected (currently the default is connected, but we don't want to
+        // rely on the default).
+        peer_stats.status = ConnectionStatus::Connected;
+
+        // Insert the ip
+        peer_stats.known_ips.insert(ip);
+        self.peer_ips.entry(ip).or_default().insert(*peer_id);
+    }
+
+    /// Removes an IP from a peer.
+    pub(crate) fn remove_ip(&mut self, peer_id: &NodeId, ip: &IpAddr) {
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            peer_stats.known_ips.remove(ip);
+            if let Some(peer_ids) = self.peer_ips.get_mut(ip) {
+                tracing::trace!(peer=%peer_id, %ip, "Remove ip for peer");
+                peer_ids.remove(peer_id);
+            } else {
+                tracing::trace!(
+                    peer=%peer_id,
+                    %ip,
+                    "No entry in peer_ips for ip which should get removed for peer"
+                );
+            }
+        } else {
+            tracing::trace!(
+                peer=%peer_id,
+                %ip,
+                "No peer_stats for peer which should remove the ip"
+            );
+        }
+    }
+
+    /// Removes a peer from the score table. This retains peer statistics if their score is
+    /// non-positive.
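+    /// Retaining non-positive scores prevents a peer from wiping a bad record by simply
+    /// disconnecting and reconnecting before `retain_score` has elapsed.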
+    pub(crate) fn remove_peer(&mut self, peer_id: &NodeId) {
+        // we only retain non-positive scores of peers
+        if self.score(peer_id) > 0f64 {
+            if let hash_map::Entry::Occupied(entry) = self.peer_stats.entry(*peer_id) {
+                Self::remove_ips_for_peer(entry.get(), &mut self.peer_ips, peer_id);
+                entry.remove();
+            }
+            return;
+        }
+
+        // if the peer is retained (including its score) the `first_message_deliveries` counters
+        // are reset to 0 and mesh delivery penalties applied.
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            for (topic, topic_stats) in peer_stats.topics.iter_mut() {
+                topic_stats.first_message_deliveries = 0f64;
+
+                if let Some(threshold) = self
+                    .params
+                    .topics
+                    .get(topic)
+                    .map(|param| param.mesh_message_deliveries_threshold)
+                {
+                    if topic_stats.in_mesh()
+                        && topic_stats.mesh_message_deliveries_active
+                        && topic_stats.mesh_message_deliveries < threshold
+                    {
+                        let deficit = threshold - topic_stats.mesh_message_deliveries;
+                        topic_stats.mesh_failure_penalty += deficit * deficit;
+                    }
+                }
+
+                topic_stats.mesh_status = MeshStatus::InActive;
+                topic_stats.mesh_message_deliveries_active = false;
+            }
+
+            peer_stats.status = ConnectionStatus::Disconnected {
+                expire: Instant::now() + self.params.retain_score,
+            };
+        }
+    }
+
+    /// Handles scoring functionality as a peer GRAFTs to a topic.
+    pub(crate) fn graft(&mut self, peer_id: &NodeId, topic: impl Into<TopicHash>) {
+        let topic = topic.into();
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            // if we are scoring the topic, update the mesh status.
+            if let Some(topic_stats) = peer_stats.stats_or_default_mut(topic, &self.params) {
+                topic_stats.mesh_status = MeshStatus::new_active();
+                topic_stats.mesh_message_deliveries_active = false;
+            }
+        }
+    }
+
+    /// Handles scoring functionality as a peer PRUNEs from a topic.
+    pub(crate) fn prune(&mut self, peer_id: &NodeId, topic: TopicHash) {
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            // if we are scoring the topic, update the mesh status.
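+            // A worked example of the sticky penalty below: with a threshold of 20 and only
+            // 5 mesh deliveries at prune time, (20 - 5)^2 = 225 is added to the peer's
+            // `mesh_failure_penalty`.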
+            if let Some(topic_stats) = peer_stats.stats_or_default_mut(topic.clone(), &self.params)
+            {
+                // sticky mesh delivery rate failure penalty
+                let threshold = self
+                    .params
+                    .topics
+                    .get(&topic)
+                    .expect("Topic must exist in order for there to be topic stats")
+                    .mesh_message_deliveries_threshold;
+                if topic_stats.mesh_message_deliveries_active
+                    && topic_stats.mesh_message_deliveries < threshold
+                {
+                    let deficit = threshold - topic_stats.mesh_message_deliveries;
+                    topic_stats.mesh_failure_penalty += deficit * deficit;
+                }
+                topic_stats.mesh_message_deliveries_active = false;
+                topic_stats.mesh_status = MeshStatus::InActive;
+            }
+        }
+    }
+
+    pub(crate) fn validate_message(
+        &mut self,
+        from: &NodeId,
+        msg_id: &MessageId,
+        topic_hash: &TopicHash,
+    ) {
+        // adds an empty record with the message id
+        self.deliveries.entry(msg_id.clone()).or_default();
+
+        if let Some(callback) = self.message_delivery_time_callback {
+            if self
+                .peer_stats
+                .get(from)
+                .and_then(|s| s.topics.get(topic_hash))
+                .map(|ts| ts.in_mesh())
+                .unwrap_or(false)
+            {
+                callback(from, topic_hash, 0.0);
+            }
+        }
+    }
+
+    pub(crate) fn deliver_message(
+        &mut self,
+        from: &NodeId,
+        msg_id: &MessageId,
+        topic_hash: &TopicHash,
+    ) {
+        self.mark_first_message_delivery(from, topic_hash);
+
+        let record = self.deliveries.entry(msg_id.clone()).or_default();
+
+        // this should be the first delivery trace
+        if record.status != DeliveryStatus::Unknown {
+            tracing::warn!(
+                peer=%from,
+                status=?record.status,
+                first_seen=?record.first_seen.elapsed().as_secs(),
+                "Unexpected delivery trace"
+            );
+            return;
+        }
+
+        // mark the message as valid and reward mesh peers that have already forwarded it to us
+        record.status = DeliveryStatus::Valid(Instant::now());
+        for peer in record.peers.iter().cloned().collect::<Vec<_>>() {
+            // this check is to make sure a peer can't send us a message twice and get a double
+            // count if it is a first delivery
+            if &peer != from {
+                self.mark_duplicate_message_delivery(&peer, topic_hash, None);
+            }
+        }
+    }
+
+    /// Similar to `reject_message`, except it does not require the message id or a reason for an
+    /// invalid message.
+    pub(crate) fn reject_invalid_message(&mut self, from: &NodeId, topic_hash: &TopicHash) {
+        tracing::debug!(
+            peer=%from,
+            "[Penalty] Message from peer rejected because of ValidationError or SelfOrigin"
+        );
+
+        self.mark_invalid_message_delivery(from, topic_hash);
+    }
+
+    /// Rejects a message.
+    pub(crate) fn reject_message(
+        &mut self,
+        from: &NodeId,
+        msg_id: &MessageId,
+        topic_hash: &TopicHash,
+        reason: RejectReason,
+    ) {
+        match reason {
+            // these messages are not tracked, but the peer is penalized as they are invalid
+            RejectReason::ValidationError(_) | RejectReason::SelfOrigin => {
+                self.reject_invalid_message(from, topic_hash);
+                return;
+            }
+            // we ignore those messages, so do nothing.
+            RejectReason::BlackListedPeer | RejectReason::BlackListedSource => {
+                return;
+            }
+            _ => {} // the rest are handled after record creation
+        }
+
+        let peers: Vec<_> = {
+            let record = self.deliveries.entry(msg_id.clone()).or_default();
+
+            // Multiple peers can now reject the same message as we track which peers send us the
+            // message. If we have already updated the status, return.
+            if record.status != DeliveryStatus::Unknown {
+                return;
+            }
+
+            if let RejectReason::ValidationIgnored = reason {
+                // we were explicitly instructed by the validator to ignore the message but not
+                // penalize the peer
+                record.status = DeliveryStatus::Ignored;
+                record.peers.clear();
+                return;
+            }
+
+            // mark the message as invalid and penalize peers that have already forwarded it.
+            record.status = DeliveryStatus::Invalid;
+            // release the delivery time tracking map to free some memory early
+            record.peers.drain().collect()
+        };
+
+        self.mark_invalid_message_delivery(from, topic_hash);
+        for peer_id in peers.iter() {
+            self.mark_invalid_message_delivery(peer_id, topic_hash)
+        }
+    }
+
+    pub(crate) fn duplicated_message(
+        &mut self,
+        from: &NodeId,
+        msg_id: &MessageId,
+        topic_hash: &TopicHash,
+    ) {
+        let record = self.deliveries.entry(msg_id.clone()).or_default();
+
+        if record.peers.contains(from) {
+            // we have already seen this duplicate!
+            return;
+        }
+
+        if let Some(callback) = self.message_delivery_time_callback {
+            let time = if let DeliveryStatus::Valid(validated) = record.status {
+                validated.elapsed().as_secs_f64()
+            } else {
+                0.0
+            };
+            if self
+                .peer_stats
+                .get(from)
+                .and_then(|s| s.topics.get(topic_hash))
+                .map(|ts| ts.in_mesh())
+                .unwrap_or(false)
+            {
+                callback(from, topic_hash, time);
+            }
+        }
+
+        match record.status {
+            DeliveryStatus::Unknown => {
+                // the message is being validated; track the peer delivery and wait for
+                // the Deliver/Reject notification.
+                record.peers.insert(*from);
+            }
+            DeliveryStatus::Valid(validated) => {
+                // mark the peer delivery time to only count a duplicate delivery once.
+                record.peers.insert(*from);
+                self.mark_duplicate_message_delivery(from, topic_hash, Some(validated));
+            }
+            DeliveryStatus::Invalid => {
+                // we no longer track delivery time
+                self.mark_invalid_message_delivery(from, topic_hash);
+            }
+            DeliveryStatus::Ignored => {
+                // the message was ignored; do nothing (we don't know if it was valid)
+            }
+        }
+    }
+
+    /// Sets the application specific score for a peer. Returns true if the peer is connected or
+    /// if its score has not yet expired, and false otherwise.
+    pub(crate) fn set_application_score(&mut self, peer_id: &NodeId, new_score: f64) -> bool {
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            peer_stats.application_score = new_score;
+            true
+        } else {
+            false
+        }
+    }
+
+    /// Sets scoring parameters for a topic.
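+    /// If a new cap is lower than the old one, existing per-peer counters for the topic are
+    /// clamped down to the new cap. A minimal usage sketch (the `topic_hash` is assumed):
+    /// ```ignore
+    /// peer_score.set_topic_params(topic_hash, TopicScoreParams::default());
+    /// ```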
+    pub(crate) fn set_topic_params(&mut self, topic_hash: TopicHash, params: TopicScoreParams) {
+        use hash_map::Entry::*;
+        match self.params.topics.entry(topic_hash.clone()) {
+            Occupied(mut entry) => {
+                let first_message_deliveries_cap = params.first_message_deliveries_cap;
+                let mesh_message_deliveries_cap = params.mesh_message_deliveries_cap;
+                let old_params = entry.insert(params);
+
+                if old_params.first_message_deliveries_cap > first_message_deliveries_cap {
+                    for stats in self.peer_stats.values_mut() {
+                        if let Some(tstats) = stats.topics.get_mut(&topic_hash) {
+                            if tstats.first_message_deliveries > first_message_deliveries_cap {
+                                tstats.first_message_deliveries = first_message_deliveries_cap;
+                            }
+                        }
+                    }
+                }
+
+                if old_params.mesh_message_deliveries_cap > mesh_message_deliveries_cap {
+                    for stats in self.peer_stats.values_mut() {
+                        if let Some(tstats) = stats.topics.get_mut(&topic_hash) {
+                            if tstats.mesh_message_deliveries > mesh_message_deliveries_cap {
+                                tstats.mesh_message_deliveries = mesh_message_deliveries_cap;
+                            }
+                        }
+                    }
+                }
+            }
+            Vacant(entry) => {
+                entry.insert(params);
+            }
+        }
+    }
+
+    /// Returns the scoring parameters for a topic if they exist.
+    pub(crate) fn get_topic_params(&self, topic_hash: &TopicHash) -> Option<&TopicScoreParams> {
+        self.params.topics.get(topic_hash)
+    }
+
+    /// Increments the "invalid message deliveries" counter for all scored topics the message
+    /// is published in.
+    fn mark_invalid_message_delivery(&mut self, peer_id: &NodeId, topic_hash: &TopicHash) {
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            if let Some(topic_stats) =
+                peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params)
+            {
+                tracing::debug!(
+                    peer=%peer_id,
+                    topic=%topic_hash,
+                    "[Penalty] Peer delivered an invalid message in topic and gets penalized \
+                    for it",
+                );
+                topic_stats.invalid_message_deliveries += 1f64;
+            }
+        }
+    }
+
+    /// Increments the "first message deliveries" counter for all scored topics the message is
+    /// published in, as well as the "mesh message deliveries" counter, if the peer is in the
+    /// mesh for the topic.
+    fn mark_first_message_delivery(&mut self, peer_id: &NodeId, topic_hash: &TopicHash) {
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            if let Some(topic_stats) =
+                peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params)
+            {
+                let cap = self
+                    .params
+                    .topics
+                    .get(topic_hash)
+                    .expect("Topic must exist if there are known topic_stats")
+                    .first_message_deliveries_cap;
+                topic_stats.first_message_deliveries =
+                    if topic_stats.first_message_deliveries + 1f64 > cap {
+                        cap
+                    } else {
+                        topic_stats.first_message_deliveries + 1f64
+                    };
+
+                if let MeshStatus::Active { .. } = topic_stats.mesh_status {
+                    let cap = self
+                        .params
+                        .topics
+                        .get(topic_hash)
+                        .expect("Topic must exist if there are known topic_stats")
+                        .mesh_message_deliveries_cap;
+
+                    topic_stats.mesh_message_deliveries =
+                        if topic_stats.mesh_message_deliveries + 1f64 > cap {
+                            cap
+                        } else {
+                            topic_stats.mesh_message_deliveries + 1f64
+                        };
+                }
+            }
+        }
+    }
+
+    /// Increments the "mesh message deliveries" counter for messages we've seen before, as long
+    /// as the message was received within the P3 window.
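+    /// Deliveries recorded while the message was still being validated (no validated time
+    /// yet) are treated as falling inside the window.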
+    fn mark_duplicate_message_delivery(
+        &mut self,
+        peer_id: &NodeId,
+        topic_hash: &TopicHash,
+        validated_time: Option<Instant>,
+    ) {
+        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
+            let now = if validated_time.is_some() {
+                Some(Instant::now())
+            } else {
+                None
+            };
+            if let Some(topic_stats) =
+                peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params)
+            {
+                if let MeshStatus::Active { .. } = topic_stats.mesh_status {
+                    let topic_params = self
+                        .params
+                        .topics
+                        .get(topic_hash)
+                        .expect("Topic must exist if there are known topic_stats");
+
+                    // check against the mesh delivery window -- if the validated time is not set,
+                    // the message was received before we finished validation and thus falls
+                    // within the mesh delivery window.
+                    let mut falls_in_mesh_deliver_window = true;
+                    if let Some(validated_time) = validated_time {
+                        if let Some(now) = &now {
+                            // should always be true
+                            let window_time = validated_time
+                                .checked_add(topic_params.mesh_message_deliveries_window)
+                                .unwrap_or(*now);
+                            if now > &window_time {
+                                falls_in_mesh_deliver_window = false;
+                            }
+                        }
+                    }
+
+                    if falls_in_mesh_deliver_window {
+                        let cap = topic_params.mesh_message_deliveries_cap;
+                        topic_stats.mesh_message_deliveries =
+                            if topic_stats.mesh_message_deliveries + 1f64 > cap {
+                                cap
+                            } else {
+                                topic_stats.mesh_message_deliveries + 1f64
+                            };
+                    }
+                }
+            }
+        }
+    }
+
+    pub(crate) fn mesh_message_deliveries(&self, peer: &NodeId, topic: &TopicHash) -> Option<f64> {
+        self.peer_stats
+            .get(peer)
+            .and_then(|s| s.topics.get(topic))
+            .map(|t| t.mesh_message_deliveries)
+    }
+}
+
+/// The reason a Gossipsub message has been rejected.
+#[derive(Clone, Copy)]
+pub(crate) enum RejectReason {
+    /// The message failed the configured validation during decoding.
+    ValidationError(ValidationError),
+    /// The message source is us.
+    SelfOrigin,
+    /// The peer that sent the message was blacklisted.
+    BlackListedPeer,
+    /// The source (from field) of the message was blacklisted.
+    BlackListedSource,
+    /// The validation was ignored.
+    ValidationIgnored,
+    /// The validation failed.
+    ValidationFailed,
+}
diff --git a/gossipsub/src/peer_score/params.rs b/gossipsub/src/peer_score/params.rs
new file mode 100644
index 0000000..d52e02c
--- /dev/null
+++ b/gossipsub/src/peer_score/params.rs
@@ -0,0 +1,376 @@
+use crate::TopicHash;
+use std::collections::{HashMap, HashSet};
+use std::net::IpAddr;
+use std::time::Duration;
+
+/// The default number of seconds for a decay interval.
+const DEFAULT_DECAY_INTERVAL: u64 = 1;
+/// The default rate to decay to 0.
+const DEFAULT_DECAY_TO_ZERO: f64 = 0.1;
+
+/// Computes the decay factor for a parameter, assuming the `decay_interval` is 1s
+/// and that the value decays to zero if it drops below 0.1 (`DEFAULT_DECAY_TO_ZERO`).
+pub fn score_parameter_decay(decay: Duration) -> f64 {
+    score_parameter_decay_with_base(
+        decay,
+        Duration::from_secs(DEFAULT_DECAY_INTERVAL),
+        DEFAULT_DECAY_TO_ZERO,
+    )
+}
+
+/// Computes the decay factor for a parameter using `base` as the `decay_interval`.
+pub fn score_parameter_decay_with_base(decay: Duration, base: Duration, decay_to_zero: f64) -> f64 {
+    // the decay is geometric: after n ticks the value is factor^n,
+    // so factor^n = decay_to_zero => factor = decay_to_zero^(1/n)
+    let ticks = decay.as_secs_f64() / base.as_secs_f64();
+    decay_to_zero.powf(1f64 / ticks)
+}
+
+#[derive(Debug, Clone)]
+pub struct PeerScoreThresholds {
+    /// The score threshold below which gossip propagation is suppressed;
+    /// should be negative.
+    pub gossip_threshold: f64,
+
+    /// The score threshold below which we shouldn't publish when using flood
+    /// publishing (also applies to fanout peers); should be negative and <= `gossip_threshold`.
+    pub publish_threshold: f64,
+
+    /// The score threshold below which message processing is suppressed altogether,
+    /// implementing an effective graylist according to peer score; should be negative and
+    /// <= `publish_threshold`.
+    pub graylist_threshold: f64,
+
+    /// The score threshold below which PX (peer exchange) will be ignored; this should be
+    /// positive and limited to scores attainable by bootstrappers and other trusted nodes.
+    pub accept_px_threshold: f64,
+
+    /// The median mesh score threshold before triggering opportunistic
+    /// grafting; this should have a small positive value.
+    pub opportunistic_graft_threshold: f64,
+}
+
+impl Default for PeerScoreThresholds {
+    fn default() -> Self {
+        PeerScoreThresholds {
+            gossip_threshold: -10.0,
+            publish_threshold: -50.0,
+            graylist_threshold: -80.0,
+            accept_px_threshold: 10.0,
+            opportunistic_graft_threshold: 20.0,
+        }
+    }
+}
+
+impl PeerScoreThresholds {
+    pub fn validate(&self) -> Result<(), &'static str> {
+        if self.gossip_threshold > 0f64 {
+            return Err("Invalid gossip threshold; it must be <= 0");
+        }
+        if self.publish_threshold > 0f64 || self.publish_threshold > self.gossip_threshold {
+            return Err("Invalid publish threshold; it must be <= 0 and <= gossip threshold");
+        }
+        if self.graylist_threshold > 0f64 || self.graylist_threshold > self.publish_threshold {
+            return Err("Invalid graylist threshold; it must be <= 0 and <= publish threshold");
+        }
+        if self.accept_px_threshold < 0f64 {
+            return Err("Invalid accept px threshold; it must be >= 0");
+        }
+        if self.opportunistic_graft_threshold < 0f64 {
+            return Err("Invalid opportunistic grafting threshold; it must be >= 0");
+        }
+        Ok(())
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct PeerScoreParams {
+    /// Score parameters per topic.
+    pub topics: HashMap<TopicHash, TopicScoreParams>,
+
+    /// Aggregate topic score cap; this limits the total contribution of topics towards a positive
+    /// score. It must be positive (or 0 for no cap).
+    pub topic_score_cap: f64,
+
+    /// P5: Application-specific peer scoring.
+    pub app_specific_weight: f64,
+
+    /// P6: IP-colocation factor.
+    /// The parameter has an associated counter which counts the number of peers with the same IP.
+    /// If the number of peers with the same IP exceeds `ip_colocation_factor_threshold`, then the
+    /// value is the square of the difference, ie `(peers_in_same_ip - ip_colocation_threshold)^2`.
+    /// If the number of peers with the same IP is less than the threshold, then the value is 0.
+    /// The weight of the parameter MUST be negative, unless you want to disable it for testing.
+    /// Note: In order to simulate many IPs in a manageable manner when testing, you can set the
+    /// weight to 0, thus disabling the IP colocation penalty.
+    pub ip_colocation_factor_weight: f64,
+    pub ip_colocation_factor_threshold: f64,
+    pub ip_colocation_factor_whitelist: HashSet<IpAddr>,
+
+    /// P7: behavioural pattern penalties.
+    /// This parameter has an associated counter which tracks misbehaviour as detected by the
+    /// router. The router currently applies penalties for the following behaviors:
+    /// - attempting to re-graft before the prune backoff time has elapsed.
+    /// - not following up on IWANT requests for messages advertised with IHAVE.
+    ///
+    /// The value of the parameter is the square of the counter over the threshold, which decays
+    /// with `behaviour_penalty_decay`.
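+    /// For example, a counter of 8 against a threshold of 6 contributes (8 - 6)^2 = 4
+    /// before weighting.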
+    /// The weight of the parameter MUST be negative (or zero to disable).
+    pub behaviour_penalty_weight: f64,
+    pub behaviour_penalty_threshold: f64,
+    pub behaviour_penalty_decay: f64,
+
+    /// The decay interval for parameter counters.
+    pub decay_interval: Duration,
+
+    /// Counter value below which it is considered 0.
+    pub decay_to_zero: f64,
+
+    /// Time to remember counters for a disconnected peer.
+    pub retain_score: Duration,
+}
+
+impl Default for PeerScoreParams {
+    fn default() -> Self {
+        PeerScoreParams {
+            topics: HashMap::new(),
+            topic_score_cap: 3600.0,
+            app_specific_weight: 10.0,
+            ip_colocation_factor_weight: -5.0,
+            ip_colocation_factor_threshold: 10.0,
+            ip_colocation_factor_whitelist: HashSet::new(),
+            behaviour_penalty_weight: -10.0,
+            behaviour_penalty_threshold: 0.0,
+            behaviour_penalty_decay: 0.2,
+            decay_interval: Duration::from_secs(DEFAULT_DECAY_INTERVAL),
+            decay_to_zero: DEFAULT_DECAY_TO_ZERO,
+            retain_score: Duration::from_secs(3600),
+        }
+    }
+}
+
+/// Peer score parameter validation.
+impl PeerScoreParams {
+    pub fn validate(&self) -> Result<(), String> {
+        for (topic, params) in self.topics.iter() {
+            if let Err(e) = params.validate() {
+                return Err(format!("Invalid score parameters for topic {topic}: {e}"));
+            }
+        }
+
+        // check that the topic score cap is 0 or something positive
+        if self.topic_score_cap < 0f64 {
+            return Err("Invalid topic score cap; must be positive (or 0 for no cap)".into());
+        }
+
+        // check the IP colocation factor
+        if self.ip_colocation_factor_weight > 0f64 {
+            return Err(
+                "Invalid ip_colocation_factor_weight; must be negative (or 0 to disable)".into(),
+            );
+        }
+        if self.ip_colocation_factor_weight != 0f64 && self.ip_colocation_factor_threshold < 1f64 {
+            return Err("Invalid ip_colocation_factor_threshold; must be at least 1".into());
+        }
+
+        // check the behaviour penalty
+        if self.behaviour_penalty_weight > 0f64 {
+            return Err(
+                "Invalid behaviour_penalty_weight; must be negative (or 0 to disable)".into(),
+            );
+        }
+        if self.behaviour_penalty_weight != 0f64
+            && (self.behaviour_penalty_decay <= 0f64 || self.behaviour_penalty_decay >= 1f64)
+        {
+            return Err("Invalid behaviour_penalty_decay; must be between 0 and 1".into());
+        }
+
+        if self.behaviour_penalty_threshold < 0f64 {
+            return Err("Invalid behaviour_penalty_threshold; must be >= 0".into());
+        }
+
+        // check the decay parameters
+        if self.decay_interval < Duration::from_secs(1) {
+            return Err("Invalid decay_interval; must be at least 1s".into());
+        }
+        if self.decay_to_zero <= 0f64 || self.decay_to_zero >= 1f64 {
+            return Err("Invalid decay_to_zero; must be between 0 and 1".into());
+        }
+
+        // no need to check the score retention; a value of 0 means that we don't retain scores
+        Ok(())
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct TopicScoreParams {
+    /// The weight of the topic.
+    pub topic_weight: f64,
+
+    /// P1: time in the mesh
+    /// This is the time the peer has been grafted in the mesh.
+    /// The value of the parameter is `time/time_in_mesh_quantum`, capped by `time_in_mesh_cap`.
+    /// The weight of the parameter must be positive (or zero to disable).
+    pub time_in_mesh_weight: f64,
+    pub time_in_mesh_quantum: Duration,
+    pub time_in_mesh_cap: f64,
+
+    /// P2: first message deliveries
+    /// This is the number of message deliveries in the topic.
+    /// The value of the parameter is a counter, decaying with `first_message_deliveries_decay`,
+    /// and capped by `first_message_deliveries_cap`.
+    /// The weight of the parameter MUST be positive (or zero to disable).
+    pub first_message_deliveries_weight: f64,
+    pub first_message_deliveries_decay: f64,
+    pub first_message_deliveries_cap: f64,
+
+    /// P3: mesh message deliveries
+    /// This is the number of message deliveries in the mesh, within the
+    /// `mesh_message_deliveries_window` of message validation; deliveries during validation also
+    /// count and are retroactively applied when validation succeeds.
+    /// This window accounts for the minimum time before a hostile mesh peer trying to game the
+    /// score could replay back a valid message we just sent them.
+    /// It effectively tracks first and near-first deliveries, ie a message seen from a mesh peer
+    /// before we have forwarded it to them.
+    /// The parameter has an associated counter, decaying with `mesh_message_deliveries_decay`.
+    /// If the counter exceeds the threshold, its value is 0.
+    /// If the counter is below the `mesh_message_deliveries_threshold`, the value is the square of
+    /// the deficit, ie `(mesh_message_deliveries_threshold - counter)^2`.
+    /// The penalty is only activated after `mesh_message_deliveries_activation` time in the mesh.
+    /// The weight of the parameter MUST be negative (or zero to disable).
+    pub mesh_message_deliveries_weight: f64,
+    pub mesh_message_deliveries_decay: f64,
+    pub mesh_message_deliveries_cap: f64,
+    pub mesh_message_deliveries_threshold: f64,
+    pub mesh_message_deliveries_window: Duration,
+    pub mesh_message_deliveries_activation: Duration,
+
+    /// P3b: sticky mesh propagation failures
+    /// This is a sticky penalty that applies when a peer gets pruned from the mesh with an active
+    /// mesh message delivery penalty.
+    /// The weight of the parameter MUST be negative (or zero to disable).
+    pub mesh_failure_penalty_weight: f64,
+    pub mesh_failure_penalty_decay: f64,
+
+    /// P4: invalid messages
+    /// This is the number of invalid messages in the topic.
+    /// The value of the parameter is the square of the counter, decaying with
+    /// `invalid_message_deliveries_decay`.
+    /// The weight of the parameter MUST be negative (or zero to disable).
+    pub invalid_message_deliveries_weight: f64,
+    pub invalid_message_deliveries_decay: f64,
+}
+
+/// NOTE: The topic score parameters are very network specific.
+/// For any production system, these values should be manually set.
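+///
+/// A minimal sketch of overriding a few fields (the values are illustrative, not
+/// recommendations):
+/// ```ignore
+/// let topic_params = TopicScoreParams {
+///     topic_weight: 1.0,
+///     time_in_mesh_quantum: Duration::from_secs(1),
+///     ..Default::default()
+/// };
+/// ```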
+impl Default for TopicScoreParams {
+    fn default() -> Self {
+        TopicScoreParams {
+            topic_weight: 0.5,
+            // P1
+            time_in_mesh_weight: 1.0,
+            time_in_mesh_quantum: Duration::from_millis(1),
+            time_in_mesh_cap: 3600.0,
+            // P2
+            first_message_deliveries_weight: 1.0,
+            first_message_deliveries_decay: 0.5,
+            first_message_deliveries_cap: 2000.0,
+            // P3
+            mesh_message_deliveries_weight: -1.0,
+            mesh_message_deliveries_decay: 0.5,
+            mesh_message_deliveries_cap: 100.0,
+            mesh_message_deliveries_threshold: 20.0,
+            mesh_message_deliveries_window: Duration::from_millis(10),
+            mesh_message_deliveries_activation: Duration::from_secs(5),
+            // P3b
+            mesh_failure_penalty_weight: -1.0,
+            mesh_failure_penalty_decay: 0.5,
+            // P4
+            invalid_message_deliveries_weight: -1.0,
+            invalid_message_deliveries_decay: 0.3,
+        }
+    }
+}
+
+impl TopicScoreParams {
+    pub fn validate(&self) -> Result<(), &'static str> {
+        // make sure we have a sane topic weight
+        if self.topic_weight < 0f64 {
+            return Err("Invalid topic_weight; must be >= 0");
+        }
+
+        if self.time_in_mesh_quantum == Duration::from_secs(0) {
+            return Err("Invalid time_in_mesh_quantum; must be non-zero");
+        }
+        if self.time_in_mesh_weight < 0f64 {
+            return Err("Invalid time_in_mesh_weight; must be positive (or 0 to disable)");
+        }
+        if self.time_in_mesh_weight != 0f64 && self.time_in_mesh_cap <= 0f64 {
+            return Err("Invalid time_in_mesh_cap; must be positive");
+        }
+
+        if self.first_message_deliveries_weight < 0f64 {
+            return Err(
+                "Invalid first_message_deliveries_weight; must be positive (or 0 to disable)",
+            );
+        }
+        if self.first_message_deliveries_weight != 0f64
+            && (self.first_message_deliveries_decay <= 0f64
+                || self.first_message_deliveries_decay >= 1f64)
+        {
+            return Err("Invalid first_message_deliveries_decay; must be between 0 and 1");
+        }
+        if self.first_message_deliveries_weight != 0f64 && self.first_message_deliveries_cap <= 0f64
+        {
+            return Err("Invalid first_message_deliveries_cap; must be positive");
+        }
+
+        if self.mesh_message_deliveries_weight > 0f64 {
+            return Err(
+                "Invalid mesh_message_deliveries_weight; must be negative (or 0 to disable)",
+            );
+        }
+        if self.mesh_message_deliveries_weight != 0f64
+            && (self.mesh_message_deliveries_decay <= 0f64
+                || self.mesh_message_deliveries_decay >= 1f64)
+        {
+            return Err("Invalid mesh_message_deliveries_decay; must be between 0 and 1");
+        }
+        if self.mesh_message_deliveries_weight != 0f64 && self.mesh_message_deliveries_cap <= 0f64 {
+            return Err("Invalid mesh_message_deliveries_cap; must be positive");
+        }
+        if self.mesh_message_deliveries_weight != 0f64
+            && self.mesh_message_deliveries_threshold <= 0f64
+        {
+            return Err("Invalid mesh_message_deliveries_threshold; must be positive");
+        }
+        if self.mesh_message_deliveries_weight != 0f64
+            && self.mesh_message_deliveries_activation < Duration::from_secs(1)
+        {
+            return Err("Invalid mesh_message_deliveries_activation; must be at least 1s");
+        }
+
+        // check P3b
+        if self.mesh_failure_penalty_weight > 0f64 {
+            return Err("Invalid mesh_failure_penalty_weight; must be negative (or 0 to disable)");
+        }
+        if self.mesh_failure_penalty_weight != 0f64
+            && (self.mesh_failure_penalty_decay <= 0f64 || self.mesh_failure_penalty_decay >= 1f64)
+        {
+            return Err("Invalid mesh_failure_penalty_decay; must be between 0 and 1");
+        }
+
+        // check P4
+        if self.invalid_message_deliveries_weight > 0f64 {
+            return Err(
+                "Invalid invalid_message_deliveries_weight; must be negative (or 0 to disable)",
+            );
+        }
+        if self.invalid_message_deliveries_decay <= 0f64
+            || self.invalid_message_deliveries_decay >= 1f64
+        {
+            return Err("Invalid invalid_message_deliveries_decay; must be between 0 and 1");
+        }
+        Ok(())
+    }
+}
diff --git a/gossipsub/src/peer_score/tests.rs b/gossipsub/src/peer_score/tests.rs
new file mode 100644
index 0000000..84e39a9
--- /dev/null
+++ b/gossipsub/src/peer_score/tests.rs
@@ -0,0 +1,957 @@
+use iroh::net::key::SecretKey;
+
+// A collection of unit tests mostly ported from the go implementation.
+use super::*;
+
+use crate::types::RawMessage;
+use crate::{IdentTopic as Topic, Message};
+
+// checks that `value` is within `variance` of `expected`
+fn within_variance(value: f64, expected: f64, variance: f64) -> bool {
+    if expected >= 0.0 {
+        return value > expected * (1.0 - variance) && value < expected * (1.0 + variance);
+    }
+    value > expected * (1.0 + variance) && value < expected * (1.0 - variance)
+}
+
+// generates a random gossipsub message with sequence number `seq`
+fn make_test_message(seq: u64) -> (MessageId, RawMessage) {
+    let raw_message = RawMessage {
+        source: Some(SecretKey::generate().public()),
+        data: vec![12, 34, 56],
+        sequence_number: Some(seq),
+        topic: Topic::new("test").hash(),
+        signature: None,
+        validated: true,
+    };
+
+    let message = Message {
+        source: raw_message.source,
+        data: raw_message.data.clone(),
+        sequence_number: raw_message.sequence_number,
+        topic: raw_message.topic.clone(),
+    };
+
+    let id = default_message_id()(&message);
+    (id, raw_message)
+}
+
+fn default_message_id() -> fn(&Message) -> MessageId {
+    |message| {
+        // default message id is: source + sequence number
+        // NOTE: If the source is not provided, we fall back to the id of a fixed key.
+        let mut source_string = if let Some(peer_id) = message.source.as_ref() {
+            peer_id.to_string()
+        } else {
+            SecretKey::from_bytes(&[1u8; 32]).public().to_string()
+        };
+        source_string.push_str(&message.sequence_number.unwrap_or_default().to_string());
+        MessageId::from(source_string)
+    }
+}
+
+#[test]
+fn test_score_time_in_mesh() {
+    // Create parameters with reasonable default values
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let mut params = PeerScoreParams {
+        topic_score_cap: 1000.0,
+        ..Default::default()
+    };
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 0.5,
+        time_in_mesh_weight: 1.0,
+        time_in_mesh_quantum: Duration::from_millis(1),
+        time_in_mesh_cap: 3600.0,
+        ..Default::default()
+    };
+
+    params.topics.insert(topic_hash, topic_params.clone());
+
+    let peer_id = SecretKey::generate().public();
+
+    let mut peer_score = PeerScore::new(params);
+    // Peer score should start at 0
+    peer_score.add_peer(peer_id);
+
+    let score = peer_score.score(&peer_id);
+    assert!(
+        score == 0.0,
+        "expected score to start at zero. Score found: {score}"
+    );
+
+    // The time in mesh depends on how long the peer has been grafted
+    peer_score.graft(&peer_id, topic);
+    let elapsed = topic_params.time_in_mesh_quantum * 200;
+    std::thread::sleep(elapsed);
+    peer_score.refresh_scores();
+
+    let score = peer_score.score(&peer_id);
+    let expected = topic_params.topic_weight
+        * topic_params.time_in_mesh_weight
+        * (elapsed.as_millis() / topic_params.time_in_mesh_quantum.as_millis()) as f64;
+    assert!(
+        score >= expected,
+        "The score: {score} should be greater than or equal to: {expected}"
+    );
+}
+
+#[test]
+fn test_score_time_in_mesh_cap() {
+    // Create parameters with reasonable default values
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let mut params = PeerScoreParams::default();
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 0.5,
+        time_in_mesh_weight: 1.0,
+        time_in_mesh_quantum: Duration::from_millis(1),
+        time_in_mesh_cap: 10.0,
+        ..Default::default()
+    };
+
+    params.topics.insert(topic_hash, topic_params.clone());
+
+    let peer_id = SecretKey::generate().public();
+
+    let mut peer_score = PeerScore::new(params);
+    // Peer score should start at 0
+    peer_score.add_peer(peer_id);
+
+    let score = peer_score.score(&peer_id);
+    assert!(
+        score == 0.0,
+        "expected score to start at zero. Score found: {score}"
+    );
+
+    // The time in mesh depends on how long the peer has been grafted
+    peer_score.graft(&peer_id, topic);
+    let elapsed = topic_params.time_in_mesh_quantum * 40;
+    std::thread::sleep(elapsed);
+    peer_score.refresh_scores();
+
+    let score = peer_score.score(&peer_id);
+    let expected = topic_params.topic_weight
+        * topic_params.time_in_mesh_weight
+        * topic_params.time_in_mesh_cap;
+    let variance = 0.5;
+    assert!(
+        within_variance(score, expected, variance),
+        "The score: {} should be within {} of {}",
+        score,
+        score * variance,
+        expected
+    );
+}
+
+#[test]
+fn test_score_first_message_deliveries() {
+    // Create parameters with reasonable default values
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let mut params = PeerScoreParams::default();
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 1.0,
+        first_message_deliveries_weight: 1.0,
+        first_message_deliveries_decay: 1.0,
+        first_message_deliveries_cap: 2000.0,
+        time_in_mesh_weight: 0.0,
+        ..Default::default()
+    };
+
+    params.topics.insert(topic_hash, topic_params.clone());
+
+    let peer_id = SecretKey::generate().public();
+
+    let mut peer_score = PeerScore::new(params);
+    // Peer score should start at 0
+    peer_score.add_peer(peer_id);
+    peer_score.graft(&peer_id, topic);
+
+    // deliver a bunch of messages from the peer
+    let messages = 100;
+    for seq in 0..messages {
+        let (id, msg) = make_test_message(seq);
+        peer_score.validate_message(&peer_id, &id, &msg.topic);
+        peer_score.deliver_message(&peer_id, &id, &msg.topic);
+    }
+
+    peer_score.refresh_scores();
+
+    let score = peer_score.score(&peer_id);
+    let expected =
+        topic_params.topic_weight * topic_params.first_message_deliveries_weight * messages as f64;
+    assert!(score == expected, "The score: {score} should be {expected}");
+}
+
+#[test]
+fn test_score_first_message_deliveries_cap() {
+    // Create parameters with reasonable default values
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let mut params = PeerScoreParams::default();
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 1.0,
+        first_message_deliveries_weight: 1.0,
+        first_message_deliveries_decay: 1.0, // test without decay
first_message_deliveries_cap: 50.0, + time_in_mesh_weight: 0.0, + ..Default::default() + }; + + params.topics.insert(topic_hash, topic_params.clone()); + + let peer_id = SecretKey::generate().public(); + + let mut peer_score = PeerScore::new(params); + // Peer score should start at 0 + peer_score.add_peer(peer_id); + peer_score.graft(&peer_id, topic); + + // deliver a bunch of messages from the peer + let messages = 100; + for seq in 0..messages { + let (id, msg) = make_test_message(seq); + peer_score.validate_message(&peer_id, &id, &msg.topic); + peer_score.deliver_message(&peer_id, &id, &msg.topic); + } + + peer_score.refresh_scores(); + let score = peer_score.score(&peer_id); + let expected = topic_params.topic_weight + * topic_params.first_message_deliveries_weight + * topic_params.first_message_deliveries_cap; + assert!(score == expected, "The score: {score} should be {expected}"); +} + +#[test] +fn test_score_first_message_deliveries_decay() { + // Create parameters with reasonable default values + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let mut params = PeerScoreParams::default(); + + let topic_params = TopicScoreParams { + topic_weight: 1.0, + first_message_deliveries_weight: 1.0, + first_message_deliveries_decay: 0.9, // decay 10% per decay interval + first_message_deliveries_cap: 2000.0, + time_in_mesh_weight: 0.0, + ..Default::default() + }; + + params.topics.insert(topic_hash, topic_params.clone()); + let peer_id = SecretKey::generate().public(); + let mut peer_score = PeerScore::new(params); + peer_score.add_peer(peer_id); + peer_score.graft(&peer_id, topic); + + // deliver a bunch of messages from the peer + let messages = 100; + for seq in 0..messages { + let (id, msg) = make_test_message(seq); + peer_score.validate_message(&peer_id, &id, &msg.topic); + peer_score.deliver_message(&peer_id, &id, &msg.topic); + } + + peer_score.refresh_scores(); + let score = peer_score.score(&peer_id); + let mut expected = topic_params.topic_weight + * topic_params.first_message_deliveries_weight + * topic_params.first_message_deliveries_decay + * messages as f64; + assert!(score == expected, "The score: {score} should be {expected}"); + + // refreshing the scores applies the decay param + let decay_intervals = 10; + for _ in 0..decay_intervals { + peer_score.refresh_scores(); + expected *= topic_params.first_message_deliveries_decay; + } + let score = peer_score.score(&peer_id); + assert!(score == expected, "The score: {score} should be {expected}"); +} + +#[test] +fn test_score_mesh_message_deliveries() { + // Create parameters with reasonable default values + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let mut params = PeerScoreParams::default(); + + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: -1.0, + mesh_message_deliveries_activation: Duration::from_secs(1), + mesh_message_deliveries_window: Duration::from_millis(10), + mesh_message_deliveries_threshold: 20.0, + mesh_message_deliveries_cap: 100.0, + mesh_message_deliveries_decay: 1.0, + first_message_deliveries_weight: 0.0, + time_in_mesh_weight: 0.0, + mesh_failure_penalty_weight: 0.0, + ..Default::default() + }; + + params.topics.insert(topic_hash, topic_params.clone()); + let mut peer_score = PeerScore::new(params); + + // peer A always delivers the message first. + // peer B delivers next (within the delivery window). + // peer C delivers outside the delivery window. 
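+    // (duplicates inside the window still count as near-first deliveries for P3.)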
+ // we expect peers A and B to have a score of zero, since all other parameter weights are zero. + // Peer C should have a negative score. + let peer_id_a = SecretKey::generate().public(); + let peer_id_b = SecretKey::generate().public(); + let peer_id_c = SecretKey::generate().public(); + + let peers = vec![peer_id_a, peer_id_b, peer_id_c]; + + for peer_id in &peers { + peer_score.add_peer(*peer_id); + peer_score.graft(peer_id, topic.clone()); + } + + // assert that nobody has been penalized yet for not delivering messages before activation time + peer_score.refresh_scores(); + for peer_id in &peers { + let score = peer_score.score(peer_id); + assert!( + score >= 0.0, + "expected no mesh delivery penalty before activation time, got score {score}" + ); + } + + // wait for the activation time to kick in + std::thread::sleep(topic_params.mesh_message_deliveries_activation); + + // deliver a bunch of messages from peer A, with duplicates within the window from peer B, + // and duplicates outside the window from peer C. + let messages = 100; + let mut messages_to_send = Vec::new(); + for seq in 0..messages { + let (id, msg) = make_test_message(seq); + peer_score.validate_message(&peer_id_a, &id, &msg.topic); + peer_score.deliver_message(&peer_id_a, &id, &msg.topic); + + peer_score.duplicated_message(&peer_id_b, &id, &msg.topic); + messages_to_send.push((id, msg)); + } + + std::thread::sleep(topic_params.mesh_message_deliveries_window + Duration::from_millis(20)); + + for (id, msg) in messages_to_send { + peer_score.duplicated_message(&peer_id_c, &id, &msg.topic); + } + + peer_score.refresh_scores(); + let score_a = peer_score.score(&peer_id_a); + let score_b = peer_score.score(&peer_id_b); + let score_c = peer_score.score(&peer_id_c); + + assert!( + score_a >= 0.0, + "expected non-negative score for Peer A, got score {score_a}" + ); + assert!( + score_b >= 0.0, + "expected non-negative score for Peer B, got score {score_b}" + ); + + // the penalty is the difference between the threshold and the actual mesh deliveries, squared. + // since we didn't deliver anything, this is just the value of the threshold + let penalty = topic_params.mesh_message_deliveries_threshold + * topic_params.mesh_message_deliveries_threshold; + let expected = + topic_params.topic_weight * topic_params.mesh_message_deliveries_weight * penalty; + + assert!(score_c == expected, "Score: {score_c}. 
Expected {expected}"); +} + +#[test] +fn test_score_mesh_message_deliveries_decay() { + // Create parameters with reasonable default values + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let mut params = PeerScoreParams::default(); + + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: -1.0, + mesh_message_deliveries_activation: Duration::from_secs(0), + mesh_message_deliveries_window: Duration::from_millis(10), + mesh_message_deliveries_threshold: 20.0, + mesh_message_deliveries_cap: 100.0, + mesh_message_deliveries_decay: 0.9, + first_message_deliveries_weight: 0.0, + time_in_mesh_weight: 0.0, + time_in_mesh_quantum: Duration::from_secs(1), + mesh_failure_penalty_weight: 0.0, + ..Default::default() + }; + + params.topics.insert(topic_hash, topic_params.clone()); + let mut peer_score = PeerScore::new(params); + + let peer_id_a = SecretKey::generate().public(); + peer_score.add_peer(peer_id_a); + peer_score.graft(&peer_id_a, topic); + + // deliver a bunch of messages from peer A + let messages = 100; + for seq in 0..messages { + let (id, msg) = make_test_message(seq); + peer_score.validate_message(&peer_id_a, &id, &msg.topic); + peer_score.deliver_message(&peer_id_a, &id, &msg.topic); + } + + // we should have a positive score, since we delivered more messages than the threshold + peer_score.refresh_scores(); + + let score_a = peer_score.score(&peer_id_a); + assert!( + score_a >= 0.0, + "expected non-negative score for Peer A, got score {score_a}" + ); + + let mut decayed_delivery_count = (messages as f64) * topic_params.mesh_message_deliveries_decay; + for _ in 0..20 { + peer_score.refresh_scores(); + decayed_delivery_count *= topic_params.mesh_message_deliveries_decay; + } + + let score_a = peer_score.score(&peer_id_a); + // the penalty is the difference between the threshold and the (decayed) mesh deliveries, squared. + let deficit = topic_params.mesh_message_deliveries_threshold - decayed_delivery_count; + let penalty = deficit * deficit; + let expected = + topic_params.topic_weight * topic_params.mesh_message_deliveries_weight * penalty; + + assert_eq!(score_a, expected, "Invalid score"); +} + +#[test] +fn test_score_mesh_failure_penalty() { + // Create parameters with reasonable default values + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let mut params = PeerScoreParams::default(); + + let topic_params = TopicScoreParams { + // the mesh failure penalty is applied when a peer is pruned while their + // mesh deliveries are under the threshold. 
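+        // (the applied penalty is topic_weight * mesh_failure_penalty_weight
+        // * (mesh_message_deliveries_threshold - mesh deliveries)^2, as computed below.)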
+        // for this test, we set the mesh delivery threshold, but set
+        // mesh_message_deliveries_weight to zero, so the only effect on the score
+        // is from the mesh failure penalty
+        topic_weight: 1.0,
+        mesh_message_deliveries_weight: 0.0,
+        mesh_message_deliveries_activation: Duration::from_secs(0),
+        mesh_message_deliveries_window: Duration::from_millis(10),
+        mesh_message_deliveries_threshold: 20.0,
+        mesh_message_deliveries_cap: 100.0,
+        mesh_message_deliveries_decay: 1.0,
+        first_message_deliveries_weight: 0.0,
+        time_in_mesh_weight: 0.0,
+        time_in_mesh_quantum: Duration::from_secs(1),
+        mesh_failure_penalty_weight: -1.0,
+        mesh_failure_penalty_decay: 1.0,
+        ..Default::default()
+    };
+
+    params.topics.insert(topic_hash, topic_params.clone());
+    let mut peer_score = PeerScore::new(params);
+
+    let peer_id_a = SecretKey::generate().public();
+    let peer_id_b = SecretKey::generate().public();
+
+    let peers = vec![peer_id_a, peer_id_b];
+
+    for peer_id in &peers {
+        peer_score.add_peer(*peer_id);
+        peer_score.graft(peer_id, topic.clone());
+    }
+
+    // deliver a bunch of messages from peer A
+    let messages = 100;
+    for seq in 0..messages {
+        let (id, msg) = make_test_message(seq);
+
+        peer_score.validate_message(&peer_id_a, &id, &msg.topic);
+        peer_score.deliver_message(&peer_id_a, &id, &msg.topic);
+    }
+
+    // peers A and B should both have zero scores, since the failure penalty hasn't been applied yet
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    let score_b = peer_score.score(&peer_id_b);
+    assert!(
+        score_a >= 0.0,
+        "expected non-negative score for Peer A, got score {score_a}"
+    );
+    assert!(
+        score_b >= 0.0,
+        "expected non-negative score for Peer B, got score {score_b}"
+    );
+
+    // prune peer B to apply the penalty
+    peer_score.prune(&peer_id_b, topic.hash());
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+
+    assert_eq!(score_a, 0.0, "expected Peer A to have a score of 0");
+
+    // penalty calculation is the same as for mesh_message_deliveries, but multiplied by
+    // mesh_failure_penalty_weight instead of mesh_message_deliveries_weight
+    let penalty = topic_params.mesh_message_deliveries_threshold
+        * topic_params.mesh_message_deliveries_threshold;
+    let expected = topic_params.topic_weight * topic_params.mesh_failure_penalty_weight * penalty;
+
+    let score_b = peer_score.score(&peer_id_b);
+
+    assert_eq!(score_b, expected, "Peer B should have expected score");
+}
+
+#[test]
+fn test_score_invalid_message_deliveries() {
+    // Create parameters with reasonable default values
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let mut params = PeerScoreParams::default();
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 1.0,
+        mesh_message_deliveries_weight: 0.0,
+        mesh_message_deliveries_activation: Duration::from_secs(1),
+        mesh_message_deliveries_window: Duration::from_millis(10),
+        mesh_message_deliveries_threshold: 20.0,
+        mesh_message_deliveries_cap: 100.0,
+        mesh_message_deliveries_decay: 1.0,
+        first_message_deliveries_weight: 0.0,
+        time_in_mesh_weight: 0.0,
+        mesh_failure_penalty_weight: 0.0,
+        invalid_message_deliveries_weight: -1.0,
+        invalid_message_deliveries_decay: 1.0,
+        ..Default::default()
+    };
+
+    params.topics.insert(topic_hash, topic_params.clone());
+    let mut peer_score = PeerScore::new(params);
+
+    let peer_id_a = SecretKey::generate().public();
+    peer_score.add_peer(peer_id_a);
+    peer_score.graft(&peer_id_a, topic);
+
+    // reject a bunch of messages from peer A
+    let messages = 100;
+    for seq in 0..messages {
+        let (id, msg) = make_test_message(seq);
+        peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed);
+    }
+
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+
+    let expected = topic_params.topic_weight
+        * topic_params.invalid_message_deliveries_weight
+        * (messages * messages) as f64;
+
+    assert_eq!(score_a, expected, "Peer has unexpected score");
+}
+
+#[test]
+fn test_score_invalid_message_deliveries_decay() {
+    // Create parameters with reasonable default values
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let mut params = PeerScoreParams::default();
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 1.0,
+        mesh_message_deliveries_weight: 0.0,
+        mesh_message_deliveries_activation: Duration::from_secs(1),
+        mesh_message_deliveries_window: Duration::from_millis(10),
+        mesh_message_deliveries_threshold: 20.0,
+        mesh_message_deliveries_cap: 100.0,
+        mesh_message_deliveries_decay: 1.0,
+        first_message_deliveries_weight: 0.0,
+        time_in_mesh_weight: 0.0,
+        mesh_failure_penalty_weight: 0.0,
+        invalid_message_deliveries_weight: -1.0,
+        invalid_message_deliveries_decay: 0.9,
+        ..Default::default()
+    };
+
+    params.topics.insert(topic_hash, topic_params.clone());
+    let mut peer_score = PeerScore::new(params);
+
+    let peer_id_a = SecretKey::generate().public();
+    peer_score.add_peer(peer_id_a);
+    peer_score.graft(&peer_id_a, topic);
+
+    // reject a bunch of messages from peer A
+    let messages = 100;
+    for seq in 0..messages {
+        let (id, msg) = make_test_message(seq);
+        peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed);
+    }
+
+    peer_score.refresh_scores();
+
+    // refresh_scores has applied the decay once to the counter of rejected messages
+    let decayed_count = topic_params.invalid_message_deliveries_decay * messages as f64;
+
+    let mut expected = topic_params.topic_weight
+        * topic_params.invalid_message_deliveries_weight
+        * decayed_count
+        * decayed_count;
+
+    let score_a = peer_score.score(&peer_id_a);
+    assert_eq!(score_a, expected, "Peer has unexpected score");
+
+    // refresh scores a few times to apply decay
+    for _ in 0..10 {
+        peer_score.refresh_scores();
+        expected *= topic_params.invalid_message_deliveries_decay
+            * topic_params.invalid_message_deliveries_decay;
+    }
+
+    let score_a = peer_score.score(&peer_id_a);
+    assert_eq!(score_a, expected, "Peer has unexpected score");
+}
+
+#[test]
+fn test_score_reject_message_deliveries() {
+    // This test adds coverage for the dark corners of rejection tracing
+
+    // Create parameters with reasonable default values
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let mut params = PeerScoreParams::default();
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 1.0,
+        mesh_message_deliveries_weight: 0.0,
+        first_message_deliveries_weight: 0.0,
+        mesh_failure_penalty_weight: 0.0,
+        time_in_mesh_weight: 0.0,
+        time_in_mesh_quantum: Duration::from_secs(1),
+        invalid_message_deliveries_weight: -1.0,
+        invalid_message_deliveries_decay: 1.0,
+        ..Default::default()
+    };
+
+    params.topics.insert(topic_hash, topic_params);
+    let mut peer_score = PeerScore::new(params);
+
+    let peer_id_a = SecretKey::generate().public();
+    let peer_id_b = SecretKey::generate().public();
+
+    let peers = vec![peer_id_a, peer_id_b];
+
+    for peer_id in &peers {
+        peer_score.add_peer(*peer_id);
+    }
+
+    let (id, msg) = make_test_message(1);
+
+    // these should have no effect on the score
+    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::BlackListedPeer);
+    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::BlackListedSource);
+    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored);
+
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    let score_b = peer_score.score(&peer_id_b);
+
+    assert_eq!(score_a, 0.0, "Should have no effect on the score");
+    assert_eq!(score_b, 0.0, "Should have no effect on the score");
+
+    // insert a record in the message deliveries
+    peer_score.validate_message(&peer_id_a, &id, &msg.topic);
+
+    // this should have no effect on the score, and subsequent duplicate messages should have no
+    // effect either
+    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored);
+    peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
+
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    let score_b = peer_score.score(&peer_id_b);
+
+    assert_eq!(score_a, 0.0, "Should have no effect on the score");
+    assert_eq!(score_b, 0.0, "Should have no effect on the score");
+
+    // now clear the delivery record
+    peer_score.deliveries.clear();
+
+    // insert a record in the message deliveries
+    peer_score.validate_message(&peer_id_a, &id, &msg.topic);
+
+    // this should have no effect on the score, and subsequent duplicate messages should have no
+    // effect either
+    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored);
+    peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
+
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    let score_b = peer_score.score(&peer_id_b);
+
+    assert_eq!(score_a, 0.0, "Should have no effect on the score");
+    assert_eq!(score_b, 0.0, "Should have no effect on the score");
+
+    // now clear the delivery record
+    peer_score.deliveries.clear();
+
+    // insert a new record in the message deliveries
+    peer_score.validate_message(&peer_id_a, &id, &msg.topic);
+
+    // and reject the message to make sure duplicates are also penalized
+    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed);
+    peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
+
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    let score_b = peer_score.score(&peer_id_b);
+
+    assert_eq!(score_a, -1.0, "Score should be affected");
+    assert_eq!(score_b, -1.0, "Score should be affected");
+
+    // now clear the delivery record again
+    peer_score.deliveries.clear();
+
+    // insert a new record in the message deliveries
+    peer_score.validate_message(&peer_id_a, &id, &msg.topic);
+
+    // and reject the message after a duplicate has arrived
+    peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
+    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed);
+
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    let score_b = peer_score.score(&peer_id_b);
+
+    assert_eq!(score_a, -4.0, "Score should be affected");
+    assert_eq!(score_b, -4.0, "Score should be affected");
+}
+
+#[test]
+fn test_application_score() {
+    // Create parameters with reasonable default values
+    let app_specific_weight = 0.5;
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let mut params = PeerScoreParams {
+        app_specific_weight,
+        ..Default::default()
+    };
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 1.0,
+        mesh_message_deliveries_weight: 0.0,
+        first_message_deliveries_weight: 0.0,
+        mesh_failure_penalty_weight: 0.0,
+        time_in_mesh_weight: 0.0,
+        time_in_mesh_quantum: Duration::from_secs(1),
+        invalid_message_deliveries_weight: 0.0,
+        invalid_message_deliveries_decay: 1.0,
+        ..Default::default()
+    };
+
+    params.topics.insert(topic_hash, topic_params);
+    let mut peer_score = PeerScore::new(params);
+
+    let peer_id_a = SecretKey::generate().public();
+    peer_score.add_peer(peer_id_a);
+    peer_score.graft(&peer_id_a, topic);
+
+    let messages = 100;
+    for i in -100..messages {
+        let app_score_value = i as f64;
+        peer_score.set_application_score(&peer_id_a, app_score_value);
+        peer_score.refresh_scores();
+        let score_a = peer_score.score(&peer_id_a);
+        let expected = (i as f64) * app_specific_weight;
+        assert_eq!(score_a, expected, "Peer has unexpected score");
+    }
+}
+
+#[test]
+fn test_score_ip_colocation() {
+    // Create parameters with reasonable default values
+    let ip_colocation_factor_weight = -1.0;
+    let ip_colocation_factor_threshold = 1.0;
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let mut params = PeerScoreParams {
+        ip_colocation_factor_weight,
+        ip_colocation_factor_threshold,
+        ..Default::default()
+    };
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 1.0,
+        mesh_message_deliveries_weight: 0.0,
+        first_message_deliveries_weight: 0.0,
+        mesh_failure_penalty_weight: 0.0,
+        time_in_mesh_weight: 0.0,
+        time_in_mesh_quantum: Duration::from_secs(1),
+        invalid_message_deliveries_weight: 0.0,
+        ..Default::default()
+    };
+
+    params.topics.insert(topic_hash, topic_params);
+    let mut peer_score = PeerScore::new(params);
+
+    let peer_id_a = SecretKey::generate().public();
+    let peer_id_b = SecretKey::generate().public();
+    let peer_id_c = SecretKey::generate().public();
+    let peer_id_d = SecretKey::generate().public();
+
+    let peers = vec![peer_id_a, peer_id_b, peer_id_c, peer_id_d];
+    for peer_id in &peers {
+        peer_score.add_peer(*peer_id);
+        peer_score.graft(peer_id, topic.clone());
+    }
+
+    // peer A should have no penalty, but B, C, and D should be penalized for sharing an IP
+    peer_score.add_ip(&peer_id_a, "1.2.3.4".parse().unwrap());
+    peer_score.add_ip(&peer_id_b, "2.3.4.5".parse().unwrap());
+    peer_score.add_ip(&peer_id_c, "2.3.4.5".parse().unwrap());
+    peer_score.add_ip(&peer_id_c, "3.4.5.6".parse().unwrap());
+    peer_score.add_ip(&peer_id_d, "2.3.4.5".parse().unwrap());
+
+    peer_score.refresh_scores();
+    let score_a = peer_score.score(&peer_id_a);
+    let score_b = peer_score.score(&peer_id_b);
+    let score_c = peer_score.score(&peer_id_c);
+    let score_d = peer_score.score(&peer_id_d);
+
+    assert_eq!(score_a, 0.0, "Peer A should be unaffected");
+
+    let n_shared = 3.0;
+    let ip_surplus = n_shared - ip_colocation_factor_threshold;
+    let penalty = ip_surplus * ip_surplus;
+    let expected = ip_colocation_factor_weight * penalty;
+
+    assert_eq!(score_b, expected, "Peer B should have expected score");
+    assert_eq!(score_c, expected, "Peer C should have expected score");
+    assert_eq!(score_d, expected, "Peer D should have expected score");
+}
+
+#[test]
+fn test_score_behaviour_penalty() {
+    // Create parameters with reasonable default values
+    let behaviour_penalty_weight = -1.0;
+    let behaviour_penalty_decay = 0.99;
+
+    let topic = Topic::new("test");
+    let topic_hash = topic.hash();
+    let mut params = PeerScoreParams {
+        behaviour_penalty_decay,
+        behaviour_penalty_weight,
+        ..Default::default()
+    };
+
+    let topic_params = TopicScoreParams {
+        topic_weight: 1.0,
+        mesh_message_deliveries_weight: 0.0,
+        first_message_deliveries_weight: 0.0,
+        mesh_failure_penalty_weight: 0.0,
+        time_in_mesh_weight: 0.0,
+
time_in_mesh_quantum: Duration::from_secs(1), + invalid_message_deliveries_weight: 0.0, + ..Default::default() + }; + + params.topics.insert(topic_hash, topic_params); + let mut peer_score = PeerScore::new(params); + + let peer_id_a = SecretKey::generate().public(); + + // add a penalty to a non-existent peer. + peer_score.add_penalty(&peer_id_a, 1); + + let score_a = peer_score.score(&peer_id_a); + assert_eq!(score_a, 0.0, "Peer A should be unaffected"); + + // add the peer and test penalties + peer_score.add_peer(peer_id_a); + assert_eq!(score_a, 0.0, "Peer A should be unaffected"); + + peer_score.add_penalty(&peer_id_a, 1); + + let score_a = peer_score.score(&peer_id_a); + assert_eq!(score_a, -1.0, "Peer A should have been penalized"); + + peer_score.add_penalty(&peer_id_a, 1); + let score_a = peer_score.score(&peer_id_a); + assert_eq!(score_a, -4.0, "Peer A should have been penalized"); + + peer_score.refresh_scores(); + + let score_a = peer_score.score(&peer_id_a); + assert_eq!(score_a, -3.9204, "Peer A should have been penalized"); +} + +#[test] +fn test_score_retention() { + // Create parameters with reasonable default values + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let app_specific_weight = 1.0; + let app_score_value = -1000.0; + let retain_score = Duration::from_secs(1); + let mut params = PeerScoreParams { + app_specific_weight, + retain_score, + ..Default::default() + }; + + let topic_params = TopicScoreParams { + topic_weight: 0.0, + mesh_message_deliveries_weight: 0.0, + mesh_message_deliveries_activation: Duration::from_secs(0), + first_message_deliveries_weight: 0.0, + time_in_mesh_weight: 0.0, + ..Default::default() + }; + + params.topics.insert(topic_hash, topic_params); + let mut peer_score = PeerScore::new(params); + + let peer_id_a = SecretKey::generate().public(); + peer_score.add_peer(peer_id_a); + peer_score.graft(&peer_id_a, topic); + + peer_score.set_application_score(&peer_id_a, app_score_value); + + // score should equal -1000 (app specific score) + peer_score.refresh_scores(); + let score_a = peer_score.score(&peer_id_a); + assert_eq!( + score_a, app_score_value, + "Score should be the application specific score" + ); + + // disconnect & wait half of RetainScore time. 
Should still have negative score + peer_score.remove_peer(&peer_id_a); + std::thread::sleep(retain_score / 2); + peer_score.refresh_scores(); + let score_a = peer_score.score(&peer_id_a); + assert_eq!( + score_a, app_score_value, + "Score should be the application specific score" + ); + + // wait remaining time (plus a little slop) and the score should reset to zero + std::thread::sleep(retain_score / 2 + Duration::from_millis(50)); + peer_score.refresh_scores(); + let score_a = peer_score.score(&peer_id_a); + assert_eq!( + score_a, 0.0, + "Score should be the application specific score" + ); +} diff --git a/gossipsub/src/protocol.rs b/gossipsub/src/protocol.rs new file mode 100644 index 0000000..d90b8cb --- /dev/null +++ b/gossipsub/src/protocol.rs @@ -0,0 +1,354 @@ +use crate::config::ValidationMode; +use crate::handler::HandlerEvent; +use crate::types::{self, RawMessage, Rpc}; +use crate::ValidationError; +use bytes::{BufMut, Bytes, BytesMut}; +use iroh::net::key::Signature; +use std::io; +use std::pin::Pin; +use tokio::io::{AsyncRead, AsyncWrite}; +use tokio_serde::{Deserializer, Serializer}; +use tokio_util::codec::Framed; + +pub(crate) const SIGNING_PREFIX: &[u8] = b"libp2p-pubsub:"; +pub const GOSSIPSUB_1_1_0_PROTOCOL: &[u8] = b"/meshsub/1.1.0"; + +/// Configuration +#[derive(Debug, Clone)] +pub struct ProtocolConfig { + /// The Gossipsub protocol id to listen on. + pub(crate) protocol_id: Vec, + /// The maximum transmit size for a packet. + pub(crate) max_transmit_size: usize, + /// Determines the level of validation to be done on incoming messages. + pub(crate) validation_mode: ValidationMode, +} + +impl Default for ProtocolConfig { + fn default() -> Self { + Self { + max_transmit_size: 65536, + validation_mode: ValidationMode::Strict, + protocol_id: GOSSIPSUB_1_1_0_PROTOCOL.to_vec(), + } + } +} + +pub type GossipFramed = tokio_serde::Framed< + tokio_util::codec::Framed, + HandlerEvent, + types::Rpc, + GossipsubCodec, +>; + +impl ProtocolConfig { + pub fn upgrade_connection(self, socket: T) -> GossipFramed + where + T: AsyncRead + AsyncWrite, + { + let mut codec = tokio_util::codec::LengthDelimitedCodec::default(); + codec.set_max_frame_length(self.max_transmit_size); + let transport = Framed::new(socket, codec); + + tokio_serde::Framed::new(transport, GossipsubCodec::new(self.validation_mode)) + } +} + +/* Gossip codec for the framing */ + +pub struct GossipsubCodec { + /// Determines the level of validation performed on incoming messages. + validation_mode: ValidationMode, +} + +impl GossipsubCodec { + pub fn new(validation_mode: ValidationMode) -> GossipsubCodec { + GossipsubCodec { validation_mode } + } + + /// Verifies a gossipsub message. This returns either a success or failure. All errors + /// are logged, which prevents error handling in the codec and handler. We simply drop invalid + /// messages and log warnings, rather than propagating errors through the codec. 
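+    ///
+    /// For reference, the verified payload is the signing prefix followed by the
+    /// postcard-encoded [`types::Message`], mirroring the construction in the body below
+    /// (sketch):
+    ///
+    /// ```ignore
+    /// let message: types::Message = raw_message.clone().into();
+    /// let mut signed_bytes = SIGNING_PREFIX.to_vec();
+    /// signed_bytes.extend_from_slice(&postcard::to_stdvec(&message).unwrap());
+    /// // `RawMessage::signature` must be `source`'s signature over `signed_bytes`
+    /// ```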
+ fn verify_signature(message: &types::RawMessage) -> bool { + let Some(from) = message.source.as_ref() else { + tracing::debug!("Signature verification failed: No source id given"); + return false; + }; + + let Some(signature) = message.signature.as_ref() else { + tracing::debug!("Signature verification failed: No signature provided"); + return false; + }; + let signature = match Signature::from_slice(signature) { + Ok(sig) => sig, + Err(_err) => { + tracing::debug!("Signature verification failed: Invalid signature"); + return false; + } + }; + + // Construct the signature bytes + let message_sig: types::Message = message.clone().into(); + let buf = postcard::to_stdvec(&message_sig).unwrap(); + let mut signature_bytes = SIGNING_PREFIX.to_vec(); + signature_bytes.extend_from_slice(&buf); + from.verify(&signature_bytes, &signature).is_ok() + } +} + +impl Serializer for GossipsubCodec { + type Error = io::Error; + + fn serialize(self: Pin<&mut Self>, data: &types::Rpc) -> Result { + postcard::experimental::serialized_size(data) + .and_then(|size| postcard::to_io(data, BytesMut::with_capacity(size).writer())) + .map(|writer| writer.into_inner().freeze()) + .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err)) + } +} + +impl Deserializer for GossipsubCodec { + type Error = std::io::Error; + + fn deserialize(self: Pin<&mut Self>, src: &BytesMut) -> Result { + let rpc: types::Rpc = postcard::from_bytes(&src) + .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?; + + // Store valid messages. + let mut messages = Vec::with_capacity(rpc.messages.len()); + // Store any invalid messages. + let mut invalid_messages = Vec::new(); + + for message in rpc.messages.into_iter() { + // Keep track of the type of invalid message. + let mut invalid_kind = None; + let mut verify_signature = false; + let mut verify_sequence_no = false; + let mut verify_source = false; + + match self.validation_mode { + ValidationMode::Strict => { + // Validate everything + verify_signature = true; + verify_sequence_no = true; + verify_source = true; + } + ValidationMode::Permissive => { + // If the fields exist, validate them + if message.signature.is_some() { + verify_signature = true; + } + if message.sequence_number.is_some() { + verify_sequence_no = true; + } + if message.source.is_some() { + verify_source = true; + } + } + ValidationMode::Anonymous => { + if message.signature.is_some() { + tracing::warn!( + "Signature field was non-empty and anonymous validation mode is set" + ); + invalid_kind = Some(ValidationError::SignaturePresent); + } else if message.sequence_number.is_some() { + tracing::warn!( + "Sequence number was non-empty and anonymous validation mode is set" + ); + invalid_kind = Some(ValidationError::SequenceNumberPresent); + } else if message.source.is_some() { + tracing::warn!("Message dropped. Message source was non-empty and anonymous validation mode is set"); + invalid_kind = Some(ValidationError::MessageSourcePresent); + } + } + ValidationMode::None => {} + } + + // If the initial validation logic failed, add the message to invalid messages and + // continue processing the others. 
+ if let Some(validation_error) = invalid_kind.take() { + let message = RawMessage { + source: None, // don't bother inform the application + data: message.data, + sequence_number: None, // don't inform the application + topic: message.topic, + signature: None, // don't inform the application + validated: false, + }; + invalid_messages.push((message, validation_error)); + // proceed to the next message + continue; + } + + // verify message signatures if required + if verify_signature && !GossipsubCodec::verify_signature(&message) { + tracing::warn!("Invalid signature for received message"); + + // Build the invalid message (ignoring further validation of sequence number + // and source) + let message = RawMessage { + source: None, // don't bother inform the application + data: message.data, + sequence_number: None, // don't inform the application + topic: message.topic, + signature: None, // don't inform the application + validated: false, + }; + invalid_messages.push((message, ValidationError::InvalidSignature)); + // proceed to the next message + continue; + } + + // ensure the sequence number is a u64 + let sequence_number = if verify_sequence_no { + if let Some(seq_no) = message.sequence_number { + // valid sequence number + Some(seq_no) + } else { + // sequence number was not present + tracing::debug!("Sequence number not present but expected"); + let message = RawMessage { + source: None, // don't bother inform the application + data: message.data, + sequence_number: None, // don't inform the application + topic: message.topic, + signature: message.signature, // don't inform the application + validated: false, + }; + invalid_messages.push((message, ValidationError::EmptySequenceNumber)); + continue; + } + } else { + // Do not verify the sequence number, consider it empty + None + }; + + // Verify the message source if required + let source = if verify_source { + if let Some(peer_id) = message.source { + // valid peer id + Some(peer_id) + } else { + None + } + } else { + None + }; + + // This message has passed all validation, add it to the validated messages. 
+ messages.push(RawMessage { + source, + data: message.data, + sequence_number, + topic: message.topic, + signature: message.signature, + validated: false, + }); + } + + Ok(HandlerEvent::Message { + rpc: Rpc { + messages, + subscriptions: rpc.subscriptions, + control_msgs: rpc.control_msgs, + }, + invalid_messages, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::Config; + use crate::{Behaviour, ConfigBuilder, TopicHash}; + use crate::{IdentTopic as Topic, Version}; + use iroh::net::key::SecretKey; + use quickcheck::*; + + #[derive(Clone, Debug)] + struct Message(RawMessage); + + impl Arbitrary for Message { + fn arbitrary(g: &mut Gen) -> Self { + let keypair = TestKeypair::arbitrary(g); + + // generate an arbitrary GossipsubMessage using the behaviour signing functionality + let config = Config::default(); + let mut gs: Behaviour = + Behaviour::new(crate::MessageAuthenticity::Signed(keypair.0), config).unwrap(); + let data = (0..gen_range(g, 10..10024)) + .map(|_| u8::arbitrary(g)) + .collect::>(); + let topic_id = TopicId::arbitrary(g).0; + let rt = tokio::runtime::Runtime::new().unwrap(); + let msg = rt.block_on(async { gs.build_raw_message(topic_id, data).await.unwrap() }); + Message(msg) + } + } + + #[derive(Clone, Debug)] + struct TopicId(TopicHash); + + impl Arbitrary for TopicId { + fn arbitrary(g: &mut Gen) -> Self { + let topic_string: String = (0..gen_range(g, 20..1024)) + .map(|_| char::arbitrary(g)) + .collect::(); + TopicId(Topic::new(topic_string).into()) + } + } + + #[derive(Clone)] + struct TestKeypair(SecretKey); + + impl Arbitrary for TestKeypair { + fn arbitrary(_g: &mut Gen) -> Self { + // Small enough to be inlined. + TestKeypair(SecretKey::generate()) + } + } + + fn gen_range(gen: &mut Gen, range: std::ops::Range) -> u32 { + u32::arbitrary(gen) % (range.end - range.start) + range.start + } + + impl std::fmt::Debug for TestKeypair { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TestKeypair") + .field("public", &self.0.public()) + .finish() + } + } + + // TODO + // #[test] + // /// Test that RPC messages can be encoded and decoded successfully. + // fn encode_decode() { + // fn prop(message: Message) { + // let message = message.0; + + // let rpc = Rpc { + // messages: vec![message.clone()], + // subscriptions: vec![], + // control_msgs: vec![], + // }; + + // let mut codec = GossipsubCodec::new(ValidationMode::Strict); + // let mut buf = BytesMut::new(); + // codec.encode(rpc.into_protobuf(), &mut buf).unwrap(); + // let decoded_rpc = codec.decode(&mut buf).unwrap().unwrap(); + // // mark as validated as its a published message + // match decoded_rpc { + // HandlerEvent::Message { mut rpc, .. 
} => { + // rpc.messages[0].validated = true; + + // assert_eq!(vec![message], rpc.messages); + // } + // _ => panic!("Must decode a message"), + // } + // } + + // QuickCheck::new().quickcheck(prop as fn(_) -> _) + // } +} diff --git a/gossipsub/src/rpc_proto.rs b/gossipsub/src/rpc_proto.rs new file mode 100644 index 0000000..0ab57a9 --- /dev/null +++ b/gossipsub/src/rpc_proto.rs @@ -0,0 +1,71 @@ +pub(crate) mod proto { + #![allow(unreachable_pub)] + include!("generated/mod.rs"); + pub use self::gossipsub::pb::{mod_RPC::SubOpts, *}; +} + +#[cfg(test)] +mod test { + use crate::rpc_proto::proto::compat; + use crate::IdentTopic as Topic; + use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; + use rand::Rng; + + #[test] + fn test_multi_topic_message_compatibility() { + let topic1 = Topic::new("t1").hash(); + let topic2 = Topic::new("t2").hash(); + + let new_message1 = super::proto::Message { + from: Some(PeerId::random().to_bytes()), + data: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + seqno: Some(rand::thread_rng().gen::<[u8; 8]>().to_vec()), + topic: topic1.clone().into_string(), + signature: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + key: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + }; + let old_message1 = compat::pb::Message { + from: Some(PeerId::random().to_bytes()), + data: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + seqno: Some(rand::thread_rng().gen::<[u8; 8]>().to_vec()), + topic_ids: vec![topic1.clone().into_string()], + signature: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + key: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + }; + let old_message2 = compat::pb::Message { + from: Some(PeerId::random().to_bytes()), + data: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + seqno: Some(rand::thread_rng().gen::<[u8; 8]>().to_vec()), + topic_ids: vec![topic1.clone().into_string(), topic2.clone().into_string()], + signature: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + key: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), + }; + + let mut new_message1b = Vec::with_capacity(new_message1.get_size()); + let mut writer = Writer::new(&mut new_message1b); + new_message1.write_message(&mut writer).unwrap(); + + let mut old_message1b = Vec::with_capacity(old_message1.get_size()); + let mut writer = Writer::new(&mut old_message1b); + old_message1.write_message(&mut writer).unwrap(); + + let mut old_message2b = Vec::with_capacity(old_message2.get_size()); + let mut writer = Writer::new(&mut old_message2b); + old_message2.write_message(&mut writer).unwrap(); + + let mut reader = BytesReader::from_bytes(&old_message1b[..]); + let new_message = + super::proto::Message::from_reader(&mut reader, &old_message1b[..]).unwrap(); + assert_eq!(new_message.topic, topic1.clone().into_string()); + + let mut reader = BytesReader::from_bytes(&old_message2b[..]); + let new_message = + super::proto::Message::from_reader(&mut reader, &old_message2b[..]).unwrap(); + assert_eq!(new_message.topic, topic2.into_string()); + + let mut reader = BytesReader::from_bytes(&new_message1b[..]); + let old_message = + compat::pb::Message::from_reader(&mut reader, &new_message1b[..]).unwrap(); + assert_eq!(old_message.topic_ids, vec![topic1.into_string()]); + } +} diff --git a/gossipsub/src/subscription_filter.rs b/gossipsub/src/subscription_filter.rs new file mode 100644 index 0000000..5838ecb --- /dev/null +++ b/gossipsub/src/subscription_filter.rs @@ -0,0 +1,415 @@ +use crate::types::Subscription; +use crate::TopicHash; +use 
std::collections::{BTreeSet, HashMap, HashSet}; + +pub trait TopicSubscriptionFilter { + /// Returns true iff the topic is of interest and we can subscribe to it. + fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool; + + /// Filters a list of incoming subscriptions and returns a filtered set + /// By default this deduplicates the subscriptions and calls + /// [`Self::filter_incoming_subscription_set`] on the filtered set. + fn filter_incoming_subscriptions<'a>( + &mut self, + subscriptions: &'a [Subscription], + currently_subscribed_topics: &BTreeSet, + ) -> Result, String> { + let mut filtered_subscriptions: HashMap = HashMap::new(); + for subscription in subscriptions { + use std::collections::hash_map::Entry::*; + match filtered_subscriptions.entry(subscription.topic_hash.clone()) { + Occupied(entry) => { + if entry.get().action != subscription.action { + entry.remove(); + } + } + Vacant(entry) => { + entry.insert(subscription); + } + } + } + self.filter_incoming_subscription_set( + filtered_subscriptions.into_values().collect(), + currently_subscribed_topics, + ) + } + + /// Filters a set of deduplicated subscriptions + /// By default this filters the elements based on [`Self::allow_incoming_subscription`]. + fn filter_incoming_subscription_set<'a>( + &mut self, + mut subscriptions: HashSet<&'a Subscription>, + _currently_subscribed_topics: &BTreeSet, + ) -> Result, String> { + subscriptions.retain(|s| { + if self.allow_incoming_subscription(s) { + true + } else { + tracing::debug!(subscription=?s, "Filtered incoming subscription"); + false + } + }); + Ok(subscriptions) + } + + /// Returns true iff we allow an incoming subscription. + /// This is used by the default implementation of filter_incoming_subscription_set to decide + /// whether to filter out a subscription or not. + /// By default this uses can_subscribe to decide the same for incoming subscriptions as for + /// outgoing ones. 
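+    ///
+    /// A sketch of a custom override (for a hypothetical filter type) that always allows
+    /// unsubscriptions while gating new subscriptions on [`Self::can_subscribe`]:
+    ///
+    /// ```ignore
+    /// fn allow_incoming_subscription(&mut self, subscription: &Subscription) -> bool {
+    ///     matches!(subscription.action, SubscriptionAction::Unsubscribe)
+    ///         || self.can_subscribe(&subscription.topic_hash)
+    /// }
+    /// ```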
+ fn allow_incoming_subscription(&mut self, subscription: &Subscription) -> bool { + self.can_subscribe(&subscription.topic_hash) + } +} + +//some useful implementers + +/// Allows all subscriptions +#[derive(Default, Clone)] +pub struct AllowAllSubscriptionFilter {} + +impl TopicSubscriptionFilter for AllowAllSubscriptionFilter { + fn can_subscribe(&mut self, _: &TopicHash) -> bool { + true + } +} + +/// Allows only whitelisted subscriptions +#[derive(Default, Clone)] +pub struct WhitelistSubscriptionFilter(pub HashSet); + +impl TopicSubscriptionFilter for WhitelistSubscriptionFilter { + fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool { + self.0.contains(topic_hash) + } +} + +/// Adds a max count to a given subscription filter +pub struct MaxCountSubscriptionFilter { + pub filter: T, + pub max_subscribed_topics: usize, + pub max_subscriptions_per_request: usize, +} + +impl TopicSubscriptionFilter for MaxCountSubscriptionFilter { + fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool { + self.filter.can_subscribe(topic_hash) + } + + fn filter_incoming_subscriptions<'a>( + &mut self, + subscriptions: &'a [Subscription], + currently_subscribed_topics: &BTreeSet, + ) -> Result, String> { + if subscriptions.len() > self.max_subscriptions_per_request { + return Err("too many subscriptions per request".into()); + } + let result = self + .filter + .filter_incoming_subscriptions(subscriptions, currently_subscribed_topics)?; + + use crate::types::SubscriptionAction::*; + + let mut unsubscribed = 0; + let mut new_subscribed = 0; + for s in &result { + let currently_contained = currently_subscribed_topics.contains(&s.topic_hash); + match s.action { + Unsubscribe => { + if currently_contained { + unsubscribed += 1; + } + } + Subscribe => { + if !currently_contained { + new_subscribed += 1; + } + } + } + } + + if new_subscribed + currently_subscribed_topics.len() + > self.max_subscribed_topics + unsubscribed + { + return Err("too many subscribed topics".into()); + } + + Ok(result) + } +} + +/// Combines two subscription filters +pub struct CombinedSubscriptionFilters { + pub filter1: T, + pub filter2: S, +} + +impl TopicSubscriptionFilter for CombinedSubscriptionFilters +where + T: TopicSubscriptionFilter, + S: TopicSubscriptionFilter, +{ + fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool { + self.filter1.can_subscribe(topic_hash) && self.filter2.can_subscribe(topic_hash) + } + + fn filter_incoming_subscription_set<'a>( + &mut self, + subscriptions: HashSet<&'a Subscription>, + currently_subscribed_topics: &BTreeSet, + ) -> Result, String> { + let intermediate = self + .filter1 + .filter_incoming_subscription_set(subscriptions, currently_subscribed_topics)?; + self.filter2 + .filter_incoming_subscription_set(intermediate, currently_subscribed_topics) + } +} + +pub struct CallbackSubscriptionFilter(pub T) +where + T: FnMut(&TopicHash) -> bool; + +impl TopicSubscriptionFilter for CallbackSubscriptionFilter +where + T: FnMut(&TopicHash) -> bool, +{ + fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool { + (self.0)(topic_hash) + } +} + +///A subscription filter that filters topics based on a regular expression. 
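+///
+/// For example (mirroring the unit test below):
+///
+/// ```ignore
+/// let mut filter = RegexSubscriptionFilter(regex::Regex::new("t.*t").unwrap());
+/// assert!(filter.can_subscribe(&TopicHash::from_raw("tt")));
+/// assert!(!filter.can_subscribe(&TopicHash::from_raw("abc")));
+/// ```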
+pub struct RegexSubscriptionFilter(pub regex::Regex); + +impl TopicSubscriptionFilter for RegexSubscriptionFilter { + fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool { + self.0.is_match(topic_hash.as_str()) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::types::SubscriptionAction::*; + + #[test] + fn test_filter_incoming_allow_all_with_duplicates() { + let mut filter = AllowAllSubscriptionFilter {}; + + let t1 = TopicHash::from_raw("t1"); + let t2 = TopicHash::from_raw("t2"); + + let old = BTreeSet::from_iter(vec![t1.clone()]); + let subscriptions = vec![ + Subscription { + action: Unsubscribe, + topic_hash: t1.clone(), + }, + Subscription { + action: Unsubscribe, + topic_hash: t2.clone(), + }, + Subscription { + action: Subscribe, + topic_hash: t2, + }, + Subscription { + action: Subscribe, + topic_hash: t1.clone(), + }, + Subscription { + action: Unsubscribe, + topic_hash: t1, + }, + ]; + + let result = filter + .filter_incoming_subscriptions(&subscriptions, &old) + .unwrap(); + assert_eq!(result, vec![&subscriptions[4]].into_iter().collect()); + } + + #[test] + fn test_filter_incoming_whitelist() { + let t1 = TopicHash::from_raw("t1"); + let t2 = TopicHash::from_raw("t2"); + + let mut filter = WhitelistSubscriptionFilter(HashSet::from_iter(vec![t1.clone()])); + + let old = Default::default(); + let subscriptions = vec![ + Subscription { + action: Subscribe, + topic_hash: t1, + }, + Subscription { + action: Subscribe, + topic_hash: t2, + }, + ]; + + let result = filter + .filter_incoming_subscriptions(&subscriptions, &old) + .unwrap(); + assert_eq!(result, vec![&subscriptions[0]].into_iter().collect()); + } + + #[test] + fn test_filter_incoming_too_many_subscriptions_per_request() { + let t1 = TopicHash::from_raw("t1"); + + let mut filter = MaxCountSubscriptionFilter { + filter: AllowAllSubscriptionFilter {}, + max_subscribed_topics: 100, + max_subscriptions_per_request: 2, + }; + + let old = Default::default(); + + let subscriptions = vec![ + Subscription { + action: Subscribe, + topic_hash: t1.clone(), + }, + Subscription { + action: Unsubscribe, + topic_hash: t1.clone(), + }, + Subscription { + action: Subscribe, + topic_hash: t1, + }, + ]; + + let result = filter.filter_incoming_subscriptions(&subscriptions, &old); + assert_eq!(result, Err("too many subscriptions per request".into())); + } + + #[test] + fn test_filter_incoming_too_many_subscriptions() { + let t: Vec<_> = (0..4) + .map(|i| TopicHash::from_raw(format!("t{i}"))) + .collect(); + + let mut filter = MaxCountSubscriptionFilter { + filter: AllowAllSubscriptionFilter {}, + max_subscribed_topics: 3, + max_subscriptions_per_request: 2, + }; + + let old = t[0..2].iter().cloned().collect(); + + let subscriptions = vec![ + Subscription { + action: Subscribe, + topic_hash: t[2].clone(), + }, + Subscription { + action: Subscribe, + topic_hash: t[3].clone(), + }, + ]; + + let result = filter.filter_incoming_subscriptions(&subscriptions, &old); + assert_eq!(result, Err("too many subscribed topics".into())); + } + + #[test] + fn test_filter_incoming_max_subscribed_valid() { + let t: Vec<_> = (0..5) + .map(|i| TopicHash::from_raw(format!("t{i}"))) + .collect(); + + let mut filter = MaxCountSubscriptionFilter { + filter: WhitelistSubscriptionFilter(t.iter().take(4).cloned().collect()), + max_subscribed_topics: 2, + max_subscriptions_per_request: 5, + }; + + let old = t[0..2].iter().cloned().collect(); + + let subscriptions = vec![ + Subscription { + action: Subscribe, + topic_hash: t[4].clone(), + }, + 
Subscription { + action: Subscribe, + topic_hash: t[2].clone(), + }, + Subscription { + action: Subscribe, + topic_hash: t[3].clone(), + }, + Subscription { + action: Unsubscribe, + topic_hash: t[0].clone(), + }, + Subscription { + action: Unsubscribe, + topic_hash: t[1].clone(), + }, + ]; + + let result = filter + .filter_incoming_subscriptions(&subscriptions, &old) + .unwrap(); + assert_eq!(result, subscriptions[1..].iter().collect()); + } + + #[test] + fn test_callback_filter() { + let t1 = TopicHash::from_raw("t1"); + let t2 = TopicHash::from_raw("t2"); + + let mut filter = CallbackSubscriptionFilter(|h| h.as_str() == "t1"); + + let old = Default::default(); + let subscriptions = vec![ + Subscription { + action: Subscribe, + topic_hash: t1, + }, + Subscription { + action: Subscribe, + topic_hash: t2, + }, + ]; + + let result = filter + .filter_incoming_subscriptions(&subscriptions, &old) + .unwrap(); + assert_eq!(result, vec![&subscriptions[0]].into_iter().collect()); + } + + #[test] + fn test_regex_subscription_filter() { + let t1 = TopicHash::from_raw("tt"); + let t2 = TopicHash::from_raw("et3t3te"); + let t3 = TopicHash::from_raw("abcdefghijklmnopqrsuvwxyz"); + + let mut filter = RegexSubscriptionFilter(regex::Regex::new("t.*t").unwrap()); + + let old = Default::default(); + let subscriptions = vec![ + Subscription { + action: Subscribe, + topic_hash: t1, + }, + Subscription { + action: Subscribe, + topic_hash: t2, + }, + Subscription { + action: Subscribe, + topic_hash: t3, + }, + ]; + + let result = filter + .filter_incoming_subscriptions(&subscriptions, &old) + .unwrap(); + assert_eq!(result, subscriptions[..2].iter().collect()); + } +} diff --git a/gossipsub/src/time_cache.rs b/gossipsub/src/time_cache.rs new file mode 100644 index 0000000..2ce5c9c --- /dev/null +++ b/gossipsub/src/time_cache.rs @@ -0,0 +1,202 @@ +//! This implements a time-based LRU cache for checking gossipsub message duplicates. + +use fnv::FnvHashMap; +use std::collections::hash_map::{ + self, + Entry::{Occupied, Vacant}, +}; +use std::collections::VecDeque; +use std::time::Duration; +use web_time::Instant; + +#[derive(Debug)] +struct ExpiringElement { + /// The element that expires + element: Element, + /// The expire time. + expires: Instant, +} + +#[derive(Debug)] +pub(crate) struct TimeCache { + /// Mapping a key to its value together with its latest expire time (can be updated through + /// reinserts). + map: FnvHashMap>, + /// An ordered list of keys by expires time. + list: VecDeque>, + /// The time elements remain in the cache. 
+ ttl: Duration, +} + +pub(crate) struct OccupiedEntry<'a, K, V> { + entry: hash_map::OccupiedEntry<'a, K, ExpiringElement>, +} + +impl<'a, K, V> OccupiedEntry<'a, K, V> +where + K: Eq + std::hash::Hash + Clone, +{ + pub(crate) fn into_mut(self) -> &'a mut V { + &mut self.entry.into_mut().element + } +} + +pub(crate) struct VacantEntry<'a, K, V> { + expiration: Instant, + entry: hash_map::VacantEntry<'a, K, ExpiringElement>, + list: &'a mut VecDeque>, +} + +impl<'a, K, V> VacantEntry<'a, K, V> +where + K: Eq + std::hash::Hash + Clone, +{ + pub(crate) fn insert(self, value: V) -> &'a mut V { + self.list.push_back(ExpiringElement { + element: self.entry.key().clone(), + expires: self.expiration, + }); + &mut self + .entry + .insert(ExpiringElement { + element: value, + expires: self.expiration, + }) + .element + } +} + +pub(crate) enum Entry<'a, K: 'a, V: 'a> { + Occupied(OccupiedEntry<'a, K, V>), + Vacant(VacantEntry<'a, K, V>), +} + +impl<'a, K: 'a, V: 'a> Entry<'a, K, V> +where + K: Eq + std::hash::Hash + Clone, +{ + pub(crate) fn or_default(self) -> &'a mut V + where + V: Default, + { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => entry.insert(V::default()), + } + } +} + +impl TimeCache +where + Key: Eq + std::hash::Hash + Clone, +{ + pub(crate) fn new(ttl: Duration) -> Self { + TimeCache { + map: FnvHashMap::default(), + list: VecDeque::new(), + ttl, + } + } + + fn remove_expired_keys(&mut self, now: Instant) { + while let Some(element) = self.list.pop_front() { + if element.expires > now { + self.list.push_front(element); + break; + } + if let Occupied(entry) = self.map.entry(element.element.clone()) { + if entry.get().expires <= now { + entry.remove(); + } + } + } + } + + pub(crate) fn entry(&mut self, key: Key) -> Entry { + let now = Instant::now(); + self.remove_expired_keys(now); + match self.map.entry(key) { + Occupied(entry) => Entry::Occupied(OccupiedEntry { entry }), + Vacant(entry) => Entry::Vacant(VacantEntry { + expiration: now + self.ttl, + entry, + list: &mut self.list, + }), + } + } + + /// Empties the entire cache. + #[cfg(test)] + pub(crate) fn clear(&mut self) { + self.map.clear(); + self.list.clear(); + } + + pub(crate) fn contains_key(&self, key: &Key) -> bool { + self.map.contains_key(key) + } +} + +#[derive(Debug)] +pub(crate) struct DuplicateCache(TimeCache); + +impl DuplicateCache +where + Key: Eq + std::hash::Hash + Clone, +{ + pub(crate) fn new(ttl: Duration) -> Self { + Self(TimeCache::new(ttl)) + } + + // Inserts new elements and removes any expired elements. + // + // If the key was not present this returns `true`. If the value was already present this + // returns `false`. 
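+    //
+    // A usage sketch:
+    //
+    //     let mut cache = DuplicateCache::new(Duration::from_secs(60));
+    //     assert!(cache.insert("msg-id"));  // first sighting is accepted
+    //     assert!(!cache.insert("msg-id")); // duplicate within the TTL is rejected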
+    pub(crate) fn insert(&mut self, key: Key) -> bool {
+        if let Entry::Vacant(entry) = self.0.entry(key) {
+            entry.insert(());
+            true
+        } else {
+            false
+        }
+    }
+
+    pub(crate) fn contains(&self, key: &Key) -> bool {
+        self.0.contains_key(key)
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn cache_added_entries_exist() {
+        let mut cache = DuplicateCache::new(Duration::from_secs(10));
+
+        cache.insert("t");
+        cache.insert("e");
+
+        // Should report that 't' and 'e' already exist
+        assert!(!cache.insert("t"));
+        assert!(!cache.insert("e"));
+    }
+
+    #[test]
+    fn cache_entries_expire() {
+        let mut cache = DuplicateCache::new(Duration::from_millis(100));
+
+        cache.insert("t");
+        assert!(!cache.insert("t"));
+        cache.insert("e");
+        //assert!(!cache.insert("t"));
+        assert!(!cache.insert("e"));
+        // sleep until cache expiry
+        std::thread::sleep(Duration::from_millis(101));
+        // add another element to trigger removal of the expired entries
+        cache.insert("s");
+
+        // "t" should have been removed from the cache
+        assert!(cache.insert("t"));
+    }
+}
diff --git a/gossipsub/src/topic.rs b/gossipsub/src/topic.rs
new file mode 100644
index 0000000..46d873b
--- /dev/null
+++ b/gossipsub/src/topic.rs
@@ -0,0 +1,106 @@
+use base64::prelude::*;
+use prometheus_client::encoding::EncodeLabelSet;
+use serde::{Deserialize, Serialize};
+use sha2::{Digest, Sha256};
+use std::fmt;
+
+/// A generic trait that can be extended for various hashing types for a topic.
+pub trait Hasher {
+    /// The function that takes a topic string and creates a topic hash.
+    fn hash(topic_string: String) -> TopicHash;
+}
+
+/// A type for representing topics that use the identity hash.
+#[derive(Debug, Clone)]
+pub struct IdentityHash {}
+impl Hasher for IdentityHash {
+    /// Creates a [`TopicHash`] as a raw string.
+    fn hash(topic_string: String) -> TopicHash {
+        TopicHash { hash: topic_string }
+    }
+}
+
+#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)]
+pub struct TopicDescriptor {
+    pub name: Option<String>,
+    // TODO:
+    // pub auth: Option,
+    // pub enc: Option,
+}
+
+#[derive(Debug, Clone)]
+pub struct Sha256Hash {}
+impl Hasher for Sha256Hash {
+    /// Creates a [`TopicHash`] by SHA256 hashing the topic then base64 encoding the
+    /// hash.
+    fn hash(topic_string: String) -> TopicHash {
+        let topic_descriptor = TopicDescriptor {
+            name: Some(topic_string),
+            // auth: None,
+            // enc: None,
+        };
+        let bytes = postcard::to_stdvec(&topic_descriptor).unwrap();
+        let hash = BASE64_STANDARD.encode(Sha256::digest(&bytes));
+        TopicHash { hash }
+    }
+}
+
+#[derive(
+    Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, EncodeLabelSet, Serialize, Deserialize,
+)]
+pub struct TopicHash {
+    /// The topic hash. Stored as a string to align with the protobuf API.
+    hash: String,
+}
+
+impl TopicHash {
+    pub fn from_raw(hash: impl Into<String>) -> TopicHash {
+        TopicHash { hash: hash.into() }
+    }
+
+    pub fn into_string(self) -> String {
+        self.hash
+    }
+
+    pub fn as_str(&self) -> &str {
+        &self.hash
+    }
+}
+
+/// A gossipsub topic.
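+///
+/// A construction sketch using the hashers defined above:
+///
+/// ```ignore
+/// let topic = Topic::<Sha256Hash>::new("example");
+/// let hash: TopicHash = topic.hash();
+/// println!("{topic} -> {hash}");
+/// ```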
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub struct Topic<H: Hasher> {
+    topic: String,
+    phantom_data: std::marker::PhantomData<H>,
+}
+
+impl<H: Hasher> From<Topic<H>> for TopicHash {
+    fn from(topic: Topic<H>) -> TopicHash {
+        topic.hash()
+    }
+}
+
+impl<H: Hasher> Topic<H> {
+    pub fn new(topic: impl Into<String>) -> Self {
+        Topic {
+            topic: topic.into(),
+            phantom_data: std::marker::PhantomData,
+        }
+    }
+
+    pub fn hash(&self) -> TopicHash {
+        H::hash(self.topic.clone())
+    }
+}
+
+impl<H: Hasher> fmt::Display for Topic<H> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.topic)
+    }
+}
+
+impl fmt::Display for TopicHash {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.hash)
+    }
+}
diff --git a/gossipsub/src/transform.rs b/gossipsub/src/transform.rs
new file mode 100644
index 0000000..d70f002
--- /dev/null
+++ b/gossipsub/src/transform.rs
@@ -0,0 +1,52 @@
+//! This trait allows for extended user-level decoding that can be applied to message-data before
+//! a message-id is calculated.
+//!
+//! This is primarily designed to allow applications to implement their own custom compression
+//! algorithms that can be topic-specific. Once the raw data is transformed the message-id is then
+//! calculated, allowing for applications to employ message-id functions post compression.
+
+use crate::{Message, RawMessage, TopicHash};
+
+/// A general trait of transforming a [`RawMessage`] into a [`Message`]. The
+/// [`RawMessage`] is obtained from the wire and the [`Message`] is used to
+/// calculate the [`crate::MessageId`] of the message and is what is sent to the application.
+///
+/// The inbound/outbound transforms must be inverses. Applying the inbound transform and then the
+/// outbound transform MUST leave the underlying data un-modified.
+///
+/// By default, this is the identity transform for all fields in [`Message`].
+pub trait DataTransform {
+    /// Takes a [`RawMessage`] received and converts it to a [`Message`].
+    fn inbound_transform(&self, raw_message: RawMessage) -> Result<Message, std::io::Error>;
+
+    /// Takes the data to be published (a topic and associated data) and transforms it. The
+    /// transformed data will then be used to create a [`crate::RawMessage`] to be sent to peers.
+    fn outbound_transform(
+        &self,
+        topic: &TopicHash,
+        data: Vec<u8>,
+    ) -> Result<Vec<u8>, std::io::Error>;
+}
+
+/// The default transform: the raw data is propagated as-is to the application layer gossipsub.
+#[derive(Default, Clone)]
+pub struct IdentityTransform;
+
+impl DataTransform for IdentityTransform {
+    fn inbound_transform(&self, raw_message: RawMessage) -> Result<Message, std::io::Error> {
+        Ok(Message {
+            source: raw_message.source,
+            data: raw_message.data,
+            sequence_number: raw_message.sequence_number,
+            topic: raw_message.topic,
+        })
+    }
+
+    fn outbound_transform(
+        &self,
+        _topic: &TopicHash,
+        data: Vec<u8>,
+    ) -> Result<Vec<u8>, std::io::Error> {
+        Ok(data)
+    }
+}
diff --git a/gossipsub/src/types.rs b/gossipsub/src/types.rs
new file mode 100644
index 0000000..0b26919
--- /dev/null
+++ b/gossipsub/src/types.rs
@@ -0,0 +1,256 @@
+//! A collection of types used by the Gossipsub system.
+use crate::{handler::HandlerIn, TopicHash};
+use iroh::net::NodeId;
+use std::fmt;
+use std::fmt::Debug;
+use tokio::{sync::mpsc, task::JoinHandle};
+
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Serialize, Deserialize)]
+/// Validation kinds from the application for received messages.
+pub enum MessageAcceptance {
+    /// The message is considered valid, and it should be delivered and forwarded to the network.
+ Accept, + /// The message is considered invalid, and it should be rejected and trigger the P₄ penalty. + Reject, + /// The message is neither delivered nor forwarded to the network, but the router does not + /// trigger the P₄ penalty. + Ignore, +} + +#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] +pub struct MessageId(pub Vec); + +impl MessageId { + pub fn new(value: &[u8]) -> Self { + Self(value.to_vec()) + } +} + +impl>> From for MessageId { + fn from(value: T) -> Self { + Self(value.into()) + } +} + +impl std::fmt::Display for MessageId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", hex_fmt::HexFmt(&self.0)) + } +} + +impl std::fmt::Debug for MessageId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "MessageId({})", hex_fmt::HexFmt(&self.0)) + } +} + +#[derive(Debug)] +pub(crate) struct PeerConnections { + /// Its current connections. + pub(crate) connections: Vec, + pub(crate) connection_task: JoinHandle<()>, + pub(crate) connection_sender: mpsc::Sender, +} + +/// A message received by the gossipsub system and stored locally in caches.. +#[derive(Clone, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)] +pub struct RawMessage { + /// Id of the peer that published this message. + pub source: Option, + + /// Content of the message. Its meaning is out of scope of this library. + pub data: Vec, + + /// A random sequence number. + pub sequence_number: Option, + + /// The topic this message belongs to + pub topic: TopicHash, + + /// The signature of the message if it's signed. + pub signature: Option>, + + /// Flag indicating if this message has been validated by the application or not. + pub validated: bool, +} + +impl From for Message { + fn from(value: RawMessage) -> Self { + Message { + source: value.source, + data: value.data, + sequence_number: value.sequence_number, + topic: value.topic, + } + } +} + +/// The message sent to the user after a [`RawMessage`] has been transformed by a +/// [`crate::DataTransform`]. +#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct Message { + /// Id of the peer that published this message. + pub source: Option, + + /// Content of the message. + pub data: Vec, + + /// A random sequence number. + pub sequence_number: Option, + + /// The topic this message belongs to + pub topic: TopicHash, +} + +impl fmt::Debug for Message { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Message") + .field( + "data", + &format_args!("{:<20}", &hex_fmt::HexFmt(&self.data)), + ) + .field("source", &self.source) + .field("sequence_number", &self.sequence_number) + .field("topic", &self.topic) + .finish() + } +} + +/// A subscription received by the gossipsub system. +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct Subscription { + /// Action to perform. + pub action: SubscriptionAction, + /// The topic from which to subscribe or unsubscribe. + pub topic_hash: TopicHash, +} + +/// Action that a subscription wants to perform. +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub enum SubscriptionAction { + /// The remote wants to subscribe to the given topic. + Subscribe, + /// The remote wants to unsubscribe from the given topic. 
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
+pub struct PeerInfo {
+    pub peer_id: Option<NodeId>,
+    //TODO add this when RFC: Signed Address Records is added to the spec (see pull request
+    // https://github.com/libp2p/specs/pull/217)
+    //pub signed_peer_record: ?,
+}
+
+/// A Control message received by the gossipsub system.
+#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
+pub enum ControlAction {
+    /// The node broadcasts known messages per topic - IHave control message.
+    IHave {
+        /// The topic of the messages.
+        topic_hash: TopicHash,
+        /// A list of known message ids (peer_id + sequence_number) as a string.
+        message_ids: Vec<MessageId>,
+    },
+    /// The node requests specific message ids (peer_id + sequence_number) - IWant control message.
+    IWant {
+        /// A list of known message ids (peer_id + sequence_number) as a string.
+        message_ids: Vec<MessageId>,
+    },
+    /// The node has been added to the mesh - Graft control message.
+    Graft {
+        /// The mesh topic the peer should be added to.
+        topic_hash: TopicHash,
+    },
+    /// The node has been removed from the mesh - Prune control message.
+    Prune {
+        /// The mesh topic the peer should be removed from.
+        topic_hash: TopicHash,
+        /// A list of peers to be proposed to the removed peer as peer exchange.
+        peers: Vec<PeerInfo>,
+        /// The backoff time in seconds before reconnecting is allowed.
+        backoff: Option<u64>,
+    },
+}
+
+/// An outbound Gossipsub RPC message.
+#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
+pub enum RpcOut {
+    /// Publish a Gossipsub message on the network.
+    Publish(RawMessage),
+    /// Forward a Gossipsub message to the network.
+    Forward(RawMessage),
+    /// Subscribe to a topic.
+    Subscribe(TopicHash),
+    /// Unsubscribe from a topic.
+    Unsubscribe(TopicHash),
+    /// A Gossipsub control message.
+    Control(ControlAction),
+}
+
+impl From<RpcOut> for Rpc {
+    fn from(rpc: RpcOut) -> Self {
+        match rpc {
+            RpcOut::Publish(message) => Rpc {
+                subscriptions: Vec::new(),
+                messages: vec![message],
+                control_msgs: Vec::new(),
+            },
+            RpcOut::Forward(message) => Rpc {
+                messages: vec![message],
+                subscriptions: Vec::new(),
+                control_msgs: Vec::new(),
+            },
+            RpcOut::Subscribe(topic) => Rpc {
+                messages: Vec::new(),
+                subscriptions: vec![Subscription {
+                    action: SubscriptionAction::Subscribe,
+                    topic_hash: topic,
+                }],
+                control_msgs: Vec::new(),
+            },
+            RpcOut::Unsubscribe(topic) => Rpc {
+                messages: Vec::new(),
+                subscriptions: vec![Subscription {
+                    action: SubscriptionAction::Unsubscribe,
+                    topic_hash: topic,
+                }],
+                control_msgs: Vec::new(),
+            },
+            RpcOut::Control(action) => Rpc {
+                messages: Vec::new(),
+                subscriptions: Vec::new(),
+                control_msgs: vec![action],
+            },
+        }
+    }
+}
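+
+// For example (illustrative), a subscribe request becomes an RPC carrying a
+// single subscription entry and no messages or control messages:
+//
+// let rpc: Rpc = RpcOut::Subscribe(topic_hash).into();
+// assert!(rpc.messages.is_empty() && rpc.control_msgs.is_empty());
+// assert_eq!(rpc.subscriptions.len(), 1);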
+
+/// An RPC received/sent.
+#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
+pub struct Rpc {
+    /// List of messages that were part of this RPC query.
+    pub messages: Vec<RawMessage>,
+    /// List of subscriptions.
+    pub subscriptions: Vec<Subscription>,
+    /// List of Gossipsub control messages.
+    pub control_msgs: Vec<ControlAction>,
+}
+
+impl fmt::Debug for Rpc {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut b = f.debug_struct("GossipsubRpc");
+        if !self.messages.is_empty() {
+            b.field("messages", &self.messages);
+        }
+        if !self.subscriptions.is_empty() {
+            b.field("subscriptions", &self.subscriptions);
+        }
+        if !self.control_msgs.is_empty() {
+            b.field("control_msgs", &self.control_msgs);
+        }
+        b.finish()
+    }
+}
diff --git a/gossipsub/tests/smoke.rs b/gossipsub/tests/smoke.rs
new file mode 100644
index 0000000..1a9bb84
--- /dev/null
+++ b/gossipsub/tests/smoke.rs
@@ -0,0 +1,210 @@
+use futures::{stream::SelectAll, StreamExt};
+use gossipsub::{MessageAuthenticity, ValidationMode, GOSSIPSUB_1_1_0_PROTOCOL};
+use quickcheck::{QuickCheck, TestResult};
+use rand::{seq::SliceRandom, SeedableRng};
+use std::{sync::Arc, task::Poll, time::Duration};
+use tokio_stream::wrappers::ReceiverStream;
+use tracing_subscriber::EnvFilter;
+
+struct Graph {
+    nodes: Vec<iroh::node::MemNode>,
+    subscriptions: SelectAll<ReceiverStream<gossipsub::Event>>,
+}
+
+impl Graph {
+    async fn new_connected(num_nodes: usize, seed: u64) -> Graph {
+        assert!(num_nodes > 0, "expecting at least one node");
+
+        let mut rng = rand::rngs::StdRng::seed_from_u64(seed);
+
+        let mut subscriptions = SelectAll::new();
+        let mut not_connected_nodes = Vec::new();
+        for _i in 0..num_nodes {
+            let (node, sub) = build_node().await;
+            not_connected_nodes.push((node, sub));
+        }
+        let mut connected_nodes = vec![not_connected_nodes.pop().unwrap().0];
+
+        for (next, sub) in not_connected_nodes {
+            let connected = connected_nodes
+                .choose_mut(&mut rng)
+                .expect("at least one connected node");
+
+            let next_gossip = next
+                .get_protocol::<gossipsub::Behaviour>(GOSSIPSUB_1_1_0_PROTOCOL)
+                .unwrap();
+            next_gossip
+                .connect(connected.node_addr().await.unwrap())
+                .await
+                .unwrap();
+            subscriptions.push(sub);
+            connected_nodes.push(next);
+        }
+
+        Graph {
+            nodes: connected_nodes,
+            subscriptions,
+        }
+    }
+
+    /// Polls the graph and passes each event into the provided `FnMut` until the closure returns
+    /// `true`.
+    ///
+    /// Returns `true` on success and `false` on timeout.
+    async fn wait_for<F: FnMut(&gossipsub::Event) -> bool>(&mut self, mut f: F) -> bool {
+        let condition = async {
+            loop {
+                let event = self.subscriptions.select_next_some().await;
+                if f(&event) {
+                    break;
+                }
+            }
+        };
+
+        match tokio::time::timeout(Duration::from_secs(10), condition).await {
+            Ok(()) => true,
+            Err(_) => false,
+        }
+    }
+
+    /// Polls the graph until `Poll::Pending` is obtained, completing the underlying polls.
+    async fn drain_events(&mut self) {
+        let fut = futures::future::poll_fn(|cx| loop {
+            match self.subscriptions.poll_next_unpin(cx) {
+                Poll::Ready(_) => {}
+                Poll::Pending => return Poll::Ready(()),
+            }
+        });
+        tokio::time::timeout(Duration::from_secs(10), fut)
+            .await
+            .unwrap();
+    }
+}
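+
+// Illustrative usage of the harness above (assuming a tokio runtime):
+//
+// let mut graph = Graph::new_connected(3, 42).await;
+// let ok = graph
+//     .wait_for(|ev| matches!(ev, gossipsub::Event::Subscribed { .. }))
+//     .await;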
+
+async fn build_node() -> (iroh::node::MemNode, ReceiverStream<gossipsub::Event>) {
+    // NOTE: The graph of created nodes can be disconnected from the mesh point of view as nodes
+    // can reach their d_lo value and not add other nodes to their mesh. To speed up this test, we
+    // reduce the default values of the heartbeat, so that all nodes will receive gossip in a
+    // timely fashion.
+
+    let mut builder = iroh::node::Node::memory().build().await.unwrap();
+
+    let endpoint = builder.endpoint().clone();
+    let node_id = endpoint.node_id();
+
+    let config = gossipsub::ConfigBuilder::default()
+        .heartbeat_initial_delay(Duration::from_millis(100))
+        .heartbeat_interval(Duration::from_millis(200))
+        .history_length(10)
+        .history_gossip(10)
+        .validation_mode(ValidationMode::Permissive)
+        .build()
+        .unwrap();
+
+    let gossip: gossipsub::Behaviour =
+        gossipsub::Behaviour::new(MessageAuthenticity::Author(node_id), config).unwrap();
+    let sub = ReceiverStream::new(gossip.subscribe_events().await.unwrap());
+    let gossip = Arc::new(gossip);
+    let node = builder
+        .accept(GOSSIPSUB_1_1_0_PROTOCOL, gossip.clone())
+        .spawn()
+        .await
+        .unwrap();
+    (node, sub)
+}
+
+#[test]
+fn multi_hop_propagation() {
+    let _ = tracing_subscriber::fmt()
+        .with_env_filter(EnvFilter::from_default_env())
+        .try_init();
+
+    fn prop(num_nodes: u8, seed: u64) -> TestResult {
+        if !(2..=50).contains(&num_nodes) {
+            return TestResult::discard();
+        }
+
+        tracing::debug!(number_of_nodes=%num_nodes, seed=%seed);
+
+        let rt = tokio::runtime::Runtime::new().unwrap();
+        rt.block_on(async move {
+            let mut graph = Graph::new_connected(num_nodes as usize, seed).await;
+            let number_nodes = graph.nodes.len();
+
+            // Subscribe each node to the same topic.
+            let topic = gossipsub::IdentTopic::new("test-net");
+            for node in &mut graph.nodes {
+                node.get_protocol::<gossipsub::Behaviour>(GOSSIPSUB_1_1_0_PROTOCOL)
+                    .unwrap()
+                    .subscribe(topic.clone())
+                    .await
+                    .unwrap();
+            }
+
+            // Wait for all nodes to be subscribed.
+            let mut subscribed = 0;
+
+            let all_subscribed = graph
+                .wait_for(move |ev| {
+                    if let gossipsub::Event::Subscribed { .. } = ev {
+                        subscribed += 1;
+                        if subscribed == (number_nodes - 1) * 2 {
+                            return true;
+                        }
+                    }
+
+                    false
+                })
+                .await;
+
+            if !all_subscribed {
+                return TestResult::error(format!(
+                    "Timed out waiting for all nodes to subscribe but only have {subscribed:?}/{num_nodes:?}.",
+                ));
+            }
+
+            // The publish can occur before all grafts have completed, which would cause this
+            // test to fail. We drain all pending events before publishing.
+            graph.drain_events().await;
+
+            // Publish a single message.
+            graph
+                .nodes
+                .first()
+                .unwrap()
+                .get_protocol::<gossipsub::Behaviour>(GOSSIPSUB_1_1_0_PROTOCOL)
+                .unwrap()
+                .publish(topic, vec![1, 2, 3])
+                .await
+                .unwrap();
+
+            // Wait for all nodes to receive the published message.
+            let mut received_msgs = 0;
+            let all_received = graph
+                .wait_for(move |ev| {
+                    if let gossipsub::Event::Message { .. } = ev {
+                        received_msgs += 1;
+                        if received_msgs == number_nodes - 1 {
+                            return true;
+                        }
+                    }
+
+                    false
+                })
+                .await;
+
+            if !all_received {
+                return TestResult::error(format!(
+                    "Timed out waiting for all nodes to receive the msg but only have {received_msgs:?}/{num_nodes:?}.",
+                ));
+            }
+
+            TestResult::passed()
+        })
+    }
+
+    QuickCheck::new()
+        .max_tests(5)
+        .quickcheck(prop as fn(u8, u64) -> TestResult)
+}
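+
+// Note: the property test is capped at 5 QuickCheck cases (`max_tests` above),
+// presumably to keep runtime down, since each case spins up a full in-memory
+// iroh network. It can be run in isolation with `cargo test --test smoke`.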