diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 06d46050284..69f74d5cdbb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -140,6 +140,41 @@ jobs: command: clippy args: -- -A clippy::mutable_key_type -A clippy::type_complexity + run-benchmarks: + runs-on: ubuntu-latest + steps: + + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.6.0 + with: + access_token: ${{ github.token }} + + - uses: actions/checkout@v2 + + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Cache CARGO_HOME + uses: actions/cache@v2 + with: + path: ~/.cargo + key: cargo-home-${{ hashFiles('Cargo.toml') }} + + - name: Cache cargo build + uses: actions/cache@v2 + with: + path: target + key: cargo-build-target-${{ hashFiles('Cargo.toml') }} + + - name: Run cargo bench + uses: actions-rs/cargo@v1 + with: + command: bench + args: --workspace + integration-test: name: Integration tests runs-on: ubuntu-latest diff --git a/CHANGELOG.md b/CHANGELOG.md index 8cfa61efc24..faeb4f58898 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,10 +23,26 @@ - [`parity-multiaddr` CHANGELOG](misc/multiaddr/CHANGELOG.md) - [`libp2p-core-derive` CHANGELOG](misc/core-derive/CHANGELOG.md) -# Version 0.32.0 [unreleased] +# Version 0.33.0 [unreleased] + +- Update `libp2p-core` and all dependent crates. + +# Version 0.32.2 [2020-12-10] + +- Update `libp2p-websocket`. + +# Version 0.32.1 [2020-12-09] + +- Update minimum patch version of `libp2p-websocket`. + +# Version 0.32.0 [2020-12-08] + +- Update `libp2p-request-response`. - Update to `libp2p-mdns-0.26`. +- Update `libp2p-websocket` minimum patch version. + # Version 0.31.2 [2020-12-02] - Bump minimum `libp2p-core` patch version. 
diff --git a/Cargo.toml b/Cargo.toml index 04f5dafdb88..149d9d01980 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p" edition = "2018" description = "Peer-to-peer networking library" -version = "0.32.0" +version = "0.33.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -61,22 +61,22 @@ atomic = "0.5.0" bytes = "0.5" futures = "0.3.1" lazy_static = "1.2" -libp2p-core = { version = "0.25.2", path = "core" } +libp2p-core = { version = "0.26.0", path = "core" } libp2p-core-derive = { version = "0.21.0", path = "misc/core-derive" } -libp2p-floodsub = { version = "0.25.0", path = "protocols/floodsub", optional = true } -libp2p-gossipsub = { version = "0.25.0", path = "./protocols/gossipsub", optional = true } -libp2p-identify = { version = "0.25.0", path = "protocols/identify", optional = true } -libp2p-kad = { version = "0.26.0", path = "protocols/kad", optional = true } -libp2p-mplex = { version = "0.25.0", path = "muxers/mplex", optional = true } -libp2p-noise = { version = "0.27.0", path = "protocols/noise", optional = true } -libp2p-ping = { version = "0.25.0", path = "protocols/ping", optional = true } -libp2p-plaintext = { version = "0.25.0", path = "protocols/plaintext", optional = true } +libp2p-floodsub = { version = "0.26.0", path = "protocols/floodsub", optional = true } +libp2p-gossipsub = { version = "0.26.0", path = "./protocols/gossipsub", optional = true } +libp2p-identify = { version = "0.26.0", path = "protocols/identify", optional = true } +libp2p-kad = { version = "0.27.0", path = "protocols/kad", optional = true } +libp2p-mplex = { version = "0.26.0", path = "muxers/mplex", optional = true } +libp2p-noise = { version = "0.28.0", path = "protocols/noise", optional = true } +libp2p-ping = { version = "0.26.0", path = "protocols/ping", optional = true } +libp2p-plaintext = { version = "0.26.0", path = "protocols/plaintext", optional = true } libp2p-pnet = { 
version = "0.19.2", path = "protocols/pnet", optional = true } -libp2p-request-response = { version = "0.6.0", path = "protocols/request-response", optional = true } -libp2p-swarm = { version = "0.25.0", path = "swarm" } -libp2p-uds = { version = "0.25.0", path = "transports/uds", optional = true } -libp2p-wasm-ext = { version = "0.25.0", path = "transports/wasm-ext", optional = true } -libp2p-yamux = { version = "0.28.0", path = "muxers/yamux", optional = true } +libp2p-request-response = { version = "0.8.0", path = "protocols/request-response", optional = true } +libp2p-swarm = { version = "0.26.0", path = "swarm" } +libp2p-uds = { version = "0.26.0", path = "transports/uds", optional = true } +libp2p-wasm-ext = { version = "0.26.0", path = "transports/wasm-ext", optional = true } +libp2p-yamux = { version = "0.29.0", path = "muxers/yamux", optional = true } multiaddr = { package = "parity-multiaddr", version = "0.10.0", path = "misc/multiaddr" } parking_lot = "0.11.0" pin-project = "1.0.0" @@ -84,11 +84,11 @@ smallvec = "1.0" wasm-timer = "0.2.4" [target.'cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))'.dependencies] -libp2p-deflate = { version = "0.25.0", path = "protocols/deflate", optional = true } -libp2p-dns = { version = "0.25.0", path = "transports/dns", optional = true } -libp2p-mdns = { version = "0.26.0", path = "protocols/mdns", optional = true } -libp2p-tcp = { version = "0.25.1", path = "transports/tcp", optional = true } -libp2p-websocket = { version = "0.26.0", path = "transports/websocket", optional = true } +libp2p-deflate = { version = "0.26.0", path = "protocols/deflate", optional = true } +libp2p-dns = { version = "0.26.0", path = "transports/dns", optional = true } +libp2p-mdns = { version = "0.27.0", path = "protocols/mdns", optional = true } +libp2p-tcp = { version = "0.26.0", path = "transports/tcp", optional = true } +libp2p-websocket = { version = "0.27.0", path = "transports/websocket", optional = 
true } [dev-dependencies] async-std = "1.6.2" diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 09e654d2a6a..327a65268d9 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,3 +1,8 @@ +# 0.26.0 [unreleased] + +- Make `PeerId` be `Copy`, including small `PeerId` API changes. + [PR 1874](https://github.com/libp2p/rust-libp2p/pull/1874/). + # 0.25.2 [2020-12-02] - Require `multistream-select-0.9.1`. diff --git a/core/Cargo.toml b/core/Cargo.toml index ce2aba164be..db365e02321 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-core" edition = "2018" description = "Core traits and structs of libp2p" -version = "0.25.2" +version = "0.26.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,7 +12,6 @@ categories = ["network-programming", "asynchronous"] [dependencies] asn1_der = "0.6.1" bs58 = "0.4.0" -bytes = "0.5" ed25519-dalek = "1.0.1" either = "1.5" fnv = "1.0" @@ -41,6 +40,7 @@ ring = { version = "0.16.9", features = ["alloc", "std"], default-features = fal [dev-dependencies] async-std = "1.6.2" +criterion = "0.3" libp2p-mplex = { path = "../muxers/mplex" } libp2p-noise = { path = "../protocols/noise" } libp2p-tcp = { path = "../transports/tcp", features = ["async-std"] } @@ -54,3 +54,7 @@ prost-build = "0.6" [features] default = ["secp256k1"] secp256k1 = ["libsecp256k1"] + +[[bench]] +name = "peer_id" +harness = false diff --git a/core/benches/peer_id.rs b/core/benches/peer_id.rs new file mode 100644 index 00000000000..32b2c1f5e3f --- /dev/null +++ b/core/benches/peer_id.rs @@ -0,0 +1,68 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use libp2p_core::{identity, PeerId}; + +fn from_bytes(c: &mut Criterion) { + let peer_id_bytes = identity::Keypair::generate_ed25519() + .public() + .into_peer_id() + .to_bytes(); + + c.bench_function("from_bytes", |b| { + b.iter(|| { + black_box(PeerId::from_bytes(&peer_id_bytes).unwrap()); + }) + }); +} + +fn clone(c: &mut Criterion) { + let peer_id = identity::Keypair::generate_ed25519() + .public() + .into_peer_id(); + + c.bench_function("clone", |b| { + b.iter(|| { + black_box(peer_id.clone()); + }) + }); +} + +fn sort_vec(c: &mut Criterion) { + let peer_ids: Vec<_> = (0..100) + .map(|_| { + identity::Keypair::generate_ed25519() + .public() + .into_peer_id() + }) + .collect(); + + c.bench_function("sort_vec", |b| { + b.iter(|| { + let mut peer_ids = peer_ids.clone(); + peer_ids.sort_unstable(); + black_box(peer_ids); + }) + }); +} + +criterion_group!(peer_id, from_bytes, clone, sort_vec); +criterion_main!(peer_id); diff --git a/core/src/network/peer.rs b/core/src/network/peer.rs index a1039334d44..4e179e546c3 100644 --- a/core/src/network/peer.rs +++ b/core/src/network/peer.rs @@ -555,7 +555,7 @@ where } self.network.pool.add(connection, connected) - .map(|_id| ConnectedPeer { + .map(move |_id| ConnectedPeer { network: self.network, peer_id: self.peer_id, }) diff --git a/core/src/peer_id.rs b/core/src/peer_id.rs index b81eeefcfd3..5f9a8f78629 100644 --- a/core/src/peer_id.rs +++ b/core/src/peer_id.rs @@ -19,11 +19,10 @@ // DEALINGS IN THE SOFTWARE. 
use crate::PublicKey; -use bytes::Bytes; -use thiserror::Error; -use multihash::{Code, Multihash, MultihashDigest}; +use multihash::{Code, Error, Multihash, MultihashDigest}; use rand::Rng; -use std::{convert::TryFrom, borrow::Borrow, fmt, hash, str::FromStr, cmp}; +use std::{convert::TryFrom, fmt, str::FromStr}; +use thiserror::Error; /// Public keys with byte-lengths smaller than `MAX_INLINE_KEY_LENGTH` will be /// automatically used as the peer id using an identity multihash. @@ -32,10 +31,9 @@ const MAX_INLINE_KEY_LENGTH: usize = 42; /// Identifier of a peer of the network. /// /// The data is a multihash of the public key of the peer. -// TODO: maybe keep things in decoded version? -#[derive(Clone, Eq)] +#[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct PeerId { - multihash: Bytes, + multihash: Multihash, } impl fmt::Debug for PeerId { @@ -52,21 +50,6 @@ impl fmt::Display for PeerId { } } -impl cmp::PartialOrd for PeerId { - fn partial_cmp(&self, other: &Self) -> Option { - Some(Ord::cmp(self, other)) - } -} - -impl cmp::Ord for PeerId { - fn cmp(&self, other: &Self) -> cmp::Ordering { - // must use borrow, because as_bytes is not consistent with equality - let lhs: &[u8] = self.borrow(); - let rhs: &[u8] = other.borrow(); - lhs.cmp(rhs) - } -} - impl PeerId { /// Builds a `PeerId` from a public key. pub fn from_public_key(key: PublicKey) -> PeerId { @@ -78,18 +61,15 @@ impl PeerId { Code::Sha2_256 }; - let multihash = hash_algorithm.digest(&key_enc).to_bytes().into(); + let multihash = hash_algorithm.digest(&key_enc); PeerId { multihash } } - /// Checks whether `data` is a valid `PeerId`. If so, returns the `PeerId`. If not, returns - /// back the data as an error. - pub fn from_bytes(data: Vec) -> Result> { - match Multihash::from_bytes(&data) { - Ok(multihash) => PeerId::from_multihash(multihash).map_err(|_| data), - Err(_err) => Err(data), - } + /// Parses a `PeerId` from bytes. 
+ pub fn from_bytes(data: &[u8]) -> Result { + Ok(PeerId::from_multihash(Multihash::from_bytes(&data)?) + .map_err(|mh| Error::UnsupportedCode(mh.code()))?) } /// Tries to turn a `Multihash` into a `PeerId`. @@ -99,9 +79,9 @@ impl PeerId { /// peer ID, it is returned as an `Err`. pub fn from_multihash(multihash: Multihash) -> Result { match Code::try_from(multihash.code()) { - Ok(Code::Sha2_256) => Ok(PeerId { multihash: multihash.to_bytes().into() }), + Ok(Code::Sha2_256) => Ok(PeerId { multihash }), Ok(Code::Identity) if multihash.digest().len() <= MAX_INLINE_KEY_LENGTH - => Ok(PeerId { multihash: multihash.to_bytes().into() }), + => Ok(PeerId { multihash }), _ => Err(multihash) } } @@ -113,31 +93,18 @@ impl PeerId { let peer_id = rand::thread_rng().gen::<[u8; 32]>(); PeerId { multihash: Multihash::wrap(Code::Identity.into(), &peer_id) - .expect("The digest size is never too large").to_bytes().into() + .expect("The digest size is never too large") } } /// Returns a raw bytes representation of this `PeerId`. - /// - /// **NOTE:** This byte representation is not necessarily consistent with - /// equality of peer IDs. That is, two peer IDs may be considered equal - /// while having a different byte representation as per `into_bytes`. - pub fn into_bytes(self) -> Vec { - self.multihash.to_vec() - } - - /// Returns a raw bytes representation of this `PeerId`. - /// - /// **NOTE:** This byte representation is not necessarily consistent with - /// equality of peer IDs. That is, two peer IDs may be considered equal - /// while having a different byte representation as per `as_bytes`. - pub fn as_bytes(&self) -> &[u8] { - &self.multihash + pub fn to_bytes(&self) -> Vec { + self.multihash.to_bytes() } /// Returns a base-58 encoded string of this `PeerId`. 
pub fn to_base58(&self) -> String { - bs58::encode(self.borrow() as &[u8]).into_string() + bs58::encode(self.to_bytes()).into_string() } /// Checks whether the public key passed as parameter matches the public key of this `PeerId`. @@ -145,22 +112,10 @@ impl PeerId { /// Returns `None` if this `PeerId`s hash algorithm is not supported when encoding the /// given public key, otherwise `Some` boolean as the result of an equality check. pub fn is_public_key(&self, public_key: &PublicKey) -> Option { - let multihash = Multihash::from_bytes(&self.multihash) - .expect("Internal multihash is always a valid"); - let alg = Code::try_from(multihash.code()) + let alg = Code::try_from(self.multihash.code()) .expect("Internal multihash is always a valid `Code`"); let enc = public_key.clone().into_protobuf_encoding(); - Some(alg.digest(&enc) == multihash) - } -} - -impl hash::Hash for PeerId { - fn hash(&self, state: &mut H) - where - H: hash::Hasher - { - let digest = self.borrow() as &[u8]; - hash::Hash::hash(digest, state) + Some(alg.digest(&enc) == self.multihash) } } @@ -174,7 +129,7 @@ impl TryFrom> for PeerId { type Error = Vec; fn try_from(value: Vec) -> Result { - PeerId::from_bytes(value) + PeerId::from_bytes(&value).map_err(|_| value) } } @@ -186,33 +141,21 @@ impl TryFrom for PeerId { } } -impl PartialEq for PeerId { - fn eq(&self, other: &PeerId) -> bool { - let self_digest = self.borrow() as &[u8]; - let other_digest = other.borrow() as &[u8]; - self_digest == other_digest - } -} - -impl Borrow<[u8]> for PeerId { - fn borrow(&self) -> &[u8] { +impl AsRef for PeerId { + fn as_ref(&self) -> &Multihash { &self.multihash } } -/// **NOTE:** This byte representation is not necessarily consistent with -/// equality of peer IDs. That is, two peer IDs may be considered equal -/// while having a different byte representation as per `AsRef<[u8]>`. 
-impl AsRef<[u8]> for PeerId { - fn as_ref(&self) -> &[u8] { - self.as_bytes() +impl From for Multihash { + fn from(peer_id: PeerId) -> Self { + peer_id.multihash } } -impl From for Multihash { +impl From for Vec { fn from(peer_id: PeerId) -> Self { - Multihash::from_bytes(&peer_id.multihash) - .expect("PeerIds always contain valid Multihashes") + peer_id.to_bytes() } } @@ -230,7 +173,7 @@ impl FromStr for PeerId { #[inline] fn from_str(s: &str) -> Result { let bytes = bs58::decode(s).into_vec()?; - PeerId::from_bytes(bytes).map_err(|_| ParseError::MultiHash) + PeerId::from_bytes(&bytes).map_err(|_| ParseError::MultiHash) } } @@ -248,7 +191,7 @@ mod tests { #[test] fn peer_id_into_bytes_then_from_bytes() { let peer_id = identity::Keypair::generate_ed25519().public().into_peer_id(); - let second = PeerId::from_bytes(peer_id.clone().into_bytes()).unwrap(); + let second = PeerId::from_bytes(&peer_id.to_bytes()).unwrap(); assert_eq!(peer_id, second); } @@ -263,7 +206,7 @@ mod tests { fn random_peer_id_is_valid() { for _ in 0 .. 
5000 { let peer_id = PeerId::random(); - assert_eq!(peer_id, PeerId::from_bytes(peer_id.clone().into_bytes()).unwrap()); + assert_eq!(peer_id, PeerId::from_bytes(&peer_id.to_bytes()).unwrap()); } } } diff --git a/examples/gossipsub-chat.rs b/examples/gossipsub-chat.rs index 1bfa386a8d6..2a03b5051e3 100644 --- a/examples/gossipsub-chat.rs +++ b/examples/gossipsub-chat.rs @@ -51,7 +51,7 @@ use env_logger::{Builder, Env}; use futures::prelude::*; use libp2p::gossipsub::MessageId; use libp2p::gossipsub::{ - GossipsubEvent, IdentTopic as Topic, MessageAuthenticity, RawGossipsubMessage, ValidationMode, + GossipsubEvent, GossipsubMessage, IdentTopic as Topic, MessageAuthenticity, ValidationMode, }; use libp2p::{gossipsub, identity, PeerId}; use std::collections::hash_map::DefaultHasher; @@ -79,7 +79,7 @@ fn main() -> Result<(), Box> { // Create a Swarm to manage peers and events let mut swarm = { // To content-address message, we can take the hash of message and use it as an ID. - let message_id_fn = |message: &RawGossipsubMessage| { + let message_id_fn = |message: &GossipsubMessage| { let mut s = DefaultHasher::new(); message.data.hash(&mut s); MessageId::from(s.finish().to_string()) @@ -94,7 +94,7 @@ fn main() -> Result<(), Box> { .build() .expect("Valid config"); // build a gossipsub network behaviour - let mut gossipsub = + let mut gossipsub: gossipsub::Gossipsub = gossipsub::Gossipsub::new(MessageAuthenticity::Signed(local_key), gossipsub_config) .expect("Correct configuration"); @@ -154,7 +154,7 @@ fn main() -> Result<(), Box> { message, } => println!( "Got message: {} with id: {} from peer: {:?}", - String::from_utf8_lossy(message.data()), + String::from_utf8_lossy(&message.data), id, peer_id ), diff --git a/examples/ipfs-private.rs b/examples/ipfs-private.rs index 7afd9a5cb68..f53419d267e 100644 --- a/examples/ipfs-private.rs +++ b/examples/ipfs-private.rs @@ -34,14 +34,16 @@ use async_std::{io, task}; use futures::{future, prelude::*}; use libp2p::{ - 
core::{either::EitherTransport, transport, transport::upgrade::Version, muxing::StreamMuxerBox}, + core::{ + either::EitherTransport, muxing::StreamMuxerBox, transport, transport::upgrade::Version, + }, gossipsub::{self, Gossipsub, GossipsubConfigBuilder, GossipsubEvent, MessageAuthenticity}, identify::{Identify, IdentifyEvent}, identity, multiaddr::Protocol, + noise, ping::{self, Ping, PingConfig, PingEvent}, pnet::{PnetConfig, PreSharedKey}, - noise, swarm::NetworkBehaviourEventProcess, tcp::TcpConfig, yamux::YamuxConfig, @@ -61,9 +63,10 @@ use std::{ pub fn build_transport( key_pair: identity::Keypair, psk: Option, -) -> transport::Boxed<(PeerId, StreamMuxerBox)> -{ - let noise_keys = noise::Keypair::::new().into_authentic(&key_pair).unwrap(); +) -> transport::Boxed<(PeerId, StreamMuxerBox)> { + let noise_keys = noise::Keypair::::new() + .into_authentic(&key_pair) + .unwrap(); let noise_config = noise::NoiseConfig::xx(noise_keys).into_authenticated(); let yamux_config = YamuxConfig::default(); @@ -184,7 +187,7 @@ fn main() -> Result<(), Box> { message, } => println!( "Got message: {} with id: {} from peer: {:?}", - String::from_utf8_lossy(message.data()), + String::from_utf8_lossy(&message.data), id, peer_id ), diff --git a/muxers/mplex/CHANGELOG.md b/muxers/mplex/CHANGELOG.md index 2a1126104d7..4daf1b43b46 100644 --- a/muxers/mplex/CHANGELOG.md +++ b/muxers/mplex/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.26.0 [unreleased] + +- Update `libp2p-core`. + # 0.25.0 [2020-11-25] - Update `libp2p-core`. 
diff --git a/muxers/mplex/Cargo.toml b/muxers/mplex/Cargo.toml index 40b3d2ae963..19105aec5b0 100644 --- a/muxers/mplex/Cargo.toml +++ b/muxers/mplex/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-mplex" edition = "2018" description = "Mplex multiplexing protocol for libp2p" -version = "0.25.0" +version = "0.26.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,7 +13,7 @@ categories = ["network-programming", "asynchronous"] bytes = "0.5" futures = "0.3.1" futures_codec = "0.4.1" -libp2p-core = { version = "0.25.0", path = "../../core" } +libp2p-core = { version = "0.26.0", path = "../../core" } log = "0.4" nohash-hasher = "0.2" parking_lot = "0.11" diff --git a/muxers/yamux/CHANGELOG.md b/muxers/yamux/CHANGELOG.md index a499d470f2d..2f38f555200 100644 --- a/muxers/yamux/CHANGELOG.md +++ b/muxers/yamux/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.29.0 [unreleased] + +- Update `libp2p-core`. + # 0.28.0 [2020-11-25] - Update `libp2p-core`. diff --git a/muxers/yamux/Cargo.toml b/muxers/yamux/Cargo.toml index 7376495ab10..d4423e9168f 100644 --- a/muxers/yamux/Cargo.toml +++ b/muxers/yamux/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-yamux" edition = "2018" description = "Yamux multiplexing protocol for libp2p" -version = "0.28.0" +version = "0.29.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,7 +11,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.25.0", path = "../../core" } +libp2p-core = { version = "0.26.0", path = "../../core" } parking_lot = "0.11" thiserror = "1.0" yamux = "0.8.0" diff --git a/protocols/deflate/CHANGELOG.md b/protocols/deflate/CHANGELOG.md index b0f3e1760e5..a30234b9d61 100644 --- a/protocols/deflate/CHANGELOG.md +++ b/protocols/deflate/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.26.0 [unreleased] + +- Update `libp2p-core`. 
+ # 0.25.0 [2020-11-25] - Update `libp2p-core`. diff --git a/protocols/deflate/Cargo.toml b/protocols/deflate/Cargo.toml index b960433c411..3ba000c56ab 100644 --- a/protocols/deflate/Cargo.toml +++ b/protocols/deflate/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-deflate" edition = "2018" description = "Deflate encryption protocol for libp2p" -version = "0.25.0" +version = "0.26.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,7 +11,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.25.0", path = "../../core" } +libp2p-core = { version = "0.26.0", path = "../../core" } flate2 = "1.0" [dev-dependencies] diff --git a/protocols/floodsub/CHANGELOG.md b/protocols/floodsub/CHANGELOG.md index 927dfe608f4..be3a6c65323 100644 --- a/protocols/floodsub/CHANGELOG.md +++ b/protocols/floodsub/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.26.0 [unreleased] + +- Update `libp2p-swarm` and `libp2p-core`. + # 0.25.0 [2020-11-25] - Update `libp2p-swarm` and `libp2p-core`. 
diff --git a/protocols/floodsub/Cargo.toml b/protocols/floodsub/Cargo.toml index 3b87710e619..65df9746025 100644 --- a/protocols/floodsub/Cargo.toml +++ b/protocols/floodsub/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-floodsub" edition = "2018" description = "Floodsub protocol for libp2p" -version = "0.25.0" +version = "0.26.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,8 +13,8 @@ categories = ["network-programming", "asynchronous"] cuckoofilter = "0.5.0" fnv = "1.0" futures = "0.3.1" -libp2p-core = { version = "0.25.0", path = "../../core" } -libp2p-swarm = { version = "0.25.0", path = "../../swarm" } +libp2p-core = { version = "0.26.0", path = "../../core" } +libp2p-swarm = { version = "0.26.0", path = "../../swarm" } log = "0.4" prost = "0.6.1" rand = "0.7" diff --git a/protocols/floodsub/src/protocol.rs b/protocols/floodsub/src/protocol.rs index 046c72d856b..bd0b5b2a646 100644 --- a/protocols/floodsub/src/protocol.rs +++ b/protocols/floodsub/src/protocol.rs @@ -61,7 +61,7 @@ where let mut messages = Vec::with_capacity(rpc.publish.len()); for publish in rpc.publish.into_iter() { messages.push(FloodsubMessage { - source: PeerId::from_bytes(publish.from.unwrap_or_default()).map_err(|_| { + source: PeerId::from_bytes(&publish.from.unwrap_or_default()).map_err(|_| { FloodsubDecodeError::InvalidPeerId })?, data: publish.data.unwrap_or_default(), @@ -179,7 +179,7 @@ impl FloodsubRpc { publish: self.messages.into_iter() .map(|msg| { rpc_proto::Message { - from: Some(msg.source.into_bytes()), + from: Some(msg.source.to_bytes()), data: Some(msg.data), seqno: Some(msg.sequence_number), topic_ids: msg.topics diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index fc01624c327..1040fa2714a 100644 --- a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.26.0 [unreleased] + +- Update `libp2p-swarm` and `libp2p-core`. 
+ # 0.25.0 [2020-11-25] - Update `libp2p-swarm` and `libp2p-core`. diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index dfc8a0d245d..6e9ec310921 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-gossipsub" edition = "2018" description = "Gossipsub protocol for libp2p" -version = "0.25.0" +version = "0.26.0" authors = ["Age Manning "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,15 +11,16 @@ categories = ["network-programming", "asynchronous"] [features] regex-filter = ["regex"] +snappy = ["snap"] default = [] [dependencies] -libp2p-swarm = { version = "0.25.0", path = "../../swarm" } -libp2p-core = { version = "0.25.0", path = "../../core" } bytes = "0.5.6" byteorder = "1.3.4" fnv = "1.0.7" futures = "0.3.5" +libp2p-swarm = { version = "0.26.0", path = "../../swarm" } +libp2p-core = { version = "0.26.0", path = "../../core" } rand = "0.7.3" futures_codec = "0.4.1" wasm-timer = "0.2.4" @@ -31,6 +32,7 @@ smallvec = "1.4.2" prost = "0.6.1" hex_fmt = "0.3.0" regex = { version = "1.4.0", optional = true } +snap = {version = "1.0.3", optional = true } [dev-dependencies] async-std = "1.6.3" diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index ca3cb8f77f5..4dcf17ec360 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -38,8 +38,8 @@ use rand::{seq::SliceRandom, thread_rng}; use wasm_timer::{Instant, Interval}; use libp2p_core::{ - connection::ConnectionId, identity::error::SigningError, identity::Keypair, - multiaddr::Protocol::Ip4, multiaddr::Protocol::Ip6, ConnectedPoint, Multiaddr, PeerId, + connection::ConnectionId, identity::Keypair, multiaddr::Protocol::Ip4, + multiaddr::Protocol::Ip6, ConnectedPoint, Multiaddr, PeerId, }; use libp2p_swarm::{ DialPeerCondition, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, PollParameters, @@ -47,8 +47,9 
@@ use libp2p_swarm::{ }; use crate::backoff::BackoffStorage; -use crate::config::{Config, ValidationMode}; -use crate::error::{PublishError, SubscriptionError}; +use crate::compression::{MessageCompression, NoCompression}; +use crate::config::{GossipsubConfig, ValidationMode}; +use crate::error::{PublishError, SubscriptionError, ValidationError}; use crate::gossip_promises::GossipPromises; use crate::handler::{GossipsubHandler, HandlerEvent}; use crate::mcache::MessageCache; @@ -58,14 +59,12 @@ use crate::subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFi use crate::time_cache::{DuplicateCache, TimeCache}; use crate::topic::{Hasher, Topic, TopicHash}; use crate::types::{ - FastMessageId, GenericGossipsubMessage, GossipsubControlAction, GossipsubMessageWithId, - GossipsubSubscription, GossipsubSubscriptionAction, MessageAcceptance, MessageId, PeerInfo, - RawGossipsubMessage, + FastMessageId, GossipsubControlAction, GossipsubMessage, GossipsubSubscription, + GossipsubSubscriptionAction, MessageAcceptance, MessageId, PeerInfo, RawGossipsubMessage, }; use crate::types::{GossipsubRpc, PeerKind}; use crate::{rpc_proto, TopicScoreParams}; -use std::cmp::Ordering::Equal; -use std::fmt::Debug; +use std::{cmp::Ordering::Equal, fmt::Debug}; #[cfg(test)] mod tests; @@ -75,7 +74,7 @@ mod tests; /// Without signing, a number of privacy preserving modes can be selected. /// /// NOTE: The default validation settings are to require signatures. The [`ValidationMode`] -/// should be updated in the [`Config`] to allow for unsigned messages. +/// should be updated in the [`GossipsubConfig`] to allow for unsigned messages. #[derive(Clone)] pub enum MessageAuthenticity { /// Message signing is enabled. The author will be the owner of the key and the sequence number @@ -83,12 +82,12 @@ pub enum MessageAuthenticity { Signed(Keypair), /// Message signing is disabled. /// - /// The specified `PeerId` will be used as the author of all published messages. 
The sequence + /// The specified [`PeerId`] will be used as the author of all published messages. The sequence /// number will be randomized. Author(PeerId), /// Message signing is disabled. /// - /// A random `PeerId` will be used when publishing each message. The sequence number will be + /// A random [`PeerId`] will be used when publishing each message. The sequence number will be /// randomized. RandomAuthor, /// Message signing is disabled. @@ -96,7 +95,7 @@ pub enum MessageAuthenticity { /// The author of the message and the sequence numbers are excluded from the message. /// /// NOTE: Excluding these fields may make these messages invalid by other nodes who - /// enforce validation of these fields. See [`ValidationMode`] in the `Config` + /// enforce validation of these fields. See [`ValidationMode`] in the [`GossipsubConfig`] /// for how to customise this for rust-libp2p gossipsub. A custom `message_id` /// function will need to be set to prevent all messages from a peer being filtered /// as duplicates. @@ -116,16 +115,16 @@ impl MessageAuthenticity { /// Event that can be emitted by the gossipsub behaviour. #[derive(Debug)] -pub enum Event> { +pub enum GossipsubEvent { /// A message has been received. Message { /// The peer that forwarded us this message. propagation_source: PeerId, - /// The `MessageId` of the message. This should be referenced by the application when + /// The [`MessageId`] of the message. This should be referenced by the application when /// validating a message (if required). message_id: MessageId, - /// The message itself. - message: GossipsubMessageWithId, + /// The decompressed message itself. + message: GossipsubMessage, }, /// A remote subscribed to a topic. Subscribed { @@ -142,8 +141,6 @@ pub enum Event> { topic: TopicHash, }, } -// For general use cases -pub type GossipsubEvent = Event>; /// A data structure for storing configuration for publishing messages. See [`MessageAuthenticity`] /// for further details. 
@@ -196,19 +193,22 @@ impl From for PublishConfig { } } -type GossipsubNetworkBehaviourAction = NetworkBehaviourAction, Event>; +type GossipsubNetworkBehaviourAction = NetworkBehaviourAction, GossipsubEvent>; /// Network behaviour that handles the gossipsub protocol. /// -/// NOTE: Initialisation requires a [`MessageAuthenticity`] and [`Config`] instance. If message signing is -/// disabled, the [`ValidationMode`] in the config should be adjusted to an appropriate level to -/// accept unsigned messages. -pub struct GenericGossipsub, Filter: TopicSubscriptionFilter> { +/// NOTE: Initialisation requires a [`MessageAuthenticity`] and [`GossipsubConfig`] instance. If +/// message signing is disabled, the [`ValidationMode`] in the config should be adjusted to an +/// appropriate level to accept unsigned messages. +pub struct Gossipsub< + C: MessageCompression = NoCompression, + F: TopicSubscriptionFilter = AllowAllSubscriptionFilter, +> { /// Configuration providing gossipsub performance parameters. - config: Config, + config: GossipsubConfig, /// Events that need to be yielded to the outside when polling. - events: VecDeque>, + events: VecDeque, /// Pools non-urgent control messages between heartbeats. control_pool: HashMap>, @@ -251,7 +251,7 @@ pub struct GenericGossipsub, Filter: TopicSubscriptionFilter> { backoffs: BackoffStorage, /// Message cache for the last few heartbeats. - mcache: MessageCache, + mcache: MessageCache, /// Heartbeat interval stream. heartbeat: Interval, @@ -288,33 +288,22 @@ pub struct GenericGossipsub, Filter: TopicSubscriptionFilter> { fast_messsage_id_cache: TimeCache, /// The filter used to handle message subscriptions. - subscription_filter: Filter, -} - -// For general use and convenience. 
-pub type Gossipsub = GenericGossipsub, AllowAllSubscriptionFilter>; + subscription_filter: F, -impl GenericGossipsub -where - T: Clone + Into> + From> + AsRef<[u8]>, - F: TopicSubscriptionFilter + Default, -{ - /// Creates a `GenericGossipsub` struct given a set of parameters specified via a `GenericGossipsubConfig`. - pub fn new(privacy: MessageAuthenticity, config: Config) -> Result { - Self::new_with_subscription_filter(privacy, config, F::default()) - } + /// Marker to pin the generic compression algorithm. + message_compression: C, } -impl GenericGossipsub +impl Gossipsub where - T: Clone + Into> + From> + AsRef<[u8]>, - F: TopicSubscriptionFilter, + C: MessageCompression + Default, + F: TopicSubscriptionFilter + Default, { - /// Creates a `GenericGossipsub` struct given a set of parameters specified via a `Config`. - pub fn new_with_subscription_filter( + /// Creates a [`Gossipsub`] struct given a set of parameters specified via a + /// [`GossipsubConfig`]. This has no subscription filter and uses no compression. + pub fn new( privacy: MessageAuthenticity, - config: Config, - subscription_filter: F, + config: GossipsubConfig, ) -> Result { // Set up the router given the configuration settings. @@ -324,7 +313,7 @@ where // Set up message publishing parameters. - Ok(GenericGossipsub { + Ok(Gossipsub { events: VecDeque::new(), control_pool: HashMap::new(), publish_config: privacy.into(), @@ -356,10 +345,55 @@ where peer_protocols: HashMap::new(), published_message_ids: DuplicateCache::new(config.published_message_ids_cache_time()), config, - subscription_filter, + subscription_filter: F::default(), + message_compression: C::default(), }) } + /// Creates a [`Gossipsub`] struct given a set of parameters specified via a + /// [`GossipsubConfig`] and a custom subscription filter. 
+ pub fn new_with_subscription_filter( + privacy: MessageAuthenticity, + config: GossipsubConfig, + subscription_filter: F, + ) -> Result { + let mut gs = Self::new(privacy, config)?; + gs.subscription_filter = subscription_filter; + Ok(gs) + } + + /// Creates a [`Gossipsub`] struct given a set of parameters specified via a + /// [`GossipsubConfig`] and a custom message compression algorithm. + pub fn new_with_compression( + privacy: MessageAuthenticity, + config: GossipsubConfig, + message_compression: C, + ) -> Result { + let mut gs = Self::new(privacy, config)?; + gs.message_compression = message_compression; + Ok(gs) + } + + /// Creates a [`Gossipsub`] struct given a set of parameters specified via a + /// [`GossipsubConfig`] and a custom subscription filter and message compression algorithm. + pub fn new_with_subscription_filter_and_compression( + privacy: MessageAuthenticity, + config: GossipsubConfig, + subscription_filter: F, + message_compression: C, + ) -> Result { + let mut gs = Self::new(privacy, config)?; + gs.subscription_filter = subscription_filter; + gs.message_compression = message_compression; + Ok(gs) + } +} + +impl Gossipsub +where + C: MessageCompression, + F: TopicSubscriptionFilter, +{ /// Lists the hashes of the topics we are currently subscribed to. 
pub fn topics(&self) -> impl Iterator { self.mesh.keys() @@ -493,22 +527,28 @@ where pub fn publish( &mut self, topic: Topic, - data: impl Into, + data: impl Into>, ) -> Result { - let message = self.build_message(topic.into(), data.into())?; - let msg_id = self.config.message_id(&message); + let data = data.into(); + let raw_message = self.build_raw_message(topic.into(), &data)?; + + // calculate the message id from the uncompressed data + let msg_id = self.config.message_id(&GossipsubMessage { + source: raw_message.source.clone(), + data, // the uncompressed form + sequence_number: raw_message.sequence_number, + topic: raw_message.topic.clone(), + }); let event = Arc::new( GossipsubRpc { subscriptions: Vec::new(), - messages: vec![RawGossipsubMessage::from(message.clone())], + messages: vec![raw_message.clone()], control_msgs: Vec::new(), } .into_protobuf(), ); - let message = GossipsubMessageWithId::new(message, msg_id.clone()); - // check that the size doesn't exceed the max transmission size if event.encoded_len() > self.config.max_transmit_size() { // NOTE: The size limit can be reached by excessive topics or an excessive message. @@ -530,7 +570,7 @@ where } // If the message isn't a duplicate add it to the memcache. - self.mcache.put(message.clone()); + self.mcache.put(&msg_id, raw_message.clone()); debug!("Publishing message: {:?}", msg_id); @@ -542,12 +582,13 @@ where } } + let topic_hash = raw_message.topic.clone(); + // If we are not flood publishing forward the message to mesh peers. 
let mesh_peers_sent = - !self.config.flood_publish() && self.forward_msg(message.clone(), None)?; + !self.config.flood_publish() && self.forward_msg(&msg_id, raw_message, None)?; let mut recipient_peers = HashSet::new(); - let topic_hash = &message.topic; if let Some(set) = self.topic_peers.get(&topic_hash) { if self.config.flood_publish() { // Forward to all peers above score and all explicit peers @@ -589,7 +630,7 @@ where } else { // We have no fanout peers, select mesh_n of them and add them to the fanout let mesh_n = self.config.mesh_n(); - let new_peers = Self::get_random_peers( + let new_peers = get_random_peers( &self.topic_peers, &self.peer_protocols, &topic_hash, @@ -631,10 +672,10 @@ where Ok(msg_id) } - /// This function should be called when `config.validate_messages()` is `true` after the message - /// got validated by the caller. Messages are stored in the ['Memcache'] and validation is - /// expected to be fast enough that the messages should still exist in the cache. There are - /// three possible validation outcomes and the outcome is given in acceptance. + /// This function should be called when [`GossipsubConfig::validate_messages()`] is `true` after + /// the message got validated by the caller. Messages are stored in the ['Memcache'] and + /// validation is expected to be fast enough that the messages should still exist in the cache. + /// There are three possible validation outcomes and the outcome is given in acceptance. /// /// If acceptance = [`MessageAcceptance::Accept`] the message will get propagated to the /// network. The `propagation_source` parameter indicates who the message was received by and @@ -652,37 +693,42 @@ where /// This should only be called once per message. 
pub fn report_message_validation_result( &mut self, - message_id: &MessageId, + msg_id: &MessageId, propagation_source: &PeerId, acceptance: MessageAcceptance, ) -> Result { let reject_reason = match acceptance { MessageAcceptance::Accept => { - let message = match self.mcache.validate(message_id) { - Some(message) => message.clone(), + let raw_message = match self.mcache.validate(msg_id) { + Some(raw_message) => raw_message.clone(), None => { warn!( "Message not in cache. Ignoring forwarding. Message Id: {}", - message_id + msg_id ); return Ok(false); } }; - self.forward_msg(message, Some(propagation_source))?; + self.forward_msg(msg_id, raw_message, Some(propagation_source))?; return Ok(true); } MessageAcceptance::Reject => RejectReason::ValidationFailed, MessageAcceptance::Ignore => RejectReason::ValidationIgnored, }; - if let Some(message) = self.mcache.remove(message_id) { + if let Some(raw_message) = self.mcache.remove(msg_id) { // Tell peer_score about reject if let Some((peer_score, ..)) = &mut self.peer_score { - peer_score.reject_message(propagation_source, &message, reject_reason); + peer_score.reject_message( + propagation_source, + msg_id, + &raw_message.topic, + reject_reason, + ); } Ok(true) } else { - warn!("Rejected message not in cache. Message Id: {}", message_id); + warn!("Rejected message not in cache. Message Id: {}", msg_id); Ok(false) } } @@ -752,7 +798,7 @@ where /// Sets scoring parameters for a topic. /// - /// The `with_peer_score()` must first be called to initialise peer scoring. + /// The [`Self::with_peer_score()`] must first be called to initialise peer scoring. 
pub fn set_topic_params( &mut self, topic: Topic, @@ -825,7 +871,7 @@ where // check if we need to get more peers, which we randomly select if added_peers.len() < self.config.mesh_n() { // get the peers - let new_peers = Self::get_random_peers( + let new_peers = get_random_peers( &self.topic_peers, &self.peer_protocols, topic_hash, @@ -898,7 +944,7 @@ where // Select peers for peer exchange let peers = if do_px { - Self::get_random_peers( + get_random_peers( &self.topic_peers, &self.peer_protocols, &topic_hash, @@ -1382,14 +1428,15 @@ where /// Applies some basic checks to whether this message is valid. Does not apply user validation /// checks. - fn mesage_with_id_is_valid( + fn message_is_valid( &mut self, - msg: &mut GossipsubMessageWithId, + msg_id: &MessageId, + raw_message: &mut RawGossipsubMessage, propagation_source: &PeerId, ) -> bool { debug!( "Handling message: {:?} from peer: {}", - msg.message_id(), + msg_id, propagation_source.to_string() ); @@ -1400,14 +1447,19 @@ where propagation_source ); if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { - peer_score.reject_message(propagation_source, &msg, RejectReason::BlackListedPeer); - gossip_promises.reject_message(msg.message_id(), &RejectReason::BlackListedPeer); + peer_score.reject_message( + propagation_source, + msg_id, + &raw_message.topic, + RejectReason::BlackListedPeer, + ); + gossip_promises.reject_message(msg_id, &RejectReason::BlackListedPeer); } return false; } // Also reject any message that originated from a blacklisted peer - if let Some(source) = msg.source.as_ref() { + if let Some(source) = raw_message.source.as_ref() { if self.blacklisted_peers.contains(source) { debug!( "Rejecting message from peer {} because of blacklisted source: {}", @@ -1416,11 +1468,11 @@ where if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { peer_score.reject_message( propagation_source, - &msg, + msg_id, + &raw_message.topic, RejectReason::BlackListedSource, ); - 
gossip_promises - .reject_message(msg.message_id(), &RejectReason::BlackListedSource); + gossip_promises.reject_message(msg_id, &RejectReason::BlackListedSource); } return false; } @@ -1430,26 +1482,31 @@ where // This will allow the message to be gossiped without explicitly calling // `validate_message`. if !self.config.validate_messages() { - msg.validated = true; + raw_message.validated = true; } // reject messages claiming to be from ourselves but not locally published let self_published = !self.config.allow_self_origin() && if let Some(own_id) = self.publish_config.get_own_id() { - own_id != propagation_source && msg.source.as_ref().map_or(false, |s| s == own_id) + own_id != propagation_source + && raw_message.source.as_ref().map_or(false, |s| s == own_id) } else { - self.published_message_ids.contains(msg.message_id()) + self.published_message_ids.contains(&msg_id) }; if self_published { debug!( "Dropping message {} claiming to be from self but forwarded from {}", - msg.message_id(), - propagation_source + msg_id, propagation_source ); if let Some((peer_score, _, _, gossip_promises)) = &mut self.peer_score { - peer_score.reject_message(propagation_source, &msg, RejectReason::SelfOrigin); - gossip_promises.reject_message(msg.message_id(), &RejectReason::SelfOrigin); + peer_score.reject_message( + propagation_source, + msg_id, + &raw_message.topic, + RejectReason::SelfOrigin, + ); + gossip_promises.reject_message(msg_id, &RejectReason::SelfOrigin); } return false; } @@ -1460,85 +1517,139 @@ where /// Handles a newly received [`RawGossipsubMessage`]. /// /// Forwards the message to all peers in the mesh. 
- fn handle_received_message(&mut self, msg: RawGossipsubMessage, propagation_source: &PeerId) { - let fast_message_id = self.config.fast_message_id(&msg); + fn handle_received_message( + &mut self, + mut raw_message: RawGossipsubMessage, + propagation_source: &PeerId, + ) { + let fast_message_id = self.config.fast_message_id(&raw_message); if let Some(fast_message_id) = fast_message_id.as_ref() { if let Some(msg_id) = self.fast_messsage_id_cache.get(fast_message_id) { - let mut msg = GossipsubMessageWithId::new(msg, msg_id.clone()); - self.mesage_with_id_is_valid(&mut msg, propagation_source); + let msg_id = msg_id.clone(); + self.message_is_valid(&msg_id, &mut raw_message, propagation_source); if let Some((peer_score, ..)) = &mut self.peer_score { - peer_score.duplicated_message(propagation_source, &msg); + peer_score.duplicated_message(propagation_source, &msg_id, &raw_message.topic); } return; } } - let msg = GenericGossipsubMessage::from(msg); - let msg_id = self.config.message_id(&msg); - let mut msg = GossipsubMessageWithId::new(msg, msg_id); - if !self.mesage_with_id_is_valid(&mut msg, propagation_source) { + // Try and decompress the message. If it fails, consider it invalid. + let message = match GossipsubMessage::from_raw( + &self.message_compression, + raw_message.clone(), + self.config.max_transmit_size(), + ) { + Ok(message) => message, + Err(e) => { + debug!("Invalid message. Decompression error: {:?}", e); + // Reject the message and return + self.handle_invalid_message( + propagation_source, + raw_message, + ValidationError::DecompressionFailed, + ); + return; + } + }; + + let msg_id = self.config.message_id(&message); + + // Check the validity of the message + // Peers get penalized if this message is invalid. We don't add it to the duplicate cache + // and instead continually penalize peers that repeatedly send this message. 
+ if !self.message_is_valid(&msg_id, &mut raw_message, propagation_source) { return; } - // Add the message to the duplicate caches and memcache. + // Add the message to the duplicate caches if let Some(fast_message_id) = fast_message_id { // add id to cache self.fast_messsage_id_cache .entry(fast_message_id) - .or_insert_with(|| msg.message_id().clone()); + .or_insert_with(|| msg_id.clone()); } - if !self.duplicate_cache.insert(msg.message_id().clone()) { + if !self.duplicate_cache.insert(msg_id.clone()) { debug!( "Message already received, ignoring. Message: {}", - msg.message_id() + msg_id.clone() ); if let Some((peer_score, ..)) = &mut self.peer_score { - peer_score.duplicated_message(propagation_source, &msg); + peer_score.duplicated_message(propagation_source, &msg_id, &message.topic); } return; } debug!( "Put message {:?} in duplicate_cache and resolve promises", - msg.message_id() + msg_id ); // Tells score that message arrived (but is maybe not fully validated yet). - // Consider message as delivered for gossip promises. + // Consider the message as delivered for gossip promises. 
if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { - peer_score.validate_message(propagation_source, &msg); - gossip_promises.message_delivered(msg.message_id()); + peer_score.validate_message(propagation_source, &msg_id, &message.topic); + gossip_promises.message_delivered(&msg_id); } // Add the message to our memcache - self.mcache.put(msg.clone()); + self.mcache.put(&msg_id, raw_message.clone()); // Dispatch the message to the user if we are subscribed to any of the topics - if self.mesh.contains_key(&msg.topic) { + if self.mesh.contains_key(&message.topic) { debug!("Sending received message to user"); - self.events - .push_back(NetworkBehaviourAction::GenerateEvent(Event::Message { + self.events.push_back(NetworkBehaviourAction::GenerateEvent( + GossipsubEvent::Message { propagation_source: propagation_source.clone(), - message_id: msg.message_id().clone(), - message: msg.clone(), - })); + message_id: msg_id.clone(), + message, + }, + )); } else { debug!( "Received message on a topic we are not subscribed to: {:?}", - msg.topic + message.topic ); return; } // forward the message to mesh peers, if no validation is required if !self.config.validate_messages() { - let msg_id = msg.message_id().clone(); - if self.forward_msg(msg, Some(propagation_source)).is_err() { + if self + .forward_msg(&msg_id, raw_message, Some(propagation_source)) + .is_err() + { error!("Failed to forward message. Too large"); } debug!("Completed message handling for message: {:?}", msg_id); } } + // Handles invalid messages received. 
+ fn handle_invalid_message( + &mut self, + propagation_source: &PeerId, + raw_message: RawGossipsubMessage, + validation_error: ValidationError, + ) { + if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { + let reason = RejectReason::ValidationError(validation_error); + let fast_message_id_cache = &self.fast_messsage_id_cache; + if let Some(msg_id) = self + .config + .fast_message_id(&raw_message) + .and_then(|id| fast_message_id_cache.get(&id)) + { + peer_score.reject_message(propagation_source, msg_id, &raw_message.topic, reason); + gossip_promises.reject_message(msg_id, &reason); + } else { + // The message is invalid, we reject it ignoring any gossip promises. If a peer is + // advertising this message via an IHAVE and it's invalid it will be double + // penalized, one for sending us an invalid and again for breaking a promise. + peer_score.reject_invalid_message(propagation_source, &raw_message.topic); + } + } + } + /// Handles received subscriptions. fn handle_received_subscriptions( &mut self, @@ -1649,7 +1760,7 @@ where } // generates a subscription event to be polled application_event.push(NetworkBehaviourAction::GenerateEvent( - Event::Subscribed { + GossipsubEvent::Subscribed { peer_id: propagation_source.clone(), topic: subscription.topic_hash.clone(), }, @@ -1669,7 +1780,7 @@ where .push((propagation_source.clone(), subscription.topic_hash.clone())); // generate an unsubscribe event to be polled application_event.push(NetworkBehaviourAction::GenerateEvent( - Event::Unsubscribed { + GossipsubEvent::Unsubscribed { peer_id: propagation_source.clone(), topic: subscription.topic_hash.clone(), }, @@ -1804,7 +1915,7 @@ where ); // not enough peers - get mesh_n - current_length more let desired_peers = self.config.mesh_n() - peers.len(); - let peer_list = Self::get_random_peers( + let peer_list = get_random_peers( topic_peers, &self.peer_protocols, topic_hash, @@ -1886,7 +1997,7 @@ where // if we have not enough outbound peers, graft to 
some new outbound peers if outbound < self.config.mesh_outbound_min() { let needed = self.config.mesh_outbound_min() - outbound; - let peer_list = Self::get_random_peers( + let peer_list = get_random_peers( topic_peers, &self.peer_protocols, topic_hash, @@ -1943,7 +2054,7 @@ where // if the median score is below the threshold, select a better peer (if any) and // GRAFT if median < thresholds.opportunistic_graft_threshold { - let peer_list = Self::get_random_peers( + let peer_list = get_random_peers( topic_peers, &self.peer_protocols, topic_hash, @@ -2027,7 +2138,7 @@ where ); let needed_peers = self.config.mesh_n() - peers.len(); let explicit_peers = &self.explicit_peers; - let new_peers = Self::get_random_peers( + let new_peers = get_random_peers( &self.topic_peers, &self.peer_protocols, topic_hash, @@ -2121,7 +2232,7 @@ where ) }; // get gossip_lazy random peers - let to_msg_peers = Self::get_random_peers_dynamic( + let to_msg_peers = get_random_peers_dynamic( &self.topic_peers, &self.peer_protocols, &topic_hash, @@ -2248,15 +2359,14 @@ where /// Returns true if at least one peer was messaged. fn forward_msg( &mut self, - message: GossipsubMessageWithId, + msg_id: &MessageId, + message: RawGossipsubMessage, propagation_source: Option<&PeerId>, ) -> Result { - let msg_id = message.message_id(); - // message is fully validated inform peer_score if let Some((peer_score, ..)) = &mut self.peer_score { if let Some(peer) = propagation_source { - peer_score.deliver_message(peer, &message); + peer_score.deliver_message(peer, msg_id, &message.topic); } } @@ -2308,12 +2418,15 @@ where } } - /// Constructs a [`GenericGossipsubMessage`] performing message signing if required. - pub(crate) fn build_message( + /// Constructs a [`RawGossipsubMessage`] performing message signing if required. + pub(crate) fn build_raw_message( &self, topic: TopicHash, - data: T, - ) -> Result, SigningError> { + data: &[u8], + ) -> Result { + // Compress the data if required. 
+ let compressed_data = self.message_compression.compress_message(data.to_vec())?; + match &self.publish_config { PublishConfig::Signing { ref keypair, @@ -2325,8 +2438,8 @@ where let signature = { let message = rpc_proto::Message { - from: Some(author.clone().into_bytes()), - data: Some(data.clone().into()), + from: Some(author.clone().to_bytes()), + data: Some(compressed_data.clone()), seqno: Some(sequence_number.to_be_bytes().to_vec()), topic: topic.clone().into_string(), signature: None, @@ -2344,9 +2457,9 @@ where Some(keypair.sign(&signature_bytes)?) }; - Ok(GenericGossipsubMessage { + Ok(RawGossipsubMessage { source: Some(author.clone()), - data, + data: compressed_data, // To be interoperable with the go-implementation this is treated as a 64-bit // big-endian uint. sequence_number: Some(sequence_number), @@ -2357,9 +2470,9 @@ where }) } PublishConfig::Author(peer_id) => { - Ok(GenericGossipsubMessage { + Ok(RawGossipsubMessage { source: Some(peer_id.clone()), - data, + data: compressed_data, // To be interoperable with the go-implementation this is treated as a 64-bit // big-endian uint. sequence_number: Some(rand::random()), @@ -2370,9 +2483,9 @@ where }) } PublishConfig::RandomAuthor => { - Ok(GenericGossipsubMessage { + Ok(RawGossipsubMessage { source: Some(PeerId::random()), - data, + data: compressed_data, // To be interoperable with the go-implementation this is treated as a 64-bit // big-endian uint. sequence_number: Some(rand::random()), @@ -2383,9 +2496,9 @@ where }) } PublishConfig::Anonymous => { - Ok(GenericGossipsubMessage { + Ok(RawGossipsubMessage { source: None, - data, + data: compressed_data, // To be interoperable with the go-implementation this is treated as a 64-bit // big-endian uint. sequence_number: None, @@ -2398,61 +2511,6 @@ where } } - /// Helper function to get a subset of random gossipsub peers for a `topic_hash` - /// filtered by the function `f`. 
The number of peers to get equals the output of `n_map` - /// that gets as input the number of filtered peers. - fn get_random_peers_dynamic( - topic_peers: &HashMap>, - peer_protocols: &HashMap, - topic_hash: &TopicHash, - // maps the number of total peers to the number of selected peers - n_map: impl Fn(usize) -> usize, - mut f: impl FnMut(&PeerId) -> bool, - ) -> BTreeSet { - let mut gossip_peers = match topic_peers.get(topic_hash) { - // if they exist, filter the peers by `f` - Some(peer_list) => peer_list - .iter() - .cloned() - .filter(|p| { - f(p) && match peer_protocols.get(p) { - Some(PeerKind::Gossipsub) => true, - Some(PeerKind::Gossipsubv1_1) => true, - _ => false, - } - }) - .collect(), - None => Vec::new(), - }; - - // if we have less than needed, return them - let n = n_map(gossip_peers.len()); - if gossip_peers.len() <= n { - debug!("RANDOM PEERS: Got {:?} peers", gossip_peers.len()); - return gossip_peers.into_iter().collect(); - } - - // we have more peers than needed, shuffle them and return n of them - let mut rng = thread_rng(); - gossip_peers.partial_shuffle(&mut rng, n); - - debug!("RANDOM PEERS: Got {:?} peers", n); - - gossip_peers.into_iter().take(n).collect() - } - - /// Helper function to get a set of `n` random gossipsub peers for a `topic_hash` - /// filtered by the function `f`. 
- fn get_random_peers( - topic_peers: &HashMap>, - peer_protocols: &HashMap, - topic_hash: &TopicHash, - n: usize, - f: impl FnMut(&PeerId) -> bool, - ) -> BTreeSet { - Self::get_random_peers_dynamic(topic_peers, peer_protocols, topic_hash, |_| n, f) - } - // adds a control action to control_pool fn control_pool_add( control_pool: &mut HashMap>, @@ -2642,13 +2700,13 @@ fn get_ip_addr(addr: &Multiaddr) -> Option { }) } -impl NetworkBehaviour for GenericGossipsub +impl NetworkBehaviour for Gossipsub where - T: Send + 'static + Clone + Into> + From> + AsRef<[u8]>, + C: Send + 'static + MessageCompression, F: Send + 'static + TopicSubscriptionFilter, { type ProtocolsHandler = GossipsubHandler; - type OutEvent = Event; + type OutEvent = GossipsubEvent; fn new_handler(&mut self) -> Self::ProtocolsHandler { GossipsubHandler::new( @@ -2913,25 +2971,13 @@ where } // Handle any invalid messages from this peer - if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { - for (message, validation_error) in invalid_messages { - let reason = RejectReason::ValidationError(validation_error); - let fast_message_id_cache = &self.fast_messsage_id_cache; - if let Some(msg_id) = self - .config - .fast_message_id(&message) - .and_then(|id| fast_message_id_cache.get(&id)) - { - let message = GossipsubMessageWithId::new(message, msg_id.clone()); - peer_score.reject_message(&propagation_source, &message, reason); - gossip_promises.reject_message(msg_id, &reason); - } else { - let message = GenericGossipsubMessage::from(message); - let id = self.config.message_id(&message); - let message = GossipsubMessageWithId::new(message, id); - peer_score.reject_message(&propagation_source, &message, reason); - gossip_promises.reject_message(message.message_id(), &reason); - } + if self.peer_score.is_some() { + for (raw_message, validation_error) in invalid_messages { + self.handle_invalid_message( + &propagation_source, + raw_message, + validation_error, + ) } } else { // log the invalid 
messages @@ -2946,8 +2992,8 @@ where } // Handle messages - for message in rpc.messages { - self.handle_received_message(message, &propagation_source); + for raw_message in rpc.messages { + self.handle_received_message(raw_message, &propagation_source); } // Handle control messages @@ -3042,6 +3088,61 @@ where } } +/// Helper function to get a subset of random gossipsub peers for a `topic_hash` +/// filtered by the function `f`. The number of peers to get equals the output of `n_map` +/// that gets as input the number of filtered peers. +fn get_random_peers_dynamic( + topic_peers: &HashMap>, + peer_protocols: &HashMap, + topic_hash: &TopicHash, + // maps the number of total peers to the number of selected peers + n_map: impl Fn(usize) -> usize, + mut f: impl FnMut(&PeerId) -> bool, +) -> BTreeSet { + let mut gossip_peers = match topic_peers.get(topic_hash) { + // if they exist, filter the peers by `f` + Some(peer_list) => peer_list + .iter() + .cloned() + .filter(|p| { + f(p) && match peer_protocols.get(p) { + Some(PeerKind::Gossipsub) => true, + Some(PeerKind::Gossipsubv1_1) => true, + _ => false, + } + }) + .collect(), + None => Vec::new(), + }; + + // if we have less than needed, return them + let n = n_map(gossip_peers.len()); + if gossip_peers.len() <= n { + debug!("RANDOM PEERS: Got {:?} peers", gossip_peers.len()); + return gossip_peers.into_iter().collect(); + } + + // we have more peers than needed, shuffle them and return n of them + let mut rng = thread_rng(); + gossip_peers.partial_shuffle(&mut rng, n); + + debug!("RANDOM PEERS: Got {:?} peers", n); + + gossip_peers.into_iter().take(n).collect() +} + +/// Helper function to get a set of `n` random gossipsub peers for a `topic_hash` +/// filtered by the function `f`. 
+fn get_random_peers( + topic_peers: &HashMap>, + peer_protocols: &HashMap, + topic_hash: &TopicHash, + n: usize, + f: impl FnMut(&PeerId) -> bool, +) -> BTreeSet { + get_random_peers_dynamic(topic_peers, peer_protocols, topic_hash, |_| n, f) +} + /// Validates the combination of signing, privacy and message validation to ensure the /// configuration will not reject published messages. fn validate_config( @@ -3072,7 +3173,7 @@ fn validate_config( Ok(()) } -impl, F: TopicSubscriptionFilter> fmt::Debug for GenericGossipsub { +impl fmt::Debug for Gossipsub { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Gossipsub") .field("config", &self.config) @@ -3168,12 +3269,12 @@ mod local_test { /// Tests RPC message fragmentation fn test_message_fragmentation_deterministic() { let max_transmit_size = 500; - let config = crate::ConfigBuilder::default() + let config = crate::GossipsubConfigBuilder::default() .max_transmit_size(max_transmit_size) .validation_mode(ValidationMode::Permissive) .build() .unwrap(); - let gs = Gossipsub::new(MessageAuthenticity::RandomAuthor, config).unwrap(); + let gs: Gossipsub = Gossipsub::new(MessageAuthenticity::RandomAuthor, config).unwrap(); // Message under the limit should be fine. 
let mut rpc = empty_rpc(); @@ -3216,12 +3317,12 @@ mod local_test { fn test_message_fragmentation() { fn prop(rpc: GossipsubRpc) { let max_transmit_size = 500; - let config = crate::ConfigBuilder::default() + let config = crate::GossipsubConfigBuilder::default() .max_transmit_size(max_transmit_size) .validation_mode(ValidationMode::Permissive) .build() .unwrap(); - let gs = Gossipsub::new(MessageAuthenticity::RandomAuthor, config).unwrap(); + let gs: Gossipsub = Gossipsub::new(MessageAuthenticity::RandomAuthor, config).unwrap(); let mut length_codec = unsigned_varint::codec::UviBytes::default(); length_codec.set_max_len(max_transmit_size); diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index ed367795a72..d38c66857c3 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -29,11 +29,14 @@ mod tests { use rand::Rng; use crate::{ - ConfigBuilder, GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, - IdentTopic as Topic, TopicScoreParams, + GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, IdentTopic as Topic, + TopicScoreParams, }; use super::super::*; + #[cfg(feature = "snappy")] + use crate::compression::SnappyCompression; + use crate::compression::{MessageCompression, NoCompression}; use crate::error::ValidationError; use crate::subscription_filter::WhitelistSubscriptionFilter; use crate::types::FastMessageId; @@ -42,35 +45,37 @@ mod tests { #[derive(Default, Builder, Debug)] #[builder(default)] - struct InjectNodes + struct InjectNodes // TODO: remove trait bound Default when this issue is fixed: // https://github.com/colin-kiegel/rust-derive-builder/issues/93 where - T: Send + 'static + Default + Clone + Into> + From> + AsRef<[u8]>, + C: MessageCompression + Default + Clone + Send + 'static, F: TopicSubscriptionFilter + Clone + Default + Send + 'static, { peer_no: usize, topics: Vec, to_subscribe: bool, - gs_config: Config, + 
gs_config: GossipsubConfig, explicit: usize, outbound: usize, scoring: Option<(PeerScoreParams, PeerScoreThresholds)>, + message_compression: C, subscription_filter: F, } - impl InjectNodes + impl InjectNodes where - T: Send + 'static + Default + Clone + Into> + From> + AsRef<[u8]>, + C: MessageCompression + Default + Clone + Send + 'static, F: TopicSubscriptionFilter + Clone + Default + Send + 'static, { - pub fn create_network(self) -> (GenericGossipsub, Vec, Vec) { + pub fn create_network(self) -> (Gossipsub, Vec, Vec) { let keypair = libp2p_core::identity::Keypair::generate_secp256k1(); // create a gossipsub struct - let mut gs: GenericGossipsub = GenericGossipsub::new_with_subscription_filter( + let mut gs: Gossipsub = Gossipsub::new_with_subscription_filter_and_compression( MessageAuthenticity::Signed(keypair), self.gs_config, self.subscription_filter, + self.message_compression, ) .unwrap(); @@ -109,59 +114,59 @@ mod tests { } } - impl InjectNodesBuilder + impl InjectNodesBuilder where - T: Send + 'static + Default + Clone + Into> + From> + AsRef<[u8]>, + C: MessageCompression + Default + Clone + Send + 'static, F: TopicSubscriptionFilter + Clone + Default + Send + 'static, { - pub fn create_network(&self) -> (GenericGossipsub, Vec, Vec) { + pub fn create_network(&self) -> (Gossipsub, Vec, Vec) { self.build().unwrap().create_network() } } - fn inject_nodes() -> InjectNodesBuilder + fn inject_nodes() -> InjectNodesBuilder where - T: Send + 'static + Default + Clone + Into> + From> + AsRef<[u8]>, + C: MessageCompression + Default + Clone + Send + 'static, F: TopicSubscriptionFilter + Clone + Default + Send + 'static, { InjectNodesBuilder::default() } - fn inject_nodes1() -> InjectNodesBuilder - where - T: Send + 'static + Default + Clone + Into> + From> + AsRef<[u8]>, - { + fn inject_nodes1() -> InjectNodesBuilder { inject_nodes() } - fn inject_nodes2() -> InjectNodesBuilder, AllowAllSubscriptionFilter> { + #[cfg(feature = "snappy")] + // used for testing 
compression + fn _inject_nodes_compression( + ) -> InjectNodesBuilder { inject_nodes() } // helper functions for testing - fn add_peer( - gs: &mut GenericGossipsub, + fn add_peer( + gs: &mut Gossipsub, topic_hashes: &Vec, outbound: bool, explicit: bool, ) -> PeerId where - T: Send + 'static + Clone + Into> + From> + AsRef<[u8]>, + C: MessageCompression + Default + Clone + Send + 'static, F: TopicSubscriptionFilter + Clone + Default + Send + 'static, { add_peer_with_addr(gs, topic_hashes, outbound, explicit, Multiaddr::empty()) } - fn add_peer_with_addr( - gs: &mut GenericGossipsub, + fn add_peer_with_addr( + gs: &mut Gossipsub, topic_hashes: &Vec, outbound: bool, explicit: bool, address: Multiaddr, ) -> PeerId where - T: Send + 'static + Clone + Into> + From> + AsRef<[u8]>, + C: MessageCompression + Default + Clone + Send + 'static, F: TopicSubscriptionFilter + Clone + Default + Send + 'static, { add_peer_with_addr_and_kind( @@ -174,8 +179,8 @@ mod tests { ) } - fn add_peer_with_addr_and_kind( - gs: &mut GenericGossipsub, + fn add_peer_with_addr_and_kind( + gs: &mut Gossipsub, topic_hashes: &Vec, outbound: bool, explicit: bool, @@ -183,7 +188,7 @@ mod tests { kind: Option, ) -> PeerId where - T: Send + 'static + Clone + Into> + From> + AsRef<[u8]>, + C: MessageCompression + Default + Clone + Send + 'static, F: TopicSubscriptionFilter + Clone + Default + Send + 'static, { let peer = PeerId::random(); @@ -200,7 +205,7 @@ mod tests { } }, ); - as NetworkBehaviour>::inject_connected(gs, &peer); + as NetworkBehaviour>::inject_connected(gs, &peer); if let Some(kind) = kind { gs.inject_event( peer.clone(), @@ -234,7 +239,7 @@ mod tests { let rpc = rpc.clone(); for message in rpc.publish.into_iter() { messages.push(RawGossipsubMessage { - source: message.from.map(|x| PeerId::from_bytes(x).unwrap()), + source: message.from.map(|x| PeerId::from_bytes(&x).unwrap()), data: message.data.unwrap_or_default(), sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), // 
don't inform the application topic: TopicHash::from_raw(message.topic), @@ -288,7 +293,7 @@ mod tests { .into_iter() .filter_map(|info| { info.peer_id - .and_then(|id| PeerId::from_bytes(id).ok()) + .and_then(|id| PeerId::from_bytes(&id).ok()) .map(|peer_id| //TODO signedPeerRecord, see https://github.com/libp2p/specs/pull/217 PeerInfo { @@ -338,7 +343,7 @@ mod tests { // - run JOIN(topic) let subscribe_topic = vec![String::from("test_subscribe")]; - let (gs, _, topic_hashes) = inject_nodes2() + let (gs, _, topic_hashes) = inject_nodes1() .peer_no(20) .topics(subscribe_topic) .to_subscribe(true) @@ -388,7 +393,7 @@ mod tests { .collect::>(); // subscribe to topic_strings - let (mut gs, _, topic_hashes) = inject_nodes2() + let (mut gs, _, topic_hashes) = inject_nodes1() .peer_no(20) .topics(topic_strings) .to_subscribe(true) @@ -464,7 +469,7 @@ mod tests { .map(|t| Topic::new(t.clone())) .collect::>(); - let (mut gs, _, topic_hashes) = inject_nodes2() + let (mut gs, _, topic_hashes) = inject_nodes1() .peer_no(20) .topics(topic_strings) .to_subscribe(true) @@ -603,11 +608,20 @@ mod tests { _ => collected_publish, }); - let msg_id = gs - .config - .message_id(&publishes.first().expect("Should contain > 0 entries")); + // Decompress the raw message and calculate the message id. + let message = GossipsubMessage::from_raw( + &gs.message_compression, + publishes + .first() + .expect("Should contain > 0 entries") + .clone(), + gs.config.max_transmit_size(), + ) + .unwrap(); - let config = GossipsubConfig::default(); + let msg_id = gs.config.message_id(&message); + + let config: GossipsubConfig = GossipsubConfig::default(); assert_eq!( publishes.len(), config.mesh_n_low(), @@ -681,9 +695,18 @@ mod tests { _ => collected_publish, }); - let msg_id = gs - .config - .message_id(&publishes.first().expect("Should contain > 0 entries")); + // Decompress the raw message and calculate the message id. 
+ let message = GossipsubMessage::from_raw( + &gs.message_compression, + publishes + .first() + .expect("Should contain > 0 entries") + .clone(), + gs.config.max_transmit_size(), + ) + .unwrap(); + + let msg_id = gs.config.message_id(&message); assert_eq!( publishes.len(), @@ -700,7 +723,7 @@ mod tests { #[test] /// Test the gossipsub NetworkBehaviour peer connection logic. fn test_inject_connected() { - let (gs, peers, topic_hashes) = inject_nodes2() + let (gs, peers, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![String::from("topic1"), String::from("topic2")]) .to_subscribe(true) @@ -761,7 +784,7 @@ mod tests { .iter() .map(|&t| String::from(t)) .collect(); - let (mut gs, peers, topic_hashes) = inject_nodes2() + let (mut gs, peers, topic_hashes) = inject_nodes1() .peer_no(20) .topics(topics) .to_subscribe(false) @@ -864,57 +887,42 @@ mod tests { .map(|p| (p.clone(), PeerKind::Gossipsubv1_1)) .collect(); - let random_peers = Gossipsub::get_random_peers( - &gs.topic_peers, - &gs.peer_protocols, - &topic_hash, - 5, - |_| true, - ); + let random_peers = + get_random_peers(&gs.topic_peers, &gs.peer_protocols, &topic_hash, 5, |_| { + true + }); assert_eq!(random_peers.len(), 5, "Expected 5 peers to be returned"); - let random_peers = Gossipsub::get_random_peers( - &gs.topic_peers, - &gs.peer_protocols, - &topic_hash, - 30, - |_| true, - ); + let random_peers = + get_random_peers(&gs.topic_peers, &gs.peer_protocols, &topic_hash, 30, |_| { + true + }); assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); assert!( random_peers == peers.iter().cloned().collect(), "Expected no shuffling" ); - let random_peers = Gossipsub::get_random_peers( - &gs.topic_peers, - &gs.peer_protocols, - &topic_hash, - 20, - |_| true, - ); + let random_peers = + get_random_peers(&gs.topic_peers, &gs.peer_protocols, &topic_hash, 20, |_| { + true + }); assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); assert!( random_peers == 
peers.iter().cloned().collect(), "Expected no shuffling" ); - let random_peers = Gossipsub::get_random_peers( - &gs.topic_peers, - &gs.peer_protocols, - &topic_hash, - 0, - |_| true, - ); + let random_peers = + get_random_peers(&gs.topic_peers, &gs.peer_protocols, &topic_hash, 0, |_| { + true + }); assert!(random_peers.len() == 0, "Expected 0 peers to be returned"); // test the filter - let random_peers = Gossipsub::get_random_peers( - &gs.topic_peers, - &gs.peer_protocols, - &topic_hash, - 5, - |_| false, - ); + let random_peers = + get_random_peers(&gs.topic_peers, &gs.peer_protocols, &topic_hash, 5, |_| { + false + }); assert!(random_peers.len() == 0, "Expected 0 peers to be returned"); let random_peers = - Gossipsub::get_random_peers(&gs.topic_peers, &gs.peer_protocols, &topic_hash, 10, { + get_random_peers(&gs.topic_peers, &gs.peer_protocols, &topic_hash, 10, { |peer| peers.contains(peer) }); assert!(random_peers.len() == 10, "Expected 10 peers to be returned"); @@ -923,13 +931,13 @@ mod tests { /// Tests that the correct message is sent when a peer asks for a message in our cache. #[test] fn test_handle_iwant_msg_cached() { - let (mut gs, peers, _) = inject_nodes2() + let (mut gs, peers, _) = inject_nodes1() .peer_no(20) .topics(Vec::new()) .to_subscribe(true) .create_network(); - let message = RawGossipsubMessage { + let raw_message = RawGossipsubMessage { source: Some(peers[11].clone()), data: vec![1, 2, 3, 4], sequence_number: Some(1u64), @@ -938,9 +946,17 @@ mod tests { key: None, validated: true, }; + + // Decompress the raw message and calculate the message id. 
+ let message = GossipsubMessage::from_raw( + &gs.message_compression, + raw_message.clone(), + gs.config.max_transmit_size(), + ) + .unwrap(); + let msg_id = gs.config.message_id(&message); - gs.mcache - .put(GossipsubMessage::new(message, msg_id.clone())); + gs.mcache.put(&msg_id, raw_message); gs.handle_iwant(&peers[7], vec![msg_id.clone()]); @@ -962,7 +978,13 @@ mod tests { assert!( sent_messages .iter() - .any(|msg| gs.config.message_id(msg) == msg_id), + .map(|msg| GossipsubMessage::from_raw( + &gs.message_compression, + msg.clone(), + gs.config.max_transmit_size(), + ) + .unwrap()) + .any(|msg| gs.config.message_id(&msg) == msg_id), "Expected the cached message to be sent to an IWANT peer" ); } @@ -970,7 +992,7 @@ mod tests { /// Tests that messages are sent correctly depending on the shifting of the message cache. #[test] fn test_handle_iwant_msg_cached_shifted() { - let (mut gs, peers, _) = inject_nodes2() + let (mut gs, peers, _) = inject_nodes1() .peer_no(20) .topics(Vec::new()) .to_subscribe(true) @@ -978,7 +1000,7 @@ mod tests { // perform 10 memshifts and check that it leaves the cache for shift in 1..10 { - let message = RawGossipsubMessage { + let raw_message = RawGossipsubMessage { source: Some(peers[11].clone()), data: vec![1, 2, 3, 4], sequence_number: Some(shift), @@ -987,9 +1009,16 @@ mod tests { key: None, validated: true, }; + + // Decompress the raw message and calculate the message id. 
+ let message = GossipsubMessage::from_raw( + &gs.message_compression, + raw_message.clone(), + gs.config.max_transmit_size(), + ) + .unwrap(); let msg_id = gs.config.message_id(&message); - gs.mcache - .put(GossipsubMessage::new(message, msg_id.clone())); + gs.mcache.put(&msg_id, raw_message); for _ in 0..shift { gs.mcache.shift(); } @@ -1003,7 +1032,15 @@ mod tests { event .messages .iter() - .any(|msg| gs.config.message_id(msg) == msg_id) + .map(|msg| { + GossipsubMessage::from_raw( + &gs.message_compression, + msg.clone(), + gs.config.max_transmit_size(), + ) + .unwrap() + }) + .any(|msg| gs.config.message_id(&msg) == msg_id) } _ => false, }); @@ -1025,7 +1062,7 @@ mod tests { #[test] // tests that an event is not created when a peers asks for a message not in our cache fn test_handle_iwant_msg_not_cached() { - let (mut gs, peers, _) = inject_nodes2() + let (mut gs, peers, _) = inject_nodes1() .peer_no(20) .topics(Vec::new()) .to_subscribe(true) @@ -1044,7 +1081,7 @@ mod tests { #[test] // tests that an event is created when a peer shares that it has a message we want fn test_handle_ihave_subscribed_and_msg_not_cached() { - let (mut gs, peers, topic_hashes) = inject_nodes2() + let (mut gs, peers, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -1076,7 +1113,7 @@ mod tests { // tests that an event is not created when a peer shares that it has a message that // we already have fn test_handle_ihave_subscribed_and_msg_cached() { - let (mut gs, peers, topic_hashes) = inject_nodes2() + let (mut gs, peers, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -1098,7 +1135,7 @@ mod tests { // test that an event is not created when a peer shares that it has a message in // a topic that we are not subscribed to fn test_handle_ihave_not_subscribed() { - let (mut gs, peers, _) = inject_nodes2() + let (mut gs, peers, _) = inject_nodes1() .peer_no(20) 
.topics(vec![]) .to_subscribe(true) @@ -1124,7 +1161,7 @@ mod tests { // tests that a peer is added to our mesh when we are both subscribed // to the same topic fn test_handle_graft_is_subscribed() { - let (mut gs, peers, topic_hashes) = inject_nodes2() + let (mut gs, peers, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -1142,7 +1179,7 @@ mod tests { // tests that a peer is not added to our mesh when they are subscribed to // a topic that we are not fn test_handle_graft_is_not_subscribed() { - let (mut gs, peers, topic_hashes) = inject_nodes2() + let (mut gs, peers, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -1167,7 +1204,7 @@ mod tests { .map(|&t| String::from(t)) .collect(); - let (mut gs, peers, topic_hashes) = inject_nodes2() + let (mut gs, peers, topic_hashes) = inject_nodes1() .peer_no(20) .topics(topics.clone()) .to_subscribe(true) @@ -1197,7 +1234,7 @@ mod tests { #[test] // tests that a peer is removed from our mesh fn test_handle_prune_peer_in_mesh() { - let (mut gs, peers, topic_hashes) = inject_nodes2() + let (mut gs, peers, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -1224,8 +1261,8 @@ mod tests { ); } - fn count_control_msgs( - gs: &Gossipsub, + fn count_control_msgs( + gs: &Gossipsub, mut filter: impl FnMut(&PeerId, &GossipsubControlAction) -> bool, ) -> usize { gs.control_pool @@ -1248,7 +1285,7 @@ mod tests { .sum::() } - fn flush_events(gs: &mut Gossipsub) { + fn flush_events(gs: &mut Gossipsub) { gs.control_pool.clear(); gs.events.clear(); } @@ -1256,7 +1293,7 @@ mod tests { #[test] // tests that a peer added as explicit peer gets connected to fn test_explicit_peer_gets_connected() { - let (mut gs, _, _) = inject_nodes2() + let (mut gs, _, _) = inject_nodes1() .peer_no(0) .topics(Vec::new()) .to_subscribe(true) @@ -1651,10 +1688,10 @@ mod tests { // Tests the mesh 
maintenance addition #[test] fn test_mesh_addition() { - let config = GossipsubConfig::default(); + let config: GossipsubConfig = GossipsubConfig::default(); // Adds mesh_low peers and PRUNE 2 giving us a deficit. - let (mut gs, peers, topics) = inject_nodes2() + let (mut gs, peers, topics) = inject_nodes1() .peer_no(config.mesh_n() + 1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -1712,9 +1749,9 @@ mod tests { #[test] fn test_connect_to_px_peers_on_handle_prune() { - let config = GossipsubConfig::default(); + let config: GossipsubConfig = GossipsubConfig::default(); - let (mut gs, peers, topics) = inject_nodes2() + let (mut gs, peers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -1768,10 +1805,10 @@ mod tests { #[test] fn test_send_px_and_backoff_in_prune() { - let config = GossipsubConfig::default(); + let config: GossipsubConfig = GossipsubConfig::default(); //build mesh with enough peers for px - let (mut gs, peers, topics) = inject_nodes2() + let (mut gs, peers, topics) = inject_nodes1() .peer_no(config.prune_peers() + 1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -1809,10 +1846,10 @@ mod tests { #[test] fn test_prune_backoffed_peer_on_graft() { - let config = GossipsubConfig::default(); + let config: GossipsubConfig = GossipsubConfig::default(); //build mesh with enough peers for px - let (mut gs, peers, topics) = inject_nodes2() + let (mut gs, peers, topics) = inject_nodes1() .peer_no(config.prune_peers() + 1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -1965,11 +2002,11 @@ mod tests { #[test] fn test_flood_publish() { - let config = GossipsubConfig::default(); + let config: GossipsubConfig = GossipsubConfig::default(); let topic = "test"; // Adds more peers than mesh can hold to test flood publishing - let (mut gs, _, _) = inject_nodes2() + let (mut gs, _, _) = inject_nodes1() .peer_no(config.mesh_n_high() + 10) .topics(vec![topic.into()]) .to_subscribe(true) @@ -1994,11 +2031,20 @@ mod 
tests { _ => collected_publish, }); - let msg_id = gs - .config - .message_id(&publishes.first().expect("Should contain > 0 entries")); + // Decompress the raw message and calculate the message id. + let message = GossipsubMessage::from_raw( + &gs.message_compression, + publishes + .first() + .expect("Should contain > 0 entries") + .clone(), + gs.config.max_transmit_size(), + ) + .unwrap(); - let config = GossipsubConfig::default(); + let msg_id = gs.config.message_id(&message); + + let config: GossipsubConfig = GossipsubConfig::default(); assert_eq!( publishes.len(), config.mesh_n_high() + 10, @@ -2013,18 +2059,18 @@ mod tests { #[test] fn test_gossip_to_at_least_gossip_lazy_peers() { - let config = GossipsubConfig::default(); + let config: GossipsubConfig = GossipsubConfig::default(); //add more peers than in mesh to test gossipping //by default only mesh_n_low peers will get added to mesh - let (mut gs, _, topic_hashes) = inject_nodes2() + let (mut gs, _, topic_hashes) = inject_nodes1() .peer_no(config.mesh_n_low() + config.gossip_lazy() + 1) .topics(vec!["topic".into()]) .to_subscribe(true) .create_network(); //receive message - let message = RawGossipsubMessage { + let raw_message = RawGossipsubMessage { source: Some(PeerId::random()), data: vec![], sequence_number: Some(0), @@ -2033,13 +2079,22 @@ mod tests { key: None, validated: true, }; - gs.handle_received_message(message.clone(), &PeerId::random()); + gs.handle_received_message(raw_message.clone(), &PeerId::random()); //emit gossip gs.emit_gossip(); - //check that exactly config.gossip_lazy() many gossip messages were sent. + // Decompress the raw message and calculate the message id. + let message = GossipsubMessage::from_raw( + &gs.message_compression, + raw_message, + gs.config.max_transmit_size(), + ) + .unwrap(); + let msg_id = gs.config.message_id(&message); + + //check that exactly config.gossip_lazy() many gossip messages were sent. 
assert_eq!( count_control_msgs(&gs, |_, action| match action { GossipsubControlAction::IHave { @@ -2054,19 +2109,19 @@ mod tests { #[test] fn test_gossip_to_at_most_gossip_factor_peers() { - let config = GossipsubConfig::default(); + let config: GossipsubConfig = GossipsubConfig::default(); //add a lot of peers let m = config.mesh_n_low() + config.gossip_lazy() * (2.0 / config.gossip_factor()) as usize; - let (mut gs, _, topic_hashes) = inject_nodes2() + let (mut gs, _, topic_hashes) = inject_nodes1() .peer_no(m) .topics(vec!["topic".into()]) .to_subscribe(true) .create_network(); //receive message - let message = RawGossipsubMessage { + let raw_message = RawGossipsubMessage { source: Some(PeerId::random()), data: vec![], sequence_number: Some(0), @@ -2075,13 +2130,21 @@ mod tests { key: None, validated: true, }; - gs.handle_received_message(message.clone(), &PeerId::random()); + gs.handle_received_message(raw_message.clone(), &PeerId::random()); //emit gossip gs.emit_gossip(); - //check that exactly config.gossip_lazy() many gossip messages were sent. + // Decompress the raw message and calculate the message id. + let message = GossipsubMessage::from_raw( + &gs.message_compression, + raw_message, + gs.config.max_transmit_size(), + ) + .unwrap(); + let msg_id = gs.config.message_id(&message); + //check that exactly config.gossip_lazy() many gossip messages were sent. 
assert_eq!( count_control_msgs(&gs, |_, action| match action { GossipsubControlAction::IHave { @@ -2096,10 +2159,10 @@ mod tests { #[test] fn test_accept_only_outbound_peer_grafts_when_mesh_full() { - let config = GossipsubConfig::default(); + let config: GossipsubConfig = GossipsubConfig::default(); //enough peers to fill the mesh - let (mut gs, peers, topics) = inject_nodes2() + let (mut gs, peers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2180,10 +2243,10 @@ mod tests { #[test] fn test_add_outbound_peers_if_min_is_not_satisfied() { - let config = GossipsubConfig::default(); + let config: GossipsubConfig = GossipsubConfig::default(); // Fill full mesh with inbound peers - let (mut gs, peers, topics) = inject_nodes2() + let (mut gs, peers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2434,7 +2497,7 @@ mod tests { gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); // Receive message - let message = RawGossipsubMessage { + let raw_message = RawGossipsubMessage { source: Some(PeerId::random()), data: vec![], sequence_number: Some(0), @@ -2443,12 +2506,21 @@ mod tests { key: None, validated: true, }; - gs.handle_received_message(message.clone(), &PeerId::random()); + gs.handle_received_message(raw_message.clone(), &PeerId::random()); + + // Decompress the raw message and calculate the message id. 
+ let message = GossipsubMessage::from_raw( + &gs.message_compression, + raw_message, + gs.config.max_transmit_size(), + ) + .unwrap(); + + let msg_id = gs.config.message_id(&message); // Emit gossip gs.emit_gossip(); - let msg_id = gs.config.message_id(&message); // Check that exactly one gossip messages got sent and it got sent to p2 assert_eq!( count_control_msgs(&gs, |peer, action| match action { @@ -2504,8 +2576,8 @@ mod tests { // Reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - // Rreceive message - let message = RawGossipsubMessage { + // Receive message + let raw_message = RawGossipsubMessage { source: Some(PeerId::random()), data: vec![], sequence_number: Some(0), @@ -2514,7 +2586,15 @@ mod tests { key: None, validated: true, }; - gs.handle_received_message(message.clone(), &PeerId::random()); + gs.handle_received_message(raw_message.clone(), &PeerId::random()); + + // Decompress the raw message and calculate the message id. 
+ let message = GossipsubMessage::from_raw( + &gs.message_compression, + raw_message.clone(), + gs.config.max_transmit_size(), + ) + .unwrap(); let msg_id = gs.config.message_id(&message); @@ -2539,11 +2619,29 @@ mod tests { //the message got sent to p2 assert!(sent_messages .iter() - .any(|(peer_id, msg)| peer_id == &p2 && &gs.config.message_id(msg) == &msg_id)); + .map(|(peer_id, msg)| ( + peer_id, + GossipsubMessage::from_raw( + &gs.message_compression, + msg.clone(), + gs.config.max_transmit_size(), + ) + .unwrap() + )) + .any(|(peer_id, msg)| peer_id == &p2 && &gs.config.message_id(&msg) == &msg_id)); //the message got not sent to p1 assert!(sent_messages .iter() - .all(|(peer_id, msg)| !(peer_id == &p1 && &gs.config.message_id(msg) == &msg_id))); + .map(|(peer_id, msg)| ( + peer_id, + GossipsubMessage::from_raw( + &gs.message_compression, + msg.clone(), + gs.config.max_transmit_size(), + ) + .unwrap() + )) + .all(|(peer_id, msg)| !(peer_id == &p1 && &gs.config.message_id(&msg) == &msg_id))); } #[test] @@ -2582,7 +2680,7 @@ mod tests { gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); //message that other peers have - let message = RawGossipsubMessage { + let raw_message = RawGossipsubMessage { source: Some(PeerId::random()), data: vec![], sequence_number: Some(0), @@ -2592,6 +2690,14 @@ mod tests { validated: true, }; + // Decompress the raw message and calculate the message id. 
+ let message = GossipsubMessage::from_raw( + &gs.message_compression, + raw_message, + gs.config.max_transmit_size(), + ) + .unwrap(); + let msg_id = gs.config.message_id(&message); gs.handle_ihave(&p1, vec![(topics[0].clone(), vec![msg_id.clone()])]); @@ -2755,7 +2861,7 @@ mod tests { //reduce score of p2 below publish_threshold but not below graylist_threshold gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - let message1 = RawGossipsubMessage { + let raw_message1 = RawGossipsubMessage { source: Some(PeerId::random()), data: vec![1, 2, 3, 4], sequence_number: Some(1u64), @@ -2765,7 +2871,7 @@ mod tests { validated: true, }; - let message2 = RawGossipsubMessage { + let raw_message2 = RawGossipsubMessage { source: Some(PeerId::random()), data: vec![1, 2, 3, 4, 5], sequence_number: Some(2u64), @@ -2775,7 +2881,7 @@ mod tests { validated: true, }; - let message3 = RawGossipsubMessage { + let raw_message3 = RawGossipsubMessage { source: Some(PeerId::random()), data: vec![1, 2, 3, 4, 5, 6], sequence_number: Some(3u64), @@ -2785,7 +2891,7 @@ mod tests { validated: true, }; - let message4 = RawGossipsubMessage { + let raw_message4 = RawGossipsubMessage { source: Some(PeerId::random()), data: vec![1, 2, 3, 4, 5, 6, 7], sequence_number: Some(4u64), @@ -2795,6 +2901,20 @@ mod tests { validated: true, }; + let message2 = GossipsubMessage::from_raw( + &gs.message_compression, + raw_message2.clone(), + gs.config.max_transmit_size(), + ) + .unwrap(); + + let message4 = GossipsubMessage::from_raw( + &gs.message_compression, + raw_message4, + gs.config.max_transmit_size(), + ) + .unwrap(); + let subscription = GossipsubSubscription { action: GossipsubSubscriptionAction::Subscribe, topic_hash: topics[0].clone(), @@ -2814,7 +2934,7 @@ mod tests { ConnectionId::new(0), HandlerEvent::Message { rpc: GossipsubRpc { - messages: vec![message1], + messages: vec![raw_message1], subscriptions: vec![subscription.clone()], control_msgs: vec![control_action], }, @@ -2843,7 +2963,7 @@ 
mod tests { ConnectionId::new(0), HandlerEvent::Message { rpc: GossipsubRpc { - messages: vec![message3], + messages: vec![raw_message3], subscriptions: vec![subscription.clone()], control_msgs: vec![control_action], }, @@ -3397,11 +3517,16 @@ mod tests { let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); + // Decompress the raw message and calculate the message id. + let message1 = + GossipsubMessage::from_raw(&gs.message_compression, m1, gs.config.max_transmit_size()) + .unwrap(); + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); //message m1 gets validated gs.report_message_validation_result( - &config.message_id(&m1), + &config.message_id(&message1), &peers[0], MessageAcceptance::Accept, ) @@ -3563,9 +3688,13 @@ mod tests { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + // Decompress the raw message and calculate the message id. + let message1 = + GossipsubMessage::from_raw(&gs.message_compression, m1, gs.config.max_transmit_size()) + .unwrap(); //message m1 gets ignored gs.report_message_validation_result( - &config.message_id(&m1), + &config.message_id(&message1), &peers[0], MessageAcceptance::Ignore, ) @@ -3619,9 +3748,14 @@ mod tests { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + // Decompress the raw message and calculate the message id. + let message1 = + GossipsubMessage::from_raw(&gs.message_compression, m1, gs.config.max_transmit_size()) + .unwrap(); + //message m1 gets rejected gs.report_message_validation_result( - &config.message_id(&m1), + &config.message_id(&message1), &peers[0], MessageAcceptance::Reject, ) @@ -3676,6 +3810,14 @@ mod tests { let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); + // Decompress the raw message and calculate the message id. 
+ let message1 = GossipsubMessage::from_raw( + &gs.message_compression, + m1.clone(), + gs.config.max_transmit_size(), + ) + .unwrap(); + //peer 1 delivers same message deliver_message(&mut gs, 1, m1.clone()); @@ -3684,7 +3826,7 @@ mod tests { //message m1 gets rejected gs.report_message_validation_result( - &config.message_id(&m1), + &config.message_id(&message1), &peers[0], MessageAcceptance::Reject, ) @@ -3747,23 +3889,36 @@ mod tests { deliver_message(&mut gs, 0, m2.clone()); deliver_message(&mut gs, 0, m3.clone()); + // Decompress the raw message and calculate the message id. + let message1 = + GossipsubMessage::from_raw(&gs.message_compression, m1, gs.config.max_transmit_size()) + .unwrap(); + // Decompress the raw message and calculate the message id. + let message2 = + GossipsubMessage::from_raw(&gs.message_compression, m2, gs.config.max_transmit_size()) + .unwrap(); + // Decompress the raw message and calculate the message id. + let message3 = + GossipsubMessage::from_raw(&gs.message_compression, m3, gs.config.max_transmit_size()) + .unwrap(); + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); //messages gets rejected gs.report_message_validation_result( - &config.message_id(&m1), + &config.message_id(&message1), &peers[0], MessageAcceptance::Reject, ) .unwrap(); gs.report_message_validation_result( - &config.message_id(&m2), + &config.message_id(&message2), &peers[0], MessageAcceptance::Reject, ) .unwrap(); gs.report_message_validation_result( - &config.message_id(&m3), + &config.message_id(&message3), &peers[0], MessageAcceptance::Reject, ) @@ -3819,11 +3974,15 @@ mod tests { let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); + // Decompress the raw message and calculate the message id. 
+ let message1 = + GossipsubMessage::from_raw(&gs.message_compression, m1, gs.config.max_transmit_size()) + .unwrap(); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); //message m1 gets rejected gs.report_message_validation_result( - &config.message_id(&m1), + &config.message_id(&message1), &peers[0], MessageAcceptance::Reject, ) @@ -4146,7 +4305,7 @@ mod tests { #[test] fn test_ignore_graft_from_unknown_topic() { //build gossipsub without subscribing to any topics - let (mut gs, _, _) = inject_nodes2() + let (mut gs, _, _) = inject_nodes1() .peer_no(0) .topics(vec![]) .to_subscribe(false) @@ -4170,7 +4329,7 @@ mod tests { fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { let config = GossipsubConfig::default(); //build gossipsub with full mesh - let (mut gs, _, topics) = inject_nodes2() + let (mut gs, _, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4182,7 +4341,15 @@ mod tests { //receive a message let mut seq = 0; let m1 = random_message(&mut seq, &topics); - let id = config.message_id(&m1); + + // Decompress the raw message and calculate the message id. + let message1 = GossipsubMessage::from_raw( + &gs.message_compression, + m1.clone(), + gs.config.max_transmit_size(), + ) + .unwrap(); + let id = config.message_id(&message1); gs.handle_received_message(m1.clone(), &PeerId::random()); @@ -4233,17 +4400,33 @@ mod tests { let messages: Vec<_> = (0..20).map(|_| random_message(&mut seq, &topics)).collect(); //peer sends us one ihave for each message in order - for message in &messages { + for raw_message in &messages { + // Decompress the raw message and calculate the message id. 
+ let message = GossipsubMessage::from_raw( + &gs.message_compression, + raw_message.clone(), + gs.config.max_transmit_size(), + ) + .unwrap(); + gs.handle_ihave( &peer, - vec![(topics[0].clone(), vec![config.message_id(message)])], + vec![(topics[0].clone(), vec![config.message_id(&message)])], ); } let first_ten: HashSet<_> = messages .iter() .take(10) - .map(|m| config.message_id(m)) + .map(|msg| { + GossipsubMessage::from_raw( + &gs.message_compression, + msg.clone(), + gs.config.max_transmit_size(), + ) + .unwrap() + }) + .map(|m| config.message_id(&m)) .collect(); //we send iwant only for the first 10 messages @@ -4270,10 +4453,17 @@ mod tests { //after a heartbeat everything is forgotten gs.heartbeat(); - for message in messages[10..].iter() { + for raw_message in messages[10..].iter() { + // Decompress the raw message and calculate the message id. + let message = GossipsubMessage::from_raw( + &gs.message_compression, + raw_message.clone(), + gs.config.max_transmit_size(), + ) + .unwrap(); gs.handle_ihave( &peer, - vec![(topics[0].clone(), vec![config.message_id(message)])], + vec![(topics[0].clone(), vec![config.message_id(&message)])], ); } @@ -4318,7 +4508,16 @@ mod tests { //peer has 20 messages let mut seq = 0; let message_ids: Vec<_> = (0..20) - .map(|_| config.message_id(&random_message(&mut seq, &topics))) + .map(|_| random_message(&mut seq, &topics)) + .map(|msg| { + GossipsubMessage::from_raw( + &gs.message_compression, + msg, + gs.config.max_transmit_size(), + ) + .unwrap() + }) + .map(|msg| config.message_id(&msg)) .collect(); //peer sends us three ihaves @@ -4509,13 +4708,29 @@ mod tests { for _ in 0..2 { let msg1 = random_message(&mut seq, &topics); let msg2 = random_message(&mut seq, &topics); + + // Decompress the raw message and calculate the message id. 
+ let message1 = GossipsubMessage::from_raw( + &gs.message_compression, + msg1.clone(), + gs.config.max_transmit_size(), + ) + .unwrap(); + // Decompress the raw message and calculate the message id. + let message2 = GossipsubMessage::from_raw( + &gs.message_compression, + msg2.clone(), + gs.config.max_transmit_size(), + ) + .unwrap(); + first_messages.push(msg1.clone()); second_messages.push(msg2.clone()); gs.handle_ihave( peer, vec![( topics[0].clone(), - vec![config.message_id(&msg1), config.message_id(&msg2)], + vec![config.message_id(&message1), config.message_id(&message2)], )], ); } @@ -4699,7 +4914,7 @@ mod tests { #[test] fn test_dont_add_floodsub_peers_to_mesh_on_join() { - let (mut gs, _, _) = inject_nodes2() + let (mut gs, _, _) = inject_nodes1() .peer_no(0) .topics(Vec::new()) .to_subscribe(false) @@ -4730,7 +4945,7 @@ mod tests { #[test] fn test_dont_send_px_to_old_gossipsub_peers() { - let (mut gs, _, topics) = inject_nodes2() + let (mut gs, _, topics) = inject_nodes1() .peer_no(0) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4767,7 +4982,7 @@ mod tests { #[test] fn test_dont_send_floodsub_peers_in_px() { //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes2() + let (mut gs, peers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -4807,7 +5022,7 @@ mod tests { #[test] fn test_dont_add_floodsub_peers_to_mesh_in_heartbeat() { - let (mut gs, _, topics) = inject_nodes2() + let (mut gs, _, topics) = inject_nodes1() .peer_no(0) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4836,7 +5051,7 @@ mod tests { // Some very basic test of public api methods. 
#[test] fn test_public_api() { - let (gs, peers, topic_hashes) = inject_nodes2() + let (gs, peers, topic_hashes) = inject_nodes1() .peer_no(4) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -4869,13 +5084,11 @@ mod tests { struct Pointers { slow_counter: u32, fast_counter: u32, - from_counter: u32, }; let mut counters = Pointers { slow_counter: 0, fast_counter: 0, - from_counter: 0, }; let counters_pointer: *mut Pointers = &mut counters; @@ -4890,30 +5103,6 @@ mod tests { address as *mut Pointers }}; } - #[derive(Clone, Default)] - struct MessageData(pub Vec); - - impl Into> for MessageData { - fn into(self) -> Vec { - self.0 - } - } - - impl From> for MessageData { - fn from(v: Vec) -> Self { - let mut counters_pointer = get_counters_pointer!(&v); - unsafe { - (*counters_pointer).from_counter += 1; - } - Self(v) - } - } - - impl AsRef<[u8]> for MessageData { - fn as_ref(&self) -> &[u8] { - self.0.as_slice() - } - } macro_rules! get_counters_and_hash { ($m: expr) => {{ @@ -4924,9 +5113,9 @@ mod tests { }}; } - let message_id_fn = |m: &GenericGossipsubMessage| -> MessageId { + let message_id_fn = |m: &GossipsubMessage| -> MessageId { let (mut id, mut counters_pointer): (MessageId, *mut Pointers) = - get_counters_and_hash!(&m.data.0); + get_counters_and_hash!(&m.data); unsafe { (*counters_pointer).slow_counter += 1; } @@ -4940,7 +5129,7 @@ mod tests { } id }; - let config = ConfigBuilder::default() + let config = GossipsubConfigBuilder::default() .message_id_fn(message_id_fn) .fast_message_id_fn(fast_message_id_fn) .build() @@ -4968,14 +5157,13 @@ mod tests { assert!(counters.fast_counter <= 5); assert_eq!(counters.slow_counter, 1); - assert_eq!(counters.from_counter, 1); } #[test] fn test_subscribe_to_invalid_topic() { let t1 = Topic::new("t1"); let t2 = Topic::new("t2"); - let (mut gs, _, _) = inject_nodes::, _>() + let (mut gs, _, _) = inject_nodes::() .subscription_filter(WhitelistSubscriptionFilter( vec![t1.hash()].into_iter().collect(), )) @@ 
-4997,7 +5185,7 @@ mod tests { ))) .create_network(); - let (mut gs2, _, _) = inject_nodes2().create_network(); + let (mut gs2, _, _) = inject_nodes1().create_network(); let connection_id = ConnectionId::new(0); @@ -5014,29 +5202,30 @@ mod tests { //subscribe to topic in gs2 gs2.subscribe(&topic).unwrap(); - let forward_messages_to_p1 = |gs1: &mut Gossipsub, gs2: &mut Gossipsub| { - //collect messages to p1 - let messages_to_p1 = gs2.events.drain(..).filter_map(|e| match e { - NetworkBehaviourAction::NotifyHandler { peer_id, event, .. } => { - if &peer_id == &p1 { - Some(event) - } else { - None + let forward_messages_to_p1 = + |gs1: &mut Gossipsub, gs2: &mut Gossipsub| { + //collect messages to p1 + let messages_to_p1 = gs2.events.drain(..).filter_map(|e| match e { + NetworkBehaviourAction::NotifyHandler { peer_id, event, .. } => { + if &peer_id == &p1 { + Some(event) + } else { + None + } } + _ => None, + }); + for message in messages_to_p1 { + gs1.inject_event( + p2.clone(), + connection_id, + HandlerEvent::Message { + rpc: proto_to_message(&message), + invalid_messages: vec![], + }, + ); } - _ => None, - }); - for message in messages_to_p1 { - gs1.inject_event( - p2.clone(), - connection_id, - HandlerEvent::Message { - rpc: proto_to_message(&message), - invalid_messages: vec![], - }, - ); - } - }; + }; //forward the subscribe message forward_messages_to_p1(&mut gs1, &mut gs2); diff --git a/protocols/gossipsub/src/compression.rs b/protocols/gossipsub/src/compression.rs new file mode 100644 index 00000000000..c1abcd0b6ad --- /dev/null +++ b/protocols/gossipsub/src/compression.rs @@ -0,0 +1,79 @@ +//! Allows compression algorithms to be added to the gossipsub layer. 
+ +#[cfg(feature = "snappy")] +use snap::raw::{decompress_len, Decoder, Encoder}; + +pub trait MessageCompression { + fn compress_message(&self, data: Vec) -> Result, CompressionError>; + + fn decompress_message( + &self, + data: Vec, + max_len: usize, + ) -> Result, CompressionError>; +} + +#[derive(Debug)] +pub enum CompressionError { + /// The decompressed contents are too large. + DecompressionTooLarge, + /// A custom error type. + Custom(String), +} + +#[cfg(feature = "snappy")] +impl From for CompressionError { + fn from(error: snap::Error) -> CompressionError { + CompressionError::Custom(error.to_string()) + } +} + +/// The default for gossipsub. +#[derive(Default, Clone)] +pub struct NoCompression; + +impl MessageCompression for NoCompression { + fn compress_message(&self, data: Vec) -> Result, CompressionError> { + Ok(data) + } + + fn decompress_message( + &self, + data: Vec, + _max_len: usize, + ) -> Result, CompressionError> { + Ok(data) + } +} + +/// Optional Snappy compression +#[cfg(feature = "snappy")] +#[derive(Default, Clone)] +pub struct SnappyCompression; + +#[cfg(feature = "snappy")] +impl MessageCompression for SnappyCompression { + fn decompress_message( + &self, + data: Vec, + max_len: usize, + ) -> Result, CompressionError> { + // Exit early if uncompressed data is > max_len + match decompress_len(&data) { + Ok(n) if n > max_len => { + return Err(CompressionError::DecompressionTooLarge); + } + Ok(_) => {} + Err(e) => { + return Err(CompressionError::Custom(e.to_string())); + } + }; + let mut decoder = Decoder::new(); + decoder.decompress_vec(&data).map_err(|e| e.into()) + } + + fn compress_message(&self, data: Vec) -> Result, CompressionError> { + let mut encoder = Encoder::new(); + encoder.compress_vec(&data).map_err(|e| e.into()) + } +} diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs index 8ff0453e1d0..cb1d77e3099 100644 --- a/protocols/gossipsub/src/config.rs +++ b/protocols/gossipsub/src/config.rs @@ 
-23,17 +23,17 @@ use std::time::Duration; use libp2p_core::PeerId; -use crate::types::{FastMessageId, GenericGossipsubMessage, MessageId}; -use crate::RawGossipsubMessage; +use crate::types::{FastMessageId, GossipsubMessage, MessageId, RawGossipsubMessage}; /// The types of message validation that can be employed by gossipsub. #[derive(Debug, Clone)] pub enum ValidationMode { - /// This is the default setting. This requires the message author to be a valid `PeerId` and to + /// This is the default setting. This requires the message author to be a valid [`PeerId`] and to /// be present as well as the sequence number. All messages must have valid signatures. /// - /// NOTE: This setting will reject messages from nodes using `PrivacyMode::Anonymous` and - /// all messages that do not have signatures. + /// NOTE: This setting will reject messages from nodes using + /// [`crate::behaviour::MessageAuthenticity::Anonymous`] and all messages that do not have + /// signatures. Strict, /// This setting permits messages that have no author, sequence number or signature. If any of /// these fields exist in the message these are validated. @@ -48,198 +48,47 @@ pub enum ValidationMode { None, } -// For general use cases. -pub type GossipsubConfig = Config>; - /// Configuration parameters that define the performance of the gossipsub network. #[derive(Clone)] -pub struct Config { - /// The protocol id prefix to negotiate this protocol. The protocol id is of the form - /// `//`. As gossipsub supports version 1.0 and 1.1, there are two - /// protocol id's supported. - /// - /// The default prefix is `meshsub`, giving the supported protocol ids: `/meshsub/1.1.0` and `/meshsub/1.0.0`, negotiated in that order. +pub struct GossipsubConfig { protocol_id_prefix: Cow<'static, str>, - - // Overlay network parameters. - /// Number of heartbeats to keep in the `memcache` (default is 5). history_length: usize, - - /// Number of past heartbeats to gossip about (default is 3). 
history_gossip: usize, - - /// Target number of peers for the mesh network (D in the spec, default is 6). mesh_n: usize, - - /// Minimum number of peers in mesh network before adding more (D_lo in the spec, default is 5). mesh_n_low: usize, - - /// Maximum number of peers in mesh network before removing some (D_high in the spec, default - /// is 12). mesh_n_high: usize, - - /// Affects how peers are selected when pruning a mesh due to over subscription. - /// - /// At least `retain_scores` of the retained peers will be high-scoring, while the remainder are - /// chosen randomly (D_score in the spec, default is 4). retain_scores: usize, - - /// Minimum number of peers to emit gossip to during a heartbeat (D_lazy in the spec, - /// default is 6). gossip_lazy: usize, - - /// Affects how many peers we will emit gossip to at each heartbeat. - /// We will send gossip to `gossip_factor * (total number of non-mesh peers)`, or - /// `gossip_lazy`, whichever is greater. The default is 0.25. gossip_factor: f64, - - /// Initial delay in each heartbeat (default is 5 seconds). heartbeat_initial_delay: Duration, - - /// Time between each heartbeat (default is 1 second). heartbeat_interval: Duration, - - /// Time to live for fanout peers (default is 60 seconds). fanout_ttl: Duration, - - /// The number of heartbeat ticks until we recheck the connection to explicit peers and - /// reconnecting if necessary (default 300). check_explicit_peers_ticks: u64, - - /// The maximum byte size for each gossip (default is 65536 bytes). - /// - /// This represents the maximum size of the entire protobuf payload. It must be at least - /// large enough to support basic control messages. If Peer eXchange is enabled, this - /// must be large enough to transmit the desired peer information on pruning. It must be at - /// least 100 bytes. Default is 65536 bytes. max_transmit_size: usize, - - /// Duplicates are prevented by storing message id's of known messages in an LRU time cache. 
- /// This settings sets the time period that messages are stored in the cache. Duplicates can be - /// received if duplicate messages are sent at a time greater than this setting apart. The - /// default is 1 minute. duplicate_cache_time: Duration, - - /// When set to `true`, prevents automatic forwarding of all received messages. This setting - /// allows a user to validate the messages before propagating them to their peers. If set to - /// true, the user must manually call [crate::GenericGossipsub::report_message_validation_result()] on the behaviour to forward message - /// once validated (default is `false`). validate_messages: bool, - - /// Determines the level of validation used when receiving messages. See [`ValidationMode`] - /// for the available types. The default is ValidationMode::Strict. validation_mode: ValidationMode, - - /// A user-defined function allowing the user to specify the message id of a gossipsub message. - /// The default value is to concatenate the source peer id with a sequence number. Setting this - /// parameter allows the user to address packets arbitrarily. One example is content based - /// addressing, where this function may be set to `hash(message)`. This would prevent messages - /// of the same content from being duplicated. - /// - /// The function takes a `GenericGossipsubMessage` as input and outputs a String to be - /// interpreted as the message id. - message_id_fn: fn(&GenericGossipsubMessage) -> MessageId, - - /// A user-defined optional function that computes fast ids from raw messages. This can be used - /// to avoid possibly expensive transformations from `RawGossipsubMessage` to - /// `GenericGossipsubMessage` for duplicates. Two semantically different messages must always - /// have different fast message ids, but it is allowed that two semantically identical messages - /// have different fast message ids as long as the message_id_fn produces the same id for them. 
- /// - /// On high intensive networks with lots of messages, where the message_id is based on the result of - /// decompressed traffic, it is beneficial to specify a `fast-message-id` that can identify and - /// filter duplicates quickly without performing the overhead of decompression. - /// - /// The function takes a `RawGossipsubMessage` as input and outputs a String to be - /// interpreted as the fast message id. Default is None. + message_id_fn: fn(&GossipsubMessage) -> MessageId, fast_message_id_fn: Option FastMessageId>, - - /// By default, gossipsub will reject messages that are sent to us that has the same message - /// source as we have specified locally. Enabling this, allows these messages and prevents - /// penalizing the peer that sent us the message. Default is false. allow_self_origin: bool, - - /// Whether Peer eXchange is enabled; this should be enabled in bootstrappers and other well - /// connected/trusted nodes. The default is false. do_px: bool, - - /// Controls the number of peers to include in prune Peer eXchange. - /// When we prune a peer that's eligible for PX (has a good score, etc), we will try to - /// send them signed peer records for up to `prune_peers` other peers that we - /// know of. It is recommended that this value is larger than `mesh_n_high` so that the pruned - /// peer can reliably form a full mesh. The default is 16. prune_peers: usize, - - /// Controls the backoff time for pruned peers. This is how long - /// a peer must wait before attempting to graft into our mesh again after being pruned. - /// When pruning a peer, we send them our value of `prune_backoff` so they know - /// the minimum time to wait. Peers running older versions may not send a backoff time, - /// so if we receive a prune message without one, we will wait at least `prune_backoff` - /// before attempting to re-graft. The default is one minute. prune_backoff: Duration, - - /// Number of heartbeat slots considered as slack for backoffs. 
This gurantees that we wait - /// at least backoff_slack heartbeats after a backoff is over before we try to graft. This - /// solves problems occuring through high latencies. In particular if - /// `backoff_slack * heartbeat_interval` is longer than any latencies between processing - /// prunes on our side and processing prunes on the receiving side this guarantees that we - /// get not punished for too early grafting. The default is 1. backoff_slack: u32, - - /// Whether to do flood publishing or not. If enabled newly created messages authored by the - /// local node will always be sent to all peers that are subscribed to the topic and have a - /// good enough score. The default is true. flood_publish: bool, - - /// If a GRAFT comes before `graft_flood_threshold` has elapsed since the last PRUNE, - /// then there is an extra score penalty applied to the peer through P7. The default is 10 - /// seconds. graft_flood_threshold: Duration, - - /// Minimum number of outbound peers in the mesh network before adding more (D_out in the spec). - /// This value must be smaller or equal than `mesh_n / 2` and smaller than `mesh_n_low`. - /// The default is 2. mesh_outbound_min: usize, - - /// Number of heartbeat ticks that specifcy the interval in which opportunistic grafting is - /// applied. Every `opportunistic_graft_ticks` we will attempt to select some high-scoring mesh - /// peers to replace lower-scoring ones, if the median score of our mesh peers falls below a - /// threshold (see https://godoc.org/github.com/libp2p/go-libp2p-pubsub#PeerScoreThresholds). - /// The default is 60. opportunistic_graft_ticks: u64, - - /// The maximum number of new peers to graft to during opportunistic grafting. The default is 2. opportunistic_graft_peers: usize, - - /// Controls how many times we will allow a peer to request the same message id through IWANT - /// gossip before we start ignoring them. 
This is designed to prevent peers from spamming us - /// with requests and wasting our resources. The default is 3. gossip_retransimission: u32, - - /// The maximum number of messages to include in an IHAVE message. - /// Also controls the maximum number of IHAVE ids we will accept and request with IWANT from a - /// peer within a heartbeat, to protect from IHAVE floods. You should adjust this value from the - /// default if your system is pushing more than 5000 messages in GossipSubHistoryGossip - /// heartbeats; with the defaults this is 1666 messages/s. The default is 5000. max_ihave_length: usize, - - /// GossipSubMaxIHaveMessages is the maximum number of IHAVE messages to accept from a peer - /// within a heartbeat. The default is 10. max_ihave_messages: usize, - - /// Time to wait for a message requested through IWANT following an IHAVE advertisement. - /// If the message is not received within this window, a broken promise is declared and - /// the router may apply behavioural penalties. The default is 3 seconds. iwant_followup_time: Duration, - - /// Enable support for flooodsub peers. Default false. support_floodsub: bool, - - /// Published message ids time cache duration. The default is 10 seconds. published_message_ids_cache_time: Duration, } -impl Config { +impl GossipsubConfig { // All the getters /// The protocol id prefix to negotiate this protocol. The protocol id is of the form @@ -279,9 +128,9 @@ impl Config { } /// Affects how peers are selected when pruning a mesh due to over subscription. - // - // At least `retain_scores` of the retained peers will be high-scoring, while the remainder are - // chosen randomly (D_score in the spec, default is 4). + /// + /// At least `retain_scores` of the retained peers will be high-scoring, while the remainder are + /// chosen randomly (D_score in the spec, default is 4). 
pub fn retain_scores(&self) -> usize { self.retain_scores } @@ -341,10 +190,9 @@ impl Config { /// When set to `true`, prevents automatic forwarding of all received messages. This setting /// allows a user to validate the messages before propagating them to their peers. If set to - /// true, the user must manually call `validate_message()` on the behaviour to forward message - /// once validated (default is `false`). Furthermore, the application may optionally call - /// `invalidate_message()` on the behaviour to remove the message from the memcache. The - /// default is false. + /// true, the user must manually call [`crate::Gossipsub::report_message_validation_result()`] + /// on the behaviour to forward message once validated (default is `false`). + /// The default is `false`. pub fn validate_messages(&self) -> bool { self.validate_messages } @@ -361,19 +209,19 @@ impl Config { /// addressing, where this function may be set to `hash(message)`. This would prevent messages /// of the same content from being duplicated. /// - /// The function takes a `GenericGossipsubMessage` as input and outputs a String to be interpreted as + /// The function takes a [`GossipsubMessage`] as input and outputs a String to be interpreted as /// the message id. - pub fn message_id(&self, message: &GenericGossipsubMessage) -> MessageId { + pub fn message_id(&self, message: &GossipsubMessage) -> MessageId { (self.message_id_fn)(message) } /// A user-defined optional function that computes fast ids from raw messages. This can be used - /// to avoid possibly expensive transformations from `RawGossipsubMessage` to - /// `GenericGossipsubMessage` for duplicates. Two semantically different messages must always + /// to avoid possibly expensive transformations from [`RawGossipsubMessage`] to + /// [`GossipsubMessage`] for duplicates. 
Two semantically different messages must always /// have different fast message ids, but it is allowed that two semantically identical messages /// have different fast message ids as long as the message_id_fn produces the same id for them. /// - /// The function takes a `RawGossipsubMessage` as input and outputs a String to be + /// The function takes a [`RawGossipsubMessage`] as input and outputs a String to be /// interpreted as the fast message id. Default is None. pub fn fast_message_id(&self, message: &RawGossipsubMessage) -> Option { self.fast_message_id_fn @@ -497,27 +345,24 @@ impl Config { } } -impl Default for Config { +impl Default for GossipsubConfig { fn default() -> Self { // use ConfigBuilder to also validate defaults - ConfigBuilder::default() + GossipsubConfigBuilder::default() .build() .expect("Default config parameters should be valid parameters") } } /// The builder struct for constructing a gossipsub configuration. -pub struct ConfigBuilder { - config: Config, +pub struct GossipsubConfigBuilder { + config: GossipsubConfig, } -// For general use cases. 
-pub type GossipsubConfigBuilder = ConfigBuilder>; - -impl Default for ConfigBuilder { +impl Default for GossipsubConfigBuilder { fn default() -> Self { - ConfigBuilder { - config: Config { + GossipsubConfigBuilder { + config: GossipsubConfig { protocol_id_prefix: Cow::Borrowed("meshsub"), history_length: 5, history_gossip: 3, @@ -541,7 +386,7 @@ impl Default for ConfigBuilder { let mut source_string = if let Some(peer_id) = message.source.as_ref() { peer_id.to_base58() } else { - PeerId::from_bytes(vec![0, 1, 0]) + PeerId::from_bytes(&vec![0, 1, 0]) .expect("Valid peer id") .to_base58() }; @@ -571,13 +416,13 @@ impl Default for ConfigBuilder { } } -impl From> for ConfigBuilder { - fn from(config: Config) -> Self { - ConfigBuilder { config } +impl From for GossipsubConfigBuilder { + fn from(config: GossipsubConfig) -> Self { + GossipsubConfigBuilder { config } } } -impl ConfigBuilder { +impl GossipsubConfigBuilder { /// The protocol id to negotiate this protocol (default is `/meshsub/1.0.0`). pub fn protocol_id_prefix(&mut self, protocol_id: impl Into>) -> &mut Self { self.config.protocol_id_prefix = protocol_id.into(); @@ -617,7 +462,7 @@ impl ConfigBuilder { /// Affects how peers are selected when pruning a mesh due to over subscription. /// - /// At least `retain_scores` of the retained peers will be high-scoring, while the remainder are + /// At least [`Self::retain_scores`] of the retained peers will be high-scoring, while the remainder are /// chosen randomly (D_score in the spec, default is 4). pub fn retain_scores(&mut self, retain_scores: usize) -> &mut Self { self.config.retain_scores = retain_scores; @@ -682,8 +527,8 @@ impl ConfigBuilder { /// When set, prevents automatic forwarding of all received messages. This setting /// allows a user to validate the messages before propagating them to their peers. If set, - /// the user must manually call `validate_message()` on the behaviour to forward a message - /// once validated. 
+ /// the user must manually call [`crate::Gossipsub::report_message_validation_result()`] on the + /// behaviour to forward a message once validated. pub fn validate_messages(&mut self) -> &mut Self { self.config.validate_messages = true; self @@ -702,19 +547,16 @@ impl ConfigBuilder { /// addressing, where this function may be set to `hash(message)`. This would prevent messages /// of the same content from being duplicated. /// - /// The function takes a [`GenericGossipsubMessage`] as input and outputs a String to be + /// The function takes a [`GossipsubMessage`] as input and outputs a String to be /// interpreted as the message id. - pub fn message_id_fn( - &mut self, - id_fn: fn(&GenericGossipsubMessage) -> MessageId, - ) -> &mut Self { + pub fn message_id_fn(&mut self, id_fn: fn(&GossipsubMessage) -> MessageId) -> &mut Self { self.config.message_id_fn = id_fn; self } /// A user-defined optional function that computes fast ids from raw messages. This can be used - /// to avoid possibly expensive transformations from `RawGossipsubMessage` to - /// [`GenericGossipsubMessage`] for duplicates. Two semantically different messages must always + /// to avoid possibly expensive transformations from [`RawGossipsubMessage`] to + /// [`GossipsubMessage`] for duplicates. Two semantically different messages must always /// have different fast message ids, but it is allowed that two semantically identical messages /// have different fast message ids as long as the message_id_fn produces the same id for them. /// @@ -738,9 +580,9 @@ impl ConfigBuilder { /// Controls the number of peers to include in prune Peer eXchange. /// /// When we prune a peer that's eligible for PX (has a good score, etc), we will try to - /// send them signed peer records for up to `prune_peers` other peers that we - /// know of. It is recommended that this value is larger than `mesh_n_high` so that the pruned - /// peer can reliably form a full mesh. The default is 16. 
+ /// send them signed peer records for up to [`Self::prune_peers] other peers that we + /// know of. It is recommended that this value is larger than [`Self::mesh_n_high`] so that the + /// pruned peer can reliably form a full mesh. The default is 16. pub fn prune_peers(&mut self, prune_peers: usize) -> &mut Self { self.config.prune_peers = prune_peers; self @@ -748,9 +590,9 @@ impl ConfigBuilder { /// Controls the backoff time for pruned peers. This is how long /// a peer must wait before attempting to graft into our mesh again after being pruned. - /// When pruning a peer, we send them our value of `prune_backoff` so they know + /// When pruning a peer, we send them our value of [`Self::prune_backoff`] so they know /// the minimum time to wait. Peers running older versions may not send a backoff time, - /// so if we receive a prune message without one, we will wait at least `prune_backoff` + /// so if we receive a prune message without one, we will wait at least [`Self::prune_backoff`] /// before attempting to re-graft. The default is one minute. pub fn prune_backoff(&mut self, prune_backoff: Duration) -> &mut Self { self.config.prune_backoff = prune_backoff; @@ -863,8 +705,8 @@ impl ConfigBuilder { self } - /// Constructs a `GenericGossipsubConfig` from the given configuration and validates the settings. - pub fn build(&self) -> Result, &str> { + /// Constructs a [`GossipsubConfig`] from the given configuration and validates the settings. 
+ pub fn build(&self) -> Result { // check all constraints on config if self.config.max_transmit_size < 100 { @@ -895,7 +737,7 @@ impl ConfigBuilder { } } -impl std::fmt::Debug for Config { +impl std::fmt::Debug for GossipsubConfig { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut builder = f.debug_struct("GossipsubConfig"); let _ = builder.field("protocol_id_prefix", &self.protocol_id_prefix); @@ -942,7 +784,7 @@ mod test { #[test] fn create_thing() { - let builder = GossipsubConfigBuilder::default() + let builder: GossipsubConfig = GossipsubConfigBuilder::default() .protocol_id_prefix("purple") .build() .unwrap(); diff --git a/protocols/gossipsub/src/error.rs b/protocols/gossipsub/src/error.rs index 5827c5c5c5b..63add7347f6 100644 --- a/protocols/gossipsub/src/error.rs +++ b/protocols/gossipsub/src/error.rs @@ -20,6 +20,7 @@ //! Error types that can result from gossipsub. +use crate::compression::CompressionError; use libp2p_core::identity::error::SigningError; use libp2p_core::upgrade::ProtocolError; use std::fmt; @@ -36,6 +37,8 @@ pub enum PublishError { /// The overall message was too large. This could be due to excessive topics or an excessive /// message size. MessageTooLarge, + /// The compression algorithm failed. + CompressionFailed, } /// Error associated with subscribing to a topic. @@ -80,12 +83,17 @@ pub enum ValidationError { InvalidSequenceNumber, /// The PeerId was invalid InvalidPeerId, - /// Signature existed when validation has been sent to `Anonymous`. + /// Signature existed when validation has been sent to + /// [`crate::behaviour::MessageAuthenticity::Anonymous`]. SignaturePresent, - /// Sequence number existed when validation has been sent to `Anonymous`. + /// Sequence number existed when validation has been sent to + /// [`crate::behaviour::MessageAuthenticity::Anonymous`]. SequenceNumberPresent, - /// Message source existed when validation has been sent to `Anonymous`. 
+ /// Message source existed when validation has been sent to + /// [`crate::behaviour::MessageAuthenticity::Anonymous`]. MessageSourcePresent, + /// The message could not be decompressed. + DecompressionFailed, } impl From for GossipsubHandlerError { @@ -94,6 +102,12 @@ impl From for GossipsubHandlerError { } } +impl From for PublishError { + fn from(_error: CompressionError) -> PublishError { + PublishError::CompressionFailed + } +} + impl fmt::Display for GossipsubHandlerError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:?}", self) diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index 8c885547d34..17f249b2e09 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -46,8 +46,8 @@ //! encoded) by setting the `hash_topics` configuration parameter to true. //! //! - **Sequence Numbers** - A message on the gossipsub network is identified by the source -//! `PeerId` and a nonce (sequence number) of the message. The sequence numbers in this -//! implementation are sent as raw bytes across the wire. They are 64-bit big-endian unsigned +//! [`libp2p_core::PeerId`] and a nonce (sequence number) of the message. The sequence numbers in +//! this implementation are sent as raw bytes across the wire. They are 64-bit big-endian unsigned //! integers. They are chosen at random in this implementation of gossipsub, but are sequential in //! the current go implementation. //! @@ -55,22 +55,22 @@ //! //! ## GossipsubConfig //! -//! The [`Config`] struct specifies various network performance/tuning configuration +//! The [`GossipsubConfig`] struct specifies various network performance/tuning configuration //! parameters. Specifically it specifies: //! -//! [`Config`]: struct.Config.html +//! [`GossipsubConfig`]: struct.Config.html //! -//! This struct implements the `Default` trait and can be initialised via -//! `Config::default()`. +//! 
This struct implements the [`Default`] trait and can be initialised via +//! [`GossipsubConfig::default()`]. //! //! //! ## Gossipsub //! -//! The [`GenericGossipsub`] struct implements the `NetworkBehaviour` trait allowing it to act as the -//! routing behaviour in a `Swarm`. This struct requires an instance of `PeerId` and -//! [`Config`]. +//! The [`Gossipsub`] struct implements the [`libp2p_swarm::NetworkBehaviour`] trait allowing it to +//! act as the routing behaviour in a [`libp2p_swarm::Swarm`]. This struct requires an instance of +//! [`libp2p_core::PeerId`] and [`GossipsubConfig`]. //! -//! [`GenericGossipsub`]: struct.GenericGossipsub.html +//! [`Gossipsub`]: struct.Gossipsub.html //! ## Example //! @@ -104,7 +104,7 @@ //! // set default parameters for gossipsub //! let gossipsub_config = libp2p_gossipsub::GossipsubConfig::default(); //! // build a gossipsub network behaviour -//! let mut gossipsub = +//! let mut gossipsub: libp2p_gossipsub::Gossipsub = //! libp2p_gossipsub::Gossipsub::new(message_authenticity, gossipsub_config).unwrap(); //! // subscribe to the topic //! 
gossipsub.subscribe(&topic); @@ -127,6 +127,7 @@ pub mod protocol; mod backoff; mod behaviour; +mod compression; mod config; mod gossip_promises; mod handler; @@ -143,20 +144,20 @@ extern crate derive_builder; mod rpc_proto; -pub use self::behaviour::{ - Event, GenericGossipsub, Gossipsub, GossipsubEvent, MessageAuthenticity, -}; -pub use self::config::{ - Config, ConfigBuilder, GossipsubConfig, GossipsubConfigBuilder, ValidationMode, -}; +pub use self::behaviour::{Gossipsub, GossipsubEvent, MessageAuthenticity}; +#[cfg(feature = "snappy")] +pub use self::compression::SnappyCompression; +pub use self::compression::{CompressionError, MessageCompression, NoCompression}; + +pub use self::config::{GossipsubConfig, GossipsubConfigBuilder, ValidationMode}; pub use self::peer_score::{ score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds, TopicScoreParams, }; pub use self::topic::{Hasher, Topic, TopicHash}; pub use self::types::{ - FastMessageId, GenericGossipsubMessage, GossipsubMessage, GossipsubRpc, MessageAcceptance, - MessageId, RawGossipsubMessage, + FastMessageId, GossipsubMessage, GossipsubRpc, MessageAcceptance, MessageId, + RawGossipsubMessage, }; pub type IdentTopic = Topic; pub type Sha256Topic = Topic; diff --git a/protocols/gossipsub/src/mcache.rs b/protocols/gossipsub/src/mcache.rs index 79165e4d4fa..6e8aab91e4b 100644 --- a/protocols/gossipsub/src/mcache.rs +++ b/protocols/gossipsub/src/mcache.rs @@ -19,7 +19,7 @@ // DEALINGS IN THE SOFTWARE. use crate::topic::TopicHash; -use crate::types::{GossipsubMessageWithId, MessageId}; +use crate::types::{MessageId, RawGossipsubMessage}; use libp2p_core::PeerId; use log::debug; use std::fmt::Debug; @@ -34,8 +34,8 @@ pub struct CacheEntry { /// MessageCache struct holding history of messages. 
#[derive(Clone)] -pub struct MessageCache { - msgs: HashMap>, +pub struct MessageCache { + msgs: HashMap, /// For every message and peer the number of times this peer asked for the message iwant_counts: HashMap>, history: Vec>, @@ -45,7 +45,7 @@ pub struct MessageCache { gossip: usize, } -impl> fmt::Debug for MessageCache { +impl fmt::Debug for MessageCache { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("MessageCache") .field("msgs", &self.msgs) @@ -56,7 +56,7 @@ impl> fmt::Debug for MessageCache { } /// Implementation of the MessageCache. -impl MessageCache { +impl MessageCache { pub fn new(gossip: usize, history_capacity: usize) -> Self { MessageCache { gossip, @@ -69,8 +69,11 @@ impl MessageCache { /// Put a message into the memory cache. /// /// Returns the message if it already exists. - pub fn put(&mut self, msg: GossipsubMessageWithId) -> Option> { - let message_id = msg.message_id(); + pub fn put( + &mut self, + message_id: &MessageId, + msg: RawGossipsubMessage, + ) -> Option { debug!("Put message {:?} in mcache", message_id); let cache_entry = CacheEntry { mid: message_id.clone(), @@ -87,7 +90,7 @@ impl MessageCache { /// Get a message with `message_id` #[cfg(test)] - pub fn get(&self, message_id: &MessageId) -> Option<&GossipsubMessageWithId> { + pub fn get(&self, message_id: &MessageId) -> Option<&RawGossipsubMessage> { self.msgs.get(message_id) } @@ -97,7 +100,7 @@ impl MessageCache { &mut self, message_id: &MessageId, peer: &PeerId, - ) -> Option<(&GossipsubMessageWithId, u32)> { + ) -> Option<(&RawGossipsubMessage, u32)> { let iwant_counts = &mut self.iwant_counts; self.msgs.get(message_id).and_then(|message| { if !message.validated { @@ -117,14 +120,14 @@ impl MessageCache { } /// Gets a message with [`MessageId`] and tags it as validated. 
- pub fn validate(&mut self, message_id: &MessageId) -> Option<&GossipsubMessageWithId> { + pub fn validate(&mut self, message_id: &MessageId) -> Option<&RawGossipsubMessage> { self.msgs.get_mut(message_id).map(|message| { message.validated = true; &*message }) } - /// Get a list of `MessageIds` for a given topic. + /// Get a list of [`MessageId`]s for a given topic. pub fn get_gossip_message_ids(&self, topic: &TopicHash) -> Vec { self.history[..self.gossip] .iter() @@ -178,7 +181,7 @@ impl MessageCache { } /// Removes a message from the cache and returns it if existent - pub fn remove(&mut self, message_id: &MessageId) -> Option> { + pub fn remove(&mut self, message_id: &MessageId) -> Option { //We only remove the message from msgs and iwant_count and keep the message_id in the // history vector. Zhe id in the history vector will simply be ignored on popping. @@ -191,10 +194,10 @@ impl MessageCache { mod tests { use super::*; use crate::types::RawGossipsubMessage; - use crate::{GossipsubMessage, IdentTopic as Topic, TopicHash}; + use crate::{IdentTopic as Topic, TopicHash}; use libp2p_core::PeerId; - fn gen_testm(x: u64, topic: TopicHash) -> GossipsubMessage { + fn gen_testm(x: u64, topic: TopicHash) -> (MessageId, RawGossipsubMessage) { let default_id = |message: &RawGossipsubMessage| { // default message id is: source + sequence number let mut source_string = message.source.as_ref().unwrap().to_base58(); @@ -217,10 +220,10 @@ mod tests { }; let id = default_id(&m); - GossipsubMessage::new(m, id) + (id, m) } - fn new_cache(gossip_size: usize, history: usize) -> MessageCache> { + fn new_cache(gossip_size: usize, history: usize) -> MessageCache { MessageCache::new(gossip_size, history) } @@ -239,13 +242,13 @@ mod tests { let mut mc = new_cache(10, 15); let topic1_hash = Topic::new("topic1").hash().clone(); - let m = gen_testm(10, topic1_hash); + let (id, m) = gen_testm(10, topic1_hash); - mc.put(m.clone()); + mc.put(&id, m.clone()); assert!(mc.history[0].len() == 
1); - let fetched = mc.get(m.message_id()); + let fetched = mc.get(&id); assert_eq!(fetched.is_none(), false); assert_eq!(fetched.is_some(), true); @@ -263,9 +266,9 @@ mod tests { let mut mc = new_cache(10, 15); let topic1_hash = Topic::new("topic1").hash().clone(); - let m = gen_testm(10, topic1_hash); + let (id, m) = gen_testm(10, topic1_hash); - mc.put(m.clone()); + mc.put(&id, m.clone()); // Try to get an incorrect ID let wrong_id = MessageId::new(b"wrongid"); @@ -293,8 +296,8 @@ mod tests { // Build the message for i in 0..10 { - let m = gen_testm(i, topic1_hash.clone()); - mc.put(m.clone()); + let (id, m) = gen_testm(i, topic1_hash.clone()); + mc.put(&id, m.clone()); } mc.shift(); @@ -316,8 +319,8 @@ mod tests { // Build the message for i in 0..10 { - let m = gen_testm(i, topic1_hash.clone()); - mc.put(m.clone()); + let (id, m) = gen_testm(i, topic1_hash.clone()); + mc.put(&id, m.clone()); } mc.shift(); @@ -342,8 +345,8 @@ mod tests { // Build the message for i in 0..10 { - let m = gen_testm(i, topic1_hash.clone()); - mc.put(m.clone()); + let (id, m) = gen_testm(i, topic1_hash.clone()); + mc.put(&id, m.clone()); } // Shift right until deleting messages diff --git a/protocols/gossipsub/src/peer_score.rs b/protocols/gossipsub/src/peer_score.rs index 8f1da06a9b3..42765837195 100644 --- a/protocols/gossipsub/src/peer_score.rs +++ b/protocols/gossipsub/src/peer_score.rs @@ -31,7 +31,6 @@ use std::time::{Duration, Instant}; mod params; use crate::error::ValidationError; -use crate::types::GossipsubMessageWithId; pub use params::{ score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds, TopicScoreParams, @@ -49,7 +48,7 @@ pub(crate) struct PeerScore { peer_stats: HashMap, /// Tracking peers per IP. peer_ips: HashMap>, - /// Message delivery tracking. This is a time-cache of `DeliveryRecord`s. + /// Message delivery tracking. This is a time-cache of [`DeliveryRecord`]s. 
deliveries: TimeCache, /// callback for monitoring message delivery times message_delivery_time_callback: Option, @@ -141,7 +140,7 @@ enum MeshStatus { } impl MeshStatus { - /// Initialises a new `Active` mesh status. + /// Initialises a new [`MeshStatus::Active`] mesh status. pub fn new_active() -> Self { MeshStatus::Active { graft_time: Instant::now(), @@ -193,7 +192,7 @@ impl Default for DeliveryRecord { } impl PeerScore { - /// Creates a new `PeerScore` using a given set of peer scoring parameters. + /// Creates a new [`PeerScore`] using a given set of peer scoring parameters. #[allow(dead_code)] pub fn new(params: PeerScoreParams) -> Self { Self::new_with_message_delivery_time_callback(params, None) @@ -421,7 +420,7 @@ impl PeerScore { }); } - /// Adds a connected peer to `PeerScore`, initialising with empty ips (ips get added later + /// Adds a connected peer to [`PeerScore`], initialising with empty ips (ips get added later /// through add_ip. pub fn add_peer(&mut self, peer_id: PeerId) { let peer_stats = self.peer_stats.entry(peer_id).or_default(); @@ -550,32 +549,31 @@ impl PeerScore { } } - pub fn validate_message(&mut self, _from: &PeerId, _msg: &GossipsubMessageWithId) { + pub fn validate_message(&mut self, _from: &PeerId, msg_id: &MessageId, topic_hash: &TopicHash) { // adds an empty record with the message id self.deliveries - .entry(_msg.message_id().clone()) + .entry(msg_id.clone()) .or_insert_with(DeliveryRecord::default); if let Some(callback) = self.message_delivery_time_callback { - let topic = &_msg.topic; if self .peer_stats .get(_from) - .and_then(|s| s.topics.get(topic)) + .and_then(|s| s.topics.get(topic_hash)) .map(|ts| ts.in_mesh()) .unwrap_or(false) { - callback(_from, topic, 0.0); + callback(_from, topic_hash, 0.0); } } } - pub fn deliver_message(&mut self, from: &PeerId, msg: &GossipsubMessageWithId) { - self.mark_first_message_delivery(from, msg); + pub fn deliver_message(&mut self, from: &PeerId, msg_id: &MessageId, topic_hash: 
&TopicHash) { + self.mark_first_message_delivery(from, topic_hash); let record = self .deliveries - .entry(msg.message_id().clone()) + .entry(msg_id.clone()) .or_insert_with(DeliveryRecord::default); // this should be the first delivery trace @@ -590,25 +588,32 @@ impl PeerScore { // this check is to make sure a peer can't send us a message twice and get a double // count if it is a first delivery if &peer != from { - self.mark_duplicate_message_delivery(&peer, msg, None); + self.mark_duplicate_message_delivery(&peer, topic_hash, None); } } } - pub fn reject_message( + /// Similar to `reject_message` except does not require the message id or reason for an invalid message. + pub fn reject_invalid_message(&mut self, from: &PeerId, topic_hash: &TopicHash) { + debug!( + "Message from {} rejected because of ValidationError or SelfOrigin", + from + ); + self.mark_invalid_message_delivery(from, topic_hash); + } + + // Reject a message. + pub fn reject_message( &mut self, from: &PeerId, - msg: &GossipsubMessageWithId, + msg_id: &MessageId, + topic_hash: &TopicHash, reason: RejectReason, ) { match reason { // these messages are not tracked, but the peer is penalized as they are invalid RejectReason::ValidationError(_) | RejectReason::SelfOrigin => { - debug!( - "Message from {} rejected because of ValidationError or SelfOrigin", - from - ); - self.mark_invalid_message_delivery(from, msg); + self.reject_invalid_message(from, topic_hash); return; } // we ignore those messages, so do nothing. 
@@ -621,7 +626,7 @@ impl PeerScore { let peers: Vec<_> = { let mut record = self .deliveries - .entry(msg.message_id().clone()) + .entry(msg_id.clone()) .or_insert_with(DeliveryRecord::default); // this should be the first delivery trace @@ -644,16 +649,21 @@ impl PeerScore { record.peers.drain().collect() }; - self.mark_invalid_message_delivery(from, msg); + self.mark_invalid_message_delivery(from, topic_hash); for peer_id in peers.iter() { - self.mark_invalid_message_delivery(peer_id, msg) + self.mark_invalid_message_delivery(peer_id, topic_hash) } } - pub fn duplicated_message(&mut self, from: &PeerId, msg: &GossipsubMessageWithId) { + pub fn duplicated_message( + &mut self, + from: &PeerId, + msg_id: &MessageId, + topic_hash: &TopicHash, + ) { let record = self .deliveries - .entry(msg.message_id().clone()) + .entry(msg_id.clone()) .or_insert_with(DeliveryRecord::default); if record.peers.get(from).is_some() { @@ -667,15 +677,14 @@ impl PeerScore { } else { 0.0 }; - let topic = &msg.topic; if self .peer_stats .get(from) - .and_then(|s| s.topics.get(topic)) + .and_then(|s| s.topics.get(topic_hash)) .map(|ts| ts.in_mesh()) .unwrap_or(false) { - callback(from, topic, time); + callback(from, topic_hash, time); } } @@ -688,11 +697,11 @@ impl PeerScore { DeliveryStatus::Valid(validated) => { // mark the peer delivery time to only count a duplicate delivery once. record.peers.insert(from.clone()); - self.mark_duplicate_message_delivery(from, msg, Some(validated)); + self.mark_duplicate_message_delivery(from, topic_hash, Some(validated)); } DeliveryStatus::Invalid => { // we no longer track delivery time - self.mark_invalid_message_delivery(from, msg); + self.mark_invalid_message_delivery(from, topic_hash); } DeliveryStatus::Ignored => { // the message was ignored; do nothing (we don't know if it was valid) @@ -748,13 +757,8 @@ impl PeerScore { /// Increments the "invalid message deliveries" counter for all scored topics the message /// is published in. 
- fn mark_invalid_message_delivery( - &mut self, - peer_id: &PeerId, - msg: &GossipsubMessageWithId, - ) { + fn mark_invalid_message_delivery(&mut self, peer_id: &PeerId, topic_hash: &TopicHash) { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { - let topic_hash = &msg.topic; if let Some(topic_stats) = peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params) { @@ -771,13 +775,8 @@ impl PeerScore { /// Increments the "first message deliveries" counter for all scored topics the message is /// published in, as well as the "mesh message deliveries" counter, if the peer is in the /// mesh for the topic. - fn mark_first_message_delivery( - &mut self, - peer_id: &PeerId, - msg: &GossipsubMessageWithId, - ) { + fn mark_first_message_delivery(&mut self, peer_id: &PeerId, topic_hash: &TopicHash) { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { - let topic_hash = &msg.topic; if let Some(topic_stats) = peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params) { @@ -815,10 +814,10 @@ impl PeerScore { /// Increments the "mesh message deliveries" counter for messages we've seen before, as long the /// message was received within the P3 window. 
- fn mark_duplicate_message_delivery( + fn mark_duplicate_message_delivery( &mut self, peer_id: &PeerId, - msg: &GossipsubMessageWithId, + topic_hash: &TopicHash, validated_time: Option, ) { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { @@ -827,7 +826,6 @@ impl PeerScore { } else { None }; - let topic_hash = &msg.topic; if let Some(topic_stats) = peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params) { diff --git a/protocols/gossipsub/src/peer_score/params.rs b/protocols/gossipsub/src/peer_score/params.rs index 17ad3114823..c4159bb6ec1 100644 --- a/protocols/gossipsub/src/peer_score/params.rs +++ b/protocols/gossipsub/src/peer_score/params.rs @@ -57,7 +57,8 @@ pub struct PeerScoreThresholds { pub publish_threshold: f64, /// The score threshold below which message processing is suppressed altogether, - /// implementing an effective graylist according to peer score; should be negative and <= `publish_threshold`. + /// implementing an effective graylist according to peer score; should be negative and + /// <= `publish_threshold`. pub graylist_threshold: f64, /// The score threshold below which px will be ignored; this should be positive @@ -247,11 +248,11 @@ pub struct TopicScoreParams { pub first_message_deliveries_cap: f64, /// P3: mesh message deliveries - /// This is the number of message deliveries in the mesh, within the `mesh_message_deliveries_window` of - /// message validation; deliveries during validation also count and are retroactively applied - /// when validation succeeds. - /// This window accounts for the minimum time before a hostile mesh peer trying to game the score - /// could replay back a valid message we just sent them. + /// This is the number of message deliveries in the mesh, within the + /// `mesh_message_deliveries_window` of message validation; deliveries during validation also + /// count and are retroactively applied when validation succeeds. 
+ /// This window accounts for the minimum time before a hostile mesh peer trying to game the + /// score could replay back a valid message we just sent them. /// It effectively tracks first and near-first deliveries, ie a message seen from a mesh peer /// before we have forwarded it to them. /// The parameter has an associated counter, decaying with `mesh_message_deliveries_decay`. diff --git a/protocols/gossipsub/src/peer_score/tests.rs b/protocols/gossipsub/src/peer_score/tests.rs index 330221db863..4ede29b7c51 100644 --- a/protocols/gossipsub/src/peer_score/tests.rs +++ b/protocols/gossipsub/src/peer_score/tests.rs @@ -33,8 +33,8 @@ fn within_variance(value: f64, expected: f64, variance: f64) -> bool { } // generates a random gossipsub message with sequence number i -fn make_test_message(seq: u64) -> GossipsubMessage { - let m = RawGossipsubMessage { +fn make_test_message(seq: u64) -> (MessageId, RawGossipsubMessage) { + let raw_message = RawGossipsubMessage { source: Some(PeerId::random()), data: vec![12, 34, 56], sequence_number: Some(seq), @@ -44,18 +44,25 @@ fn make_test_message(seq: u64) -> GossipsubMessage { validated: true, }; - let id = default_message_id()(&m); - GossipsubMessage::new(m, id) + let message = GossipsubMessage { + source: raw_message.source.clone(), + data: raw_message.data.clone(), + sequence_number: raw_message.sequence_number, + topic: raw_message.topic.clone(), + }; + + let id = default_message_id()(&message); + (id, raw_message) } -fn default_message_id() -> fn(&RawGossipsubMessage) -> MessageId { +fn default_message_id() -> fn(&GossipsubMessage) -> MessageId { |message| { // default message id is: source + sequence number // NOTE: If either the peer_id or source is not provided, we set to 0; let mut source_string = if let Some(peer_id) = message.source.as_ref() { peer_id.to_base58() } else { - PeerId::from_bytes(vec![0, 1, 0]) + PeerId::from_bytes(&vec![0, 1, 0]) .expect("Valid peer id") .to_base58() }; @@ -185,9 +192,9 @@ fn 
test_score_first_message_deliveries() { // deliver a bunch of messages from the peer let messages = 100; for seq in 0..messages { - let msg = make_test_message(seq); - peer_score.validate_message(&peer_id, &msg); - peer_score.deliver_message(&peer_id, &msg); + let (id, msg) = make_test_message(seq); + peer_score.validate_message(&peer_id, &id, &msg.topic); + peer_score.deliver_message(&peer_id, &id, &msg.topic); } peer_score.refresh_scores(); @@ -229,9 +236,9 @@ fn test_score_first_message_deliveries_cap() { // deliver a bunch of messages from the peer let messages = 100; for seq in 0..messages { - let msg = make_test_message(seq); - peer_score.validate_message(&peer_id, &msg); - peer_score.deliver_message(&peer_id, &msg); + let (id, msg) = make_test_message(seq); + peer_score.validate_message(&peer_id, &id, &msg.topic); + peer_score.deliver_message(&peer_id, &id, &msg.topic); } peer_score.refresh_scores(); @@ -270,9 +277,9 @@ fn test_score_first_message_deliveries_decay() { // deliver a bunch of messages from the peer let messages = 100; for seq in 0..messages { - let msg = make_test_message(seq); - peer_score.validate_message(&peer_id, &msg); - peer_score.deliver_message(&peer_id, &msg); + let (id, msg) = make_test_message(seq); + peer_score.validate_message(&peer_id, &id, &msg.topic); + peer_score.deliver_message(&peer_id, &id, &msg.topic); } peer_score.refresh_scores(); @@ -360,18 +367,18 @@ fn test_score_mesh_message_deliveries() { let messages = 100; let mut messages_to_send = Vec::new(); for seq in 0..messages { - let msg = make_test_message(seq); - peer_score.validate_message(&peer_id_a, &msg); - peer_score.deliver_message(&peer_id_a, &msg); + let (id, msg) = make_test_message(seq); + peer_score.validate_message(&peer_id_a, &id, &msg.topic); + peer_score.deliver_message(&peer_id_a, &id, &msg.topic); - peer_score.duplicated_message(&peer_id_b, &msg); - messages_to_send.push(msg); + peer_score.duplicated_message(&peer_id_b, &id, &msg.topic); + 
messages_to_send.push((id, msg)); } std::thread::sleep(topic_params.mesh_message_deliveries_window + Duration::from_millis(20)); - for msg in messages_to_send { - peer_score.duplicated_message(&peer_id_c, &msg); + for (id, msg) in messages_to_send { + peer_score.duplicated_message(&peer_id_c, &id, &msg.topic); } peer_score.refresh_scores(); @@ -435,9 +442,9 @@ fn test_score_mesh_message_deliveries_decay() { // deliver a bunch of messages from peer A let messages = 100; for seq in 0..messages { - let msg = make_test_message(seq); - peer_score.validate_message(&peer_id_a, &msg); - peer_score.deliver_message(&peer_id_a, &msg); + let (id, msg) = make_test_message(seq); + peer_score.validate_message(&peer_id_a, &id, &msg.topic); + peer_score.deliver_message(&peer_id_a, &id, &msg.topic); } // we should have a positive score, since we delivered more messages than the threshold @@ -508,9 +515,10 @@ fn test_score_mesh_failure_penalty() { // deliver a bunch of messages from peer A let messages = 100; for seq in 0..messages { - let msg = make_test_message(seq); - peer_score.validate_message(&peer_id_a, &msg); - peer_score.deliver_message(&peer_id_a, &msg); + let (id, msg) = make_test_message(seq); + + peer_score.validate_message(&peer_id_a, &id, &msg.topic); + peer_score.deliver_message(&peer_id_a, &id, &msg.topic); } // peers A and B should both have zero scores, since the failure penalty hasn't been applied yet @@ -579,8 +587,8 @@ fn test_score_invalid_message_deliveries() { // reject a bunch of messages from peer A let messages = 100; for seq in 0..messages { - let msg = make_test_message(seq); - peer_score.reject_message(&peer_id_a, &msg, RejectReason::ValidationFailed); + let (id, msg) = make_test_message(seq); + peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed); } peer_score.refresh_scores(); @@ -625,8 +633,8 @@ fn test_score_invalid_message_deliveris_decay() { // reject a bunch of messages from peer A let messages = 100; for seq in 
0..messages { - let msg = make_test_message(seq); - peer_score.reject_message(&peer_id_a, &msg, RejectReason::ValidationFailed); + let (id, msg) = make_test_message(seq); + peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed); } peer_score.refresh_scores(); @@ -681,12 +689,12 @@ fn test_score_reject_message_deliveries() { peer_score.add_peer(peer_id.clone()); } - let msg = make_test_message(1); + let (id, msg) = make_test_message(1); // these should have no effect in the score - peer_score.reject_message(&peer_id_a, &msg, RejectReason::BlackListedPeer); - peer_score.reject_message(&peer_id_a, &msg, RejectReason::BlackListedSource); - peer_score.reject_message(&peer_id_a, &msg, RejectReason::ValidationIgnored); + peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::BlackListedPeer); + peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::BlackListedSource); + peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored); peer_score.refresh_scores(); let score_a = peer_score.score(&peer_id_a); @@ -696,12 +704,12 @@ fn test_score_reject_message_deliveries() { assert_eq!(score_b, 0.0, "Should have no effect on the score"); // insert a record in the message deliveries - peer_score.validate_message(&peer_id_a, &msg); + peer_score.validate_message(&peer_id_a, &id, &msg.topic); // this should have no effect in the score, and subsequent duplicate messages should have no // effect either - peer_score.reject_message(&peer_id_a, &msg, RejectReason::ValidationIgnored); - peer_score.duplicated_message(&peer_id_b, &msg); + peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored); + peer_score.duplicated_message(&peer_id_b, &id, &msg.topic); peer_score.refresh_scores(); let score_a = peer_score.score(&peer_id_a); @@ -714,12 +722,12 @@ fn test_score_reject_message_deliveries() { peer_score.deliveries.clear(); // insert a record in the message deliveries - 
peer_score.validate_message(&peer_id_a, &msg); + peer_score.validate_message(&peer_id_a, &id, &msg.topic); // this should have no effect in the score, and subsequent duplicate messages should have no // effect either - peer_score.reject_message(&peer_id_a, &msg, RejectReason::ValidationIgnored); - peer_score.duplicated_message(&peer_id_b, &msg); + peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored); + peer_score.duplicated_message(&peer_id_b, &id, &msg.topic); peer_score.refresh_scores(); let score_a = peer_score.score(&peer_id_a); @@ -732,11 +740,11 @@ fn test_score_reject_message_deliveries() { peer_score.deliveries.clear(); // insert a new record in the message deliveries - peer_score.validate_message(&peer_id_a, &msg); + peer_score.validate_message(&peer_id_a, &id, &msg.topic); // and reject the message to make sure duplicates are also penalized - peer_score.reject_message(&peer_id_a, &msg, RejectReason::ValidationFailed); - peer_score.duplicated_message(&peer_id_b, &msg); + peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed); + peer_score.duplicated_message(&peer_id_b, &id, &msg.topic); peer_score.refresh_scores(); let score_a = peer_score.score(&peer_id_a); @@ -749,11 +757,11 @@ fn test_score_reject_message_deliveries() { peer_score.deliveries.clear(); // insert a new record in the message deliveries - peer_score.validate_message(&peer_id_a, &msg); + peer_score.validate_message(&peer_id_a, &id, &msg.topic); // and reject the message after a duplicate has arrived - peer_score.duplicated_message(&peer_id_b, &msg); - peer_score.reject_message(&peer_id_a, &msg, RejectReason::ValidationFailed); + peer_score.duplicated_message(&peer_id_b, &id, &msg.topic); + peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed); peer_score.refresh_scores(); let score_a = peer_score.score(&peer_id_a); diff --git a/protocols/gossipsub/src/protocol.rs 
b/protocols/gossipsub/src/protocol.rs index 7b8bae3c243..711b965ee27 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -194,7 +194,7 @@ impl GossipsubCodec { } }; - let source = match PeerId::from_bytes(from.clone()) { + let source = match PeerId::from_bytes(&from) { Ok(v) => v, Err(_) => { debug!("Signature verification failed: Invalid Peer Id"); @@ -218,7 +218,7 @@ impl GossipsubCodec { .map(|key| PublicKey::from_protobuf_encoding(&key)) { Some(Ok(key)) => key, - _ => match PublicKey::from_protobuf_encoding(&source.as_bytes()[2..]) { + _ => match PublicKey::from_protobuf_encoding(&source.to_bytes()[2..]) { Ok(v) => v, Err(_) => { warn!("Signature verification failed: No valid public key supplied"); @@ -416,7 +416,7 @@ impl Decoder for GossipsubCodec { let source = if verify_source { if let Some(bytes) = message.from { if !bytes.is_empty() { - match PeerId::from_bytes(bytes) { + match PeerId::from_bytes(&bytes) { Ok(peer_id) => Some(peer_id), // valid peer id Err(_) => { // invalid peer id, add to invalid messages @@ -502,6 +502,7 @@ impl Decoder for GossipsubCodec { .into_iter() .filter_map(|info| { info.peer_id + .as_ref() .and_then(|id| PeerId::from_bytes(id).ok()) .map(|peer_id| //TODO signedPeerRecord, see https://github.com/libp2p/specs/pull/217 @@ -566,14 +567,16 @@ mod tests { // generate an arbitrary GossipsubMessage using the behaviour signing functionality let config = GossipsubConfig::default(); - let gs = Gossipsub::new( + let gs: Gossipsub = Gossipsub::new( crate::MessageAuthenticity::Signed(keypair.0.clone()), config, ) .unwrap(); - let data = (0..g.gen_range(10, 10024)).map(|_| g.gen()).collect(); + let data = (0..g.gen_range(10, 10024)) + .map(|_| g.gen()) + .collect::>(); let topic_id = TopicId::arbitrary(g).0; - Message(gs.build_message(topic_id, data).unwrap()) + Message(gs.build_raw_message(topic_id, &data).unwrap()) } } diff --git a/protocols/gossipsub/src/rpc_proto.rs 
b/protocols/gossipsub/src/rpc_proto.rs index 027a9731ee1..b9fa8106c6a 100644 --- a/protocols/gossipsub/src/rpc_proto.rs +++ b/protocols/gossipsub/src/rpc_proto.rs @@ -37,7 +37,7 @@ mod test { let topic2 = Topic::new("t2").hash(); let new_message1 = super::Message { - from: Some(PeerId::random().as_bytes().to_vec()), + from: Some(PeerId::random().to_bytes()), data: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), seqno: Some(rand::thread_rng().gen::<[u8; 8]>().to_vec()), topic: topic1.clone().into_string(), @@ -45,7 +45,7 @@ mod test { key: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), }; let old_message1 = compat_proto::Message { - from: Some(PeerId::random().as_bytes().to_vec()), + from: Some(PeerId::random().to_bytes()), data: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), seqno: Some(rand::thread_rng().gen::<[u8; 8]>().to_vec()), topic_ids: vec![topic1.clone().into_string()], @@ -53,7 +53,7 @@ mod test { key: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), }; let old_message2 = compat_proto::Message { - from: Some(PeerId::random().as_bytes().to_vec()), + from: Some(PeerId::random().to_bytes()), data: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()), seqno: Some(rand::thread_rng().gen::<[u8; 8]>().to_vec()), topic_ids: vec![topic1.clone().into_string(), topic2.clone().into_string()], diff --git a/protocols/gossipsub/src/subscription_filter.rs b/protocols/gossipsub/src/subscription_filter.rs index 0f38157623a..7aa94416183 100644 --- a/protocols/gossipsub/src/subscription_filter.rs +++ b/protocols/gossipsub/src/subscription_filter.rs @@ -29,7 +29,7 @@ pub trait TopicSubscriptionFilter { /// Filters a list of incoming subscriptions and returns a filtered set /// By default this deduplicates the subscriptions and calls - /// `Self::filter_incoming_subscription_set` on the filtered set. + /// [`Self::filter_incoming_subscription_set`] on the filtered set. 
fn filter_incoming_subscriptions<'a>( &mut self, subscriptions: &'a [GossipsubSubscription], @@ -56,7 +56,7 @@ pub trait TopicSubscriptionFilter { } /// Filters a set of deduplicated subscriptions - /// By default this filters the elements based on `Self::allow_incoming_subscription`. + /// By default this filters the elements based on [`Self::allow_incoming_subscription`]. fn filter_incoming_subscription_set<'a>( &mut self, mut subscriptions: HashSet<&'a GossipsubSubscription>, diff --git a/protocols/gossipsub/src/topic.rs b/protocols/gossipsub/src/topic.rs index 2139243158e..7e8afca2d9e 100644 --- a/protocols/gossipsub/src/topic.rs +++ b/protocols/gossipsub/src/topic.rs @@ -34,7 +34,7 @@ pub trait Hasher { #[derive(Debug, Clone)] pub struct IdentityHash {} impl Hasher for IdentityHash { - /// Creates a `TopicHash` as a raw string. + /// Creates a [`TopicHash`] as a raw string. fn hash(topic_string: String) -> TopicHash { TopicHash { hash: topic_string } } @@ -43,7 +43,7 @@ impl Hasher for IdentityHash { #[derive(Debug, Clone)] pub struct Sha256Hash {} impl Hasher for Sha256Hash { - /// Creates a `TopicHash` by SHA256 hashing the topic then base64 encoding the + /// Creates a [`TopicHash`] by SHA256 hashing the topic then base64 encoding the /// hash. fn hash(topic_string: String) -> TopicHash { let topic_descripter = rpc_proto::TopicDescriptor { diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index e53b9885cf6..a79a11213ef 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -19,6 +19,7 @@ // DEALINGS IN THE SOFTWARE. //! A collection of types using the Gossipsub system. +use crate::compression::{CompressionError, MessageCompression}; use crate::rpc_proto; use crate::TopicHash; use libp2p_core::PeerId; @@ -93,14 +94,14 @@ pub enum PeerKind { NotSupported, } -/// A message received by the gossipsub system. 
-#[derive(Clone, PartialEq, Eq, Hash)] -pub struct GenericGossipsubMessage { +/// A message received by the gossipsub system and stored locally in caches.. +#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub struct RawGossipsubMessage { /// Id of the peer that published this message. pub source: Option, /// Content of the message. Its meaning is out of scope of this library. - pub data: T, + pub data: Vec, /// A random sequence number. pub sequence_number: Option, @@ -111,80 +112,50 @@ pub struct GenericGossipsubMessage { /// The signature of the message if it's signed. pub signature: Option>, - /// The public key of the message if it is signed and the source `PeerId` cannot be inlined. + /// The public key of the message if it is signed and the source [`PeerId`] cannot be inlined. pub key: Option>, /// Flag indicating if this message has been validated by the application or not. pub validated: bool, } -impl GenericGossipsubMessage { - pub fn from>(m: GenericGossipsubMessage) -> Self { - Self { - source: m.source, - data: m.data.into(), - sequence_number: m.sequence_number, - topic: m.topic, - signature: m.signature, - key: m.key, - validated: m.validated, - } - } -} - -pub type RawGossipsubMessage = GenericGossipsubMessage>; +/// The message sent to the user after a [`RawGossipsubMessage`] has been decompressed . +#[derive(Clone, PartialEq, Eq, Hash)] +pub struct GossipsubMessage { + /// Id of the peer that published this message. + pub source: Option, -#[derive(Clone, PartialEq, Eq, Hash, Debug)] -pub struct DataWithId { - pub id: MessageId, - pub data: T, -} + /// Decompressed content of the message. + pub data: Vec, -impl>> Into> for DataWithId { - fn into(self) -> Vec { - self.data.into() - } -} + /// A random sequence number. 
+ pub sequence_number: Option, -impl> AsRef<[u8]> for DataWithId { - fn as_ref(&self) -> &[u8] { - self.data.as_ref() - } + /// The topic this message belongs to + pub topic: TopicHash, } -pub type GossipsubMessageWithId = GenericGossipsubMessage>; - -impl GossipsubMessageWithId { - pub fn new(m: GenericGossipsubMessage, id: MessageId) -> Self { - Self { - source: m.source, - data: DataWithId { id, data: m.data }, - sequence_number: m.sequence_number, - topic: m.topic, - signature: m.signature, - key: m.key, - validated: m.validated, - } - } - - pub fn message_id(&self) -> &MessageId { - &self.data.id - } - - pub fn data(&self) -> &T { - &self.data.data +impl GossipsubMessage { + pub fn from_raw( + compression: &C, + raw_message: RawGossipsubMessage, + max_size: usize, + ) -> Result { + Ok(GossipsubMessage { + source: raw_message.source, + data: compression.decompress_message(raw_message.data, max_size)?, + sequence_number: raw_message.sequence_number, + topic: raw_message.topic, + }) } } -// for backwards compatibility -pub type GossipsubMessage = GossipsubMessageWithId>; - -impl> fmt::Debug for GenericGossipsubMessage { +impl fmt::Debug for GossipsubMessage { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("GossipsubMessage") .field( "data", - &format_args!("{:<20}", &hex_fmt::HexFmt(&self.data.as_ref())), + &format_args!("{:<20}", &hex_fmt::HexFmt(&self.data)), ) .field("source", &self.source) .field("sequence_number", &self.sequence_number) @@ -277,7 +248,7 @@ impl Into for GossipsubRpc { for message in self.messages.into_iter() { let message = rpc_proto::Message { - from: message.source.map(|m| m.into_bytes()), + from: message.source.map(|m| m.to_bytes()), data: Some(message.data), seqno: message.sequence_number.map(|s| s.to_be_bytes().to_vec()), topic: TopicHash::into_string(message.topic), @@ -343,7 +314,7 @@ impl Into for GossipsubRpc { peers: peers .into_iter() .map(|info| rpc_proto::PeerInfo { - peer_id: info.peer_id.map(|id| 
id.into_bytes()), + peer_id: info.peer_id.map(|id| id.to_bytes()), /// TODO, see https://github.com/libp2p/specs/pull/217 signed_peer_record: None, }) diff --git a/protocols/gossipsub/tests/smoke_compression.rs b/protocols/gossipsub/tests/smoke_compression.rs new file mode 100644 index 00000000000..b1c9e7f4108 --- /dev/null +++ b/protocols/gossipsub/tests/smoke_compression.rs @@ -0,0 +1,270 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +#[cfg(feature = "snappy")] +mod compression_tests { + use futures::prelude::*; + use log::debug; + use quickcheck::{QuickCheck, TestResult}; + use rand::{random, seq::SliceRandom, SeedableRng}; + use std::{ + pin::Pin, + task::{Context, Poll}, + time::Duration, + }; + + use futures::StreamExt; + use libp2p_core::{ + identity, multiaddr::Protocol, transport::MemoryTransport, upgrade, Multiaddr, Transport, + }; + use libp2p_gossipsub::{ + subscription_filter::AllowAllSubscriptionFilter, Gossipsub, GossipsubConfigBuilder, + GossipsubEvent, IdentTopic as Topic, MessageAuthenticity, SnappyCompression, + ValidationMode, + }; + use libp2p_plaintext::PlainText2Config; + use libp2p_swarm::Swarm; + use libp2p_yamux as yamux; + + struct Graph { + pub nodes: Vec<( + Multiaddr, + Swarm>, + )>, + } + + impl Future for Graph { + type Output = (Multiaddr, GossipsubEvent); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + for (addr, node) in &mut self.nodes { + match node.poll_next_unpin(cx) { + Poll::Ready(Some(event)) => return Poll::Ready((addr.clone(), event)), + Poll::Ready(None) => panic!("unexpected None when polling nodes"), + Poll::Pending => {} + } + } + + Poll::Pending + } + } + + impl Graph { + fn new_connected(num_nodes: usize, seed: u64) -> Graph { + if num_nodes == 0 { + panic!("expecting at least one node"); + } + + let mut rng = rand::rngs::StdRng::seed_from_u64(seed); + + let mut not_connected_nodes = std::iter::once(()) + .cycle() + .take(num_nodes) + .map(|_| build_node()) + .collect::>, + )>>(); + + let mut connected_nodes = vec![not_connected_nodes.pop().unwrap()]; + + while !not_connected_nodes.is_empty() { + connected_nodes.shuffle(&mut rng); + not_connected_nodes.shuffle(&mut rng); + + let mut next = not_connected_nodes.pop().unwrap(); + let connected_addr = &connected_nodes[0].0; + + // Memory transport can not handle addresses with `/p2p` suffix. 
+ let mut connected_addr_no_p2p = connected_addr.clone(); + let p2p_suffix_connected = connected_addr_no_p2p.pop(); + + debug!( + "Connect: {} -> {}", + next.0.clone().pop().unwrap(), + p2p_suffix_connected.unwrap() + ); + + Swarm::dial_addr(&mut next.1, connected_addr_no_p2p).unwrap(); + + connected_nodes.push(next); + } + + Graph { + nodes: connected_nodes, + } + } + + /// Polls the graph and passes each event into the provided FnMut until the closure returns + /// `true`. + /// + /// Returns [`true`] on success and [`false`] on timeout. + fn wait_for bool>(&mut self, mut f: F) -> bool { + let fut = futures::future::poll_fn(move |cx| match self.poll_unpin(cx) { + Poll::Ready((_addr, ev)) if f(&ev) => Poll::Ready(()), + _ => Poll::Pending, + }); + + let fut = async_std::future::timeout(Duration::from_secs(10), fut); + + futures::executor::block_on(fut).is_ok() + } + + /// Polls the graph until Poll::Pending is obtained, completing the underlying polls. + fn drain_poll(self) -> Self { + // The future below should return self. Given that it is a FnMut and not a FnOnce, one needs + // to wrap `self` in an Option, leaving a `None` behind after the final `Poll::Ready`. 
+ let mut this = Some(self); + + let fut = futures::future::poll_fn(move |cx| match &mut this { + Some(graph) => loop { + match graph.poll_unpin(cx) { + Poll::Ready(_) => {} + Poll::Pending => return Poll::Ready(this.take().unwrap()), + } + }, + None => panic!("future called after final return"), + }); + let fut = async_std::future::timeout(Duration::from_secs(10), fut); + futures::executor::block_on(fut).unwrap() + } + } + + fn build_node() -> ( + Multiaddr, + Swarm>, + ) { + let key = identity::Keypair::generate_ed25519(); + let public_key = key.public(); + + let transport = MemoryTransport::default() + .upgrade(upgrade::Version::V1) + .authenticate(PlainText2Config { + local_public_key: public_key.clone(), + }) + .multiplex(yamux::YamuxConfig::default()) + .boxed(); + + let peer_id = public_key.clone().into_peer_id(); + + // NOTE: The graph of created nodes can be disconnected from the mesh point of view as nodes + // can reach their d_lo value and not add other nodes to their mesh. To speed up this test, we + // reduce the default values of the heartbeat, so that all nodes will receive gossip in a + // timely fashion. 
+ + let config = GossipsubConfigBuilder::default() + .heartbeat_initial_delay(Duration::from_millis(100)) + .heartbeat_interval(Duration::from_millis(200)) + .history_length(10) + .history_gossip(10) + .validation_mode(ValidationMode::Permissive) + .build() + .unwrap(); + let behaviour = Gossipsub::::new( + MessageAuthenticity::Author(peer_id.clone()), + config, + ) + .unwrap(); + let mut swarm = Swarm::new(transport, behaviour, peer_id); + + let port = 1 + random::(); + let mut addr: Multiaddr = Protocol::Memory(port).into(); + Swarm::listen_on(&mut swarm, addr.clone()).unwrap(); + + addr = addr.with(libp2p_core::multiaddr::Protocol::P2p( + public_key.into_peer_id().into(), + )); + + (addr, swarm) + } + + #[test] + fn multi_hop_propagation() { + let _ = env_logger::try_init(); + + fn prop(num_nodes: u8, seed: u64) -> TestResult { + if num_nodes < 2 || num_nodes > 50 { + return TestResult::discard(); + } + + debug!("number nodes: {:?}, seed: {:?}", num_nodes, seed); + + let mut graph = Graph::new_connected(num_nodes as usize, seed); + let number_nodes = graph.nodes.len(); + + // Subscribe each node to the same topic. + let topic = Topic::new("test-net"); + for (_addr, node) in &mut graph.nodes { + node.subscribe(&topic).unwrap(); + } + + // Wait for all nodes to be subscribed. + let mut subscribed = 0; + let all_subscribed = graph.wait_for(move |ev| { + if let GossipsubEvent::Subscribed { .. } = ev { + subscribed += 1; + if subscribed == (number_nodes - 1) * 2 { + return true; + } + } + + false + }); + if !all_subscribed { + return TestResult::error(format!( + "Timed out waiting for all nodes to subscribe but only have {:?}/{:?}.", + subscribed, num_nodes, + )); + } + + // It can happen that the publish occurs before all grafts have completed causing this test + // to fail. We drain all the poll messages before publishing. + graph = graph.drain_poll(); + + // Publish a single message. 
+ graph.nodes[0].1.publish(topic, vec![1, 2, 3]).unwrap(); + + // Wait for all nodes to receive the published message. + let mut received_msgs = 0; + let all_received = graph.wait_for(move |ev| { + if let GossipsubEvent::Message { .. } = ev { + received_msgs += 1; + if received_msgs == number_nodes - 1 { + return true; + } + } + + false + }); + if !all_received { + return TestResult::error(format!( + "Timed out waiting for all nodes to receive the msg but only have {:?}/{:?}.", + received_msgs, num_nodes, + )); + } + + TestResult::passed() + } + + QuickCheck::new() + .max_tests(5) + .quickcheck(prop as fn(u8, u64) -> TestResult) + } +} diff --git a/protocols/identify/CHANGELOG.md b/protocols/identify/CHANGELOG.md index db7796a29bc..3556a3b2463 100644 --- a/protocols/identify/CHANGELOG.md +++ b/protocols/identify/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.26.0 [unreleased] + +- Update `libp2p-swarm` and `libp2p-core`. + # 0.25.0 [2020-11-25] - Update `libp2p-swarm` and `libp2p-core`. diff --git a/protocols/identify/Cargo.toml b/protocols/identify/Cargo.toml index 3606a3cb87b..9b81b82cd9d 100644 --- a/protocols/identify/Cargo.toml +++ b/protocols/identify/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-identify" edition = "2018" description = "Nodes identifcation protocol for libp2p" -version = "0.25.0" +version = "0.26.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,8 +11,8 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.25.0", path = "../../core" } -libp2p-swarm = { version = "0.25.0", path = "../../swarm" } +libp2p-core = { version = "0.26.0", path = "../../core" } +libp2p-swarm = { version = "0.26.0", path = "../../swarm" } log = "0.4.1" prost = "0.6.1" smallvec = "1.0" diff --git a/protocols/kad/CHANGELOG.md b/protocols/kad/CHANGELOG.md index 4c42e37d1f7..74ad00f4749 100644 --- a/protocols/kad/CHANGELOG.md +++ 
b/protocols/kad/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.27.0 [unreleased] + +- Update `libp2p-core` and `libp2p-swarm`. + # 0.26.0 [2020-11-25] - Update `libp2p-core` and `libp2p-swarm`. diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index 2c23310e616..975f2913bb4 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-kad" edition = "2018" description = "Kademlia protocol for libp2p" -version = "0.26.0" +version = "0.27.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -17,8 +17,8 @@ fnv = "1.0" futures_codec = "0.4" futures = "0.3.1" log = "0.4" -libp2p-core = { version = "0.25.0", path = "../../core" } -libp2p-swarm = { version = "0.25.0", path = "../../swarm" } +libp2p-core = { version = "0.26.0", path = "../../core" } +libp2p-swarm = { version = "0.26.0", path = "../../swarm" } prost = "0.6.1" rand = "0.7.2" sha2 = "0.9.1" diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index 2034e66e647..5cfd12ca30e 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -47,7 +47,7 @@ use libp2p_swarm::{ }; use log::{info, debug, warn}; use smallvec::SmallVec; -use std::{borrow::{Borrow, Cow}, error, iter, time::Duration}; +use std::{borrow::Cow, error, iter, time::Duration}; use std::collections::{HashSet, VecDeque}; use std::fmt; use std::num::NonZeroUsize; @@ -337,7 +337,7 @@ where /// Creates a new `Kademlia` network behaviour with the given configuration. pub fn with_config(id: PeerId, store: TStore, config: KademliaConfig) -> Self { - let local_key = kbucket::Key::new(id.clone()); + let local_key = kbucket::Key::from(id); let put_record_job = config .record_replication_interval @@ -428,7 +428,7 @@ where /// If the routing table has been updated as a result of this operation, /// a [`KademliaEvent::RoutingUpdated`] event is emitted. 
pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr) -> RoutingUpdate { - let key = kbucket::Key::new(peer.clone()); + let key = kbucket::Key::from(*peer); match self.kbuckets.entry(&key) { kbucket::Entry::Present(mut entry, _) => { if entry.value().insert(address) { @@ -495,7 +495,7 @@ where pub fn remove_address(&mut self, peer: &PeerId, address: &Multiaddr) -> Option, Addresses>> { - let key = kbucket::Key::new(peer.clone()); + let key = kbucket::Key::from(*peer); match self.kbuckets.entry(&key) { kbucket::Entry::Present(mut entry, _) => { if entry.value().remove(address).is_err() { @@ -524,7 +524,7 @@ where pub fn remove_peer(&mut self, peer: &PeerId) -> Option, Addresses>> { - let key = kbucket::Key::new(peer.clone()); + let key = kbucket::Key::from(*peer); match self.kbuckets.entry(&key) { kbucket::Entry::Present(entry, _) => { Some(entry.remove()) @@ -551,9 +551,9 @@ where pub fn kbucket(&mut self, key: K) -> Option, Addresses>> where - K: Borrow<[u8]> + Clone + K: Into> + Clone { - self.kbuckets.bucket(&kbucket::Key::new(key)) + self.kbuckets.bucket(&key.into()) } /// Initiates an iterative query for the closest peers to the given key. @@ -562,10 +562,10 @@ where /// [`KademliaEvent::QueryResult{QueryResult::GetClosestPeers}`]. 
pub fn get_closest_peers(&mut self, key: K) -> QueryId where - K: Borrow<[u8]> + Clone + K: Into> + Into> + Clone { - let info = QueryInfo::GetClosestPeers { key: key.borrow().to_vec() }; - let target = kbucket::Key::new(key); + let info = QueryInfo::GetClosestPeers { key: key.clone().into() }; + let target: kbucket::Key = key.into(); let peers = self.kbuckets.closest_keys(&target); let inner = QueryInner::new(info); self.queries.add_iter_closest(target.clone(), peers, inner) @@ -823,7 +823,7 @@ where if &node_id == kbuckets.local_key().preimage() { Some(local_addrs.iter().cloned().collect::>()) } else { - let key = kbucket::Key::new(node_id.clone()); + let key = kbucket::Key::from(node_id); kbuckets.entry(&key).view().map(|e| e.node.value.clone().into_vec()) } } else { @@ -870,7 +870,7 @@ where /// Updates the routing table with a new connection status and address of a peer. fn connection_updated(&mut self, peer: PeerId, address: Option, new_status: NodeStatus) { - let key = kbucket::Key::new(peer.clone()); + let key = kbucket::Key::from(peer); match self.kbuckets.entry(&key) { kbucket::Entry::Present(mut entry, old_status) => { if let Some(address) = address { @@ -985,13 +985,13 @@ where // Pr(bucket-253) = 1 - (7/8)^16 ~= 0.88 // Pr(bucket-252) = 1 - (15/16)^16 ~= 0.64 // ... - let mut target = kbucket::Key::new(PeerId::random()); + let mut target = kbucket::Key::from(PeerId::random()); for _ in 0 .. 16 { let d = local_key.distance(&target); if b.contains(&d) { break; } - target = kbucket::Key::new(PeerId::random()); + target = kbucket::Key::from(PeerId::random()); } target }).collect::>().into_iter() @@ -1447,7 +1447,7 @@ where fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { // We should order addresses from decreasing likelyhood of connectivity, so start with // the addresses of that peer in the k-buckets. 
- let key = kbucket::Key::new(peer_id.clone()); + let key = kbucket::Key::from(*peer_id); let mut peer_addrs = if let kbucket::Entry::Present(mut entry, _) = self.kbuckets.entry(&key) { let addrs = entry.value().iter().cloned().collect::>(); @@ -1500,7 +1500,7 @@ where let (old, new) = (old.get_remote_address(), new.get_remote_address()); // Update routing table. - if let Some(addrs) = self.kbuckets.entry(&kbucket::Key::new(peer.clone())).value() { + if let Some(addrs) = self.kbuckets.entry(&kbucket::Key::from(*peer)).value() { if addrs.replace(old, new) { debug!("Address '{}' replaced with '{}' for peer '{}'.", old, new, peer); } else { @@ -1550,7 +1550,7 @@ where err: &dyn error::Error ) { if let Some(peer_id) = peer_id { - let key = kbucket::Key::new(peer_id.clone()); + let key = kbucket::Key::from(*peer_id); if let Some(addrs) = self.kbuckets.entry(&key).value() { // TODO: Ideally, the address should only be removed if the error can @@ -2403,7 +2403,7 @@ impl QueryInfo { fn to_request(&self, query_id: QueryId) -> KademliaHandlerIn { match &self { QueryInfo::Bootstrap { peer, .. } => KademliaHandlerIn::FindNodeReq { - key: peer.clone().into_bytes(), + key: peer.to_bytes(), user_data: query_id, }, QueryInfo::GetClosestPeers { key, .. } => KademliaHandlerIn::FindNodeReq { diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs index 81533a5b5b7..e1b35105d70 100644 --- a/protocols/kad/src/behaviour/test.rs +++ b/protocols/kad/src/behaviour/test.rs @@ -239,13 +239,13 @@ fn query_iter() { // Ask the first peer in the list to search a random peer. The search should // propagate forwards through the list of peers. 
let search_target = PeerId::random(); - let search_target_key = kbucket::Key::new(search_target.clone()); - let qid = swarms[0].get_closest_peers(search_target.clone()); + let search_target_key = kbucket::Key::from(search_target); + let qid = swarms[0].get_closest_peers(search_target); match swarms[0].query(&qid) { Some(q) => match q.info() { QueryInfo::GetClosestPeers { key } => { - assert_eq!(&key[..], search_target.borrow() as &[u8]) + assert_eq!(&key[..], search_target.to_bytes().as_slice()) }, i => panic!("Unexpected query info: {:?}", i) } @@ -268,7 +268,7 @@ fn query_iter() { id, result: QueryResult::GetClosestPeers(Ok(ok)), .. })) => { assert_eq!(id, qid); - assert_eq!(&ok.key[..], search_target.as_bytes()); + assert_eq!(&ok.key[..], search_target.to_bytes().as_slice()); assert_eq!(swarm_ids[i], expected_swarm_id); assert_eq!(swarm.queries.size(), 0); assert!(expected_peer_ids.iter().all(|p| ok.peers.contains(p))); @@ -310,7 +310,7 @@ fn unresponsive_not_returned_direct() { // Ask first to search a random value. let search_target = PeerId::random(); - swarms[0].get_closest_peers(search_target.clone()); + swarms[0].get_closest_peers(search_target); block_on( poll_fn(move |ctx| { @@ -320,7 +320,7 @@ fn unresponsive_not_returned_direct() { Poll::Ready(Some(KademliaEvent::QueryResult { result: QueryResult::GetClosestPeers(Ok(ok)), .. })) => { - assert_eq!(&ok.key[..], search_target.as_bytes()); + assert_eq!(&ok.key[..], search_target.to_bytes().as_slice()); assert_eq!(ok.peers.len(), 0); return Poll::Ready(()); } @@ -360,7 +360,7 @@ fn unresponsive_not_returned_indirect() { // Ask second to search a random value. let search_target = PeerId::random(); - swarms[1].get_closest_peers(search_target.clone()); + swarms[1].get_closest_peers(search_target); block_on( poll_fn(move |ctx| { @@ -370,7 +370,7 @@ fn unresponsive_not_returned_indirect() { Poll::Ready(Some(KademliaEvent::QueryResult { result: QueryResult::GetClosestPeers(Ok(ok)), .. 
})) => { - assert_eq!(&ok.key[..], search_target.as_bytes()); + assert_eq!(&ok.key[..], search_target.to_bytes().as_slice()); assert_eq!(ok.peers.len(), 1); assert_eq!(ok.peers[0], first_peer_id); return Poll::Ready(()); @@ -570,8 +570,8 @@ fn put_record() { .cloned() .collect::>(); expected.sort_by(|id1, id2| - kbucket::Key::new(id1.clone()).distance(&key).cmp( - &kbucket::Key::new(id2.clone()).distance(&key))); + kbucket::Key::from(*id1).distance(&key).cmp( + &kbucket::Key::from(*id2).distance(&key))); let expected = expected .into_iter() @@ -838,8 +838,8 @@ fn add_provider() { .collect::>(); let kbucket_key = kbucket::Key::new(key); expected.sort_by(|id1, id2| - kbucket::Key::new(id1.clone()).distance(&kbucket_key).cmp( - &kbucket::Key::new(id2.clone()).distance(&kbucket_key))); + kbucket::Key::from(*id1).distance(&kbucket_key).cmp( + &kbucket::Key::from(*id2).distance(&kbucket_key))); let expected = expected .into_iter() @@ -1084,7 +1084,7 @@ fn manual_bucket_inserts() { routable.push(peer); if expected.is_empty() { for peer in routable.iter() { - let bucket = swarm.kbucket(peer.clone()).unwrap(); + let bucket = swarm.kbucket(*peer).unwrap(); assert!(bucket.iter().all(|e| e.node.key.preimage() != peer)); } return Poll::Ready(()) diff --git a/protocols/kad/src/kbucket/bucket.rs b/protocols/kad/src/kbucket/bucket.rs index dc83a04d693..5a7ee8817bb 100644 --- a/protocols/kad/src/kbucket/bucket.rs +++ b/protocols/kad/src/kbucket/bucket.rs @@ -432,7 +432,7 @@ mod tests { let mut bucket = KBucket::, ()>::new(timeout); let num_nodes = g.gen_range(1, K_VALUE.get() + 1); for _ in 0 .. num_nodes { - let key = Key::new(PeerId::random()); + let key = Key::from(PeerId::random()); let node = Node { key: key.clone(), value: () }; let status = NodeStatus::arbitrary(g); match bucket.insert(node, status) { @@ -464,7 +464,7 @@ mod tests { fn fill_bucket(bucket: &mut KBucket, ()>, status: NodeStatus) { let num_entries_start = bucket.num_entries(); for i in 0 .. 
K_VALUE.get() - num_entries_start { - let key = Key::new(PeerId::random()); + let key = Key::from(PeerId::random()); let node = Node { key, value: () }; assert_eq!(InsertResult::Inserted, bucket.insert(node, status)); assert_eq!(bucket.num_entries(), num_entries_start + i + 1); @@ -482,7 +482,7 @@ mod tests { // Fill the bucket, thereby populating the expected lists in insertion order. for status in status { - let key = Key::new(PeerId::random()); + let key = Key::from(PeerId::random()); let node = Node { key: key.clone(), value: () }; let full = bucket.num_entries() == K_VALUE.get(); match bucket.insert(node, status) { @@ -529,7 +529,7 @@ mod tests { fill_bucket(&mut bucket, NodeStatus::Disconnected); // Trying to insert another disconnected node fails. - let key = Key::new(PeerId::random()); + let key = Key::from(PeerId::random()); let node = Node { key, value: () }; match bucket.insert(node, NodeStatus::Disconnected) { InsertResult::Full => {}, @@ -544,7 +544,7 @@ mod tests { // Add a connected node, which is expected to be pending, scheduled to // replace the first (i.e. least-recently connected) node. - let key = Key::new(PeerId::random()); + let key = Key::from(PeerId::random()); let node = Node { key: key.clone(), value: () }; match bucket.insert(node.clone(), NodeStatus::Connected) { InsertResult::Pending { disconnected } => @@ -577,7 +577,7 @@ mod tests { assert_eq!(K_VALUE.get(), bucket.num_entries()); // Trying to insert another connected node fails. - let key = Key::new(PeerId::random()); + let key = Key::from(PeerId::random()); let node = Node { key, value: () }; match bucket.insert(node, NodeStatus::Connected) { InsertResult::Full => {}, @@ -593,7 +593,7 @@ mod tests { let first_disconnected = first.clone(); // Add a connected pending node. 
- let key = Key::new(PeerId::random()); + let key = Key::from(PeerId::random()); let node = Node { key: key.clone(), value: () }; if let InsertResult::Pending { disconnected } = bucket.insert(node, NodeStatus::Connected) { assert_eq!(&disconnected, &first_disconnected.key); diff --git a/protocols/kad/src/kbucket/key.rs b/protocols/kad/src/kbucket/key.rs index 56f4140bd23..e8d1af3e8b1 100644 --- a/protocols/kad/src/kbucket/key.rs +++ b/protocols/kad/src/kbucket/key.rs @@ -103,7 +103,11 @@ impl From for Key { impl From for Key { fn from(p: PeerId) -> Self { - Key::new(p) + let bytes = KeyBytes(Sha256::digest(&p.to_bytes())); + Key { + preimage: p, + bytes + } } } diff --git a/protocols/kad/src/protocol.rs b/protocols/kad/src/protocol.rs index 58ca1c3d840..7bac1dd7c2a 100644 --- a/protocols/kad/src/protocol.rs +++ b/protocols/kad/src/protocol.rs @@ -101,7 +101,7 @@ impl TryFrom for KadPeer { fn try_from(peer: proto::message::Peer) -> Result { // TODO: this is in fact a CID; not sure if this should be handled in `from_bytes` or // as a special case here - let node_id = PeerId::from_bytes(peer.id) + let node_id = PeerId::from_bytes(&peer.id) .map_err(|_| invalid_data("invalid peer id"))?; let mut addrs = Vec::with_capacity(peer.addrs.len()); @@ -126,7 +126,7 @@ impl TryFrom for KadPeer { impl Into for KadPeer { fn into(self) -> proto::message::Peer { proto::message::Peer { - id: self.node_id.into_bytes(), + id: self.node_id.to_bytes(), addrs: self.multiaddrs.into_iter().map(|a| a.to_vec()).collect(), connection: { let ct: proto::message::ConnectionType = self.connection_ty.into(); @@ -533,7 +533,7 @@ fn record_from_proto(record: proto::Record) -> Result { let publisher = if !record.publisher.is_empty() { - PeerId::from_bytes(record.publisher) + PeerId::from_bytes(&record.publisher) .map(Some) .map_err(|_| invalid_data("Invalid publisher peer ID."))? 
} else { @@ -554,7 +554,7 @@ fn record_to_proto(record: Record) -> proto::Record { proto::Record { key: record.key.to_vec(), value: record.value, - publisher: record.publisher.map(PeerId::into_bytes).unwrap_or_default(), + publisher: record.publisher.map(|id| id.to_bytes()).unwrap_or_default(), ttl: record.expires .map(|t| { let now = Instant::now(); diff --git a/protocols/kad/src/query/peers/closest/disjoint.rs b/protocols/kad/src/query/peers/closest/disjoint.rs index 7002444073c..b295355634b 100644 --- a/protocols/kad/src/query/peers/closest/disjoint.rs +++ b/protocols/kad/src/query/peers/closest/disjoint.rs @@ -770,7 +770,7 @@ mod tests { impl Graph { fn get_closest_peer(&self, target: &KeyBytes) -> PeerId { self.0.iter() - .map(|(peer_id, _)| (target.distance(&Key::new(peer_id.clone())), peer_id)) + .map(|(peer_id, _)| (target.distance(&Key::from(*peer_id)), peer_id)) .fold(None, |acc, (distance_b, peer_id_b)| { match acc { None => Some((distance_b, peer_id_b)), @@ -848,7 +848,7 @@ mod tests { let mut known_closest_peers = graph.0.iter() .take(K_VALUE.get()) - .map(|(key, _peers)| Key::new(key.clone())) + .map(|(key, _peers)| Key::from(*key)) .collect::>(); known_closest_peers.sort_unstable_by(|a, b| { target.distance(a).cmp(&target.distance(b)) @@ -934,7 +934,7 @@ mod tests { } } - let mut result = iter.into_result().into_iter().map(Key::new).collect::>(); + let mut result = iter.into_result().into_iter().map(Key::from).collect::>(); result.sort_unstable_by(|a, b| { target.distance(a).cmp(&target.distance(b)) }); diff --git a/protocols/kad/src/record/store/memory.rs b/protocols/kad/src/record/store/memory.rs index 6f2b95da648..c0bb219ae49 100644 --- a/protocols/kad/src/record/store/memory.rs +++ b/protocols/kad/src/record/store/memory.rs @@ -78,7 +78,7 @@ impl MemoryStore { /// Creates a new `MemoryRecordStore` with the given configuration. 
pub fn with_config(local_id: PeerId, config: MemoryStoreConfig) -> Self { MemoryStore { - local_key: kbucket::Key::new(local_id), + local_key: kbucket::Key::from(local_id), config, records: HashMap::default(), provided: HashSet::default(), @@ -161,9 +161,9 @@ impl<'a> RecordStore<'a> for MemoryStore { // It is a new provider record for that key. let local_key = self.local_key.clone(); let key = kbucket::Key::new(record.key.clone()); - let provider = kbucket::Key::new(record.provider.clone()); + let provider = kbucket::Key::from(record.provider); if let Some(i) = providers.iter().position(|p| { - let pk = kbucket::Key::new(p.provider.clone()); + let pk = kbucket::Key::from(p.provider); provider.distance(&key) < pk.distance(&key) }) { // Insert the new provider. @@ -225,7 +225,7 @@ mod tests { fn distance(r: &ProviderRecord) -> kbucket::Distance { kbucket::Key::new(r.key.clone()) - .distance(&kbucket::Key::new(r.provider.clone())) + .distance(&kbucket::Key::from(r.provider)) } #[test] @@ -318,4 +318,3 @@ mod tests { } } } - diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index 16c58ca0f9e..7c45c4dc268 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -1,4 +1,12 @@ -# 0.26.0 [unreleased] +# 0.27.0 [unreleased] + +- Update `libp2p-swarm` and `libp2p-core`. + +# 0.26.0 [2020-12-08] + +- Create multiple multicast response packets as required to avoid + hitting the limit of 9000 bytes per MDNS packet. + [PR 1877](https://github.com/libp2p/rust-libp2p/pull/1877). - Detect interface changes and join the MDNS multicast group on all interfaces as they become available. 
diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index d1af2fde669..8a326677f92 100644 --- a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "libp2p-mdns" edition = "2018" -version = "0.26.0" +version = "0.27.0" description = "Implementation of the libp2p mDNS discovery method" authors = ["Parity Technologies "] license = "MIT" @@ -16,8 +16,8 @@ dns-parser = "0.8.0" futures = "0.3.8" if-watch = "0.1.6" lazy_static = "1.4.0" -libp2p-core = { version = "0.25.0", path = "../../core" } -libp2p-swarm = { version = "0.25.0", path = "../../swarm" } +libp2p-core = { version = "0.26.0", path = "../../core" } +libp2p-swarm = { version = "0.26.0", path = "../../swarm" } log = "0.4.11" rand = "0.7.3" smallvec = "1.5.0" diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index f2e9ee524fd..6d3dcde688c 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -199,13 +199,14 @@ impl NetworkBehaviour for Mdns { MdnsPacket::Query(query) => { // MaybeBusyMdnsService should always be Free. if let MdnsBusyWrapper::Free(ref mut service) = self.service { - let resp = build_query_response( + for packet in build_query_response( query.query_id(), params.local_peer_id().clone(), params.listened_addresses().into_iter(), MDNS_RESPONSE_TTL, - ); - service.enqueue_response(resp.unwrap()); + ) { + service.enqueue_response(packet) + } } else { debug_assert!(false); } }, MdnsPacket::Response(response) => { diff --git a/protocols/mdns/src/dns.rs b/protocols/mdns/src/dns.rs index 81adcdc28c6..4627d7d5e4e 100644 --- a/protocols/mdns/src/dns.rs +++ b/protocols/mdns/src/dns.rs @@ -18,16 +18,37 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! Contains methods that handle the DNS encoding and decoding capabilities not available in the -//! `dns_parser` library. +//! (M)DNS encoding and decoding on top of the `dns_parser` library. 
use crate::{META_QUERY_SERVICE, SERVICE_NAME}; use libp2p_core::{Multiaddr, PeerId}; use std::{borrow::Cow, cmp, error, fmt, str, time::Duration}; -/// Maximum size of a DNS label as per RFC1035 +/// Maximum size of a DNS label as per RFC1035. const MAX_LABEL_LENGTH: usize = 63; +/// DNS TXT records can have up to 255 characters as a single string value. +/// +/// Current values are usually around 170-190 bytes long, varying primarily +/// with the length of the contained `Multiaddr`. +const MAX_TXT_VALUE_LENGTH: usize = 255; + +/// A conservative maximum size (in bytes) of a complete TXT record, +/// as encoded by [`append_txt_record`]. +const MAX_TXT_RECORD_SIZE: usize = MAX_TXT_VALUE_LENGTH + 45; + +/// The maximum DNS packet size is 9000 bytes less the maximum +/// sizes of the IP (60) and UDP (8) headers. +const MAX_PACKET_SIZE: usize = 9000 - 68; + +/// A conservative maximum number of records that can be packed into +/// a single DNS UDP packet, allowing up to 100 bytes of MDNS packet +/// header data to be added by [`query_response_packet()`]. +const MAX_RECORDS_PER_PACKET: usize = (MAX_PACKET_SIZE - 100) / MAX_TXT_RECORD_SIZE; + +/// An encoded MDNS packet. +pub type MdnsPacket = Vec; + /// Decodes a `` (as defined by RFC1035) into a `Vec` of ASCII characters. // TODO: better error type? pub fn decode_character_string(mut from: &[u8]) -> Result, ()> { @@ -49,7 +70,7 @@ pub fn decode_character_string(mut from: &[u8]) -> Result, ()> { } /// Builds the binary representation of a DNS query to send on the network. -pub fn build_query() -> Vec { +pub fn build_query() -> MdnsPacket { let mut out = Vec::with_capacity(33); // Program-generated transaction ID; unused by our implementation. @@ -80,7 +101,7 @@ pub fn build_query() -> Vec { out } -/// Builds the response to the DNS query. +/// Builds the response to an address discovery DNS query. /// /// If there are more than 2^16-1 addresses, ignores the rest. 
pub fn build_query_response( @@ -88,60 +109,59 @@ pub fn build_query_response( peer_id: PeerId, addresses: impl ExactSizeIterator, ttl: Duration, -) -> Result, MdnsResponseError> { +) -> Vec { // Convert the TTL into seconds. let ttl = duration_to_secs(ttl); // Add a limit to 2^16-1 addresses, as the protocol limits to this number. - let addresses = addresses.take(65535); + let mut addresses = addresses.take(65535); - // This capacity was determined empirically and is a reasonable upper limit. - let mut out = Vec::with_capacity(320); + let peer_id_bytes = encode_peer_id(&peer_id); + debug_assert!(peer_id_bytes.len() <= 0xffff); - append_u16(&mut out, id); - // 0x84 flag for an answer. - append_u16(&mut out, 0x8400); - // Number of questions, answers, authorities, additionals. - append_u16(&mut out, 0x0); - append_u16(&mut out, 0x1); - append_u16(&mut out, 0x0); - append_u16(&mut out, addresses.len() as u16); + // The accumulated response packets. + let mut packets = Vec::new(); - // Our single answer. - // The name. - append_qname(&mut out, SERVICE_NAME); + // The records accumulated per response packet. + let mut records = Vec::with_capacity(addresses.len() * MAX_TXT_RECORD_SIZE); - // Flags. - append_u16(&mut out, 0x000c); - append_u16(&mut out, 0x0001); - - // TTL for the answer - append_u32(&mut out, ttl); + // Encode the addresses as TXT records, and multiple TXT records into a + // response packet. + while let Some(addr) = addresses.next() { + let txt_to_send = format!("dnsaddr={}/p2p/{}", addr.to_string(), peer_id.to_base58()); + let mut txt_record = Vec::with_capacity(txt_to_send.len()); + match append_txt_record(&mut txt_record, &peer_id_bytes, ttl, &txt_to_send) { + Ok(()) => { + records.push(txt_record); + } + Err(e) => { + log::warn!("Excluding address {} from response: {:?}", addr, e); + } + } - // Peer Id. 
- let peer_id_bytes = encode_peer_id(&peer_id); - debug_assert!(peer_id_bytes.len() <= 0xffff); - append_u16(&mut out, peer_id_bytes.len() as u16); - out.extend_from_slice(&peer_id_bytes); + if records.len() == MAX_RECORDS_PER_PACKET { + packets.push(query_response_packet(id, &peer_id_bytes, &records, ttl)); + records.clear(); + } + } - // The TXT records for answers. - for addr in addresses { - let txt_to_send = format!("dnsaddr={}/p2p/{}", addr.to_string(), peer_id.to_base58()); - let mut txt_to_send_bytes = Vec::with_capacity(txt_to_send.len()); - append_character_string(&mut txt_to_send_bytes, txt_to_send.as_bytes())?; - append_txt_record(&mut out, &peer_id_bytes, ttl, Some(&txt_to_send_bytes[..]))?; + // If there are still unpacked records, i.e. if the number of records is not + // a multiple of `MAX_RECORDS_PER_PACKET`, create a final packet. + if !records.is_empty() { + packets.push(query_response_packet(id, &peer_id_bytes, &records, ttl)); } - // The DNS specs specify that the maximum allowed size is 9000 bytes. - if out.len() > 9000 { - return Err(MdnsResponseError::ResponseTooLong); + // If no packets have been built at all, because `addresses` is empty, + // construct an empty response packet. + if packets.is_empty() { + packets.push(query_response_packet(id, &peer_id_bytes, &Vec::new(), ttl)); } - Ok(out) + packets } -/// Builds the response to the DNS query. -pub fn build_service_discovery_response(id: u16, ttl: Duration) -> Vec { +/// Builds the response to a service discovery DNS query. +pub fn build_service_discovery_response(id: u16, ttl: Duration) -> MdnsPacket { // Convert the TTL into seconds. let ttl = duration_to_secs(ttl); @@ -182,6 +202,42 @@ pub fn build_service_discovery_response(id: u16, ttl: Duration) -> Vec { out } +/// Constructs an MDNS query response packet for an address lookup. 
+fn query_response_packet(id: u16, peer_id: &Vec, records: &Vec>, ttl: u32) -> MdnsPacket { + let mut out = Vec::with_capacity(records.len() * MAX_TXT_RECORD_SIZE); + + append_u16(&mut out, id); + // 0x84 flag for an answer. + append_u16(&mut out, 0x8400); + // Number of questions, answers, authorities, additionals. + append_u16(&mut out, 0x0); + append_u16(&mut out, 0x1); + append_u16(&mut out, 0x0); + append_u16(&mut out, records.len() as u16); + + // Our single answer. + // The name. + append_qname(&mut out, SERVICE_NAME); + + // Flags. + append_u16(&mut out, 0x000c); + append_u16(&mut out, 0x0001); + + // TTL for the answer + append_u32(&mut out, ttl); + + // Peer Id. + append_u16(&mut out, peer_id.len() as u16); + out.extend_from_slice(&peer_id); + + // The TXT records. + for record in records { + out.extend_from_slice(&record); + } + + out +} + /// Returns the number of secs of a duration. fn duration_to_secs(duration: Duration) -> u32 { let secs = duration @@ -225,7 +281,7 @@ fn segment_peer_id(peer_id: String) -> String { /// Combines and encodes a `PeerId` and service name for a DNS query. fn encode_peer_id(peer_id: &PeerId) -> Vec { // DNS-safe encoding for the Peer ID - let raw_peer_id = data_encoding::BASE32_DNSCURVE.encode(&peer_id.as_bytes()); + let raw_peer_id = data_encoding::BASE32_DNSCURVE.encode(&peer_id.to_bytes()); // ensure we don't have any labels over 63 bytes long let encoded_peer_id = segment_peer_id(raw_peer_id); let service_name = str::from_utf8(SERVICE_NAME).expect("SERVICE_NAME is always ASCII"); @@ -262,21 +318,19 @@ fn append_qname(out: &mut Vec, name: &[u8]) { } /// Appends a `` (as defined by RFC1035) to the `Vec`. 
-fn append_character_string(out: &mut Vec, ascii_str: &[u8]) -> Result<(), MdnsResponseError> { +fn append_character_string(out: &mut Vec, ascii_str: &str) -> Result<(), MdnsResponseError> { if !ascii_str.is_ascii() { return Err(MdnsResponseError::NonAsciiMultiaddr); } - if !ascii_str.iter().any(|&c| c == b' ') { - for &chr in ascii_str.iter() { - out.push(chr); - } + if !ascii_str.bytes().any(|c| c == b' ') { + out.extend_from_slice(ascii_str.as_bytes()); return Ok(()); } out.push(b'"'); - for &chr in ascii_str.iter() { + for &chr in ascii_str.as_bytes() { if chr == b'\\' { out.push(b'\\'); out.push(b'\\'); @@ -292,19 +346,19 @@ fn append_character_string(out: &mut Vec, ascii_str: &[u8]) -> Result<(), Md Ok(()) } -/// Appends a TXT record to the answer in `out`. +/// Appends a TXT record to `out`. fn append_txt_record<'a>( out: &mut Vec, name: &[u8], ttl_secs: u32, - entries: impl IntoIterator, + value: &str, ) -> Result<(), MdnsResponseError> { // The name. out.extend_from_slice(name); // Flags. out.push(0x00); - out.push(0x10); // TXT record. + out.push(0x10); // TXT record. out.push(0x80); out.push(0x01); @@ -312,35 +366,23 @@ fn append_txt_record<'a>( append_u32(out, ttl_secs); // Add the strings. - let mut buffer = Vec::new(); - for entry in entries { - if entry.len() > u8::max_value() as usize { - return Err(MdnsResponseError::TxtRecordTooLong); - } - buffer.push(entry.len() as u8); - buffer.extend_from_slice(entry); - } - - // It is illegal to have an empty TXT record, but we can have one zero-bytes entry, which does - // the same. 
- if buffer.is_empty() { - buffer.push(0); - } - - if buffer.len() > u16::max_value() as usize { + if value.len() > MAX_TXT_VALUE_LENGTH { return Err(MdnsResponseError::TxtRecordTooLong); } + let mut buffer = Vec::new(); + buffer.push(value.len() as u8); + append_character_string(&mut buffer, value)?; + append_u16(out, buffer.len() as u16); out.extend_from_slice(&buffer); Ok(()) } -/// Error that can happen when producing a DNS response. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum MdnsResponseError { +/// Errors that can occur on encoding an MDNS response. +#[derive(Debug)] +enum MdnsResponseError { TxtRecordTooLong, NonAsciiMultiaddr, - ResponseTooLong, } impl fmt::Display for MdnsResponseError { @@ -349,11 +391,8 @@ impl fmt::Display for MdnsResponseError { MdnsResponseError::TxtRecordTooLong => { write!(f, "TXT record invalid because it is too long") } - MdnsResponseError::NonAsciiMultiaddr => write!( - f, - "A multiaddr contains non-ASCII characters when serializd" - ), - MdnsResponseError::ResponseTooLong => write!(f, "DNS response is too long"), + MdnsResponseError::NonAsciiMultiaddr => + write!(f, "A multiaddr contains non-ASCII characters when serialized"), } } } @@ -378,14 +417,15 @@ mod tests { let my_peer_id = identity::Keypair::generate_ed25519().public().into_peer_id(); let addr1 = "/ip4/1.2.3.4/tcp/5000".parse().unwrap(); let addr2 = "/ip6/::1/udp/10000".parse().unwrap(); - let query = build_query_response( + let packets = build_query_response( 0xf8f8, my_peer_id, vec![addr1, addr2].into_iter(), Duration::from_secs(60), - ) - .unwrap(); - assert!(Packet::parse(&query).is_ok()); + ); + for packet in packets { + assert!(Packet::parse(&packet).is_ok()); + } } #[test] diff --git a/protocols/mdns/src/lib.rs b/protocols/mdns/src/lib.rs index e8e152b9c8e..1d3ffa03d10 100644 --- a/protocols/mdns/src/lib.rs +++ b/protocols/mdns/src/lib.rs @@ -30,9 +30,9 @@ //! struct will automatically discover other libp2p nodes on the local network. //! 
-/// Hardcoded name of the mDNS service. Part of the mDNS libp2p specifications. +/// The DNS service name for all libp2p peers used to query for addresses. const SERVICE_NAME: &[u8] = b"_p2p._udp.local"; -/// Hardcoded name of the service used for DNS-SD. +/// The meta query for looking up the `SERVICE_NAME`. const META_QUERY_SERVICE: &[u8] = b"_services._dns-sd._udp.local"; pub use crate::{ diff --git a/protocols/mdns/src/service.rs b/protocols/mdns/src/service.rs index 1b4dcfa727d..84c535d8fc6 100644 --- a/protocols/mdns/src/service.rs +++ b/protocols/mdns/src/service.rs @@ -29,7 +29,7 @@ use log::warn; use socket2::{Socket, Domain, Type}; use std::{convert::TryFrom, fmt, io, net::{IpAddr, Ipv4Addr, UdpSocket, SocketAddr}, str, time::{Duration, Instant}}; -pub use dns::{MdnsResponseError, build_query_response, build_service_discovery_response}; +pub use dns::{build_query_response, build_service_discovery_response}; lazy_static! { static ref IPV4_MDNS_MULTICAST_ADDRESS: SocketAddr = SocketAddr::from(( @@ -76,13 +76,15 @@ lazy_static! 
{ /// match packet { /// MdnsPacket::Query(query) => { /// println!("Query from {:?}", query.remote_addr()); -/// let resp = build_query_response( +/// let packets = build_query_response( /// query.query_id(), /// my_peer_id.clone(), /// vec![].into_iter(), /// Duration::from_secs(120), -/// ).unwrap(); -/// service.enqueue_response(resp); +/// ); +/// for packet in packets { +/// service.enqueue_response(packet); +/// } /// } /// MdnsPacket::Response(response) => { /// for peer in response.discovered_peers() { @@ -448,7 +450,7 @@ impl MdnsResponse { peer_name.retain(|c| c != '.'); let peer_id = match data_encoding::BASE32_DNSCURVE.decode(peer_name.as_bytes()) { - Ok(bytes) => match PeerId::from_bytes(bytes) { + Ok(bytes) => match PeerId::from_bytes(&bytes) { Ok(id) => id, Err(_) => return None, }, @@ -609,8 +611,10 @@ mod tests { peer_id.clone(), vec![].into_iter(), Duration::from_secs(120), - ).unwrap(); - service.enqueue_response(resp); + ); + for r in resp { + service.enqueue_response(r); + } } MdnsPacket::Response(response) => { for peer in response.discovered_peers() { diff --git a/protocols/noise/CHANGELOG.md b/protocols/noise/CHANGELOG.md index 29a6e0cbbc1..70ae16c6806 100644 --- a/protocols/noise/CHANGELOG.md +++ b/protocols/noise/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.28.0 [unreleased] + +- Update `libp2p-core`. + # 0.27.0 [2020-11-25] - Update `libp2p-core`. diff --git a/protocols/noise/Cargo.toml b/protocols/noise/Cargo.toml index 59b6f9eac7b..135de2def70 100644 --- a/protocols/noise/Cargo.toml +++ b/protocols/noise/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "libp2p-noise" description = "Cryptographic handshake protocol using the noise framework." 
-version = "0.27.0" +version = "0.28.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,7 +12,7 @@ bytes = "0.5" curve25519-dalek = "3.0.0" futures = "0.3.1" lazy_static = "1.2" -libp2p-core = { version = "0.25.0", path = "../../core" } +libp2p-core = { version = "0.26.0", path = "../../core" } log = "0.4" prost = "0.6.1" rand = "0.7.2" diff --git a/protocols/ping/CHANGELOG.md b/protocols/ping/CHANGELOG.md index a86557c4b9f..f2c6e9ef749 100644 --- a/protocols/ping/CHANGELOG.md +++ b/protocols/ping/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.26.0 [unreleased] + +- Update `libp2p-swarm` and `libp2p-core`. + # 0.25.0 [2020-11-25] - Update `libp2p-swarm` and `libp2p-core`. diff --git a/protocols/ping/Cargo.toml b/protocols/ping/Cargo.toml index 3aa96f74857..51abeb2ecf6 100644 --- a/protocols/ping/Cargo.toml +++ b/protocols/ping/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-ping" edition = "2018" description = "Ping protocol for libp2p" -version = "0.25.0" +version = "0.26.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,8 +11,8 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.25.0", path = "../../core" } -libp2p-swarm = { version = "0.25.0", path = "../../swarm" } +libp2p-core = { version = "0.26.0", path = "../../core" } +libp2p-swarm = { version = "0.26.0", path = "../../swarm" } log = "0.4.1" rand = "0.7.2" void = "1.0" diff --git a/protocols/plaintext/CHANGELOG.md b/protocols/plaintext/CHANGELOG.md index 635ae3cee98..31557d66374 100644 --- a/protocols/plaintext/CHANGELOG.md +++ b/protocols/plaintext/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.26.0 [unreleased] + +- Update `libp2p-core`. + # 0.25.0 [2020-11-25] - Update `libp2p-core`. 
diff --git a/protocols/plaintext/Cargo.toml b/protocols/plaintext/Cargo.toml index 017d2611cd8..25c8fe60b21 100644 --- a/protocols/plaintext/Cargo.toml +++ b/protocols/plaintext/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-plaintext" edition = "2018" description = "Plaintext encryption dummy protocol for libp2p" -version = "0.25.0" +version = "0.26.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,7 +13,7 @@ categories = ["network-programming", "asynchronous"] bytes = "0.5" futures = "0.3.1" futures_codec = "0.4.0" -libp2p-core = { version = "0.25.0", path = "../../core" } +libp2p-core = { version = "0.26.0", path = "../../core" } log = "0.4.8" prost = "0.6.1" unsigned-varint = { version = "0.5.1", features = ["futures-codec"] } diff --git a/protocols/plaintext/src/handshake.rs b/protocols/plaintext/src/handshake.rs index 983a2d0ee8f..dda29af496d 100644 --- a/protocols/plaintext/src/handshake.rs +++ b/protocols/plaintext/src/handshake.rs @@ -53,7 +53,7 @@ pub struct Remote { impl HandshakeContext { fn new(config: PlainText2Config) -> Result { let exchange = Exchange { - id: Some(config.local_public_key.clone().into_peer_id().into_bytes()), + id: Some(config.local_public_key.clone().into_peer_id().to_bytes()), pubkey: Some(config.local_public_key.clone().into_protobuf_encoding()) }; let mut buf = Vec::with_capacity(exchange.encoded_len()); @@ -86,7 +86,7 @@ impl HandshakeContext { return Err(PlainTextError::InvalidPayload(None)); }, }; - let peer_id = match PeerId::from_bytes(prop.id.unwrap_or_default()) { + let peer_id = match PeerId::from_bytes(&prop.id.unwrap_or_default()) { Ok(p) => p, Err(_) => { debug!("failed to parse remote's exchange's id protobuf"); diff --git a/protocols/request-response/CHANGELOG.md b/protocols/request-response/CHANGELOG.md index ed0d5d0e5e6..3f993ea9912 100644 --- a/protocols/request-response/CHANGELOG.md +++ b/protocols/request-response/CHANGELOG.md @@ -1,3 
+1,15 @@ +# 0.8.0 [unreleased] + +- Update `libp2p-swarm` and `libp2p-core`. + +# 0.7.0 [2020-12-08] + +- Refine emitted events for inbound requests, introducing + the `ResponseSent` event and the `ResponseOmission` + inbound failures. This effectively removes previous + support for one-way protocols without responses. + [PR 1867](https://github.com/libp2p/rust-libp2p/pull/1867). + # 0.6.0 [2020-11-25] - Update `libp2p-swarm` and `libp2p-core`. diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml index 89d97a6f0ec..5b51cd8b57a 100644 --- a/protocols/request-response/Cargo.toml +++ b/protocols/request-response/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-request-response" edition = "2018" description = "Generic Request/Response Protocols" -version = "0.6.0" +version = "0.8.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,8 +13,8 @@ categories = ["network-programming", "asynchronous"] async-trait = "0.1" bytes = "0.5.6" futures = "0.3.1" -libp2p-core = { version = "0.25.0", path = "../../core" } -libp2p-swarm = { version = "0.25.0", path = "../../swarm" } +libp2p-core = { version = "0.26.0", path = "../../core" } +libp2p-swarm = { version = "0.26.0", path = "../../swarm" } log = "0.4.11" lru = "0.6" minicbor = { version = "0.7", features = ["std", "derive"] } diff --git a/protocols/request-response/src/handler.rs b/protocols/request-response/src/handler.rs index fe374f54877..b3f11888131 100644 --- a/protocols/request-response/src/handler.rs +++ b/protocols/request-response/src/handler.rs @@ -119,22 +119,29 @@ pub enum RequestResponseHandlerEvent where TCodec: RequestResponseCodec { - /// An inbound request. + /// A request has been received. Request { request_id: RequestId, request: TCodec::Request, sender: oneshot::Sender }, - /// An inbound response. + /// A response has been received. 
Response { request_id: RequestId, response: TCodec::Response }, - /// An outbound upgrade (i.e. request) timed out. + /// A response to an inbound request has been sent. + ResponseSent(RequestId), + /// A response to an inbound request was omitted as a result + /// of dropping the response `sender` of an inbound `Request`. + ResponseOmission(RequestId), + /// An outbound request timed out while sending the request + /// or waiting for the response. OutboundTimeout(RequestId), /// An outbound request failed to negotiate a mutually supported protocol. OutboundUnsupportedProtocols(RequestId), - /// An inbound request timed out. + /// An inbound request timed out while waiting for the request + /// or sending the response. InboundTimeout(RequestId), /// An inbound request failed to negotiate a mutually supported protocol. InboundUnsupportedProtocols(RequestId), @@ -187,9 +194,16 @@ where fn inject_fully_negotiated_inbound( &mut self, - (): (), - _: RequestId + sent: bool, + request_id: RequestId ) { + if sent { + self.pending_events.push_back( + RequestResponseHandlerEvent::ResponseSent(request_id)) + } else { + self.pending_events.push_back( + RequestResponseHandlerEvent::ResponseOmission(request_id)) + } } fn inject_fully_negotiated_outbound( diff --git a/protocols/request-response/src/handler/protocol.rs b/protocols/request-response/src/handler/protocol.rs index 0fc2b99df9f..81e19e4b180 100644 --- a/protocols/request-response/src/handler/protocol.rs +++ b/protocols/request-response/src/handler/protocol.rs @@ -93,7 +93,7 @@ impl InboundUpgrade for ResponseProtocol where TCodec: RequestResponseCodec + Send + 'static, { - type Output = (); + type Output = bool; type Error = io::Error; type Future = BoxFuture<'static, Result>; @@ -105,10 +105,12 @@ where if let Ok(response) = self.response_receiver.await { let write = self.codec.write_response(&protocol, &mut io, response); write.await?; + } else { + return Ok(false) } } io.close().await?; - Ok(()) + Ok(true) }.boxed() 
} } diff --git a/protocols/request-response/src/lib.rs b/protocols/request-response/src/lib.rs index 31ee6af9304..286c3f29a91 100644 --- a/protocols/request-response/src/lib.rs +++ b/protocols/request-response/src/lib.rs @@ -46,18 +46,6 @@ //! For that purpose, [`RequestResponseCodec::Protocol`] is typically //! instantiated with a sum type. //! -//! ## One-Way Protocols -//! -//! The implementation supports one-way protocols that do not -//! have responses. In these cases the [`RequestResponseCodec::Response`] can -//! be defined as `()` and [`RequestResponseCodec::read_response`] as well as -//! [`RequestResponseCodec::write_response`] given the obvious implementations. -//! Note that `RequestResponseMessage::Response` will still be emitted, -//! immediately after the request has been sent, since `RequestResponseCodec::read_response` -//! will not actually read anything from the given I/O stream. -//! [`RequestResponse::send_response`] need not be called for one-way protocols, -//! i.e. the [`ResponseChannel`] may just be dropped. -//! //! ## Limited Protocol Support //! //! It is possible to only support inbound or outbound requests for @@ -115,9 +103,11 @@ pub enum RequestResponseMessage, }, /// A response message. @@ -151,6 +141,14 @@ pub enum RequestResponseEvent error: OutboundFailure, }, /// An inbound request failed. + /// + /// > **Note**: The case whereby a connection on which a response is sent + /// > closes after [`RequestResponse::send_response`] already succeeded + /// > but before the response could be sent on the connection is reflected + /// > by there being no [`RequestResponseEvent::ResponseSent`] event. + /// > Code interested in ensuring a response has been successfully + /// > handed to the transport layer, e.g. before continuing with the next + /// > step in a multi-step protocol, should listen to these events. InboundFailure { /// The peer from whom the request was received. 
peer: PeerId, @@ -159,6 +157,16 @@ pub enum RequestResponseEvent /// The error that occurred. error: InboundFailure, }, + /// A response to an inbound request has been sent. + /// + /// When this event is received, the response has been flushed on + /// the underlying transport connection. + ResponseSent { + /// The peer to whom the response was sent. + peer: PeerId, + /// The ID of the inbound request whose response was sent. + request_id: RequestId, + }, } /// Possible failures occurring in the context of sending @@ -186,14 +194,17 @@ pub enum OutboundFailure { #[derive(Debug)] pub enum InboundFailure { /// The inbound request timed out, either while reading the - /// incoming request or before a response is sent, i.e. if + /// incoming request or before a response is sent, e.g. if /// [`RequestResponse::send_response`] is not called in a /// timely manner. Timeout, - /// The local peer supports none of the requested protocols. + /// The local peer supports none of the protocols requested + /// by the remote. UnsupportedProtocols, - /// The connection closed before a response was delivered. - ConnectionClosed, + /// The local peer failed to respond to an inbound request + /// due to the [`ResponseChannel`] being dropped instead of + /// being passed to [`RequestResponse::send_response`]. + ResponseOmission, } /// A channel for sending a response to an inbound request. @@ -379,17 +390,18 @@ where /// Initiates sending a response to an inbound request. /// - /// If the `ResponseChannel` is already closed due to a timeout, - /// the response is discarded and eventually [`RequestResponseEvent::InboundFailure`] - /// is emitted by `RequestResponse::poll`. + /// If the `ResponseChannel` is already closed due to a timeout or + /// the connection being closed, the response is returned as an `Err` + /// for further handling. Once the response has been successfully sent + /// on the corresponding connection, [`RequestResponseEvent::ResponseSent`] + /// is emitted. 
/// - /// The provided `ResponseChannel` is obtained from a + /// The provided `ResponseChannel` is obtained from an inbound /// [`RequestResponseMessage::Request`]. - pub fn send_response(&mut self, ch: ResponseChannel, rs: TCodec::Response) { - // Fails only if the inbound upgrade timed out waiting for the response, - // in which case the handler emits `RequestResponseHandlerEvent::InboundTimeout` - // which in turn results in `RequestResponseEvent::InboundFailure`. - let _ = ch.sender.send(rs); + pub fn send_response(&mut self, ch: ResponseChannel, rs: TCodec::Response) + -> Result<(), TCodec::Response> + { + ch.sender.send(rs) } /// Adds a known address for a peer that can be used for @@ -577,6 +589,20 @@ where RequestResponseEvent::Message { peer, message } )); } + RequestResponseHandlerEvent::ResponseSent(request_id) => { + self.pending_events.push_back( + NetworkBehaviourAction::GenerateEvent( + RequestResponseEvent::ResponseSent { peer, request_id })); + } + RequestResponseHandlerEvent::ResponseOmission(request_id) => { + self.pending_events.push_back( + NetworkBehaviourAction::GenerateEvent( + RequestResponseEvent::InboundFailure { + peer, + request_id, + error: InboundFailure::ResponseOmission + })); + } RequestResponseHandlerEvent::OutboundTimeout(request_id) => { if let Some((peer, _conn)) = self.pending_responses.remove(&request_id) { self.pending_events.push_back( diff --git a/protocols/request-response/src/throttled.rs b/protocols/request-response/src/throttled.rs index d66b7efb46e..8c12564cb1a 100644 --- a/protocols/request-response/src/throttled.rs +++ b/protocols/request-response/src/throttled.rs @@ -42,7 +42,7 @@ use futures::ready; use libp2p_core::{ConnectedPoint, connection::ConnectionId, Multiaddr, PeerId}; use libp2p_swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; use lru::LruCache; -use std::{collections::{HashMap, VecDeque}, task::{Context, Poll}}; +use std::{collections::{HashMap, HashSet, VecDeque}, task::{Context, 
Poll}}; use std::{cmp::max, num::NonZeroU16}; use super::{ ProtocolSupport, @@ -75,21 +75,20 @@ where limit_overrides: HashMap, /// Pending events to report in `Throttled::poll`. events: VecDeque>>, - /// Current outbound credit grants in flight. - credit_messages: HashMap, /// The current credit ID. - credit_id: u64 + next_grant_id: u64 } -/// Credit information that is sent to remote peers. +/// Information about a credit grant that is sent to remote peers. #[derive(Clone, Copy, Debug)] -struct Credit { - /// A credit ID. Used to deduplicate retransmitted credit messages. - id: u64, +struct Grant { + /// The grant ID. Used to deduplicate retransmitted credit grants. + id: GrantId, /// The ID of the outbound credit grant message. request: RequestId, - /// The number of requests the remote is allowed to send. - amount: u16 + /// The credit given in this grant, i.e. the number of additional + /// requests the remote is allowed to send. + credit: u16 } /// Max. number of inbound requests that can be received. @@ -130,28 +129,81 @@ impl Limit { } } +type GrantId = u64; + +/// Information related to the current send budget with a peer. +#[derive(Clone, Debug)] +struct SendBudget { + /// The last received credit grant. + grant: Option, + /// The remaining credit for requests to send. + remaining: u16, + /// Credit grant requests received and acknowledged where the outcome + /// of the acknowledgement (i.e. response sent) is still undetermined. + /// Used to avoid emitting events for successful (`ResponseSent`) or failed + /// acknowledgements. + received: HashSet, +} + +/// Information related to the current receive budget with a peer. +#[derive(Clone, Debug)] +struct RecvBudget { + /// The grant currently given to the remote but yet to be acknowledged. + /// + /// Set to `Some` when a new grant is sent to the remote, followed + /// by `None` when an acknowledgment or a request is received. The + /// latter is seen as an implicit acknowledgement. 
+ grant: Option, + /// The limit for new credit grants when the `remaining` credit is + /// exhausted. + limit: Limit, + /// The remaining credit for requests to receive. + remaining: u16, + /// Credit grants sent whose outcome is still undetermined. + /// Used to avoid emitting events for failed credit grants. + /// + /// > **Note**: While receiving an inbound request is an implicit + /// > acknowledgement for the last sent `grant`, the outcome of + /// > the outbound request remains undetermined until a success or + /// > failure event is received for that request or the corresponding + /// > connection closes. + sent: HashSet, +} + /// Budget information about a peer. #[derive(Clone, Debug)] struct PeerInfo { - /// Limit that applies to this peer. - limit: Limit, - /// Remaining number of outbound requests that can be sent. - send_budget: u16, - /// Remaining number of inbound requests that can be received. - recv_budget: u16, - /// The ID of the credit message that granted the current `send_budget`. - send_budget_id: Option + send_budget: SendBudget, + recv_budget: RecvBudget, } impl PeerInfo { - fn new(limit: Limit) -> Self { + fn new(recv_limit: Limit) -> Self { PeerInfo { - limit, - send_budget: 1, - recv_budget: 1, - send_budget_id: None + send_budget: SendBudget { + grant: None, + remaining: 1, + received: HashSet::new(), + }, + recv_budget: RecvBudget { + grant: None, + limit: recv_limit, + remaining: 1, + sent: HashSet::new(), + } } } + + fn into_disconnected(mut self) -> Self { + self.send_budget.received = HashSet::new(); + self.send_budget.remaining = 1; + self.recv_budget.sent = HashSet::new(); + self.recv_budget.remaining = max(1, self.recv_budget.remaining); + // Since we potentially reset the remaining receive budget, + // we forget about the potentially still unacknowledged last grant. 
+ self.recv_budget.grant = None; + self + } } impl Throttled @@ -180,8 +232,7 @@ where default_limit: Limit::new(NonZeroU16::new(1).expect("1 > 0")), limit_overrides: HashMap::new(), events: VecDeque::new(), - credit_messages: HashMap::new(), - credit_id: 0 + next_grant_id: 0 } } @@ -195,9 +246,9 @@ where pub fn override_receive_limit(&mut self, p: &PeerId, limit: NonZeroU16) { log::debug!("{:08x}: override limit for {}: {:?}", self.id, p, limit); if let Some(info) = self.peer_info.get_mut(p) { - info.limit.set(limit) + info.recv_budget.limit.set(limit) } else if let Some(info) = self.offline_peer_info.get_mut(p) { - info.limit.set(limit) + info.recv_budget.limit.set(limit) } self.limit_overrides.insert(p.clone(), Limit::new(limit)); } @@ -210,7 +261,7 @@ where /// Has the limit of outbound requests been reached for the given peer? pub fn can_send(&mut self, p: &PeerId) -> bool { - self.peer_info.get(p).map(|i| i.send_budget > 0).unwrap_or(true) + self.peer_info.get(p).map(|i| i.send_budget.remaining > 0).unwrap_or(true) } /// Send a request to a peer. @@ -219,33 +270,32 @@ where /// returned. Sending more outbound requests should only be attempted /// once [`Event::ResumeSending`] has been received from [`NetworkBehaviour::poll`]. 
pub fn send_request(&mut self, p: &PeerId, req: C::Request) -> Result { - let info = - if let Some(info) = self.peer_info.get_mut(p) { - info - } else if let Some(info) = self.offline_peer_info.pop(p) { - if info.recv_budget > 1 { - self.send_credit(p, info.recv_budget - 1) + let connected = &mut self.peer_info; + let disconnected = &mut self.offline_peer_info; + let remaining = + if let Some(info) = connected.get_mut(p).or_else(|| disconnected.get_mut(p)) { + if info.send_budget.remaining == 0 { + log::trace!("{:08x}: no more budget to send another request to {}", self.id, p); + return Err(req) } - self.peer_info.entry(p.clone()).or_insert(info) + info.send_budget.remaining -= 1; + info.send_budget.remaining } else { let limit = self.limit_overrides.get(p).copied().unwrap_or(self.default_limit); - self.peer_info.entry(p.clone()).or_insert(PeerInfo::new(limit)) + let mut info = PeerInfo::new(limit); + info.send_budget.remaining -= 1; + let remaining = info.send_budget.remaining; + self.offline_peer_info.put(p.clone(), info); + remaining }; - if info.send_budget == 0 { - log::trace!("{:08x}: no more budget to send another request to {}", self.id, p); - return Err(req) - } - - info.send_budget -= 1; - let rid = self.behaviour.send_request(p, Message::request(req)); - log::trace! { "{:08x}: sending request {} to {} (send budget = {})", + log::trace! { "{:08x}: sending request {} to {} (budget remaining = {})", self.id, rid, p, - info.send_budget + 1 + remaining }; Ok(rid) @@ -254,16 +304,21 @@ where /// Answer an inbound request with a response. /// /// See [`RequestResponse::send_response`] for details. 
- pub fn send_response(&mut self, ch: ResponseChannel>, res: C::Response) { + pub fn send_response(&mut self, ch: ResponseChannel>, res: C::Response) + -> Result<(), C::Response> + { log::trace!("{:08x}: sending response {} to peer {}", self.id, ch.request_id(), &ch.peer); if let Some(info) = self.peer_info.get_mut(&ch.peer) { - if info.recv_budget == 0 { // need to send more credit to the remote peer - let crd = info.limit.switch(); - info.recv_budget = info.limit.max_recv.get(); - self.send_credit(&ch.peer, crd) + if info.recv_budget.remaining == 0 { // need to send more credit to the remote peer + let crd = info.recv_budget.limit.switch(); + info.recv_budget.remaining = info.recv_budget.limit.max_recv.get(); + self.send_credit(&ch.peer, crd); } } - self.behaviour.send_response(ch, Message::response(res)) + match self.behaviour.send_response(ch, Message::response(res)) { + Ok(()) => Ok(()), + Err(m) => Err(m.into_parts().1.expect("Missing response data.")), + } } /// Add a known peer address. @@ -295,19 +350,16 @@ where } /// Send a credit grant to the given peer. - fn send_credit(&mut self, p: &PeerId, amount: u16) { - let cid = self.next_credit_id(); - let rid = self.behaviour.send_request(p, Message::credit(amount, cid)); - log::trace!("{:08x}: sending {} as credit {} to {}", self.id, amount, cid, p); - let credit = Credit { id: cid, request: rid, amount }; - self.credit_messages.insert(p.clone(), credit); - } - - /// Create a new credit message ID. 
- fn next_credit_id(&mut self) -> u64 { - let n = self.credit_id; - self.credit_id += 1; - n + fn send_credit(&mut self, p: &PeerId, credit: u16) { + if let Some(info) = self.peer_info.get_mut(p) { + let cid = self.next_grant_id; + self.next_grant_id += 1; + let rid = self.behaviour.send_request(p, Message::credit(credit, cid)); + log::trace!("{:08x}: sending {} credit as grant {} to {}", self.id, credit, cid, p); + let grant = Grant { id: cid, request: rid, credit }; + info.recv_budget.grant = Some(grant); + info.recv_budget.sent.insert(rid); + } } } @@ -346,15 +398,15 @@ where fn inject_connection_closed(&mut self, peer: &PeerId, id: &ConnectionId, end: &ConnectedPoint) { self.behaviour.inject_connection_closed(peer, id, end); - if self.is_connected(peer) { - if let Some(credit) = self.credit_messages.get_mut(peer) { + if let Some(info) = self.peer_info.get_mut(peer) { + if let Some(grant) = &mut info.recv_budget.grant { log::debug! { "{:08x}: resending credit grant {} to {} after connection closed", self.id, - credit.id, + grant.id, peer }; - let msg = Message::credit(credit.amount, credit.id); - credit.request = self.behaviour.send_request(peer, msg) + let msg = Message::credit(grant.credit, grant.id); + grant.request = self.behaviour.send_request(peer, msg) } } } @@ -364,28 +416,24 @@ where self.behaviour.inject_connected(p); // The limit may have been added by `Throttled::send_request` already. 
if !self.peer_info.contains_key(p) { - let info = - if let Some(info) = self.offline_peer_info.pop(p) { - if info.recv_budget > 1 { - self.send_credit(p, info.recv_budget - 1) - } - info - } else { - let limit = self.limit_overrides.get(p).copied().unwrap_or(self.default_limit); - PeerInfo::new(limit) - }; - self.peer_info.insert(p.clone(), info); + if let Some(info) = self.offline_peer_info.pop(p) { + let recv_budget = info.recv_budget.remaining; + self.peer_info.insert(p.clone(), info); + if recv_budget > 1 { + self.send_credit(p, recv_budget - 1); + } + } else { + let limit = self.limit_overrides.get(p).copied().unwrap_or(self.default_limit); + self.peer_info.insert(p.clone(), PeerInfo::new(limit)); + } } } fn inject_disconnected(&mut self, p: &PeerId) { log::trace!("{:08x}: disconnected from {}", self.id, p); - if let Some(mut info) = self.peer_info.remove(p) { - info.send_budget = 1; - info.recv_budget = max(1, info.recv_budget); - self.offline_peer_info.put(p.clone(), info); + if let Some(info) = self.peer_info.remove(p) { + self.offline_peer_info.put(p.clone(), info.into_disconnected()); } - self.credit_messages.remove(p); self.behaviour.inject_disconnected(p) } @@ -413,11 +461,14 @@ where | RequestResponseMessage::Response { request_id, response } => match &response.header().typ { | Some(Type::Ack) => { - if let Some(id) = self.credit_messages.get(&peer).map(|c| c.id) { - if Some(id) == response.header().ident { - log::trace!("{:08x}: received ack {} from {}", self.id, id, peer); - self.credit_messages.remove(&peer); + if let Some(info) = self.peer_info.get_mut(&peer) { + if let Some(id) = info.recv_budget.grant.as_ref().map(|c| c.id) { + if Some(id) == response.header().ident { + log::trace!("{:08x}: received ack {} from {}", self.id, id, peer); + info.recv_budget.grant = None; + } } + info.recv_budget.sent.remove(&request_id); } continue } @@ -464,15 +515,23 @@ where id, peer }; - if info.send_budget_id < Some(id) { - if info.send_budget == 0 && credit > 
0 { + if info.send_budget.grant < Some(id) { + if info.send_budget.remaining == 0 && credit > 0 { log::trace!("{:08x}: sending to peer {} can resume", self.id, peer); self.events.push_back(Event::ResumeSending(peer.clone())) } - info.send_budget += credit; - info.send_budget_id = Some(id) + info.send_budget.remaining += credit; + info.send_budget.grant = Some(id); + } + match self.behaviour.send_response(channel, Message::ack(id)) { + Err(_) => log::debug! { + "{:08x}: Failed to send ack for credit grant {}.", + self.id, id + }, + Ok(()) => { + info.send_budget.received.insert(request_id); + } } - self.behaviour.send_response(channel, Message::ack(id)) } continue } @@ -481,18 +540,18 @@ where log::trace! { "{:08x}: received request {} (recv. budget = {})", self.id, request_id, - info.recv_budget + info.recv_budget.remaining }; - if info.recv_budget == 0 { + if info.recv_budget.remaining == 0 { log::debug!("{:08x}: peer {} exceeds its budget", self.id, peer); self.events.push_back(Event::TooManyInboundRequests(peer.clone())); continue } - info.recv_budget -= 1; + info.recv_budget.remaining -= 1; // We consider a request as proof that our credit grant has // reached the peer. Usually, an ACK has already been // received. - self.credit_messages.remove(&peer); + info.recv_budget.grant = None; } if let Some(rq) = request.into_parts().1 { RequestResponseMessage::Request { request_id, request: rq, channel } @@ -524,16 +583,25 @@ where request_id, error }) => { - if let Some(credit) = self.credit_messages.get_mut(&peer) { - if credit.request == request_id { - log::debug! { "{:08x}: failed to send {} as credit {} to {}; retrying...", - self.id, - credit.amount, - credit.id, - peer - }; - let msg = Message::credit(credit.amount, credit.id); - credit.request = self.behaviour.send_request(&peer, msg) + if let Some(info) = self.peer_info.get_mut(&peer) { + if let Some(grant) = info.recv_budget.grant.as_mut() { + if grant.request == request_id { + log::debug! 
{ + "{:08x}: failed to send {} as credit {} to {}; retrying...", + self.id, + grant.credit, + grant.id, + peer + }; + let msg = Message::credit(grant.credit, grant.id); + grant.request = self.behaviour.send_request(&peer, msg); + } + } + + // If the outbound failure was for a credit message, don't report it on + // the public API and retry the sending. + if info.recv_budget.sent.remove(&request_id) { + continue + } + } let event = RequestResponseEvent::OutboundFailure { peer, request_id, error }; @@ -544,9 +612,39 @@ where request_id, error }) => { + // If the inbound failure occurred in the context of responding to a + // credit grant, don't report it on the public API. + if let Some(info) = self.peer_info.get_mut(&peer) { + if info.send_budget.received.remove(&request_id) { + log::debug! { + "{:08x}: failed to acknowledge credit grant from {}: {:?}", + self.id, peer, error + }; + continue + } + } let event = RequestResponseEvent::InboundFailure { peer, request_id, error }; NetworkBehaviourAction::GenerateEvent(Event::Event(event)) } + | NetworkBehaviourAction::GenerateEvent(RequestResponseEvent::ResponseSent { + peer, + request_id + }) => { + // If this event is for an ACK response that was sent for + // the last received credit grant, skip it. + if let Some(info) = self.peer_info.get_mut(&peer) { + if info.send_budget.received.remove(&request_id) { + log::trace! 
{ + "{:08x}: successfully sent ACK for credit grant {:?}.", + self.id, + info.send_budget.grant, + } + continue + } + } + NetworkBehaviourAction::GenerateEvent(Event::Event( + RequestResponseEvent::ResponseSent { peer, request_id })) + } | NetworkBehaviourAction::DialAddress { address } => NetworkBehaviourAction::DialAddress { address }, | NetworkBehaviourAction::DialPeer { peer_id, condition } => diff --git a/protocols/request-response/tests/ping.rs b/protocols/request-response/tests/ping.rs index 9aa7f093300..de4d1983689 100644 --- a/protocols/request-response/tests/ping.rs +++ b/protocols/request-response/tests/ping.rs @@ -77,8 +77,13 @@ fn ping_protocol() { } => { assert_eq!(&request, &expected_ping); assert_eq!(&peer, &peer2_id); - swarm1.send_response(channel, pong.clone()); + swarm1.send_response(channel, pong.clone()).unwrap(); }, + RequestResponseEvent::ResponseSent { + peer, .. + } => { + assert_eq!(&peer, &peer2_id); + } e => panic!("Peer1: Unexpected event: {:?}", e) } } @@ -159,8 +164,13 @@ fn ping_protocol_throttled() { }) => { assert_eq!(&request, &expected_ping); assert_eq!(&peer, &peer2_id); - swarm1.send_response(channel, pong.clone()); + swarm1.send_response(channel, pong.clone()).unwrap(); }, + throttled::Event::Event(RequestResponseEvent::ResponseSent { + peer, .. + }) => { + assert_eq!(&peer, &peer2_id); + } e => panic!("Peer1: Unexpected event: {:?}", e) } if i % 31 == 0 { diff --git a/protocols/secio/CHANGELOG.md b/protocols/secio/CHANGELOG.md index 3c24d7664c2..dc13b13681d 100644 --- a/protocols/secio/CHANGELOG.md +++ b/protocols/secio/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.26.0 [unreleased] + +- Update `libp2p-core`. + # 0.25.0 [2020-11-25] - Update `libp2p-core`. 
diff --git a/protocols/secio/Cargo.toml b/protocols/secio/Cargo.toml index fef7ae16f55..55e0090a3e3 100644 --- a/protocols/secio/Cargo.toml +++ b/protocols/secio/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-secio" edition = "2018" description = "Secio encryption protocol for libp2p" -version = "0.25.0" +version = "0.26.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -19,7 +19,7 @@ ctr = "0.3" futures = "0.3.1" hmac = "0.9.0" lazy_static = "1.2.0" -libp2p-core = { version = "0.25.0", path = "../../core" } +libp2p-core = { version = "0.26.0", path = "../../core" } log = "0.4.6" prost = "0.6.1" pin-project = "1.0.0" diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md index c0437d9eafc..71b4c720642 100644 --- a/swarm/CHANGELOG.md +++ b/swarm/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.26.0 [unreleased] + +- Update `libp2p-core`. + # 0.25.1 [2020-11-26] - Add `ExpandedSwarm::is_connected`. diff --git a/swarm/Cargo.toml b/swarm/Cargo.toml index 7c82ff67d67..5628b216c02 100644 --- a/swarm/Cargo.toml +++ b/swarm/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-swarm" edition = "2018" description = "The libp2p swarm" -version = "0.25.1" +version = "0.26.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,7 +12,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] either = "1.6.0" futures = "0.3.1" -libp2p-core = { version = "0.25.0", path = "../core" } +libp2p-core = { version = "0.26.0", path = "../core" } log = "0.4" rand = "0.7" smallvec = "1.0" diff --git a/transports/dns/CHANGELOG.md b/transports/dns/CHANGELOG.md index af69d4c0d62..af417160c7e 100644 --- a/transports/dns/CHANGELOG.md +++ b/transports/dns/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.26.0 [unreleased] + +- Update `libp2p-core`. + # 0.25.0 [2020-11-25] - Update `libp2p-core`. 
diff --git a/transports/dns/Cargo.toml b/transports/dns/Cargo.toml index 405edfe2d07..91cc136b422 100644 --- a/transports/dns/Cargo.toml +++ b/transports/dns/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-dns" edition = "2018" description = "DNS transport implementation for libp2p" -version = "0.25.0" +version = "0.26.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -10,6 +10,6 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -libp2p-core = { version = "0.25.0", path = "../../core" } +libp2p-core = { version = "0.26.0", path = "../../core" } log = "0.4.1" futures = "0.3.1" diff --git a/transports/tcp/CHANGELOG.md b/transports/tcp/CHANGELOG.md index cf32e603aa9..6e57124e617 100644 --- a/transports/tcp/CHANGELOG.md +++ b/transports/tcp/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.26.0 [unreleased] + +- Update `async-io`. + # 0.25.1 [2020-11-26] - Lower `async-std` version to `1.6`, for compatibility diff --git a/transports/tcp/Cargo.toml b/transports/tcp/Cargo.toml index 9717c394e96..4ccf5beb50f 100644 --- a/transports/tcp/Cargo.toml +++ b/transports/tcp/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-tcp" edition = "2018" description = "TCP/IP transport protocol for libp2p" -version = "0.25.1" +version = "0.26.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -15,7 +15,7 @@ futures = "0.3.1" futures-timer = "3.0" if-addrs = "0.6.4" ipnet = "2.0.0" -libp2p-core = { version = "0.25.0", path = "../../core" } +libp2p-core = { version = "0.26.0", path = "../../core" } log = "0.4.1" socket2 = { version = "0.3.12" } tokio = { version = "0.3", default-features = false, features = ["net"], optional = true } diff --git a/transports/uds/CHANGELOG.md b/transports/uds/CHANGELOG.md index f9df580ec3c..0bf8608c396 100644 --- a/transports/uds/CHANGELOG.md +++ 
b/transports/uds/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.26.0 [unreleased] + +- Update `libp2p-core`. + # 0.25.0 [2020-11-25] - Update `libp2p-core`. diff --git a/transports/uds/Cargo.toml b/transports/uds/Cargo.toml index 825ecafc470..a669c7d164f 100644 --- a/transports/uds/Cargo.toml +++ b/transports/uds/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-uds" edition = "2018" description = "Unix domain sockets transport for libp2p" -version = "0.25.0" +version = "0.26.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,7 +11,7 @@ categories = ["network-programming", "asynchronous"] [target.'cfg(all(unix, not(target_os = "emscripten")))'.dependencies] async-std = { version = "1.6.2", optional = true } -libp2p-core = { version = "0.25.0", path = "../../core" } +libp2p-core = { version = "0.26.0", path = "../../core" } log = "0.4.1" futures = "0.3.1" tokio = { version = "0.3", default-features = false, features = ["net"], optional = true } diff --git a/transports/wasm-ext/CHANGELOG.md b/transports/wasm-ext/CHANGELOG.md index 03429ba533c..3361551283f 100644 --- a/transports/wasm-ext/CHANGELOG.md +++ b/transports/wasm-ext/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.26.0 [unreleased] + +- Update `libp2p-core`. + # 0.25.0 [2020-11-25] - Update `libp2p-core`. 
diff --git a/transports/wasm-ext/Cargo.toml b/transports/wasm-ext/Cargo.toml index c21d15a68b6..077a5f44039 100644 --- a/transports/wasm-ext/Cargo.toml +++ b/transports/wasm-ext/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-wasm-ext" -version = "0.25.0" +version = "0.26.0" authors = ["Pierre Krieger "] edition = "2018" description = "Allows passing in an external transport in a WASM environment" @@ -12,7 +12,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" js-sys = "0.3.19" -libp2p-core = { version = "0.25.0", path = "../../core" } +libp2p-core = { version = "0.26.0", path = "../../core" } parity-send-wrapper = "0.1.0" wasm-bindgen = "0.2.42" wasm-bindgen-futures = "0.4.4" diff --git a/transports/websocket/CHANGELOG.md b/transports/websocket/CHANGELOG.md index f388f7acde1..e2223698d1a 100644 --- a/transports/websocket/CHANGELOG.md +++ b/transports/websocket/CHANGELOG.md @@ -1,3 +1,19 @@ +# 0.27.0 [unreleased] + +- Update `libp2p-core`. + +# 0.26.3 [2020-12-10] + +- Update `async-tls`. + +# 0.26.2 [2020-12-09] + +- Update minimum patch version for `async-tls`. + +# 0.26.1 [2020-12-07] + +- Update `rustls`. + # 0.26.0 [2020-11-25] - Update dependencies. 
diff --git a/transports/websocket/Cargo.toml b/transports/websocket/Cargo.toml index dfd1a471270..385798d287f 100644 --- a/transports/websocket/Cargo.toml +++ b/transports/websocket/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-websocket" edition = "2018" description = "WebSocket transport for libp2p" -version = "0.26.0" +version = "0.27.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -10,13 +10,13 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -async-tls = "0.10.0" +async-tls = "0.11.0" either = "1.5.3" futures = "0.3.1" -libp2p-core = { version = "0.25.0", path = "../../core" } +libp2p-core = { version = "0.26.0", path = "../../core" } log = "0.4.8" quicksink = "0.1" -rustls = "0.18.0" +rustls = "0.19.0" rw-stream-sink = "0.2.0" soketto = { version = "0.4.1", features = ["deflate"] } url = "2.1" diff --git a/transports/websocket/src/tls.rs b/transports/websocket/src/tls.rs index a46cffa1b78..7ffdd057a38 100644 --- a/transports/websocket/src/tls.rs +++ b/transports/websocket/src/tls.rs @@ -168,4 +168,3 @@ impl From for Error { Error::Io(e) } } -