Merged

141 commits (changes shown below are from 2 commits)
aeba523
Gossipsub message signing
AgeManning May 19, 2020
573e4b6
Update ipfs-private example
AgeManning May 19, 2020
d2babd4
Add optimisation to prevent key calculation each message
AgeManning May 22, 2020
8353474
Support multiple versions
AgeManning May 22, 2020
b7db121
Initial porting of peer_scoring from go-libp2p
AgeManning May 22, 2020
ac3c977
Handle forwarding of signed messages and reviewers comments
AgeManning May 25, 2020
f3ff66b
Merge latest gossipsub signing
AgeManning May 25, 2020
c8335cc
Progress to porting scoring functions
AgeManning May 25, 2020
e9afd95
Merge branch 'master' into gossipsub-signing
AgeManning May 27, 2020
984a662
Finish first port of score params
AgeManning May 27, 2020
80ea7df
Add defaults to scoring
AgeManning May 27, 2020
58f61b4
Generalise topic hashing
AgeManning May 27, 2020
238e305
Update topic hashing structure
AgeManning May 27, 2020
fec112a
Update v1.1 tests
AgeManning May 27, 2020
bc09eb7
Add more unit tests
AgeManning May 27, 2020
3d1a025
protocols/gossipsub/src/protocol: Use quickcheck to test signing (#31)
mxinden Jun 1, 2020
31b6849
Shift signing into behaviour
AgeManning Jun 1, 2020
a8cec0b
Merge latest master
AgeManning Jul 1, 2020
2b38112
Address reviewers suggestions
AgeManning Jul 1, 2020
7d678f5
Send subscriptions to all peers
AgeManning Jul 1, 2020
dc050dd
Update examples
AgeManning Jul 1, 2020
5bf1cfc
Shift signing option into the behaviour
AgeManning Jul 1, 2020
4ffe728
Revert changes to core/
AgeManning Jul 9, 2020
24b738c
Reviewers suggestion
AgeManning Jul 9, 2020
a2e4bf0
Merge remote-tracking branch 'origin/master' into gossipsub-signing
AgeManning Jul 9, 2020
2f20571
Merge latest master
AgeManning Jul 23, 2020
2c50ba2
Revert changes to core/
AgeManning Jul 23, 2020
4de878e
Switch gossipsub state to use sets instead of vectors
rklaehn Jul 23, 2020
e43f0a0
Make tests pass
rklaehn Jul 23, 2020
57f07aa
Some clippy
rklaehn Jul 23, 2020
b6eeb3c
Prevent duplicate finding during JOIN
AgeManning Jul 24, 2020
0d5bde4
Merge #1675
AgeManning Jul 24, 2020
aec51e0
Update tests, improve logging
AgeManning Jul 24, 2020
759a5ad
Update the default d_lo constant
AgeManning Jul 24, 2020
495ffe4
Handle errors correctly
AgeManning Jul 24, 2020
1ea2dd9
Add privacy and validation options in the config
AgeManning Jul 24, 2020
8e69465
Merge branch 'master' into gossipsub-v1.1
blacktemplar Jul 24, 2020
1f73ded
A few improvements (#32)
rklaehn Jul 24, 2020
7b76c8f
Improve signing validation logic
AgeManning Jul 24, 2020
f5d32b2
Change the gossipsub rpc protocol to use bytes for message ids (#34)
rklaehn Jul 25, 2020
c3a7757
Add optional privacy and validation settings
AgeManning Jul 26, 2020
0c43ed4
Correct doc link
AgeManning Jul 27, 2020
efa40e3
Prevent invalid messages from being gossiped
AgeManning Jul 27, 2020
a514c60
Remove unvalidated messages from gossip. Reintroduce duplicate cache
AgeManning Jul 27, 2020
b257025
Send grafts when subscriptions are added to the mesh
AgeManning Jul 28, 2020
b20771b
Merge latest master
AgeManning Jul 28, 2020
cacadc0
Merge gossipsub-signing
AgeManning Jul 28, 2020
6566925
implement explicit peers
blacktemplar Jul 28, 2020
81338d5
Merge branch 'gossipsub-v1.1' into gossipsub-v1.1-explicit-peers
blacktemplar Jul 28, 2020
ecb2d8b
Fix ipfs-kad example
AgeManning Jul 28, 2020
2a830b9
Merge branch 'gossipsub-v1.1' into gossipsub-v1.1-explicit-peers
blacktemplar Jul 28, 2020
b78d771
finish merging + rustfmt
blacktemplar Jul 28, 2020
8bffa91
undo changes in ipfs-private
blacktemplar Jul 28, 2020
2f4a50a
Only add messages to memcache if not duplicates
AgeManning Jul 28, 2020
99a37bb
Merge latest signing
AgeManning Jul 28, 2020
284bf1c
Merge pull request #36 from blacktemplar/gossipsub-v1.1-explicit-peers
blacktemplar Jul 28, 2020
be7c9e2
Add mesh maintenance tests and remove excess peers from mesh
AgeManning Jul 29, 2020
e049ff7
Apply reviewers suggestions
AgeManning Jul 29, 2020
74d2779
Merge latest master
AgeManning Jul 29, 2020
81fca56
Merge latest gossipsub-signing
AgeManning Jul 29, 2020
6bac865
Wrap comments
AgeManning Jul 29, 2020
6e0a6b3
implement px and prune backoff + simplify tests for explicit peers
blacktemplar Jul 29, 2020
1257639
Merge pull request #37 from sigp/gossipsub-v1.1-prune-backoff-and-px
blacktemplar Jul 29, 2020
6c77022
improved data structure for storing backoffs + adding configurable sl…
blacktemplar Jul 30, 2020
8b17725
Merge pull request #38 from sigp/gossipsub-v1.1-prune-backoff-and-px
blacktemplar Jul 30, 2020
2518f9f
allow flood publishing (on by default)
blacktemplar Jul 30, 2020
1994c88
Merge pull request #39 from sigp/gossipsub-v1.1-flood-publishing
blacktemplar Jul 30, 2020
dbe0a84
implement adaptive gossip dissemination + complete config debug output
blacktemplar Jul 30, 2020
7155f5c
Merge pull request #40 from sigp/gossipsub-v1.1-adaptive-gossip-disse…
blacktemplar Jul 31, 2020
da34368
fix bug in backoff storage data structure
blacktemplar Jul 31, 2020
66730dd
make GossipsubConfig fields private + add getters + improve validatio…
blacktemplar Jul 31, 2020
8a0b278
implements a configurable minimum number of mesh peers that are outbound
blacktemplar Jul 31, 2020
4e1375a
Merge pull request #41 from sigp/gossipsub-v1.1-outbound-mesh-quota
blacktemplar Aug 1, 2020
94469cf
Ensure sequence number is sent
AgeManning Aug 2, 2020
60f8a1e
Merge latest master
AgeManning Aug 2, 2020
fef66dd
Maintain the debug trait
AgeManning Aug 2, 2020
799137c
use the score module in the behaviour, calling the score update metho…
blacktemplar Aug 6, 2020
efb59ac
Merge pull request #42 from sigp/gossipsub-v1.1-score-wiring
blacktemplar Aug 6, 2020
b6fbf05
implement opportunistic grafting
blacktemplar Aug 6, 2020
189ffc4
remove done TODO
blacktemplar Aug 6, 2020
f405d2a
remove done TODO
blacktemplar Aug 6, 2020
6fe395a
cargo fmt
blacktemplar Aug 6, 2020
47855d6
Merge branch 'gossipsub-v1.1' into gossip-v1.1-opportunistic-grafting
blacktemplar Aug 6, 2020
5954403
ignore grafts in unknown topics
blacktemplar Aug 6, 2020
a228f3b
Merge pull request #43 from sigp/gossip-v1.1-opportunistic-grafting
blacktemplar Aug 6, 2020
3d69944
Merge branch 'gossipsub-v1.1' into gossipsub-v1.1-spam-protection-mea…
blacktemplar Aug 6, 2020
afba384
cap the number of gossipsub retransmission for the iwants of the same…
blacktemplar Aug 6, 2020
223f0d8
cap the number of processed ihaves per peer per heartbeat, plus the m…
blacktemplar Aug 7, 2020
1ced56e
Replace lru cache with custom duplicatecache
AgeManning Aug 7, 2020
27ac942
track iwant messages we send and penalize peers not responding to them
blacktemplar Aug 7, 2020
04d3e2f
removes todo
blacktemplar Aug 7, 2020
5c52843
Improve stream management and error handling
AgeManning Aug 7, 2020
a135fc0
Merge pull request #44 from sigp/gossipsub-v1.1-spam-protection-measures
blacktemplar Aug 7, 2020
d9ee8e4
Merge remote-tracking branch 'origin/gossipsub-signing' into gossipsu…
blacktemplar Aug 7, 2020
714fee3
Merge branch 'master' into gossipsub-v1.1
blacktemplar Aug 7, 2020
7f7fd17
cargo fmt
blacktemplar Aug 7, 2020
ee7dd71
Gossipsub v1.1 check fmt (#45)
blacktemplar Aug 7, 2020
5109fe7
Merge duplicate cache
AgeManning Aug 10, 2020
b748618
Merge substream updates
AgeManning Aug 10, 2020
7bdb378
Propagate signature verification and peer kinds to behaviour (#46)
AgeManning Aug 12, 2020
83280db
treat peers that do not support the protocol as disconnected (#48)
blacktemplar Aug 12, 2020
5d93c0a
Gossipsub v1.1 various improvements (#49)
blacktemplar Aug 17, 2020
3206da5
Gossipsub 1.1 ammendments (#50)
AgeManning Aug 24, 2020
59034e2
Complete porting of go scoring tests (#51)
AgeManning Aug 25, 2020
521ecdb
Add peer blacklisting (#52)
AgeManning Aug 25, 2020
261f78e
Estimate message size (#53)
AgeManning Aug 26, 2020
f71a1bb
Message fragmentation (#54)
AgeManning Aug 27, 2020
c762d6c
Correct tests and merge latest master (#55)
AgeManning Aug 27, 2020
8ec6b66
Merge latest master
AgeManning Aug 27, 2020
4f92599
Remove github workflow
AgeManning Aug 27, 2020
618334a
Cleanup code add logs (#56)
AgeManning Aug 28, 2020
9a03f94
Further improve logging (#57)
AgeManning Aug 28, 2020
e031bce
Remove lru_time_cache_dep (#58)
AgeManning Aug 30, 2020
c7f0716
Various improvements (#60)
blacktemplar Sep 25, 2020
9da2af7
Cache published message ids (#61)
blacktemplar Oct 2, 2020
464c4da
use generic type for message data + cache message ids (#67)
blacktemplar Oct 8, 2020
88ee95e
Adds a fast message id function and an additional cache for it (#68)
blacktemplar Oct 9, 2020
238e5cf
Merge branch 'master' into gossipsub-v1.1
blacktemplar Oct 9, 2020
6ee2c5f
export FastMessageId
blacktemplar Oct 9, 2020
4c95f86
Gossipsub v1.1 bug fixes (#71)
blacktemplar Oct 14, 2020
c2c4a26
add subscription filters + some useful default filters + refactor tes…
blacktemplar Oct 14, 2020
201d533
remove support for multi-topic messages (#73)
blacktemplar Oct 20, 2020
87633a3
also consider allow self origin when using a published message ids di…
blacktemplar Oct 21, 2020
0e28cba
fix clippy warnings (#83)
blacktemplar Oct 21, 2020
9ba9b8a
Merge branch 'master' into gossipsub-v1.1
blacktemplar Oct 22, 2020
6bb58ec
Merge latest master
AgeManning Nov 3, 2020
7a316e8
Merge latest master
AgeManning Nov 6, 2020
d306455
Address reviewers comments (#88)
AgeManning Nov 8, 2020
2d1170b
implement as_ref and as_static_ref to get a &str from PeerKind (#91)
blacktemplar Nov 16, 2020
8bdc378
Merge latest master
AgeManning Nov 17, 2020
34f58fb
Update to latest master
AgeManning Nov 17, 2020
5157e70
only respond to IWANT if message got already validated (#96)
blacktemplar Nov 26, 2020
82be02d
protocols/gossipsub: Review (#93)
mxinden Dec 7, 2020
ffee445
Merge latest master
AgeManning Dec 7, 2020
23d7d04
Add optional compression to gossipsub (#100)
AgeManning Dec 16, 2020
1b5a35e
Remove feature gating from gossipsub
AgeManning Dec 16, 2020
1b19338
Merge latest master
AgeManning Dec 17, 2020
a15b246
Remove compression in favour of a more general data transform
AgeManning Dec 17, 2020
4cca4d3
Loosen trait bounds on gossipsub
AgeManning Dec 18, 2020
88ed4e5
Merge latest master
AgeManning Jan 6, 2021
f47ae78
Fix doclinks
AgeManning Jan 6, 2021
294 changes: 200 additions & 94 deletions protocols/gossipsub/src/behaviour.rs
@@ -18,29 +18,6 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use crate::config::{GossipsubConfig, ValidationMode};
use crate::error::PublishError;
use crate::handler::GossipsubHandler;
use crate::mcache::MessageCache;
use crate::protocol::{
GossipsubControlAction, GossipsubMessage, GossipsubSubscription, GossipsubSubscriptionAction,
MessageId, PeerInfo, SIGNING_PREFIX,
};
use crate::rpc_proto;
use crate::topic::{Hasher, Topic, TopicHash};
use futures::StreamExt;
use libp2p_core::{
connection::ConnectionId, identity::error::SigningError, identity::Keypair, Multiaddr, PeerId,
};
use libp2p_swarm::{
DialPeerCondition, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, PollParameters,
ProtocolsHandler,
};
use log::{debug, error, info, trace, warn};
use lru_time_cache::LruCache;
use prost::Message;
use rand;
use rand::{seq::SliceRandom, thread_rng};
use std::collections::hash_map::Entry;
use std::iter::FromIterator;
use std::time::Duration;
@@ -52,13 +29,35 @@ use std::{
sync::Arc,
task::{Context, Poll},
};

use futures::StreamExt;
use log::{debug, error, info, trace, warn};
use lru_time_cache::LruCache;
use prost::Message;
use rand;
use rand::{seq::SliceRandom, thread_rng};
use wasm_timer::{Instant, Interval};

mod tests;
use libp2p_core::{
connection::ConnectionId, identity::error::SigningError, identity::Keypair, Multiaddr, PeerId,
};
use libp2p_swarm::{
DialPeerCondition, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, PollParameters,
ProtocolsHandler,
};

use crate::config::{GossipsubConfig, ValidationMode};
use crate::error::PublishError;
use crate::handler::GossipsubHandler;
use crate::mcache::MessageCache;
use crate::protocol::{
GossipsubControlAction, GossipsubMessage, GossipsubSubscription, GossipsubSubscriptionAction,
MessageId, PeerInfo, SIGNING_PREFIX,
};
use crate::rpc_proto;
use crate::topic::{Hasher, Topic, TopicHash};

/// The number of heartbeat ticks until we clean up the backoff dictionary. This only affects
/// memory usage and has no external effect.
const BACKOFF_CLEAN_UP_TICKS: u64 = 15;
mod tests;

/// Determines if published messages should be signed or not.
///
@@ -148,6 +147,156 @@ impl From<MessageAuthenticity> for PublishConfig {
}
}
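The hidden lines of this hunk define `MessageAuthenticity`, which (as the behaviour's doc note further down states) is required together with a `GossipsubConfig` to initialise `Gossipsub`. A hypothetical construction sketch, not part of this diff; the `Signed` variant, `GossipsubConfig::default()` and the exact constructor shape are assumptions based on how this PR's examples use the API:

// Hypothetical usage sketch (not part of this diff). Variant and constructor shape
// are assumptions; see the examples touched by this PR for the authoritative usage.
use libp2p_core::identity::Keypair;

fn build_behaviour() {
    let local_key = Keypair::generate_ed25519();
    let config = GossipsubConfig::default();
    // Sign every published message with our identity key; peers running strict
    // validation verify both the signature and the encoded source peer id.
    let _gossipsub = Gossipsub::new(MessageAuthenticity::Signed(local_key), config);
}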

/// Stores backoffs in an efficient manner
struct BackoffStorage {
/// Stores backoffs and the index into backoffs_by_heartbeat, per peer per topic
backoffs: HashMap<TopicHash, HashMap<PeerId, (Instant, usize)>>,
/// Stores peer-topic pairs per heartbeat (cyclic; the current index is heartbeat_index)
backoffs_by_heartbeat: Vec<HashSet<(TopicHash, PeerId)>>,
/// The index in the backoffs_by_heartbeat vector corresponding to the current heartbeat
heartbeat_index: usize,
/// The heartbeat interval duration from the config
heartbeat_interval: Duration,
/// The backoff_slack from the config
backoff_slack: u32,
}

impl BackoffStorage {
fn heartbeats(d: &Duration, heartbeat_interval: &Duration) -> usize {
((d.as_nanos() + heartbeat_interval.as_nanos() - 1) / heartbeat_interval.as_nanos())
as usize
}
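As a side note, `heartbeats` is a ceiling division: it converts a duration into the number of heartbeat ticks needed to fully cover it. A standalone sketch of the same computation, using assumed example values (a 60 s backoff and a 1 s heartbeat; these figures are not taken from this diff):

// Standalone illustration of the ceiling division performed by `heartbeats`.
// The 60 s / 1 s figures below are example values, not defaults from this diff.
use std::time::Duration;

fn heartbeats(d: &Duration, heartbeat_interval: &Duration) -> usize {
    ((d.as_nanos() + heartbeat_interval.as_nanos() - 1) / heartbeat_interval.as_nanos()) as usize
}

fn main() {
    let backoff = Duration::from_secs(60);
    let interval = Duration::from_secs(1);
    assert_eq!(heartbeats(&backoff, &interval), 60);
    // A partial tick rounds up: 60.5 s of backoff needs 61 ticks.
    assert_eq!(heartbeats(&Duration::from_millis(60_500), &interval), 61);
    // `new` then sizes the ring as heartbeats(prune_backoff) + backoff_slack + 1 slots.
}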

pub fn new(
prune_backoff: &Duration,
heartbeat_interval: Duration,
backoff_slack: u32,
) -> BackoffStorage {
//we add one additional slot for partial heartbeat
let max_heartbeats =
Self::heartbeats(prune_backoff, &heartbeat_interval) + backoff_slack as usize + 1;
BackoffStorage {
backoffs: HashMap::new(),
backoffs_by_heartbeat: vec![HashSet::new(); max_heartbeats],
heartbeat_index: 0,
heartbeat_interval,
backoff_slack,
}
}

/// Updates the backoff for a peer (if there is already a more restrictive backoff this call
/// doesn't change anything)
pub fn update_backoff(&mut self, topic: &TopicHash, peer: &PeerId, time: Duration) {
let instant = Instant::now() + time;
let insert_into_backoffs_by_heartbeat =
|heartbeat_index: usize,
backoffs_by_heartbeat: &mut Vec<HashSet<_>>,
heartbeat_interval,
backoff_slack| {
let pair = (topic.clone(), peer.clone());
let index = (heartbeat_index
+ Self::heartbeats(&time, heartbeat_interval)
+ backoff_slack as usize)
% backoffs_by_heartbeat.len();
backoffs_by_heartbeat[index].insert(pair);
index
};
match self
.backoffs
.entry(topic.clone())
.or_insert_with(HashMap::new)
.entry(peer.clone())
{
Entry::Occupied(mut o) => {
let &(backoff, index) = o.get();
if backoff < instant {
let pair = (topic.clone(), peer.clone());
if let Some(s) = self.backoffs_by_heartbeat.get_mut(index) {
s.remove(&pair);
}
let index = insert_into_backoffs_by_heartbeat(
self.heartbeat_index,
&mut self.backoffs_by_heartbeat,
&self.heartbeat_interval,
self.backoff_slack,
);
o.insert((instant, index));
}
}
Entry::Vacant(v) => {
let index = insert_into_backoffs_by_heartbeat(
self.heartbeat_index,
&mut self.backoffs_by_heartbeat,
&self.heartbeat_interval,
self.backoff_slack,
);
v.insert((instant, index));
}
};
}
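The closure above picks the ring slot in which this backoff will expire; the index arithmetic is plain modular addition. A minimal standalone sketch with assumed numbers (62 slots, as a 60-tick backoff plus one slack tick plus one partial slot would give):

// Illustrative only: the ring-buffer slot computation used inside `update_backoff`.
// All concrete numbers are assumptions for the example.
fn bucket_index(heartbeat_index: usize, backoff_ticks: usize, slack: usize, slots: usize) -> usize {
    (heartbeat_index + backoff_ticks + slack) % slots
}

fn main() {
    // Current tick 5, a 30-tick backoff, slack of 1, 62 slots in the ring.
    assert_eq!(bucket_index(5, 30, 1, 62), 36);
    // The index wraps around once it passes the end of the ring.
    assert_eq!(bucket_index(60, 30, 1, 62), 29);
}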

/// Checks if a given peer is backed off for the given topic. This method respects the
/// configured BACKOFF_SLACK and may return true even if the backoff is already over.
/// It is guaranteed to return true while the backoff is not over, and, once enough time has
/// passed, to return false after the backoff is over.
///
/// This method should be used for deciding whether we can already send a GRAFT to a
/// previously backed off peer.
pub fn is_backoff_with_slack(&self, topic: &TopicHash, peer: &PeerId) -> bool {
self.backoffs
.get(topic)
.map_or(false, |m| m.contains_key(peer))
}

/// Checks if a given peer is backed off for the given topic. This method ignores BACKOFF_SLACK
/// and should be used for deciding if an incoming GRAFT is allowed: it returns true exactly
/// while the backoff time is not yet over.
pub fn is_backoff(&self, topic: &TopicHash, peer: &PeerId) -> bool {
Self::is_backoff_from_backoffs(&self.backoffs, topic, peer, Duration::from_secs(0))
}

fn is_backoff_from_backoffs(
backoffs: &HashMap<TopicHash, HashMap<PeerId, (Instant, usize)>>,
topic: &TopicHash,
peer: &PeerId,
slack: Duration,
) -> bool {
backoffs.get(topic).map_or(false, |m| {
m.get(peer)
.map_or(false, |(i, _)| *i + slack > Instant::now())
})
}

/// Applies a heartbeat. This should be called regularly, at intervals of length
/// `heartbeat_interval`.
///
/// TODO: Should we use our own instance of `wasm_timer::Interval` with its own interval
/// length, so as not to rely on regular heartbeat calls?
pub fn heartbeat(&mut self) {
//clean up backoffs_by_heartbeat
if let Some(s) = self.backoffs_by_heartbeat.get_mut(self.heartbeat_index) {
let backoffs = &mut self.backoffs;
let slack = self.heartbeat_interval * self.backoff_slack;
s.retain(|(topic, peer)| {
let keep = Self::is_backoff_from_backoffs(backoffs, topic, peer, slack);
if !keep {
//remove from backoffs
if let Entry::Occupied(mut m) = backoffs.entry(topic.clone()) {
if m.get_mut().remove(peer).is_some() && m.get().is_empty() {
m.remove();
}
}
}

keep
});
}

//increase heartbeat index
self.heartbeat_index += 1;
}
}
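Taken together, the behaviour drives this structure from three places, all visible further down in this diff: it records a backoff when a PRUNE is sent or received, consults it when handling GRAFTs and when picking new mesh peers, and ticks it once per heartbeat. A hypothetical driver; the 60 s duration and the function shapes are assumptions for the sketch:

// Hypothetical driver for BackoffStorage, mirroring the calls the behaviour makes
// elsewhere in this diff. The 60 s backoff below is an example value.
use std::time::Duration;

fn on_prune(backoffs: &mut BackoffStorage, topic: &TopicHash, peer: &PeerId) {
    // Sending or receiving a PRUNE starts (or extends) the backoff for (topic, peer).
    backoffs.update_backoff(topic, peer, Duration::from_secs(60));
}

fn allow_graft(backoffs: &BackoffStorage, topic: &TopicHash, peer: &PeerId) -> bool {
    // An incoming GRAFT is rejected while the backoff is still running (no slack).
    !backoffs.is_backoff(topic, peer)
}

fn is_graft_candidate(backoffs: &BackoffStorage, topic: &TopicHash, peer: &PeerId) -> bool {
    // When picking new mesh peers we also respect the slack, so we never GRAFT too early.
    !backoffs.is_backoff_with_slack(topic, peer)
}

fn on_heartbeat(backoffs: &mut BackoffStorage) {
    // Called once per heartbeat tick; lazily expires old entries.
    backoffs.heartbeat();
}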

/// Network behaviour that handles the gossipsub protocol.
///
/// NOTE: Initialisation requires a [`MessageAuthenticity`] and [`GossipsubConfig`] instance. If message signing is
@@ -188,9 +337,8 @@ pub struct Gossipsub {
/// The last publish time for fanout topics.
fanout_last_pub: HashMap<TopicHash, Instant>,

/// The backoff times for each topic and peer. We do not graft to peers for some topic until
/// the given backoff instant. Values in the past get cleaned up regularly.
backoff: HashMap<TopicHash, HashMap<PeerId, Instant>>,
/// Storage for backoffs
backoffs: BackoffStorage,

/// Message cache for the last few heartbeats.
mcache: MessageCache,
@@ -225,7 +373,11 @@ impl Gossipsub {
mesh: HashMap::new(),
fanout: HashMap::new(),
fanout_last_pub: HashMap::new(),
backoff: HashMap::new(),
backoffs: BackoffStorage::new(
&config.prune_backoff,
config.heartbeat_interval,
config.backoff_slack,
),
mcache: MessageCache::new(
config.history_gossip,
config.history_length,
@@ -548,12 +700,8 @@ impl Gossipsub {
};

//update backoff
Self::add_backoff(
&mut self.backoff,
peer,
topic_hash,
self.config.prune_backoff,
);
self.backoffs
.update_backoff(topic_hash, peer, self.config.prune_backoff);

GossipsubControlAction::Prune {
topic_hash: topic_hash.clone(),
@@ -659,29 +807,6 @@ impl Gossipsub {
debug!("Completed IWANT handling for peer: {:?}", peer_id);
}

fn add_backoff(
backoff: &mut HashMap<TopicHash, HashMap<PeerId, Instant>>,
peer_id: &PeerId,
topic: &TopicHash,
time: Duration,
) {
let instant = Instant::now() + time;
match backoff
.entry(topic.clone())
.or_insert_with(HashMap::new)
.entry(peer_id.clone())
{
Entry::Occupied(mut o) => {
if o.get() < &instant {
o.insert(instant);
}
}
Entry::Vacant(v) => {
v.insert(instant);
}
}
}

/// Handles GRAFT control messages. If subscribed to the topic, adds the peer to mesh, if not,
/// responds with PRUNE messages.
fn handle_graft(&mut self, peer_id: &PeerId, topics: Vec<TopicHash>) {
@@ -701,22 +826,16 @@ impl Gossipsub {
} else {
for topic_hash in topics {
// make sure we are not backing off that peer
if let Some(backoff_time) = self
.backoff
.get(&topic_hash)
.and_then(|peers| peers.get(peer_id))
{
if backoff_time > &Instant::now() {
debug!("GRAFT: ignoring backed off peer {:?}", peer_id);
//TODO add penalty
//no PX
do_px = false;
//TODO extra penalty if the graft is coming too fast (see
// GossipSubGraftFloodThreshold)

to_prune_topics.insert(topic_hash.clone());
continue;
}
if self.backoffs.is_backoff(&topic_hash, peer_id) {
debug!("GRAFT: ignoring backed off peer {:?}", peer_id);
//TODO add penalty
//no PX
do_px = false;
//TODO extra penalty if the graft is coming too fast (see
// GossipSubGraftFloodThreshold)

to_prune_topics.insert(topic_hash.clone());
continue;
}

//TODO check score of peer
@@ -780,10 +899,9 @@ impl Gossipsub {
}

// is there a backoff specified by the peer? if so obey it.
Self::add_backoff(
&mut self.backoff,
peer_id,
self.backoffs.update_backoff(
&topic_hash,
peer_id,
if let Some(backoff) = backoff {
Duration::from_secs(backoff)
} else {
@@ -996,12 +1114,6 @@ impl Gossipsub {
);
}

fn do_backoff(backoff_peers: Option<&HashMap<PeerId, Instant>>, peer: &PeerId) -> bool {
//We do not consider the instants here, as long as there is an entry we backoff
//In the heartbeat we clean up to backoff map to stay up to date.
backoff_peers.map_or(false, |m| m.contains_key(peer))
}

/// Heartbeat function which shifts the memcache and updates the mesh.
fn heartbeat(&mut self) {
debug!("Starting heartbeat");
@@ -1012,13 +1124,7 @@ impl Gossipsub {
let mut to_prune = HashMap::new();

//clean up expired backoffs
if self.heartbeat_ticks % BACKOFF_CLEAN_UP_TICKS == 0 {
let cutoff = Instant::now();
self.backoff.retain(|_, m| {
m.retain(|_, backoff| &*backoff > &cutoff);
!m.is_empty()
});
}
self.backoffs.heartbeat();

//check connections to explicit peers
if self.heartbeat_ticks % self.config.check_explicit_peers_ticks == 0 {
@@ -1040,13 +1146,13 @@ impl Gossipsub {
);
// not enough peers - get mesh_n - current_length more
let desired_peers = self.config.mesh_n - peers.len();
let backoff_peers = self.backoff.get(topic_hash);
let backoffs = &self.backoffs;
let peer_list =
Self::get_random_peers(&self.topic_peers, topic_hash, desired_peers, {
|peer| {
!peers.contains(peer)
&& !explicit_peers.contains(peer)
&& !Self::do_backoff(backoff_peers, peer)
&& !backoffs.is_backoff_with_slack(topic_hash, peer)
}
});
for peer in &peer_list {