Skip to content
This repository was archived by the owner on Nov 15, 2023. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
44 commits
Select commit Hold shift + click to select a range
6e38b7e
Move bootnodes from individual `SetConfig`s to `PeersetConfig`
dmitry-markin Jun 6, 2023
dae075d
Move `SetId` & `SetConfig` from `peerset` to `protocol_controller`
dmitry-markin Jun 6, 2023
5a4f6a4
Remove unused `DropReason`
dmitry-markin Jun 6, 2023
7acf0e3
Move `Message` & `IncomingIndex` from `peerset` to `protocol_controller`
dmitry-markin Jun 6, 2023
037ead4
Restore running fuzz test
dmitry-markin Jun 6, 2023
efba474
Get rid of `Peerset` in `fuzz` test
dmitry-markin Jun 6, 2023
911d26f
Spawn runners instead of manual polling in `fuzz` test
dmitry-markin Jun 7, 2023
25b63cd
Migrate `Protocol` from `Peerset` to `PeerStore` & `ProtocolController`
dmitry-markin Jun 7, 2023
1fbff99
Migrate `NetworkService` from `Peerset` to `PeerStore` & `ProtocolController`
dmitry-markin Jun 7, 2023
8923fb5
Migrate `Notifications` from `Peerset` to `ProtocolController`s
dmitry-markin Jun 8, 2023
1350193
Migrate `Notifications` tests from `Peerset` to `ProtocolController`
dmitry-markin Jun 8, 2023
e0cae7e
Fix compilation of `NetworkService` & `Protocol`
dmitry-markin Jun 8, 2023
10ec8b4
Fix borrowing issues in `Notifications`
dmitry-markin Jun 8, 2023
b92ed07
Migrate `RequestResponse` from `Peerset` to `PeerStore`
dmitry-markin Jun 8, 2023
b265eb8
rustfmt
dmitry-markin Jun 8, 2023
5339b2e
Migrate request-response tests from `Peerset` to `PeerStore`
dmitry-markin Jun 8, 2023
71ca009
Migrate `reconnect_after_disconnect` test to `PeerStore` & `ProtocolController`
dmitry-markin Jun 8, 2023
8b64997
Fix `Notifications` tests
dmitry-markin Jun 8, 2023
eb2693e
Remove `Peerset` completely
dmitry-markin Jun 8, 2023
cb3b12c
Fix bug with counting sync peers in `Protocol`
dmitry-markin Jun 9, 2023
649b6c1
Eliminate indirect calls to `PeerStore` via `Protocol`
dmitry-markin Jun 9, 2023
094ffe5
Eliminate indirect calls to `ProtocolController` via `Protocol`
dmitry-markin Jun 9, 2023
dd77fba
Handle `Err` outcome from `remove_peers_from_reserved_set`
dmitry-markin Jun 9, 2023
5cddda0
Add note about disconnecting sync peers in `Protocol`
dmitry-markin Jun 9, 2023
84fb33d
minor: remove unneeded `clone()`
dmitry-markin Jun 9, 2023
7588e32
minor: extra comma removed
dmitry-markin Jun 9, 2023
7a84efc
minor: use `Stream` API of `from_protocol_controllers` channel
dmitry-markin Jun 9, 2023
7b39689
minor: remove TODO
dmitry-markin Jun 9, 2023
a9b65dc
minor: replace `.map().flatten()` with `.flat_map()`
dmitry-markin Jun 9, 2023
ac22673
minor: update `ProtocolController` docs
dmitry-markin Jun 9, 2023
c637260
rustfmt
dmitry-markin Jun 9, 2023
ab05cf0
Merge remote-tracking branch 'origin/master' into dm-get-rid-of-peers…
dmitry-markin Jun 12, 2023
6f17714
Apply suggestions from code review
dmitry-markin Jun 13, 2023
b5ec102
Extract `MockPeerStore` to `mock.rs`
dmitry-markin Jun 13, 2023
1e89b2a
Merge remote-tracking branch 'origin/master' into dm-get-rid-of-peers…
dmitry-markin Jun 14, 2023
0520230
Move `PeerStore` initialization to `build_network`
dmitry-markin Jun 14, 2023
e46fa49
minor: remove unused import
dmitry-markin Jun 14, 2023
2d336a5
Merge remote-tracking branch 'origin/master' into dm-get-rid-of-peers…
dmitry-markin Jun 19, 2023
02334a8
Merge remote-tracking branch 'origin/master' into dm-get-rid-of-peers…
dmitry-markin Jul 21, 2023
bbea1d4
minor: clarify error message
dmitry-markin Jul 24, 2023
c516194
Convert `syncs_header_only_forks` test into single-threaded
dmitry-markin Jul 24, 2023
3c37275
Merge remote-tracking branch 'origin/master' into dm-get-rid-of-peers…
dmitry-markin Jul 24, 2023
78f8884
Merge remote-tracking branch 'origin/master' into dm-get-rid-of-peers…
dmitry-markin Jul 26, 2023
70d5a11
Merge remote-tracking branch 'origin/master' into dm-get-rid-of-peers…
dmitry-markin Aug 2, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
Move bootnodes from individual SetConfigs to PeersetConfig
  • Loading branch information
dmitry-markin committed Jun 6, 2023
commit 6e38b7ed8361aa32b9ee98398234478047c1b69e
11 changes: 3 additions & 8 deletions client/network/src/peerset.rs
Original file line number Diff line number Diff line change
Expand Up @@ -192,6 +192,8 @@ impl From<u64> for IncomingIndex {
/// Configuration to pass when creating the peer set manager.
#[derive(Debug)]
pub struct PeersetConfig {
/// Bootnodes.
pub bootnodes: Vec<PeerId>,
/// List of sets of nodes the peerset manages.
pub sets: Vec<SetConfig>,
}
Expand All @@ -205,12 +207,6 @@ pub struct SetConfig {
/// Maximum number of outgoing links to peers.
pub out_peers: u32,

/// List of bootstrap nodes to initialize the set with.
///
/// > **Note**: Keep in mind that the networking has to know an address for these nodes,
/// > otherwise it will not be able to connect to them.
pub bootnodes: Vec<PeerId>,

/// Lists of nodes we should always be connected to.
///
/// > **Note**: Keep in mind that the networking has to know an address for these nodes,
Expand Down Expand Up @@ -244,8 +240,7 @@ pub struct Peerset {
impl Peerset {
/// Builds a new peerset from the given configuration.
pub fn from_config(config: PeersetConfig) -> (Peerset, PeersetHandle) {
let default_set_config = &config.sets[0];
let peer_store = PeerStore::new(default_set_config.bootnodes.clone());
let peer_store = PeerStore::new(config.bootnodes);

let (to_notifications, from_controllers) =
tracing_unbounded("mpsc_protocol_controllers_to_notifications", 10_000);
Expand Down
4 changes: 1 addition & 3 deletions client/network/src/protocol.rs
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,6 @@ impl<B: BlockT> Protocol<B> {
sets.push(crate::peerset::SetConfig {
in_peers: network_config.default_peers_set.in_peers,
out_peers: network_config.default_peers_set.out_peers,
bootnodes,
reserved_nodes: default_sets_reserved.clone(),
reserved_only: network_config.default_peers_set.non_reserved_mode ==
NonReservedPeerMode::Deny,
Expand All @@ -150,13 +149,12 @@ impl<B: BlockT> Protocol<B> {
sets.push(crate::peerset::SetConfig {
in_peers: set_cfg.set_config.in_peers,
out_peers: set_cfg.set_config.out_peers,
bootnodes: Vec::new(),
reserved_nodes,
reserved_only,
});
}

crate::peerset::Peerset::from_config(crate::peerset::PeersetConfig { sets })
crate::peerset::Peerset::from_config(crate::peerset::PeersetConfig { bootnodes, sets })
};

let behaviour = {
Expand Down
6 changes: 4 additions & 2 deletions client/network/src/protocol/notifications/behaviour.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2159,12 +2159,14 @@ mod tests {
sets.push(crate::peerset::SetConfig {
in_peers: 25,
out_peers: 25,
bootnodes: Vec::new(),
reserved_nodes: HashSet::new(),
reserved_only: false,
});

crate::peerset::Peerset::from_config(crate::peerset::PeersetConfig { sets })
crate::peerset::Peerset::from_config(crate::peerset::PeersetConfig {
bootnodes: Vec::new(),
sets,
})
};

(
Expand Down
14 changes: 5 additions & 9 deletions client/network/src/protocol/notifications/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -67,18 +67,14 @@ fn build_nodes() -> (Swarm<CustomProtoWithAddr>, Swarm<CustomProtoWithAddr>) {

let (peerset, handle) =
crate::peerset::Peerset::from_config(crate::peerset::PeersetConfig {
bootnodes: if index == 0 {
keypairs.iter().skip(1).map(|keypair| keypair.public().to_peer_id()).collect()
} else {
vec![]
},
sets: vec![crate::peerset::SetConfig {
in_peers: 25,
out_peers: 25,
bootnodes: if index == 0 {
keypairs
.iter()
.skip(1)
.map(|keypair| keypair.public().to_peer_id())
.collect()
} else {
vec![]
},
reserved_nodes: Default::default(),
reserved_only: false,
}],
Expand Down
33 changes: 2 additions & 31 deletions client/network/src/protocol_controller.rs
Original file line number Diff line number Diff line change
Expand Up @@ -775,7 +775,6 @@ mod tests {
let config = SetConfig {
in_peers: 0,
out_peers: 0,
bootnodes: Vec::new(),
reserved_nodes: std::iter::once(reserved1).collect(),
reserved_only: true,
};
Expand Down Expand Up @@ -838,7 +837,6 @@ mod tests {
let config = SetConfig {
in_peers: 0,
out_peers: 0,
bootnodes: Vec::new(),
reserved_nodes: std::iter::once(reserved1).collect(),
reserved_only: true,
};
Expand Down Expand Up @@ -890,7 +888,6 @@ mod tests {
let config = SetConfig {
in_peers: 0,
out_peers: 0,
bootnodes: Vec::new(),
reserved_nodes: std::iter::once(reserved1).collect(),
reserved_only: true,
};
Expand Down Expand Up @@ -950,7 +947,6 @@ mod tests {
in_peers: 0,
// Less slots than candidates.
out_peers: 2,
bootnodes: Vec::new(),
reserved_nodes: HashSet::new(),
reserved_only: false,
};
Expand Down Expand Up @@ -998,13 +994,8 @@ mod tests {
let outgoing_candidates = vec![regular1, regular2];
let reserved_nodes = [reserved1, reserved2].iter().cloned().collect();

let config = SetConfig {
in_peers: 10,
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes,
reserved_only: false,
};
let config =
SetConfig { in_peers: 10, out_peers: 10, reserved_nodes, reserved_only: false };
let (tx, mut rx) = tracing_unbounded("mpsc_test_to_notifications", 100);

let mut peer_store = MockPeerStoreHandle::new();
Expand Down Expand Up @@ -1043,7 +1034,6 @@ mod tests {
in_peers: 0,
// Less slots than candidates.
out_peers: 2,
bootnodes: Vec::new(),
reserved_nodes: HashSet::new(),
reserved_only: false,
};
Expand Down Expand Up @@ -1114,7 +1104,6 @@ mod tests {
in_peers: 0,
// Make sure we have slots available.
out_peers: 2,
bootnodes: Vec::new(),
reserved_nodes: HashSet::new(),
reserved_only: true,
};
Expand All @@ -1141,7 +1130,6 @@ mod tests {
// Make sure we have slots available.
in_peers: 2,
out_peers: 0,
bootnodes: Vec::new(),
reserved_nodes: HashSet::new(),
reserved_only: true,
};
Expand Down Expand Up @@ -1179,7 +1167,6 @@ mod tests {
in_peers: 0,
// Make sure we have slots available.
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes: HashSet::new(),
reserved_only: true,
};
Expand Down Expand Up @@ -1226,7 +1213,6 @@ mod tests {
let config = SetConfig {
in_peers: 10,
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes: [reserved1, reserved2].iter().cloned().collect(),
reserved_only: false,
};
Expand Down Expand Up @@ -1287,7 +1273,6 @@ mod tests {
let config = SetConfig {
in_peers: 10,
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes: [reserved1, reserved2].iter().cloned().collect(),
reserved_only: false,
};
Expand Down Expand Up @@ -1320,7 +1305,6 @@ mod tests {
let config = SetConfig {
in_peers: 10,
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes: [reserved1, reserved2].iter().cloned().collect(),
reserved_only: true,
};
Expand Down Expand Up @@ -1367,7 +1351,6 @@ mod tests {
let config = SetConfig {
in_peers: 10,
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes: [peer1, peer2].iter().cloned().collect(),
reserved_only: false,
};
Expand Down Expand Up @@ -1414,7 +1397,6 @@ mod tests {
let config = SetConfig {
in_peers: 10,
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes: HashSet::new(),
reserved_only: false,
};
Expand Down Expand Up @@ -1457,7 +1439,6 @@ mod tests {
let config = SetConfig {
in_peers: 10,
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes: HashSet::new(),
reserved_only: false,
};
Expand Down Expand Up @@ -1517,7 +1498,6 @@ mod tests {
let config = SetConfig {
in_peers: 10,
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes: [reserved1, reserved2].iter().cloned().collect(),
reserved_only: false,
};
Expand Down Expand Up @@ -1574,7 +1554,6 @@ mod tests {
let config = SetConfig {
in_peers: 10,
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes: HashSet::new(),
reserved_only: false,
};
Expand Down Expand Up @@ -1627,7 +1606,6 @@ mod tests {
let config = SetConfig {
in_peers: 10,
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes: [reserved1, reserved2].iter().cloned().collect(),
reserved_only: false,
};
Expand Down Expand Up @@ -1688,7 +1666,6 @@ mod tests {
let config = SetConfig {
in_peers: 10,
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes: HashSet::new(),
reserved_only: false,
};
Expand Down Expand Up @@ -1741,7 +1718,6 @@ mod tests {
let config = SetConfig {
in_peers: 10,
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes: HashSet::new(),
reserved_only: false,
};
Expand Down Expand Up @@ -1795,7 +1771,6 @@ mod tests {
let config = SetConfig {
in_peers: 1,
out_peers: 1,
bootnodes: Vec::new(),
reserved_nodes: HashSet::new(),
reserved_only: false,
};
Expand Down Expand Up @@ -1849,7 +1824,6 @@ mod tests {
let config = SetConfig {
in_peers: 1,
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes: HashSet::new(),
reserved_only: false,
};
Expand Down Expand Up @@ -1880,7 +1854,6 @@ mod tests {
let config = SetConfig {
in_peers: 10,
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes: HashSet::new(),
reserved_only: false,
};
Expand All @@ -1906,7 +1879,6 @@ mod tests {
let config = SetConfig {
in_peers: 10,
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes: std::iter::once(reserved1).collect(),
reserved_only: false,
};
Expand All @@ -1933,7 +1905,6 @@ mod tests {
let config = SetConfig {
in_peers: 10,
out_peers: 10,
bootnodes: Vec::new(),
reserved_nodes: std::iter::once(reserved1).collect(),
reserved_only: false,
};
Expand Down
2 changes: 1 addition & 1 deletion client/network/src/request_responses.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1073,10 +1073,10 @@ mod tests {
.boxed();

let config = PeersetConfig {
bootnodes: vec![],
sets: vec![SetConfig {
in_peers: u32::max_value(),
out_peers: u32::max_value(),
bootnodes: vec![],
reserved_nodes: Default::default(),
reserved_only: false,
}],
Expand Down
14 changes: 7 additions & 7 deletions client/network/test/src/peerset.rs
Original file line number Diff line number Diff line change
Expand Up @@ -130,14 +130,14 @@ fn test_once() {
let mut reserved_nodes = HashSet::<PeerId>::new();

let (mut peerset, peerset_handle) = Peerset::from_config(PeersetConfig {
bootnodes: (0..Uniform::new_inclusive(0, 4).sample(&mut rng))
.map(|_| {
let id = PeerId::random();
known_nodes.insert(id, State::Disconnected);
id
})
.collect(),
sets: vec![SetConfig {
bootnodes: (0..Uniform::new_inclusive(0, 4).sample(&mut rng))
.map(|_| {
let id = PeerId::random();
known_nodes.insert(id, State::Disconnected);
id
})
.collect(),
reserved_nodes: {
(0..Uniform::new_inclusive(0, 2).sample(&mut rng))
.map(|_| {
Expand Down