This repository was archived by the owner on Nov 15, 2023. It is now read-only.
Merged
Changes from 1 commit
Commits (31)
58feb41
Send high-level consensus telemetry by default
cmichi Mar 18, 2019
ec9d66b
Notify telemetry on finalized
cmichi Mar 21, 2019
1deceea
Send used authority set to telemetry
cmichi Apr 3, 2019
7743147
Do not send commit message telemetry by default
cmichi Apr 3, 2019
8066144
Fix typo
cmichi Apr 9, 2019
b18916f
Revert "Send used authority set to telemetry"
cmichi Apr 10, 2019
539b94f
Allow for notifications on telemetry connect
cmichi Apr 9, 2019
7018af1
Send authority set to telemetry on change
cmichi Apr 10, 2019
a9c8b7f
Merge branch 'master' into 'cmichi-send-high-level-consensus-telemetr…
cmichi Apr 10, 2019
ff8cf0e
Merge branch 'master' into cmichi-send-high-level-consensus-telemetry…
cmichi Apr 10, 2019
8ff8561
Make telemetry onconnect hook optional
cmichi Apr 10, 2019
f66d7a9
Merge branch 'master' into 'cmichi-send-high-level-consensus-telemetr…
cmichi Apr 10, 2019
b4949f3
Introduce GrandpaParams struct to condense parameters
cmichi Apr 13, 2019
5c639a8
Remove debug statement
cmichi Apr 13, 2019
aa0287e
Fix tests
cmichi Apr 13, 2019
95ae8dd
Rename parameter
cmichi Apr 13, 2019
537ad80
Fix tests
cmichi Apr 13, 2019
6b1f2a8
Rename struct
cmichi Apr 13, 2019
78eb232
Do not send verbosity level
cmichi Apr 15, 2019
75a8cb4
Combine imports
cmichi Apr 15, 2019
389b704
Implement comments
cmichi Apr 15, 2019
61782dd
Merge branch 'master' into cmichi-send-high-level-consensus-telemetry…
cmichi Apr 15, 2019
ac0722a
Run cargo build --all
cmichi Apr 15, 2019
b6a27d5
Remove noisy telemetry
cmichi Apr 15, 2019
4a04a91
Add docs for public items
cmichi Apr 15, 2019
3f55b25
Unbox and support Clone trait
cmichi Apr 16, 2019
474699e
Merge branch 'master' into cmichi-send-high-level-consensus-telemetry…
cmichi Apr 16, 2019
ed45607
Merge branch 'master' into cmichi-send-high-level-consensus-telemetry…
cmichi Apr 23, 2019
1fb49f5
Fix merge
cmichi Apr 23, 2019
862e06c
Fix merge
cmichi Apr 23, 2019
f89f687
Update core/finality-grandpa/src/lib.rs
rphmeier Apr 23, 2019
Merge branch 'master' into cmichi-send-high-level-consensus-telemetry-by-default
cmichi committed Apr 10, 2019
commit ff8cf0eefc79d6a412b722fbcc501a2b352e3418
70 changes: 70 additions & 0 deletions core/finality-grandpa/src/aux_schema.rs
@@ -520,6 +520,76 @@ mod test {
		);
	}

	#[test]
	fn load_decode_from_v1_migrates_data_format() {
		let client = test_client::new();

		let authorities = vec![(AuthorityId::default(), 100)];
		let set_id = 3;
		let round_number: u64 = 42;
		let round_state = RoundState::<H256, u64> {
			prevote_ghost: Some((H256::random(), 32)),
			finalized: None,
			estimate: None,
			completable: false,
		};

		{
			let authority_set = AuthoritySet::<H256, u64> {
				current_authorities: authorities.clone(),
				pending_standard_changes: ForkTree::new(),
				pending_forced_changes: Vec::new(),
				set_id,
			};

			let voter_set_state = V1VoterSetState::Live(round_number, round_state.clone());

			client.insert_aux(
				&[
					(AUTHORITY_SET_KEY, authority_set.encode().as_slice()),
					(SET_STATE_KEY, voter_set_state.encode().as_slice()),
					(VERSION_KEY, 1u32.encode().as_slice()),
				],
				&[],
			).unwrap();
		}

		assert_eq!(
			load_decode::<_, u32>(&client, VERSION_KEY).unwrap(),
			Some(2),
		);

		let PersistentData { authority_set, set_state, .. } = load_persistent::<test_client::runtime::Block, _, _>(
			&client,
			H256::random(),
			0,
			|| unreachable!(),
		).unwrap();

		assert_eq!(
			*authority_set.inner().read(),
			AuthoritySet {
				current_authorities: authorities,
				pending_standard_changes: ForkTree::new(),
				pending_forced_changes: Vec::new(),
				set_id,
			},
		);

		assert_eq!(
			&*set_state.read(),
			&VoterSetState::Live {
				completed_rounds: CompletedRounds::new(CompletedRound {
					number: round_number,
					state: round_state.clone(),
					base: round_state.prevote_ghost.unwrap(),
					votes: vec![],
				}),
				current_round: HasVoted::No,
			},
		);
	}

	#[test]
	fn load_decode_from_v1_migrates_data_format() {
		let client = test_client::new();
89 changes: 16 additions & 73 deletions core/peerset/src/lib.rs
@@ -29,70 +29,13 @@ use slots::{SlotType, SlotState, Slots};
pub use serde_json::Value;

const PEERSET_SCORES_CACHE_SIZE: usize = 1000;

/// FIFO-ordered list of nodes that we know exist, but we are not connected to.
#[derive(Debug, Default)]
struct Discovered {
	/// Nodes we should connect to first.
	reserved: LinkedHashMap<PeerId, ()>,
	/// All remaining nodes.
	common: LinkedHashMap<PeerId, ()>,
}

impl Discovered {
	/// Returns true if we already know given node.
	fn contains(&self, peer_id: &PeerId) -> bool {
		self.reserved.contains_key(peer_id) || self.common.contains_key(peer_id)
	}

	/// Returns true if given node is reserved.
	fn is_reserved(&self, peer_id: &PeerId) -> bool {
		self.reserved.contains_key(peer_id)
	}

	/// Adds new peer of a given type.
	fn add_peer(&mut self, peer_id: PeerId, slot_type: SlotType) {
		if !self.contains(&peer_id) {
			match slot_type {
				SlotType::Common => self.common.insert(peer_id, ()),
				SlotType::Reserved => self.reserved.insert(peer_id, ()),
			};
		}
	}

	/// Pops the oldest peer from the list.
	fn pop_peer(&mut self, reserved_only: bool) -> Option<(PeerId, SlotType)> {
		if let Some((peer_id, _)) = self.reserved.pop_front() {
			return Some((peer_id, SlotType::Reserved));
		}

		if reserved_only {
			return None;
		}

		self.common.pop_front()
			.map(|(peer_id, _)| (peer_id, SlotType::Common))
	}

	/// Marks the node as not reserved.
	fn mark_not_reserved(&mut self, peer_id: &PeerId) {
		if let Some(_) = self.reserved.remove(peer_id) {
			self.common.insert(peer_id.clone(), ());
		}
	}

	/// Removes the node from the list.
	fn remove_peer(&mut self, peer_id: &PeerId) {
		self.reserved.remove(peer_id);
		self.common.remove(peer_id);
	}
}
const DISCOVERED_NODES_LIMIT: u32 = 1000;

#[derive(Debug)]
struct PeersetData {
	/// List of nodes that we know exist, but we are not connected to.
	/// Elements in this list must never be in `out_slots` or `in_slots`.
	discovered: Discovered,
	discovered: Slots,
	/// If true, we only accept reserved nodes.
	reserved_only: bool,
	/// Node slots for outgoing connections.
@@ -216,7 +159,7 @@ impl Peerset {
		let (tx, rx) = mpsc::unbounded();

		let data = PeersetData {
			discovered: Default::default(),
			discovered: Slots::new(DISCOVERED_NODES_LIMIT),
			reserved_only: config.reserved_only,
			out_slots: Slots::new(config.out_peers),
			in_slots: Slots::new(config.in_peers),
@@ -270,10 +213,10 @@ impl Peerset {
				self.message_queue.push_back(Message::Drop(removed));
				self.message_queue.push_back(Message::Connect(added));
			}
			SlotState::AlreadyConnected(_) | SlotState::Upgraded(_) => {
			SlotState::AlreadyExists(_) | SlotState::Upgraded(_) => {
				return;
			}
			SlotState::MaxConnections(peer_id) => {
			SlotState::MaxCapacity(peer_id) => {
				self.data.discovered.add_peer(peer_id, SlotType::Reserved);
				return;
			}
@@ -285,7 +228,7 @@ impl Peerset {
		self.data.out_slots.mark_not_reserved(&peer_id);
		self.data.discovered.mark_not_reserved(&peer_id);
		if self.data.reserved_only {
			if self.data.in_slots.clear_slot(&peer_id) || self.data.out_slots.clear_slot(&peer_id) {
			if self.data.in_slots.remove_peer(&peer_id) || self.data.out_slots.remove_peer(&peer_id) {
				// insert peer back into discovered list
				self.data.discovered.add_peer(peer_id.clone(), SlotType::Common);
				self.message_queue.push_back(Message::Drop(peer_id));
@@ -325,15 +268,15 @@ impl Peerset {
		if score < 0 {
			// peer will be removed from `in_slots` or `out_slots` in `on_dropped` method
			if self.data.in_slots.contains(&peer_id) || self.data.out_slots.contains(&peer_id) {
				self.data.in_slots.clear_slot(&peer_id);
				self.data.out_slots.clear_slot(&peer_id);
				self.data.in_slots.remove_peer(&peer_id);
				self.data.out_slots.remove_peer(&peer_id);
				self.message_queue.push_back(Message::Drop(peer_id));
			}
		}
	}

	fn alloc_slots(&mut self) {
		while let Some((peer_id, slot_type)) = self.data.discovered.pop_peer(self.data.reserved_only) {
		while let Some((peer_id, slot_type)) = self.data.discovered.pop_most_important_peer(self.data.reserved_only) {
			match self.data.out_slots.add_peer(peer_id, slot_type) {
				SlotState::Added(peer_id) => {
					self.message_queue.push_back(Message::Connect(peer_id));
@@ -344,10 +287,10 @@ impl Peerset {
					self.message_queue.push_back(Message::Drop(removed));
					self.message_queue.push_back(Message::Connect(added));
				}
				SlotState::Upgraded(_) | SlotState::AlreadyConnected(_) => {
				SlotState::Upgraded(_) | SlotState::AlreadyExists(_) => {
					// TODO: we should never reach this point
				},
				SlotState::MaxConnections(peer_id) => {
				SlotState::MaxCapacity(peer_id) => {
					self.data.discovered.add_peer(peer_id, slot_type);
					break;
				},
@@ -404,11 +347,11 @@ impl Peerset {
				self.message_queue.push_back(Message::Drop(removed));
				self.message_queue.push_back(Message::Accept(index));
			},
			SlotState::AlreadyConnected(_) | SlotState::Upgraded(_) => {
			SlotState::AlreadyExists(_) | SlotState::Upgraded(_) => {
				// we are already connected. in this case we do not answer
				return;
			},
			SlotState::MaxConnections(peer_id) => {
			SlotState::MaxCapacity(peer_id) => {
				self.data.discovered.add_peer(peer_id, slot_type);
				self.message_queue.push_back(Message::Reject(index));
				return;
@@ -427,14 +370,14 @@ impl Peerset {
			peer_id, self.data.in_slots, self.data.out_slots
		);
		// Automatically connect back if reserved.
		if self.data.in_slots.is_connected_and_reserved(&peer_id) || self.data.out_slots.is_connected_and_reserved(&peer_id) {
		if self.data.in_slots.is_reserved(&peer_id) || self.data.out_slots.is_reserved(&peer_id) {
			self.message_queue.push_back(Message::Connect(peer_id));
			return;
		}

		// Otherwise, free the slot.
		self.data.in_slots.clear_slot(&peer_id);
		self.data.out_slots.clear_slot(&peer_id);
		self.data.in_slots.remove_peer(&peer_id);
		self.data.out_slots.remove_peer(&peer_id);

		// Note: in this dummy implementation we consider that peers never expire. As soon as we
		// are disconnected from a peer, we try again.
You are viewing a condensed version of this merge commit.