diff --git a/Cargo.lock b/Cargo.lock index 00d2eb47ef505..7b7cdd902113b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1940,6 +1940,7 @@ dependencies = [ "srml-democracy 0.1.0", "srml-executive 0.1.0", "srml-fees 0.1.0", + "srml-finality-tracker 0.1.0", "srml-grandpa 0.1.0", "srml-indices 0.1.0", "srml-session 0.1.0", @@ -3212,6 +3213,27 @@ dependencies = [ "substrate-primitives 0.1.0", ] +[[package]] +name = "srml-finality-tracker" +version = "0.1.0" +dependencies = [ + "hex-literal 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-codec 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-codec-derive 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.87 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.87 (registry+https://github.com/rust-lang/crates.io-index)", + "sr-io 0.1.0", + "sr-primitives 0.1.0", + "sr-std 0.1.0", + "srml-session 0.1.0", + "srml-support 0.1.0", + "srml-system 0.1.0", + "substrate-inherents 0.1.0", + "substrate-primitives 0.1.0", +] + [[package]] name = "srml-grandpa" version = "0.1.0" @@ -3222,6 +3244,7 @@ dependencies = [ "sr-io 0.1.0", "sr-primitives 0.1.0", "sr-std 0.1.0", + "srml-finality-tracker 0.1.0", "srml-session 0.1.0", "srml-support 0.1.0", "srml-system 0.1.0", @@ -3786,9 +3809,11 @@ dependencies = [ "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 0.1.0", + "srml-finality-tracker 0.1.0", "substrate-client 0.1.0", "substrate-consensus-common 0.1.0", "substrate-finality-grandpa-primitives 0.1.0", + "substrate-inherents 0.1.0", "substrate-keyring 0.1.0", "substrate-network 0.1.0", "substrate-primitives 0.1.0", diff --git a/Cargo.toml b/Cargo.toml index e6e6921e15fd9..699f9a158981f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -67,6 +67,7 @@ members = [ "srml/example", "srml/executive", "srml/fees", + "srml/finality-tracker", "srml/grandpa", "srml/indices", "srml/metadata", diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 8936477e40f2f..a170c7e202609 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -27,7 +27,7 @@ use runtime_primitives::{ }; use consensus::{ Error as ConsensusError, ErrorKind as ConsensusErrorKind, ImportBlock, ImportResult, - BlockOrigin, ForkChoiceStrategy + BlockOrigin, ForkChoiceStrategy, }; use runtime_primitives::traits::{ Block as BlockT, Header as HeaderT, Zero, As, NumberFor, CurrentHeight, BlockNumberToHash, @@ -619,9 +619,10 @@ impl Client where } /// Lock the import lock, and run operations inside. 
- pub fn lock_import_and_run) -> error::Result>( - &self, f: F - ) -> error::Result { + pub fn lock_import_and_run(&self, f: F) -> Result where + F: FnOnce(&mut ClientImportOperation) -> Result, + Err: From, + { let inner = || { let _import_lock = self.import_lock.lock(); @@ -827,7 +828,7 @@ impl Client where operation.notify_imported = Some((hash, origin, import_headers.into_post(), is_new_best, storage_changes)); } - Ok(ImportResult::Queued) + Ok(ImportResult::imported()) } fn block_execution( @@ -1391,13 +1392,15 @@ impl consensus::BlockImport for Client blockchain::BlockStatus::InChain => {}, blockchain::BlockStatus::Unknown => return Ok(ImportResult::UnknownParent), } + match self.backend.blockchain().status(BlockId::Hash(hash)) .map_err(|e| ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string())))? { blockchain::BlockStatus::InChain => return Ok(ImportResult::AlreadyInChain), blockchain::BlockStatus::Unknown => {}, } - Ok(ImportResult::Queued) + + Ok(ImportResult::imported()) } } @@ -1414,7 +1417,7 @@ impl consensus::Authorities for Client impl CurrentHeight for Client where B: backend::Backend, - E: CallExecutor + Clone, + E: CallExecutor, Block: BlockT, { type BlockNumber = ::Number; @@ -1425,7 +1428,7 @@ impl CurrentHeight for Client where impl BlockNumberToHash for Client where B: backend::Backend, - E: CallExecutor + Clone, + E: CallExecutor, Block: BlockT, { type BlockNumber = ::Number; diff --git a/core/consensus/common/src/block_import.rs b/core/consensus/common/src/block_import.rs index 6cc5b329be9de..0c2b5330b415d 100644 --- a/core/consensus/common/src/block_import.rs +++ b/core/consensus/common/src/block_import.rs @@ -23,19 +23,40 @@ use std::borrow::Cow; /// Block import result. #[derive(Debug, PartialEq, Eq)] pub enum ImportResult { - /// Added to the import queue. - Queued, - /// Already in the import queue. - AlreadyQueued, + /// Block imported. + Imported(ImportedAux), /// Already in the blockchain. AlreadyInChain, /// Block or parent is known to be bad. KnownBad, /// Block parent is not in the chain. UnknownParent, - /// Added to the import queue but must be justified - /// (usually required to safely enact consensus changes). - NeedsJustification, +} + +/// Auxiliary data associated with an imported block result. +#[derive(Debug, PartialEq, Eq)] +pub struct ImportedAux { + /// Clear all pending justification requests. + pub clear_justification_requests: bool, + /// Request a justification for the given block. + pub needs_justification: bool, +} + +impl Default for ImportedAux { + fn default() -> ImportedAux { + ImportedAux { + clear_justification_requests: false, + needs_justification: false, + } + } +} + +impl ImportResult { + /// Returns default value for `ImportResult::Imported` with both + /// `clear_justification_requests` and `needs_justification` set to false. + pub fn imported() -> ImportResult { + ImportResult::Imported(ImportedAux::default()) + } } /// Block data origin. diff --git a/core/consensus/common/src/import_queue.rs b/core/consensus/common/src/import_queue.rs index 38f79804358b4..882fd7228401b 100644 --- a/core/consensus/common/src/import_queue.rs +++ b/core/consensus/common/src/import_queue.rs @@ -24,7 +24,9 @@ //! The `BasicQueue` and `BasicVerifier` traits allow serial queues to be //! instantiated simply. 
-use crate::block_import::{ImportBlock, BlockImport, JustificationImport, ImportResult, BlockOrigin}; +use crate::block_import::{ + BlockImport, BlockOrigin, ImportBlock, ImportedAux, ImportResult, JustificationImport, +}; use crossbeam_channel::{self as channel, Receiver, Sender}; use std::sync::Arc; @@ -307,31 +309,37 @@ impl BlockImporter { match result { Ok(BlockImportResult::ImportedKnown(number)) => link.block_imported(&hash, number), - Ok(BlockImportResult::ImportedUnknown(number)) => { - link.block_imported(&hash, number) - } - Ok(BlockImportResult::ImportedUnjustified(number)) => { + Ok(BlockImportResult::ImportedUnknown(number, aux)) => { link.block_imported(&hash, number); - link.request_justification(&hash, number); + + if aux.clear_justification_requests { + trace!(target: "sync", "Block imported clears all pending justification requests {}: {:?}", number, hash); + link.clear_justification_requests(); + } + + if aux.needs_justification { + trace!(target: "sync", "Block imported but requires justification {}: {:?}", number, hash); + link.request_justification(&hash, number); + } }, Err(BlockImportError::IncompleteHeader(who)) => { if let Some(peer) = who { link.note_useless_and_restart_sync(peer, "Sent block with incomplete header to import"); } - } + }, Err(BlockImportError::VerificationFailed(who, e)) => { if let Some(peer) = who { link.note_useless_and_restart_sync(peer, &format!("Verification failed: {}", e)); } - } + }, Err(BlockImportError::BadBlock(who)) => { if let Some(peer) = who { link.note_useless_and_restart_sync(peer, "Sent us a bad block"); } - } + }, Err(BlockImportError::UnknownParent) | Err(BlockImportError::Error) => { - link.restart() - } + link.restart(); + }, }; } if let Some(link) = self.link.as_ref() { @@ -448,13 +456,15 @@ impl> BlockImportWorker { /// algorithm. pub trait Link: Send { /// Block imported. - fn block_imported(&self, _hash: &B::Hash, _number: NumberFor) { } + fn block_imported(&self, _hash: &B::Hash, _number: NumberFor) {} /// Batch of blocks imported, with or without error. fn blocks_processed(&self, _processed_blocks: Vec, _has_error: bool) {} /// Justification import result. - fn justification_imported(&self, _who: Origin, _hash: &B::Hash, _number: NumberFor, _success: bool) { } + fn justification_imported(&self, _who: Origin, _hash: &B::Hash, _number: NumberFor, _success: bool) {} + /// Clear all pending justification requests. + fn clear_justification_requests(&self) {} /// Request a justification for the given block. - fn request_justification(&self, _hash: &B::Hash, _number: NumberFor) { } + fn request_justification(&self, _hash: &B::Hash, _number: NumberFor) {} /// Disconnect from peer. fn useless_peer(&self, _who: Origin, _reason: &str) {} /// Disconnect from peer and restart sync. @@ -469,9 +479,7 @@ pub enum BlockImportResult { /// Imported known block. ImportedKnown(N), /// Imported unknown block. - ImportedUnknown(N), - /// Imported unjustified block that requires one. - ImportedUnjustified(N), + ImportedUnknown(N, ImportedAux), } /// Block import error. 
@@ -520,17 +528,7 @@ pub fn import_single_block>( trace!(target: "sync", "Block already in chain {}: {:?}", number, hash); Ok(BlockImportResult::ImportedKnown(number)) }, - Ok(ImportResult::AlreadyQueued) => { - trace!(target: "sync", "Block already queued {}: {:?}", number, hash); - Ok(BlockImportResult::ImportedKnown(number)) - }, - Ok(ImportResult::Queued) => { - Ok(BlockImportResult::ImportedUnknown(number)) - }, - Ok(ImportResult::NeedsJustification) => { - trace!(target: "sync", "Block queued but requires justification {}: {:?}", number, hash); - Ok(BlockImportResult::ImportedUnjustified(number)) - }, + Ok(ImportResult::Imported(aux)) => Ok(BlockImportResult::ImportedUnknown(number, aux)), Ok(ImportResult::UnknownParent) => { debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", number, hash, parent); Err(BlockImportError::UnknownParent) @@ -547,7 +545,7 @@ pub fn import_single_block>( }; match import_error(import_handle.check_block(hash, parent))? { - BlockImportResult::ImportedUnknown(_) => (), + BlockImportResult::ImportedUnknown { .. } => (), r @ _ => return Ok(r), // Any other successfull result means that the block is already imported. } @@ -629,7 +627,7 @@ mod tests { assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported)); // Send an unknown - let results = vec![(Ok(BlockImportResult::ImportedUnknown(Default::default())), Default::default())]; + let results = vec![(Ok(BlockImportResult::ImportedUnknown(Default::default(), Default::default())), Default::default())]; let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported)); diff --git a/core/consensus/common/src/lib.rs b/core/consensus/common/src/lib.rs index 53ffe1fec3e4d..fb777b837a6a6 100644 --- a/core/consensus/common/src/lib.rs +++ b/core/consensus/common/src/lib.rs @@ -47,7 +47,9 @@ pub mod evaluation; const MAX_BLOCK_SIZE: usize = 4 * 1024 * 1024 + 512; pub use self::error::{Error, ErrorKind}; -pub use block_import::{BlockImport, JustificationImport, ImportBlock, BlockOrigin, ImportResult, ForkChoiceStrategy}; +pub use block_import::{ + BlockImport, BlockOrigin, ForkChoiceStrategy, ImportedAux, ImportBlock, ImportResult, JustificationImport, +}; /// Trait for getting the authorities at a given block. pub trait Authorities { diff --git a/core/finality-grandpa/Cargo.toml b/core/finality-grandpa/Cargo.toml index 9c17a5a77054e..c84e798fe7392 100644 --- a/core/finality-grandpa/Cargo.toml +++ b/core/finality-grandpa/Cargo.toml @@ -17,8 +17,10 @@ consensus_common = { package = "substrate-consensus-common", path = "../consensu substrate-primitives = { path = "../primitives" } substrate-telemetry = { path = "../telemetry" } client = { package = "substrate-client", path = "../client" } +inherents = { package = "substrate-inherents", path = "../../core/inherents" } network = { package = "substrate-network", path = "../network" } service = { package = "substrate-service", path = "../service", optional = true } +srml-finality-tracker = { path = "../../srml/finality-tracker" } fg_primitives = { package = "substrate-finality-grandpa-primitives", path = "primitives" } grandpa = { package = "finality-grandpa", version = "0.6.0", features = ["derive-codec"] } diff --git a/core/finality-grandpa/primitives/src/lib.rs b/core/finality-grandpa/primitives/src/lib.rs index 4878b79efb9a8..1069647f69ca0 100644 --- a/core/finality-grandpa/primitives/src/lib.rs +++ b/core/finality-grandpa/primitives/src/lib.rs @@ -61,6 +61,7 @@ decl_runtime_apis! 
{ /// applied in the runtime after those N blocks have passed. /// /// The consensus protocol will coordinate the handoff externally. + #[api_version(2)] pub trait GrandpaApi { /// Check a digest for pending changes. /// Return `None` if there are no pending changes. @@ -77,6 +78,28 @@ decl_runtime_apis! { fn grandpa_pending_change(digest: &DigestFor) -> Option>>; + /// Check a digest for forced changes. + /// Return `None` if there are no forced changes. Otherwise, return a + /// tuple containing the pending change and the median last finalized + /// block number at the time the change was signalled. + /// + /// Added in version 2. + /// + /// Forced changes are applied after a delay of _imported_ blocks, + /// while pending changes are applied after a delay of _finalized_ blocks. + /// + /// Precedence towards earlier or later digest items can be given + /// based on the rules of the chain. + /// + /// No change should be scheduled if one is already and the delay has not + /// passed completely. + /// + /// This should be a pure function: i.e. as long as the runtime can interpret + /// the digest type it should return the same result regardless of the current + /// state. + fn grandpa_forced_change(digest: &DigestFor) + -> Option<(NumberFor, ScheduledChange>)>; + /// Get the current GRANDPA authorities and weights. This should not change except /// for when changes are scheduled and the corresponding delay has passed. /// diff --git a/core/finality-grandpa/src/authorities.rs b/core/finality-grandpa/src/authorities.rs index b6cc1baed3ee0..bb4992ef1d74e 100644 --- a/core/finality-grandpa/src/authorities.rs +++ b/core/finality-grandpa/src/authorities.rs @@ -39,17 +39,7 @@ impl Clone for SharedAuthoritySet { } } -impl SharedAuthoritySet -where H: PartialEq, - N: Ord, -{ - /// The genesis authority set. - pub(crate) fn genesis(initial: Vec<(Ed25519AuthorityId, u64)>) -> Self { - SharedAuthoritySet { - inner: Arc::new(RwLock::new(AuthoritySet::genesis(initial))) - } - } - +impl SharedAuthoritySet { /// Acquire a reference to the inner read-write lock. pub(crate) fn inner(&self) -> &RwLock> { &*self.inner @@ -93,11 +83,18 @@ pub(crate) struct Status { } /// A set of authorities. -#[derive(Debug, Clone, Encode, Decode)] +#[derive(Debug, Clone, Encode, Decode, PartialEq)] pub(crate) struct AuthoritySet { - current_authorities: Vec<(Ed25519AuthorityId, u64)>, - set_id: u64, - pending_changes: ForkTree>, + pub(crate) current_authorities: Vec<(Ed25519AuthorityId, u64)>, + pub(crate) set_id: u64, + // Tree of pending standard changes across forks. Standard changes are + // enacted on finality and must be enacted (i.e. finalized) in-order across + // a given branch + pub(crate) pending_standard_changes: ForkTree>, + // Pending forced changes across different forks (at most one per fork). + // Forced changes are enacted on block depth (not finality), for this reason + // only one forced change should exist per fork. + pub(crate) pending_forced_changes: Vec>, } impl AuthoritySet @@ -109,7 +106,8 @@ where H: PartialEq, AuthoritySet { current_authorities: initial, set_id: 0, - pending_changes: ForkTree::new(), + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), } } @@ -124,12 +122,7 @@ where N: Add + Ord + Clone + Debug, H: Clone + Debug { - /// Note an upcoming pending transition. Multiple pending changes on the - /// same branch can be added as long as they don't overlap. This method - /// assumes that changes on the same branch will be added in-order. 
The - /// given function `is_descendent_of` should return `true` if the second - /// hash (target) is a descendent of the first hash (base). - pub(crate) fn add_pending_change( + fn add_standard_change( &mut self, pending: PendingChange, is_descendent_of: &F, @@ -140,65 +133,180 @@ where let hash = pending.canon_hash.clone(); let number = pending.canon_height.clone(); - debug!(target: "afg", "Inserting potential set change signalled at block {:?} \ + debug!(target: "afg", "Inserting potential standard set change signalled at block {:?} \ (delayed by {:?} blocks).", - (&number, &hash), pending.finalization_depth); + (&number, &hash), pending.delay); - self.pending_changes.import( + self.pending_standard_changes.import( hash.clone(), number.clone(), pending, is_descendent_of, )?; - debug!(target: "afg", "There are now {} alternatives for the next pending change (roots), \ - and a total of {} pending changes (across all forks).", - self.pending_changes.roots().count(), - self.pending_changes.iter().count(), + debug!(target: "afg", "There are now {} alternatives for the next pending standard change (roots), \ + and a total of {} pending standard changes (across all forks).", + self.pending_standard_changes.roots().count(), + self.pending_standard_changes.iter().count(), ); Ok(()) } - /// Inspect pending changes. Pending changes in the tree are traversed in pre-order. + fn add_forced_change( + &mut self, + pending: PendingChange, + is_descendent_of: &F, + ) -> Result<(), fork_tree::Error> where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + for change in self.pending_forced_changes.iter() { + if change.canon_hash == pending.canon_hash || + is_descendent_of(&change.canon_hash, &pending.canon_hash)? + { + return Err(fork_tree::Error::UnfinalizedAncestor); + } + } + + // ordered first by effective number and then by signal-block number. + let key = (pending.effective_number(), pending.canon_height.clone()); + let idx = self.pending_forced_changes + .binary_search_by_key(&key, |change| ( + change.effective_number(), + change.canon_height.clone(), + )) + .unwrap_or_else(|i| i); + + debug!(target: "afg", "Inserting potential forced set change at block {:?} \ + (delayed by {:?} blocks).", + (&pending.canon_height, &pending.canon_hash), pending.delay); + + self.pending_forced_changes.insert(idx, pending); + + debug!(target: "afg", "There are now {} pending forced changes.", self.pending_forced_changes.len()); + + Ok(()) + } + + /// Note an upcoming pending transition. Multiple pending standard changes + /// on the same branch can be added as long as they don't overlap. Forced + /// changes are restricted to one per fork. This method assumes that changes + /// on the same branch will be added in-order. The given function + /// `is_descendent_of` should return `true` if the second hash (target) is a + /// descendent of the first hash (base). + pub(crate) fn add_pending_change( + &mut self, + pending: PendingChange, + is_descendent_of: &F, + ) -> Result<(), fork_tree::Error> where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + match pending.delay_kind { + DelayKind::Best { .. } => { + self.add_forced_change(pending, is_descendent_of) + }, + DelayKind::Finalized => { + self.add_standard_change(pending, is_descendent_of) + }, + } + } + + /// Inspect pending changes. Standard pending changes are iterated first, + /// and the changes in the tree are traversed in pre-order, afterwards all + /// forced changes are iterated. 
pub(crate) fn pending_changes(&self) -> impl Iterator> { - self.pending_changes.iter().map(|(_, _, c)| c) + self.pending_standard_changes.iter().map(|(_, _, c)| c) + .chain(self.pending_forced_changes.iter()) } - /// Get the earliest limit-block number, if any. If there are pending - /// changes across different forks, this method will return the earliest - /// effective number (across the different branches). + /// Get the earliest limit-block number, if any. If there are pending changes across + /// different forks, this method will return the earliest effective number (across the + /// different branches). Only standard changes are taken into account for the current + /// limit, since any existing forced change should preclude the voter from voting. pub(crate) fn current_limit(&self) -> Option { - self.pending_changes.roots() + self.pending_standard_changes.roots() .min_by_key(|&(_, _, c)| c.effective_number()) .map(|(_, _, c)| c.effective_number()) } - /// Apply or prune any pending transitions. This method ensures that if - /// there are multiple changes in the same branch, finalizing this block - /// won't finalize past multiple transitions (i.e. transitions must be - /// finalized in-order). The given function `is_descendent_of` should return - /// `true` if the second hash (target) is a descendent of the first hash - /// (base). + /// Apply or prune any pending transitions based on a best-block trigger. + /// + /// Returns `Ok((median, new_set))` when a forced change has occurred. The + /// median represents the median last finalized block at the time the change + /// was signaled, and it should be used as the canon block when starting the + /// new grandpa voter. Only alters the internal state in this case. + /// + /// These transitions are always forced and do not lead to justifications + /// which light clients can follow. + pub(crate) fn apply_forced_changes( + &self, + best_hash: H, + best_number: N, + is_descendent_of: &F, + ) -> Result, E> + where F: Fn(&H, &H) -> Result, + { + let mut new_set = None; + + for change in self.pending_forced_changes.iter() + .take_while(|c| c.effective_number() <= best_number) // to prevent iterating too far + .filter(|c| c.effective_number() == best_number) + { + // check if the given best block is in the same branch as the block that signalled the change. + if is_descendent_of(&change.canon_hash, &best_hash)? { + // apply this change: make the set canonical + info!(target: "finality", "Applying authority set change forced at block #{:?}", + change.canon_height); + + let median_last_finalized = match change.delay_kind { + DelayKind::Best { ref median_last_finalized } => median_last_finalized.clone(), + _ => unreachable!("pending_forced_changes only contains forced changes; forced changes have delay kind Best; qed."), + }; + + new_set = Some((median_last_finalized, AuthoritySet { + current_authorities: change.next_authorities.clone(), + set_id: self.set_id + 1, + pending_standard_changes: ForkTree::new(), // new set, new changes. + pending_forced_changes: Vec::new(), + })); + + break; + } + + // we don't wipe forced changes until another change is + // applied + } + + Ok(new_set) + } + + /// Apply or prune any pending transitions based on a finality trigger. This + /// method ensures that if there are multiple changes in the same branch, + /// finalizing this block won't finalize past multiple transitions (i.e. + /// transitions must be finalized in-order). 
The given function + /// `is_descendent_of` should return `true` if the second hash (target) is a + /// descendent of the first hash (base). /// /// When the set has changed, the return value will be `Ok(Some((H, N)))` /// which is the canonical block where the set last changed (i.e. the given /// hash and number). - pub(crate) fn apply_changes( + pub(crate) fn apply_standard_changes( &mut self, finalized_hash: H, finalized_number: N, is_descendent_of: &F, ) -> Result, fork_tree::Error> - where F: Fn(&H, &H) -> Result, - E: std::error::Error, + where F: Fn(&H, &H) -> Result, + E: std::error::Error, { let mut status = Status { changed: false, new_set_block: None, }; - match self.pending_changes.finalize_with_descendent_if( + match self.pending_standard_changes.finalize_with_descendent_if( &finalized_hash, finalized_number.clone(), is_descendent_of, @@ -207,6 +315,10 @@ where fork_tree::FinalizationResult::Changed(change) => { status.changed = true; + // if we are able to finalize any standard change then we can + // discard all pending forced changes (on different forks) + self.pending_forced_changes.clear(); + if let Some(change) = change { info!(target: "finality", "Applying authority set change scheduled at block #{:?}", change.canon_height); @@ -226,13 +338,13 @@ where Ok(status) } - /// Check whether the given finalized block number enacts any authority set - /// change (without triggering it), ensuring that if there are multiple - /// changes in the same branch, finalizing this block won't finalize past - /// multiple transitions (i.e. transitions must be finalized in-order). The - /// given function `is_descendent_of` should return `true` if the second - /// hash (target) is a descendent of the first hash (base). - pub fn enacts_change( + /// Check whether the given finalized block number enacts any standard + /// authority set change (without triggering it), ensuring that if there are + /// multiple changes in the same branch, finalizing this block won't + /// finalize past multiple transitions (i.e. transitions must be finalized + /// in-order). The given function `is_descendent_of` should return `true` if + /// the second hash (target) is a descendent of the first hash (base). + pub fn enacts_standard_change( &self, finalized_hash: H, finalized_number: N, @@ -241,7 +353,7 @@ where where F: Fn(&H, &H) -> Result, E: std::error::Error, { - self.pending_changes.finalizes_any_with_descendent_if( + self.pending_standard_changes.finalizes_any_with_descendent_if( &finalized_hash, finalized_number.clone(), is_descendent_of, @@ -250,27 +362,58 @@ where } } +/// Kinds of delays for pending changes. +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +pub(crate) enum DelayKind { + /// Depth in finalized chain. + Finalized, + /// Depth in best chain. The median last finalized block is calculated at the time the + /// change was signalled. + Best { median_last_finalized: N }, +} + /// A pending change to the authority set. /// /// This will be applied when the announcing block is at some depth within -/// the finalized chain. -#[derive(Debug, Clone, Encode, Decode, PartialEq)] +/// the finalized or unfinalized chain. +#[derive(Debug, Clone, Encode, PartialEq)] pub(crate) struct PendingChange { /// The new authorities and weights to apply. pub(crate) next_authorities: Vec<(Ed25519AuthorityId, u64)>, - /// How deep in the finalized chain the announcing block must be + /// How deep in the chain the announcing block must be /// before the change is applied. 
- pub(crate) finalization_depth: N, + pub(crate) delay: N, /// The announcing block's height. pub(crate) canon_height: N, /// The announcing block's hash. pub(crate) canon_hash: H, + /// The delay kind. + pub(crate) delay_kind: DelayKind, +} + +impl Decode for PendingChange { + fn decode(value: &mut I) -> Option { + let next_authorities = Decode::decode(value)?; + let delay = Decode::decode(value)?; + let canon_height = Decode::decode(value)?; + let canon_hash = Decode::decode(value)?; + + let delay_kind = DelayKind::decode(value).unwrap_or(DelayKind::Finalized); + + Some(PendingChange { + next_authorities, + delay, + canon_height, + canon_hash, + delay_kind, + }) + } } impl + Clone> PendingChange { /// Returns the effective number this change will be applied at. pub fn effective_number(&self) -> N { - self.canon_height.clone() + self.finalization_depth.clone() + self.canon_height.clone() + self.delay.clone() } } @@ -295,28 +438,32 @@ mod tests { let mut authorities = AuthoritySet { current_authorities: Vec::new(), set_id: 0, - pending_changes: ForkTree::new(), + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), }; let change_a = PendingChange { next_authorities: Vec::new(), - finalization_depth: 10, + delay: 10, canon_height: 5, canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, }; let change_b = PendingChange { next_authorities: Vec::new(), - finalization_depth: 0, + delay: 0, canon_height: 5, canon_hash: "hash_b", + delay_kind: DelayKind::Finalized, }; let change_c = PendingChange { next_authorities: Vec::new(), - finalization_depth: 5, + delay: 5, canon_height: 10, canon_hash: "hash_c", + delay_kind: DelayKind::Finalized, }; authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(false)).unwrap(); @@ -327,9 +474,29 @@ mod tests { _ => unreachable!(), })).unwrap(); + // forced changes are iterated last + let change_d = PendingChange { + next_authorities: Vec::new(), + delay: 2, + canon_height: 1, + canon_hash: "hash_d", + delay_kind: DelayKind::Best { median_last_finalized: 0 }, + }; + + let change_e = PendingChange { + next_authorities: Vec::new(), + delay: 2, + canon_height: 0, + canon_hash: "hash_e", + delay_kind: DelayKind::Best { median_last_finalized: 0 }, + }; + + authorities.add_pending_change(change_d.clone(), &static_is_descendent_of(false)).unwrap(); + authorities.add_pending_change(change_e.clone(), &static_is_descendent_of(false)).unwrap(); + assert_eq!( authorities.pending_changes().collect::>(), - vec![&change_b, &change_a, &change_c], + vec![&change_b, &change_a, &change_c, &change_e, &change_d], ); } @@ -338,7 +505,8 @@ mod tests { let mut authorities = AuthoritySet { current_authorities: Vec::new(), set_id: 0, - pending_changes: ForkTree::new(), + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), }; let set_a = vec![([1; 32].into(), 5)]; @@ -347,16 +515,18 @@ mod tests { // two competing changes at the same height on different forks let change_a = PendingChange { next_authorities: set_a.clone(), - finalization_depth: 10, + delay: 10, canon_height: 5, canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, }; let change_b = PendingChange { next_authorities: set_b.clone(), - finalization_depth: 10, + delay: 10, canon_height: 5, canon_hash: "hash_b", + delay_kind: DelayKind::Finalized, }; authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(true)).unwrap(); @@ -368,7 +538,7 @@ mod tests { ); // finalizing "hash_c" won't enact the change signalled at "hash_a" but 
it will prune out "hash_b" - let status = authorities.apply_changes("hash_c", 11, &is_descendent_of(|base, hash| match (*base, *hash) { + let status = authorities.apply_standard_changes("hash_c", 11, &is_descendent_of(|base, hash| match (*base, *hash) { ("hash_a", "hash_c") => true, ("hash_b", "hash_c") => false, _ => unreachable!(), @@ -382,7 +552,7 @@ mod tests { ); // finalizing "hash_d" will enact the change signalled at "hash_a" - let status = authorities.apply_changes("hash_d", 15, &is_descendent_of(|base, hash| match (*base, *hash) { + let status = authorities.apply_standard_changes("hash_d", 15, &is_descendent_of(|base, hash| match (*base, *hash) { ("hash_a", "hash_d") => true, _ => unreachable!(), })).unwrap(); @@ -400,7 +570,8 @@ mod tests { let mut authorities = AuthoritySet { current_authorities: Vec::new(), set_id: 0, - pending_changes: ForkTree::new(), + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), }; let set_a = vec![([1; 32].into(), 5)]; @@ -409,16 +580,18 @@ mod tests { // two competing changes at the same height on different forks let change_a = PendingChange { next_authorities: set_a.clone(), - finalization_depth: 10, + delay: 10, canon_height: 5, canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, }; let change_c = PendingChange { next_authorities: set_c.clone(), - finalization_depth: 10, + delay: 10, canon_height: 30, canon_hash: "hash_c", + delay_kind: DelayKind::Finalized, }; authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(true)).unwrap(); @@ -437,12 +610,12 @@ mod tests { }); // trying to finalize past `change_c` without finalizing `change_a` first - match authorities.apply_changes("hash_d", 40, &is_descendent_of) { + match authorities.apply_standard_changes("hash_d", 40, &is_descendent_of) { Err(fork_tree::Error::UnfinalizedAncestor) => {}, _ => unreachable!(), } - let status = authorities.apply_changes("hash_b", 15, &is_descendent_of).unwrap(); + let status = authorities.apply_standard_changes("hash_b", 15, &is_descendent_of).unwrap(); assert!(status.changed); assert_eq!(status.new_set_block, Some(("hash_b", 15))); @@ -450,7 +623,7 @@ mod tests { assert_eq!(authorities.set_id, 1); // after finalizing `change_a` it should be possible to finalize `change_c` - let status = authorities.apply_changes("hash_d", 40, &is_descendent_of).unwrap(); + let status = authorities.apply_standard_changes("hash_d", 40, &is_descendent_of).unwrap(); assert!(status.changed); assert_eq!(status.new_set_block, Some(("hash_d", 40))); @@ -459,20 +632,22 @@ mod tests { } #[test] - fn enacts_change_works() { + fn enacts_standard_change_works() { let mut authorities = AuthoritySet { current_authorities: Vec::new(), set_id: 0, - pending_changes: ForkTree::new(), + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), }; let set_a = vec![([1; 32].into(), 5)]; let change_a = PendingChange { next_authorities: set_a.clone(), - finalization_depth: 10, + delay: 10, canon_height: 5, canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, }; authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(false)).unwrap(); @@ -484,12 +659,84 @@ mod tests { }); // "hash_c" won't finalize the existing change since it isn't a descendent - assert!(!authorities.enacts_change("hash_c", 15, &is_descendent_of).unwrap()); - + assert!(!authorities.enacts_standard_change("hash_c", 15, &is_descendent_of).unwrap()); // "hash_b" at depth 14 won't work either - assert!(!authorities.enacts_change("hash_b", 14, 
&is_descendent_of).unwrap()); + assert!(!authorities.enacts_standard_change("hash_b", 14, &is_descendent_of).unwrap()); // but it should work at depth 15 (change height + depth) - assert!(authorities.enacts_change("hash_b", 15, &is_descendent_of).unwrap()); + assert!(authorities.enacts_standard_change("hash_b", 15, &is_descendent_of).unwrap()); + } + + #[test] + fn forced_changes() { + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let set_a = vec![([1; 32].into(), 5)]; + let set_b = vec![([2; 32].into(), 5)]; + + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Best { median_last_finalized: 42 }, + }; + + let change_b = PendingChange { + next_authorities: set_b.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_b", + delay_kind: DelayKind::Best { median_last_finalized: 0 }, + }; + + authorities.add_pending_change(change_a, &static_is_descendent_of(false)).unwrap(); + authorities.add_pending_change(change_b, &static_is_descendent_of(false)).unwrap(); + + // there's an effective change triggered at block 15 but not a standard one. + // so this should do nothing. + assert!(!authorities.enacts_standard_change("hash_c", 15, &static_is_descendent_of(true)).unwrap()); + + // throw a standard change into the mix to prove that it's discarded + // for being on the same fork. + // + // NOTE: after https://github.com/paritytech/substrate/issues/1861 + // this should still be rejected based on the "span" rule -- it overlaps + // with another change on the same fork. + let change_c = PendingChange { + next_authorities: set_b.clone(), + delay: 3, + canon_height: 8, + canon_hash: "hash_a8", + delay_kind: DelayKind::Best { median_last_finalized: 0 }, + }; + + let is_descendent_of_a = is_descendent_of(|base: &&str, _| { + base.starts_with("hash_a") + }); + + assert!(authorities.add_pending_change(change_c, &is_descendent_of_a).is_err()); + + // too early. + assert!(authorities.apply_forced_changes("hash_a10", 10, &static_is_descendent_of(true)).unwrap().is_none()); + + // too late. + assert!(authorities.apply_forced_changes("hash_a16", 16, &static_is_descendent_of(true)).unwrap().is_none()); + + // on time -- chooses the right change. + assert_eq!( + authorities.apply_forced_changes("hash_a15", 15, &is_descendent_of_a).unwrap().unwrap(), + (42, AuthoritySet { + current_authorities: set_a, + set_id: 1, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }) + ); } } diff --git a/core/finality-grandpa/src/aux_schema.rs b/core/finality-grandpa/src/aux_schema.rs new file mode 100644 index 0000000000000..930329344b8fb --- /dev/null +++ b/core/finality-grandpa/src/aux_schema.rs @@ -0,0 +1,275 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Schema for stuff in the aux-db. + +use parity_codec::{Encode, Decode}; +use client::backend::AuxStore; +use client::error::{Result as ClientResult, Error as ClientError, ErrorKind as ClientErrorKind}; +use fork_tree::ForkTree; +use grandpa::round::State as RoundState; +use substrate_primitives::Ed25519AuthorityId; +use log::{info, warn}; + +use crate::authorities::{AuthoritySet, SharedAuthoritySet, PendingChange, DelayKind}; +use crate::consensus_changes::{SharedConsensusChanges, ConsensusChanges}; +use crate::NewAuthoritySet; + +use std::fmt::Debug; +use std::sync::Arc; + +const VERSION_KEY: &[u8] = b"grandpa_schema_version"; +const SET_STATE_KEY: &[u8] = b"grandpa_completed_round"; +const AUTHORITY_SET_KEY: &[u8] = b"grandpa_voters"; +const CONSENSUS_CHANGES_KEY: &[u8] = b"grandpa_consensus_changes"; + +const CURRENT_VERSION: u32 = 1; + +/// The voter set state. +#[derive(Clone, Encode, Decode)] +pub enum VoterSetState { + /// The voter set state, currently paused. + Paused(u64, RoundState), + /// The voter set state, currently live. + Live(u64, RoundState), +} + +impl VoterSetState { + /// Yields the current state. + pub(crate) fn round(&self) -> (u64, RoundState) { + match *self { + VoterSetState::Paused(n, ref s) => (n, s.clone()), + VoterSetState::Live(n, ref s) => (n, s.clone()), + } + } +} + +type V0VoterSetState = (u64, RoundState); + +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +struct V0PendingChange { + next_authorities: Vec<(Ed25519AuthorityId, u64)>, + delay: N, + canon_height: N, + canon_hash: H, +} + +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +struct V0AuthoritySet { + current_authorities: Vec<(Ed25519AuthorityId, u64)>, + set_id: u64, + pending_changes: Vec>, +} + +impl Into> for V0AuthoritySet +where H: Clone + Debug + PartialEq, + N: Clone + Debug + Ord, +{ + fn into(self) -> AuthoritySet { + let mut pending_standard_changes = ForkTree::new(); + + for old_change in self.pending_changes { + let new_change = PendingChange { + next_authorities: old_change.next_authorities, + delay: old_change.delay, + canon_height: old_change.canon_height, + canon_hash: old_change.canon_hash, + delay_kind: DelayKind::Finalized, + }; + + if let Err(err) = pending_standard_changes.import::<_, ClientError>( + new_change.canon_hash.clone(), + new_change.canon_height.clone(), + new_change, + // previously we only supported at most one pending change per fork + &|_, _| Ok(false), + ) { + warn!(target: "afg", "Error migrating pending authority set change: {:?}.", err); + warn!(target: "afg", "Node is in a potentially inconsistent state."); + } + } + + AuthoritySet { + current_authorities: self.current_authorities, + set_id: self.set_id, + pending_forced_changes: Vec::new(), + pending_standard_changes + } + } +} + +fn load_decode(backend: &B, key: &[u8]) -> ClientResult> { + match backend.get_aux(key)? { + None => Ok(None), + Some(t) => T::decode(&mut &t[..]) + .ok_or_else( + || ClientErrorKind::Backend(format!("GRANDPA DB is corrupted.")).into(), + ) + .map(Some) + } +} + +/// Persistent data kept between runs. +pub(crate) struct PersistentData { + pub(crate) authority_set: SharedAuthoritySet, + pub(crate) consensus_changes: SharedConsensusChanges, + pub(crate) set_state: VoterSetState, +} + +/// Load or initialize persistent data from backend. 
+pub(crate) fn load_persistent( + backend: &B, + genesis_hash: H, + genesis_number: N, + genesis_authorities: G, +) + -> ClientResult> + where + B: AuxStore, + H: Debug + Decode + Encode + Clone + PartialEq, + N: Debug + Decode + Encode + Clone + Ord, + G: FnOnce() -> ClientResult> +{ + let version: Option = load_decode(backend, VERSION_KEY)?; + let consensus_changes = load_decode(backend, CONSENSUS_CHANGES_KEY)? + .unwrap_or_else(ConsensusChanges::::empty); + + let make_genesis_round = move || RoundState::genesis((genesis_hash, genesis_number)); + + match version { + None => { + CURRENT_VERSION.using_encoded(|s| + backend.insert_aux(&[(VERSION_KEY, s)], &[]) + )?; + + if let Some(old_set) = load_decode::<_, V0AuthoritySet>(backend, AUTHORITY_SET_KEY)? { + let new_set: AuthoritySet = old_set.into(); + backend.insert_aux(&[(AUTHORITY_SET_KEY, new_set.encode().as_slice())], &[])?; + + let set_state = match load_decode::<_, V0VoterSetState>(backend, SET_STATE_KEY)? { + Some((number, state)) => VoterSetState::Live(number, state), + None => VoterSetState::Live(0, make_genesis_round()), + }; + + return Ok(PersistentData { + authority_set: new_set.into(), + consensus_changes: Arc::new(consensus_changes.into()), + set_state, + }); + } + } + Some(1) => { + if let Some(set) = load_decode::<_, AuthoritySet>(backend, AUTHORITY_SET_KEY)? { + let set_state = match load_decode::<_, VoterSetState>(backend, SET_STATE_KEY)? { + Some(state) => state, + None => VoterSetState::Live(0, make_genesis_round()), + }; + + return Ok(PersistentData { + authority_set: set.into(), + consensus_changes: Arc::new(consensus_changes.into()), + set_state, + }); + } + } + Some(other) => return Err(ClientErrorKind::Backend( + format!("Unsupported GRANDPA DB version: {:?}", other) + ).into()), + } + + // genesis. + info!(target: "afg", "Loading GRANDPA authority set \ + from genesis on what appears to be first startup."); + + let genesis_set = AuthoritySet::genesis(genesis_authorities()?); + let genesis_state = VoterSetState::Live(0, make_genesis_round()); + backend.insert_aux( + &[ + (AUTHORITY_SET_KEY, genesis_set.encode().as_slice()), + (SET_STATE_KEY, genesis_state.encode().as_slice()), + ], + &[], + )?; + + Ok(PersistentData { + authority_set: genesis_set.into(), + set_state: genesis_state, + consensus_changes: Arc::new(consensus_changes.into()), + }) +} + +/// Update the authority set on disk after a change. +pub(crate) fn update_authority_set( + set: &AuthoritySet, + new_set: Option<&NewAuthoritySet>, + write_aux: F +) -> R where + H: Encode + Clone, + N: Encode + Clone, + F: FnOnce(&[(&'static [u8], &[u8])]) -> R, +{ + // write new authority set state to disk. + let encoded_set = set.encode(); + + if let Some(new_set) = new_set { + // we also overwrite the "last completed round" entry with a blank slate + // because from the perspective of the finality gadget, the chain has + // reset. + let round_state = RoundState::genesis(( + new_set.canon_hash.clone(), + new_set.canon_number.clone(), + )); + let set_state = VoterSetState::Live(0, round_state); + let encoded = set_state.encode(); + + write_aux(&[ + (AUTHORITY_SET_KEY, &encoded_set[..]), + (SET_STATE_KEY, &encoded[..]), + ]) + } else { + write_aux(&[(AUTHORITY_SET_KEY, &encoded_set[..])]) + } +} + +/// Write voter set state. 
+pub(crate) fn write_voter_set_state(backend: &B, state: &VoterSetState) + -> ClientResult<()> + where B: AuxStore, H: Encode, N: Encode +{ + backend.insert_aux( + &[(SET_STATE_KEY, state.encode().as_slice())], + &[] + ) +} + +/// Update the consensus changes. +pub(crate) fn update_consensus_changes( + set: &ConsensusChanges, + write_aux: F +) -> R where + H: Encode + Clone, + N: Encode + Clone, + F: FnOnce(&[(&'static [u8], &[u8])]) -> R, +{ + write_aux(&[(CONSENSUS_CHANGES_KEY, set.encode().as_slice())]) +} + +#[cfg(test)] +pub(crate) fn load_authorities(backend: &B) + -> Option> { + load_decode::<_, AuthoritySet>(backend, AUTHORITY_SET_KEY) + .expect("backend error") +} diff --git a/core/finality-grandpa/src/consensus_changes.rs b/core/finality-grandpa/src/consensus_changes.rs index 9d701152dd7de..cbd7b30f8e7a5 100644 --- a/core/finality-grandpa/src/consensus_changes.rs +++ b/core/finality-grandpa/src/consensus_changes.rs @@ -23,11 +23,14 @@ pub(crate) struct ConsensusChanges { pending_changes: Vec<(N, H)>, } -impl ConsensusChanges { +impl ConsensusChanges { /// Create empty consensus changes. pub(crate) fn empty() -> Self { ConsensusChanges { pending_changes: Vec::new(), } } +} + +impl ConsensusChanges { /// Note unfinalized change of consensus-related data. pub(crate) fn note_change(&mut self, at: (N, H)) { diff --git a/core/finality-grandpa/src/environment.rs b/core/finality-grandpa/src/environment.rs index 71424e8be9161..857d6eafd7113 100644 --- a/core/finality-grandpa/src/environment.rs +++ b/core/finality-grandpa/src/environment.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use std::fmt; use std::sync::Arc; use std::time::{Duration, Instant}; @@ -22,6 +21,7 @@ use log::{debug, warn, info}; use parity_codec::Encode; use futures::prelude::*; use tokio::timer::Delay; +use parking_lot::RwLock; use client::{ backend::Backend, BlockchainEvents, CallExecutor, Client, error::Error as ClientError @@ -36,14 +36,42 @@ use runtime_primitives::traits::{ use substrate_primitives::{Blake2Hasher, ed25519,Ed25519AuthorityId, H256}; use crate::{ - AUTHORITY_SET_KEY, CONSENSUS_CHANGES_KEY, LAST_COMPLETED_KEY, - Commit, Config, Error, Network, Precommit, Prevote, LastCompleted, + Commit, Config, Error, Network, Precommit, Prevote, + CommandOrError, NewAuthoritySet, VoterCommand, }; -use crate::authorities::{AuthoritySet, SharedAuthoritySet}; + +use crate::authorities::SharedAuthoritySet; use crate::consensus_changes::SharedConsensusChanges; use crate::justification::GrandpaJustification; use crate::until_imported::UntilVoteTargetImported; +/// Data about a completed round. +pub(crate) type CompletedRound = (u64, RoundState); + +/// A read-only view of the last completed round. +pub(crate) struct LastCompletedRound { + inner: RwLock>, +} + +impl LastCompletedRound { + /// Create a new tracker based on some starting last-completed round. + pub(crate) fn new(round: CompletedRound) -> Self { + LastCompletedRound { inner: RwLock::new(round) } + } + + /// Read the last completed round. + pub(crate) fn read(&self) -> CompletedRound { + self.inner.read().clone() + } + + // NOTE: not exposed outside of this module intentionally. + fn with(&self, f: F) -> R + where F: FnOnce(&mut CompletedRound) -> R + { + f(&mut *self.inner.write()) + } +} + /// The environment we run GRANDPA in. 
pub(crate) struct Environment, RA> { pub(crate) inner: Arc>, @@ -53,6 +81,7 @@ pub(crate) struct Environment, RA> { pub(crate) consensus_changes: SharedConsensusChanges>, pub(crate) network: N, pub(crate) set_id: u64, + pub(crate) last_completed: LastCompletedRound>, } impl, B, E, N, RA> grandpa::Chain> for Environment where @@ -166,54 +195,6 @@ impl, B, E, N, RA> grandpa::Chain { - pub(crate) canon_number: N, - pub(crate) canon_hash: H, - pub(crate) set_id: u64, - pub(crate) authorities: Vec<(Ed25519AuthorityId, u64)>, -} - -/// Signals either an early exit of a voter or an error. -#[derive(Debug)] -pub(crate) enum ExitOrError { - /// An error occurred. - Error(Error), - /// Early exit of the voter: the new set ID and the new authorities along with respective weights. - AuthoritiesChanged(NewAuthoritySet), -} - -impl From for ExitOrError { - fn from(e: Error) -> Self { - ExitOrError::Error(e) - } -} - -impl From for ExitOrError { - fn from(e: ClientError) -> Self { - ExitOrError::Error(Error::Client(e)) - } -} - -impl From for ExitOrError { - fn from(e: grandpa::Error) -> Self { - ExitOrError::Error(Error::from(e)) - } -} - -impl ::std::error::Error for ExitOrError { } - -impl fmt::Display for ExitOrError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - ExitOrError::Error(ref e) => write!(f, "{:?}", e), - ExitOrError::AuthoritiesChanged(_) => write!(f, "restarting voter on new authorities"), - } - } -} - - impl, N, RA> voter::Environment> for Environment where Block: 'static, B: Backend + 'static, @@ -237,7 +218,7 @@ impl, N, RA> voter::Environment + Send>; - type Error = ExitOrError>; + type Error = CommandOrError>; fn round_data( &self, @@ -295,17 +276,32 @@ impl, N, RA> voter::Environment, round: u64, commit: Commit) -> Result<(), Self::Error> { + use client::blockchain::HeaderBackend; + + let status = self.inner.backend().blockchain().info()?; + if number <= status.finalized_number && self.inner.backend().blockchain().hash(number)? == Some(hash) { + // This can happen after a forced change (triggered by the finality tracker when finality is stalled), since + // the voter will be restarted at the median last finalized block, which can be lower than the local best + // finalized block. + warn!(target: "afg", "Re-finalized block #{:?} ({:?}) in the canonical chain, current best finalized is #{:?}", + hash, + number, + status.finalized_number, + ); + + return Ok(()); + } + finalize_block( &*self.inner, &self.authority_set, @@ -375,7 +371,7 @@ pub(crate) fn finalize_block, E, RA>( hash: Block::Hash, number: NumberFor, justification_or_commit: JustificationOrCommit, -) -> Result<(), ExitOrError>> where +) -> Result<(), CommandOrError>> where B: Backend, E: CallExecutor + Send + Sync, RA: Send + Sync, @@ -385,72 +381,43 @@ pub(crate) fn finalize_block, E, RA>( // FIXME #1483: clone only when changed let old_authority_set = authority_set.clone(); - // needed in case there is an authority set change, used for reverting in - // case of error - let mut old_last_completed = None; + // holds the old consensus changes in case it is changed below, needed for + // reverting in case of failure + let mut old_consensus_changes = None; let mut consensus_changes = consensus_changes.lock(); - let status = authority_set.apply_changes( - hash, - number, - &is_descendent_of(client, None), - ).map_err(|e| Error::Safety(e.to_string()))?; - - if status.changed { - // write new authority set state to disk. 
- let encoded_set = authority_set.encode(); - - let write_result = if let Some((ref canon_hash, ref canon_number)) = status.new_set_block { - // we also overwrite the "last completed round" entry with a blank slate - // because from the perspective of the finality gadget, the chain has - // reset. - let round_state = RoundState::genesis((*canon_hash, *canon_number)); - let last_completed: LastCompleted<_, _> = (0, round_state); - let encoded = last_completed.encode(); - - old_last_completed = Backend::get_aux(&**client.backend(), LAST_COMPLETED_KEY)?; - - Backend::insert_aux( - &**client.backend(), - &[ - (AUTHORITY_SET_KEY, &encoded_set[..]), - (LAST_COMPLETED_KEY, &encoded[..]), - ], - &[] - ) - } else { - Backend::insert_aux(&**client.backend(), &[(AUTHORITY_SET_KEY, &encoded_set[..])], &[]) - }; + let canon_at_height = |canon_number| { + // "true" because the block is finalized + canonical_at_height(client, (hash, number), true, canon_number) + }; - if let Err(e) = write_result { - warn!(target: "finality", "Failed to write updated authority set to disk. Bailing."); - warn!(target: "finality", "Node is in a potentially inconsistent state."); + let update_res: Result<_, Error> = client.lock_import_and_run(|import_op| { + let status = authority_set.apply_standard_changes( + hash, + number, + &is_descendent_of(client, None), + ).map_err(|e| Error::Safety(e.to_string()))?; - return Err(e.into()); - } - } + // check if this is this is the first finalization of some consensus changes + let (alters_consensus_changes, finalizes_consensus_changes) = consensus_changes + .finalize((number, hash), &canon_at_height)?; - // check if this is this is the first finalization of some consensus changes - let (alters_consensus_changes, finalizes_consensus_changes) = consensus_changes - .finalize((number, hash), |at_height| canonical_at_height(client, (hash, number), at_height))?; + if alters_consensus_changes { + old_consensus_changes = Some(consensus_changes.clone()); - // holds the old consensus changes in case it is changed below, needed for - // reverting in case of failure - let mut old_consensus_changes = None; - if alters_consensus_changes { - old_consensus_changes = Some(consensus_changes.clone()); + let write_result = crate::aux_schema::update_consensus_changes( + &*consensus_changes, + |insert| client.apply_aux(import_op, insert, &[]), + ); - let encoded = consensus_changes.encode(); - let write_result = Backend::insert_aux(&**client.backend(), &[(CONSENSUS_CHANGES_KEY, &encoded[..])], &[]); - if let Err(e) = write_result { - warn!(target: "finality", "Failed to write updated consensus changes to disk. Bailing."); - warn!(target: "finality", "Node is in a potentially inconsistent state."); + if let Err(e) = write_result { + warn!(target: "finality", "Failed to write updated consensus changes to disk. Bailing."); + warn!(target: "finality", "Node is in a potentially inconsistent state."); - return Err(e.into()); + return Err(e.into()); + } } - } - let aux = |authority_set: &AuthoritySet>| { // NOTE: this code assumes that honest voters will never vote past a // transition block, thus we don't have to worry about the case where // we have a transition with `effective_block = N`, but we finalize @@ -496,12 +463,12 @@ pub(crate) fn finalize_block, E, RA>( // ideally some handle to a synchronization oracle would be used // to avoid unconditionally notifying. 
- client.finalize_block(BlockId::Hash(hash), justification, true).map_err(|e| { + client.apply_finality(import_op, BlockId::Hash(hash), justification, true).map_err(|e| { warn!(target: "finality", "Error applying finality to block {:?}: {:?}", (hash, number), e); e })?; - if let Some((canon_hash, canon_number)) = status.new_set_block { + let new_authorities = if let Some((canon_hash, canon_number)) = status.new_set_block { // the authority set has changed. let (new_id, set_ref) = authority_set.current(); @@ -511,54 +478,46 @@ pub(crate) fn finalize_block, E, RA>( info!("Applying GRANDPA set change to new set {:?}", set_ref); } - Err(ExitOrError::AuthoritiesChanged(NewAuthoritySet { + Some(NewAuthoritySet { canon_hash, canon_number, set_id: new_id, authorities: set_ref.to_vec(), - })) + }) } else { - Ok(()) - } - }; - - match aux(&authority_set) { - Err(ExitOrError::Error(err)) => { - debug!(target: "afg", "Reverting authority set and/or consensus changes after block finalization error: {:?}", err); - - let mut revert_aux = Vec::new(); - - if status.changed { - revert_aux.push((AUTHORITY_SET_KEY, old_authority_set.encode())); - if let Some(old_last_completed) = old_last_completed { - revert_aux.push((LAST_COMPLETED_KEY, old_last_completed)); - } - - *authority_set = old_authority_set.clone(); - } - - if let Some(old_consensus_changes) = old_consensus_changes { - revert_aux.push((CONSENSUS_CHANGES_KEY, old_consensus_changes.encode())); - - *consensus_changes = old_consensus_changes; - } + None + }; - let write_result = Backend::insert_aux( - &**client.backend(), - revert_aux.iter().map(|(k, v)| (*k, &**v)).collect::>().iter(), - &[], + if status.changed { + let write_result = crate::aux_schema::update_authority_set( + &authority_set, + new_authorities.as_ref(), + |insert| client.apply_aux(import_op, insert, &[]), ); if let Err(e) = write_result { - warn!(target: "finality", "Failed to revert consensus changes to disk. Bailing."); + warn!(target: "finality", "Failed to write updated authority set to disk. Bailing."); warn!(target: "finality", "Node is in a potentially inconsistent state."); return Err(e.into()); } + } + + Ok(new_authorities.map(VoterCommand::ChangeAuthorities)) + }); + + match update_res { + Ok(Some(command)) => Err(CommandOrError::VoterCommand(command)), + Ok(None) => Ok(()), + Err(e) => { + *authority_set = old_authority_set; + + if let Some(old_consensus_changes) = old_consensus_changes { + *consensus_changes = old_consensus_changes; + } - Err(ExitOrError::Error(err)) - }, - res => res, + Err(CommandOrError::Error(e)) + } } } @@ -567,27 +526,39 @@ pub(crate) fn finalize_block, E, RA>( pub(crate) fn canonical_at_height, RA>( client: &Client, base: (Block::Hash, NumberFor), + base_is_canonical: bool, height: NumberFor, ) -> Result, ClientError> where B: Backend, E: CallExecutor + Send + Sync, { - use runtime_primitives::traits::{One, Zero}; + use runtime_primitives::traits::{One, Zero, BlockNumberToHash}; if height > base.1 { return Ok(None); } if height == base.1 { - return Ok(Some(base.0)); + if base_is_canonical { + return Ok(Some(base.0)); + } else { + return Ok(client.block_number_to_hash(height)); + } + } else if base_is_canonical { + return Ok(client.block_number_to_hash(height)); } - let mut current = match client.header(&BlockId::Hash(base.0))? { + let one = NumberFor::::one(); + + // start by getting _canonical_ block with number at parent position and then iterating + // backwards by hash. + let mut current = match client.header(&BlockId::Number(base.1 - one))? 
{ Some(header) => header, _ => return Ok(None), }; - let mut steps = base.1 - height; + // we've already checked that base > height above. + let mut steps = base.1 - height - one; while steps > NumberFor::::zero() { current = match client.header(&BlockId::Hash(*current.parent_hash()))? { @@ -595,7 +566,7 @@ pub(crate) fn canonical_at_height, RA>( _ => return Ok(None), }; - steps -= NumberFor::::one(); + steps -= one; } Ok(Some(current.hash())) diff --git a/core/finality-grandpa/src/import.rs b/core/finality-grandpa/src/import.rs index 11306f72195c5..3f4dbb3650a86 100644 --- a/core/finality-grandpa/src/import.rs +++ b/core/finality-grandpa/src/import.rs @@ -19,9 +19,12 @@ use std::sync::Arc; use log::{debug, trace, info}; use parity_codec::Encode; use futures::sync::mpsc; +use parking_lot::RwLockWriteGuard; use client::{blockchain, CallExecutor, Client}; +use client::blockchain::HeaderBackend; use client::backend::Backend; +use client::runtime_api::ApiExt; use consensus_common::{ BlockImport, Error as ConsensusError, ErrorKind as ConsensusErrorKind, ImportBlock, ImportResult, JustificationImport, @@ -35,10 +38,10 @@ use runtime_primitives::traits::{ }; use substrate_primitives::{H256, Ed25519AuthorityId, Blake2Hasher}; -use crate::{AUTHORITY_SET_KEY, Error}; -use crate::authorities::SharedAuthoritySet; +use crate::{Error, CommandOrError, NewAuthoritySet, VoterCommand}; +use crate::authorities::{AuthoritySet, SharedAuthoritySet, DelayKind, PendingChange}; use crate::consensus_changes::SharedConsensusChanges; -use crate::environment::{finalize_block, is_descendent_of, ExitOrError, NewAuthoritySet}; +use crate::environment::{finalize_block, is_descendent_of}; use crate::justification::GrandpaJustification; /// A block-import handler for GRANDPA. @@ -53,7 +56,7 @@ use crate::justification::GrandpaJustification; pub struct GrandpaBlockImport, RA, PRA> { inner: Arc>, authority_set: SharedAuthoritySet>, - authority_set_change: mpsc::UnboundedSender>>, + send_voter_commands: mpsc::UnboundedSender>>, consensus_changes: SharedConsensusChanges>, api: Arc, } @@ -80,7 +83,8 @@ impl, RA, PRA> JustificationImport // request justifications for all pending changes for which change blocks have already been imported let authorities = self.authority_set.inner().read(); for pending_change in authorities.pending_changes() { - if pending_change.effective_number() > chain_info.finalized_number && + if pending_change.delay_kind == DelayKind::Finalized && + pending_change.effective_number() > chain_info.finalized_number && pending_change.effective_number() <= chain_info.best_number { let effective_block_hash = self.inner.best_containing( @@ -109,6 +113,266 @@ impl, RA, PRA> JustificationImport } } +enum AppliedChanges { + Standard, + Forced(NewAuthoritySet), + None, +} + +impl AppliedChanges { + fn needs_justification(&self) -> bool { + match *self { + AppliedChanges::Standard => true, + AppliedChanges::Forced(_) | AppliedChanges::None => false, + } + } +} + +struct PendingSetChanges<'a, Block: 'a + BlockT> { + just_in_case: Option<( + AuthoritySet>, + RwLockWriteGuard<'a, AuthoritySet>>, + )>, + applied_changes: AppliedChanges>, + do_pause: bool, +} + +impl<'a, Block: 'a + BlockT> PendingSetChanges<'a, Block> { + // revert the pending set change explicitly. 
+ fn revert(self) { } + + fn defuse(mut self) -> (AppliedChanges>, bool) { + self.just_in_case = None; + let applied_changes = ::std::mem::replace(&mut self.applied_changes, AppliedChanges::None); + (applied_changes, self.do_pause) + } +} + +impl<'a, Block: 'a + BlockT> Drop for PendingSetChanges<'a, Block> { + fn drop(&mut self) { + if let Some((old_set, mut authorities)) = self.just_in_case.take() { + *authorities = old_set; + } + } +} + +impl, RA, PRA> GrandpaBlockImport where + NumberFor: grandpa::BlockNumberOps, + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + DigestFor: Encode, + DigestItemFor: DigestItem, + RA: Send + Sync, + PRA: ProvideRuntimeApi, + PRA::Api: GrandpaApi, +{ + // check for a new authority set change. + fn check_new_change(&self, header: &Block::Header, hash: Block::Hash) + -> Result>>, ConsensusError> + { + let at = BlockId::hash(*header.parent_hash()); + let digest = header.digest(); + + let api = self.api.runtime_api(); + + // check for forced change. + { + let maybe_change = api.grandpa_forced_change( + &at, + digest, + ); + + match maybe_change { + Err(e) => match api.has_api_with::, _>(&at, |v| v >= 2) { + Err(e) => return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()), + Ok(true) => { + // API version is high enough to support forced changes + // but got error, so it is legitimate. + return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()) + }, + Ok(false) => { + // API version isn't high enough to support forced changes + }, + }, + Ok(None) => {}, + Ok(Some((median_last_finalized, change))) => return Ok(Some(PendingChange { + next_authorities: change.next_authorities, + delay: change.delay, + canon_height: *header.number(), + canon_hash: hash, + delay_kind: DelayKind::Best { median_last_finalized }, + })), + } + } + + // check normal scheduled change. + { + let maybe_change = api.grandpa_pending_change( + &at, + digest, + ); + + match maybe_change { + Err(e) => Err(ConsensusErrorKind::ClientImport(e.to_string()).into()), + Ok(Some(change)) => Ok(Some(PendingChange { + next_authorities: change.next_authorities, + delay: change.delay, + canon_height: *header.number(), + canon_hash: hash, + delay_kind: DelayKind::Finalized, + })), + Ok(None) => Ok(None), + } + } + } + + fn make_authorities_changes<'a>(&'a self, block: &mut ImportBlock, hash: Block::Hash) + -> Result, ConsensusError> + { + // when we update the authorities, we need to hold the lock + // until the block is written to prevent a race if we need to restore + // the old authority set on error or panic. + struct InnerGuard<'a, T: 'a> { + old: Option, + guard: Option>, + } + + impl<'a, T: 'a> InnerGuard<'a, T> { + fn as_mut(&mut self) -> &mut T { + &mut **self.guard.as_mut().expect("only taken on deconstruction; qed") + } + + fn set_old(&mut self, old: T) { + if self.old.is_none() { + // ignore "newer" old changes. 
+ self.old = Some(old); + } + } + + fn consume(mut self) -> Option<(T, RwLockWriteGuard<'a, T>)> { + if let Some(old) = self.old.take() { + Some((old, self.guard.take().expect("only taken on deconstruction; qed"))) + } else { + None + } + } + } + + impl<'a, T: 'a> Drop for InnerGuard<'a, T> { + fn drop(&mut self) { + if let (Some(mut guard), Some(old)) = (self.guard.take(), self.old.take()) { + *guard = old; + } + } + } + + let number = block.header.number().clone(); + let maybe_change = self.check_new_change( + &block.header, + hash, + )?; + + // returns a function for checking whether a block is a descendent of another + // consistent with querying client directly after importing the block. + let parent_hash = *block.header.parent_hash(); + let is_descendent_of = is_descendent_of(&self.inner, Some((&hash, &parent_hash))); + + let mut guard = InnerGuard { + guard: Some(self.authority_set.inner().write()), + old: None, + }; + + // whether to pause the old authority set -- happens after import + // of a forced change block. + let mut do_pause = false; + + // add any pending changes. + if let Some(change) = maybe_change { + let old = guard.as_mut().clone(); + guard.set_old(old); + + if let DelayKind::Best { .. } = change.delay_kind { + do_pause = true; + } + + guard.as_mut().add_pending_change( + change, + &is_descendent_of, + ).map_err(|e| ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string())))?; + } + + let applied_changes = { + let forced_change_set = guard.as_mut().apply_forced_changes(hash, number, &is_descendent_of) + .map_err(|e| ConsensusErrorKind::ClientImport(e.to_string())) + .map_err(ConsensusError::from)?; + + if let Some((median_last_finalized_number, new_set)) = forced_change_set { + let new_authorities = { + let (set_id, new_authorities) = new_set.current(); + + // we will use the median last finalized number as a hint + // for the canon block the new authority set should start + // with. we use the minimum between the median and the local + // best finalized block. + let best_finalized_number = self.inner.backend().blockchain().info() + .map_err(|e| ConsensusErrorKind::ClientImport(e.to_string()))? + .finalized_number; + + let canon_number = best_finalized_number.min(median_last_finalized_number); + + let canon_hash = + self.inner.backend().blockchain().header(BlockId::Number(canon_number)) + .map_err(|e| ConsensusErrorKind::ClientImport(e.to_string()))? + .expect("the given block number is less or equal than the current best finalized number; \ + current best finalized number must exist in chain; qed.") + .hash(); + + NewAuthoritySet { + canon_number, + canon_hash, + set_id, + authorities: new_authorities.to_vec(), + } + }; + let old = ::std::mem::replace(guard.as_mut(), new_set); + guard.set_old(old); + + AppliedChanges::Forced(new_authorities) + } else { + let did_standard = guard.as_mut().enacts_standard_change(hash, number, &is_descendent_of) + .map_err(|e| ConsensusErrorKind::ClientImport(e.to_string())) + .map_err(ConsensusError::from)?; + + if did_standard { + AppliedChanges::Standard + } else { + AppliedChanges::None + } + } + }; + + // consume the guard safely and write necessary changes. + let just_in_case = guard.consume(); + if let Some((_, ref authorities)) = just_in_case { + let authorities_change = match applied_changes { + AppliedChanges::Forced(ref new) => Some(new), + AppliedChanges::Standard => None, // the change isn't actually applied yet. 
+ AppliedChanges::None => None, + }; + + crate::aux_schema::update_authority_set( + authorities, + authorities_change, + |insert| block.auxiliary.extend( + insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) + ) + ); + } + + Ok(PendingSetChanges { just_in_case, applied_changes, do_pause }) + } +} + impl, RA, PRA> BlockImport for GrandpaBlockImport where NumberFor: grandpa::BlockNumberOps, @@ -125,12 +389,8 @@ impl, RA, PRA> BlockImport fn import_block(&self, mut block: ImportBlock, new_authorities: Option>) -> Result { - use crate::authorities::PendingChange; - use client::blockchain::HeaderBackend; - let hash = block.post_header().hash(); - let parent_hash = *block.header.parent_hash(); - let number = *block.header.number(); + let number = block.header.number().clone(); // early exit if block already in chain, otherwise the check for // authority changes will error when trying to re-import a change block @@ -140,82 +400,68 @@ impl, RA, PRA> BlockImport Err(e) => return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()), } - let maybe_change = self.api.runtime_api().grandpa_pending_change( - &BlockId::hash(parent_hash), - &block.header.digest().clone(), - ); - - let maybe_change = match maybe_change { - Err(e) => return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()), - Ok(maybe) => maybe, - }; - - // when we update the authorities, we need to hold the lock - // until the block is written to prevent a race if we need to restore - // the old authority set on error. - let is_descendent_of = is_descendent_of(&self.inner, Some((&hash, &parent_hash))); - let just_in_case = if let Some(change) = maybe_change { - let mut authorities = self.authority_set.inner().write(); - let old_set = authorities.clone(); - - authorities.add_pending_change( - PendingChange { - next_authorities: change.next_authorities, - finalization_depth: change.delay, - canon_height: number, - canon_hash: hash, - }, - &is_descendent_of, - ).map_err(|e| ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string())))?; - - block.auxiliary.push((AUTHORITY_SET_KEY.to_vec(), Some(authorities.encode()))); - Some((old_set, authorities)) - } else { - None - }; + let pending_changes = self.make_authorities_changes(&mut block, hash)?; // we don't want to finalize on `inner.import_block` let justification = block.justification.take(); let enacts_consensus_change = new_authorities.is_some(); let import_result = self.inner.import_block(block, new_authorities); - let import_result = { - // we scope this so that `just_in_case` is dropped eagerly and releases the authorities lock - let revert_authorities = || if let Some((old_set, mut authorities)) = just_in_case { - *authorities = old_set; - }; - + let mut imported_aux = { match import_result { - Ok(ImportResult::Queued) => ImportResult::Queued, + Ok(ImportResult::Imported(aux)) => aux, Ok(r) => { debug!(target: "afg", "Restoring old authority set after block import result: {:?}", r); - revert_authorities(); + pending_changes.revert(); return Ok(r); }, Err(e) => { debug!(target: "afg", "Restoring old authority set after block import error: {:?}", e); - revert_authorities(); + pending_changes.revert(); return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()); }, } }; - let enacts_change = self.authority_set.inner().read().enacts_change( - hash, - number, - &is_descendent_of, - ).map_err(|e| ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string())))?; + let (applied_changes, do_pause) = pending_changes.defuse(); + + // Send the pause 
signal after import but BEFORE sending a `ChangeAuthorities` message. + if do_pause { + let _ = self.send_voter_commands.unbounded_send( + VoterCommand::Pause(format!("Forced change scheduled after inactivity")) + ); + } - if !enacts_change && !enacts_consensus_change { - return Ok(import_result); + let needs_justification = applied_changes.needs_justification(); + if let AppliedChanges::Forced(new) = applied_changes { + // NOTE: when we do a force change we are "discrediting" the old set so we + // ignore any justifications from them. this block may contain a justification + // which should be checked and imported below against the new authority + // triggered by this forced change. the new grandpa voter will start at the + // last median finalized block (which is before the block that enacts the + // change), full nodes syncing the chain will not be able to successfully + // import justifications for those blocks since their local authority set view + // is still of the set before the forced change was enacted, still after #1867 + // they should import the block and discard the justification, and they will + // then request a justification from sync if it's necessary (which they should + // then be able to successfully validate). + let _ = self.send_voter_commands.unbounded_send(VoterCommand::ChangeAuthorities(new)); + + // we must clear all pending justification requests, presumably they won't be + // finalized, hence why this forced change was triggered + imported_aux.clear_justification_requests = true; + } + + if !needs_justification && !enacts_consensus_change { + return Ok(ImportResult::Imported(imported_aux)); } match justification { Some(justification) => { - self.import_justification(hash, number, justification, enacts_change)?; + self.import_justification(hash, number, justification, needs_justification)?; }, None => { - if enacts_change { + if needs_justification { trace!( target: "finality", "Imported unjustified block #{} that enacts authority set change, waiting for finality for enactment.", @@ -229,11 +475,11 @@ impl, RA, PRA> BlockImport self.consensus_changes.lock().note_change((number, hash)); } - return Ok(ImportResult::NeedsJustification); + imported_aux.needs_justification = true; } } - Ok(import_result) + Ok(ImportResult::Imported(imported_aux)) } fn check_block( @@ -249,22 +495,22 @@ impl, RA, PRA> GrandpaBlockImport>, authority_set: SharedAuthoritySet>, - authority_set_change: mpsc::UnboundedSender>>, + send_voter_commands: mpsc::UnboundedSender>>, consensus_changes: SharedConsensusChanges>, api: Arc, ) -> GrandpaBlockImport { GrandpaBlockImport { inner, authority_set, - authority_set_change, + send_voter_commands, consensus_changes, api, } } } -impl, RA, PRA> - GrandpaBlockImport where +impl, RA, PRA> GrandpaBlockImport + where NumberFor: grandpa::BlockNumberOps, B: Backend + 'static, E: CallExecutor + 'static + Clone + Send + Sync, @@ -304,13 +550,15 @@ impl, RA, PRA> ); match result { - Err(ExitOrError::AuthoritiesChanged(new)) => { - info!(target: "finality", "Imported justification for block #{} that enacts authority set change, signalling voter.", number); - if let Err(e) = self.authority_set_change.unbounded_send(new) { + Err(CommandOrError::VoterCommand(command)) => { + info!(target: "finality", "Imported justification for block #{} that triggers \ + command {}, signalling voter.", number, command); + + if let Err(e) = self.send_voter_commands.unbounded_send(command) { return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()); } }, - 
Err(ExitOrError::Error(e)) => { + Err(CommandOrError::Error(e)) => { return Err(match e { Error::Grandpa(error) => ConsensusErrorKind::ClientImport(error.to_string()), Error::Network(error) => ConsensusErrorKind::ClientImport(error), diff --git a/core/finality-grandpa/src/lib.rs b/core/finality-grandpa/src/lib.rs index 806fc6cfaafb2..a6b3df13fc565 100644 --- a/core/finality-grandpa/src/lib.rs +++ b/core/finality-grandpa/src/lib.rs @@ -66,21 +66,27 @@ use runtime_primitives::traits::{ DigestItemFor, DigestItem, }; use fg_primitives::GrandpaApi; +use inherents::InherentDataProviders; use runtime_primitives::generic::BlockId; use substrate_primitives::{ed25519, H256, Ed25519AuthorityId, Blake2Hasher}; use substrate_telemetry::{telemetry, CONSENSUS_TRACE, CONSENSUS_DEBUG, CONSENSUS_WARN}; +use srml_finality_tracker; + use grandpa::Error as GrandpaError; use grandpa::{voter, round::State as RoundState, BlockNumberOps, VoterSet}; use network::Service as NetworkService; use network::consensus_gossip as network_gossip; + +use std::fmt; use std::sync::Arc; use std::time::Duration; pub use fg_primitives::ScheduledChange; mod authorities; +mod aux_schema; mod communication; mod consensus_changes; mod environment; @@ -94,9 +100,8 @@ mod service_integration; #[cfg(feature="service-integration")] pub use service_integration::{LinkHalfForService, BlockImportForService}; -use authorities::SharedAuthoritySet; -use consensus_changes::{ConsensusChanges, SharedConsensusChanges}; -use environment::{Environment, ExitOrError, NewAuthoritySet}; +use aux_schema::{PersistentData, VoterSetState}; +use environment::Environment; pub use finality_proof::{prove_finality, check_finality_proof}; use import::GrandpaBlockImport; use until_imported::UntilCommitBlocksImported; @@ -104,17 +109,9 @@ use until_imported::UntilCommitBlocksImported; #[cfg(test)] mod tests; -const LAST_COMPLETED_KEY: &[u8] = b"grandpa_completed_round"; -const AUTHORITY_SET_KEY: &[u8] = b"grandpa_voters"; -const CONSENSUS_CHANGES_KEY: &[u8] = b"grandpa_consensus_changes"; - const GRANDPA_ENGINE_ID: network::ConsensusEngineId = [b'a', b'f', b'g', b'1']; - const MESSAGE_ROUND_TOLERANCE: u64 = 2; -/// round-number, round-state -type LastCompleted = (u64, RoundState); - /// A GRANDPA message for a substrate chain. pub type Message = grandpa::Message<::Hash, NumberFor>; /// A signed message. @@ -557,13 +554,81 @@ impl, RA> BlockStatus for Arc { + pub(crate) canon_number: N, + pub(crate) canon_hash: H, + pub(crate) set_id: u64, + pub(crate) authorities: Vec<(Ed25519AuthorityId, u64)>, +} + +/// Commands issued to the voter. +#[derive(Debug)] +pub(crate) enum VoterCommand { + /// Pause the voter for given reason. + Pause(String), + /// New authorities. + ChangeAuthorities(NewAuthoritySet) +} + +impl fmt::Display for VoterCommand { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + VoterCommand::Pause(ref reason) => write!(f, "Pausing voter: {}", reason), + VoterCommand::ChangeAuthorities(_) => write!(f, "Changing authorities"), + } + } +} + +/// Signals either an early exit of a voter or an error. +#[derive(Debug)] +pub(crate) enum CommandOrError { + /// An error occurred. + Error(Error), + /// A command to the voter. 
+ VoterCommand(VoterCommand), +} + +impl From for CommandOrError { + fn from(e: Error) -> Self { + CommandOrError::Error(e) + } +} + +impl From for CommandOrError { + fn from(e: ClientError) -> Self { + CommandOrError::Error(Error::Client(e)) + } +} + +impl From for CommandOrError { + fn from(e: grandpa::Error) -> Self { + CommandOrError::Error(Error::from(e)) + } +} + +impl From> for CommandOrError { + fn from(e: VoterCommand) -> Self { + CommandOrError::VoterCommand(e) + } +} + +impl ::std::error::Error for CommandOrError { } + +impl fmt::Display for CommandOrError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + CommandOrError::Error(ref e) => write!(f, "{:?}", e), + CommandOrError::VoterCommand(ref cmd) => write!(f, "{}", cmd), + } + } +} + pub struct LinkHalf, RA> { client: Arc>, - authority_set: SharedAuthoritySet>, - authority_set_change: mpsc::UnboundedReceiver>>, - consensus_changes: SharedConsensusChanges>, + persistent_data: PersistentData>, + voter_commands_rx: mpsc::UnboundedReceiver>>, } /// Make block importer and link half necessary to tie the background voter @@ -577,60 +642,41 @@ pub fn block_import, RA, PRA>( E: CallExecutor + 'static + Clone + Send + Sync, RA: Send + Sync, PRA: ProvideRuntimeApi, - PRA::Api: GrandpaApi + PRA::Api: GrandpaApi, { use runtime_primitives::traits::Zero; - let authority_set = match Backend::get_aux(&**client.backend(), AUTHORITY_SET_KEY)? { - None => { - info!(target: "afg", "Loading GRANDPA authorities \ - from genesis on what appears to be first startup."); - - // no authority set on disk: fetch authorities from genesis state. - // if genesis state is not available, we may be a light client, but these - // are unsupported for following GRANDPA directly. + + let chain_info = client.info()?; + let genesis_hash = chain_info.chain.genesis_hash; + + let persistent_data = aux_schema::load_persistent( + &**client.backend(), + genesis_hash, + >::zero(), + || { let genesis_authorities = api.runtime_api() .grandpa_authorities(&BlockId::number(Zero::zero()))?; telemetry!(CONSENSUS_DEBUG; "afg.loading_authorities"; "authorities_len" => ?genesis_authorities.len() ); - - let authority_set = SharedAuthoritySet::genesis(genesis_authorities); - let encoded = authority_set.inner().read().encode(); - Backend::insert_aux(&**client.backend(), &[(AUTHORITY_SET_KEY, &encoded[..])], &[])?; - - authority_set + Ok(genesis_authorities) } - Some(raw) => crate::authorities::AuthoritySet::decode(&mut &raw[..]) - .ok_or_else(|| ::client::error::ErrorKind::Backend( - format!("GRANDPA authority set kept in invalid format") - ))? 
- .into(), - }; - - let consensus_changes = Backend::get_aux(&**client.backend(), CONSENSUS_CHANGES_KEY)?; - let consensus_changes = Arc::new(parking_lot::Mutex::new(match consensus_changes { - Some(raw) => ConsensusChanges::decode(&mut &raw[..]) - .ok_or_else(|| ::client::error::ErrorKind::Backend( - format!("GRANDPA consensus changes kept in invalid format") - ))?, - None => ConsensusChanges::empty(), - })); - - let (authority_set_change_tx, authority_set_change_rx) = mpsc::unbounded(); + )?; + + let (voter_commands_tx, voter_commands_rx) = mpsc::unbounded(); Ok(( GrandpaBlockImport::new( client.clone(), - authority_set.clone(), - authority_set_change_tx, - consensus_changes.clone(), + persistent_data.authority_set.clone(), + voter_commands_tx, + persistent_data.consensus_changes.clone(), api, ), LinkHalf { client, - authority_set, - authority_set_change: authority_set_change_rx, - consensus_changes, + persistent_data, + voter_commands_rx, }, )) } @@ -644,11 +690,11 @@ fn committer_communication, B, E, N, RA>( ) -> ( impl Stream< Item = (u64, ::grandpa::CompactCommit, ed25519::Signature, Ed25519AuthorityId>), - Error = ExitOrError>, + Error = CommandOrError>, >, impl Sink< SinkItem = (u64, ::grandpa::Commit, ed25519::Signature, Ed25519AuthorityId>), - SinkError = ExitOrError>, + SinkError = CommandOrError>, >, ) where B: Backend, @@ -687,12 +733,37 @@ fn committer_communication, B, E, N, RA>( (commit_in, commit_out) } +/// Register the finality tracker inherent data provider (which is used by +/// GRANDPA), if not registered already. +fn register_finality_tracker_inherent_data_provider, RA>( + client: Arc>, + inherent_data_providers: &InherentDataProviders, +) -> Result<(), consensus_common::Error> where + B: Backend + 'static, + E: CallExecutor + Send + Sync + 'static, + RA: Send + Sync + 'static, +{ + if !inherent_data_providers.has_provider(&srml_finality_tracker::INHERENT_IDENTIFIER) { + inherent_data_providers + .register_provider(srml_finality_tracker::InherentDataProvider::new(move || { + match client.backend().blockchain().info() { + Err(e) => Err(std::borrow::Cow::Owned(e.to_string())), + Ok(info) => Ok(info.finalized_number), + } + })) + .map_err(|err| consensus_common::ErrorKind::InherentData(err.into()).into()) + } else { + Ok(()) + } +} + /// Run a GRANDPA voter as a task. Provide configuration and a link to a /// block import worker that has already been instantiated with `block_import`. pub fn run_grandpa, N, RA>( config: Config, link: LinkHalf, network: N, + inherent_data_providers: InherentDataProviders, on_exit: impl Future + Send + 'static, ) -> ::client::error::Result + Send + 'static> where Block::Hash: Ord, @@ -706,29 +777,18 @@ pub fn run_grandpa, N, RA>( RA: Send + Sync + 'static, { use futures::future::{self, Loop as FutureLoop}; - use runtime_primitives::traits::Zero; let LinkHalf { client, - authority_set, - authority_set_change, - consensus_changes, + persistent_data, + voter_commands_rx, } = link; - - let chain_info = client.info()?; - let genesis_hash = chain_info.chain.genesis_hash; - // we shadow network with the wrapping/rebroadcasting network to avoid // accidental reuse. let (broadcast_worker, network) = communication::rebroadcasting_network(network); + let PersistentData { authority_set, set_state, consensus_changes } = persistent_data; - let (last_round_number, last_state) = match Backend::get_aux(&**client.backend(), LAST_COMPLETED_KEY)? 
{ - None => (0, RoundState::genesis((genesis_hash, >::zero()))), - Some(raw) => LastCompleted::decode(&mut &raw[..]) - .ok_or_else(|| ::client::error::ErrorKind::Backend( - format!("Last GRANDPA round state kept in invalid format") - ))? - }; + register_finality_tracker_inherent_data_provider(client.clone(), &inherent_data_providers)?; let voters = authority_set.current_authorities(); @@ -740,95 +800,131 @@ pub fn run_grandpa, N, RA>( set_id: authority_set.set_id(), authority_set: authority_set.clone(), consensus_changes: consensus_changes.clone(), + last_completed: environment::LastCompletedRound::new(set_state.round()), }); - let initial_state = (initial_environment, last_round_number, last_state, authority_set_change.into_future()); + let initial_state = (initial_environment, set_state, voter_commands_rx.into_future()); let voter_work = future::loop_fn(initial_state, move |params| { - let (env, last_round_number, last_state, authority_set_change) = params; + let (env, set_state, voter_commands_rx) = params; debug!(target: "afg", "{}: Starting new voter with set ID {}", config.name(), env.set_id); telemetry!(CONSENSUS_DEBUG; "afg.starting_new_voter"; "name" => ?config.name(), "set_id" => ?env.set_id ); - let chain_info = match client.info() { - Ok(i) => i, - Err(e) => return future::Either::B(future::err(Error::Client(e))), - }; + let mut maybe_voter = match set_state.clone() { + VoterSetState::Live(last_round_number, last_round_state) => { + let chain_info = match client.info() { + Ok(i) => i, + Err(e) => return future::Either::B(future::err(Error::Client(e))), + }; - let last_finalized = ( - chain_info.chain.finalized_hash, - chain_info.chain.finalized_number, - ); + let last_finalized = ( + chain_info.chain.finalized_hash, + chain_info.chain.finalized_number, + ); - let committer_data = committer_communication( - config.local_key.clone(), - env.set_id, - &env.voters, - &client, - &network, - ); + let committer_data = committer_communication( + config.local_key.clone(), + env.set_id, + &env.voters, + &client, + &network, + ); - let voters = (*env.voters).clone(); + let voters = (*env.voters).clone(); + + Some(voter::Voter::new( + env.clone(), + voters, + committer_data, + last_round_number, + last_round_state, + last_finalized, + )) + } + VoterSetState::Paused(_, _) => None, + }; + + // needs to be combined with another future otherwise it can deadlock. + let poll_voter = future::poll_fn(move || match maybe_voter { + Some(ref mut voter) => voter.poll(), + None => Ok(Async::NotReady), + }); - let voter = voter::Voter::new( - env, - voters, - committer_data, - last_round_number, - last_state, - last_finalized, - ); let client = client.clone(); let config = config.clone(); let network = network.clone(); let authority_set = authority_set.clone(); let consensus_changes = consensus_changes.clone(); - let trigger_authority_set_change = |new: NewAuthoritySet<_, _>, authority_set_change| { - let env = Arc::new(Environment { - inner: client, - config, - voters: Arc::new(new.authorities.into_iter().collect()), - set_id: new.set_id, - network, - authority_set, - consensus_changes, - }); - - // start the new authority set using the block where the - // set changed (not where the signal happened!) as the base. - Ok(FutureLoop::Continue(( - env, - 0, // always start at round 0 when changing sets. 
- RoundState::genesis((new.canon_hash, new.canon_number)), - authority_set_change, - ))) + let handle_voter_command = move |command: VoterCommand<_, _>, voter_commands_rx| { + match command { + VoterCommand::ChangeAuthorities(new) => { + // start the new authority set using the block where the + // set changed (not where the signal happened!) as the base. + let genesis_state = RoundState::genesis((new.canon_hash, new.canon_number)); + let env = Arc::new(Environment { + inner: client, + config, + voters: Arc::new(new.authorities.into_iter().collect()), + set_id: new.set_id, + network, + authority_set, + consensus_changes, + last_completed: environment::LastCompletedRound::new( + (0, genesis_state.clone()) + ), + }); + + + let set_state = VoterSetState::Live( + 0, // always start at round 0 when changing sets. + genesis_state, + ); + + Ok(FutureLoop::Continue((env, set_state, voter_commands_rx))) + } + VoterCommand::Pause(reason) => { + info!(target: "afg", "Pausing old validator set: {}", reason); + + // not racing because old voter is shut down. + let (last_round_number, last_round_state) = env.last_completed.read(); + let set_state = VoterSetState::Paused( + last_round_number, + last_round_state, + ); + + aux_schema::write_voter_set_state(&**client.backend(), &set_state)?; + + Ok(FutureLoop::Continue((env, set_state, voter_commands_rx))) + }, + } }; - future::Either::A(voter.select2(authority_set_change).then(move |res| match res { + future::Either::A(poll_voter.select2(voter_commands_rx).then(move |res| match res { Ok(future::Either::A(((), _))) => { // voters don't conclude naturally; this could reasonably be an error. Ok(FutureLoop::Break(())) }, Err(future::Either::B(_)) => { - // the `authority_set_change` stream should not fail. + // the `voter_commands_rx` stream should not fail. Ok(FutureLoop::Break(())) }, Ok(future::Either::B(((None, _), _))) => { - // the `authority_set_change` stream should never conclude since it's never closed. + // the `voter_commands_rx` stream should never conclude since it's never closed. Ok(FutureLoop::Break(())) }, - Err(future::Either::A((ExitOrError::Error(e), _))) => { + Err(future::Either::A((CommandOrError::Error(e), _))) => { // return inner voter error Err(e) } - Ok(future::Either::B(((Some(new), authority_set_change), _))) => { - // authority set change triggered externally through the channel - trigger_authority_set_change(new, authority_set_change.into_future()) + Ok(future::Either::B(((Some(command), voter_commands_rx), _))) => { + // some command issued externally. + handle_voter_command(command, voter_commands_rx.into_future()) } - Err(future::Either::A((ExitOrError::AuthoritiesChanged(new), authority_set_change))) => { - // authority set change triggered internally by finalizing a change block - trigger_authority_set_change(new, authority_set_change) + Err(future::Either::A((CommandOrError::VoterCommand(command), voter_commands_rx))) => { + // some command issued internally. 
+ handle_voter_command(command, voter_commands_rx) }, })) }); diff --git a/core/finality-grandpa/src/tests.rs b/core/finality-grandpa/src/tests.rs index 4821686f38bea..7fba9b654cbdd 100644 --- a/core/finality-grandpa/src/tests.rs +++ b/core/finality-grandpa/src/tests.rs @@ -29,8 +29,7 @@ use client::{ runtime_api::{Core, RuntimeVersion, ApiExt}, }; use test_client::{self, runtime::BlockNumber}; -use parity_codec::Decode; -use consensus_common::{BlockOrigin, ForkChoiceStrategy, ImportBlock, ImportResult}; +use consensus_common::{BlockOrigin, ForkChoiceStrategy, ImportedAux, ImportBlock, ImportResult}; use consensus_common::import_queue::{SharedBlockImport, SharedJustificationImport}; use std::collections::{HashMap, HashSet}; use std::result; @@ -40,6 +39,7 @@ use runtime_primitives::ExecutionContext; use substrate_primitives::NativeOrEncoded; use authorities::AuthoritySet; +use consensus_changes::ConsensusChanges; type PeerData = Mutex< @@ -240,6 +240,7 @@ impl Network for MessageRouting { struct TestApi { genesis_authorities: Vec<(Ed25519AuthorityId, u64)>, scheduled_changes: Arc>>>, + forced_changes: Arc)>>>, } impl TestApi { @@ -247,6 +248,7 @@ impl TestApi { TestApi { genesis_authorities, scheduled_changes: Arc::new(Mutex::new(HashMap::new())), + forced_changes: Arc::new(Mutex::new(HashMap::new())), } } } @@ -349,6 +351,24 @@ impl GrandpaApi for RuntimeApi { // extrinsics. Ok(self.inner.scheduled_changes.lock().get(&parent_hash).map(|c| c.clone())).map(NativeOrEncoded::Native) } + + fn grandpa_forced_change_runtime_api_impl( + &self, + at: &BlockId, + _: ExecutionContext, + _: Option<(&DigestFor)>, + _: Vec, + ) + -> Result, ScheduledChange>)>>> { + let parent_hash = match at { + &BlockId::Hash(at) => at, + _ => panic!("not requested by block hash!!"), + }; + + // we take only scheduled changes at given block number where there are no + // extrinsics. + Ok(self.inner.forced_changes.lock().get(&parent_hash).map(|c| c.clone())).map(NativeOrEncoded::Native) + } } const TEST_GOSSIP_DURATION: Duration = Duration::from_millis(500); @@ -361,7 +381,14 @@ fn make_ids(keys: &[Keyring]) -> Vec<(Ed25519AuthorityId, u64)> { .collect() } -fn run_to_completion(blocks: u64, net: Arc>, peers: &[Keyring]) -> u64 { +// run the voters to completion. provide a closure to be invoked after +// the voters are spawned but before blocking on them. 
+fn run_to_completion_with( + blocks: u64, + net: Arc>, + peers: &[Keyring], + before_waiting: F, +) -> u64 { use parking_lot::RwLock; let mut finality_notifications = Vec::new(); @@ -402,6 +429,7 @@ fn run_to_completion(blocks: u64, net: Arc>, peers: &[Keyr }, link, MessageRouting::new(net.clone(), peer_id), + InherentDataProviders::new(), futures::empty(), ).expect("all in order with client and network"); @@ -425,6 +453,8 @@ fn run_to_completion(blocks: u64, net: Arc>, peers: &[Keyr .map(|_| ()) .map_err(|_| ()); + (before_waiting)(); + runtime.block_on(wait_for.select(drive_to_completion).map_err(|_| ())).unwrap(); let highest_finalized = *highest_finalized.read(); @@ -432,8 +462,13 @@ fn run_to_completion(blocks: u64, net: Arc>, peers: &[Keyr highest_finalized } +fn run_to_completion(blocks: u64, net: Arc>, peers: &[Keyring]) -> u64 { + run_to_completion_with(blocks, net, peers, || {}) +} + #[test] fn finalize_3_voters_no_observers() { + let _ = env_logger::try_init(); let peers = &[Keyring::Alice, Keyring::Bob, Keyring::Charlie]; let voters = make_ids(peers); @@ -495,6 +530,7 @@ fn finalize_3_voters_1_observer() { }, link, MessageRouting::new(net.clone(), peer_id), + InherentDataProviders::new(), futures::empty(), ).expect("all in order with client and network"); @@ -552,8 +588,9 @@ fn transition_3_voters_twice_1_observer() { assert_eq!(peer.client().info().unwrap().chain.best_number, 1, "Peer #{} failed to sync", i); - let set_raw = peer.client().backend().get_aux(crate::AUTHORITY_SET_KEY).unwrap().unwrap(); - let set = AuthoritySet::::decode(&mut &set_raw[..]).unwrap(); + let set: AuthoritySet = crate::aux_schema::load_authorities( + &**peer.client().backend() + ).unwrap(); assert_eq!(set.current(), (0, make_ids(peers_a).as_slice())); assert_eq!(set.pending_changes().count(), 0); @@ -638,8 +675,9 @@ fn transition_3_voters_twice_1_observer() { .take_while(|n| Ok(n.header.number() < &30)) .for_each(move |_| Ok(())) .map(move |()| { - let set_raw = client.backend().get_aux(crate::AUTHORITY_SET_KEY).unwrap().unwrap(); - let set = AuthoritySet::::decode(&mut &set_raw[..]).unwrap(); + let set: AuthoritySet = crate::aux_schema::load_authorities( + &**client.backend() + ).unwrap(); assert_eq!(set.current(), (2, make_ids(peers_c).as_slice())); assert_eq!(set.pending_changes().count(), 0); @@ -654,6 +692,7 @@ fn transition_3_voters_twice_1_observer() { }, link, MessageRouting::new(net.clone(), peer_id), + InherentDataProviders::new(), futures::empty(), ).expect("all in order with client and network"); @@ -784,7 +823,7 @@ fn sync_justifications_on_change_blocks() { #[test] fn finalizes_multiple_pending_changes_in_order() { - env_logger::init(); + let _ = env_logger::try_init(); let peers_a = &[Keyring::Alice, Keyring::Bob, Keyring::Charlie]; let peers_b = &[Keyring::Dave, Keyring::Eve, Keyring::Ferdie]; @@ -865,6 +904,60 @@ fn doesnt_vote_on_the_tip_of_the_chain() { assert_eq!(highest, 75); } +#[test] +fn force_change_to_new_set() { + // two of these guys are offline. 
+ let genesis_authorities = &[Keyring::Alice, Keyring::Bob, Keyring::Charlie, Keyring::One, Keyring::Two]; + let peers_a = &[Keyring::Alice, Keyring::Bob, Keyring::Charlie]; + let api = TestApi::new(make_ids(genesis_authorities)); + + let voters = make_ids(peers_a); + let normal_transitions = api.scheduled_changes.clone(); + let forced_transitions = api.forced_changes.clone(); + let net = GrandpaTestNet::new(api, 3); + let net = Arc::new(Mutex::new(net)); + + let runner_net = net.clone(); + let add_blocks = move || { + net.lock().peer(0).push_blocks(1, false); + + { + // add a forced transition at block 12. + let parent_hash = net.lock().peer(0).client().info().unwrap().chain.best_hash; + forced_transitions.lock().insert(parent_hash, (0, ScheduledChange { + next_authorities: voters.clone(), + delay: 10, + })); + + // add a normal transition too to ensure that forced changes take priority. + normal_transitions.lock().insert(parent_hash, ScheduledChange { + next_authorities: make_ids(genesis_authorities), + delay: 5, + }); + } + + net.lock().peer(0).push_blocks(25, false); + net.lock().sync(); + + for (i, peer) in net.lock().peers().iter().enumerate() { + assert_eq!(peer.client().info().unwrap().chain.best_number, 26, + "Peer #{} failed to sync", i); + + let set: AuthoritySet = crate::aux_schema::load_authorities( + &**peer.client().backend() + ).unwrap(); + + assert_eq!(set.current(), (1, voters.as_slice())); + assert_eq!(set.pending_changes().count(), 0); + } + }; + + // it will only finalize if the forced transition happens. + // we add_blocks after the voters are spawned because otherwise + // the link-halfs have the wrong AuthoritySet + run_to_completion_with(25, runner_net, peers_a, add_blocks); +} + #[test] fn allows_reimporting_change_blocks() { let peers_a = &[Keyring::Alice, Keyring::Bob, Keyring::Charlie]; @@ -899,7 +992,7 @@ fn allows_reimporting_change_blocks() { assert_eq!( block_import.import_block(block(), None).unwrap(), - ImportResult::NeedsJustification + ImportResult::Imported(ImportedAux { needs_justification: true, clear_justification_requests: false }), ); assert_eq!( diff --git a/core/network/src/protocol.rs b/core/network/src/protocol.rs index 237e06751e302..7c6e973c69919 100644 --- a/core/network/src/protocol.rs +++ b/core/network/src/protocol.rs @@ -223,6 +223,8 @@ pub enum ProtocolMsg> { PropagateExtrinsics, /// Tell protocol that a block was imported (sent by the import-queue). BlockImportedSync(B::Hash, NumberFor), + /// Tell protocol to clear all pending justification requests. + ClearJustificationRequests, /// Tell protocol to request justification for a block. RequestJustification(B::Hash, NumberFor), /// Inform protocol whether a justification was successfully imported. 
@@ -394,6 +396,7 @@ impl, H: ExHashT> Protocol { } ProtocolMsg::AnnounceBlock(hash) => self.announce_block(hash), ProtocolMsg::BlockImportedSync(hash, number) => self.sync.block_imported(&hash, number), + ProtocolMsg::ClearJustificationRequests => self.sync.clear_justification_requests(), ProtocolMsg::RequestJustification(hash, number) => { let mut context = ProtocolContext::new(&mut self.context_data, &self.network_chan); diff --git a/core/network/src/service.rs b/core/network/src/service.rs index 993af60129cb9..194ea462ab311 100644 --- a/core/network/src/service.rs +++ b/core/network/src/service.rs @@ -101,6 +101,10 @@ impl> Link for NetworkLink { } } + fn clear_justification_requests(&self) { + let _ = self.protocol_sender.send(ProtocolMsg::ClearJustificationRequests); + } + fn request_justification(&self, hash: &B::Hash, number: NumberFor) { let _ = self.protocol_sender.send(ProtocolMsg::RequestJustification(hash.clone(), number)); } diff --git a/core/network/src/sync.rs b/core/network/src/sync.rs index 6ba6617d7af1c..abc215a597bb4 100644 --- a/core/network/src/sync.rs +++ b/core/network/src/sync.rs @@ -293,6 +293,14 @@ impl PendingJustifications { Ok(()) } + + /// Clear all data. + fn clear(&mut self) { + self.justifications = ForkTree::new(); + self.pending_requests.clear(); + self.peer_requests.clear(); + self.previous_requests.clear(); + } } /// Relay chain sync strategy. @@ -691,6 +699,11 @@ impl ChainSync { self.justifications.dispatch(&mut self.peers, protocol); } + /// Clears all pending justification requests. + pub fn clear_justification_requests(&mut self) { + self.justifications.clear(); + } + pub fn justification_import_result(&mut self, hash: B::Hash, number: NumberFor, success: bool) { self.justifications.justification_import_result(hash, number, success); } diff --git a/core/network/src/test/block_import.rs b/core/network/src/test/block_import.rs index aca789ce2b860..c8999419aa936 100644 --- a/core/network/src/test/block_import.rs +++ b/core/network/src/test/block_import.rs @@ -48,7 +48,7 @@ fn import_single_good_block_works() { let (_, _hash, number, block) = prepare_good_block(); assert_eq!( import_single_block(&test_client::new(), BlockOrigin::File, block, Arc::new(PassThroughVerifier(true))), - Ok(BlockImportResult::ImportedUnknown(number)) + Ok(BlockImportResult::ImportedUnknown(number, Default::default())) ); } diff --git a/core/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm b/core/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm index f4a22553c152b..13b55df5d1434 100644 Binary files a/core/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm and b/core/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm differ diff --git a/core/util/fork-tree/src/lib.rs b/core/util/fork-tree/src/lib.rs index caed51adf67b3..71c83f57ca487 100644 --- a/core/util/fork-tree/src/lib.rs +++ b/core/util/fork-tree/src/lib.rs @@ -79,7 +79,7 @@ pub enum FinalizationResult { /// in order. Each node is uniquely identified by its hash but can be ordered by /// its number. In order to build the tree an external function must be provided /// when interacting with the tree to establish a node's ancestry. 
-#[derive(Clone, Debug, Decode, Encode)] +#[derive(Clone, Debug, Decode, Encode, PartialEq)] pub struct ForkTree { roots: Vec>, best_finalized_number: Option, @@ -353,8 +353,7 @@ impl ForkTree where mod node_implementation { use super::*; - #[derive(Clone, Debug, Decode, Encode)] - #[cfg_attr(test, derive(PartialEq))] + #[derive(Clone, Debug, Decode, Encode, PartialEq)] pub struct Node { pub hash: H, pub number: N, diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index ff241a4c98a33..9b1139f838e1d 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -112,6 +112,7 @@ construct_service_factory! { }, link_half, grandpa::NetworkBridge::new(service.network()), + service.config.custom.inherent_data_providers.clone(), service.on_exit(), )?); diff --git a/node/runtime/Cargo.toml b/node/runtime/Cargo.toml index ac220ab7a8cfe..d009a544d5946 100644 --- a/node/runtime/Cargo.toml +++ b/node/runtime/Cargo.toml @@ -21,6 +21,7 @@ contract = { package = "srml-contract", path = "../../srml/contract", default-fe council = { package = "srml-council", path = "../../srml/council", default-features = false } democracy = { package = "srml-democracy", path = "../../srml/democracy", default-features = false } executive = { package = "srml-executive", path = "../../srml/executive", default-features = false } +finality-tracker = { package = "srml-finality-tracker", path = "../../srml/finality-tracker", default-features = false } grandpa = { package = "srml-grandpa", path = "../../srml/grandpa", default-features = false } indices = { package = "srml-indices", path = "../../srml/indices", default-features = false } session = { package = "srml-session", path = "../../srml/session", default-features = false } diff --git a/node/runtime/src/lib.rs b/node/runtime/src/lib.rs index ecbdbf691e8f5..d9089f6b0ad5b 100644 --- a/node/runtime/src/lib.rs +++ b/node/runtime/src/lib.rs @@ -60,8 +60,8 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("node"), impl_name: create_runtime_str!("substrate-node"), authoring_version: 10, - spec_version: 32, - impl_version: 32, + spec_version: 33, + impl_version: 33, apis: RUNTIME_API_VERSIONS, }; @@ -192,6 +192,10 @@ impl grandpa::Trait for Runtime { type Event = Event; } +impl finality_tracker::Trait for Runtime { + type OnFinalizationStalled = grandpa::SyncedAuthorities; +} + construct_runtime!( pub enum Runtime with Log(InternalLog: DigestItem) where Block = Block, @@ -211,6 +215,7 @@ construct_runtime!( CouncilVoting: council_voting, CouncilMotions: council_motions::{Module, Call, Storage, Event, Origin}, CouncilSeats: council_seats::{Config}, + FinalityTracker: finality_tracker::{Module, Call, Inherent}, Grandpa: grandpa::{Module, Call, Storage, Config, Log(), Event}, Treasury: treasury, Contract: contract::{Module, Call, Storage, Config, Event}, @@ -295,7 +300,7 @@ impl_runtime_apis! { { for log in digest.logs.iter().filter_map(|l| match l { Log(InternalLog::grandpa(grandpa_signal)) => Some(grandpa_signal), - _=> None + _ => None }) { if let Some(change) = Grandpa::scrape_digest_change(log) { return Some(change); @@ -304,6 +309,20 @@ impl_runtime_apis! 
{ None } + fn grandpa_forced_change(digest: &DigestFor) + -> Option<(NumberFor, ScheduledChange>)> + { + for log in digest.logs.iter().filter_map(|l| match l { + Log(InternalLog::grandpa(grandpa_signal)) => Some(grandpa_signal), + _ => None + }) { + if let Some(change) = Grandpa::scrape_digest_forced_change(log) { + return Some(change); + } + } + None + } + fn grandpa_authorities() -> Vec<(SessionKey, u64)> { Grandpa::grandpa_authorities() } diff --git a/node/runtime/wasm/Cargo.lock b/node/runtime/wasm/Cargo.lock index c4b1e60dc3705..5bb473efb1840 100644 --- a/node/runtime/wasm/Cargo.lock +++ b/node/runtime/wasm/Cargo.lock @@ -660,6 +660,7 @@ dependencies = [ "srml-democracy 0.1.0", "srml-executive 0.1.0", "srml-fees 0.1.0", + "srml-finality-tracker 0.1.0", "srml-grandpa 0.1.0", "srml-indices 0.1.0", "srml-session 0.1.0", @@ -1424,6 +1425,23 @@ dependencies = [ "substrate-primitives 0.1.0", ] +[[package]] +name = "srml-finality-tracker" +version = "0.1.0" +dependencies = [ + "hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-codec 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-codec-derive 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "sr-primitives 0.1.0", + "sr-std 0.1.0", + "srml-session 0.1.0", + "srml-support 0.1.0", + "srml-system 0.1.0", + "substrate-inherents 0.1.0", +] + [[package]] name = "srml-grandpa" version = "0.1.0" @@ -1433,6 +1451,7 @@ dependencies = [ "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 0.1.0", "sr-std 0.1.0", + "srml-finality-tracker 0.1.0", "srml-session 0.1.0", "srml-support 0.1.0", "srml-system 0.1.0", diff --git a/node/runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm b/node/runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm index b86fbdea66f2b..7c6e43a51622b 100644 Binary files a/node/runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm and b/node/runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm differ diff --git a/srml/finality-tracker/Cargo.toml b/srml/finality-tracker/Cargo.toml new file mode 100644 index 0000000000000..246d48d01c732 --- /dev/null +++ b/srml/finality-tracker/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "srml-finality-tracker" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +hex-literal = "0.1.0" +serde = { version = "1.0", default-features = false } +serde_derive = { version = "1.0", optional = true } +parity-codec = { version = "3.0", default-features = false } +parity-codec-derive = { version = "3.0", default-features = false } +substrate-inherents = { path = "../../core/inherents", default-features = false } +sr-std = { path = "../../core/sr-std", default-features = false } +sr-primitives = { path = "../../core/sr-primitives", default-features = false } +srml-support = { path = "../support", default-features = false } +srml-system = { path = "../system", default-features = false } +srml-session = { path = "../session", default-features = false } + +[dev-dependencies] +substrate-primitives = { path = "../../core/primitives", default-features = false } +sr-io = { path = "../../core/sr-io", default-features = false } 
+lazy_static = "1.0" +parking_lot = "0.7" + +[features] +default = ["std"] +std = [ + "serde/std", + "serde_derive", + "parity-codec/std", + "sr-std/std", + "srml-support/std", + "sr-primitives/std", + "srml-system/std", + "srml-session/std", + "substrate-inherents/std", +] diff --git a/srml/finality-tracker/src/lib.rs b/srml/finality-tracker/src/lib.rs new file mode 100644 index 0000000000000..43e95912cfb29 --- /dev/null +++ b/srml/finality-tracker/src/lib.rs @@ -0,0 +1,385 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! SRML module that tracks the last finalized block, as perceived by block authors. + +#![cfg_attr(not(feature = "std"), no_std)] + +#[macro_use] +extern crate srml_support; + +use substrate_inherents::{ + RuntimeString, InherentIdentifier, ProvideInherent, + InherentData, MakeFatalError, +}; +use srml_support::StorageValue; +use sr_primitives::traits::{As, One, Zero}; +use sr_std::{prelude::*, result, cmp, vec}; +use parity_codec::Decode; +use srml_system::{ensure_inherent, Trait as SystemTrait}; + +#[cfg(feature = "std")] +use parity_codec::Encode; + +const DEFAULT_WINDOW_SIZE: u64 = 101; +const DEFAULT_DELAY: u64 = 1000; + +/// The identifier for the `finalnum` inherent. +pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"finalnum"; + +/// Auxiliary trait to extract finalized inherent data. +pub trait FinalizedInherentData { + /// Get finalized inherent data. + fn finalized_number(&self) -> Result; +} + +impl FinalizedInherentData for InherentData { + fn finalized_number(&self) -> Result { + self.get_data(&INHERENT_IDENTIFIER) + .and_then(|r| r.ok_or_else(|| "Finalized number inherent data not found".into())) + } +} + +/// Provider for inherent data. +#[cfg(feature = "std")] +pub struct InherentDataProvider { + inner: F, + _marker: std::marker::PhantomData, +} + +#[cfg(feature = "std")] +impl InherentDataProvider { + pub fn new(final_oracle: F) -> Self { + InherentDataProvider { inner: final_oracle, _marker: Default::default() } + } +} + +#[cfg(feature = "std")] +impl substrate_inherents::ProvideInherentData for InherentDataProvider + where F: Fn() -> Result +{ + fn inherent_identifier(&self) -> &'static InherentIdentifier { + &INHERENT_IDENTIFIER + } + + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), RuntimeString> { + (self.inner)() + .and_then(|n| inherent_data.put_data(INHERENT_IDENTIFIER, &n)) + } + + fn error_to_string(&self, _error: &[u8]) -> Option { + Some(format!("no further information")) + } +} + + +pub trait Trait: SystemTrait { + /// Something which can be notified when the timestamp is set. Set this to `()` if not needed. + type OnFinalizationStalled: OnFinalizationStalled; +} + +decl_storage! { + trait Store for Module as Timestamp { + /// Recent hints. + RecentHints get(recent_hints) build(|_| vec![T::BlockNumber::zero()]): Vec; + /// Ordered recent hints. 
+ OrderedHints get(ordered_hints) build(|_| vec![T::BlockNumber::zero()]): Vec; + /// The median. + Median get(median) build(|_| T::BlockNumber::zero()): T::BlockNumber; + /// The number of recent samples to keep from this chain. Default is n-100 + pub WindowSize get(window_size) config(window_size): T::BlockNumber = T::BlockNumber::sa(DEFAULT_WINDOW_SIZE); + /// The delay after which point things become suspicious. + pub ReportLatency get(report_latency) config(report_latency): T::BlockNumber = T::BlockNumber::sa(DEFAULT_DELAY); + + /// Final hint to apply in the block. `None` means "same as parent". + Update: Option; + + // when initialized through config this is set in the beginning. + Initialized get(initialized) build(|_| false): bool; + } +} + +decl_module! { + pub struct Module for enum Call where origin: T::Origin { + /// Hint that the author of this block thinks the best finalized + /// block is the given number. + fn final_hint(origin, #[compact] hint: T::BlockNumber) { + ensure_inherent(origin)?; + assert!(!::Update::exists(), "Final hint must be updated only once in the block"); + assert!( + srml_system::Module::::block_number() >= hint, + "Finalized height above block number", + ); + ::Update::put(hint); + } + + fn on_finalise() { + Self::update_hint(::Update::take()) + } + } +} + +impl Module { + fn update_hint(hint: Option) { + if !Self::initialized() { + ::RecentHints::put(vec![T::BlockNumber::zero()]); + ::OrderedHints::put(vec![T::BlockNumber::zero()]); + ::Median::put(T::BlockNumber::zero()); + + ::Initialized::put(true); + } + + let mut recent = Self::recent_hints(); + let mut ordered = Self::ordered_hints(); + let window_size = cmp::max(T::BlockNumber::one(), Self::window_size()); + + let hint = hint.unwrap_or_else(|| recent.last() + .expect("always at least one recent sample; qed").clone() + ); + + // prune off the front of the list -- typically 1 except for when + // the sample size has just been shrunk. + { + // take into account the item we haven't pushed yet. + let to_prune = (recent.len() + 1).saturating_sub(window_size.as_() as usize); + + for drained in recent.drain(..to_prune) { + let idx = ordered.binary_search(&drained) + .expect("recent and ordered contain the same items; qed"); + + ordered.remove(idx); + } + } + + // find the position in the ordered list where the new item goes. + let ordered_idx = ordered.binary_search(&hint) + .unwrap_or_else(|idx| idx); + + ordered.insert(ordered_idx, hint); + recent.push(hint); + + let two = T::BlockNumber::one() + T::BlockNumber::one(); + + let median = { + let len = ordered.len(); + assert!(len > 0, "pruning dictated by window_size which is always saturated at 1; qed"); + + if len % 2 == 0 { + let a = ordered[len / 2]; + let b = ordered[(len / 2) - 1]; + + // compute average. + (a + b) / two + } else { + ordered[len / 2] + } + }; + + let our_window_size = recent.len(); + + ::RecentHints::put(recent); + ::OrderedHints::put(ordered); + ::Median::put(median); + + if T::BlockNumber::sa(our_window_size as u64) == window_size { + let now = srml_system::Module::::block_number(); + let latency = Self::report_latency(); + + // the delay is the latency plus half the window size. + let delay = latency + (window_size / two); + // median may be at most n - delay + if median + delay <= now { + T::OnFinalizationStalled::on_stalled(window_size - T::BlockNumber::one()); + } + } + } +} + +/// Called when finalization stalled at a given number. 
+pub trait OnFinalizationStalled<N> {
+	/// The parameter here is how many more blocks to wait before applying
+	/// changes triggered by finality stalling.
+	fn on_stalled(further_wait: N);
+}
+
+macro_rules! impl_on_stalled {
+	() => (
+		impl<N> OnFinalizationStalled<N> for () {
+			fn on_stalled(_: N) {}
+		}
+	);
+
+	( $($t:ident)* ) => {
+		impl<NUM: Clone, $($t: OnFinalizationStalled<NUM>),*> OnFinalizationStalled<NUM> for ($($t,)*) {
+			fn on_stalled(further_wait: NUM) {
+				$($t::on_stalled(further_wait.clone());)*
+			}
+		}
+	}
+}
+
+for_each_tuple!(impl_on_stalled);
+
+impl<T: Trait> ProvideInherent for Module<T> {
+	type Call = Call<T>;
+	type Error = MakeFatalError<()>;
+	const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER;
+
+	fn create_inherent(data: &InherentData) -> Option<Self::Call> {
+		let final_num =
+			data.finalized_number().expect("Gets and decodes final number inherent data");
+
+		// make hint only when not same as last to avoid bloat.
+		Self::recent_hints().last().and_then(|last| if last == &final_num {
+			None
+		} else {
+			Some(Call::final_hint(final_num))
+		})
+	}
+
+	fn check_inherent(_call: &Self::Call, _data: &InherentData) -> result::Result<(), Self::Error> {
+		Ok(())
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	use sr_io::{with_externalities, TestExternalities};
+	use substrate_primitives::H256;
+	use sr_primitives::BuildStorage;
+	use sr_primitives::traits::{BlakeTwo256, IdentityLookup, OnFinalise, Header as HeaderT};
+	use sr_primitives::testing::{Digest, DigestItem, Header};
+	use srml_support::impl_outer_origin;
+	use srml_system as system;
+	use lazy_static::lazy_static;
+	use parking_lot::Mutex;
+
+	#[derive(Clone, PartialEq, Debug)]
+	pub struct StallEvent {
+		at: u64,
+		further_wait: u64,
+	}
+
+	macro_rules! make_test_context {
+		() => {
+			#[derive(Clone, Eq, PartialEq)]
+			pub struct Test;
+
+			impl_outer_origin! {
+				pub enum Origin for Test {}
+			}
+
+			impl system::Trait for Test {
+				type Origin = Origin;
+				type Index = u64;
+				type BlockNumber = u64;
+				type Hash = H256;
+				type Hashing = BlakeTwo256;
+				type Digest = Digest;
+				type AccountId = u64;
+				type Lookup = IdentityLookup<u64>;
+				type Header = Header;
+				type Event = ();
+				type Log = DigestItem;
+			}
+
+			type System = system::Module<Test>;
+
+			lazy_static!
{ + static ref NOTIFICATIONS: Mutex> = Mutex::new(Vec::new()); + } + + pub struct StallTracker; + impl OnFinalizationStalled for StallTracker { + fn on_stalled(further_wait: u64) { + let now = System::block_number(); + NOTIFICATIONS.lock().push(StallEvent { at: now, further_wait }); + } + } + + impl Trait for Test { + type OnFinalizationStalled = StallTracker; + } + + type FinalityTracker = Module; + } + } + + #[test] + fn median_works() { + make_test_context!(); + let t = system::GenesisConfig::::default().build_storage().unwrap().0; + + with_externalities(&mut TestExternalities::new(t), || { + FinalityTracker::update_hint(Some(500)); + assert_eq!(FinalityTracker::median(), 250); + assert!(NOTIFICATIONS.lock().is_empty()); + }); + } + + #[test] + fn notifies_when_stalled() { + make_test_context!(); + let mut t = system::GenesisConfig::::default().build_storage().unwrap().0; + t.extend(GenesisConfig:: { + window_size: 11, + report_latency: 100 + }.build_storage().unwrap().0); + + with_externalities(&mut TestExternalities::new(t), || { + let mut parent_hash = System::parent_hash(); + for i in 2..106 { + System::initialise(&i, &parent_hash, &Default::default()); + FinalityTracker::on_finalise(i); + let hdr = System::finalise(); + parent_hash = hdr.hash(); + } + + assert_eq!( + NOTIFICATIONS.lock().to_vec(), + vec![StallEvent { at: 105, further_wait: 10 }] + ) + }); + } + + #[test] + fn recent_notifications_prevent_stalling() { + make_test_context!(); + let mut t = system::GenesisConfig::::default().build_storage().unwrap().0; + t.extend(GenesisConfig:: { + window_size: 11, + report_latency: 100 + }.build_storage().unwrap().0); + + with_externalities(&mut TestExternalities::new(t), || { + let mut parent_hash = System::parent_hash(); + for i in 2..106 { + System::initialise(&i, &parent_hash, &Default::default()); + assert_ok!(FinalityTracker::dispatch( + Call::final_hint(i-1), + Origin::INHERENT, + )); + FinalityTracker::on_finalise(i); + let hdr = System::finalise(); + parent_hash = hdr.hash(); + } + + assert!(NOTIFICATIONS.lock().is_empty()); + }); + } +} diff --git a/srml/grandpa/Cargo.toml b/srml/grandpa/Cargo.toml index c9faeadb5babe..e7a1b701052d3 100644 --- a/srml/grandpa/Cargo.toml +++ b/srml/grandpa/Cargo.toml @@ -16,6 +16,7 @@ primitives = { package = "sr-primitives", path = "../../core/sr-primitives", def srml-support = { path = "../support", default-features = false } system = { package = "srml-system", path = "../system", default-features = false } session = { package = "srml-session", path = "../session", default-features = false } +finality-tracker = { package = "srml-finality-tracker", path = "../finality-tracker", default-features = false } [dev-dependencies] runtime_io = { package = "sr-io", path = "../../core/sr-io" } @@ -33,4 +34,5 @@ std = [ "primitives/std", "system/std", "session/std", + "finality-tracker/std", ] diff --git a/srml/grandpa/src/lib.rs b/srml/grandpa/src/lib.rs index 16e73540d6aa4..f06c84e23507e 100644 --- a/srml/grandpa/src/lib.rs +++ b/srml/grandpa/src/lib.rs @@ -64,6 +64,8 @@ pub type Log = RawLog< pub trait GrandpaChangeSignal { /// Try to cast the log entry as a contained signal. fn as_signal(&self) -> Option>; + /// Try to cast the log entry as a contained forced signal. + fn as_forced_signal(&self) -> Option<(N, ScheduledChange)>; } /// A logs in this module. @@ -71,15 +73,28 @@ pub trait GrandpaChangeSignal { #[derive(Encode, Decode, PartialEq, Eq, Clone)] pub enum RawLog { /// Authorities set change has been signalled. 
Contains the new set of authorities - /// and the delay in blocks before applying. + /// and the delay in blocks _to finalize_ before applying. AuthoritiesChangeSignal(N, Vec<(SessionKey, u64)>), + /// A forced authorities set change. Contains in this order: the median last + /// finalized block when the change was signaled, the delay in blocks _to import_ + /// before applying and the new set of authorities. + ForcedAuthoritiesChangeSignal(N, N, Vec<(SessionKey, u64)>), } impl RawLog { /// Try to cast the log entry as a contained signal. pub fn as_signal(&self) -> Option<(N, &[(SessionKey, u64)])> { match *self { - RawLog::AuthoritiesChangeSignal(ref n, ref signal) => Some((n.clone(), signal)), + RawLog::AuthoritiesChangeSignal(ref delay, ref signal) => Some((delay.clone(), signal)), + RawLog::ForcedAuthoritiesChangeSignal(_, _, _) => None, + } + } + + /// Try to cast the log entry as a contained forced signal. + pub fn as_forced_signal(&self) -> Option<(N, N, &[(SessionKey, u64)])> { + match *self { + RawLog::ForcedAuthoritiesChangeSignal(ref median, ref delay, ref signal) => Some((median.clone(), delay.clone(), signal)), + RawLog::AuthoritiesChangeSignal(_, _) => None, } } } @@ -96,6 +111,16 @@ impl GrandpaChangeSignal for RawLog .collect(), }) } + + fn as_forced_signal(&self) -> Option<(N, ScheduledChange)> { + RawLog::as_forced_signal(self).map(|(median, delay, next_authorities)| (median, ScheduledChange { + delay, + next_authorities: next_authorities.iter() + .cloned() + .map(|(k, w)| (k.into(), w)) + .collect(), + })) + } } pub trait Trait: system::Trait { @@ -109,8 +134,21 @@ pub trait Trait: system::Trait { type Event: From> + Into<::Event>; } -/// A stored pending change. +/// A stored pending change, old format. +// TODO: remove shim +// https://github.com/paritytech/substrate/issues/1614 #[derive(Encode, Decode)] +pub struct OldStoredPendingChange { + /// The block number this was scheduled at. + pub scheduled_at: N, + /// The delay in blocks until it will be applied. + pub delay: N, + /// The next authority set. + pub next_authorities: Vec<(SessionKey, u64)>, +} + +/// A stored pending change. +#[derive(Encode)] pub struct StoredPendingChange { /// The block number this was scheduled at. pub scheduled_at: N, @@ -118,6 +156,23 @@ pub struct StoredPendingChange { pub delay: N, /// The next authority set. pub next_authorities: Vec<(SessionKey, u64)>, + /// If defined it means the change was forced and the given block number + /// indicates the median last finalized block when the change was signaled. + pub forced: Option, +} + +impl Decode for StoredPendingChange { + fn decode(value: &mut I) -> Option { + let old = OldStoredPendingChange::decode(value)?; + let forced = >::decode(value).unwrap_or(None); + + Some(StoredPendingChange { + scheduled_at: old.scheduled_at, + delay: old.delay, + next_authorities: old.next_authorities, + forced, + }) + } } /// GRANDPA events. @@ -132,6 +187,8 @@ decl_storage! { trait Store for Module as GrandpaFinality { // Pending change: (signalled at, scheduled change). PendingChange get(pending_change): Option>; + // next block number where we can force a change. + NextForced get(next_forced): Option; } add_extra_genesis { config(authorities): Vec<(T::SessionKey, u64)>; @@ -167,10 +224,18 @@ decl_module! 
{ fn on_finalise(block_number: T::BlockNumber) { if let Some(pending_change) = >::get() { if block_number == pending_change.scheduled_at { - Self::deposit_log(RawLog::AuthoritiesChangeSignal( - pending_change.delay, - pending_change.next_authorities.clone(), - )); + if let Some(median) = pending_change.forced { + Self::deposit_log(RawLog::ForcedAuthoritiesChangeSignal( + median, + pending_change.delay, + pending_change.next_authorities.clone(), + )); + } else { + Self::deposit_log(RawLog::AuthoritiesChangeSignal( + pending_change.delay, + pending_change.next_authorities.clone(), + )); + } } if block_number == pending_change.scheduled_at + pending_change.delay { @@ -197,18 +262,39 @@ impl Module { /// `in_blocks` after the current block. This value may be 0, in which /// case the change is applied at the end of the current block. /// + /// If the `forced` parameter is defined, this indicates that the current + /// set has been synchronously determined to be offline and that after + /// `in_blocks` the given change should be applied. The given block number + /// indicates the median last finalized block number and it should be used + /// as the canon block when starting the new grandpa voter. + /// /// No change should be signalled while any change is pending. Returns /// an error if a change is already pending. pub fn schedule_change( next_authorities: Vec<(T::SessionKey, u64)>, in_blocks: T::BlockNumber, + forced: Option, ) -> Result { + use primitives::traits::As; + if Self::pending_change().is_none() { let scheduled_at = system::ChainContext::::default().current_height(); + + if let Some(_) = forced { + if Self::next_forced().map_or(false, |next| next > scheduled_at) { + return Err("Cannot signal forced change so soon after last."); + } + + // only allow the next forced change when twice the window has passed since + // this one. + >::put(scheduled_at + in_blocks * T::BlockNumber::sa(2)); + } + >::put(StoredPendingChange { delay: in_blocks, scheduled_at, next_authorities, + forced, }); Ok(()) @@ -224,12 +310,19 @@ impl Module { } impl Module where Ed25519AuthorityId: core::convert::From<::SessionKey> { - /// See if the digest contains any scheduled change. + /// See if the digest contains any standard scheduled change. pub fn scrape_digest_change(log: &Log) -> Option> { as GrandpaChangeSignal>::as_signal(log) } + + /// See if the digest contains any forced scheduled change. + pub fn scrape_digest_forced_change(log: &Log) + -> Option<(T::BlockNumber, ScheduledChange)> + { + as GrandpaChangeSignal>::as_forced_signal(log) + } } /// Helper for authorities being synchronized with the general session authorities. @@ -266,7 +359,34 @@ impl session::OnSessionChange for SyncedAuthorities where // instant changes let last_authorities = >::grandpa_authorities(); if next_authorities != last_authorities { - let _ = >::schedule_change(next_authorities, Zero::zero()); + let _ = >::schedule_change(next_authorities, Zero::zero(), None); } } } + +impl finality_tracker::OnFinalizationStalled for SyncedAuthorities where + T: Trait, + T: session::Trait, + T: finality_tracker::Trait, + ::ConvertAccountIdToSessionKey: Convert< + ::AccountId, + ::SessionKey, + >, +{ + fn on_stalled(further_wait: T::BlockNumber) { + // when we record old authority sets, we can use `finality_tracker::median` + // to figure out _who_ failed. until then, we can't meaningfully guard + // against `next == last` the way that normal session changes do. 
+ + let next_authorities = >::validators() + .into_iter() + .map(T::ConvertAccountIdToSessionKey::convert) + .map(|key| (key, 1)) // evenly-weighted. + .collect::::SessionKey, u64)>>(); + + let median = >::median(); + + // schedule a change for `further_wait` blocks. + let _ = >::schedule_change(next_authorities, further_wait, Some(median)); + } +} diff --git a/srml/grandpa/src/tests.rs b/srml/grandpa/src/tests.rs index 74c998d873db0..f7e08a20b3b6f 100644 --- a/srml/grandpa/src/tests.rs +++ b/srml/grandpa/src/tests.rs @@ -24,12 +24,14 @@ use runtime_io::with_externalities; use crate::mock::{Grandpa, System, new_test_ext}; use system::{EventRecord, Phase}; use crate::{RawLog, RawEvent}; +use codec::{Decode, Encode}; +use super::*; #[test] fn authorities_change_logged() { with_externalities(&mut new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), || { System::initialise(&1, &Default::default(), &Default::default()); - Grandpa::schedule_change(vec![(4, 1), (5, 1), (6, 1)], 0).unwrap(); + Grandpa::schedule_change(vec![(4, 1), (5, 1), (6, 1)], 0, None).unwrap(); System::note_finished_extrinsics(); Grandpa::on_finalise(1); @@ -54,7 +56,7 @@ fn authorities_change_logged() { fn authorities_change_logged_after_delay() { with_externalities(&mut new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), || { System::initialise(&1, &Default::default(), &Default::default()); - Grandpa::schedule_change(vec![(4, 1), (5, 1), (6, 1)], 1).unwrap(); + Grandpa::schedule_change(vec![(4, 1), (5, 1), (6, 1)], 1, None).unwrap(); Grandpa::on_finalise(1); let header = System::finalise(); assert_eq!(header.digest, testing::Digest { @@ -84,25 +86,112 @@ fn authorities_change_logged_after_delay() { fn cannot_schedule_change_when_one_pending() { with_externalities(&mut new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), || { System::initialise(&1, &Default::default(), &Default::default()); - Grandpa::schedule_change(vec![(4, 1), (5, 1), (6, 1)], 1).unwrap(); + Grandpa::schedule_change(vec![(4, 1), (5, 1), (6, 1)], 1, None).unwrap(); assert!(Grandpa::pending_change().is_some()); - assert!(Grandpa::schedule_change(vec![(5, 1)], 1).is_err()); + assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_err()); Grandpa::on_finalise(1); let header = System::finalise(); System::initialise(&2, &header.hash(), &Default::default()); assert!(Grandpa::pending_change().is_some()); - assert!(Grandpa::schedule_change(vec![(5, 1)], 1).is_err()); + assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_err()); Grandpa::on_finalise(2); let header = System::finalise(); System::initialise(&3, &header.hash(), &Default::default()); assert!(Grandpa::pending_change().is_none()); - assert!(Grandpa::schedule_change(vec![(5, 1)], 1).is_ok()); + assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_ok()); Grandpa::on_finalise(3); let _header = System::finalise(); }); } + +#[test] +fn new_decodes_from_old() { + let old = OldStoredPendingChange { + scheduled_at: 5u32, + delay: 100u32, + next_authorities: vec![(1u64, 5), (2u64, 10), (3u64, 2)], + }; + + let encoded = old.encode(); + let new = StoredPendingChange::::decode(&mut &encoded[..]).unwrap(); + assert!(new.forced.is_none()); + assert_eq!(new.scheduled_at, old.scheduled_at); + assert_eq!(new.delay, old.delay); + assert_eq!(new.next_authorities, old.next_authorities); +} + +#[test] +fn dispatch_forced_change() { + with_externalities(&mut new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), || { + System::initialise(&1, &Default::default(), &Default::default()); + Grandpa::schedule_change( + vec![(4, 1), (5, 1), (6, 1)], + 
5, + Some(0), + ).unwrap(); + + assert!(Grandpa::pending_change().is_some()); + assert!(Grandpa::schedule_change(vec![(5, 1)], 1, Some(0)).is_err()); + + Grandpa::on_finalise(1); + let mut header = System::finalise(); + + for i in 2..7 { + System::initialise(&i, &header.hash(), &Default::default()); + assert!(Grandpa::pending_change().unwrap().forced.is_some()); + assert_eq!(Grandpa::next_forced(), Some(11)); + assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_err()); + assert!(Grandpa::schedule_change(vec![(5, 1)], 1, Some(0)).is_err()); + + Grandpa::on_finalise(i); + header = System::finalise(); + } + + // change has been applied at the end of block 6. + // add a normal change. + { + System::initialise(&7, &header.hash(), &Default::default()); + assert!(Grandpa::pending_change().is_none()); + assert_eq!(Grandpa::grandpa_authorities(), vec![(4, 1), (5, 1), (6, 1)]); + assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_ok()); + Grandpa::on_finalise(7); + header = System::finalise(); + } + + // run the normal change. + { + System::initialise(&8, &header.hash(), &Default::default()); + assert!(Grandpa::pending_change().is_some()); + assert_eq!(Grandpa::grandpa_authorities(), vec![(4, 1), (5, 1), (6, 1)]); + assert!(Grandpa::schedule_change(vec![(5, 1)], 1, None).is_err()); + Grandpa::on_finalise(8); + header = System::finalise(); + } + + // normal change applied. but we can't apply a new forced change for some + // time. + for i in 9..11 { + System::initialise(&i, &header.hash(), &Default::default()); + assert!(Grandpa::pending_change().is_none()); + assert_eq!(Grandpa::grandpa_authorities(), vec![(5, 1)]); + assert_eq!(Grandpa::next_forced(), Some(11)); + assert!(Grandpa::schedule_change(vec![(5, 1), (6, 1)], 5, Some(0)).is_err()); + Grandpa::on_finalise(i); + header = System::finalise(); + } + + { + System::initialise(&11, &header.hash(), &Default::default()); + assert!(Grandpa::pending_change().is_none()); + assert!(Grandpa::schedule_change(vec![(5, 1), (6, 1), (7, 1)], 5, Some(0)).is_ok()); + assert_eq!(Grandpa::next_forced(), Some(21)); + Grandpa::on_finalise(11); + header = System::finalise(); + } + }); +}
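
The `update_hint` routine added in srml/finality-tracker maintains a rolling median over the last `window_size` hints: prune the oldest samples, insert the new hint into a sorted mirror of the window, and take the middle element (or the mean of the two middle elements). A minimal standalone sketch of that bookkeeping, using plain `u64` block numbers and a hypothetical `HintWindow` helper in place of the module's storage items:

// Rolling median over the most recent `window_size` samples (illustrative only).
struct HintWindow {
	recent: Vec<u64>,  // samples in insertion order
	ordered: Vec<u64>, // the same samples, kept sorted
	window_size: usize,
}

impl HintWindow {
	fn new(window_size: usize) -> Self {
		// Like the module, start with a single zero sample and saturate the window at 1.
		HintWindow { recent: vec![0], ordered: vec![0], window_size: window_size.max(1) }
	}

	/// Push a new hint and return the updated median.
	fn push(&mut self, hint: u64) -> u64 {
		// Prune the front so that, counting the sample about to be pushed,
		// at most `window_size` entries remain.
		let to_prune = (self.recent.len() + 1).saturating_sub(self.window_size);
		for drained in self.recent.drain(..to_prune) {
			let idx = self.ordered.binary_search(&drained)
				.expect("recent and ordered contain the same items");
			self.ordered.remove(idx);
		}

		// Insert the new sample into both views.
		let idx = self.ordered.binary_search(&hint).unwrap_or_else(|i| i);
		self.ordered.insert(idx, hint);
		self.recent.push(hint);

		// Median of the sorted view (average of the two middle items when the length is even).
		let len = self.ordered.len();
		if len % 2 == 0 {
			(self.ordered[len / 2] + self.ordered[len / 2 - 1]) / 2
		} else {
			self.ordered[len / 2]
		}
	}
}

fn _median_example() {
	// With the single starting sample of 0, pushing 500 gives (0 + 500) / 2 = 250,
	// which is the value the `median_works` test asserts on.
	let mut w = HintWindow::new(101);
	assert_eq!(w.push(500), 250);
}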
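
The stall check in `update_hint` only runs once the sample window is full, and fires when `median + report_latency + window_size / 2 <= current_block`, passing `window_size - 1` as the extra blocks to wait. A small worked example with the numbers used by `notifies_when_stalled` (window_size = 11, report_latency = 100, median pinned at 0 because no hints are submitted); the helper function is illustrative only:

fn _stall_check_example() {
	let window_size: u64 = 11;
	let report_latency: u64 = 100;
	let median: u64 = 0;

	// the delay is the latency plus half the window size.
	let delay = report_latency + window_size / 2; // 100 + 5 = 105

	// The first block number at which `median + delay <= now` holds is 105,
	// and the handler receives `window_size - 1 = 10` further blocks to wait,
	// matching the expected `StallEvent { at: 105, further_wait: 10 }`.
	let first_stalled_block = (2..=200u64)
		.find(|&now| median + delay <= now)
		.unwrap();
	assert_eq!(first_stalled_block, 105);
	assert_eq!(window_size - 1, 10);
}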
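
Forced changes are rate-limited through `NextForced`: accepting one at `scheduled_at` with delay `in_blocks` bumps the next allowed forced change to `scheduled_at + 2 * in_blocks`. A sketch of just that guard (the separate pending-change check is left out), using plain `u64` block numbers and a hypothetical `ForcedChangeLimiter`; the numbers mirror `dispatch_forced_change`:

struct ForcedChangeLimiter {
	next_forced: Option<u64>,
}

impl ForcedChangeLimiter {
	/// Try to schedule a forced change at `scheduled_at` applying after `in_blocks` blocks.
	fn try_force(&mut self, scheduled_at: u64, in_blocks: u64) -> Result<(), &'static str> {
		if self.next_forced.map_or(false, |next| next > scheduled_at) {
			return Err("Cannot signal forced change so soon after last.");
		}
		// Only allow the next forced change once twice the delay window has passed.
		self.next_forced = Some(scheduled_at + in_blocks * 2);
		Ok(())
	}
}

fn _forced_change_example() {
	// A forced change at block 1 with a delay of 5 pushes `next_forced` to 11;
	// further forced changes fail before block 11, and one accepted at 11 pushes it to 21.
	let mut limiter = ForcedChangeLimiter { next_forced: None };
	assert!(limiter.try_force(1, 5).is_ok());
	assert_eq!(limiter.next_forced, Some(11));
	assert!(limiter.try_force(9, 5).is_err());
	assert!(limiter.try_force(11, 5).is_ok());
	assert_eq!(limiter.next_forced, Some(21));
}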
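
The hand-written `Decode` impl for `StoredPendingChange` stays compatible with values stored under the old layout by decoding the old fields first and then attempting to decode the trailing `forced` field, treating an exhausted input as `None` — the property `new_decodes_from_old` exercises. The same idea over a toy fixed-width little-endian layout (not the parity-codec/SCALE encoding the runtime actually uses, and with `next_authorities` omitted for brevity):

struct NewRecord {
	scheduled_at: u32,
	delay: u32,
	forced: Option<u32>, // new optional trailing field
}

// Read a u32 from the front of the slice and advance the cursor.
fn read_u32(input: &mut &[u8]) -> Option<u32> {
	let cur = *input;
	if cur.len() < 4 { return None; }
	let (head, rest) = cur.split_at(4);
	*input = rest;
	Some(u32::from_le_bytes([head[0], head[1], head[2], head[3]]))
}

fn decode_new(input: &mut &[u8]) -> Option<NewRecord> {
	// Old fields first, exactly as the old format laid them out...
	let scheduled_at = read_u32(input)?;
	let delay = read_u32(input)?;
	// ...then the new trailing field: an exhausted input simply becomes `None`,
	// mirroring `Option::<N>::decode(value).unwrap_or(None)` in the impl above.
	let forced = read_u32(input);
	Some(NewRecord { scheduled_at, delay, forced })
}

fn _decode_shim_example() {
	// An "old" encoding carrying only the two original fields still decodes.
	let mut old_bytes = Vec::new();
	old_bytes.extend_from_slice(&5u32.to_le_bytes());
	old_bytes.extend_from_slice(&100u32.to_le_bytes());

	let new = decode_new(&mut &old_bytes[..]).unwrap();
	assert_eq!((new.scheduled_at, new.delay, new.forced), (5, 100, None));
}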