Skip to content
This repository was archived by the owner on Nov 15, 2023. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from 13 commits
Commits
Show all changes
42 commits
Select commit Hold shift + click to select a range
9062f94
value ranges in consensus cache
svyatonik Aug 2, 2019
ce451c3
skip values in cache
svyatonik Aug 2, 2019
2efc6a4
read epoch0 + epoch1 data from genesis in babe
svyatonik Aug 2, 2019
d9d75bc
sync authorities + session validators at genesis
svyatonik Aug 4, 2019
aa491d0
removed some debug printlns
svyatonik Aug 4, 2019
de097ee
fixed cache encoding
svyatonik Aug 5, 2019
4bbebde
Revert "skip values in cache"
svyatonik Aug 5, 2019
2897a83
Revert "value ranges in consensus cache"
svyatonik Aug 5, 2019
2c72a87
get rid of cache::AUTHORITIES in Babe
svyatonik Aug 5, 2019
1c7b2cd
cleaning up
svyatonik Aug 5, 2019
cb2851f
cleaning up
svyatonik Aug 5, 2019
e5c7bd8
update spec version
svyatonik Aug 5, 2019
a838e9b
lost changes
svyatonik Aug 5, 2019
200a638
fixed tests
svyatonik Aug 5, 2019
3c420ff
Merge branch 'master' into fix_flaming_fir_light_sync
svyatonik Aug 5, 2019
c4c6788
Update node/runtime/src/lib.rs
svyatonik Aug 6, 2019
04f73d5
Merge branch 'master' into fix_flaming_fir_light_sync
svyatonik Aug 6, 2019
f30f0fb
fix once-per-block condition
svyatonik Aug 6, 2019
773982b
fix standalone babe + temp_storage in BuildGenesis
svyatonik Aug 6, 2019
c922fad
fix benches compilation
svyatonik Aug 6, 2019
69fe38a
fixed comment
svyatonik Aug 6, 2019
b5e223e
Merge branch 'master' into fix_flaming_fir_light_sync
svyatonik Aug 8, 2019
54fd261
Merge branch 'master' into fix_flaming_fir_light_sync
svyatonik Aug 12, 2019
5991212
re-added light nodes to integration tests
svyatonik Aug 12, 2019
d24bb0b
Merge branch 'master' into fix_flaming_fir_light_sync
Demi-Marie Aug 14, 2019
51b79fc
finalize_with_ancestors from extra_requests
svyatonik Aug 14, 2019
651d8ad
Merge branch 'fix_flaming_fir_light_sync' of https://github.com/parit…
svyatonik Aug 14, 2019
8f106ae
post-merge fix
svyatonik Aug 14, 2019
b4dd1bd
aaand removed debug code
svyatonik Aug 14, 2019
d593290
(another one)
svyatonik Aug 14, 2019
a7adc1c
fix warn in logs (do not call ForkTree::finalize twice for the same b…
svyatonik Aug 14, 2019
707b288
sync digest.next_authorities with actual next authorities
svyatonik Aug 14, 2019
f1d0e41
more docs
svyatonik Aug 15, 2019
515587c
Merge branch 'master' into fix_flaming_fir_light_sync
svyatonik Aug 15, 2019
b328acb
reverting all commits affecting storage
svyatonik Aug 16, 2019
4786ea7
Merge branch 'master' into fix_flaming_fir_light_sync
svyatonik Aug 16, 2019
e33fd0f
also remove keys from babe trait
svyatonik Aug 16, 2019
e7644e4
fixed warnings
svyatonik Aug 16, 2019
f70bbcc
Merge branch 'master' into fix_flaming_fir_light_sync
svyatonik Aug 16, 2019
89447ee
post-merge fixes
svyatonik Aug 16, 2019
320534d
reverted some redundant changes
svyatonik Aug 16, 2019
5bf8db9
reverted more changes
svyatonik Aug 16, 2019
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
164 changes: 115 additions & 49 deletions core/consensus/babe/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -255,8 +255,11 @@ impl<Hash, H, B, C, E, I, Error, SO> SlotWorker<B> for BabeWorker<C, E, I, SO> w
let (timestamp, slot_number, slot_duration) =
(slot_info.timestamp, slot_info.number, slot_info.duration);

let epoch = match epoch(client.as_ref(), &BlockId::Hash(chain_head.hash())) {
Ok(authorities) => authorities,
let regular_epoch = epoch_from_runtime(client.as_ref(), &BlockId::Hash(chain_head.hash()))
.ok_or(consensus_common::Error::InvalidAuthoritiesSet);

let epoch = match regular_epoch {
Ok(epoch) => epoch,
Err(e) => {
error!(
target: "babe",
Expand Down Expand Up @@ -654,23 +657,49 @@ impl<B: BlockT, C> Verifier<B> for BabeVerifier<C> where

let hash = header.hash();
let parent_hash = *header.parent_hash();
let Epoch { authorities, randomness, epoch_index, .. } =
epoch(self.api.as_ref(), &BlockId::Hash(parent_hash))
.map_err(|e| format!("Could not fetch epoch at {:?}: {:?}", parent_hash, e))?;

let epoch = epoch(self.api.as_ref(), &BlockId::Hash(parent_hash))
.map_err(|e| format!("Could not fetch epoch at {:?}: {:?}", parent_hash, e))?;
let (epoch, maybe_next_epoch) = epoch.deconstruct();
let Epoch { authorities, randomness, epoch_index, .. } = epoch;

// We add one to allow for some small drift.
// FIXME #1019 in the future, alter this queue to allow deferring of headers
let checked_header = check_header::<B, C>(
let mut checked_header = check_header::<B, C>(
&self.api,
slot_now + 1,
header,
header.clone(),
hash,
&authorities,
randomness,
epoch_index,
self.config.c(),
)?;
);

// if we have failed to check header using (presumably) current epoch AND we're probably in the next epoch
// => check using next epoch
// (this is only possible on the light client at epoch#0)
if epoch_index == 0 && checked_header.is_err() {
if let Some(Epoch { authorities, randomness, epoch_index, .. }) = maybe_next_epoch {
let checked_header_next = check_header::<B, C>(
&self.api,
slot_now + 1,
header,
hash,
&authorities,
randomness,
epoch_index,
self.config.c(),
);

match checked_header_next {
Ok(checked_header_next) => checked_header = Ok(checked_header_next),
Err(_) => (),
}
}
}

let checked_header = checked_header?;
match checked_header {
CheckedHeader::Checked(pre_header, (pre_digest, seal)) => {
let BabePreDigest { slot_number, .. } = pre_digest.as_babe_pre_digest()
Expand Down Expand Up @@ -723,31 +752,75 @@ impl<B: BlockT, C> Verifier<B> for BabeVerifier<C> where
}
}

/// Regular BABE epoch or spanned genesis epoch.
#[derive(Debug, Decode, Encode)]
enum MaybeSpanEpoch {
/// Genesis entry. Has the data for epoch#0 and epoch#1.
Genesis(Epoch, Epoch),
/// Regular entry. Has the data for the epoch after next (i.e. current epoch + 2).
Regular(Epoch),
}

impl MaybeSpanEpoch {
/// Splits the entry into its primary epoch plus, for genesis entries only,
/// the follow-up epoch (epoch#1). Regular entries yield `None` as the second
/// element.
pub fn deconstruct(self) -> (Epoch, Option<Epoch>) {
match self {
MaybeSpanEpoch::Genesis(epoch0, epoch1) => (epoch0, Some(epoch1)),
MaybeSpanEpoch::Regular(epoch) => (epoch, None),
}
}

/// Returns the contained epoch if this is a `Regular` entry, `None` for
/// `Genesis` entries. Test-only helper.
#[cfg(test)]
pub fn into_regular(self) -> Option<Epoch> {
match self {
MaybeSpanEpoch::Regular(epoch) => Some(epoch),
_ => None,
}
}
}

/// Extract current epoch data from cache and fallback to querying the runtime
/// if the cache isn't populated.
fn epoch<B, C>(client: &C, at: &BlockId<B>) -> Result<Epoch, ConsensusError> where
fn epoch<B, C>(client: &C, at: &BlockId<B>) -> Result<MaybeSpanEpoch, ConsensusError> where
B: BlockT,
C: ProvideRuntimeApi + ProvideCache<B>,
C::Api: BabeApi<B>,
{
client
.cache()
.and_then(|cache| cache.get_at(&well_known_cache_keys::EPOCH, at)
epoch_from_cache(client, at)
.or_else(|| epoch_from_runtime(client, at).map(MaybeSpanEpoch::Regular))
.ok_or(consensus_common::Error::InvalidAuthoritiesSet)
}

/// Extract current epoch data from cache.
fn epoch_from_cache<B, C>(client: &C, at: &BlockId<B>) -> Option<MaybeSpanEpoch> where
B: BlockT,
C: ProvideCache<B>,
{
// the epoch that is BABE-valid at the block is not the epoch that is cache-valid at the block
// we need to go back for maximum two steps
client.cache()
.and_then(|cache| cache
.get_at(&well_known_cache_keys::EPOCH, at)
.and_then(|v| Decode::decode(&mut &v[..])))
.or_else(|| {
if client.runtime_api().has_api::<dyn BabeApi<B>>(at).unwrap_or(false) {
let s = BabeApi::epoch(&*client.runtime_api(), at).ok()?;
if s.authorities.is_empty() {
error!("No authorities!");
None
} else {
Some(s)
}
} else {
error!("bad api!");
None
}
}).ok_or(consensus_common::Error::InvalidAuthoritiesSet)
}

/// Extract current epoch from runtime.
///
/// Returns `None` (after logging an error) when the runtime does not expose
/// the BABE API at `at`, when the runtime call fails, or when the returned
/// epoch has an empty authorities set.
fn epoch_from_runtime<B, C>(client: &C, at: &BlockId<B>) -> Option<Epoch> where
	B: BlockT,
	C: ProvideRuntimeApi,
	C::Api: BabeApi<B>,
{
	// Guard: the runtime at this block must provide the BABE API.
	if !client.runtime_api().has_api::<dyn BabeApi<B>>(at).unwrap_or(false) {
		error!("bad api!");
		return None;
	}

	let epoch = BabeApi::epoch(&*client.runtime_api(), at).ok()?;

	// An epoch without authorities is unusable for block production/verification.
	if epoch.authorities.is_empty() {
		error!("No authorities!");
		return None;
	}

	Some(epoch)
}

/// The BABE import queue type.
Expand Down Expand Up @@ -854,7 +927,7 @@ fn initialize_authorities_cache<B, C>(client: &C) -> Result<(), ConsensusError>

// check if we already have initialized the cache
let genesis_id = BlockId::Number(Zero::zero());
let genesis_epoch: Option<Epoch> = cache
let genesis_epoch: Option<MaybeSpanEpoch> = cache
.get_at(&well_known_cache_keys::EPOCH, &genesis_id)
.and_then(|v| Decode::decode(&mut &v[..]));
if genesis_epoch.is_some() {
Expand All @@ -867,7 +940,11 @@ fn initialize_authorities_cache<B, C>(client: &C) -> Result<(), ConsensusError>
error,
)));

let genesis_epoch = epoch(client, &genesis_id)?;
let epoch0 = epoch_from_runtime(client, &genesis_id).ok_or(consensus_common::Error::InvalidAuthoritiesSet)?;
let mut epoch1 = epoch0.clone();
epoch1.epoch_index = 1;

let genesis_epoch = MaybeSpanEpoch::Genesis(epoch0, epoch1);
cache.initialize(&well_known_cache_keys::EPOCH, genesis_epoch.encode())
.map_err(map_err)
}
Expand Down Expand Up @@ -1043,6 +1120,16 @@ impl<B, E, Block, I, RA, PRA> BlockImport<Block> for BabeBlockImport<B, E, Block
// this way we can revert it if there's any error
let mut old_epoch_changes = None;

if let Some(enacted_epoch) = enacted_epoch.as_ref() {
let enacted_epoch = &enacted_epoch.data;

// update the current epoch in the client cache
new_cache.insert(
well_known_cache_keys::EPOCH,
MaybeSpanEpoch::Regular(enacted_epoch.clone()).encode(),
);
}

if let Some(next_epoch) = next_epoch_digest {
if let Some(enacted_epoch) = enacted_epoch {
let enacted_epoch = &enacted_epoch.data;
Expand All @@ -1053,27 +1140,6 @@ impl<B, E, Block, I, RA, PRA> BlockImport<Block> for BabeBlockImport<B, E, Block
next_epoch.epoch_index,
)));
}

// update the current epoch in the client cache
new_cache.insert(
well_known_cache_keys::EPOCH,
enacted_epoch.encode(),
);

let current_epoch = epoch(&*self.api, &BlockId::Hash(parent_hash))?;

// if the authorities have changed then we populate the
// `AUTHORITIES` key with the enacted epoch, so that the inner
// `ImportBlock` can process it (`EPOCH` is specific to BABE).
// e.g. in the case of GRANDPA it would require a justification
// for the block, expecting that the authorities actually
// changed.
if current_epoch.authorities != enacted_epoch.authorities {
new_cache.insert(
well_known_cache_keys::AUTHORITIES,
enacted_epoch.encode(),
);
}
}

old_epoch_changes = Some(epoch_changes.clone());
Expand Down
2 changes: 1 addition & 1 deletion core/consensus/babe/src/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -337,7 +337,7 @@ fn authorities_call_works() {
let client = test_client::new();

assert_eq!(client.info().chain.best_number, 0);
assert_eq!(epoch(&client, &BlockId::Number(0)).unwrap().authorities, vec![
assert_eq!(epoch(&client, &BlockId::Number(0)).unwrap().into_regular().unwrap().authorities, vec![
(Keyring::Alice.into(), 1),
(Keyring::Bob.into(), 1),
(Keyring::Charlie.into(), 1),
Expand Down
2 changes: 1 addition & 1 deletion core/finality-grandpa/src/import.rs
Original file line number Diff line number Diff line change
Expand Up @@ -425,7 +425,7 @@ impl<B, E, Block: BlockT<Hash=H256>, RA, PRA, SC> BlockImport<Block>

// we don't want to finalize on `inner.import_block`
let mut justification = block.justification.take();
let enacts_consensus_change = new_cache.contains_key(&well_known_cache_keys::AUTHORITIES);
let enacts_consensus_change = !new_cache.is_empty();
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The reason I separated the entries into EPOCH and AUTHORITIES is because this way we will be expecting a finality proof for this block, which GRANDPA won't take into account. I believe an epoch change should not be considered a consensus change (hence we don't require finality to safely trigger). We should only consider this when the authority set in the new epoch actually changes.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It will take it into account - please see how enacts_consensus_change is processed: when it's true, it'll fire self.consensus_changes.lock().note_change((number, hash)) and then this will be processed by the environment/finalize_block.rs - see how justification_required is built. So the justification will be emitted for such blocks.

But OK - if you do not consider epoch change as consensus change, then let's not emit justification in this case.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, but the problem is that the GRANDPA voters currently won't take that into account, and will not limit their votes to such blocks (like they do for authority changes), so we might not get a justification for it (we just finalize a further block for example).
@rphmeier Can you comment on whether epoch change should be considered a consensus change?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

But finalizing further block is OK too - it should just cover block that enacts consensus change. There's currently no such code (iirc) that just stops syncing && waits for exactly that block finalization. We're just trying to get something that finalizes block-that-enacts-consensus-changes asap (by issuing finality proof requests).

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The only downside I see is that we'll get more justifications. Although the point is kind of moot in general because the GRANDPA authorities will change 99% of the time on the same block as BABE or other ones do. It's unlikely that they'll be out of sync.

let import_result = (&*self.inner).import_block(block, new_cache);

let mut imported_aux = {
Expand Down
2 changes: 1 addition & 1 deletion core/finality-grandpa/src/light_import.rs
Original file line number Diff line number Diff line change
Expand Up @@ -246,7 +246,7 @@ fn do_import_block<B, E, Block: BlockT<Hash=H256>, RA, J>(

// we don't want to finalize on `inner.import_block`
let justification = block.justification.take();
let enacts_consensus_change = new_cache.contains_key(&well_known_cache_keys::AUTHORITIES);
let enacts_consensus_change = !new_cache.is_empty();
let import_result = BlockImport::import_block(&mut client, block, new_cache);

let mut imported_aux = match import_result {
Expand Down
3 changes: 3 additions & 0 deletions core/primitives/src/storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,9 @@ pub mod well_known_keys {
/// Current extrinsic index (u32) is stored under this key.
pub const EXTRINSIC_INDEX: &'static [u8] = b":extrinsic_index";

/// Mutated initial (genesis) validators list from the session module.
pub const MUTATED_SESSION_VALIDATORS_KEYS: &'static [u8] = b":mutated_session_validators_keys";

/// Changes trie configuration is stored under this key.
pub const CHANGES_TRIE_CONFIG: &'static [u8] = b":changes_trie";

Expand Down
7 changes: 4 additions & 3 deletions node/runtime/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -79,8 +79,8 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
// and set impl_version to equal spec_version. If only runtime
// implementation changes and behavior does not, then leave spec_version as
// is and increment impl_version.
spec_version: 126,
impl_version: 126,
spec_version: 127,
impl_version: 127,
apis: RUNTIME_API_VERSIONS,
};

Expand Down Expand Up @@ -134,6 +134,7 @@ parameter_types! {
impl babe::Trait for Runtime {
type EpochDuration = EpochDuration;
type ExpectedBlockTime = ExpectedBlockTime;
type Keys = SessionKeys;
}

impl indices::Trait for Runtime {
Expand Down Expand Up @@ -400,13 +401,13 @@ construct_runtime!(
UncheckedExtrinsic = UncheckedExtrinsic
{
System: system::{Module, Call, Storage, Config, Event},
Babe: babe::{Module, Call, Storage, Config, Inherent(Timestamp)},
Timestamp: timestamp::{Module, Call, Storage, Inherent},
Authorship: authorship::{Module, Call, Storage},
Indices: indices,
Balances: balances,
Staking: staking::{default, OfflineWorker},
Session: session::{Module, Call, Storage, Event, Config<T>},
Babe: babe::{Module, Call, Storage, Config, Inherent(Timestamp)},
Democracy: democracy::{Module, Call, Storage, Config, Event<T>},
Council: collective::<Instance1>::{Module, Call, Storage, Origin<T>, Event<T>, Config<T>},
TechnicalCommittee: collective::<Instance2>::{Module, Call, Storage, Origin<T>, Event<T>, Config<T>},
Expand Down
3 changes: 2 additions & 1 deletion srml/babe/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,11 @@ timestamp = { package = "srml-timestamp", path = "../timestamp", default-feature
session = { package = "srml-session", path = "../session", default-features = false }
babe-primitives = { package = "substrate-consensus-babe-primitives", path = "../../core/consensus/babe/primitives", default-features = false }
runtime_io = { package = "sr-io", path = "../../core/sr-io", default-features = false }
primitives = { package = "substrate-primitives", path = "../../core/primitives", default-features = false }

[dev-dependencies]
lazy_static = "1.3.0"
parking_lot = "0.8.0"
primitives = { package = "substrate-primitives", path = "../../core/primitives" }

[features]
default = ["std"]
Expand All @@ -39,4 +39,5 @@ std = [
"session/std",
"runtime_io/std",
"staking/std",
"primitives/std",
]
Loading