diff --git a/Cargo.lock b/Cargo.lock index f1819c85d7bef..cf1eb38e6d2fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4935,8 +4935,8 @@ dependencies = [ "hex-literal", "pallet-balances", "pallet-grandpa", - "pallet-randomness-collective-flip", "pallet-sassafras", + "pallet-session", "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", @@ -6253,6 +6253,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "hex-literal", "log", "pallet-session", "pallet-timestamp", @@ -6260,7 +6261,6 @@ dependencies = [ "scale-info", "sp-application-crypto", "sp-consensus-sassafras", - "sp-consensus-vrf", "sp-core", "sp-io", "sp-runtime", diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index c99e7bf5ef0ed..9133c2141c837 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ b/bin/node-sassafras/node/Cargo.toml @@ -66,7 +66,15 @@ substrate-build-script-utils = { version = "3.0.0", path = "../../../utils/build [features] default = [] -runtime-benchmarks = ["node-sassafras-runtime/runtime-benchmarks"] +runtime-benchmarks = [ + "node-sassafras-runtime/runtime-benchmarks" +] # Enable features that allow the runtime to be tried and debugged. Name might be subject to change # in the near future. 
-try-runtime = ["node-sassafras-runtime/try-runtime", "try-runtime-cli"] +try-runtime = [ + "node-sassafras-runtime/try-runtime", + "try-runtime-cli" +] +use-session-pallet = [ + "node-sassafras-runtime/use-session-pallet" +] diff --git a/bin/node-sassafras/node/src/chain_spec.rs b/bin/node-sassafras/node/src/chain_spec.rs index ed189a6964976..965fc197277c8 100644 --- a/bin/node-sassafras/node/src/chain_spec.rs +++ b/bin/node-sassafras/node/src/chain_spec.rs @@ -2,14 +2,17 @@ use node_sassafras_runtime::{ AccountId, BalancesConfig, GenesisConfig, GrandpaConfig, SassafrasConfig, Signature, SudoConfig, SystemConfig, WASM_BINARY, }; +#[cfg(feature = "use-session-pallet")] +use node_sassafras_runtime::{SessionConfig, SessionKeys}; use sc_service::ChainType; -use sp_consensus_sassafras::AuthorityId as SassafrasId; +use sp_consensus_sassafras::{AuthorityId as SassafrasId, SassafrasEpochConfiguration}; use sp_core::{sr25519, Pair, Public}; use sp_finality_grandpa::AuthorityId as GrandpaId; use sp_runtime::traits::{IdentifyAccount, Verify}; -// The URL for the telemetry server. -// const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; +// Genesis constants for Sassafras parameters configuration. +const SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER: u32 = 32; +const SASSAFRAS_TICKETS_REDUNDANCY_FACTOR: u32 = 1; /// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. pub type ChainSpec = sc_service::GenericChainSpec; @@ -23,7 +26,7 @@ pub fn get_from_seed(seed: &str) -> ::Pu type AccountPublic = ::Signer; -/// Generate an account ID from seed. +/// Generate an account id from seed. pub fn get_account_id_from_seed(seed: &str) -> AccountId where AccountPublic: From<::Public>, @@ -31,47 +34,40 @@ where AccountPublic::from(get_from_seed::(seed)).into_account() } -/// Generate authority keys from seed. 
-pub fn authority_keys_from_seed(s: &str) -> (SassafrasId, GrandpaId) { - (get_from_seed::(s), get_from_seed::(s)) +/// Generate authority account id and keys from seed. +pub fn authority_keys_from_seed(seed: &str) -> (AccountId, SassafrasId, GrandpaId) { + ( + get_account_id_from_seed::(seed), + get_from_seed::(seed), + get_from_seed::(seed), + ) } pub fn development_config() -> Result { let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; Ok(ChainSpec::from_genesis( - // Name "Development", - // ID "dev", ChainType::Development, move || { testnet_genesis( wasm_binary, - // Initial PoA authorities vec![authority_keys_from_seed("Alice")], - // Sudo account get_account_id_from_seed::("Alice"), - // Pre-funded accounts vec![ get_account_id_from_seed::("Alice"), get_account_id_from_seed::("Bob"), get_account_id_from_seed::("Alice//stash"), get_account_id_from_seed::("Bob//stash"), ], - true, ) }, - // Bootnodes vec![], - // Telemetry None, - // Protocol ID None, None, - // Properties None, - // Extensions None, )) } @@ -80,19 +76,14 @@ pub fn local_testnet_config() -> Result { let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; Ok(ChainSpec::from_genesis( - // Name "Local Testnet", - // ID "local_testnet", ChainType::Local, move || { testnet_genesis( wasm_binary, - // Initial PoA authorities vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")], - // Sudo account get_account_id_from_seed::("Alice"), - // Pre-funded accounts vec![ get_account_id_from_seed::("Alice"), get_account_id_from_seed::("Bob"), @@ -107,19 +98,13 @@ pub fn local_testnet_config() -> Result { get_account_id_from_seed::("Eve//stash"), get_account_id_from_seed::("Ferdie//stash"), ], - true, ) }, - // Bootnodes vec![], - // Telemetry None, - // Protocol ID None, - // Properties None, None, - // Extensions None, )) } @@ -127,10 +112,9 @@ pub fn local_testnet_config() -> Result { /// Configure initial 
storage state for FRAME modules. fn testnet_genesis( wasm_binary: &[u8], - initial_authorities: Vec<(SassafrasId, GrandpaId)>, + initial_authorities: Vec<(AccountId, SassafrasId, GrandpaId)>, root_key: AccountId, endowed_accounts: Vec, - _enable_println: bool, ) -> GenesisConfig { GenesisConfig { system: SystemConfig { @@ -141,18 +125,39 @@ fn testnet_genesis( // Configure endowed accounts with initial balance of 1 << 60. balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), }, - sassafras: SassafrasConfig { - authorities: initial_authorities.iter().map(|x| (x.0.clone(), 0)).collect(), - epoch_config: Some(node_sassafras_runtime::SASSAFRAS_GENESIS_EPOCH_CONFIG), + #[cfg(feature = "use-session-pallet")] + authorities: vec![], + #[cfg(not(feature = "use-session-pallet"))] + authorities: initial_authorities.iter().map(|x| (x.1.clone(), 0)).collect(), + epoch_config: SassafrasEpochConfiguration { + attempts_number: SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER, + redundancy_factor: SASSAFRAS_TICKETS_REDUNDANCY_FACTOR, + }, }, grandpa: GrandpaConfig { - authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), + #[cfg(feature = "use-session-pallet")] + authorities: vec![], + #[cfg(not(feature = "use-session-pallet"))] + authorities: initial_authorities.iter().map(|x| (x.2.clone(), 1)).collect(), }, sudo: SudoConfig { // Assign network admin rights. 
key: Some(root_key), }, transaction_payment: Default::default(), + #[cfg(feature = "use-session-pallet")] + session: SessionConfig { + keys: initial_authorities + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + SessionKeys { sassafras: x.1.clone(), grandpa: x.2.clone() }, + ) + }) + .collect::>(), + }, } } diff --git a/bin/node-sassafras/node/src/cli.rs b/bin/node-sassafras/node/src/cli.rs index bb2ffa1938107..4ab4d34210c98 100644 --- a/bin/node-sassafras/node/src/cli.rs +++ b/bin/node-sassafras/node/src/cli.rs @@ -36,6 +36,10 @@ pub enum Subcommand { /// Revert the chain to a previous state. Revert(sc_cli::RevertCmd), + /// Sub-commands concerned with benchmarking. + #[clap(subcommand)] + Benchmark(frame_benchmarking_cli::BenchmarkCmd), + /// Try some command against runtime state. #[cfg(feature = "try-runtime")] TryRuntime(try_runtime_cli::TryRuntimeCmd), diff --git a/bin/node-sassafras/node/src/command.rs b/bin/node-sassafras/node/src/command.rs index cf17c37968f54..74ac7dc809802 100644 --- a/bin/node-sassafras/node/src/command.rs +++ b/bin/node-sassafras/node/src/command.rs @@ -3,6 +3,7 @@ use crate::{ cli::{Cli, Subcommand}, service, }; +use frame_benchmarking_cli::BenchmarkCmd; use node_sassafras_runtime::Block; use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; @@ -102,6 +103,31 @@ pub fn run() -> sc_cli::Result<()> { Ok((cmd.run(client, backend, Some(aux_revert)), task_manager)) }) }, + Some(Subcommand::Benchmark(cmd)) => { + let runner = cli.create_runner(cmd)?; + + runner.sync_run(|config| { + // This switch needs to be in the client, since the client decides + // which sub-commands it wants to support. + match cmd { + BenchmarkCmd::Pallet(cmd) => { + if !cfg!(feature = "runtime-benchmarks") { + return Err( + "Runtime benchmarking wasn't enabled when building the node. \ + You can enable it with `--features runtime-benchmarks`." 
+ .into(), + ) + } + + cmd.run::(config) + }, + _ => { + println!("Not implemented..."); + Ok(()) + }, + } + }) + }, #[cfg(feature = "try-runtime")] Some(Subcommand::TryRuntime(cmd)) => { let runner = cli.create_runner(cmd)?; diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs index ec8f10c1a59b1..33f66262c6dda 100644 --- a/bin/node-sassafras/node/src/service.rs +++ b/bin/node-sassafras/node/src/service.rs @@ -116,12 +116,12 @@ pub fn new_partial( let justification_import = grandpa_block_import.clone(); let (sassafras_block_import, sassafras_link) = sc_consensus_sassafras::block_import( - sc_consensus_sassafras::Config::get(&*client)?, + sc_consensus_sassafras::configuration(&*client)?, grandpa_block_import, client.clone(), )?; - let slot_duration = sassafras_link.config().slot_duration(); + let slot_duration = sassafras_link.genesis_config().slot_duration(); let import_queue = sc_consensus_sassafras::import_queue( sassafras_link.clone(), @@ -269,7 +269,7 @@ pub fn new_full(mut config: Configuration) -> Result let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - let slot_duration = sassafras_link.config().slot_duration(); + let slot_duration = sassafras_link.genesis_config().slot_duration(); let sassafras_config = sc_consensus_sassafras::SassafrasParams { client: client.clone(), diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml index 233d9e0e14bbb..823e1dc2bd4eb 100644 --- a/bin/node-sassafras/runtime/Cargo.toml +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -17,9 +17,9 @@ scale-info = { version = "2.1.1", default-features = false, features = ["derive" pallet-sassafras = { version = "0.1.0", default-features = false, path = "../../../frame/sassafras" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../../frame/balances" } +pallet-session = { version = "4.0.0-dev", default-features = false, path = 
"../../../frame/session" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support" } pallet-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../../frame/grandpa" } -pallet-randomness-collective-flip = { version = "4.0.0-dev", default-features = false, path = "../../../frame/randomness-collective-flip" } pallet-sudo = { version = "4.0.0-dev", default-features = false, path = "../../../frame/sudo" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system" } frame-try-runtime = { version = "0.10.0-dev", default-features = false, path = "../../../frame/try-runtime", optional = true } @@ -62,8 +62,8 @@ std = [ "pallet-sassafras/std", "pallet-balances/std", "pallet-grandpa/std", - "pallet-randomness-collective-flip/std", "pallet-sudo/std", + "pallet-session/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", @@ -87,6 +87,7 @@ runtime-benchmarks = [ "hex-literal", "pallet-balances/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", + "pallet-sassafras/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] @@ -96,8 +97,8 @@ try-runtime = [ "frame-system/try-runtime", "pallet-balances/try-runtime", "pallet-grandpa/try-runtime", - "pallet-randomness-collective-flip/try-runtime", "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", ] +use-session-pallet = [] diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index c458605375ab1..c428931e99dbe 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -6,41 +6,34 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -use pallet_grandpa::{ - fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, -}; use sp_api::impl_runtime_apis; use 
sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +#[cfg(feature = "use-session-pallet")] +use sp_runtime::traits::OpaqueKeys; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, MultiSignature, + ApplyExtrinsicResult, MultiSignature, Perbill, }; use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -// A few exports that help ease life for downstream crates. -pub use frame_support::{ +use pallet_grandpa::{ + fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, +}; +use pallet_transaction_payment::CurrencyAdapter; + +use frame_support::{ construct_runtime, parameter_types, - traits::{ - ConstU128, ConstU32, ConstU64, ConstU8, KeyOwnerProofSystem, Randomness, StorageInfo, - }, + traits::{ConstU128, ConstU32, ConstU64, ConstU8, KeyOwnerProofSystem}, weights::{ - constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, - IdentityFee, Weight, + constants::{RocksDbWeight, WEIGHT_PER_SECOND}, + IdentityFee, }, - StorageValue, }; -pub use frame_system::Call as SystemCall; -pub use pallet_balances::Call as BalancesCall; -pub use pallet_timestamp::Call as TimestampCall; -use pallet_transaction_payment::CurrencyAdapter; -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; -pub use sp_runtime::{Perbill, Permill}; /// An index to a block. pub type BlockNumber = u32; @@ -79,12 +72,12 @@ pub mod opaque { pub type Block = generic::Block; /// Opaque block identifier type. pub type BlockId = generic::BlockId; +} - impl_opaque_keys! { - pub struct SessionKeys { - pub sassafras: Sassafras, - pub grandpa: Grandpa, - } +impl_opaque_keys! 
{ + pub struct SessionKeys { + pub sassafras: Sassafras, + pub grandpa: Grandpa, } } @@ -135,12 +128,6 @@ pub const DAYS: BlockNumber = HOURS * 24; pub const MAX_AUTHORITIES: u32 = 32; -/// The Sassafras epoch configuration at genesis. -pub const SASSAFRAS_GENESIS_EPOCH_CONFIG: sp_consensus_sassafras::SassafrasEpochConfiguration = - sp_consensus_sassafras::SassafrasEpochConfiguration { - // TODO-SASS-P2 - }; - /// The version information used to identify this runtime when compiled natively. #[cfg(feature = "std")] pub fn native_version() -> NativeVersion { @@ -222,8 +209,6 @@ impl frame_system::Config for Runtime { type MaxConsumers = frame_support::traits::ConstU32<16>; } -impl pallet_randomness_collective_flip::Config for Runtime {} - parameter_types! { pub const EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS; pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; @@ -232,11 +217,12 @@ parameter_types! { impl pallet_sassafras::Config for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; + #[cfg(feature = "use-session-pallet")] + type EpochChangeTrigger = pallet_sassafras::ExternalTrigger; + #[cfg(not(feature = "use-session-pallet"))] type EpochChangeTrigger = pallet_sassafras::SameAuthoritiesForever; type MaxAuthorities = ConstU32; type MaxTickets = ConstU32<{ EPOCH_DURATION_IN_SLOTS as u32 }>; - // TODO-SASS-P4. Add some redundancy before starting tickets drop. - type MaxSubmittedTickets = ConstU32<{ 3 * EPOCH_DURATION_IN_SLOTS as u32 }>; } impl pallet_grandpa::Config for Runtime { @@ -255,7 +241,6 @@ impl pallet_grandpa::Config for Runtime { } impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. 
type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; @@ -263,13 +248,11 @@ impl pallet_timestamp::Config for Runtime { } impl pallet_balances::Config for Runtime { + type Event = Event; type MaxLocks = ConstU32<50>; type MaxReserves = (); type ReserveIdentifier = [u8; 8]; - /// The type for recording an account's balance. type Balance = Balance; - /// The ubiquitous event type. - type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU128<500>; type AccountStore = System; @@ -290,7 +273,40 @@ impl pallet_sudo::Config for Runtime { type Call = Call; } -// Create the runtime by composing the FRAME pallets that were previously configured. +#[cfg(feature = "use-session-pallet")] +impl pallet_session::Config for Runtime { + type Event = Event; + type ValidatorId = ::AccountId; + type ValidatorIdOf = (); //pallet_staking::StashOf; + type ShouldEndSession = Sassafras; + type NextSessionRotation = Sassafras; + type SessionManager = (); //pallet_session::historical::NoteHistoricalRoot; + type SessionHandler = ::KeyTypeIdProviders; + type Keys = SessionKeys; + type WeightInfo = pallet_session::weights::SubstrateWeight; +} + +// Create a runtime using session pallet +#[cfg(feature = "use-session-pallet")] +construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = opaque::Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system, + Timestamp: pallet_timestamp, + Sassafras: pallet_sassafras, + Grandpa: pallet_grandpa, + Balances: pallet_balances, + TransactionPayment: pallet_transaction_payment, + Sudo: pallet_sudo, + Session: pallet_session, + } +); + +// Create a runtime NOT using session pallet +#[cfg(not(feature = "use-session-pallet"))] construct_runtime!( pub enum Runtime where Block = Block, @@ -298,7 +314,6 @@ construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic { System: frame_system, - RandomnessCollectiveFlip: pallet_randomness_collective_flip, Timestamp: 
pallet_timestamp, Sassafras: pallet_sassafras, Grandpa: pallet_grandpa, @@ -310,10 +325,13 @@ construct_runtime!( /// The address format for describing accounts. pub type Address = sp_runtime::MultiAddress; + /// Block header type as expected by this runtime. pub type Header = generic::Header; + /// Block type as expected by this runtime. pub type Block = generic::Block; + /// The SignedExtension to the basic transaction logic. pub type SignedExtra = ( frame_system::CheckNonZeroSender, @@ -325,10 +343,13 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, ); + /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + /// The payload being signed in transactions. pub type SignedPayload = generic::SignedPayload; + /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -349,6 +370,8 @@ mod benches { [frame_system, SystemBench::] [pallet_balances, Balances] [pallet_timestamp, Timestamp] + [pallet_grandpa, Grandpa] + [pallet_sassafras, Sassafras] ); } @@ -411,12 +434,13 @@ impl_runtime_apis! { } impl sp_consensus_sassafras::SassafrasApi for Runtime { - fn configuration() -> sp_consensus_sassafras::SassafrasGenesisConfiguration { - sp_consensus_sassafras::SassafrasGenesisConfiguration { + fn configuration() -> sp_consensus_sassafras::SassafrasConfiguration { + sp_consensus_sassafras::SassafrasConfiguration { slot_duration: Sassafras::slot_duration(), - epoch_length: EpochDuration::get(), - genesis_authorities: Sassafras::authorities().to_vec(), + epoch_duration: EpochDuration::get(), + authorities: Sassafras::authorities().to_vec(), randomness: Sassafras::randomness(), + threshold_params: Sassafras::config(), } } @@ -433,13 +457,13 @@ impl_runtime_apis! 
{ impl sp_session::SessionKeys for Runtime { fn generate_session_keys(seed: Option>) -> Vec { - opaque::SessionKeys::generate(seed) + SessionKeys::generate(seed) } fn decode_session_keys( encoded: Vec, ) -> Option, KeyTypeId)>> { - opaque::SessionKeys::decode_into_raw_public_keys(&encoded) + SessionKeys::decode_into_raw_public_keys(&encoded) } } diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index c8f39497ffa5e..8f1aa1115d2a5 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -16,35 +16,32 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Sassafras authority selection and slot claiming. +//! Types and functions related to authority selection and slot claiming. -use crate::Epoch; +use super::*; -use scale_codec::Encode; -use sp_application_crypto::AppKey; use sp_consensus_sassafras::{ - digests::PreDigest, make_slot_transcript_data, make_ticket_transcript_data, AuthorityId, Slot, - Ticket, TicketInfo, SASSAFRAS_TICKET_VRF_PREFIX, + digests::PreDigest, + vrf::{make_slot_transcript_data, make_ticket_transcript_data}, + AuthorityId, Slot, Ticket, TicketInfo, }; -use sp_consensus_vrf::schnorrkel::{PublicKey, VRFInOut, VRFOutput, VRFProof}; use sp_core::{twox_64, ByteArray}; -use sp_keystore::{vrf::make_transcript, SyncCryptoStore, SyncCryptoStorePtr}; /// Get secondary authority index for the given epoch and slot. -#[inline] -pub fn secondary_authority_index(slot: Slot, epoch: &Epoch) -> u64 { - u64::from_le_bytes((epoch.randomness, slot).using_encoded(twox_64)) % - epoch.authorities.len() as u64 +pub(crate) fn secondary_authority_index(slot: Slot, config: &SassafrasConfiguration) -> u64 { + u64::from_le_bytes((config.randomness, slot).using_encoded(twox_64)) % + config.authorities.len() as u64 } /// Try to claim an epoch slot. 
/// If ticket is `None`, then the slot should be claimed using the fallback mechanism. -pub fn claim_slot( +fn claim_slot( slot: Slot, epoch: &Epoch, ticket: Option, keystore: &SyncCryptoStorePtr, ) -> Option<(PreDigest, AuthorityId)> { + let config = &epoch.config; let (authority_index, ticket_info) = match ticket { Some(ticket) => { log::debug!(target: "sassafras", "🌳 [TRY PRIMARY]"); @@ -56,97 +53,61 @@ pub fn claim_slot( }, None => { log::debug!(target: "sassafras", "🌳 [TRY SECONDARY]"); - (secondary_authority_index(slot, epoch), None) + (secondary_authority_index(slot, config), None) }, }; - let authority_id = epoch.authorities.get(authority_index as usize).map(|auth| &auth.0)?; + let authority_id = config.authorities.get(authority_index as usize).map(|auth| &auth.0)?; - let transcript_data = make_slot_transcript_data(&epoch.randomness, slot, epoch.epoch_index); - let result = SyncCryptoStore::sr25519_vrf_sign( + let transcript_data = make_slot_transcript_data(&config.randomness, slot, epoch.epoch_index); + let signature = SyncCryptoStore::sr25519_vrf_sign( &**keystore, AuthorityId::ID, authority_id.as_ref(), transcript_data, - ); - - match result { - Ok(Some(signature)) => { - let pre_digest = PreDigest { - authority_index: authority_index as u32, - slot, - block_vrf_output: VRFOutput(signature.output), - block_vrf_proof: VRFProof(signature.proof.clone()), - ticket_info, - }; - Some((pre_digest, authority_id.clone())) - }, - _ => None, - } -} - -/// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: -/// - x: redundancy factor; -/// - s: number of slots in epoch; -/// - a: max number of attempts; -/// - v: number of validator in epoch. -/// The parameters should be chosen such that T <= 1. -/// If `attempts * validators` is zero then we fallback to T = 0 -// TODO-SASS-P3: this formula must be double-checked... 
-#[inline] -fn calculate_threshold(redundancy: u32, slots: u32, attempts: u32, validators: u32) -> u128 { - let den = attempts as u128 * validators as u128; - let num = redundancy as u128 * slots as u128; - let res = u128::MAX.checked_div(den).unwrap_or(0).saturating_mul(num); + ) + .ok() + .flatten()?; - // TODO-SASS-P4 remove me - log::debug!( - target: "sassafras", - "🌳 Tickets threshold: {} {:016x}", num as f64 / den as f64, res, - ); - res -} + let pre_digest = PreDigest { + authority_index: authority_index as u32, + slot, + vrf_output: VRFOutput(signature.output), + vrf_proof: VRFProof(signature.proof.clone()), + ticket_info, + }; -/// Returns true if the given VRF output is lower than the given threshold, false otherwise. -#[inline] -pub fn check_threshold(inout: &VRFInOut, threshold: u128) -> bool { - u128::from_le_bytes(inout.make_bytes::<[u8; 16]>(SASSAFRAS_TICKET_VRF_PREFIX)) < threshold + Some((pre_digest, authority_id.clone())) } /// Generate the tickets for the given epoch. /// Tickets additional information (i.e. `TicketInfo`) will be stored within the `Epoch` /// structure. The additional information will be used during epoch to claim slots. 
-pub fn generate_epoch_tickets( - epoch: &mut Epoch, - max_attempts: u32, - redundancy_factor: u32, - keystore: &SyncCryptoStorePtr, -) -> Vec { +pub fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) -> Vec { + let config = &epoch.config; + let max_attempts = config.threshold_params.attempts_number; + let redundancy_factor = config.threshold_params.redundancy_factor; let mut tickets = vec![]; - let threshold = calculate_threshold( + let threshold = sp_consensus_sassafras::compute_threshold( redundancy_factor, - epoch.duration as u32, + config.epoch_duration as u32, max_attempts, - epoch.authorities.len() as u32, + config.authorities.len() as u32, ); + // TODO-SASS-P4 remove me + log::debug!(target: "sassafras", "🌳 Tickets threshold: {:032x}", threshold); - let authorities = epoch.authorities.iter().enumerate().map(|(index, a)| (index, &a.0)); + let authorities = config.authorities.iter().enumerate().map(|(index, a)| (index, &a.0)); for (authority_index, authority_id) in authorities { - let raw_key = authority_id.to_raw_vec(); - - if !SyncCryptoStore::has_keys(&**keystore, &[(raw_key.clone(), AuthorityId::ID)]) { + if !SyncCryptoStore::has_keys(&**keystore, &[(authority_id.to_raw_vec(), AuthorityId::ID)]) + { continue } - let public = match PublicKey::from_bytes(&raw_key) { - Ok(public) => public, - Err(_) => continue, - }; - - let get_ticket = |attempt| { + let make_ticket = |attempt| { let transcript_data = - make_ticket_transcript_data(&epoch.randomness, attempt as u64, epoch.epoch_index); + make_ticket_transcript_data(&config.randomness, attempt, epoch.epoch_index); // TODO-SASS-P4: can be a good idea to replace `vrf_sign` with `vrf_sign_after_check`, // But we need to modify the CryptoStore interface first. 
@@ -158,13 +119,11 @@ pub fn generate_epoch_tickets( ) .ok()??; - let transcript = make_transcript(transcript_data); - let inout = signature.output.attach_input_hash(&public, transcript).ok()?; - if !check_threshold(&inout, threshold) { + let ticket = VRFOutput(signature.output); + if !sp_consensus_sassafras::check_threshold(&ticket, threshold) { return None } - let ticket = VRFOutput(signature.output); let ticket_info = TicketInfo { attempt: attempt as u32, authority_index: authority_index as u32, @@ -175,7 +134,7 @@ pub fn generate_epoch_tickets( }; for attempt in 0..max_attempts { - if let Some((ticket, ticket_info)) = get_ticket(attempt) { + if let Some((ticket, ticket_info)) = make_ticket(attempt) { tickets.push(ticket); epoch.tickets_info.insert(ticket, ticket_info); } @@ -183,3 +142,427 @@ pub fn generate_epoch_tickets( } tickets } + +struct SassafrasSlotWorker { + client: Arc, + block_import: I, + env: E, + sync_oracle: SO, + justification_sync_link: L, + force_authoring: bool, + keystore: SyncCryptoStorePtr, + epoch_changes: SharedEpochChanges, + slot_notification_sinks: SlotNotificationSinks, + genesis_config: SassafrasConfiguration, +} + +#[async_trait::async_trait] +impl sc_consensus_slots::SimpleSlotWorker + for SassafrasSlotWorker +where + B: BlockT, + C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata, + C::Api: SassafrasApi, + E: Environment + Sync, + E::Proposer: Proposer>, + I: BlockImport> + Send + Sync + 'static, + SO: SyncOracle + Send + Clone + Sync, + L: sc_consensus::JustificationSyncLink, + ER: std::error::Error + Send + 'static, +{ + type EpochData = ViableEpochDescriptor, Epoch>; + type Claim = (PreDigest, AuthorityId); + type SyncOracle = SO; + type JustificationSyncLink = L; + type CreateProposer = + Pin> + Send + 'static>>; + type Proposer = E::Proposer; + type BlockImport = I; + + fn logging_target(&self) -> &'static str { + "sassafras" + } + + fn block_import(&mut self) -> &mut Self::BlockImport { + &mut self.block_import + } + + 
fn epoch_data( + &self, + parent: &B::Header, + slot: Slot, + ) -> Result { + self.epoch_changes + .shared_data() + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + *parent.number(), + slot, + ) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? + .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) + } + + fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { + self.epoch_changes + .shared_data() + .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot)) + .map(|epoch| epoch.as_ref().config.authorities.len()) + } + + async fn claim_slot( + &self, + parent_header: &B::Header, + slot: Slot, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) -> Option { + debug!(target: "sassafras", "🌳 Attempting to claim slot {}", slot); + + // Get the next slot ticket from the runtime. + let block_id = BlockId::Hash(parent_header.hash()); + let ticket = self.client.runtime_api().slot_ticket(&block_id, slot).ok()?; + + // TODO-SASS-P2 + debug!(target: "sassafras", "🌳 parent {}", parent_header.hash()); + + let claim = authorship::claim_slot( + slot, + self.epoch_changes + .shared_data() + .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot))? 
+ .as_ref(), + ticket, + &self.keystore, + ); + if claim.is_some() { + debug!(target: "sassafras", "🌳 Claimed slot {}", slot); + } + claim + } + + fn notify_slot( + &self, + _parent_header: &B::Header, + slot: Slot, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) { + RetainMut::retain_mut(&mut *self.slot_notification_sinks.lock(), |sink| { + match sink.try_send((slot, epoch_descriptor.clone())) { + Ok(()) => true, + Err(e) => + if e.is_full() { + warn!(target: "sassafras", "🌳 Trying to notify a slot but the channel is full"); + true + } else { + false + }, + } + }); + } + + fn pre_digest_data(&self, _slot: Slot, claim: &Self::Claim) -> Vec { + vec![::sassafras_pre_digest(claim.0.clone())] + } + + async fn block_import_params( + &self, + header: B::Header, + header_hash: &B::Hash, + body: Vec, + storage_changes: StorageChanges<>::Transaction, B>, + (_, public): Self::Claim, + epoch_descriptor: Self::EpochData, + ) -> Result< + sc_consensus::BlockImportParams>::Transaction>, + sp_consensus::Error, + > { + // Sign the pre-sealed hash of the block and then add it to a digest item. + let public_type_pair = public.clone().into(); + let public = public.to_raw_vec(); + let signature = SyncCryptoStore::sign_with( + &*self.keystore, + ::ID, + &public_type_pair, + header_hash.as_ref(), + ) + .map_err(|e| sp_consensus::Error::CannotSign(public.clone(), e.to_string()))? 
+ .ok_or_else(|| { + sp_consensus::Error::CannotSign( + public.clone(), + "Could not find key in keystore.".into(), + ) + })?; + let signature: AuthoritySignature = signature + .clone() + .try_into() + .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; + let digest_item = ::sassafras_seal(signature); + + let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); + import_block.post_digests.push(digest_item); + import_block.body = Some(body); + import_block.state_action = + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); + import_block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box<_>, + ); + + Ok(import_block) + } + + fn force_authoring(&self) -> bool { + self.force_authoring + } + + fn should_backoff(&self, _slot: Slot, _chain_head: &B::Header) -> bool { + // TODO-SASS-P2 + false + } + + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { + &mut self.sync_oracle + } + + fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink { + &mut self.justification_sync_link + } + + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { + Box::pin( + self.env + .init(block) + .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))), + ) + } + + fn telemetry(&self) -> Option { + // TODO-SASS-P2 + None + } + + fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> Duration { + let parent_slot = find_pre_digest::(&slot_info.chain_head).ok().map(|d| d.slot); + + // TODO-SASS-P2 : clarify this field. 
In Sassafras this is part of 'self'
+ let block_proposal_slot_portion = sc_consensus_slots::SlotProportion::new(0.5);
+
+ sc_consensus_slots::proposing_remaining_duration(
+ parent_slot,
+ slot_info,
+ &block_proposal_slot_portion,
+ None,
+ sc_consensus_slots::SlotLenienceType::Exponential,
+ self.logging_target(),
+ )
+ }
+}
+
+async fn tickets_worker(
+ client: Arc,
+ keystore: SyncCryptoStorePtr,
+ epoch_changes: SharedEpochChanges,
+ select_chain: SC,
+) where
+ B: BlockT,
+ C: BlockchainEvents + ProvideRuntimeApi,
+ C::Api: SassafrasApi,
+ SC: SelectChain + 'static,
+{
+ let mut notifications = client.import_notification_stream();
+ while let Some(notification) = notifications.next().await {
+ let epoch_desc = match find_next_epoch_digest::(&notification.header) {
+ Ok(Some(epoch_desc)) => epoch_desc,
+ Err(err) => {
+ warn!(target: "sassafras", "🌳 Error fetching next epoch digest: {}", err);
+ continue
+ },
+ _ => continue,
+ };
+
+ debug!(target: "sassafras", "🌳 New epoch announced {:x?}", epoch_desc);
+
+ let number = *notification.header.number();
+ let position = if number == One::one() {
+ EpochIdentifierPosition::Genesis1
+ } else {
+ EpochIdentifierPosition::Regular
+ };
+ let epoch_identifier = EpochIdentifier { position, hash: notification.hash, number };
+
+ let tickets = epoch_changes
+ .shared_data()
+ .epoch_mut(&epoch_identifier)
+ .map(|epoch| authorship::generate_epoch_tickets(epoch, &keystore))
+ .unwrap_or_default();
+
+ if tickets.is_empty() {
+ continue
+ }
+
+ // Get the best block on which we will build and send the tickets. 
+ let best_id = match select_chain.best_chain().await {
+ Ok(header) => BlockId::Hash(header.hash()),
+ Err(err) => {
+ error!(target: "sassafras", "🌳 Error fetching best chain block id: {}", err);
+ continue
+ },
+ };
+
+ let err = match client.runtime_api().submit_tickets_unsigned_extrinsic(&best_id, tickets) {
+ Err(err) => Some(err.to_string()),
+ Ok(false) => Some("Unknown reason".to_string()),
+ _ => None,
+ };
+ if let Some(err) = err {
+ error!(target: "sassafras", "🌳 Unable to submit tickets: {}", err);
+ // Remove tickets from epoch tree node.
+ epoch_changes
+ .shared_data()
+ .epoch_mut(&epoch_identifier)
+ .map(|epoch| epoch.tickets_info.clear());
+ }
+ }
+}
+
+/// Worker for Sassafras which implements `Future`. This must be polled.
+pub struct SassafrasWorker {
+ inner: Pin + Send + 'static>>,
+ slot_notification_sinks: SlotNotificationSinks,
+}
+
+impl SassafrasWorker {
+ /// Return an event stream of notifications for when new slot happens, and the corresponding
+ /// epoch descriptor.
+ pub fn slot_notification_stream(
+ &self,
+ ) -> Receiver<(Slot, ViableEpochDescriptor, Epoch>)> {
+ const CHANNEL_BUFFER_SIZE: usize = 1024;
+
+ let (sink, stream) = channel(CHANNEL_BUFFER_SIZE);
+ self.slot_notification_sinks.lock().push(sink);
+ stream
+ }
+}
+
+impl Future for SassafrasWorker {
+ type Output = ();
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll {
+ self.inner.as_mut().poll(cx)
+ }
+}
+
+/// Slot notification sinks.
+type SlotNotificationSinks = Arc<
+ Mutex::Hash, NumberFor, Epoch>)>>>,
+>;
+
+/// Parameters for Sassafras.
+pub struct SassafrasParams {
+ /// The client to use
+ pub client: Arc,
+ /// The keystore that manages the keys of the node.
+ pub keystore: SyncCryptoStorePtr,
+ /// The chain selection strategy
+ pub select_chain: SC,
+ /// The environment we are producing blocks for.
+ pub env: EN,
+ /// The underlying block-import object to supply our produced blocks to. 
+ /// This must be a `SassafrasBlockImport` or a wrapper of it, otherwise + /// critical consensus logic will be omitted. + pub block_import: I, + /// A sync oracle + pub sync_oracle: SO, + /// Hook into the sync module to control the justification sync process. + pub justification_sync_link: L, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, + /// Force authoring of blocks even if we are offline + pub force_authoring: bool, + /// The source of timestamps for relative slots + pub sassafras_link: SassafrasLink, + /// Checks if the current native implementation can author with a runtime at a given block. + pub can_author_with: CAW, +} + +/// Start the Sassafras worker. +pub fn start_sassafras( + SassafrasParams { + client, + keystore, + select_chain, + env, + block_import, + sync_oracle, + justification_sync_link, + create_inherent_data_providers, + force_authoring, + sassafras_link, + can_author_with, + }: SassafrasParams, +) -> Result, sp_consensus::Error> +where + B: BlockT, + C: ProvideRuntimeApi + + ProvideUncles + + BlockchainEvents + + PreCommitActions + + HeaderBackend + + HeaderMetadata + + Send + + Sync + + 'static, + C::Api: SassafrasApi, + SC: SelectChain + 'static, + EN: Environment + Send + Sync + 'static, + EN::Proposer: Proposer>, + I: BlockImport> + + Send + + Sync + + 'static, + SO: SyncOracle + Send + Sync + Clone + 'static, + L: sc_consensus::JustificationSyncLink + 'static, + CIDP: CreateInherentDataProviders + Send + Sync + 'static, + CIDP::InherentDataProviders: InherentDataProviderExt + Send, + CAW: CanAuthorWith + Send + Sync + 'static, + ER: std::error::Error + Send + From + From + 'static, +{ + info!(target: "sassafras", "🌳 🍁 Starting Sassafras Authorship worker"); + + let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); + + let slot_worker = SassafrasSlotWorker { + client: client.clone(), + block_import, + env, + sync_oracle: sync_oracle.clone(), + justification_sync_link, 
+ force_authoring, + keystore: keystore.clone(), + epoch_changes: sassafras_link.epoch_changes.clone(), + slot_notification_sinks: slot_notification_sinks.clone(), + genesis_config: sassafras_link.genesis_config.clone(), + }; + + let slot_worker = sc_consensus_slots::start_slot_worker( + sassafras_link.genesis_config.slot_duration(), + select_chain.clone(), + sc_consensus_slots::SimpleSlotWorkerToSlotWorker(slot_worker), + sync_oracle, + create_inherent_data_providers, + can_author_with, + ); + + let tickets_worker = tickets_worker( + client.clone(), + keystore, + sassafras_link.epoch_changes.clone(), + select_chain, + ); + + let inner = future::select(Box::pin(slot_worker), Box::pin(tickets_worker)); + + Ok(SassafrasWorker { inner: Box::pin(inner.map(|_| ())), slot_notification_sinks }) +} diff --git a/client/consensus/sassafras/src/aux_schema.rs b/client/consensus/sassafras/src/aux_schema.rs index 59f53415a31d2..07f723341b069 100644 --- a/client/consensus/sassafras/src/aux_schema.rs +++ b/client/consensus/sassafras/src/aux_schema.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Schema for Sassafras epoch changes in the auxiliary db. +//! Schema for auxiliary data persistence. use scale_codec::{Decode, Encode}; diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs new file mode 100644 index 0000000000000..3630589aeb46a --- /dev/null +++ b/client/consensus/sassafras/src/block_import.rs @@ -0,0 +1,368 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Types and functions related to block import. + +use super::*; + +/// A block-import handler for Sassafras. +/// +/// This scans each imported block for epoch change announcements. The announcements are +/// tracked in a tree (of all forks), and the import logic validates all epoch change +/// transitions, i.e. whether a given epoch change is expected or whether it is missing. +/// +/// The epoch change tree should be pruned as blocks are finalized. +pub struct SassafrasBlockImport { + inner: I, + client: Arc, + epoch_changes: SharedEpochChanges, + genesis_config: SassafrasConfiguration, +} + +impl Clone for SassafrasBlockImport { + fn clone(&self) -> Self { + SassafrasBlockImport { + inner: self.inner.clone(), + client: self.client.clone(), + epoch_changes: self.epoch_changes.clone(), + genesis_config: self.genesis_config.clone(), + } + } +} + +impl SassafrasBlockImport { + /// Constructor. 
+ pub fn new(
+ inner: I,
+ client: Arc,
+ epoch_changes: SharedEpochChanges,
+ genesis_config: SassafrasConfiguration,
+ ) -> Self {
+ SassafrasBlockImport { inner, client, epoch_changes, genesis_config }
+ }
+}
+
+#[async_trait::async_trait]
+impl BlockImport for SassafrasBlockImport
+where
+ Block: BlockT,
+ Inner: BlockImport> + Send + Sync,
+ Inner::Error: Into,
+ Client: HeaderBackend
+ + HeaderMetadata
+ + AuxStore
+ + ProvideRuntimeApi
+ + Send
+ + Sync,
+ Client::Api: SassafrasApi + ApiExt,
+{
+ type Error = ConsensusError;
+ type Transaction = sp_api::TransactionFor;
+
+ async fn import_block(
+ &mut self,
+ mut block: BlockImportParams,
+ new_cache: HashMap>,
+ ) -> Result {
+ let hash = block.post_hash();
+ let number = *block.header.number();
+
+ let pre_digest = find_pre_digest::(&block.header).expect(
+ "valid sassafras headers must contain a predigest; header has been already verified; qed",
+ );
+ let slot = pre_digest.slot;
+
+ let parent_hash = *block.header.parent_hash();
+ let parent_header = self
+ .client
+ .header(BlockId::Hash(parent_hash))
+ .map_err(|e| ConsensusError::ChainLookup(e.to_string()))?
+ .ok_or_else(|| {
+ ConsensusError::ChainLookup(
+ sassafras_err(Error::::ParentUnavailable(parent_hash, hash)).into(),
+ )
+ })?;
+
+ let parent_slot = find_pre_digest::(&parent_header).map(|d| d.slot).expect(
+ "parent is non-genesis; valid Sassafras headers contain a pre-digest; \
+ header has already been verified; qed",
+ );
+
+ // Make sure that slot number is strictly increasing
+ if slot <= parent_slot {
+ return Err(ConsensusError::ClientImport(
+ sassafras_err(Error::::SlotMustIncrease(parent_slot, slot)).into(),
+ ))
+ }
+
+ // If there's a pending epoch we'll save the previous epoch changes here
+ // this way we can revert it if there's any error
+ let mut old_epoch_changes = None;
+
+ // Use an extra scope to make the compiler happy, because otherwise it complains about the
+ // mutex, even if we dropped it... 
+ let mut epoch_changes = { + let mut epoch_changes = self.epoch_changes.shared_data_locked(); + + // Check if there's any epoch change expected to happen at this slot. + // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true + // if this is the first block in its chain for that epoch. + // + // also provides the total weight of the chain, including the imported block. + let parent_weight = if *parent_header.number() == Zero::zero() { + 0 + } else { + aux_schema::load_block_weight(&*self.client, parent_hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .ok_or_else(|| { + ConsensusError::ClientImport( + sassafras_err(Error::::ParentBlockNoAssociatedWeight(hash)) + .into(), + ) + })? + }; + + let intermediate = + block.take_intermediate::>(INTERMEDIATE_KEY)?; + + let epoch_descriptor = intermediate.epoch_descriptor; + let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); + + let added_weight = pre_digest.ticket_info.is_some() as u32; + let total_weight = parent_weight + added_weight; + + // Search for this all the time so we can reject unexpected announcements. 
+ let next_epoch_digest = find_next_epoch_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + match (first_in_epoch, next_epoch_digest.is_some()) { + (true, false) => + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::ExpectedEpochChange(hash, slot)).into(), + )), + (false, true) => + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::UnexpectedEpochChange).into(), + )), + _ => (), + } + + let info = self.client.info(); + + if let Some(next_epoch_descriptor) = next_epoch_digest { + old_epoch_changes = Some((*epoch_changes).clone()); + + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| { + Epoch::genesis(&self.genesis_config, slot) + }) + .ok_or_else(|| { + ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) + })?; + + // restrict info logging during initial sync to avoid spam + let log_level = if block.origin == BlockOrigin::NetworkInitialSync { + log::Level::Debug + } else { + log::Level::Info + }; + + log!(target: "sassafras", + log_level, + "🌳 🍁 New epoch {} launching at block {} (block slot {} >= start slot {}).", + viable_epoch.as_ref().epoch_index, + hash, + slot, + viable_epoch.as_ref().start_slot, + ); + + let next_epoch = viable_epoch.increment(next_epoch_descriptor); + + log!(target: "sassafras", + log_level, + "🌳 🍁 Next epoch starts at slot {}", + next_epoch.as_ref().start_slot, + ); + + // Prune the tree of epochs not part of the finalized chain or + // that are not live anymore, and then track the given epoch change + // in the tree. + // NOTE: it is important that these operations are done in this + // order, otherwise if pruning after import the `is_descendent_of` + // used by pruning may not know about the block that is being + // imported. 
+ let prune_and_import = || { + prune_finalized(self.client.clone(), &mut epoch_changes)?; + + epoch_changes + .import( + descendent_query(&*self.client), + hash, + number, + *block.header.parent_hash(), + next_epoch, + ) + .map_err(|e| { + ConsensusError::ClientImport(format!( + "Error importing epoch changes: {}", + e + )) + })?; + + Ok(()) + }; + + if let Err(e) = prune_and_import() { + debug!(target: "sassafras", "🌳 Failed to launch next epoch: {}", e); + *epoch_changes = + old_epoch_changes.expect("set `Some` above and not taken; qed"); + return Err(e) + } + + aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { + block + .auxiliary + .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + } + + aux_schema::write_block_weight(hash, total_weight, |values| { + block + .auxiliary + .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + + // The fork choice rule is that we pick the heaviest chain (i.e. + // more primary blocks), if there's a tie we go with the longest + // chain. + block.fork_choice = { + let (last_best, last_best_number) = (info.best_hash, info.best_number); + + let last_best_weight = if &last_best == block.header.parent_hash() { + // the parent=genesis case is already covered for loading parent weight, + // so we don't need to cover again here. + parent_weight + } else { + aux_schema::load_block_weight(&*self.client, last_best) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? + .ok_or_else(|| { + ConsensusError::ChainLookup( + "No block weight for parent header.".to_string(), + ) + })? 
+ }; + + Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { + true + } else if total_weight == last_best_weight { + number > last_best_number + } else { + false + })) + }; + // Release the mutex, but it stays locked + epoch_changes.release_mutex() + }; + + let import_result = self.inner.import_block(block, new_cache).await; + + // Revert to the original epoch changes in case there's an error + // importing the block + if import_result.is_err() { + if let Some(old_epoch_changes) = old_epoch_changes { + *epoch_changes.upgrade() = old_epoch_changes; + } + } + + import_result.map_err(Into::into) + } + + async fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + self.inner.check_block(block).await.map_err(Into::into) + } +} + +/// Gets the best finalized block and its slot, and prunes the given epoch tree. +fn prune_finalized( + client: Arc, + epoch_changes: &mut EpochChangesFor, +) -> Result<(), ConsensusError> +where + B: BlockT, + C: HeaderBackend + HeaderMetadata, +{ + let info = client.info(); + if info.block_gap.is_none() { + epoch_changes.clear_gap(); + } + + let finalized_slot = { + let finalized_header = client + .header(BlockId::Hash(info.finalized_hash)) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .expect( + "best finalized hash was given by client; finalized headers must exist in db; qed", + ); + + find_pre_digest::(&finalized_header) + .expect("finalized header must be valid; valid blocks have a pre-digest; qed") + .slot + }; + + epoch_changes + .prune_finalized( + descendent_query(&*client), + &info.finalized_hash, + info.finalized_number, + finalized_slot, + ) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + Ok(()) +} + +/// Produce a Sassafras block-import object to be used later on in the construction of +/// an import-queue. +/// +/// Also returns a link object used to correctly instantiate the import queue +/// and background worker. 
+pub fn block_import( + genesis_config: SassafrasConfiguration, + inner_block_import: I, + client: Arc, +) -> ClientResult<(SassafrasBlockImport, SassafrasLink)> +where + C: AuxStore + HeaderBackend + HeaderMetadata + 'static, +{ + let epoch_changes = aux_schema::load_epoch_changes::(&*client)?; + + prune_finalized(client.clone(), &mut epoch_changes.shared_data())?; + + let link = SassafrasLink { + epoch_changes: epoch_changes.clone(), + genesis_config: genesis_config.clone(), + }; + + let block_import = + SassafrasBlockImport::new(inner_block_import, client, epoch_changes, genesis_config); + + Ok((block_import, link)) +} diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index c83b84cb0ff37..d81b8788fbae9 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -52,7 +52,8 @@ use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction, }, - import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue, Verifier}, + import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue}, + Verifier, }; use sc_consensus_epochs::{ descendent_query, Epoch as EpochT, EpochChangesFor, EpochIdentifier, EpochIdentifierPosition, @@ -70,8 +71,8 @@ use sp_consensus::{ BlockOrigin, CacheKeyId, CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle, }; -use sp_consensus_slots::{Slot, SlotDuration}; -use sp_core::{crypto::ByteArray, ExecutionContext}; +use sp_consensus_slots::Slot; +use sp_core::{crypto::ByteArray, ExecutionContext, Pair}; use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ @@ -84,78 +85,22 @@ use sp_runtime::{ pub use sp_consensus_sassafras::{ digests::{CompatibleDigestItem, ConsensusLog, NextEpochDescriptor, PreDigest}, inherents::SassafrasInherentData, + 
vrf::{make_slot_transcript, make_ticket_transcript}, AuthorityId, AuthorityPair, AuthoritySignature, SassafrasApi, SassafrasAuthorityWeight, - SassafrasEpochConfiguration, SassafrasGenesisConfiguration, Ticket, TicketInfo, VRFOutput, - VRFProof, SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, + SassafrasConfiguration, SassafrasEpochConfiguration, Ticket, TicketInfo, VRFOutput, VRFProof, + SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, }; mod authorship; mod aux_schema; +mod block_import; mod verification; -/// Sassafras epoch information -#[derive(Encode, Decode, PartialEq, Eq, Clone, Debug)] -pub struct Epoch { - /// The epoch index. - pub epoch_index: u64, - /// The starting slot of the epoch. - pub start_slot: Slot, - /// The duration of this epoch in slots. - pub duration: u64, - /// The authorities and their weights. - pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, - /// Randomness for this epoch. - pub randomness: [u8; VRF_OUTPUT_LENGTH], - /// Configuration of the epoch. - pub config: SassafrasEpochConfiguration, - /// Tickets metadata. - pub tickets_info: BTreeMap, -} - -impl EpochT for Epoch { - type NextEpochDescriptor = NextEpochDescriptor; - type Slot = Slot; +pub use authorship::{start_sassafras, SassafrasParams, SassafrasWorker}; +pub use block_import::{block_import, SassafrasBlockImport}; +pub use verification::SassafrasVerifier; - fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { - Epoch { - epoch_index: self.epoch_index + 1, - start_slot: self.start_slot + self.duration, - duration: self.duration, - authorities: descriptor.authorities, - randomness: descriptor.randomness, - // TODO-SASS-P2: allow config change on epoch change - config: self.config.clone(), - tickets_info: BTreeMap::new(), - } - } - - fn start_slot(&self) -> Slot { - self.start_slot - } - - fn end_slot(&self) -> Slot { - self.start_slot + self.duration - } -} - -impl Epoch { - /// Create the genesis epoch (epoch #0). 
This is defined to start at the slot of - /// the first block, so that has to be provided. - pub fn genesis(genesis_config: &SassafrasGenesisConfiguration, slot: Slot) -> Epoch { - Epoch { - epoch_index: 0, - start_slot: slot, - duration: genesis_config.epoch_length, - authorities: genesis_config.genesis_authorities.clone(), - randomness: genesis_config.randomness, - config: SassafrasEpochConfiguration {}, - tickets_info: BTreeMap::new(), - } - } -} - -/// Errors encountered by the Sassafras authorship task. -/// TODO-SASS-P2: remove unused errors. +/// Errors encountered by the Sassafras routines. #[derive(Debug, thiserror::Error)] pub enum Error { /// Multiple Sassafras pre-runtime digests @@ -167,12 +112,6 @@ pub enum Error { /// Multiple Sassafras epoch change digests #[error("Multiple Sassafras epoch change digests")] MultipleEpochChangeDigests, - // /// Multiple Sassafras config change digests - // #[error("Multiple Sassafras config change digests, rejecting!")] - // MultipleConfigChangeDigests, - // /// Could not extract timestamp and slot - // #[error("Could not extract timestamp and slot: {0}")] - // Extraction(sp_consensus::Error), /// Could not fetch epoch #[error("Could not fetch epoch at {0:?}")] FetchEpoch(B::Hash), @@ -197,12 +136,6 @@ pub enum Error { /// Bad signature #[error("Bad signature on {0:?}")] BadSignature(B::Hash), - // /// Invalid author: Expected secondary author - // #[error("Invalid author: Expected secondary author: {0:?}, got: {1:?}.")] - // InvalidAuthor(AuthorityId, AuthorityId), - // /// VRF verification of block by author failed - // #[error("VRF verification of block by author {0:?} failed: threshold {1} exceeded")] - // VRFVerificationOfBlockFailed(AuthorityId, u128), /// VRF verification failed #[error("VRF verification failed: {0:?}")] VRFVerificationFailed(SignatureError), @@ -215,9 +148,6 @@ pub enum Error { /// Expected epoch change to happen. 
#[error("Expected epoch change to happen at {0:?}, s{1}")] ExpectedEpochChange(B::Hash, Slot), - // /// Unexpected config change. - // #[error("Unexpected config change")] - // UnexpectedConfigChange, /// Unexpected epoch change #[error("Unexpected epoch change")] UnexpectedEpochChange, @@ -250,509 +180,107 @@ impl From> for String { } } +// Convenience function fn sassafras_err(error: Error) -> Error { error!(target: "sassafras", "🌳 {}", error); error } -/// Intermediate value passed to block importer. -pub struct SassafrasIntermediate { - /// The epoch descriptor. - pub epoch_descriptor: ViableEpochDescriptor, Epoch>, +/// Sassafras epoch information +#[derive(Encode, Decode, PartialEq, Eq, Clone, Debug)] +pub struct Epoch { + /// The epoch index. + pub epoch_index: u64, + /// The starting slot of the epoch. + pub start_slot: Slot, + /// Epoch configuration + pub config: SassafrasConfiguration, + /// Tickets metadata. + pub tickets_info: BTreeMap, } -/// Intermediate key for Babe engine. -pub static INTERMEDIATE_KEY: &[u8] = b"sass1"; - -/// Configuration for Sassafras used for defining block verification parameters as -/// well as authoring (e.g. the slot duration). -#[derive(Clone)] -pub struct Config { - genesis_config: SassafrasGenesisConfiguration, -} +impl EpochT for Epoch { + type NextEpochDescriptor = NextEpochDescriptor; + type Slot = Slot; -impl Config { - /// Read Sassafras genesis configuration from the runtime. - /// - /// TODO-SASS-P4: (FIXME) - /// This doesn't return the genesis configuration, but the Configuration at best block. - /// There is an open [PR](https://github.com/paritytech/substrate/pull/11760) for BABE, - /// we'll follow the same strategy once it is closed. 
- pub fn get(client: &C) -> ClientResult - where - C: AuxStore + ProvideRuntimeApi + UsageProvider, - C::Api: SassafrasApi, - { - let mut best_block_id = BlockId::Hash(client.usage_info().chain.best_hash); - if client.usage_info().chain.finalized_state.is_none() { - debug!(target: "sassafras", "🌳 No finalized state is available. Reading config from genesis"); - best_block_id = BlockId::Hash(client.usage_info().chain.genesis_hash); + fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { + let config = SassafrasConfiguration { + slot_duration: self.config.slot_duration, + epoch_duration: self.config.epoch_duration, + authorities: descriptor.authorities, + randomness: descriptor.randomness, + threshold_params: descriptor.config.unwrap_or(self.config.threshold_params.clone()), + }; + Epoch { + epoch_index: self.epoch_index + 1, + start_slot: self.start_slot + config.epoch_duration, + config, + tickets_info: BTreeMap::new(), } - - let genesis_config = client.runtime_api().configuration(&best_block_id)?; - - Ok(Config { genesis_config }) } - /// Get the genesis configuration. - pub fn genesis_config(&self) -> &SassafrasGenesisConfiguration { - &self.genesis_config + fn start_slot(&self) -> Slot { + self.start_slot } - /// Get the slot duration defined in the genesis configuration. - pub fn slot_duration(&self) -> SlotDuration { - SlotDuration::from_millis(self.genesis_config.slot_duration) + fn end_slot(&self) -> Slot { + self.start_slot + self.config.slot_duration } } -/// Parameters for Sassafras. -pub struct SassafrasParams { - /// The client to use - pub client: Arc, - /// The keystore that manages the keys of the node. - pub keystore: SyncCryptoStorePtr, - /// The chain selection strategy - pub select_chain: SC, - /// The environment we are producing blocks for. - pub env: EN, - /// The underlying block-import object to supply our produced blocks to. 
- /// This must be a `SassafrasBlockImport` or a wrapper of it, otherwise - /// critical consensus logic will be omitted. - pub block_import: I, - /// A sync oracle - pub sync_oracle: SO, - /// Hook into the sync module to control the justification sync process. - pub justification_sync_link: L, - /// Something that can create the inherent data providers. - pub create_inherent_data_providers: CIDP, - /// Force authoring of blocks even if we are offline - pub force_authoring: bool, - /// The source of timestamps for relative slots - pub sassafras_link: SassafrasLink, - /// Checks if the current native implementation can author with a runtime at a given block. - pub can_author_with: CAW, -} - -/// Start the Sassafras worker. -pub fn start_sassafras( - SassafrasParams { - client, - keystore, - select_chain, - env, - block_import, - sync_oracle, - justification_sync_link, - create_inherent_data_providers, - force_authoring, - sassafras_link, - can_author_with, - }: SassafrasParams, -) -> Result, sp_consensus::Error> -where - B: BlockT, - C: ProvideRuntimeApi - + ProvideUncles - + BlockchainEvents - + PreCommitActions - + HeaderBackend - + HeaderMetadata - + Send - + Sync - + 'static, - C::Api: SassafrasApi, - SC: SelectChain + 'static, - EN: Environment + Send + Sync + 'static, - EN::Proposer: Proposer>, - I: BlockImport> - + Send - + Sync - + 'static, - SO: SyncOracle + Send + Sync + Clone + 'static, - L: sc_consensus::JustificationSyncLink + 'static, - CIDP: CreateInherentDataProviders + Send + Sync + 'static, - CIDP::InherentDataProviders: InherentDataProviderExt + Send, - CAW: CanAuthorWith + Send + Sync + 'static, - ER: std::error::Error + Send + From + From + 'static, -{ - info!(target: "sassafras", "🌳 🍁 Starting Sassafras Authorship worker"); - - let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); - - let worker = SassafrasSlotWorker { - client: client.clone(), - block_import, - env, - sync_oracle: sync_oracle.clone(), - justification_sync_link, - 
force_authoring, - keystore: keystore.clone(), - epoch_changes: sassafras_link.epoch_changes.clone(), - slot_notification_sinks: slot_notification_sinks.clone(), - config: sassafras_link.config.clone(), - }; - - let slot_worker = sc_consensus_slots::start_slot_worker( - sassafras_link.config.slot_duration(), - select_chain.clone(), - sc_consensus_slots::SimpleSlotWorkerToSlotWorker(worker), - sync_oracle, - create_inherent_data_providers, - can_author_with, - ); - - let ticket_worker = tickets_worker( - client.clone(), - keystore, - sassafras_link.epoch_changes.clone(), - select_chain, - ); - - let inner = future::select(Box::pin(slot_worker), Box::pin(ticket_worker)); - - Ok(SassafrasWorker { inner: Box::pin(inner.map(|_| ())), slot_notification_sinks }) -} - -async fn tickets_worker( - client: Arc, - keystore: SyncCryptoStorePtr, - epoch_changes: SharedEpochChanges, - select_chain: SC, -) where - B: BlockT, - C: BlockchainEvents + ProvideRuntimeApi, - C::Api: SassafrasApi, - SC: SelectChain + 'static, -{ - let mut notifications = client.import_notification_stream(); - while let Some(notification) = notifications.next().await { - let epoch_desc = match find_next_epoch_digest::(¬ification.header) { - Ok(Some(epoch_desc)) => epoch_desc, - Err(err) => { - warn!(target: "sassafras", "🌳 Error fetching next epoch digest: {}", err); - continue - }, - _ => continue, - }; - - debug!(target: "sassafras", "🌳 New epoch annouced {:x?}", epoch_desc); - - let tickets = { - let mut epoch_changes = epoch_changes.shared_data(); - - let number = *notification.header.number(); - let position = if number == One::one() { - EpochIdentifierPosition::Genesis1 - } else { - EpochIdentifierPosition::Regular - }; - let mut epoch_identifier = - EpochIdentifier { position, hash: notification.hash, number }; - - let epoch = match epoch_changes.epoch_mut(&mut epoch_identifier) { - Some(epoch) => epoch, - None => { - warn!(target: "sassafras", "🌳 Unexpected missing epoch data for {}", 
notification.hash); - continue - }, - }; - - authorship::generate_epoch_tickets(epoch, 30, 1, &keystore) - }; - - if tickets.is_empty() { - continue - } - - // Get the best block on which we will build and send the tickets. - let best_id = match select_chain.best_chain().await { - Ok(header) => BlockId::Hash(header.hash()), - Err(err) => { - error!(target: "🌳 sassafras", "Error fetching best chain block id: {}", err); - continue - }, - }; - - let err = match client.runtime_api().submit_tickets_unsigned_extrinsic(&best_id, tickets) { - Err(err) => Some(err.to_string()), - Ok(false) => Some("Unknown reason".to_string()), - _ => None, - }; - if let Some(err) = err { - error!(target: "sassafras", "🌳 Unable to submit tickets: {}", err); - // TODO-SASS-P2: on error remove tickets from epoch... +impl Epoch { + /// Create the genesis epoch (epoch #0). This is defined to start at the slot of + /// the first block, so that has to be provided. + pub fn genesis(config: &SassafrasConfiguration, slot: Slot) -> Epoch { + Epoch { + epoch_index: 0, + start_slot: slot, + config: config.clone(), + tickets_info: BTreeMap::new(), } } } -/// Worker for Sassafras which implements `Future`. This must be polled. -pub struct SassafrasWorker { - inner: Pin + Send + 'static>>, - slot_notification_sinks: SlotNotificationSinks, -} - -impl SassafrasWorker { - /// Return an event stream of notifications for when new slot happens, and the corresponding - /// epoch descriptor. - pub fn slot_notification_stream( - &self, - ) -> Receiver<(Slot, ViableEpochDescriptor, Epoch>)> { - const CHANNEL_BUFFER_SIZE: usize = 1024; - - let (sink, stream) = channel(CHANNEL_BUFFER_SIZE); - self.slot_notification_sinks.lock().push(sink); - stream - } -} - -impl Future for SassafrasWorker { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - self.inner.as_mut().poll(cx) - } -} - -/// Slot notification sinks. 
-type SlotNotificationSinks = Arc< - Mutex::Hash, NumberFor, Epoch>)>>>, ->; - -struct SassafrasSlotWorker { - client: Arc, - block_import: I, - env: E, - sync_oracle: SO, - justification_sync_link: L, - force_authoring: bool, - keystore: SyncCryptoStorePtr, - epoch_changes: SharedEpochChanges, - slot_notification_sinks: SlotNotificationSinks, - config: Config, -} - -#[async_trait::async_trait] -impl sc_consensus_slots::SimpleSlotWorker - for SassafrasSlotWorker +/// Read latest finalized protocol configuration. +pub fn configuration(client: &C) -> ClientResult where B: BlockT, - C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata, + C: ProvideRuntimeApi + UsageProvider, C::Api: SassafrasApi, - E: Environment + Sync, - E::Proposer: Proposer>, - I: BlockImport> + Send + Sync + 'static, - SO: SyncOracle + Send + Clone + Sync, - L: sc_consensus::JustificationSyncLink, - ER: std::error::Error + Send + 'static, { - type EpochData = ViableEpochDescriptor, Epoch>; - type Claim = (PreDigest, AuthorityId); - type SyncOracle = SO; - type JustificationSyncLink = L; - type CreateProposer = - Pin> + Send + 'static>>; - type Proposer = E::Proposer; - type BlockImport = I; - - fn logging_target(&self) -> &'static str { - "sassafras" - } - - fn block_import(&mut self) -> &mut Self::BlockImport { - &mut self.block_import - } - - fn epoch_data( - &self, - parent: &B::Header, - slot: Slot, - ) -> Result { - self.epoch_changes - .shared_data() - .epoch_descriptor_for_child_of( - descendent_query(&*self.client), - &parent.hash(), - *parent.number(), - slot, - ) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? 
- .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) - } - - fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { - self.epoch_changes - .shared_data() - .viable_epoch(epoch_descriptor, |slot| { - Epoch::genesis(&self.config.genesis_config, slot) - }) - .map(|epoch| epoch.as_ref().authorities.len()) - } - - async fn claim_slot( - &self, - parent_header: &B::Header, - slot: Slot, - epoch_descriptor: &ViableEpochDescriptor, Epoch>, - ) -> Option { - debug!(target: "sassafras", "🌳 Attempting to claim slot {}", slot); - - // Get the next slot ticket from the runtime. - let block_id = BlockId::Hash(parent_header.hash()); - let ticket = self.client.runtime_api().slot_ticket(&block_id, slot).ok()?; - - // TODO-SASS-P2 - debug!(target: "sassafras", "🌳 parent {}", parent_header.hash()); - - let claim = authorship::claim_slot( - slot, - self.epoch_changes - .shared_data() - .viable_epoch(epoch_descriptor, |slot| { - Epoch::genesis(&self.config.genesis_config, slot) - })? - .as_ref(), - ticket, - &self.keystore, - ); - - if claim.is_some() { - debug!(target: "sassafras", "🌳 Claimed slot {}", slot); - } - claim - } - - fn notify_slot( - &self, - _parent_header: &B::Header, - slot: Slot, - epoch_descriptor: &ViableEpochDescriptor, Epoch>, - ) { - RetainMut::retain_mut(&mut *self.slot_notification_sinks.lock(), |sink| { - match sink.try_send((slot, epoch_descriptor.clone())) { - Ok(()) => true, - Err(e) => - if e.is_full() { - warn!(target: "sassafras", "🌳 Trying to notify a slot but the channel is full"); - true - } else { - false - }, - } - }); - } - - fn pre_digest_data(&self, _slot: Slot, claim: &Self::Claim) -> Vec { - vec![::sassafras_pre_digest(claim.0.clone())] - } - - async fn block_import_params( - &self, - header: B::Header, - header_hash: &B::Hash, - body: Vec, - storage_changes: StorageChanges<>::Transaction, B>, - (_, public): Self::Claim, - epoch_descriptor: Self::EpochData, - ) -> Result< - sc_consensus::BlockImportParams>::Transaction>, - 
sp_consensus::Error, - > { - // Sign the pre-sealed hash of the block and then add it to a digest item. - let public_type_pair = public.clone().into(); - let public = public.to_raw_vec(); - let signature = SyncCryptoStore::sign_with( - &*self.keystore, - ::ID, - &public_type_pair, - header_hash.as_ref(), - ) - .map_err(|e| sp_consensus::Error::CannotSign(public.clone(), e.to_string()))? - .ok_or_else(|| { - sp_consensus::Error::CannotSign( - public.clone(), - "Could not find key in keystore.".into(), - ) - })?; - let signature: AuthoritySignature = signature - .clone() - .try_into() - .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; - let digest_item = ::sassafras_seal(signature); - - let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); - import_block.post_digests.push(digest_item); - import_block.body = Some(body); - import_block.state_action = - StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); - import_block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box<_>, - ); - - Ok(import_block) - } - - fn force_authoring(&self) -> bool { - self.force_authoring - } - - fn should_backoff(&self, _slot: Slot, _chain_head: &B::Header) -> bool { - // TODO-SASS-P2 - false - } - - fn sync_oracle(&mut self) -> &mut Self::SyncOracle { - &mut self.sync_oracle - } - - fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink { - &mut self.justification_sync_link - } - - fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { - Box::pin( - self.env - .init(block) - .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))), - ) - } - - fn telemetry(&self) -> Option { - // TODO-SASS-P2 - None - } - - fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> Duration { - let parent_slot = find_pre_digest::(&slot_info.chain_head).ok().map(|d| d.slot); - - // TODO-SASS-P2 : clarify this field. 
 In Sassafras this is part of 'self'
-	let block_proposal_slot_portion = sc_consensus_slots::SlotProportion::new(0.5);
+	let info = client.usage_info().chain;
+	let hash = info.finalized_state.map(|(hash, _)| hash).unwrap_or_else(|| {
+		debug!(target: "sassafras", "🌳 Reading config from genesis");
+		info.genesis_hash
+	});
+
+	let config = client.runtime_api().configuration(&BlockId::Hash(hash))?;
+	Ok(config)
+}

-	sc_consensus_slots::proposing_remaining_duration(
-		parent_slot,
-		slot_info,
-		&block_proposal_slot_portion,
-		None,
-		sc_consensus_slots::SlotLenienceType::Exponential,
-		self.logging_target(),
-	)
-}
 }

+/// Intermediate value passed to block importer from authoring or validation logic.
+pub struct SassafrasIntermediate {
+	/// The epoch descriptor.
+	pub epoch_descriptor: ViableEpochDescriptor, Epoch>,
}

+/// Intermediate key for Sassafras engine.
+pub static INTERMEDIATE_KEY: &[u8] = b"sass1";
+
 /// Extract the Sassafras pre digest from the given header. Pre-runtime digests are
 /// mandatory, the function will return `Err` if none is found.
-pub fn find_pre_digest(header: &B::Header) -> Result> { +fn find_pre_digest(header: &B::Header) -> Result> { // Genesis block doesn't contain a pre digest so let's generate a // dummy one to not break any invariants in the rest of the code if header.number().is_zero() { const PROOF: &str = "zero sequence is a valid vrf output/proof; qed"; - let block_vrf_output = VRFOutput::try_from([0; VRF_OUTPUT_LENGTH]).expect(PROOF); - let block_vrf_proof = VRFProof::try_from([0; VRF_PROOF_LENGTH]).expect(PROOF); + let vrf_output = VRFOutput::try_from([0; VRF_OUTPUT_LENGTH]).expect(PROOF); + let vrf_proof = VRFProof::try_from([0; VRF_PROOF_LENGTH]).expect(PROOF); return Ok(PreDigest { authority_index: 0, slot: 0.into(), - block_vrf_output, - block_vrf_proof, + vrf_output, + vrf_proof, ticket_info: None, }) } @@ -791,639 +319,17 @@ fn find_next_epoch_digest( /// State that must be shared between the import queue and the authoring logic. #[derive(Clone)] pub struct SassafrasLink { + /// Epoch changes tree epoch_changes: SharedEpochChanges, - config: Config, + /// Startup configuration. Read from runtime at last finalized block. + genesis_config: SassafrasConfiguration, } impl SassafrasLink { - /// Get the epoch changes of this link. - pub fn epoch_changes(&self) -> &SharedEpochChanges { - &self.epoch_changes - } - /// Get the config of this link. - pub fn config(&self) -> &Config { - &self.config - } -} - -/// A verifier for Sassafras blocks. 
-pub struct SassafrasVerifier { - client: Arc, - select_chain: SelectChain, - create_inherent_data_providers: CIDP, - config: Config, - epoch_changes: SharedEpochChanges, - can_author_with: CAW, - telemetry: Option, -} - -impl SassafrasVerifier -where - Block: BlockT, - Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, - Client::Api: BlockBuilderApi + SassafrasApi, - SelectChain: sp_consensus::SelectChain, - CAW: CanAuthorWith, - CIDP: CreateInherentDataProviders, -{ - async fn check_inherents( - &self, - block: Block, - block_id: BlockId, - inherent_data: InherentData, - create_inherent_data_providers: CIDP::InherentDataProviders, - execution_context: ExecutionContext, - ) -> Result<(), Error> { - if let Err(e) = self.can_author_with.can_author_with(&block_id) { - debug!( - target: "sassafras", - "🌳 Skipping `check_inherents` as authoring version is not compatible: {}", - e, - ); - - return Ok(()) - } - - let inherent_res = self - .client - .runtime_api() - .check_inherents_with_context(&block_id, execution_context, block, inherent_data) - .map_err(Error::RuntimeApi)?; - - if !inherent_res.ok() { - for (i, e) in inherent_res.into_errors() { - match create_inherent_data_providers.try_handle_error(&i, &e).await { - Some(res) => res.map_err(|e| Error::CheckInherents(e))?, - None => return Err(Error::CheckInherentsUnhandled(i)), - } - } - } - - Ok(()) - } - - async fn check_and_report_equivocation( - &self, - slot_now: Slot, - slot: Slot, - header: &Block::Header, - author: &AuthorityId, - origin: &BlockOrigin, - ) -> Result<(), Error> { - // Don't report any equivocations during initial sync as they are most likely stale. - if *origin == BlockOrigin::NetworkInitialSync { - return Ok(()) - } - - // Check if authorship of this header is an equivocation and return a proof if so. - let equivocation_proof = - match check_equivocation(&*self.client, slot_now, slot, header, author) - .map_err(Error::Client)? 
- { - Some(proof) => proof, - None => return Ok(()), - }; - - info!( - "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", - author, - slot, - equivocation_proof.first_header.hash(), - equivocation_proof.second_header.hash(), - ); - - // Get the best block on which we will build and send the equivocation report. - let _best_id: BlockId = self - .select_chain - .best_chain() - .await - .map(|h| BlockId::Hash(h.hash())) - .map_err(|e| Error::Client(e.into()))?; - - // TODO-SASS-P2 - - Ok(()) - } -} - -type BlockVerificationResult = - Result<(BlockImportParams, Option)>>), String>; - -#[async_trait::async_trait] -impl Verifier - for SassafrasVerifier -where - Block: BlockT, - Client: HeaderMetadata - + HeaderBackend - + ProvideRuntimeApi - + Send - + Sync - + AuxStore, - Client::Api: BlockBuilderApi + SassafrasApi, - SelectChain: sp_consensus::SelectChain, - CAW: CanAuthorWith + Send + Sync, - CIDP: CreateInherentDataProviders + Send + Sync, - CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, -{ - async fn verify( - &mut self, - mut block: BlockImportParams, - ) -> BlockVerificationResult { - trace!( - target: "sassafras", - "🌳 Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", - block.origin, - block.header, - block.justifications, - block.body, - ); - - if block.with_state() { - // When importing whole state we don't calculate epoch descriptor, but rather - // read it from the state after import. We also skip all verifications - // because there's no parent state and we trust the sync module to verify - // that the state is correct and finalized. 
- return Ok((block, Default::default())) - } - - trace!(target: "sassafras", "🌳 We have {:?} logs in this header", block.header.digest().logs().len()); - - let hash = block.header.hash(); - let parent_hash = *block.header.parent_hash(); - - let create_inherent_data_providers = self - .create_inherent_data_providers - .create_inherent_data_providers(parent_hash, ()) - .await - .map_err(|e| Error::::Client(sp_consensus::Error::from(e).into()))?; - - let slot_now = create_inherent_data_providers.slot(); - - let parent_header_metadata = self - .client - .header_metadata(parent_hash) - .map_err(Error::::FetchParentHeader)?; - - let pre_digest = find_pre_digest::(&block.header)?; - - let (check_header, epoch_descriptor) = { - let epoch_changes = self.epoch_changes.shared_data(); - let epoch_descriptor = epoch_changes - .epoch_descriptor_for_child_of( - descendent_query(&*self.client), - &parent_hash, - parent_header_metadata.number, - pre_digest.slot, - ) - .map_err(|e| Error::::ForkTree(Box::new(e)))? 
- .ok_or(Error::::FetchEpoch(parent_hash))?; - let viable_epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| { - Epoch::genesis(&self.config.genesis_config, slot) - }) - .ok_or(Error::::FetchEpoch(parent_hash))?; - - let ticket = self - .client - .runtime_api() - .slot_ticket(&BlockId::Hash(parent_hash), pre_digest.slot) - .map_err(|err| err.to_string())?; - - let v_params = verification::VerificationParams { - header: block.header.clone(), - pre_digest, - slot_now, - epoch: viable_epoch.as_ref(), - ticket, - }; - - (verification::check_header::(v_params)?, epoch_descriptor) - }; - - match check_header { - CheckedHeader::Checked(pre_header, verified_info) => { - let sassafras_pre_digest = verified_info - .pre_digest - .as_sassafras_pre_digest() - .expect("check_header always returns a pre-digest digest item; qed"); - let slot = sassafras_pre_digest.slot; - - // The header is valid but let's check if there was something else already - // proposed at the same slot by the given author. If there was, we will - // report the equivocation to the runtime. - if let Err(err) = self - .check_and_report_equivocation( - slot_now, - slot, - &block.header, - &verified_info.author, - &block.origin, - ) - .await - { - warn!(target: "sassafras", "🌳 Error checking/reporting Sassafras equivocation: {}", err); - } - - // If the body is passed through, we need to use the runtime to check that the - // internally-set timestamp in the inherents actually matches the slot set in the - // seal. 
- if let Some(inner_body) = block.body { - let mut inherent_data = create_inherent_data_providers - .create_inherent_data() - .map_err(Error::::CreateInherents)?; - inherent_data.sassafras_replace_inherent_data(slot); - let new_block = Block::new(pre_header.clone(), inner_body); - - self.check_inherents( - new_block.clone(), - BlockId::Hash(parent_hash), - inherent_data, - create_inherent_data_providers, - block.origin.into(), - ) - .await?; - - let (_, inner_body) = new_block.deconstruct(); - block.body = Some(inner_body); - } - - trace!(target: "sassafras", "🌳 Checked {:?}; importing.", pre_header); - telemetry!( - self.telemetry; - CONSENSUS_TRACE; - "sassafras.checked_and_importing"; - "pre_header" => ?pre_header, - ); - - block.header = pre_header; - block.post_digests.push(verified_info.seal); - block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box<_>, - ); - block.post_hash = Some(hash); - - Ok((block, Default::default())) - }, - CheckedHeader::Deferred(a, b) => { - debug!(target: "sassafras", "🌳 Checking {:?} failed; {:?}, {:?}.", hash, a, b); - telemetry!( - self.telemetry; - CONSENSUS_DEBUG; - "sassafras.header_too_far_in_future"; - "hash" => ?hash, "a" => ?a, "b" => ?b - ); - Err(Error::::TooFarInFuture(hash).into()) - }, - } - } -} - -/// A block-import handler for Sassafras. -/// -/// This scans each imported block for epoch change announcements. The announcements are -/// tracked in a tree (of all forks), and the import logic validates all epoch change -/// transitions, i.e. whether a given epoch change is expected or whether it is missing. -/// -/// The epoch change tree should be pruned as blocks are finalized. 
-pub struct SassafrasBlockImport { - inner: I, - client: Arc, - epoch_changes: SharedEpochChanges, - config: Config, -} - -impl Clone for SassafrasBlockImport { - fn clone(&self) -> Self { - SassafrasBlockImport { - inner: self.inner.clone(), - client: self.client.clone(), - epoch_changes: self.epoch_changes.clone(), - config: self.config.clone(), - } - } -} - -impl SassafrasBlockImport { - fn new( - client: Arc, - epoch_changes: SharedEpochChanges, - block_import: I, - config: Config, - ) -> Self { - SassafrasBlockImport { client, inner: block_import, epoch_changes, config } - } -} - -#[async_trait::async_trait] -impl BlockImport for SassafrasBlockImport -where - Block: BlockT, - Inner: BlockImport> + Send + Sync, - Inner::Error: Into, - Client: HeaderBackend - + HeaderMetadata - + AuxStore - + ProvideRuntimeApi - + Send - + Sync, - Client::Api: SassafrasApi + ApiExt, -{ - type Error = ConsensusError; - type Transaction = sp_api::TransactionFor; - - async fn import_block( - &mut self, - mut block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - let hash = block.post_hash(); - let number = *block.header.number(); - - let pre_digest = find_pre_digest::(&block.header).expect( - "valid sassafras headers must contain a predigest; header has been already verified; qed", - ); - let slot = pre_digest.slot; - - let parent_hash = *block.header.parent_hash(); - let parent_header = self - .client - .header(BlockId::Hash(parent_hash)) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? 
- .ok_or_else(|| { - ConsensusError::ChainLookup( - sassafras_err(Error::::ParentUnavailable(parent_hash, hash)).into(), - ) - })?; - - let parent_slot = find_pre_digest::(&parent_header).map(|d| d.slot).expect( - "parent is non-genesis; valid Sassafras headers contain a pre-digest; \ - header has already been verified; qed", - ); - - // Make sure that slot number is strictly increasing - if slot <= parent_slot { - return Err(ConsensusError::ClientImport( - sassafras_err(Error::::SlotMustIncrease(parent_slot, slot)).into(), - )) - } - - // If there's a pending epoch we'll save the previous epoch changes here - // this way we can revert it if there's any error - let mut old_epoch_changes = None; - - // Use an extra scope to make the compiler happy, because otherwise he complains about the - // mutex, even if we dropped it... - let mut epoch_changes = { - let mut epoch_changes = self.epoch_changes.shared_data_locked(); - - // Check if there's any epoch change expected to happen at this slot. - // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true - // if this is the first block in its chain for that epoch. - // - // also provides the total weight of the chain, including the imported block. - let parent_weight = if *parent_header.number() == Zero::zero() { - 0 - } else { - aux_schema::load_block_weight(&*self.client, parent_hash) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - .ok_or_else(|| { - ConsensusError::ClientImport( - sassafras_err(Error::::ParentBlockNoAssociatedWeight(hash)) - .into(), - ) - })? - }; - - let intermediate = - block.take_intermediate::>(INTERMEDIATE_KEY)?; - - let epoch_descriptor = intermediate.epoch_descriptor; - let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); - - let added_weight = pre_digest.ticket_info.is_some() as u32; - let total_weight = parent_weight + added_weight; - - // Search for this all the time so we can reject unexpected announcements. 
- let next_epoch_digest = find_next_epoch_digest::(&block.header) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - - match (first_in_epoch, next_epoch_digest.is_some()) { - (true, false) => - return Err(ConsensusError::ClientImport( - sassafras_err(Error::::ExpectedEpochChange(hash, slot)).into(), - )), - (false, true) => - return Err(ConsensusError::ClientImport( - sassafras_err(Error::::UnexpectedEpochChange).into(), - )), - _ => (), - } - - let info = self.client.info(); - - if let Some(next_epoch_descriptor) = next_epoch_digest { - old_epoch_changes = Some((*epoch_changes).clone()); - - let viable_epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| { - Epoch::genesis(&self.config.genesis_config, slot) - }) - .ok_or_else(|| { - ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) - })?; - - // restrict info logging during initial sync to avoid spam - let log_level = if block.origin == BlockOrigin::NetworkInitialSync { - log::Level::Debug - } else { - log::Level::Info - }; - - log!(target: "sassafras", - log_level, - "🌳 🍁 New epoch {} launching at block {} (block slot {} >= start slot {}).", - viable_epoch.as_ref().epoch_index, - hash, - slot, - viable_epoch.as_ref().start_slot, - ); - - let next_epoch = viable_epoch.increment(next_epoch_descriptor); - - log!(target: "sassafras", - log_level, - "🌳 🍁 Next epoch starts at slot {}", - next_epoch.as_ref().start_slot, - ); - - // Prune the tree of epochs not part of the finalized chain or - // that are not live anymore, and then track the given epoch change - // in the tree. - // NOTE: it is important that these operations are done in this - // order, otherwise if pruning after import the `is_descendent_of` - // used by pruning may not know about the block that is being - // imported. 
- let prune_and_import = || { - prune_finalized(self.client.clone(), &mut epoch_changes)?; - - epoch_changes - .import( - descendent_query(&*self.client), - hash, - number, - *block.header.parent_hash(), - next_epoch, - ) - .map_err(|e| { - ConsensusError::ClientImport(format!( - "Error importing epoch changes: {}", - e - )) - })?; - - Ok(()) - }; - - if let Err(e) = prune_and_import() { - debug!(target: "sassafras", "🌳 Failed to launch next epoch: {}", e); - *epoch_changes = - old_epoch_changes.expect("set `Some` above and not taken; qed"); - return Err(e) - } - - aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { - block - .auxiliary - .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) - }); - } - - aux_schema::write_block_weight(hash, total_weight, |values| { - block - .auxiliary - .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) - }); - - // The fork choice rule is that we pick the heaviest chain (i.e. - // more primary blocks), if there's a tie we go with the longest - // chain. - block.fork_choice = { - let (last_best, last_best_number) = (info.best_hash, info.best_number); - - let last_best_weight = if &last_best == block.header.parent_hash() { - // the parent=genesis case is already covered for loading parent weight, - // so we don't need to cover again here. - parent_weight - } else { - aux_schema::load_block_weight(&*self.client, last_best) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? - .ok_or_else(|| { - ConsensusError::ChainLookup( - "No block weight for parent header.".to_string(), - ) - })? 
- }; - - Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { - true - } else if total_weight == last_best_weight { - number > last_best_number - } else { - false - })) - }; - // Release the mutex, but it stays locked - epoch_changes.release_mutex() - }; - - let import_result = self.inner.import_block(block, new_cache).await; - - // Revert to the original epoch changes in case there's an error - // importing the block - if import_result.is_err() { - if let Some(old_epoch_changes) = old_epoch_changes { - *epoch_changes.upgrade() = old_epoch_changes; - } - } - - import_result.map_err(Into::into) - } - - async fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.inner.check_block(block).await.map_err(Into::into) - } -} - -/// Gets the best finalized block and its slot, and prunes the given epoch tree. -fn prune_finalized( - client: Arc, - epoch_changes: &mut EpochChangesFor, -) -> Result<(), ConsensusError> -where - B: BlockT, - C: HeaderBackend + HeaderMetadata, -{ - let info = client.info(); - if info.block_gap.is_none() { - epoch_changes.clear_gap(); + pub fn genesis_config(&self) -> &SassafrasConfiguration { + &self.genesis_config } - - let finalized_slot = { - let finalized_header = client - .header(BlockId::Hash(info.finalized_hash)) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - .expect( - "best finalized hash was given by client; finalized headers must exist in db; qed", - ); - - find_pre_digest::(&finalized_header) - .expect("finalized header must be valid; valid blocks have a pre-digest; qed") - .slot - }; - - epoch_changes - .prune_finalized( - descendent_query(&*client), - &info.finalized_hash, - info.finalized_number, - finalized_slot, - ) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - - Ok(()) -} - -/// Produce a Sassafras block-import object to be used later on in the construction of -/// an import-queue. 
-/// -/// Also returns a link object used to correctly instantiate the import queue -/// and background worker. -pub fn block_import( - config: Config, - wrapped_block_import: I, - client: Arc, -) -> ClientResult<(SassafrasBlockImport, SassafrasLink)> -where - C: AuxStore + HeaderBackend + HeaderMetadata + 'static, -{ - let epoch_changes = aux_schema::load_epoch_changes::(&*client)?; - - let link = SassafrasLink { epoch_changes: epoch_changes.clone(), config: config.clone() }; - - // NOTE: this isn't entirely necessary, but since we didn't use to prune the - // epoch tree it is useful as a migration, so that nodes prune long trees on - // startup rather than waiting until importing the next epoch change block. - prune_finalized(client.clone(), &mut epoch_changes.shared_data())?; - - let import = SassafrasBlockImport::new(client, epoch_changes, wrapped_block_import, config); - - Ok((import, link)) } /// Start an import queue for the Sassafras consensus algorithm. @@ -1434,9 +340,9 @@ where /// /// The block import object provided must be the `SassafrasBlockImport` or a wrapper of it, /// otherwise crucial import logic will be omitted. 
-pub fn import_queue( +pub fn import_queue( sassafras_link: SassafrasLink, - block_import: Inner, + block_import: BI, justification_import: Option>, client: Arc, select_chain: SelectChain, @@ -1447,13 +353,6 @@ pub fn import_queue( telemetry: Option, ) -> ClientResult> where - Inner: BlockImport< - Block, - Error = ConsensusError, - Transaction = sp_api::TransactionFor, - > + Send - + Sync - + 'static, Client: ProvideRuntimeApi + HeaderBackend + HeaderMetadata @@ -1462,20 +361,27 @@ where + Sync + 'static, Client::Api: BlockBuilderApi + SassafrasApi + ApiExt, + BI: BlockImport< + Block, + Error = ConsensusError, + Transaction = sp_api::TransactionFor, + > + Send + + Sync + + 'static, SelectChain: sp_consensus::SelectChain + 'static, CAW: CanAuthorWith + Send + Sync + 'static, CIDP: CreateInherentDataProviders + Send + Sync + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { - let verifier = SassafrasVerifier { + let verifier = SassafrasVerifier::new( + client, select_chain, create_inherent_data_providers, - config: sassafras_link.config, - epoch_changes: sassafras_link.epoch_changes, + sassafras_link.epoch_changes, can_author_with, telemetry, - client, - }; + sassafras_link.genesis_config, + ); Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry)) } diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 3c4dbef92f01a..b162fe390ef03 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -16,17 +16,9 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Verification for Sassafras headers. 
- -use super::{authorship, sassafras_err, BlockT, Epoch, Error}; -use sc_consensus_slots::CheckedHeader; -use sp_consensus_sassafras::{ - digests::{CompatibleDigestItem, PreDigest}, - make_slot_transcript, make_ticket_transcript, AuthorityId, AuthorityPair, Ticket, -}; -use sp_consensus_slots::Slot; -use sp_core::{ByteArray, Pair}; -use sp_runtime::{traits::Header, DigestItem}; +//! Types and functions related to block verification. + +use super::*; // Allowed slot drift. const MAX_SLOT_DRIFT: u64 = 1; @@ -64,13 +56,14 @@ pub fn check_header( params: VerificationParams, ) -> Result, Error> { let VerificationParams { mut header, pre_digest, slot_now, epoch, ticket } = params; + let config = &epoch.config; // Check that the slot is not in the future, with some drift being allowed. if pre_digest.slot > slot_now + MAX_SLOT_DRIFT { return Ok(CheckedHeader::Deferred(header, pre_digest.slot)) } - let author = match epoch.authorities.get(pre_digest.authority_index as usize) { + let author = match config.authorities.get(pre_digest.authority_index as usize) { Some(author) => author.0.clone(), None => return Err(sassafras_err(Error::SlotAuthorNotFound)), }; @@ -100,18 +93,15 @@ pub fn check_header( // TODO-SASS-P2 ... 
we can eventually remove auth index from ticket info log::error!(target: "sassafras", "🌳 Wrong primary authority index"); } - let transcript = make_ticket_transcript( - &epoch.randomness, - ticket_info.attempt as u64, - epoch.epoch_index, - ); + let transcript = + make_ticket_transcript(&config.randomness, ticket_info.attempt, epoch.epoch_index); schnorrkel::PublicKey::from_bytes(author.as_slice()) .and_then(|p| p.vrf_verify(transcript, &ticket, &ticket_info.proof)) .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; }, (None, None) => { log::debug!(target: "sassafras", "🌳 checking secondary"); - let idx = authorship::secondary_authority_index(pre_digest.slot, params.epoch); + let idx = authorship::secondary_authority_index(pre_digest.slot, config); if idx != pre_digest.authority_index as u64 { log::error!(target: "sassafras", "🌳 Wrong secondary authority index"); } @@ -128,13 +118,11 @@ pub fn check_header( }, } - // Check block-vrf proof + // Check slot-vrf proof - let transcript = make_slot_transcript(&epoch.randomness, pre_digest.slot, epoch.epoch_index); + let transcript = make_slot_transcript(&config.randomness, pre_digest.slot, epoch.epoch_index); schnorrkel::PublicKey::from_bytes(author.as_slice()) - .and_then(|p| { - p.vrf_verify(transcript, &pre_digest.block_vrf_output, &pre_digest.block_vrf_proof) - }) + .and_then(|p| p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof)) .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; let info = VerifiedHeaderInfo { @@ -145,3 +133,300 @@ pub fn check_header( Ok(CheckedHeader::Checked(header, info)) } + +/// A verifier for Sassafras blocks. +pub struct SassafrasVerifier { + client: Arc, + select_chain: SelectChain, + create_inherent_data_providers: CIDP, + epoch_changes: SharedEpochChanges, + can_author_with: CAW, + telemetry: Option, + genesis_config: SassafrasConfiguration, +} + +impl + SassafrasVerifier +{ + /// Constructor. 
+ pub fn new( + client: Arc, + select_chain: SelectChain, + create_inherent_data_providers: CIDP, + epoch_changes: SharedEpochChanges, + can_author_with: CAW, + telemetry: Option, + genesis_config: SassafrasConfiguration, + ) -> Self { + SassafrasVerifier { + client, + select_chain, + create_inherent_data_providers, + epoch_changes, + can_author_with, + telemetry, + genesis_config, + } + } +} + +impl SassafrasVerifier +where + Block: BlockT, + Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, + Client::Api: BlockBuilderApi + SassafrasApi, + SelectChain: sp_consensus::SelectChain, + CAW: CanAuthorWith, + CIDP: CreateInherentDataProviders, +{ + async fn check_inherents( + &self, + block: Block, + block_id: BlockId, + inherent_data: InherentData, + create_inherent_data_providers: CIDP::InherentDataProviders, + execution_context: ExecutionContext, + ) -> Result<(), Error> { + if let Err(e) = self.can_author_with.can_author_with(&block_id) { + debug!( + target: "sassafras", + "🌳 Skipping `check_inherents` as authoring version is not compatible: {}", + e, + ); + + return Ok(()) + } + + let inherent_res = self + .client + .runtime_api() + .check_inherents_with_context(&block_id, execution_context, block, inherent_data) + .map_err(Error::RuntimeApi)?; + + if !inherent_res.ok() { + for (i, e) in inherent_res.into_errors() { + match create_inherent_data_providers.try_handle_error(&i, &e).await { + Some(res) => res.map_err(|e| Error::CheckInherents(e))?, + None => return Err(Error::CheckInherentsUnhandled(i)), + } + } + } + + Ok(()) + } + + async fn check_and_report_equivocation( + &self, + slot_now: Slot, + slot: Slot, + header: &Block::Header, + author: &AuthorityId, + origin: &BlockOrigin, + ) -> Result<(), Error> { + // Don't report any equivocations during initial sync as they are most likely stale. 
+ if *origin == BlockOrigin::NetworkInitialSync { + return Ok(()) + } + + // Check if authorship of this header is an equivocation and return a proof if so. + let equivocation_proof = + match check_equivocation(&*self.client, slot_now, slot, header, author) + .map_err(Error::Client)? + { + Some(proof) => proof, + None => return Ok(()), + }; + + info!( + "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", + author, + slot, + equivocation_proof.first_header.hash(), + equivocation_proof.second_header.hash(), + ); + + // Get the best block on which we will build and send the equivocation report. + let _best_id: BlockId = self + .select_chain + .best_chain() + .await + .map(|h| BlockId::Hash(h.hash())) + .map_err(|e| Error::Client(e.into()))?; + + // TODO-SASS-P2 + + Ok(()) + } +} + +type BlockVerificationResult = + Result<(BlockImportParams, Option)>>), String>; + +#[async_trait::async_trait] +impl Verifier + for SassafrasVerifier +where + Block: BlockT, + Client: HeaderMetadata + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + AuxStore, + Client::Api: BlockBuilderApi + SassafrasApi, + SelectChain: sp_consensus::SelectChain, + CAW: CanAuthorWith + Send + Sync, + CIDP: CreateInherentDataProviders + Send + Sync, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, +{ + async fn verify( + &mut self, + mut block: BlockImportParams, + ) -> BlockVerificationResult { + trace!( + target: "sassafras", + "🌳 Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", + block.origin, + block.header, + block.justifications, + block.body, + ); + + if block.with_state() { + // When importing whole state we don't calculate epoch descriptor, but rather + // read it from the state after import. We also skip all verifications + // because there's no parent state and we trust the sync module to verify + // that the state is correct and finalized. 
+ return Ok((block, Default::default())) + } + + trace!(target: "sassafras", "🌳 We have {:?} logs in this header", block.header.digest().logs().len()); + + let hash = block.header.hash(); + let parent_hash = *block.header.parent_hash(); + + let create_inherent_data_providers = self + .create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .await + .map_err(|e| Error::::Client(sp_consensus::Error::from(e).into()))?; + + let slot_now = create_inherent_data_providers.slot(); + + let parent_header_metadata = self + .client + .header_metadata(parent_hash) + .map_err(Error::::FetchParentHeader)?; + + let pre_digest = find_pre_digest::(&block.header)?; + + let (check_header, epoch_descriptor) = { + let epoch_changes = self.epoch_changes.shared_data(); + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_header_metadata.number, + pre_digest.slot, + ) + .map_err(|e| Error::::ForkTree(Box::new(e)))? + .ok_or(Error::::FetchEpoch(parent_hash))?; + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot)) + .ok_or(Error::::FetchEpoch(parent_hash))?; + + let ticket = self + .client + .runtime_api() + .slot_ticket(&BlockId::Hash(parent_hash), pre_digest.slot) + .map_err(|err| err.to_string())?; + + let v_params = VerificationParams { + header: block.header.clone(), + pre_digest, + slot_now, + epoch: viable_epoch.as_ref(), + ticket, + }; + + (check_header::(v_params)?, epoch_descriptor) + }; + + match check_header { + CheckedHeader::Checked(pre_header, verified_info) => { + let sassafras_pre_digest = verified_info + .pre_digest + .as_sassafras_pre_digest() + .expect("check_header always returns a pre-digest digest item; qed"); + let slot = sassafras_pre_digest.slot; + + // The header is valid but let's check if there was something else already + // proposed at the same slot by the given author. 
If there was, we will + // report the equivocation to the runtime. + if let Err(err) = self + .check_and_report_equivocation( + slot_now, + slot, + &block.header, + &verified_info.author, + &block.origin, + ) + .await + { + warn!(target: "sassafras", "🌳 Error checking/reporting Sassafras equivocation: {}", err); + } + + // If the body is passed through, we need to use the runtime to check that the + // internally-set timestamp in the inherents actually matches the slot set in the + // seal. + if let Some(inner_body) = block.body { + let mut inherent_data = create_inherent_data_providers + .create_inherent_data() + .map_err(Error::::CreateInherents)?; + inherent_data.sassafras_replace_inherent_data(slot); + let new_block = Block::new(pre_header.clone(), inner_body); + + self.check_inherents( + new_block.clone(), + BlockId::Hash(parent_hash), + inherent_data, + create_inherent_data_providers, + block.origin.into(), + ) + .await?; + + let (_, inner_body) = new_block.deconstruct(); + block.body = Some(inner_body); + } + + trace!(target: "sassafras", "🌳 Checked {:?}; importing.", pre_header); + telemetry!( + self.telemetry; + CONSENSUS_TRACE; + "sassafras.checked_and_importing"; + "pre_header" => ?pre_header, + ); + + block.header = pre_header; + block.post_digests.push(verified_info.seal); + block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box<_>, + ); + block.post_hash = Some(hash); + + Ok((block, Default::default())) + }, + CheckedHeader::Deferred(a, b) => { + debug!(target: "sassafras", "🌳 Checking {:?} failed; {:?}, {:?}.", hash, a, b); + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "sassafras.header_too_far_in_future"; + "hash" => ?hash, "a" => ?a, "b" => ?b + ); + Err(Error::::TooFarInFuture(hash).into()) + }, + } + } +} diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index 1d3839a9dcfb9..fc0c1940cc50d 100644 --- a/frame/sassafras/Cargo.toml +++ 
b/frame/sassafras/Cargo.toml @@ -23,7 +23,6 @@ pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = ".. scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } sp-application-crypto = { version = "6.0.0", default-features = false, path = "../../primitives/application-crypto" } sp-consensus-sassafras = { version = "0.1.0", default-features = false, path = "../../primitives/consensus/sassafras" } -sp-consensus-vrf = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/vrf" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } @@ -31,6 +30,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives [dev-dependencies] sp-core = { version = "6.0.0", path = "../../primitives/core" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } +hex-literal = "0.3" [features] default = ["std"] @@ -45,7 +45,6 @@ std = [ "scale-info/std", "sp-application-crypto/std", "sp-consensus-sassafras/std", - "sp-consensus-vrf/std", "sp-io/std", "sp-runtime/std", "sp-std/std", diff --git a/frame/sassafras/src/benchmarking.rs b/frame/sassafras/src/benchmarking.rs new file mode 100644 index 0000000000000..2f1818e5b52cd --- /dev/null +++ b/frame/sassafras/src/benchmarking.rs @@ -0,0 +1,54 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for the Sassafras pallet. + +use super::*; +use frame_benchmarking::benchmarks; +use frame_system::RawOrigin; +use sp_io::hashing; + +fn make_dummy_ticket(i: usize) -> Ticket { + let buf = i.to_le_bytes(); + hashing::twox_256(&buf).try_into().unwrap() +} + +benchmarks! { + submit_tickets { + let x in 0 .. 100; + + // Almost fill the available tickets space. + + let max_tickets: u32 = ::MaxTickets::get() - 10; + let tickets: Vec = (0..max_tickets as usize).into_iter().map(|i| { + make_dummy_ticket(i) + }).collect(); + let _ = Pallet::::submit_tickets(RawOrigin::None.into(), tickets); + + // Create the tickets to submit during the benchmark + + let tickets: Vec = (0..x as usize).into_iter().map(|i| { + make_dummy_ticket(i + max_tickets as usize) + }).collect(); + }: _(RawOrigin::None, tickets) + + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(3), + crate::mock::Test, + ) +} diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 06155ec86877d..31678a6199ec7 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -1,4 +1,4 @@ -// Sassafras This file is part of Substrate. +// This file is part of Substrate. // Copyright (C) 2022 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Consensus extension module for Sassafras consensus. +//! Extension module for Sassafras consensus. //! //! 
Sassafras is a constant-time block production protocol that aims to ensure that //! there is exactly one block produced with constant time intervals rather multiple @@ -47,12 +47,16 @@ #![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -use scale_codec::{Decode, Encode}; +use scale_codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; -use frame_support::{traits::Get, weights::Weight, BoundedBTreeSet, BoundedVec, WeakBoundedVec}; +use frame_support::{traits::Get, weights::Weight, BoundedVec, WeakBoundedVec}; use frame_system::offchain::{SendTransactionTypes, SubmitTransaction}; -use sp_application_crypto::ByteArray; -use sp_consensus_vrf::schnorrkel; +use sp_consensus_sassafras::{ + digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, + AuthorityId, Randomness, SassafrasAuthorityWeight, SassafrasEpochConfiguration, Slot, Ticket, + SASSAFRAS_ENGINE_ID, +}; use sp_runtime::{ generic::DigestItem, traits::{One, Saturating}, @@ -60,53 +64,27 @@ use sp_runtime::{ }; use sp_std::prelude::Vec; -pub use sp_consensus_sassafras::{ - digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, - AuthorityId, SassafrasAuthorityWeight, SassafrasEpochConfiguration, Slot, Ticket, - PUBLIC_KEY_LENGTH, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, -}; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(all(feature = "std", test))] +mod mock; +#[cfg(all(feature = "std", test))] +mod tests; -// TODO-SASS-P2: tests and benches - -//#[cfg(test)] -//mod mock; -// -//#[cfg(test)] -//mod tests; -// -//#[cfg(feature = "runtime-benchmarks")] -//mod benchmarking; +pub mod session; pub use pallet::*; -/// Trigger an epoch change, if any should take place. -pub trait EpochChangeTrigger { - /// Trigger an epoch change, if any should take place. This should be called - /// during every block, after initialization is done. 
- fn trigger(now: T::BlockNumber); -} - -/// A type signifying to Sassafras that an external trigger for epoch changes -/// (e.g. pallet-session) is used. -pub struct ExternalTrigger; - -impl EpochChangeTrigger for ExternalTrigger { - fn trigger(_: T::BlockNumber) {} // nothing - trigger is external. -} - -/// A type signifying to Sassafras that it should perform epoch changes with an internal -/// trigger, recycling the same authorities forever. -pub struct SameAuthoritiesForever; - -impl EpochChangeTrigger for SameAuthoritiesForever { - fn trigger(now: T::BlockNumber) { - if >::should_epoch_change(now) { - let authorities = >::authorities(); - let next_authorities = authorities.clone(); - - >::enact_epoch_change(authorities, next_authorities); - } - } +/// Tickets related metadata that is commonly used together. +#[derive(Debug, Default, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, Clone, Copy)] +pub struct TicketsMetadata { + /// Number of tickets available for even and odd session indices respectivelly. + /// I.e. the index is computed as session-index modulo 2. + pub tickets_count: [u32; 2], + /// Number of tickets segments + pub segments_count: u32, + /// Last segment has been already sorted + pub sort_started: bool, } #[frame_support::pallet] @@ -152,10 +130,6 @@ pub mod pallet { /// Max number of tickets that are considered for each epoch. #[pallet::constant] type MaxTickets: Get; - - /// Max number of tickets that we are going to consider for each epoch. - #[pallet::constant] - type MaxSubmittedTickets: Get; } // TODO-SASS-P2 @@ -212,37 +186,61 @@ pub mod pallet { /// adversary, for purposes such as public-coin zero-knowledge proofs. #[pallet::storage] #[pallet::getter(fn randomness)] - pub type Randomness = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + pub type CurrentRandomness = StorageValue<_, Randomness, ValueQuery>; /// Next epoch randomness. 
#[pallet::storage] - pub type NextRandomness = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + pub type NextRandomness = StorageValue<_, Randomness, ValueQuery>; - /// Current epoch randomness accumulator. + /// Randomness accumulator. #[pallet::storage] - pub type RandomnessAccumulator = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + pub type RandomnessAccumulator = StorageValue<_, Randomness, ValueQuery>; /// Temporary value (cleared at block finalization) which is `Some` /// if per-block initialization has already been called for current block. #[pallet::storage] #[pallet::getter(fn initialized)] - pub type Initialized = StorageValue<_, Option>; + pub type Initialized = StorageValue<_, PreDigest>; + + /// The configuration for the current epoch. + #[pallet::storage] + #[pallet::getter(fn config)] + pub type EpochConfig = StorageValue<_, SassafrasEpochConfiguration, ValueQuery>; - /// The configuration for the current epoch. Should never be `None` as it is initialized in - /// genesis. + /// The configuration for the next epoch. + #[pallet::storage] + pub type NextEpochConfig = StorageValue<_, SassafrasEpochConfiguration>; + + /// Pending epoch configuration change that will be set as `NextEpochConfig` when the next + /// epoch is enacted. + /// TODO-SASS-P2: better doc? Double check if next epoch tickets were computed using NextEpoch + /// params in the native ecode. + /// In other words a config change submitted during session N will be enacted on session N+2. + /// This is to maintain coherence for already submitted tickets for epoch N+1 that where + /// computed using configuration parameters stored for session N+1. #[pallet::storage] - pub type EpochConfig = StorageValue<_, SassafrasEpochConfiguration>; + pub(super) type PendingEpochConfigChange = StorageValue<_, SassafrasEpochConfiguration>; - /// Current session tickets. + /// Stored tickets metadata. 
#[pallet::storage] - pub type Tickets = StorageValue<_, BoundedVec, ValueQuery>; + pub type TicketsMeta = StorageValue<_, TicketsMetadata, ValueQuery>; + + /// Tickets to be used for current and next session. + /// The key consists of a + /// - `u8` equal to session-index mod 2 + /// - `u32` equal to the slot-index. + #[pallet::storage] + pub type Tickets = StorageMap<_, Identity, (u8, u32), Ticket>; + + // /// Next session tickets temporary accumulator length. + // #[pallet::storage] + // pub type NextTicketsSegmentsCount = StorageValue<_, u32, ValueQuery>; - /// Next session tickets. - // TODO-SASS-P2: probably the best thing is to store the tickets in a map - // Each map entry contains a vector of tickets as they are received. + /// Next session tickets temporary accumulator. + /// Special u32::MAX key is reserved for partially sorted segment. #[pallet::storage] - pub type NextTickets = - StorageValue<_, BoundedBTreeSet, ValueQuery>; + pub type NextTicketsSegments = + StorageMap<_, Identity, u32, BoundedVec, ValueQuery>; /// Genesis configuration for Sassafras protocol. #[cfg_attr(feature = "std", derive(Default))] @@ -251,16 +249,14 @@ pub mod pallet { /// Genesis authorities. pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, /// Genesis epoch configuration. - pub epoch_config: Option, + pub epoch_config: SassafrasEpochConfiguration, } #[pallet::genesis_build] impl GenesisBuild for GenesisConfig { fn build(&self) { Pallet::::initialize_genesis_authorities(&self.authorities); - EpochConfig::::put( - self.epoch_config.clone().expect("epoch_config must not be None"), - ); + EpochConfig::::put(self.epoch_config.clone()); } } @@ -277,88 +273,50 @@ pub mod pallet { // At the end of the block, we can safely include the new VRF output from // this block into the randomness accumulator. 
If we've determined // that this block was the first in a new epoch, the changeover logic has - // already occurred at this point, so the under-construction randomness - // will only contain outputs from the right epoch. - // TODO-SASS-P2: maybe here we can `expect` that is initialized (panic if not) - if let Some(pre_digest) = Initialized::::take().flatten() { - let authority_index = pre_digest.authority_index; - - let randomness: Option = Authorities::::get() - .get(authority_index as usize) - .and_then(|(authority, _)| { - schnorrkel::PublicKey::from_bytes(authority.as_slice()).ok() - }) - .and_then(|pubkey| { - let current_slot = CurrentSlot::::get(); - - let transcript = sp_consensus_sassafras::make_slot_transcript( - &Self::randomness(), - current_slot, - EpochIndex::::get(), - ); - - let vrf_output = pre_digest.block_vrf_output; - - // This has already been verified by the client on block import. - debug_assert!(pubkey - .vrf_verify( - transcript.clone(), - &vrf_output, - &pre_digest.block_vrf_proof - ) - .is_ok()); - - vrf_output.0.attach_input_hash(&pubkey, transcript).ok() - }) - .map(|inout| { - inout.make_bytes(sp_consensus_sassafras::SASSAFRAS_BLOCK_VRF_PREFIX) - }); - - // TODO-SASS-P2: this should be infallible. Randomness should be always deposited. - // Eventually better to panic here? - if let Some(randomness) = randomness { - Self::deposit_randomness(&randomness); - } - } + // already occurred at this point, so the + let pre_digest = Initialized::::take() + .expect("Finalization is called after initialization; qed."); + Self::deposit_randomness(pre_digest.vrf_output.as_bytes()); } } #[pallet::call] impl Pallet { /// Submit next epoch tickets. + /// TODO-SASS-P3: this is an unsigned extrinsic. Can we remov ethe weight? 
#[pallet::weight(10_000)] - pub fn submit_tickets(origin: OriginFor, tickets: Vec) -> DispatchResult { + pub fn submit_tickets( + origin: OriginFor, + tickets: BoundedVec, + ) -> DispatchResult { ensure_none(origin)?; + let mut metadata = TicketsMeta::::get(); + log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ received {} tickets", tickets.len()); - // We have to traverse the tickets list one by one to verify the SNARK proofs. - let mut next_tickets = NextTickets::::get(); - - // 1. validate proof - // 2. append to sorted list - // TODO-SASS-P2: use a scattered structure for tickets - next_tickets = next_tickets.try_mutate(|tree| { - for ticket in tickets.iter() { - tree.insert(*ticket); - } - let max_tickets = T::MaxTickets::get() as usize; - if tree.len() > max_tickets { - // Remove the mid values - // TODO-SASS-P2: with the new structure this will be reimplemented... - let diff = tree.len() - max_tickets; - let off = max_tickets / 2; - let val = tree.iter().nth(off).cloned().unwrap(); - let mut mid = tree.split_off(&val); - let val = mid.iter().nth(diff).cloned().unwrap(); - let mut tail = mid.split_off(&val); - tree.append(&mut tail); - log::warn!(target: "sassafras", "🌳 TICKETS OVERFLOW, drop {} tickets... (len = {})", diff, tree.len()); - } - }).expect("Tickets list len is within the allowed bounds; qed."); - - NextTickets::::put(next_tickets); + // We just require a unique key to save the partial tickets list. + metadata.segments_count += 1; + NextTicketsSegments::::insert(metadata.segments_count, tickets); + TicketsMeta::::set(metadata); + Ok(()) + } + /// Plan an epoch config change. The epoch config change is recorded and will be enacted on + /// the next call to `enact_session_change`. The config will be activated one epoch after. + /// Multiple calls to this method will replace any existing planned config change that had + /// not been enacted yet. 
+ #[pallet::weight(10_000)] + pub fn plan_config_change( + origin: OriginFor, + config: SassafrasEpochConfiguration, + ) -> DispatchResult { + ensure_root(origin)?; + ensure!( + config.redundancy_factor != 0 && config.attempts_number != 0, + Error::::InvalidConfiguration + ); + PendingEpochConfigChange::::put(config); Ok(()) } } @@ -384,6 +342,10 @@ pub mod pallet { // submit our tickets if we don't have enough authoring slots. // If we have 0 slots => we have zero chances. // Maybe this is one valid reason to introduce proxies. + // In short the question is >>> WHO HAS THE RIGHT TO SUBMIT A TICKET? <<< + // A) The current epoch validators + // B) The next epoch validators + // C) Doesn't matter as far as the tickets are good (i.e. RVRF verify is ok) log::warn!( target: "sassafras::runtime", "🌳 Rejecting unsigned transaction from external sources.", @@ -392,7 +354,9 @@ pub mod pallet { } // Current slot should be less than half of epoch duration. - if Self::current_slot_epoch_index() >= T::EpochDuration::get() / 2 { + let epoch_duration = T::EpochDuration::get(); + + if Self::current_slot_epoch_index() >= epoch_duration / 2 { log::warn!( target: "sassafras::runtime", "🌳 Timeout to propose tickets, bailing out.", @@ -400,10 +364,27 @@ pub mod pallet { return InvalidTransaction::Stale.into() } - // TODO-SASS-P2 more validation steps: - // 1. epoch index - // 2. signed by an authority for current epoch - // 3. single submission attempt from validator? + // Check tickets are below threshold + + let next_auth = NextAuthorities::::get(); + let epoch_config = EpochConfig::::get(); + let threshold = sp_consensus_sassafras::compute_threshold( + epoch_config.redundancy_factor, + epoch_duration as u32, + epoch_config.attempts_number, + next_auth.len() as u32, + ); + + // TODO-SASS-P2: if we move this in the `submit_tickets` call then we can + // can drop only the invalid tickets. 
+ // In this way we don't penalize validators that submit tickets together + // with faulty validators. + if !tickets + .iter() + .all(|ticket| sp_consensus_sassafras::check_threshold(ticket, threshold)) + { + return InvalidTransaction::Custom(0).into() + } ValidTransaction::with_tag_prefix("Sassafras") // We assign the maximum priority for any equivocation report. @@ -411,8 +392,8 @@ pub mod pallet { // TODO-SASS-P2: if possible use a more efficient way to distinquish // duplicates... .and_provides(tickets) - // TODO-SASS-P2: this should be set such that it is discarded after the first - // half + // TODO-SASS-P2: this sholot_tld be set such that it is discarded after the + // first half .longevity(3_u64) .propagate(true) .build() @@ -435,7 +416,7 @@ impl Pallet { /// Determine whether an epoch change should take place at this block. /// Assumes that initialization has already taken place. - pub fn should_epoch_change(now: T::BlockNumber) -> bool { + pub fn should_end_session(now: T::BlockNumber) -> bool { // The epoch has technically ended during the passage of time between this block and the // last, but we have to "end" the epoch now, since there is no earlier possible block we // could have done it. @@ -443,6 +424,11 @@ impl Pallet { // The exception is for block 1: the genesis has slot 0, so we treat epoch 0 as having // started at the slot of block 1. We want to use the same randomness and validator set as // signalled in the genesis, so we don't rotate the epoch. + + // TODO-SASS-P2 + // Is now != One required??? + // What if we want epochs with len = 1. In this case we doesn't change epoch correctly + // in slot 1. now != One::one() && Self::current_slot_epoch_index() >= T::EpochDuration::get() } @@ -451,100 +437,106 @@ impl Pallet { } fn slot_epoch_index(slot: Slot) -> u64 { - if *GenesisSlot::::get() == 0 { - return 0 - } + // TODO-SASS-P2 : is this required? 
+ // if *GenesisSlot::::get() == 0 { + // return 0 + // } *slot.saturating_sub(Self::current_epoch_start()) } - /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_epoch_change` + /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_end_session` /// has returned `true`, and the caller is the only caller of this function. /// /// Typically, this is not handled directly by the user, but by higher-level validator-set /// manager logic like `pallet-session`. - pub fn enact_epoch_change( + /// + /// TODO-SASS-P3: + /// If we detect one or more skipped epochs the policy is to use the authorities and values + /// from the first skipped epoch. + /// Should the tickets be invalidated? Currently they are... see the `get-ticket` method. + pub(crate) fn enact_session_change( authorities: WeakBoundedVec<(AuthorityId, SassafrasAuthorityWeight), T::MaxAuthorities>, next_authorities: WeakBoundedVec< (AuthorityId, SassafrasAuthorityWeight), T::MaxAuthorities, >, ) { - // TODO-SASS-P2: we don't depend on session module... - - // PRECONDITION: caller has done initialization and is guaranteed by the session module to - // be called before this. + // PRECONDITION: caller has done initialization. + // If using the internal trigger or the session pallet then this is guaranteed. 
debug_assert!(Self::initialized().is_some()); + // Update authorities + Authorities::::put(authorities); + NextAuthorities::::put(&next_authorities); + // Update epoch index - let epoch_index = EpochIndex::::get() + let mut epoch_idx = EpochIndex::::get() .checked_add(1) .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); - EpochIndex::::put(epoch_index); - // Update authorities - Authorities::::put(authorities); - NextAuthorities::::put(&next_authorities); + let slot_idx = CurrentSlot::::get().saturating_sub(Self::epoch_start(epoch_idx)); + if slot_idx >= T::EpochDuration::get() { + // Detected one or more skipped epochs, kill tickets and recompute the `epoch_index`. + TicketsMeta::::kill(); + // TODO-SASS-P2: adjust epoch index (TEST ME) + let idx: u64 = slot_idx.into(); + epoch_idx += idx / T::EpochDuration::get(); + } + EpochIndex::::put(epoch_idx); - // Update epoch randomness. - let next_epoch_index = epoch_index + let next_epoch_index = epoch_idx .checked_add(1) .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); - // Returns randomness for the current epoch and computes the *next* - // epoch randomness. - let randomness = Self::randomness_change_epoch(next_epoch_index); - Randomness::::put(randomness); + // Updates current epoch randomness and computes the *next* epoch randomness. + let next_randomness = Self::update_randomness(next_epoch_index); - // // Update the start blocks of the previous and new current epoch. 
- // >::mutate(|(previous_epoch_start_block, current_epoch_start_block)| { - // *previous_epoch_start_block = sp_std::mem::take(current_epoch_start_block); - // *current_epoch_start_block = >::block_number(); - // }); + if let Some(config) = NextEpochConfig::::take() { + EpochConfig::::put(config); + } + + let next_config = PendingEpochConfigChange::::take(); + if let Some(next_config) = next_config.clone() { + NextEpochConfig::::put(next_config); + } // After we update the current epoch, we signal the *next* epoch change // so that nodes can track changes. - - let next_randomness = NextRandomness::::get(); - let next_epoch = NextEpochDescriptor { authorities: next_authorities.to_vec(), randomness: next_randomness, + config: next_config, }; Self::deposit_consensus(ConsensusLog::NextEpochData(next_epoch)); - // if let Some(next_config) = NextEpochConfig::::get() { - // EpochConfig::::put(next_config); - // } - - // if let Some(pending_epoch_config_change) = PendingEpochConfigChange::::take() { - // let next_epoch_config: BabeEpochConfiguration = - // pending_epoch_config_change.clone().into(); - // NextEpochConfig::::put(next_epoch_config); - // Self::deposit_consensus(ConsensusLog::NextConfigData(pending_epoch_config_change)); - // } - - Self::enact_tickets(); + let epoch_key = (epoch_idx & 1) as u8; + let mut tickets_metadata = TicketsMeta::::get(); + // Optionally finish sorting + if tickets_metadata.segments_count != 0 { + Self::sort_tickets(u32::MAX, epoch_key, &mut tickets_metadata); + } + // Clear the prev (equal to the next) epoch tickets counter. + let next_epoch_key = epoch_key ^ 1; + tickets_metadata.tickets_count[next_epoch_key as usize] = 0; + TicketsMeta::::set(tickets_metadata); } - /// Enact next epoch tickets list. - /// To work properly this should be done as the last action of the last epoch slot. - /// (i.e. 
current tickets list is not used at this point) - fn enact_tickets() { - // TODO-SASS-P2: manage skipped epoch by killing both Tickets and NextTickets + /// Call this function on epoch change to update the randomness. + /// Returns the next epoch randomness. + fn update_randomness(next_epoch_index: u64) -> Randomness { + let curr_randomness = NextRandomness::::get(); + CurrentRandomness::::put(curr_randomness); - let mut tickets = NextTickets::::get().into_iter().collect::>(); - log::debug!(target: "sassafras", "🌳 @@@@@@@@@ Enacting {} tickets", tickets.len()); + let accumulator = RandomnessAccumulator::::get(); + let mut s = Vec::with_capacity(2 * curr_randomness.len() + 8); + s.extend_from_slice(&curr_randomness); + s.extend_from_slice(&next_epoch_index.to_le_bytes()); + s.extend_from_slice(&accumulator); - if tickets.len() > T::MaxTickets::get() as usize { - log::error!(target: "sassafras", "🌳 should never happen..."); - let max = T::MaxTickets::get() as usize; - tickets.truncate(max); - } - let tickets = BoundedVec::::try_from(tickets) - .expect("vector has been eventually truncated; qed"); + let next_randomness = sp_io::hashing::blake2_256(&s); + NextRandomness::::put(&next_randomness); - Tickets::::put(tickets); - NextTickets::::kill(); + next_randomness } /// Finds the start slot of the current epoch. Only guaranteed to give correct results after @@ -567,7 +559,7 @@ impl Pallet { >::deposit_log(log) } - fn deposit_randomness(randomness: &schnorrkel::Randomness) { + fn deposit_randomness(randomness: &Randomness) { let mut s = RandomnessAccumulator::::get().to_vec(); s.extend_from_slice(randomness); let accumulator = sp_io::hashing::blake2_256(&s); @@ -575,36 +567,43 @@ impl Pallet { } // Initialize authorities on genesis phase. 
- // TODO-SASS-P2: temporary fix to make the compiler happy - #[allow(dead_code)] fn initialize_genesis_authorities(authorities: &[(AuthorityId, SassafrasAuthorityWeight)]) { - if !authorities.is_empty() { - assert!(Authorities::::get().is_empty(), "Authorities are already initialized!"); - let bounded_authorities = - WeakBoundedVec::<_, T::MaxAuthorities>::try_from(authorities.to_vec()) - .expect("Initial number of authorities should be lower than T::MaxAuthorities"); - Authorities::::put(&bounded_authorities); - NextAuthorities::::put(&bounded_authorities); + // Genesis authorities may have been initialized via other means (e.g. via session pallet). + // If this function has already been called with some authorities, then the new list + // should be match the previously set one. + let prev_authorities = Authorities::::get(); + if !prev_authorities.is_empty() { + if prev_authorities.to_vec() == authorities { + return + } else { + panic!("Authorities already were already initialized"); + } } + + let bounded_authorities = + WeakBoundedVec::<_, T::MaxAuthorities>::try_from(authorities.to_vec()) + .expect("Initial number of authorities should be lower than T::MaxAuthorities"); + Authorities::::put(&bounded_authorities); + NextAuthorities::::put(&bounded_authorities); } fn initialize_genesis_epoch(genesis_slot: Slot) { GenesisSlot::::put(genesis_slot); - debug_assert_ne!(*GenesisSlot::::get(), 0); - // Deposit a log because this is the first block in epoch #0. We use the same values - // as genesis because we haven't collected any randomness yet. + // Deposit a log because this is the first block in epoch #0. + // We use the same values as genesis because we haven't collected any randomness yet. let next = NextEpochDescriptor { authorities: Self::authorities().to_vec(), randomness: Self::randomness(), + config: None, }; - Self::deposit_consensus(ConsensusLog::NextEpochData(next)); } fn initialize(now: T::BlockNumber) { // Since `initialize` can be called twice (e.g. 
if session module is present) - // let's ensure that we only do the initialization once per block + // let's ensure that we only do the initialization once per block. + // TODO-SASS-P2: why session calls initialize? if Self::initialized().is_some() { return } @@ -622,89 +621,186 @@ impl Pallet { }) .next(); - // TODO-SASS-P2: maybe here we have to assert! the presence of pre_digest... - // Every valid sassafras block should come with a pre-digest - - if let Some(ref pre_digest) = pre_digest { - // The slot number of the current block being initialized - let current_slot = pre_digest.slot; - - // On the first non-zero block (i.e. block #1) this is where the first epoch - // (epoch #0) actually starts. We need to adjust internal storage accordingly. - if *GenesisSlot::::get() == 0 { - Self::initialize_genesis_epoch(current_slot) - } + let pre_digest = pre_digest.expect("Valid Sassafras block should have a pre-digest. qed"); // let Some(ref pre_digest) = pre_digest { + // + let current_slot = pre_digest.slot; + CurrentSlot::::put(current_slot); - CurrentSlot::::put(current_slot); + // On the first non-zero block (i.e. block #1) this is where the first epoch + // (epoch #0) actually starts. We need to adjust internal storage accordingly. + if *GenesisSlot::::get() == 0 { + Self::initialize_genesis_epoch(current_slot) } Initialized::::put(pre_digest); - // enact epoch change, if necessary. - T::EpochChangeTrigger::trigger::(now); - } - - /// Call this function exactly once when an epoch changes, to update the randomness. - /// Returns the new randomness. 
- fn randomness_change_epoch(next_epoch_index: u64) -> schnorrkel::Randomness { - let this_randomness = NextRandomness::::get(); - let accumulator = RandomnessAccumulator::::get(); - - let mut s = Vec::with_capacity(2 * this_randomness.len() + 8); - s.extend_from_slice(&this_randomness); - s.extend_from_slice(&next_epoch_index.to_le_bytes()); - s.extend_from_slice(&accumulator); - - let next_randomness = sp_io::hashing::blake2_256(&s); - NextRandomness::::put(&next_randomness); + // TODO-SASS-P2: incremental parial ordering for NextTickets - this_randomness + // Enact epoch change, if necessary. + T::EpochChangeTrigger::trigger::(now); } - /// Fetch expected ticket for the given slot. - // TODO-SASS-P2: This is a very inefficient and temporary solution. - // On refactory we will come up with a better solution (like a scattered vector). + /// Fetch expected ticket for the given slot according to an "outside-in" sorting strategy. + /// + /// Given an ordered sequence of tickets [t0, t1, t2, ..., tk] to be assigned to n slots, + /// with n >= k, then the tickets are assigned to the slots according to the following + /// strategy: + /// + /// slot-index : [ 0, 1, 2, ............ , n ] + /// tickets : [ t1, t3, t5, ... , t4, t2, t0 ]. + /// + /// With slot-index computed as `epoch_start() - slot`. + /// + /// If `slot` value falls within the current epoch then we fetch tickets from the `Tickets` + /// list. + /// + /// If `slot` value falls within the next epoch then we fetch tickets from the `NextTickets` + /// list. Note that in this case we may have not finished receiving all the tickets for that + /// epoch yet. The next epoch tickets should be considered "stable" only after the current + /// epoch first half (see the [`submit_tickets_unsigned_extrinsic`]). 
+ /// + /// Returns `None` if, according to the sorting strategy, there is no ticket associated to the + /// specified slot-index (happend if a ticket falls in the middle of an epoch and n > k), + /// or if the slot falls beyond the next epoch. pub fn slot_ticket(slot: Slot) -> Option { + let epoch_idx = EpochIndex::::get(); let duration = T::EpochDuration::get(); - let slot_idx = Self::slot_epoch_index(slot); // % duration; + let mut slot_idx = Self::slot_epoch_index(slot); + let mut tickets_meta = TicketsMeta::::get(); - // Given a list of ordered tickets: t0, t1, t2, ..., tk to be assigned to N slots (N>k) - // The tickets are assigned to the slots in the following order: t1, t3, ..., t4, t2, t0. - - let ticket_index = |slot_idx| { + let get_ticket_idx = |slot_idx| { let ticket_idx = if slot_idx < duration / 2 { 2 * slot_idx + 1 } else { 2 * (duration - (slot_idx + 1)) }; - log::debug!(target: "sassafras::runtime", "🌳 >>>>>>>>>>>>>> SLOT-IDX {} -> TICKET-IDX {}", slot_idx, ticket_idx); - ticket_idx as usize + log::debug!(target: "sassafras::runtime", "🌳 >>>>>>>> SLOT-IDX {} -> TICKET-IDX {}", slot_idx, ticket_idx); + ticket_idx as u32 + }; + + let mut epoch_key = (epoch_idx & 1) as u8; + + if duration <= slot_idx && slot_idx < 2 * duration { + // Try to get a ticket for the next epoch. Since its state values were not enacted yet, + // we may have to finish sorting the tickets. + epoch_key ^= 1; + slot_idx -= duration; + if tickets_meta.segments_count != 0 { + Self::sort_tickets(tickets_meta.segments_count, epoch_key, &mut tickets_meta); + TicketsMeta::::set(tickets_meta.clone()); + } + } else if slot_idx >= 2 * duration { + return None + } + + let ticket_idx = get_ticket_idx(slot_idx); + if ticket_idx < tickets_meta.tickets_count[epoch_key as usize] { + Tickets::::get((epoch_key, ticket_idx)) + } else { + None + } + } + + // Sort the tickets that belong to at most `max_iter` segments starting from the last. 
+ // If the `max_iter` value is equal to the number of segments then the result is truncated + // and saved as the tickets associated to `epoch_key`. + // Else the result is saved within the structure itself to be used on next iterations. + fn sort_tickets(max_iter: u32, epoch_key: u8, metadata: &mut TicketsMetadata) { + let mut segments_count = metadata.segments_count; + let max_iter = max_iter.min(segments_count); + let max_tickets = T::MaxTickets::get() as usize; + + let mut new_segment = if metadata.sort_started { + NextTicketsSegments::::take(u32::MAX).into_inner() + } else { + Vec::new() }; - // If this is a ticket for an epoch not enacted yet we have to fetch it from the - // `NextTickets` list. For example, this may happen when an author request the first - // ticket of a new epoch. - if slot_idx < duration { - let tickets = Tickets::::get(); - let idx = ticket_index(slot_idx); - tickets.get(idx).cloned() + let mut require_sort = max_iter != 0; + + let mut sup = if new_segment.len() >= max_tickets { + new_segment[new_segment.len() - 1] + } else { + Ticket::try_from([0xFF; 32]).expect("This is a valid ticket value; qed") + }; + + for _ in 0..max_iter { + let segment = NextTicketsSegments::::take(segments_count); + + segment.into_iter().filter(|t| t < &sup).for_each(|t| new_segment.push(t)); + if new_segment.len() > max_tickets { + require_sort = false; + new_segment.sort_unstable(); + new_segment.truncate(max_tickets); + sup = new_segment[new_segment.len() - 1]; + } + + segments_count -= 1; + } + + if require_sort { + new_segment.sort_unstable(); + } + + if segments_count == 0 { + // Sort is over, write to the map. + // TODO-SASS-P2: is there a better way to write a map from a vector? 
+ new_segment.iter().enumerate().for_each(|(i, t)| { + Tickets::::insert((epoch_key, i as u32), t); + }); + metadata.tickets_count[epoch_key as usize] = new_segment.len() as u32; } else { - let tickets = NextTickets::::get(); - // Do not use modulus since we want to eventually return `None` for slots crossing the - // epoch boundaries. - let idx = ticket_index(slot_idx - duration); - tickets.iter().nth(idx).cloned() + NextTicketsSegments::::insert(u32::MAX, BoundedVec::truncate_from(new_segment)); + metadata.sort_started = true; } + + metadata.segments_count = segments_count; } /// Submit next epoch validator tickets via an unsigned extrinsic. - pub fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool { + /// The submitted tickets are added to the `NextTickets` list as long as the extrinsic has + /// is called within the first half of the epoch. That is, tickets received within the + /// second half are dropped. + /// TODO-SASS-P3: we have to add the zk validity proofs + pub fn submit_tickets_unsigned_extrinsic(mut tickets: Vec) -> bool { log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ submitting {} tickets", tickets.len()); + tickets.sort_unstable(); + let tickets = BoundedVec::truncate_from(tickets); let call = Call::submit_tickets { tickets }; SubmitTransaction::>::submit_unsigned_transaction(call.into()).is_ok() } } +/// Trigger an epoch change, if any should take place. +pub trait EpochChangeTrigger { + /// Trigger an epoch change, if any should take place. This should be called + /// during every block, after initialization is done. + fn trigger(now: T::BlockNumber); +} + +/// A type signifying to Sassafras that an external trigger for epoch changes +/// (e.g. pallet-session) is used. +pub struct ExternalTrigger; + +impl EpochChangeTrigger for ExternalTrigger { + fn trigger(_: T::BlockNumber) {} // nothing - trigger is external. 
+} + +/// A type signifying to Sassafras that it should perform epoch changes with an internal +/// trigger, recycling the same authorities forever. +pub struct SameAuthoritiesForever; + +impl EpochChangeTrigger for SameAuthoritiesForever { + fn trigger(now: T::BlockNumber) { + if >::should_end_session(now) { + let authorities = >::authorities(); + let next_authorities = authorities.clone(); + + >::enact_session_change(authorities, next_authorities); + } + } +} + impl BoundToRuntimeAppPublic for Pallet { type Public = AuthorityId; } diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs new file mode 100644 index 0000000000000..25ef4f61fb881 --- /dev/null +++ b/frame/sassafras/src/mock.rs @@ -0,0 +1,231 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test utilities for Sassafras pallet. 
+ +use crate::{self as pallet_sassafras, SameAuthoritiesForever}; + +use frame_support::{ + parameter_types, + traits::{ConstU32, ConstU64, GenesisBuild, OnFinalize, OnInitialize}, +}; +use scale_codec::Encode; +use sp_consensus_sassafras::{ + digests::PreDigest, + vrf::{self, VRFOutput, VRFProof}, + AuthorityIndex, AuthorityPair, Slot, +}; +use sp_core::{ + crypto::{IsWrappedBy, Pair}, + H256, U256, +}; +use sp_runtime::{ + testing::{Digest, DigestItem, Header, TestXt}, + traits::IdentityLookup, +}; + +const EPOCH_DURATION: u64 = 10; +const MAX_TICKETS: u32 = 6; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +type DummyValidatorId = u64; + +type AccountData = u128; + +parameter_types! { + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); +} + +impl frame_system::Config for Test { + type Event = Event; + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = Call; + type Hash = H256; + type Version = (); + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = DummyValidatorId; + type Lookup = IdentityLookup; + type Header = Header; + type BlockHashCount = ConstU64<250>; + type PalletInfo = PalletInfo; + type AccountData = AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); //Sassafras; + type MinimumPeriod = ConstU64<1>; + type WeightInfo = (); +} + +impl frame_system::offchain::SendTransactionTypes for Test +where + Call: From, +{ + type OverarchingCall = Call; + type Extrinsic = TestXt; +} + +impl 
pallet_sassafras::Config for Test { + type EpochDuration = ConstU64; + type ExpectedBlockTime = ConstU64<1>; + type EpochChangeTrigger = SameAuthoritiesForever; + type MaxAuthorities = ConstU32<10>; + type MaxTickets = ConstU32; +} + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + Sassafras: pallet_sassafras, + } +); + +pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities { + new_test_ext_with_pairs(authorities_len).1 +} + +pub fn new_test_ext_with_pairs( + authorities_len: usize, +) -> (Vec, sp_io::TestExternalities) { + let pairs = (0..authorities_len) + .map(|i| AuthorityPair::from_seed(&U256::from(i).into())) + .collect::>(); + + let authorities = pairs.iter().map(|p| (p.public(), 1)).collect(); + + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let config = pallet_sassafras::GenesisConfig { authorities, epoch_config: Default::default() }; + >::assimilate_storage(&config, &mut t) + .unwrap(); + + (pairs, t.into()) +} + +fn make_ticket_vrf(slot: Slot, attempt: u32, pair: &AuthorityPair) -> (VRFOutput, VRFProof) { + let pair = sp_core::sr25519::Pair::from_ref(pair).as_ref(); + + let mut epoch = Sassafras::epoch_index(); + let mut randomness = Sassafras::randomness(); + + // Check if epoch is going to change on initialization + let epoch_start = Sassafras::current_epoch_start(); + if epoch_start != 0_u64 && slot >= epoch_start + EPOCH_DURATION { + epoch += slot.saturating_sub(epoch_start).saturating_div(EPOCH_DURATION); + randomness = crate::NextRandomness::::get(); + } + + let transcript = vrf::make_ticket_transcript(&randomness, attempt, epoch); + let inout = pair.vrf_sign(transcript); + let output = VRFOutput(inout.0.to_output()); + let proof = VRFProof(inout.1); + + (output, proof) +} + +pub fn make_tickets(slot: Slot, attempts: u32, pair: &AuthorityPair) -> Vec<(VRFOutput, VRFProof)> { 
+ (0..attempts) + .into_iter() + .map(|attempt| make_ticket_vrf(slot, attempt, pair)) + .collect() +} + +fn make_slot_vrf(slot: Slot, pair: &AuthorityPair) -> (VRFOutput, VRFProof) { + let pair = sp_core::sr25519::Pair::from_ref(pair).as_ref(); + + let mut epoch = Sassafras::epoch_index(); + let mut randomness = Sassafras::randomness(); + + // Check if epoch is going to change on initialization + let epoch_start = Sassafras::current_epoch_start(); + if epoch_start != 0_u64 && slot >= epoch_start + EPOCH_DURATION { + epoch += slot.saturating_sub(epoch_start).saturating_div(EPOCH_DURATION); + randomness = crate::NextRandomness::::get(); + } + + let transcript = vrf::make_slot_transcript(&randomness, slot, epoch); + let inout = pair.vrf_sign(transcript); + let output = VRFOutput(inout.0.to_output()); + let proof = VRFProof(inout.1); + + (output, proof) +} + +pub fn make_pre_digest( + authority_index: AuthorityIndex, + slot: Slot, + pair: &AuthorityPair, +) -> PreDigest { + let (vrf_output, vrf_proof) = make_slot_vrf(slot, pair); + PreDigest { authority_index, slot, vrf_output, vrf_proof, ticket_info: None } +} + +pub fn make_wrapped_pre_digest( + authority_index: AuthorityIndex, + slot: Slot, + pair: &AuthorityPair, +) -> Digest { + let pre_digest = make_pre_digest(authority_index, slot, pair); + let log = + DigestItem::PreRuntime(sp_consensus_sassafras::SASSAFRAS_ENGINE_ID, pre_digest.encode()); + Digest { logs: vec![log] } +} + +pub fn go_to_block(number: u64, slot: Slot, pair: &AuthorityPair) -> Digest { + Sassafras::on_finalize(System::block_number()); + let parent_hash = System::finalize().hash(); + + let digest = make_wrapped_pre_digest(0, slot, pair); + + System::reset_events(); + System::initialize(&number, &parent_hash, &digest); + Sassafras::on_initialize(number); + + digest +} + +/// Slots will grow accordingly to blocks +pub fn progress_to_block(number: u64, pair: &AuthorityPair) -> Option { + let mut slot = Sassafras::current_slot() + 1; + let mut digest 
= None; + for i in System::block_number() + 1..=number { + let dig = go_to_block(i, slot, pair); + digest = Some(dig); + slot = slot + 1; + } + digest +} diff --git a/frame/sassafras/src/session.rs b/frame/sassafras/src/session.rs new file mode 100644 index 0000000000000..15cdab95d8887 --- /dev/null +++ b/frame/sassafras/src/session.rs @@ -0,0 +1,114 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Sassafras implementation of traits required by session pallet. + +use super::*; +use frame_support::traits::{EstimateNextSessionRotation, OneSessionHandler}; +use pallet_session::ShouldEndSession; +use sp_runtime::{traits::SaturatedConversion, Permill}; + +impl ShouldEndSession for Pallet { + fn should_end_session(now: T::BlockNumber) -> bool { + // It might be (and it is in current implementation) that session module is calling + // `should_end_session` from it's own `on_initialize` handler, in which case it's + // possible that Sassafras's own `on_initialize` has not run yet, so let's ensure that we + // have initialized the pallet and updated the current slot. 
+ Self::initialize(now); + Self::should_end_session(now) + } +} + +impl OneSessionHandler for Pallet { + type Key = AuthorityId; + + fn on_genesis_session<'a, I: 'a>(validators: I) + where + I: Iterator, + { + let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); + Self::initialize_genesis_authorities(&authorities); + } + + fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, queued_validators: I) + where + I: Iterator, + { + let authorities = validators.map(|(_account, k)| (k, 1)).collect::>(); + let bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + authorities, + Some( + "Warning: The session has more validators than expected. \ + A runtime configuration adjustment may be needed.", + ), + ); + + let next_authorities = queued_validators.map(|(_account, k)| (k, 1)).collect::>(); + let next_bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + next_authorities, + Some( + "Warning: The session has more queued validators than expected. \ + A runtime configuration adjustment may be needed.", + ), + ); + + Self::enact_session_change(bounded_authorities, next_bounded_authorities) + } + + fn on_disabled(i: u32) { + Self::deposit_consensus(ConsensusLog::OnDisabled(i)) + } +} + +impl EstimateNextSessionRotation for Pallet { + fn average_session_length() -> T::BlockNumber { + T::EpochDuration::get().saturated_into() + } + + fn estimate_current_session_progress(_now: T::BlockNumber) -> (Option, Weight) { + let elapsed = CurrentSlot::::get().saturating_sub(Self::current_epoch_start()) + 1; + let progress = Permill::from_rational(*elapsed, T::EpochDuration::get()); + + // TODO-SASS-P2: Read: Current Slot, Epoch Index, Genesis Slot + (Some(progress), T::DbWeight::get().reads(3)) + } + + /// Return the _best guess_ block number, at which the next epoch change is predicted to happen. 
+ /// + /// Returns None if the prediction is in the past; This implies an internal error and should + /// not happen under normal circumstances. + /// + /// In other word, this is only accurate if no slots are missed. Given missed slots, the slot + /// number will grow while the block number will not. Hence, the result can be interpreted as an + /// upper bound. + // + // ## IMPORTANT NOTE + // + // This implementation is linked to how [`should_session_change`] is working. This might need + // to be updated accordingly, if the underlying mechanics of slot and epochs change. + fn estimate_next_session_rotation(now: T::BlockNumber) -> (Option, Weight) { + let next_slot = Self::current_epoch_start().saturating_add(T::EpochDuration::get()); + let upper_bound = next_slot.checked_sub(*CurrentSlot::::get()).map(|slots_remaining| { + // This is a best effort guess. Drifts in the slot/block ratio will cause errors here. + let blocks_remaining: T::BlockNumber = slots_remaining.saturated_into(); + now.saturating_add(blocks_remaining) + }); + + // TODO-SASS-P2: Read: Current Slot, Epoch Index, Genesis Slot + (upper_bound, T::DbWeight::get().reads(3)) + } +} diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs new file mode 100644 index 0000000000000..3eadff59cdd6f --- /dev/null +++ b/frame/sassafras/src/tests.rs @@ -0,0 +1,414 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for Sassafras pallet. + +use crate::*; +use mock::*; + +use frame_support::traits::{OnFinalize, OnInitialize}; +use hex_literal::hex; +use sp_consensus_sassafras::Slot; +use sp_runtime::traits::Get; + +#[test] +fn slot_ticket_fetch() { + let max_tickets: u32 = ::MaxTickets::get(); + assert_eq!(max_tickets, 6); + + let curr_tickets: Vec = (0..max_tickets as u8) + .into_iter() + .map(|i| [i; 32].try_into().unwrap()) + .collect(); + + let next_tickets: Vec = (0..(max_tickets - 1) as u8) + .into_iter() + .map(|i| [max_tickets as u8 + i; 32].try_into().unwrap()) + .collect(); + + new_test_ext(4).execute_with(|| { + curr_tickets.iter().enumerate().for_each(|(i, ticket)| { + Tickets::::insert((0, i as u32), ticket); + }); + next_tickets.iter().enumerate().for_each(|(i, ticket)| { + Tickets::::insert((1, i as u32), ticket); + }); + TicketsMeta::::set(TicketsMetadata { + tickets_count: [max_tickets, max_tickets - 1], + segments_count: 0, + sort_started: false, + }); + + // Test next session tickets fetch + assert_eq!(Sassafras::slot_ticket(0.into()), Some(curr_tickets[1])); + assert_eq!(Sassafras::slot_ticket(1.into()), Some(curr_tickets[3])); + assert_eq!(Sassafras::slot_ticket(2.into()), Some(curr_tickets[5])); + assert_eq!(Sassafras::slot_ticket(3.into()), None); + assert_eq!(Sassafras::slot_ticket(4.into()), None); + assert_eq!(Sassafras::slot_ticket(5.into()), None); + assert_eq!(Sassafras::slot_ticket(6.into()), None); + assert_eq!(Sassafras::slot_ticket(7.into()), Some(curr_tickets[4])); + assert_eq!(Sassafras::slot_ticket(8.into()), Some(curr_tickets[2])); + assert_eq!(Sassafras::slot_ticket(9.into()), Some(curr_tickets[0])); + + // Test next session tickets fetch + assert_eq!(Sassafras::slot_ticket(10.into()), Some(next_tickets[1])); + assert_eq!(Sassafras::slot_ticket(11.into()), Some(next_tickets[3])); + 
assert_eq!(Sassafras::slot_ticket(12.into()), None); //Some(next_tickets[5])); + assert_eq!(Sassafras::slot_ticket(13.into()), None); + assert_eq!(Sassafras::slot_ticket(14.into()), None); + assert_eq!(Sassafras::slot_ticket(15.into()), None); + assert_eq!(Sassafras::slot_ticket(16.into()), None); + assert_eq!(Sassafras::slot_ticket(17.into()), Some(next_tickets[4])); + assert_eq!(Sassafras::slot_ticket(18.into()), Some(next_tickets[2])); + assert_eq!(Sassafras::slot_ticket(19.into()), Some(next_tickets[0])); + + // Test fetch beyend next session + assert_eq!(Sassafras::slot_ticket(20.into()), None); + assert_eq!(Sassafras::slot_ticket(42.into()), None); + }); +} + +#[test] +fn genesis_values() { + new_test_ext(4).execute_with(|| { + assert_eq!(Sassafras::authorities().len(), 4); + assert_eq!(EpochConfig::::get(), Default::default()); + }); +} + +#[test] +fn on_first_block_after_genesis() { + let (pairs, mut ext) = new_test_ext_with_pairs(4); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + Sassafras::on_initialize(start_block); + + // Post-initialization status + + assert!(Initialized::::get().is_some()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), start_slot); + assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); + assert_eq!(RandomnessAccumulator::::get(), [0; 32]); + + Sassafras::on_finalize(1); + let header = System::finalize(); + + // Post-finalization status + + assert!(Initialized::::get().is_none()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot); + assert_eq!(Sassafras::epoch_index(), 
0); + assert_eq!(Sassafras::current_epoch_start(), start_slot); + assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); + assert_eq!( + RandomnessAccumulator::::get(), + hex!("50f7d623e15560a3681b085d0dc67b12fa16fefe5366987b58e0c16ba412a14a"), + ); + + // Header data check + + assert_eq!(header.digest.logs.len(), 2); + assert_eq!(header.digest.logs[0], digest.logs[0]); + + // Genesis epoch start deposits consensus + let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( + sp_consensus_sassafras::digests::NextEpochDescriptor { + authorities: NextAuthorities::::get().to_vec(), + randomness: NextRandomness::::get(), + config: None, + }, + ); + let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); + assert_eq!(header.digest.logs[1], consensus_digest) + }) +} + +#[test] +fn on_normal_block() { + let (pairs, mut ext) = new_test_ext_with_pairs(4); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + Sassafras::on_initialize(start_block); + + // We don't want to trigger an epoch change in this test. 
+ let epoch_duration: u64 = ::EpochDuration::get(); + assert!(epoch_duration > 2); + let digest = progress_to_block(2, &pairs[0]).unwrap(); + + // Post-initialization status + + assert!(Initialized::::get().is_some()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + 1); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), start_slot); + assert_eq!(Sassafras::current_slot_epoch_index(), 1); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); + assert_eq!( + RandomnessAccumulator::::get(), + hex!("50f7d623e15560a3681b085d0dc67b12fa16fefe5366987b58e0c16ba412a14a"), + ); + + Sassafras::on_finalize(2); + let header = System::finalize(); + + // Post-finalization status + + assert!(Initialized::::get().is_none()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + 1); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), start_slot); + assert_eq!(Sassafras::current_slot_epoch_index(), 1); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); + assert_eq!( + RandomnessAccumulator::::get(), + hex!("ea16f22af4afe5bfb8e3be3e257c3a88ae0c2406a4afc067871b6e5a7ae8756e"), + ); + + // Header data check + + assert_eq!(header.digest.logs.len(), 1); + assert_eq!(header.digest.logs[0], digest.logs[0]); + }); +} + +#[test] +fn epoch_change_block() { + let (pairs, mut ext) = new_test_ext_with_pairs(4); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + Sassafras::on_initialize(start_block); + + // We want to trigger an epoch change in this test. 
+ let epoch_duration: u64 = ::EpochDuration::get(); + let digest = progress_to_block(start_block + epoch_duration, &pairs[0]).unwrap(); + + // Post-initialization status + + assert!(Initialized::::get().is_some()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + epoch_duration); + assert_eq!(Sassafras::epoch_index(), 1); + assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_duration); + assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32],); + assert_eq!( + NextRandomness::::get(), + hex!("99da0ef0252bb8104737d1db0d80ae46079024c377f5bcecfe6545bd93c38d7b"), + ); + assert_eq!( + RandomnessAccumulator::::get(), + hex!("ec9f2acd75e3a901b3a3fad95267a275af1aded3df8bebebb8d14ebd2190ce59"), + ); + + Sassafras::on_finalize(start_block + epoch_duration); + let header = System::finalize(); + + // Post-finalization status + + assert!(Initialized::::get().is_none()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + epoch_duration); + assert_eq!(Sassafras::epoch_index(), 1); + assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_duration); + assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!( + NextRandomness::::get(), + hex!("99da0ef0252bb8104737d1db0d80ae46079024c377f5bcecfe6545bd93c38d7b"), + ); + assert_eq!( + RandomnessAccumulator::::get(), + hex!("d017578d6bad1856315866ce1ef845c2584873fcbc011db7dcb99f1f19baa6f3"), + ); + + // Header data check + + assert_eq!(header.digest.logs.len(), 2); + assert_eq!(header.digest.logs[0], digest.logs[0]); + // Deposits consensus log on epoch change + let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( + sp_consensus_sassafras::digests::NextEpochDescriptor { + authorities: NextAuthorities::::get().to_vec(), + randomness: NextRandomness::::get(), + config: None, + }, 
+ ); + let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); + assert_eq!(header.digest.logs[1], consensus_digest) + }) +} + +#[test] +fn submit_enact_claim_tickets() { + let (pairs, mut ext) = new_test_ext_with_pairs(4); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + let max_tickets: u32 = ::MaxTickets::get(); + + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + Sassafras::on_initialize(start_block); + + // We don't want to trigger an epoch change in this test. + let epoch_duration: u64 = ::EpochDuration::get(); + assert!(epoch_duration > 2); + let _digest = progress_to_block(2, &pairs[0]).unwrap(); + + // Check state before tickets submission + assert!(Tickets::::iter().next().is_none()); + + // Submit authoring tickets in three different batches. + // We can ignore the threshold since we are not passing through the unsigned extrinsic + // validation. 
+ let mut tickets: Vec = make_tickets(start_slot + 1, 3 * max_tickets, &pairs[0]) + .into_iter() + .map(|(output, _)| output) + .collect(); + let tickets0 = tickets[0..6].to_vec().try_into().unwrap(); + Sassafras::submit_tickets(Origin::none(), tickets0).unwrap(); + let tickets1 = tickets[6..12].to_vec().try_into().unwrap(); + Sassafras::submit_tickets(Origin::none(), tickets1).unwrap(); + let tickets2 = tickets[12..18].to_vec().try_into().unwrap(); + Sassafras::submit_tickets(Origin::none(), tickets2).unwrap(); + + tickets.sort(); + tickets.truncate(max_tickets as usize); + let expected_tickets = tickets; + + // Check state after submit + let meta = TicketsMeta::::get(); + assert!(Tickets::::iter().next().is_none()); + assert_eq!(meta.segments_count, 3); + assert_eq!(meta.tickets_count, [0, 0]); + + // Process up to the last epoch slot (do not enact epoch change) + let _digest = progress_to_block(epoch_duration, &pairs[0]).unwrap(); + + // TODO-SASS-P2: at this point next tickets should have been sorted + //assert_eq!(NextTicketsSegmentsCount::::get(), 0); + //assert!(Tickets::::iter().next().is_some()); + + // Check if we can claim next epoch tickets in outside-in fashion. 
+ let slot = Sassafras::current_slot(); + assert_eq!(Sassafras::slot_ticket(slot + 1).unwrap(), expected_tickets[1]); + assert_eq!(Sassafras::slot_ticket(slot + 2).unwrap(), expected_tickets[3]); + assert_eq!(Sassafras::slot_ticket(slot + 3).unwrap(), expected_tickets[5]); + assert!(Sassafras::slot_ticket(slot + 4).is_none()); + assert!(Sassafras::slot_ticket(slot + 7).is_none()); + assert_eq!(Sassafras::slot_ticket(slot + 8).unwrap(), expected_tickets[4]); + assert_eq!(Sassafras::slot_ticket(slot + 9).unwrap(), expected_tickets[2]); + assert_eq!(Sassafras::slot_ticket(slot + 10).unwrap(), expected_tickets[0]); + assert!(Sassafras::slot_ticket(slot + 11).is_none()); + + // Enact session change by progressing one more block + + let _digest = progress_to_block(epoch_duration + 1, &pairs[0]).unwrap(); + + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, 0); + assert_eq!(meta.tickets_count, [0, 6]); + + let slot = Sassafras::current_slot(); + assert_eq!(Sassafras::slot_ticket(slot).unwrap(), expected_tickets[1]); + assert_eq!(Sassafras::slot_ticket(slot + 1).unwrap(), expected_tickets[3]); + assert_eq!(Sassafras::slot_ticket(slot + 2).unwrap(), expected_tickets[5]); + assert!(Sassafras::slot_ticket(slot + 3).is_none()); + assert!(Sassafras::slot_ticket(slot + 6).is_none()); + assert_eq!(Sassafras::slot_ticket(slot + 7).unwrap(), expected_tickets[4]); + assert_eq!(Sassafras::slot_ticket(slot + 8).unwrap(), expected_tickets[2]); + assert_eq!(Sassafras::slot_ticket(slot + 9).unwrap(), expected_tickets[0]); + assert!(Sassafras::slot_ticket(slot + 10).is_none()); + }); +} + +#[test] +fn block_skips_epochs() { + let (pairs, mut ext) = new_test_ext_with_pairs(4); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + let epoch_duration: u64 = ::EpochDuration::get(); + + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + 
Sassafras::on_initialize(start_block); + + let tickets: Vec = make_tickets(start_slot + 1, 3, &pairs[0]) + .into_iter() + .map(|(output, _)| output) + .collect(); + Sassafras::submit_tickets(Origin::none(), BoundedVec::truncate_from(tickets.clone())) + .unwrap(); + + // Force enact of next tickets + assert_eq!(TicketsMeta::::get().segments_count, 1); + Sassafras::slot_ticket(start_slot + epoch_duration).unwrap(); + assert_eq!(TicketsMeta::::get().segments_count, 0); + + let next_random = NextRandomness::::get(); + + // We want to trigger an skip epoch in this test. + let offset = 3 * epoch_duration; + let _digest = go_to_block(start_block + offset, start_slot + offset, &pairs[0]); + + // Post-initialization status + + assert!(Initialized::::get().is_some()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + offset); + assert_eq!(Sassafras::epoch_index(), 3); + assert_eq!(Sassafras::current_epoch_start(), start_slot + offset); + assert_eq!(Sassafras::current_slot_epoch_index(), 0); + + // Tickets were discarded + let meta = TicketsMeta::::get(); + assert_eq!(meta, TicketsMetadata::default()); + // We've used the last known next epoch randomness as a fallback + assert_eq!(next_random, Sassafras::randomness()); + }); +} diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index 68116c6b91f70..eb318a5caa379 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -18,8 +18,8 @@ //! Private implementation details of Sassafras digests. 
use super::{ - AuthorityId, AuthorityIndex, AuthoritySignature, SassafrasAuthorityWeight, Slot, TicketInfo, - SASSAFRAS_ENGINE_ID, + AuthorityId, AuthorityIndex, AuthoritySignature, SassafrasAuthorityWeight, + SassafrasEpochConfiguration, Slot, TicketInfo, SASSAFRAS_ENGINE_ID, }; use scale_codec::{Decode, Encode, MaxEncodedLen}; @@ -36,22 +36,24 @@ pub struct PreDigest { pub authority_index: AuthorityIndex, /// Corresponding slot number. pub slot: Slot, - /// Block VRF output. - pub block_vrf_output: VRFOutput, - /// Block VRF proof. - pub block_vrf_proof: VRFProof, + /// Slot VRF output. + pub vrf_output: VRFOutput, + /// Slot VRF proof. + pub vrf_proof: VRFProof, /// Ticket information. pub ticket_info: Option, } /// Information about the next epoch. This is broadcast in the first block /// of the epoch. -#[derive(Decode, Encode, PartialEq, Eq, Clone, RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub struct NextEpochDescriptor { /// The authorities. pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, /// The value of randomness to use for the slot-assignment. pub randomness: Randomness, + /// Algorithm parameters. If not present, previous epoch parameters are used. + pub config: Option, } /// An consensus log item for BABE. 
diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 0546c99c52984..4754081fbc126 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -22,42 +22,35 @@ #![forbid(unsafe_code, missing_docs, unused_variables, unused_imports)] #![cfg_attr(not(feature = "std"), no_std)] -pub use merlin::Transcript; - use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; -#[cfg(feature = "std")] -use sp_keystore::vrf::{VRFTranscriptData, VRFTranscriptValue}; +use sp_core::{crypto, U256}; use sp_runtime::{ConsensusEngineId, RuntimeDebug}; use sp_std::vec::Vec; pub use sp_consensus_slots::{Slot, SlotDuration}; pub use sp_consensus_vrf::schnorrkel::{ - Randomness, VRFOutput, VRFProof, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, + PublicKey, Randomness, VRFOutput, VRFProof, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH, + VRF_PROOF_LENGTH, }; -/// Key type for Sassafras module. -pub const KEY_TYPE: sp_core::crypto::KeyTypeId = sp_application_crypto::key_types::SASSAFRAS; - pub mod digests; pub mod inherents; +pub mod vrf; mod app { use sp_application_crypto::{app_crypto, key_types::SASSAFRAS, sr25519}; app_crypto!(sr25519, SASSAFRAS); } +/// Key type for Sassafras protocol. +pub const KEY_TYPE: crypto::KeyTypeId = sp_application_crypto::key_types::SASSAFRAS; + /// The index of an authority. pub type AuthorityIndex = u32; -/// The prefix used by Sassafras for its ticket VRF keys. -pub const SASSAFRAS_TICKET_VRF_PREFIX: &[u8] = b"substrate-sassafras-ticket-vrf"; - -/// The prefix used by Sassafras for its post-block VRF keys. -pub const SASSAFRAS_BLOCK_VRF_PREFIX: &[u8] = b"substrate-sassafras-block-vrf"; - /// Sassafras authority keypair. Necessarily equivalent to the schnorrkel public key used in /// the main Sassafras module. If that ever changes, then this must, too. 
#[cfg(feature = "std")] @@ -87,25 +80,34 @@ pub type SassafrasBlockWeight = u32; /// Configuration data used by the Sassafras consensus engine. #[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq)] -pub struct SassafrasGenesisConfiguration { - /// The slot duration in milliseconds for Sassafras. +pub struct SassafrasConfiguration { + /// The slot duration in milliseconds. pub slot_duration: u64, /// The duration of epochs in slots. - pub epoch_length: u64, - /// The authorities for the genesis epoch. - pub genesis_authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, - /// The randomness for the genesis epoch. + pub epoch_duration: u64, + /// The authorities for the epoch. + pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + /// The randomness for the epoch. pub randomness: Randomness, + /// Tickets threshold parameters. + pub threshold_params: SassafrasEpochConfiguration, +} + +impl SassafrasConfiguration { + /// Get the slot duration defined in the genesis configuration. + pub fn slot_duration(&self) -> SlotDuration { + SlotDuration::from_millis(self.slot_duration) + } } /// Configuration data used by the Sassafras consensus engine that can be modified on epoch change. -#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo, Default)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct SassafrasEpochConfiguration { - // TODO-SASS-P2 - // x: redundancy_factor - // a: attempts number - // L: bound on aa number of tickets that can be gossiped + /// Redundancy factor. + pub redundancy_factor: u32, + /// Number of attempts for tickets generation. + pub attempts_number: u32, } /// Ticket type. @@ -122,65 +124,36 @@ pub struct TicketInfo { pub proof: VRFProof, } -/// Make slot VRF transcript. 
-pub fn make_slot_transcript(randomness: &Randomness, slot: Slot, epoch: u64) -> Transcript { - let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); - transcript.append_u64(b"slot number", *slot); - transcript.append_u64(b"current epoch", epoch); - transcript.append_message(b"chain randomness", &randomness[..]); - transcript +/// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: +/// - x: redundancy factor; +/// - s: number of slots in epoch; +/// - a: max number of attempts; +/// - v: number of validators in the epoch. +/// The parameters should be chosen such that T <= 1. +/// If `attempts * validators` is zero then we fall back to T = 0 +// TODO-SASS-P3: this formula must be double-checked... +#[inline] +pub fn compute_threshold(redundancy: u32, slots: u32, attempts: u32, validators: u32) -> U256 { + let den = attempts as u64 * validators as u64; + let num = redundancy as u64 * slots as u64; + U256::max_value() + .checked_div(den.into()) + .unwrap_or(U256::zero()) + .saturating_mul(num.into()) } -/// Make slot VRF transcript data container. -#[cfg(feature = "std")] -pub fn make_slot_transcript_data( - randomness: &Randomness, - slot: Slot, - epoch: u64, -) -> VRFTranscriptData { - VRFTranscriptData { - label: &SASSAFRAS_ENGINE_ID, - items: vec![ - ("slot number", VRFTranscriptValue::U64(*slot)), - ("current epoch", VRFTranscriptValue::U64(epoch)), - ("chain randomness", VRFTranscriptValue::Bytes(randomness.to_vec())), - ], - } -} - -/// Make ticket VRF transcript. -pub fn make_ticket_transcript(randomness: &[u8], attempt: u64, epoch: u64) -> Transcript { - let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); - transcript.append_message(b"type", b"ticket"); - transcript.append_u64(b"attempt", attempt); - transcript.append_u64(b"current epoch", epoch); - transcript.append_message(b"chain randomness", randomness); - transcript -} - -/// Make ticket VRF transcript data container. 
-#[cfg(feature = "std")] -pub fn make_ticket_transcript_data( - randomness: &[u8], - attempt: u64, - epoch: u64, -) -> VRFTranscriptData { - VRFTranscriptData { - label: &SASSAFRAS_ENGINE_ID, - items: vec![ - ("type", VRFTranscriptValue::Bytes(b"ticket".to_vec())), - ("attempt", VRFTranscriptValue::U64(attempt)), - ("current epoch", VRFTranscriptValue::U64(epoch)), - ("chain randomness", VRFTranscriptValue::Bytes(randomness.to_vec())), - ], - } +/// Returns true if the given VRF output is lower than the given threshold, false otherwise. +#[inline] +pub fn check_threshold(ticket: &Ticket, threshold: U256) -> bool { + U256::from(ticket.as_bytes()) < threshold } +// Runtime API. sp_api::decl_runtime_apis! { /// API necessary for block authorship with Sassafras. pub trait SassafrasApi { /// Return the genesis configuration for Sassafras. The configuration is only read on genesis. - fn configuration() -> SassafrasGenesisConfiguration; + fn configuration() -> SassafrasConfiguration; /// Submit next epoch validator tickets via an unsigned extrinsic. /// This method returns `false` when creation of the extrinsics fails. diff --git a/primitives/consensus/sassafras/src/vrf.rs b/primitives/consensus/sassafras/src/vrf.rs new file mode 100644 index 0000000000000..1c46fe77a6c6e --- /dev/null +++ b/primitives/consensus/sassafras/src/vrf.rs @@ -0,0 +1,92 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Primitives related to VRF input and output. + +pub use merlin::Transcript; + +pub use sp_consensus_slots::Slot; +pub use sp_consensus_vrf::schnorrkel::{PublicKey, Randomness, VRFOutput, VRFProof}; +#[cfg(feature = "std")] +use sp_keystore::vrf::{VRFTranscriptData, VRFTranscriptValue}; + +use crate::SASSAFRAS_ENGINE_ID; + +const TYPE_LABEL: &str = "type"; +const EPOCH_LABEL: &str = "epoch"; +const SLOT_LABEL: &str = "slot"; +const ATTEMPT_LABEL: &str = "attempt"; +const RANDOMNESS_LABEL: &str = "randomness"; + +const SLOT_VRF_TYPE_VALUE: &str = "slot-vrf"; +const TICKET_VRF_TYPE_VALUE: &str = "ticket-vrf"; + +/// Make slot VRF transcript. +pub fn make_slot_transcript(randomness: &Randomness, slot: Slot, epoch: u64) -> Transcript { + let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); + transcript.append_message(TYPE_LABEL.as_bytes(), SLOT_VRF_TYPE_VALUE.as_bytes()); + transcript.append_u64(SLOT_LABEL.as_bytes(), *slot); + transcript.append_u64(EPOCH_LABEL.as_bytes(), epoch); + transcript.append_message(RANDOMNESS_LABEL.as_bytes(), randomness); + transcript +} + +/// Make slot VRF transcript data container. +#[cfg(feature = "std")] +pub fn make_slot_transcript_data( + randomness: &Randomness, + slot: Slot, + epoch: u64, +) -> VRFTranscriptData { + VRFTranscriptData { + label: &SASSAFRAS_ENGINE_ID, + items: vec![ + (TYPE_LABEL, VRFTranscriptValue::Bytes(SLOT_VRF_TYPE_VALUE.as_bytes().to_vec())), + (SLOT_LABEL, VRFTranscriptValue::U64(*slot)), + (EPOCH_LABEL, VRFTranscriptValue::U64(epoch)), + (RANDOMNESS_LABEL, VRFTranscriptValue::Bytes(randomness.to_vec())), + ], + } +} + +/// Make ticket VRF transcript. 
+pub fn make_ticket_transcript(randomness: &Randomness, attempt: u32, epoch: u64) -> Transcript { + let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); + transcript.append_message(TYPE_LABEL.as_bytes(), TICKET_VRF_TYPE_VALUE.as_bytes()); + transcript.append_u64(ATTEMPT_LABEL.as_bytes(), attempt as u64); + transcript.append_u64(EPOCH_LABEL.as_bytes(), epoch); + transcript.append_message(RANDOMNESS_LABEL.as_bytes(), randomness); + transcript +} + +/// Make ticket VRF transcript data container. +#[cfg(feature = "std")] +pub fn make_ticket_transcript_data( + randomness: &Randomness, + attempt: u32, + epoch: u64, +) -> VRFTranscriptData { + VRFTranscriptData { + label: &SASSAFRAS_ENGINE_ID, + items: vec![ + (TYPE_LABEL, VRFTranscriptValue::Bytes(TICKET_VRF_TYPE_VALUE.as_bytes().to_vec())), + (ATTEMPT_LABEL, VRFTranscriptValue::U64(attempt as u64)), + (EPOCH_LABEL, VRFTranscriptValue::U64(epoch)), + (RANDOMNESS_LABEL, VRFTranscriptValue::Bytes(randomness.to_vec())), + ], + } +}